-rw-r--r--Documentation/ABI/obsolete/proc-pid-oom_adj22
-rw-r--r--Documentation/android.txt121
-rw-r--r--Documentation/cpu-freq/governors.txt76
-rw-r--r--Documentation/devicetree/bindings/arm/pmu.txt3
-rw-r--r--Documentation/filesystems/proc.txt3
-rw-r--r--Documentation/kernel-parameters.txt9
-rw-r--r--MAINTAINERS40
-rw-r--r--Makefile2
-rw-r--r--arch/arm/Kconfig94
-rw-r--r--arch/arm/Kconfig.debug21
-rw-r--r--arch/arm/boot/compressed/head.S3
-rw-r--r--arch/arm/boot/dts/Makefile1
-rw-r--r--arch/arm/boot/dts/at91sam9260.dtsi18
-rw-r--r--arch/arm/boot/dts/at91sam9263.dtsi36
-rw-r--r--arch/arm/boot/dts/at91sam9g45.dtsi36
-rw-r--r--arch/arm/boot/dts/at91sam9n12.dtsi26
-rw-r--r--arch/arm/boot/dts/at91sam9x5.dtsi32
-rw-r--r--arch/arm/common/Kconfig50
-rw-r--r--arch/arm/common/Makefile2
-rw-r--r--arch/arm/common/fiq_debugger.c1370
-rw-r--r--arch/arm/common/fiq_debugger_ringbuf.h94
-rw-r--r--arch/arm/common/fiq_glue.S111
-rw-r--r--arch/arm/common/fiq_glue_setup.c100
-rw-r--r--arch/arm/include/asm/cacheflush.h1
-rw-r--r--arch/arm/include/asm/fiq_debugger.h64
-rw-r--r--arch/arm/include/asm/fiq_glue.h30
-rw-r--r--arch/arm/include/asm/hardirq.h2
-rw-r--r--arch/arm/include/asm/hardware/coresight.h50
-rw-r--r--arch/arm/include/asm/hw_breakpoint.h3
-rw-r--r--arch/arm/include/asm/irq.h3
-rw-r--r--arch/arm/include/asm/mach/mmc.h28
-rw-r--r--arch/arm/include/asm/pmu.h12
-rw-r--r--arch/arm/include/asm/rodata.h32
-rw-r--r--arch/arm/include/asm/smp.h2
-rw-r--r--arch/arm/include/asm/topology.h34
-rw-r--r--arch/arm/kernel/etm.c680
-rw-r--r--arch/arm/kernel/ftrace.c15
-rw-r--r--arch/arm/kernel/hw_breakpoint.c56
-rw-r--r--arch/arm/kernel/perf_event.c19
-rw-r--r--arch/arm/kernel/perf_event_cpu.c117
-rw-r--r--arch/arm/kernel/perf_event_v7.c57
-rw-r--r--arch/arm/kernel/process.c119
-rw-r--r--arch/arm/kernel/smp.c58
-rw-r--r--arch/arm/kernel/topology.c120
-rw-r--r--arch/arm/mach-pxa/include/mach/mfp-pxa27x.h3
-rw-r--r--arch/arm/mach-pxa/pxa27x.c4
-rw-r--r--arch/arm/mm/Makefile1
-rw-r--r--arch/arm/mm/cache-v6.S17
-rw-r--r--arch/arm/mm/mmu.c83
-rw-r--r--arch/arm/mm/rodata.c159
-rw-r--r--arch/arm/vfp/entry.S1
-rw-r--r--arch/arm64/boot/dts/Makefile1
-rw-r--r--arch/arm64/include/asm/pgtable.h36
-rw-r--r--arch/arm64/include/asm/unistd32.h7
-rw-r--r--arch/arm64/kernel/vdso.c4
-rw-r--r--arch/arm64/kernel/vdso/gettimeofday.S2
-rw-r--r--arch/mn10300/Kconfig1
-rw-r--r--arch/powerpc/include/uapi/asm/kvm_para.h2
-rw-r--r--arch/powerpc/kvm/book3s_hv_ras.c4
-rw-r--r--arch/s390/Makefile4
-rw-r--r--arch/s390/include/asm/dma.h6
-rw-r--r--arch/s390/include/asm/io.h5
-rw-r--r--arch/s390/include/asm/irq.h78
-rw-r--r--arch/s390/include/asm/pgtable.h5
-rw-r--r--arch/s390/include/asm/timex.h28
-rw-r--r--arch/s390/include/uapi/asm/unistd.h3
-rw-r--r--arch/s390/kernel/compat_wrapper.S6
-rw-r--r--arch/s390/kernel/debug.c11
-rw-r--r--arch/s390/kernel/irq.c124
-rw-r--r--arch/s390/kernel/nmi.c2
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c2
-rw-r--r--arch/s390/kernel/runtime_instr.c2
-rw-r--r--arch/s390/kernel/setup.c3
-rw-r--r--arch/s390/kernel/smp.c12
-rw-r--r--arch/s390/kernel/syscalls.S1
-rw-r--r--arch/s390/kernel/time.c6
-rw-r--r--arch/s390/kernel/topology.c2
-rw-r--r--arch/s390/kvm/interrupt.c2
-rw-r--r--arch/s390/kvm/kvm-s390.c2
-rw-r--r--arch/s390/mm/fault.c2
-rw-r--r--arch/s390/oprofile/hwsampler.c2
-rw-r--r--arch/s390/pci/pci.c33
-rw-r--r--arch/s390/pci/pci_dma.c2
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c10
-rw-r--r--arch/sh/include/asm/elf.h4
-rw-r--r--arch/sh/include/asm/processor_32.h2
-rw-r--r--arch/sh/include/asm/processor_64.h2
-rw-r--r--arch/sh/include/uapi/asm/unistd_32.h3
-rw-r--r--arch/sh/include/uapi/asm/unistd_64.h3
-rw-r--r--arch/sh/kernel/syscalls_32.S1
-rw-r--r--arch/sh/kernel/syscalls_64.S1
-rw-r--r--arch/sh/lib/mcount.S2
-rw-r--r--arch/sparc/include/uapi/asm/unistd.h3
-rw-r--r--arch/sparc/kernel/pci.c12
-rw-r--r--arch/sparc/kernel/pci_psycho.c3
-rw-r--r--arch/sparc/kernel/pci_sabre.c3
-rw-r--r--arch/sparc/kernel/pci_schizo.c5
-rw-r--r--arch/sparc/kernel/systbls_32.S2
-rw-r--r--arch/sparc/kernel/systbls_64.S4
-rw-r--r--arch/x86/boot/compressed/eboot.c2
-rw-r--r--arch/x86/include/asm/idle.h7
-rw-r--r--arch/x86/kernel/kvm.c12
-rw-r--r--arch/x86/kernel/process.c17
-rw-r--r--arch/x86/kernel/setup.c80
-rw-r--r--arch/x86/kvm/x86.c24
-rw-r--r--block/genhd.c17
-rw-r--r--block/partition-generic.c11
-rw-r--r--drivers/Kconfig4
-rw-r--r--drivers/Makefile3
-rw-r--r--drivers/acpi/glue.c2
-rw-r--r--drivers/base/Kconfig26
-rw-r--r--drivers/base/Makefile3
-rw-r--r--drivers/base/cpu.c2
-rw-r--r--drivers/base/firmware_class.c2
-rw-r--r--drivers/base/power/main.c44
-rw-r--r--drivers/base/regmap/regmap-debugfs.c53
-rw-r--r--drivers/base/sw_sync.c262
-rw-r--r--drivers/base/sync.c1009
-rw-r--r--drivers/char/Kconfig17
-rw-r--r--drivers/char/Makefile1
-rw-r--r--drivers/char/dcc_tty.c326
-rw-r--r--drivers/char/mem.c17
-rw-r--r--drivers/cpufreq/Kconfig27
-rw-r--r--drivers/cpufreq/Makefile1
-rw-r--r--drivers/cpufreq/cpufreq.c321
-rw-r--r--drivers/cpufreq/cpufreq_interactive.c1066
-rw-r--r--drivers/cpufreq/cpufreq_stats.c52
-rw-r--r--drivers/cpufreq/freq_table.c9
-rw-r--r--drivers/cpuidle/cpuidle.c17
-rw-r--r--drivers/cpuidle/driver.c25
-rw-r--r--drivers/cpuidle/governors/menu.c15
-rw-r--r--drivers/cpuidle/sysfs.c2
-rw-r--r--drivers/gator/Kconfig33
-rw-r--r--drivers/gator/LICENSE339
-rw-r--r--drivers/gator/Makefile71
-rw-r--r--drivers/gator/gator.h121
-rw-r--r--drivers/gator/gator_annotate.c168
-rw-r--r--drivers/gator/gator_annotate_kernel.c157
-rw-r--r--drivers/gator/gator_backtrace.c127
-rw-r--r--drivers/gator/gator_cookies.c397
-rw-r--r--drivers/gator/gator_events.sh19
-rw-r--r--drivers/gator/gator_events_armv6.c244
-rw-r--r--drivers/gator/gator_events_armv7.c319
-rw-r--r--drivers/gator/gator_events_block.c160
-rw-r--r--drivers/gator/gator_events_irq.c189
-rw-r--r--drivers/gator/gator_events_l2c-310.c177
-rw-r--r--drivers/gator/gator_events_mali_400.c738
-rw-r--r--drivers/gator/gator_events_mali_400.h18
-rw-r--r--drivers/gator/gator_events_mali_common.c74
-rw-r--r--drivers/gator/gator_events_mali_common.h88
-rw-r--r--drivers/gator/gator_events_mali_t6xx.c512
-rw-r--r--drivers/gator/gator_events_mali_t6xx_hw.c722
-rw-r--r--drivers/gator/gator_events_mali_t6xx_hw_test.c55
-rw-r--r--drivers/gator/gator_events_meminfo.c240
-rw-r--r--drivers/gator/gator_events_mmaped.c229
-rw-r--r--drivers/gator/gator_events_net.c171
-rw-r--r--drivers/gator/gator_events_perf_pmu.c298
-rw-r--r--drivers/gator/gator_events_sched.c115
-rw-r--r--drivers/gator/gator_events_scorpion.c676
-rw-r--r--drivers/gator/gator_fs.c295
-rw-r--r--drivers/gator/gator_hrtimer_gator.c97
-rw-r--r--drivers/gator/gator_hrtimer_perf.c113
-rw-r--r--drivers/gator/gator_main.c1254
-rw-r--r--drivers/gator/gator_marshaling.c338
-rw-r--r--drivers/gator/gator_pack.c185
-rw-r--r--drivers/gator/gator_trace_gpu.c219
-rw-r--r--drivers/gator/gator_trace_gpu.h79
-rw-r--r--drivers/gator/gator_trace_power.c188
-rw-r--r--drivers/gator/gator_trace_sched.c208
-rw-r--r--drivers/gator/mali_t6xx.mk25
-rw-r--r--drivers/gpu/Makefile2
-rw-r--r--drivers/gpu/drm/drm_mm.c45
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c25
-rw-r--r--drivers/gpu/drm/i915/intel_display.c33
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c25
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/core/client.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/core/handle.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c46
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/client.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/base.c35
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c30
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h1
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c1
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c8
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c17
-rw-r--r--drivers/gpu/ion/Kconfig13
-rw-r--r--drivers/gpu/ion/Makefile3
-rw-r--r--drivers/gpu/ion/ion.c1358
-rw-r--r--drivers/gpu/ion/ion_carveout_heap.c182
-rw-r--r--drivers/gpu/ion/ion_heap.c72
-rw-r--r--drivers/gpu/ion/ion_page_pool.c280
-rw-r--r--drivers/gpu/ion/ion_priv.h257
-rw-r--r--drivers/gpu/ion/ion_system_heap.c494
-rw-r--r--drivers/gpu/ion/ion_system_mapper.c114
-rw-r--r--drivers/gpu/ion/tegra/Makefile1
-rw-r--r--drivers/gpu/ion/tegra/tegra_ion.c96
-rw-r--r--drivers/hid/hid-input.c10
-rw-r--r--drivers/hid/hid-magicmouse.c15
-rw-r--r--drivers/hid/hid-multitouch.c16
-rw-r--r--drivers/hwmon/vexpress.c1
-rw-r--r--drivers/iio/accel/Kconfig1
-rw-r--r--drivers/iio/adc/ad7266.c6
-rw-r--r--drivers/iio/adc/at91_adc.c2
-rw-r--r--drivers/iio/adc/max1363.c13
-rw-r--r--drivers/iio/common/hid-sensors/Kconfig13
-rw-r--r--drivers/iio/common/hid-sensors/Makefile3
-rw-r--r--drivers/iio/dac/ad5380.c6
-rw-r--r--drivers/iio/dac/ad5446.c6
-rw-r--r--drivers/iio/dac/ad5504.c6
-rw-r--r--drivers/iio/dac/ad5624r_spi.c6
-rw-r--r--drivers/iio/dac/ad5686.c6
-rw-r--r--drivers/iio/dac/ad5791.c13
-rw-r--r--drivers/iio/frequency/adf4350.c2
-rw-r--r--drivers/iio/gyro/Kconfig1
-rw-r--r--drivers/iio/light/Kconfig1
-rw-r--r--drivers/iio/magnetometer/Kconfig1
-rw-r--r--drivers/input/Kconfig9
-rw-r--r--drivers/input/Makefile2
-rw-r--r--drivers/input/evdev.c55
-rw-r--r--drivers/input/keyreset.c239
-rw-r--r--drivers/input/misc/Kconfig16
-rw-r--r--drivers/input/misc/Makefile2
-rw-r--r--drivers/input/misc/gpio_axis.c192
-rw-r--r--drivers/input/misc/gpio_event.c239
-rw-r--r--drivers/input/misc/gpio_input.c376
-rw-r--r--drivers/input/misc/gpio_matrix.c441
-rw-r--r--drivers/input/misc/gpio_output.c97
-rw-r--r--drivers/input/misc/keychord.c387
-rw-r--r--drivers/input/touchscreen/Kconfig6
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/synaptics_i2c_rmi.c675
-rw-r--r--drivers/leds/Kconfig6
-rw-r--r--drivers/leds/Makefile2
-rw-r--r--drivers/leds/ledtrig-cpu.c25
-rw-r--r--drivers/leds/ledtrig-sleep.c80
-rw-r--r--drivers/mfd/Kconfig1
-rw-r--r--drivers/misc/Kconfig20
-rw-r--r--drivers/misc/Makefile4
-rw-r--r--drivers/misc/akm8975.c732
-rw-r--r--drivers/misc/atmel-ssc.c8
-rw-r--r--drivers/misc/mei/amthif.c6
-rw-r--r--drivers/misc/uid_stat.c156
-rw-r--r--drivers/misc/wl127x-rfkill.c121
-rw-r--r--drivers/mmc/card/Kconfig9
-rw-r--r--drivers/mmc/card/block.c32
-rw-r--r--drivers/mmc/core/Kconfig17
-rw-r--r--drivers/mmc/core/core.c94
-rw-r--r--drivers/mmc/core/host.c10
-rw-r--r--drivers/mmc/core/sd.c86
-rw-r--r--drivers/mmc/core/sdio.c149
-rw-r--r--drivers/mmc/core/sdio_bus.c13
-rwxr-xr-x[-rw-r--r--]drivers/mmc/core/sdio_io.c33
-rw-r--r--drivers/mmc/host/sdhci.c5
-rw-r--r--drivers/mtd/nand/Kconfig7
-rw-r--r--drivers/net/ethernet/adi/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c30
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c60
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c29
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/ppp/Kconfig17
-rw-r--r--drivers/net/ppp/Makefile2
-rw-r--r--drivers/net/ppp/pppolac.c449
-rw-r--r--drivers/net/ppp/pppopns.c428
-rw-r--r--drivers/net/tun.c50
-rw-r--r--drivers/net/wireless/Kconfig6
-rw-r--r--drivers/net/wireless/Makefile2
-rw-r--r--drivers/net/wireless/ath/Kconfig1
-rw-r--r--drivers/net/wireless/ath/Makefile1
-rw-r--r--drivers/net/wireless/ath/wil6210/Kconfig29
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile13
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c573
-rw-r--r--drivers/net/wireless/ath/wil6210/dbg_hexdump.h30
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c603
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c471
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c407
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c157
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c223
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c871
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h362
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h363
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c975
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h1116
-rw-r--r--drivers/net/wireless/b43/b43.h5
-rw-r--r--drivers/net/wireless/b43/main.c54
-rw-r--r--drivers/net/wireless/b43/main.h5
-rw-r--r--drivers/net/wireless/bcmdhd/Kconfig47
-rw-r--r--drivers/net/wireless/bcmdhd/Makefile39
-rw-r--r--drivers/net/wireless/bcmdhd/aiutils.c836
-rw-r--r--drivers/net/wireless/bcmdhd/bcmevent.c148
-rw-r--r--drivers/net/wireless/bcmdhd/bcmsdh.c726
-rw-r--r--drivers/net/wireless/bcmdhd/bcmsdh_linux.c742
-rw-r--r--drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c1432
-rw-r--r--drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c402
-rw-r--r--drivers/net/wireless/bcmdhd/bcmutils.c2093
-rw-r--r--drivers/net/wireless/bcmdhd/bcmwifi_channels.c936
-rw-r--r--drivers/net/wireless/bcmdhd/bcmwifi_channels.h345
-rw-r--r--drivers/net/wireless/bcmdhd/bcmwifi_rates.h306
-rw-r--r--drivers/net/wireless/bcmdhd/dhd.h789
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_bta.c338
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_bta.h39
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_bus.h109
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_cdc.c2838
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_cfg80211.c673
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_cfg80211.h44
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_common.c2337
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_custom_gpio.c293
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_dbg.h110
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_linux.c5331
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_linux_sched.c39
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_proto.h109
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_sdio.c6962
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_wlfc.h278
-rw-r--r--drivers/net/wireless/bcmdhd/dngl_stats.h43
-rw-r--r--drivers/net/wireless/bcmdhd/dngl_wlhdr.h40
-rw-r--r--drivers/net/wireless/bcmdhd/hndpmu.c197
-rw-r--r--drivers/net/wireless/bcmdhd/include/Makefile53
-rw-r--r--drivers/net/wireless/bcmdhd/include/aidmp.h375
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcm_android_types.h44
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcm_cfg.h29
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h361
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmcdc.h126
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmdefs.h239
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmdevs.h487
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmendian.h278
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmnvram.h179
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmpcispi.h181
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmperf.h36
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmsdbus.h131
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmsdh.h238
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h123
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmsdpcm.h274
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmsdspi.h135
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmsdstd.h248
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmspi.h40
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmspibrcm.h139
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmsrom_fmt.h607
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmsrom_tbl.h900
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmutils.h775
-rw-r--r--drivers/net/wireless/bcmdhd/include/dbus.h571
-rw-r--r--drivers/net/wireless/bcmdhd/include/dhdioctl.h135
-rw-r--r--drivers/net/wireless/bcmdhd/include/epivers.h48
-rw-r--r--drivers/net/wireless/bcmdhd/include/hndpmu.h36
-rw-r--r--drivers/net/wireless/bcmdhd/include/hndrte_armtrap.h88
-rw-r--r--drivers/net/wireless/bcmdhd/include/hndrte_cons.h67
-rw-r--r--drivers/net/wireless/bcmdhd/include/hndsoc.h235
-rw-r--r--drivers/net/wireless/bcmdhd/include/linux_osl.h422
-rw-r--r--drivers/net/wireless/bcmdhd/include/linuxver.h608
-rw-r--r--drivers/net/wireless/bcmdhd/include/miniopt.h77
-rw-r--r--drivers/net/wireless/bcmdhd/include/msgtrace.h74
-rw-r--r--drivers/net/wireless/bcmdhd/include/osl.h88
-rw-r--r--drivers/net/wireless/bcmdhd/include/packed_section_end.h53
-rw-r--r--drivers/net/wireless/bcmdhd/include/packed_section_start.h60
-rw-r--r--drivers/net/wireless/bcmdhd/include/pcicfg.h90
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/802.11.h2253
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h45
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/802.11e.h131
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/802.1d.h48
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/bcmeth.h82
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/bcmevent.h329
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/bcmip.h210
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/bcmipv6.h104
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h441
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/eapol.h193
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/ethernet.h162
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/p2p.h564
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/sdspi.h75
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/vlan.h69
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/wpa.h203
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/wps.h379
-rw-r--r--drivers/net/wireless/bcmdhd/include/rwl_wifi.h96
-rw-r--r--drivers/net/wireless/bcmdhd/include/sbchipc.h2233
-rw-r--r--drivers/net/wireless/bcmdhd/include/sbconfig.h275
-rw-r--r--drivers/net/wireless/bcmdhd/include/sbhnddma.h370
-rw-r--r--drivers/net/wireless/bcmdhd/include/sbpcmcia.h108
-rw-r--r--drivers/net/wireless/bcmdhd/include/sbsdio.h187
-rw-r--r--drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h293
-rw-r--r--drivers/net/wireless/bcmdhd/include/sbsocram.h193
-rw-r--r--drivers/net/wireless/bcmdhd/include/sdio.h617
-rw-r--r--drivers/net/wireless/bcmdhd/include/sdioh.h441
-rw-r--r--drivers/net/wireless/bcmdhd/include/sdiovar.h58
-rw-r--r--drivers/net/wireless/bcmdhd/include/siutils.h313
-rw-r--r--drivers/net/wireless/bcmdhd/include/spid.h153
-rw-r--r--drivers/net/wireless/bcmdhd/include/trxhdr.h53
-rw-r--r--drivers/net/wireless/bcmdhd/include/typedefs.h310
-rw-r--r--drivers/net/wireless/bcmdhd/include/usbrdl.h203
-rw-r--r--drivers/net/wireless/bcmdhd/include/wlc_extlog_idstr.h117
-rw-r--r--drivers/net/wireless/bcmdhd/include/wlfc_proto.h229
-rw-r--r--drivers/net/wireless/bcmdhd/include/wlioctl.h5067
-rw-r--r--drivers/net/wireless/bcmdhd/linux_osl.c1053
-rw-r--r--drivers/net/wireless/bcmdhd/sbutils.c1001
-rw-r--r--drivers/net/wireless/bcmdhd/siutils.c2356
-rw-r--r--drivers/net/wireless/bcmdhd/siutils_priv.h246
-rw-r--r--drivers/net/wireless/bcmdhd/uamp_api.h176
-rw-r--r--drivers/net/wireless/bcmdhd/wl_android.c845
-rw-r--r--drivers/net/wireless/bcmdhd/wl_android.h57
-rw-r--r--drivers/net/wireless/bcmdhd/wl_cfg80211.c7332
-rw-r--r--drivers/net/wireless/bcmdhd/wl_cfg80211.h658
-rw-r--r--drivers/net/wireless/bcmdhd/wl_cfgp2p.c1967
-rw-r--r--drivers/net/wireless/bcmdhd/wl_cfgp2p.h284
-rw-r--r--drivers/net/wireless/bcmdhd/wl_dbg.h63
-rw-r--r--drivers/net/wireless/bcmdhd/wl_iw.c3737
-rw-r--r--drivers/net/wireless/bcmdhd/wl_iw.h161
-rw-r--r--drivers/net/wireless/bcmdhd/wl_linux_mon.c422
-rw-r--r--drivers/net/wireless/bcmdhd/wldev_common.c368
-rw-r--r--drivers/net/wireless/bcmdhd/wldev_common.h110
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c24
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c1
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c2
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c21
-rw-r--r--drivers/net/wireless/mwl8k.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/phy.c2
-rw-r--r--drivers/pci/iov.c2
-rw-r--r--drivers/platform/x86/acer-wmi.c60
-rw-r--r--drivers/platform/x86/asus-laptop.c19
-rw-r--r--drivers/platform/x86/samsung-laptop.c10
-rw-r--r--drivers/platform/x86/sony-laptop.c15
-rw-r--r--drivers/power/Kconfig10
-rw-r--r--drivers/power/Makefile2
-rw-r--r--drivers/power/android_battery.c767
-rw-r--r--drivers/power/power_supply_core.c30
-rw-r--r--drivers/regulator/core.c15
-rw-r--r--drivers/regulator/max8997.c36
-rw-r--r--drivers/regulator/max8998.c44
-rw-r--r--drivers/regulator/s5m8767.c4
-rw-r--r--drivers/rtc/rtc-da9055.c2
-rw-r--r--drivers/s390/block/dasd_diag.c2
-rw-r--r--drivers/s390/block/dasd_eckd.c2
-rw-r--r--drivers/s390/block/dasd_fba.c2
-rw-r--r--drivers/s390/char/con3215.c8
-rw-r--r--drivers/s390/char/raw3270.c2
-rw-r--r--drivers/s390/char/sclp.c4
-rw-r--r--drivers/s390/char/tape_34xx.c2
-rw-r--r--drivers/s390/char/tape_3590.c2
-rw-r--r--drivers/s390/char/vmur.c2
-rw-r--r--drivers/s390/cio/chsc.c31
-rw-r--r--drivers/s390/cio/chsc_sch.c2
-rw-r--r--drivers/s390/cio/cio.c10
-rw-r--r--drivers/s390/cio/device.c12
-rw-r--r--drivers/s390/cio/device.h5
-rw-r--r--drivers/s390/cio/eadm_sch.c2
-rw-r--r--drivers/s390/cio/qdio_thinint.c2
-rw-r--r--drivers/s390/crypto/ap_bus.c2
-rw-r--r--drivers/s390/kvm/kvm_virtio.c2
-rw-r--r--drivers/s390/net/claw.c2
-rw-r--r--drivers/s390/net/ctcm_main.c2
-rw-r--r--drivers/s390/net/lcs.c2
-rw-r--r--drivers/sh/clk/cpg.c6
-rw-r--r--drivers/staging/android/Kconfig9
-rw-r--r--drivers/staging/android/TODO10
-rw-r--r--drivers/staging/android/alarm-dev.c1
-rw-r--r--drivers/staging/android/ashmem.c21
-rw-r--r--drivers/staging/android/logger.c192
-rw-r--r--drivers/staging/android/logger.h40
-rw-r--r--drivers/staging/android/lowmemorykiller.c88
-rw-r--r--drivers/staging/comedi/Kconfig1
-rw-r--r--drivers/staging/comedi/comedi_fops.c3
-rw-r--r--drivers/staging/comedi/drivers/comedi_test.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c16
-rw-r--r--drivers/staging/fwserial/Kconfig4
-rw-r--r--drivers/staging/fwserial/TODO14
-rw-r--r--drivers/staging/fwserial/fwserial.c2
-rw-r--r--drivers/staging/fwserial/fwserial.h6
-rw-r--r--drivers/staging/iio/gyro/Kconfig4
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c1
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-common.c5
-rw-r--r--drivers/staging/imx-drm/ipuv3-crtc.c6
-rw-r--r--drivers/staging/omapdrm/Makefile1
-rw-r--r--drivers/staging/omapdrm/TODO3
-rw-r--r--drivers/staging/omapdrm/omap_connector.c111
-rw-r--r--drivers/staging/omapdrm/omap_crtc.c507
-rw-r--r--drivers/staging/omapdrm/omap_drv.c439
-rw-r--r--drivers/staging/omapdrm/omap_drv.h140
-rw-r--r--drivers/staging/omapdrm/omap_encoder.c132
-rw-r--r--drivers/staging/omapdrm/omap_gem_dmabuf.c2
-rw-r--r--drivers/staging/omapdrm/omap_irq.c322
-rw-r--r--drivers/staging/omapdrm/omap_plane.c452
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c3
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c11
-rw-r--r--drivers/staging/rtl8712/usb_intf.c2
-rw-r--r--drivers/staging/sb105x/Kconfig1
-rw-r--r--drivers/staging/sb105x/sb_pci_mp.c2
-rw-r--r--drivers/staging/speakup/synth.c4
-rw-r--r--drivers/staging/tidspbridge/core/_tiomap.h2
-rw-r--r--drivers/staging/tidspbridge/core/dsp-clock.c13
-rw-r--r--drivers/staging/tidspbridge/core/wdt.c12
-rw-r--r--drivers/staging/vme/devices/vme_pio2_core.c14
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c7
-rw-r--r--drivers/staging/zram/zram_drv.c39
-rw-r--r--drivers/switch/Kconfig15
-rw-r--r--drivers/switch/Makefile4
-rw-r--r--drivers/switch/switch_class.c174
-rw-r--r--drivers/switch/switch_gpio.c172
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c2
-rw-r--r--drivers/target/target_core_alua.c2
-rw-r--r--drivers/target/target_core_pr.c2
-rw-r--r--drivers/target/target_core_transport.c20
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c12
-rw-r--r--drivers/tty/serial/serial_core.c3
-rw-r--r--drivers/usb/Kconfig1
-rw-r--r--drivers/usb/chipidea/host.c3
-rw-r--r--drivers/usb/class/cdc-acm.c3
-rw-r--r--drivers/usb/core/hub.c120
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/dwc3/debugfs.c2
-rw-r--r--drivers/usb/gadget/Kconfig9
-rw-r--r--drivers/usb/gadget/Makefile3
-rw-r--r--drivers/usb/gadget/amd5536udc.c4
-rw-r--r--drivers/usb/gadget/android.c1568
-rw-r--r--drivers/usb/gadget/composite.c5
-rw-r--r--drivers/usb/gadget/dummy_hcd.c9
-rw-r--r--drivers/usb/gadget/f_accessory.c1180
-rw-r--r--drivers/usb/gadget/f_adb.c619
-rw-r--r--drivers/usb/gadget/f_audio_source.c828
-rw-r--r--drivers/usb/gadget/f_mtp.c1283
-rw-r--r--drivers/usb/gadget/mv_udc_core.c4
-rw-r--r--drivers/usb/gadget/rndis.c11
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c5
-rw-r--r--drivers/usb/gadget/tcm_usb_gadget.c3
-rw-r--r--drivers/usb/gadget/u_serial.c2
-rw-r--r--drivers/usb/host/ehci-fsl.c9
-rw-r--r--drivers/usb/host/ehci-mv.c4
-rw-r--r--drivers/usb/host/ehci-pci.c39
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c3
-rw-r--r--drivers/usb/host/imx21-hcd.c1
-rw-r--r--drivers/usb/host/ohci-tmio.c3
-rw-r--r--drivers/usb/host/xhci-hub.c38
-rw-r--r--drivers/usb/host/xhci-mem.c2
-rw-r--r--drivers/usb/host/xhci-ring.c9
-rw-r--r--drivers/usb/host/xhci.c10
-rw-r--r--drivers/usb/misc/usbtest.c2
-rw-r--r--drivers/usb/musb/musb_core.c5
-rw-r--r--drivers/usb/musb/musb_dsps.c5
-rw-r--r--drivers/usb/otg/Kconfig10
-rw-r--r--drivers/usb/otg/Makefile1
-rw-r--r--drivers/usb/otg/mv_otg.c4
-rw-r--r--drivers/usb/otg/otg-wakelock.c170
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c22
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c3
-rw-r--r--drivers/usb/serial/ftdi_sio.c2
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h6
-rw-r--r--drivers/usb/serial/option.c18
-rw-r--r--drivers/video/Kconfig2
-rw-r--r--drivers/video/ssd1307fb.c4
-rw-r--r--fs/Kconfig4
-rw-r--r--fs/Makefile3
-rw-r--r--fs/buffer.c7
-rw-r--r--fs/debugfs/inode.c2
-rw-r--r--fs/exec.c3
-rw-r--r--fs/ext4/ext4_jbd2.c4
-rw-r--r--fs/ext4/inode.c4
-rw-r--r--fs/fat/dir.c9
-rw-r--r--fs/fat/fat.h1
-rw-r--r--fs/fat/inode.c9
-rw-r--r--fs/fs-writeback.c2
-rw-r--r--fs/fuse/dev.c6
-rw-r--r--fs/jbd/journal.c3
-rw-r--r--fs/proc/base.c16
-rw-r--r--fs/seq_file.c2
-rw-r--r--fs/udf/super.c3
-rw-r--r--fs/xfs/xfs_buf.c12
-rw-r--r--fs/xfs/xfs_buf.h6
-rw-r--r--fs/xfs/xfs_buf_item.c49
-rw-r--r--fs/xfs/xfs_buf_item.h2
-rw-r--r--fs/xfs/xfs_dir2_block.c6
-rw-r--r--fs/xfs/xfs_qm_syscalls.c4
-rw-r--r--fs/xfs/xfs_trans_buf.c27
-rw-r--r--fs/yaffs2/Kconfig161
-rw-r--r--fs/yaffs2/Makefile17
-rw-r--r--fs/yaffs2/yaffs_allocator.c396
-rw-r--r--fs/yaffs2/yaffs_allocator.h30
-rw-r--r--fs/yaffs2/yaffs_attribs.c124
-rw-r--r--fs/yaffs2/yaffs_attribs.h28
-rw-r--r--fs/yaffs2/yaffs_bitmap.c98
-rw-r--r--fs/yaffs2/yaffs_bitmap.h33
-rw-r--r--fs/yaffs2/yaffs_checkptrw.c415
-rw-r--r--fs/yaffs2/yaffs_checkptrw.h33
-rw-r--r--fs/yaffs2/yaffs_ecc.c298
-rw-r--r--fs/yaffs2/yaffs_ecc.h44
-rw-r--r--fs/yaffs2/yaffs_getblockinfo.h35
-rw-r--r--fs/yaffs2/yaffs_guts.c5164
-rw-r--r--fs/yaffs2/yaffs_guts.h915
-rw-r--r--fs/yaffs2/yaffs_linux.h41
-rw-r--r--fs/yaffs2/yaffs_mtdif.c54
-rw-r--r--fs/yaffs2/yaffs_mtdif.h23
-rw-r--r--fs/yaffs2/yaffs_mtdif1.c330
-rw-r--r--fs/yaffs2/yaffs_mtdif1.h29
-rw-r--r--fs/yaffs2/yaffs_mtdif2.c225
-rw-r--r--fs/yaffs2/yaffs_mtdif2.h29
-rw-r--r--fs/yaffs2/yaffs_nameval.c201
-rw-r--r--fs/yaffs2/yaffs_nameval.h28
-rw-r--r--fs/yaffs2/yaffs_nand.c127
-rw-r--r--fs/yaffs2/yaffs_nand.h38
-rw-r--r--fs/yaffs2/yaffs_packedtags1.c53
-rw-r--r--fs/yaffs2/yaffs_packedtags1.h39
-rw-r--r--fs/yaffs2/yaffs_packedtags2.c196
-rw-r--r--fs/yaffs2/yaffs_packedtags2.h47
-rw-r--r--fs/yaffs2/yaffs_tagscompat.c422
-rw-r--r--fs/yaffs2/yaffs_tagscompat.h36
-rw-r--r--fs/yaffs2/yaffs_tagsvalidity.c27
-rw-r--r--fs/yaffs2/yaffs_tagsvalidity.h23
-rw-r--r--fs/yaffs2/yaffs_trace.h57
-rw-r--r--fs/yaffs2/yaffs_verify.c535
-rw-r--r--fs/yaffs2/yaffs_verify.h43
-rw-r--r--fs/yaffs2/yaffs_vfs.c2792
-rw-r--r--fs/yaffs2/yaffs_yaffs1.c433
-rw-r--r--fs/yaffs2/yaffs_yaffs1.h22
-rw-r--r--fs/yaffs2/yaffs_yaffs2.c1598
-rw-r--r--fs/yaffs2/yaffs_yaffs2.h39
-rw-r--r--fs/yaffs2/yportenv.h70
-rw-r--r--include/asm-generic/pgtable.h6
-rw-r--r--include/drm/drm_mm.h2
-rw-r--r--include/linux/akm8975.h87
-rw-r--r--include/linux/amba/mmci.h12
-rw-r--r--include/linux/android_aid.h28
-rw-r--r--include/linux/audit.h4
-rw-r--r--include/linux/buffer_head.h2
-rw-r--r--include/linux/cgroup.h1
-rw-r--r--include/linux/compaction.h4
-rw-r--r--include/linux/cpu.h7
-rw-r--r--include/linux/cpu_rmap.h13
-rw-r--r--include/linux/cpufreq.h17
-rw-r--r--include/linux/cpuidle.h2
-rw-r--r--include/linux/gpio_event.h170
-rw-r--r--include/linux/hid.h4
-rw-r--r--include/linux/if_pppolac.h33
-rw-r--r--include/linux/if_pppopns.h32
-rw-r--r--include/linux/if_pppox.h21
-rw-r--r--include/linux/init.h20
-rw-r--r--include/linux/interrupt.h5
-rw-r--r--include/linux/ion.h354
-rw-r--r--include/linux/kernel.h3
-rw-r--r--include/linux/keychord.h52
-rw-r--r--include/linux/keyreset.h28
-rw-r--r--include/linux/lockdep.h3
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/mmc/host.h35
-rw-r--r--include/linux/mmc/pm.h1
-rwxr-xr-x[-rw-r--r--]include/linux/mmc/sdio_func.h10
-rw-r--r--include/linux/netdevice.h3
-rw-r--r--include/linux/netfilter/xt_qtaguid.h13
-rw-r--r--include/linux/netfilter/xt_quota2.h25
-rw-r--r--include/linux/platform_data/android_battery.h47
-rw-r--r--include/linux/power_supply.h4
-rw-r--r--include/linux/rbtree_augmented.h14
-rw-r--r--include/linux/rwsem.h9
-rw-r--r--include/linux/sched.h17
-rw-r--r--include/linux/serial_core.h1
-rw-r--r--include/linux/sw_sync.h58
-rw-r--r--include/linux/switch.h53
-rw-r--r--include/linux/synaptics_i2c_rmi.h55
-rw-r--r--include/linux/sync.h426
-rw-r--r--include/linux/uid_stat.h29
-rw-r--r--include/linux/usb/f_accessory.h146
-rw-r--r--include/linux/usb/f_mtp.h75
-rw-r--r--include/linux/wakelock.h67
-rw-r--r--include/linux/wifi_tiwlan.h27
-rw-r--r--include/linux/wl127x-rfkill.h35
-rw-r--r--include/linux/wlan_plat.h27
-rw-r--r--include/net/activity_stats.h25
-rw-r--r--include/net/bluetooth/hci.h3
-rw-r--r--include/net/bluetooth/hci_core.h2
-rw-r--r--include/net/tcp.h2
-rw-r--r--include/sound/cs4271.h2
-rw-r--r--include/sound/soc.h10
-rw-r--r--include/target/target_core_base.h1
-rw-r--r--include/trace/events/cpufreq_interactive.h112
-rw-r--r--include/trace/events/power.h19
-rw-r--r--include/trace/events/sched.h153
-rw-r--r--include/trace/events/sync.h82
-rw-r--r--include/uapi/linux/Kbuild2
-rw-r--r--include/uapi/linux/audit.h2
-rw-r--r--include/uapi/linux/if_pppox.h5
-rw-r--r--include/uapi/linux/input.h3
-rw-r--r--include/uapi/linux/msdos_fs.h12
-rw-r--r--include/uapi/linux/netfilter/xt_IDLETIMER.h8
-rw-r--r--include/uapi/linux/netfilter/xt_socket.h6
-rw-r--r--include/uapi/linux/oom.h11
-rw-r--r--include/uapi/linux/sockios.h1
-rw-r--r--init/Kconfig8
-rw-r--r--kernel/async.c3
-rw-r--r--kernel/audit.c40
-rw-r--r--kernel/audit_tree.c26
-rw-r--r--kernel/audit_watch.c2
-rw-r--r--kernel/auditfilter.c1
-rw-r--r--kernel/auditsc.c20
-rw-r--r--kernel/cpu.c20
-rw-r--r--kernel/debug/debug_core.c12
-rw-r--r--kernel/debug/kdb/kdb_io.c12
-rw-r--r--kernel/fork.c20
-rw-r--r--kernel/irq/irqdesc.c21
-rw-r--r--kernel/irq/pm.c7
-rw-r--r--kernel/module.c27
-rw-r--r--kernel/panic.c13
-rw-r--r--kernel/power/Kconfig22
-rw-r--r--kernel/power/Makefile2
-rw-r--r--kernel/power/fbearlysuspend.c116
-rw-r--r--kernel/power/suspend_time.c111
-rw-r--r--kernel/rwsem.c10
-rw-r--r--kernel/sched/core.c17
-rw-r--r--kernel/sched/debug.c3
-rw-r--r--kernel/sched/fair.c1021
-rw-r--r--kernel/sched/sched.h13
-rw-r--r--kernel/sysctl.c8
-rw-r--r--kernel/trace/trace.c17
-rw-r--r--lib/Kconfig.debug3
-rw-r--r--lib/cpu_rmap.c54
-rw-r--r--lib/rbtree.c20
-rw-r--r--linaro/configs/android.conf32
-rw-r--r--linaro/configs/arndale.conf67
-rw-r--r--linaro/configs/big-LITTLE-MP.conf13
-rw-r--r--linaro/configs/highbank.conf39
-rw-r--r--linaro/configs/imx5.conf105
-rw-r--r--linaro/configs/linaro-base.conf90
-rw-r--r--linaro/configs/null.conf1
-rw-r--r--linaro/configs/omap4.conf204
-rw-r--r--linaro/configs/origen.conf88
-rw-r--r--linaro/configs/u8500.conf118
-rw-r--r--linaro/configs/ubuntu-minimal.conf24
-rw-r--r--linaro/configs/ubuntu.conf2129
-rw-r--r--linaro/configs/vexpress.conf35
-rw-r--r--linaro/configs/vexpress64.conf94
-rw-r--r--mm/bootmem.c24
-rw-r--r--mm/compaction.c98
-rw-r--r--mm/huge_memory.c15
-rw-r--r--mm/internal.h1
-rw-r--r--mm/memblock.c3
-rw-r--r--mm/migrate.c14
-rw-r--r--mm/mmap.c2
-rw-r--r--mm/page_alloc.c40
-rw-r--r--mm/shmem.c13
-rw-r--r--net/Kconfig16
-rw-r--r--net/Makefile1
-rw-r--r--net/activity_stats.c115
-rw-r--r--net/bluetooth/af_bluetooth.c38
-rw-r--r--net/bluetooth/hci_conn.c18
-rwxr-xr-x[-rw-r--r--]net/bluetooth/hci_event.c13
-rw-r--r--net/bluetooth/rfcomm/core.c1
-rw-r--r--net/bridge/br_device.c11
-rw-r--r--net/core/dev.c8
-rw-r--r--net/ipv4/Makefile1
-rw-r--r--net/ipv4/af_inet.c18
-rw-r--r--net/ipv4/devinet.c8
-rw-r--r--net/ipv4/ip_sockglue.c2
-rw-r--r--net/ipv4/netfilter/Kconfig12
-rw-r--r--net/ipv4/netfilter/ipt_REJECT.c8
-rw-r--r--net/ipv4/sysfs_net_ipv4.c88
-rw-r--r--net/ipv4/tcp.c132
-rw-r--r--net/ipv4/tcp_input.c4
-rw-r--r--net/ipv6/addrconf.c27
-rw-r--r--net/ipv6/af_inet6.c34
-rw-r--r--net/ipv6/netfilter/Kconfig12
-rw-r--r--net/ipv6/netfilter/ip6t_REJECT.c9
-rw-r--r--net/iucv/iucv.c2
-rw-r--r--net/mac80211/cfg.c2
-rw-r--r--net/mac80211/chan.c38
-rw-r--r--net/mac80211/ibss.c9
-rw-r--r--net/mac80211/ieee80211_i.h16
-rw-r--r--net/mac80211/iface.c48
-rw-r--r--net/mac80211/mesh.c8
-rw-r--r--net/mac80211/mesh.h2
-rw-r--r--net/mac80211/mlme.c75
-rw-r--r--net/mac80211/scan.c46
-rw-r--r--net/mac80211/sta_info.c46
-rw-r--r--net/mac80211/sta_info.h3
-rw-r--r--net/netfilter/Kconfig42
-rw-r--r--net/netfilter/Makefile2
-rw-r--r--net/netfilter/xt_IDLETIMER.c78
-rw-r--r--net/netfilter/xt_qtaguid.c2982
-rw-r--r--net/netfilter/xt_qtaguid_internal.h333
-rw-r--r--net/netfilter/xt_qtaguid_print.c564
-rw-r--r--net/netfilter/xt_qtaguid_print.h120
-rw-r--r--net/netfilter/xt_quota2.c390
-rw-r--r--net/netfilter/xt_socket.c69
-rw-r--r--net/rfkill/Kconfig5
-rw-r--r--net/rfkill/core.c4
-rw-r--r--net/sunrpc/clnt.c2
-rw-r--r--net/sunrpc/sched.c3
-rw-r--r--net/sunrpc/xprt.c12
-rw-r--r--net/wireless/Kconfig11
-rw-r--r--net/wireless/core.c3
-rw-r--r--net/wireless/scan.c2
-rw-r--r--net/wireless/sme.c6
-rw-r--r--scripts/package/builddeb86
-rw-r--r--security/commoncap.c11
-rw-r--r--sound/arm/pxa2xx-ac97-lib.c26
-rw-r--r--sound/pci/hda/hda_intel.c13
-rw-r--r--sound/pci/hda/patch_conexant.c16
-rw-r--r--sound/pci/hda/patch_hdmi.c2
-rw-r--r--sound/pci/hda/patch_realtek.c24
-rw-r--r--sound/pci/rme9652/hdspm.c17
-rw-r--r--sound/soc/codecs/arizona.c9
-rw-r--r--sound/soc/codecs/arizona.h18
-rw-r--r--sound/soc/codecs/cs4271.c6
-rw-r--r--sound/soc/codecs/cs42l52.c4
-rw-r--r--sound/soc/codecs/lm49453.c106
-rw-r--r--sound/soc/codecs/sgtl5000.c4
-rw-r--r--sound/soc/codecs/sta529.c9
-rw-r--r--sound/soc/codecs/wm2000.c4
-rw-r--r--sound/soc/codecs/wm2200.c8
-rw-r--r--sound/soc/codecs/wm5100.c6
-rw-r--r--sound/soc/codecs/wm5102.c48
-rw-r--r--sound/soc/codecs/wm_adsp.c23
-rw-r--r--sound/soc/soc-core.c35
-rw-r--r--sound/soc/soc-pcm.c1
-rw-r--r--sound/usb/mixer_maps.c13
-rw-r--r--sound/usb/mixer_quirks.c2
-rw-r--r--sound/usb/pcm.c10
-rw-r--r--sound/usb/quirks-table.h4
-rw-r--r--sound/usb/quirks.c19
-rw-r--r--tools/gator/Makefile4
-rw-r--r--tools/gator/daemon/Android.mk39
-rw-r--r--tools/gator/daemon/CapturedXML.cpp150
-rw-r--r--tools/gator/daemon/CapturedXML.h27
-rw-r--r--tools/gator/daemon/Child.cpp315
-rw-r--r--tools/gator/daemon/Child.h31
-rw-r--r--tools/gator/daemon/Collector.cpp292
-rw-r--r--tools/gator/daemon/Collector.h37
-rw-r--r--tools/gator/daemon/ConfigurationXML.cpp188
-rw-r--r--tools/gator/daemon/ConfigurationXML.h32
-rw-r--r--tools/gator/daemon/Fifo.cpp125
-rw-r--r--tools/gator/daemon/Fifo.h33
-rw-r--r--tools/gator/daemon/LocalCapture.cpp128
-rw-r--r--tools/gator/daemon/LocalCapture.h26
-rw-r--r--tools/gator/daemon/Logging.cpp79
-rw-r--r--tools/gator/daemon/Logging.h47
-rw-r--r--tools/gator/daemon/Makefile58
-rw-r--r--tools/gator/daemon/OlySocket.cpp259
-rw-r--r--tools/gator/daemon/OlySocket.h36
-rw-r--r--tools/gator/daemon/OlyUtility.cpp228
-rw-r--r--tools/gator/daemon/OlyUtility.h40
-rw-r--r--tools/gator/daemon/Sender.cpp109
-rw-r--r--tools/gator/daemon/Sender.h37
-rw-r--r--tools/gator/daemon/SessionData.cpp100
-rw-r--r--tools/gator/daemon/SessionData.h71
-rw-r--r--tools/gator/daemon/SessionXML.cpp105
-rw-r--r--tools/gator/daemon/SessionXML.h36
-rw-r--r--tools/gator/daemon/StreamlineSetup.cpp326
-rw-r--r--tools/gator/daemon/StreamlineSetup.h45
-rw-r--r--tools/gator/daemon/configuration.xml47
-rw-r--r--tools/gator/daemon/escape.c63
-rw-r--r--tools/gator/daemon/events-ARM11.xml39
-rw-r--r--tools/gator/daemon/events-ARM11MPCore.xml26
-rw-r--r--tools/gator/daemon/events-Cortex-A15.xml68
-rw-r--r--tools/gator/daemon/events-Cortex-A5.xml36
-rw-r--r--tools/gator/daemon/events-Cortex-A53.xml171
-rw-r--r--tools/gator/daemon/events-Cortex-A57.xml171
-rw-r--r--tools/gator/daemon/events-Cortex-A7.xml44
-rw-r--r--tools/gator/daemon/events-Cortex-A8.xml52
-rw-r--r--tools/gator/daemon/events-Cortex-A9.xml65
-rw-r--r--tools/gator/daemon/events-Krait-architected.xml22
-rw-r--r--tools/gator/daemon/events-L2C-310.xml18
-rw-r--r--tools/gator/daemon/events-Linux.xml15
-rw-r--r--tools/gator/daemon/events-Mali-400.xml382
-rw-r--r--tools/gator/daemon/events-Mali-T6xx.xml38
-rw-r--r--tools/gator/daemon/events-Mali-T6xx_hw.xml278
-rw-r--r--tools/gator/daemon/events-Scorpion.xml107
-rw-r--r--tools/gator/daemon/events-ScorpionMP.xml90
-rw-r--r--tools/gator/daemon/events_footer.xml1
-rw-r--r--tools/gator/daemon/events_header.xml2
-rw-r--r--tools/gator/daemon/main.cpp383
-rw-r--r--tools/gator/daemon/mxml/COPYING507
-rw-r--r--tools/gator/daemon/mxml/config.h96
-rw-r--r--tools/gator/daemon/mxml/mxml-attr.c319
-rw-r--r--tools/gator/daemon/mxml/mxml-entity.c460
-rw-r--r--tools/gator/daemon/mxml/mxml-file.c3082
-rw-r--r--tools/gator/daemon/mxml/mxml-get.c471
-rw-r--r--tools/gator/daemon/mxml/mxml-index.c662
-rw-r--r--tools/gator/daemon/mxml/mxml-node.c807
-rw-r--r--tools/gator/daemon/mxml/mxml-private.c331
-rw-r--r--tools/gator/daemon/mxml/mxml-private.h50
-rw-r--r--tools/gator/daemon/mxml/mxml-search.c287
-rw-r--r--tools/gator/daemon/mxml/mxml-set.c349
-rw-r--r--tools/gator/daemon/mxml/mxml-string.c476
-rw-r--r--tools/gator/daemon/mxml/mxml.h329
-rw-r--r--tools/gator/debian/changelog132
-rw-r--r--tools/gator/debian/compat1
-rw-r--r--tools/gator/debian/control38
-rw-r--r--tools/gator/debian/copyright15
-rw-r--r--tools/gator/debian/dkms.conf.in7
-rw-r--r--tools/gator/debian/gator-daemon.install1
-rw-r--r--tools/gator/debian/gator-daemon.postrm41
-rw-r--r--tools/gator/debian/gator-daemon.preinst36
-rw-r--r--tools/gator/debian/gator-daemon.upstart10
-rwxr-xr-xtools/gator/debian/gator-module-dkms.install.in2
-rwxr-xr-xtools/gator/debian/gator-module-dkms.postinst49
-rwxr-xr-xtools/gator/debian/gator-module-dkms.prerm29
-rwxr-xr-xtools/gator/debian/rules15
-rwxr-xr-xtools/gator/debian/scripts/do-packaging104
908 files changed, 157387 insertions, 2912 deletions
diff --git a/Documentation/ABI/obsolete/proc-pid-oom_adj b/Documentation/ABI/obsolete/proc-pid-oom_adj
new file mode 100644
index 000000000000..9a3cb88ade47
--- /dev/null
+++ b/Documentation/ABI/obsolete/proc-pid-oom_adj
@@ -0,0 +1,22 @@
+What: /proc/<pid>/oom_adj
+When: August 2012
+Why: /proc/<pid>/oom_adj allows userspace to influence the oom killer's
+ badness heuristic used to determine which task to kill when the kernel
+ is out of memory.
+
+ The badness heuristic has been rewritten since the introduction of
+ this tunable such that its meaning is deprecated. The value was
+ implemented as a bitshift on a score generated by the badness()
+ function that did not have any precise units of measure. With the
+ rewrite, the score is given as a proportion of available memory to the
+ task allocating pages, so using a bitshift which grows the score
+ exponentially is, thus, impossible to tune with fine granularity.
+
+ A much more powerful interface, /proc/<pid>/oom_score_adj, was
+ introduced with the oom killer rewrite that allows users to increase or
+ decrease the badness score linearly. This interface will replace
+ /proc/<pid>/oom_adj.
+
+ A warning will be emitted to the kernel log if an application uses this
+ deprecated interface. After it is printed once, future warnings will be
+ suppressed until the kernel is rebooted.
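
As a quick illustration of the replacement interface mentioned above, a process
can adjust its own badness score by writing to /proc/self/oom_score_adj. The C
sketch below is a usage example, not part of the patch; the -1000..1000 value
range is an assumption about the interface, not stated in the text above.

    /* Minimal sketch: make this task less attractive to the OOM killer
     * via the newer oom_score_adj interface.  The -1000..1000 range is
     * an assumption, not stated in the text above. */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            FILE *f = fopen("/proc/self/oom_score_adj", "w");

            if (!f) {
                    perror("oom_score_adj");
                    return EXIT_FAILURE;
            }
            fprintf(f, "%d\n", -500);    /* lower our badness score */
            fclose(f);
            return 0;
    }
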
diff --git a/Documentation/android.txt b/Documentation/android.txt
new file mode 100644
index 000000000000..72a62afdf202
--- /dev/null
+++ b/Documentation/android.txt
@@ -0,0 +1,121 @@
+ =============
+ A N D R O I D
+ =============
+
+Copyright (C) 2009 Google, Inc.
+Written by Mike Chan <mike@android.com>
+
+CONTENTS:
+---------
+
+1. Android
+ 1.1 Required enabled config options
+ 1.2 Required disabled config options
+ 1.3 Recommended enabled config options
+2. Contact
+
+
+1. Android
+==========
+
+Android (www.android.com) is an open source operating system for mobile devices.
+This document describes configurations needed to run the Android framework on
+top of the Linux kernel.
+
+To see a working defconfig, look at msm_defconfig or goldfish_defconfig,
+which can be found at http://android.git.kernel.org in kernel/common.git
+and kernel/msm.git.
+
+
+1.1 Required enabled config options
+-----------------------------------
+After building a standard defconfig, ensure that these options are enabled in
+your .config or defconfig if they are not already. This list is based on the msm_defconfig.
+You should keep the rest of the default options enabled in the defconfig
+unless you know what you are doing.
+
+ANDROID_PARANOID_NETWORK
+ASHMEM
+CONFIG_FB_MODE_HELPERS
+CONFIG_FONT_8x16
+CONFIG_FONT_8x8
+CONFIG_YAFFS_SHORT_NAMES_IN_RAM
+DAB
+EARLYSUSPEND
+FB
+FB_CFB_COPYAREA
+FB_CFB_FILLRECT
+FB_CFB_IMAGEBLIT
+FB_DEFERRED_IO
+FB_TILEBLITTING
+HIGH_RES_TIMERS
+INOTIFY
+INOTIFY_USER
+INPUT_EVDEV
+INPUT_GPIO
+INPUT_MISC
+LEDS_CLASS
+LEDS_GPIO
+LOCK_KERNEL
+LOGGER
+LOW_MEMORY_KILLER
+MISC_DEVICES
+NEW_LEDS
+NO_HZ
+POWER_SUPPLY
+PREEMPT
+RAMFS
+RTC_CLASS
+RTC_LIB
+SWITCH
+SWITCH_GPIO
+TMPFS
+UID_STAT
+UID16
+USB_FUNCTION
+USB_FUNCTION_ADB
+USER_WAKELOCK
+VIDEO_OUTPUT_CONTROL
+WAKELOCK
+YAFFS_AUTO_YAFFS2
+YAFFS_FS
+YAFFS_YAFFS1
+YAFFS_YAFFS2
+
+
+1.2 Required disabled config options
+------------------------------------
+CONFIG_YAFFS_DISABLE_LAZY_LOAD
+DNOTIFY
+
+
+1.3 Recommended enabled config options
+--------------------------------------
+ANDROID_PMEM
+ANDROID_RAM_CONSOLE
+ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+SCHEDSTATS
+DEBUG_PREEMPT
+DEBUG_MUTEXES
+DEBUG_SPINLOCK_SLEEP
+DEBUG_INFO
+FRAME_POINTER
+CPU_FREQ
+CPU_FREQ_TABLE
+CPU_FREQ_DEFAULT_GOV_ONDEMAND
+CPU_FREQ_GOV_ONDEMAND
+CRC_CCITT
+EMBEDDED
+INPUT_TOUCHSCREEN
+I2C
+I2C_BOARDINFO
+LOG_BUF_SHIFT=17
+SERIAL_CORE
+SERIAL_CORE_CONSOLE
+
+
+2. Contact
+==========
+website: http://android.git.kernel.org
+
+mailing-lists: android-kernel@googlegroups.com
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index c7a2eb8450c2..b4ae5e681a6e 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -28,6 +28,7 @@ Contents:
2.3 Userspace
2.4 Ondemand
2.5 Conservative
+2.6 Interactive
3. The Governor Interface in the CPUfreq Core
@@ -191,6 +192,81 @@ governor but for the opposite direction. For example when set to its
default value of '20' it means that if the CPU usage needs to be below
20% between samples to have the frequency decreased.
+
+2.6 Interactive
+---------------
+
+The CPUfreq governor "interactive" is designed for latency-sensitive,
+interactive workloads. This governor sets the CPU speed depending on
+usage, similar to "ondemand" and "conservative" governors, but with a
+different set of configurable behaviors.
+
+The tuneable values for this governor are:
+
+target_loads: CPU load values used to adjust speed to influence the
+current CPU load toward that value. In general, the lower the target
+load, the more often the governor will raise CPU speeds to bring load
+below the target. The format is a single target load, optionally
+followed by pairs of CPU speeds and CPU loads to target at or above
+those speeds. Colons can be used between the speeds and associated
+target loads for readability. For example:
+
+ 85 1000000:90 1700000:99
+
+targets a CPU load of 85% below 1GHz, 90% at or above 1GHz, and 99% at
+or above 1.7GHz. If speeds are specified, they must appear in
+ascending order. Higher target load values are typically specified
+for higher speeds; that is, target load values also usually appear
+in ascending order. The default is
+target load 90% for all speeds.
+
+min_sample_time: The minimum amount of time to spend at the current
+frequency before ramping down. Default is 80000 uS.
+
+hispeed_freq: An intermediate "hi speed" at which to initially ramp
+when CPU load hits the value specified in go_hispeed_load. If load
+stays high for the amount of time specified in above_hispeed_delay,
+then speed may be bumped higher. Default is the maximum speed
+allowed by the policy at governor initialization time.
+
+go_hispeed_load: The CPU load at which to ramp to hispeed_freq.
+Default is 99%.
+
+above_hispeed_delay: When speed is at or above hispeed_freq, wait for
+this long before raising speed in response to continued high load.
+Default is 20000 uS.
+
+timer_rate: Sample rate for reevaluating CPU load when the CPU is not
+idle. A deferrable timer is used, such that the CPU will not be woken
+from idle to service this timer until something else needs to run.
+(The maximum time to allow deferring this timer when not running at
+minimum speed is configurable via timer_slack.) Default is 20000 uS.
+
+timer_slack: Maximum additional time to defer handling the governor
+sampling timer beyond timer_rate when running at speeds above the
+minimum. For platforms that consume additional power at idle when
+CPUs are running at speeds greater than minimum, this places an upper
+bound on how long the timer will be deferred prior to re-evaluating
+load and dropping speed. For example, if timer_rate is 20000uS and
+timer_slack is 10000uS then timers will be deferred for up to 30msec
+when not at lowest speed. A value of -1 means defer timers
+indefinitely at all speeds. Default is 80000 uS.
+
+boost: If non-zero, immediately boost speed of all CPUs to at least
+hispeed_freq until zero is written to this attribute. If zero, allow
+CPU speeds to drop below hispeed_freq according to load as usual.
+Default is zero.
+
+boostpulse: On each write, immediately boost speed of all CPUs to
+hispeed_freq for at least the period of time specified by
+boostpulse_duration, after which speeds are allowed to drop below
+hispeed_freq according to load as usual.
+
+boostpulse_duration: Length of time to hold CPU speed at hispeed_freq
+on a write to boostpulse, before allowing speed to drop according to
+load as usual. Default is 80000 uS.
+
+
3. The Governor Interface in the CPUfreq Core
=============================================
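
The interactive governor tunables documented in section 2.6 above are plain
sysfs attributes. As a usage sketch (not part of the patch), the C program
below writes the documented target_loads example and triggers a boost pulse;
the /sys/devices/system/cpu/cpufreq/interactive/ directory is assumed to be
the conventional location of these attributes.

    /* Sketch: drive the interactive governor tunables from userspace.
     * The sysfs directory below is an assumption (the usual location
     * when the interactive governor is active). */
    #include <stdio.h>

    #define IGOV_DIR "/sys/devices/system/cpu/cpufreq/interactive/"

    static int write_attr(const char *attr, const char *val)
    {
            char path[128];
            FILE *f;

            snprintf(path, sizeof(path), IGOV_DIR "%s", attr);
            f = fopen(path, "w");
            if (!f)
                    return -1;
            fputs(val, f);
            fclose(f);
            return 0;
    }

    int main(void)
    {
            /* 85% target below 1GHz, 90% from 1GHz, 99% from 1.7GHz */
            write_attr("target_loads", "85 1000000:90 1700000:99");
            /* boost to at least hispeed_freq for boostpulse_duration */
            write_attr("boostpulse", "1");
            return 0;
    }
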
diff --git a/Documentation/devicetree/bindings/arm/pmu.txt b/Documentation/devicetree/bindings/arm/pmu.txt
index 343781b9f246..4ce82d045a6b 100644
--- a/Documentation/devicetree/bindings/arm/pmu.txt
+++ b/Documentation/devicetree/bindings/arm/pmu.txt
@@ -16,6 +16,9 @@ Required properties:
"arm,arm1176-pmu"
"arm,arm1136-pmu"
- interrupts : 1 combined interrupt or 1 per core.
+- cluster : a phandle to the cluster to which it belongs
+ If there is more than one cluster with the same CPU type,
+ there should be a separate PMU node per cluster.
Example:
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index fd8d0d594fc7..5dcea6447ae8 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -1410,6 +1410,9 @@ The value of /proc/<pid>/oom_score_adj may be reduced no lower than the last
value set by a CAP_SYS_RESOURCE process. To reduce the value any lower
requires CAP_SYS_RESOURCE.
+NOTICE: /proc/<pid>/oom_adj is deprecated and will be removed, please see
+Documentation/feature-removal-schedule.txt.
+
Caveat: when a parent task is selected, the oom killer will sacrifice any first
generation children with separate address spaces instead, if possible. This
avoids servers and important system daemons from being killed and loses the
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 363e348bff9b..c6ec1631d9c4 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1177,6 +1177,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
See comment before ip2_setup() in
drivers/char/ip2/ip2base.c.
+ irqaffinity= [SMP] Set the default irq affinity mask
+ Format:
+ <cpu number>,...,<cpu number>
+ or
+ <cpu number>-<cpu number>
+ (must be a positive range in ascending order)
+ or a mixture
+ <cpu number>,...,<cpu number>-<cpu number>
+
irqfixup [HW]
When an interrupt is not handled search all handlers
for it. Intended to get systems with badly broken
diff --git a/MAINTAINERS b/MAINTAINERS
index 915564eda145..3105c4868c4e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -228,7 +228,7 @@ S: Maintained
F: drivers/platform/x86/acerhdf.c
ACER WMI LAPTOP EXTRAS
-M: Joey Lee <jlee@novell.com>
+M: "Lee, Chun-Yi" <jlee@suse.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/acer-wmi.c
@@ -648,7 +648,7 @@ F: arch/arm/
ARM SUB-ARCHITECTURES
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: MAINTAINED
+S: Maintained
F: arch/arm/mach-*/
F: arch/arm/plat-*/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git
@@ -1351,6 +1351,14 @@ W: http://wireless.kernel.org/en/users/Drivers/ath9k
S: Supported
F: drivers/net/wireless/ath/ath9k/
+WILOCITY WIL6210 WIRELESS DRIVER
+M: Vladimir Kondratiev <qca_vkondrat@qca.qualcomm.com>
+L: linux-wireless@vger.kernel.org
+L: wil6210@qca.qualcomm.com
+S: Supported
+W: http://wireless.kernel.org/en/users/Drivers/wil6210
+F: drivers/net/wireless/ath/wil6210/
+
CARL9170 LINUX COMMUNITY WIRELESS DRIVER
M: Christian Lamparter <chunkeey@googlemail.com>
L: linux-wireless@vger.kernel.org
@@ -1964,9 +1972,9 @@ S: Maintained
F: drivers/usb/host/ohci-ep93xx.c
CIRRUS LOGIC CS4270 SOUND DRIVER
-M: Timur Tabi <timur@freescale.com>
+M: Timur Tabi <timur@tabi.org>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
-S: Supported
+S: Odd Fixes
F: sound/soc/codecs/cs4270*
CLEANCACHE API
@@ -3183,9 +3191,9 @@ F: include/uapi/video/
F: include/uapi/linux/fb.h
FREESCALE DIU FRAMEBUFFER DRIVER
-M: Timur Tabi <timur@freescale.com>
+M: Timur Tabi <timur@tabi.org>
L: linux-fbdev@vger.kernel.org
-S: Supported
+S: Maintained
F: drivers/video/fsl-diu-fb.*
FREESCALE DMA DRIVER
@@ -3220,9 +3228,8 @@ F: drivers/net/ethernet/freescale/fs_enet/
F: include/linux/fs_enet_pd.h
FREESCALE QUICC ENGINE LIBRARY
-M: Timur Tabi <timur@freescale.com>
L: linuxppc-dev@lists.ozlabs.org
-S: Supported
+S: Orphan
F: arch/powerpc/sysdev/qe_lib/
F: arch/powerpc/include/asm/*qe.h
@@ -3241,16 +3248,16 @@ S: Maintained
F: drivers/net/ethernet/freescale/ucc_geth*
FREESCALE QUICC ENGINE UCC UART DRIVER
-M: Timur Tabi <timur@freescale.com>
+M: Timur Tabi <timur@tabi.org>
L: linuxppc-dev@lists.ozlabs.org
-S: Supported
+S: Maintained
F: drivers/tty/serial/ucc_uart.c
FREESCALE SOC SOUND DRIVERS
-M: Timur Tabi <timur@freescale.com>
+M: Timur Tabi <timur@tabi.org>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
L: linuxppc-dev@lists.ozlabs.org
-S: Supported
+S: Maintained
F: sound/soc/fsl/fsl*
F: sound/soc/fsl/mpc8610_hpcd.c
@@ -5077,7 +5084,7 @@ S: Maintained
F: drivers/media/radio/radio-mr800.c
MSI LAPTOP SUPPORT
-M: "Lee, Chun-Yi" <jlee@novell.com>
+M: "Lee, Chun-Yi" <jlee@suse.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/msi-laptop.c
@@ -5507,8 +5514,7 @@ M: Benoît Cousson <b-cousson@ti.com>
M: Paul Walmsley <paul@pwsan.com>
L: linux-omap@vger.kernel.org
S: Maintained
-F: arch/arm/mach-omap2/omap_hwmod.c
-F: arch/arm/plat-omap/include/plat/omap_hwmod.h
+F: arch/arm/mach-omap2/omap_hwmod.*
OMAP HWMOD DATA FOR OMAP4-BASED DEVICES
M: Benoît Cousson <b-cousson@ti.com>
@@ -7334,7 +7340,7 @@ S: Odd Fixes
F: drivers/staging/speakup/
STAGING - TI DSP BRIDGE DRIVERS
-M: Omar Ramirez Luna <omar.ramirez@ti.com>
+M: Omar Ramirez Luna <omar.ramirez@copitl.com>
S: Odd Fixes
F: drivers/staging/tidspbridge/
@@ -8526,7 +8532,7 @@ F: Documentation/x86/
F: arch/x86/
X86 PLATFORM DRIVERS
-M: Matthew Garrett <mjg@redhat.com>
+M: Matthew Garrett <matthew.garrett@nebula.com>
L: platform-driver-x86@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.git
S: Maintained
diff --git a/Makefile b/Makefile
index a1667c4bcce5..253a455d8d8d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 8
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
NAME = Terrified Chipmunk
# *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 67874b82a4ed..f0bf7e9d7c5a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1567,6 +1567,91 @@ config SCHED_SMT
MultiThreading at a cost of slightly increased overhead in some
places. If unsure say N here.
+config DISABLE_CPU_SCHED_DOMAIN_BALANCE
+ bool "(EXPERIMENTAL) Disable CPU level scheduler load-balancing"
+ help
+ Disables scheduler load-balancing at CPU sched domain level.
+
+config SCHED_HMP
+ bool "(EXPERIMENTAL) Heterogenous multiprocessor scheduling"
+ depends on DISABLE_CPU_SCHED_DOMAIN_BALANCE && SCHED_MC && FAIR_GROUP_SCHED && !SCHED_AUTOGROUP
+ help
+ Experimental scheduler optimizations for heterogeneous platforms.
+ Attempts to introspectively select task affinity to optimize power
+ and performance. Basic support for multiple (>2) cpu types is in place,
+ but it has only been tested with two types of cpus.
+ There is currently no support for migration of task groups, hence
+ !SCHED_AUTOGROUP. Furthermore, normal load-balancing must be disabled
+ between cpus of different type (DISABLE_CPU_SCHED_DOMAIN_BALANCE).
+
+config SCHED_HMP_PRIO_FILTER
+ bool "(EXPERIMENTAL) Filter HMP migrations by task priority"
+ depends on SCHED_HMP
+ default y
+ help
+ Enables task priority based HMP migration filter. Any task with
+ a NICE value above the threshold will always be on low-power cpus
+ with less compute capacity.
+
+config SCHED_HMP_PRIO_FILTER_VAL
+ int "NICE priority threshold"
+ default 5
+ depends on SCHED_HMP_PRIO_FILTER
+
+config HMP_FAST_CPU_MASK
+ string "HMP scheduler fast CPU mask"
+ depends on SCHED_HMP
+ help
+ Leave empty to use device tree information.
+ Specify the cpuids of the fast CPUs in the system as a list string,
+ e.g. CPUs 0 and 1 should be specified as 0-1.
+
+config HMP_SLOW_CPU_MASK
+ string "HMP scheduler slow CPU mask"
+ depends on SCHED_HMP
+ help
+ Leave empty to use device tree information.
+ Specify the cpuids of the slow CPUs in the system as a list string,
+ e.g. CPUs 0 and 1 should be specified as 0-1.
+
+config HMP_VARIABLE_SCALE
+ bool "Allows changing the load tracking scale through sysfs"
+ depends on SCHED_HMP
+ help
+ When turned on, this option exports the thresholds and load average
+ period value for the load tracking patches through sysfs.
+ The values can be modified to change the rate of load accumulation
+ and the thresholds used for HMP migration.
+ The load_avg_period_ms is the time in ms to reach a load average of
+ 0.5 for an idle task of 0 load average ratio that starts a busy loop.
+ The up_threshold and down_threshold are the values used to decide
+ whether to move to a faster CPU or back to a slower CPU.
+ The {up,down}_threshold are divided by 1024 before being compared
+ to the load average.
+ For example, with load_avg_period_ms = 128 and up_threshold = 512,
+ a running task with a load of 0 will be migrated to a bigger CPU after
+ 128ms, because after 128ms its load_avg_ratio is 0.5 and the real
+ up_threshold is 0.5.
+ This patch has the same behavior as changing the Y of the load
+ average computation to
+ (1002/1024)^(LOAD_AVG_PERIOD/load_avg_period_ms)
+ but it removes intermediate overflows in the computation.
+
+config HMP_FREQUENCY_INVARIANT_SCALE
+ bool "(EXPERIMENTAL) Frequency-Invariant Tracked Load for HMP"
+ depends on HMP_VARIABLE_SCALE && CPU_FREQ
+ help
+ Scales the current load contribution in line with the frequency
+ of the CPU that the task was executed on.
+ In this version, we use a simple linear scale derived from the
+ maximum frequency reported by CPUFreq.
+ Restricting tracked load to be scaled by the CPU's frequency
+ represents the consumption of possible compute capacity
+ (rather than consumption of actual instantaneous capacity as
+ normal) and allows the HMP migration's simple threshold
+ migration strategy to interact more predictably with CPUFreq's
+ asynchronous compute capacity changes.
+
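The scaling described above can be sketched as follows (standalone illustration, not code from the patch; the frequency values are assumed to come from CPUFreq):

#include <stdio.h>

/* linear frequency-invariant scaling: running at half of fmax only
 * consumes half of the possible compute capacity */
static unsigned long scale_contrib(unsigned long contrib,
				   unsigned long curr_freq,
				   unsigned long max_freq)
{
	return contrib * curr_freq / max_freq;
}

int main(void)
{
	/* a full-load contribution of 1024 at 1.0 GHz on a 2.0 GHz CPU */
	printf("%lu\n", scale_contrib(1024, 1000000, 2000000));	/* prints 512 */
	return 0;
}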
config HAVE_ARM_SCU
bool
help
@@ -1865,6 +1950,15 @@ config XEN
help
Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
+config ARM_FLUSH_CONSOLE_ON_RESTART
+ bool "Force flush the console on restart"
+ help
+ If the console is locked while the system is rebooted, the messages
+ in the temporary logbuffer would not have propagated to all the
+ console drivers. This option forces the console lock to be
+ released if it failed to be acquired, which will cause all the
+ pending messages to be flushed.
+
endmenu
menu "Boot options"
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 661030d6bc6c..f14bbd556083 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -63,6 +63,27 @@ config DEBUG_USER
8 - SIGSEGV faults
16 - SIGBUS faults
+config DEBUG_RODATA
+ bool "Write protect kernel text section"
+ default n
+ depends on DEBUG_KERNEL && MMU
+ ---help---
+ Mark the kernel text section as write-protected in the pagetables,
+ in order to catch accidental (and incorrect) writes to such const
+ data. This will cause the kernel image, plus up to 4MB, to be
+ mapped as pages instead of sections, which will increase TLB
+ pressure.
+ If in doubt, say "N".
+
+config DEBUG_RODATA_TEST
+ bool "Testcase for the DEBUG_RODATA feature"
+ depends on DEBUG_RODATA
+ default n
+ ---help---
+ This option enables a testcase for the DEBUG_RODATA
+ feature.
+ If in doubt, say "N"
+
# These options are only for real kernel hackers who want to get their hands dirty.
config DEBUG_LL
bool "Kernel low-level debugging functions (read help!)"
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index fe4d9c3ad761..a2361900db16 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -805,6 +805,8 @@ call_cache_fn: adr r12, proc_types
.align 2
.type proc_types,#object
proc_types:
+#if !defined(CONFIG_CPU_V7)
+ /* This collides with some V7 IDs, preventing correct detection */
.word 0x00000000 @ old ARM ID
.word 0x0000f000
mov pc, lr
@@ -813,6 +815,7 @@ proc_types:
THUMB( nop )
mov pc, lr
THUMB( nop )
+#endif
.word 0x41007000 @ ARM7/710
.word 0xfff8fe00
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 50438d6a6781..5f39c8ea900b 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -157,6 +157,7 @@ dtb-$(CONFIG_ARCH_VT8500) += vt8500-bv07.dtb \
dtb-$(CONFIG_ARCH_ZYNQ) += zynq-zc702.dtb
targets += dtbs
+targets += $(dtb-y)
endif
# *.dtb used to be generated in the directory above. Clean out the
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index 68bccf41a2c6..cb7bcc51608d 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -306,6 +306,22 @@
};
};
+ ssc0 {
+ pinctrl_ssc0_tx: ssc0_tx-0 {
+ atmel,pins =
+ <1 16 0x1 0x0 /* PB16 periph A */
+ 1 17 0x1 0x0 /* PB17 periph A */
+ 1 18 0x1 0x0>; /* PB18 periph A */
+ };
+
+ pinctrl_ssc0_rx: ssc0_rx-0 {
+ atmel,pins =
+ <1 19 0x1 0x0 /* PB19 periph A */
+ 1 20 0x1 0x0 /* PB20 periph A */
+ 1 21 0x1 0x0>; /* PB21 periph A */
+ };
+ };
+
pioA: gpio@fffff400 {
compatible = "atmel,at91rm9200-gpio";
reg = <0xfffff400 0x200>;
@@ -450,6 +466,8 @@
compatible = "atmel,at91rm9200-ssc";
reg = <0xfffbc000 0x4000>;
interrupts = <14 4 5>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index 32ec62cf5385..271d4de026e9 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -271,6 +271,38 @@
};
};
+ ssc0 {
+ pinctrl_ssc0_tx: ssc0_tx-0 {
+ atmel,pins =
+ <1 0 0x2 0x0 /* PB0 periph B */
+ 1 1 0x2 0x0 /* PB1 periph B */
+ 1 2 0x2 0x0>; /* PB2 periph B */
+ };
+
+ pinctrl_ssc0_rx: ssc0_rx-0 {
+ atmel,pins =
+ <1 3 0x2 0x0 /* PB3 periph B */
+ 1 4 0x2 0x0 /* PB4 periph B */
+ 1 5 0x2 0x0>; /* PB5 periph B */
+ };
+ };
+
+ ssc1 {
+ pinctrl_ssc1_tx: ssc1_tx-0 {
+ atmel,pins =
+ <1 6 0x1 0x0 /* PB6 periph A */
+ 1 7 0x1 0x0 /* PB7 periph A */
+ 1 8 0x1 0x0>; /* PB8 periph A */
+ };
+
+ pinctrl_ssc1_rx: ssc1_rx-0 {
+ atmel,pins =
+ <1 9 0x1 0x0 /* PB9 periph A */
+ 1 10 0x1 0x0 /* PB10 periph A */
+ 1 11 0x1 0x0>; /* PB11 periph A */
+ };
+ };
+
pioA: gpio@fffff200 {
compatible = "atmel,at91rm9200-gpio";
reg = <0xfffff200 0x200>;
@@ -368,6 +400,8 @@
compatible = "atmel,at91rm9200-ssc";
reg = <0xfff98000 0x4000>;
interrupts = <16 4 5>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
status = "disabled";
};
@@ -375,6 +409,8 @@
compatible = "atmel,at91rm9200-ssc";
reg = <0xfff9c000 0x4000>;
interrupts = <17 4 5>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ssc1_tx &pinctrl_ssc1_rx>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index 231858ffd850..6b1d4cab24c2 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -290,6 +290,38 @@
};
};
+ ssc0 {
+ pinctrl_ssc0_tx: ssc0_tx-0 {
+ atmel,pins =
+ <3 0 0x1 0x0 /* PD0 periph A */
+ 3 1 0x1 0x0 /* PD1 periph A */
+ 3 2 0x1 0x0>; /* PD2 periph A */
+ };
+
+ pinctrl_ssc0_rx: ssc0_rx-0 {
+ atmel,pins =
+ <3 3 0x1 0x0 /* PD3 periph A */
+ 3 4 0x1 0x0 /* PD4 periph A */
+ 3 5 0x1 0x0>; /* PD5 periph A */
+ };
+ };
+
+ ssc1 {
+ pinctrl_ssc1_tx: ssc1_tx-0 {
+ atmel,pins =
+ <3 10 0x1 0x0 /* PD10 periph A */
+ 3 11 0x1 0x0 /* PD11 periph A */
+ 3 12 0x1 0x0>; /* PD12 periph A */
+ };
+
+ pinctrl_ssc1_rx: ssc1_rx-0 {
+ atmel,pins =
+ <3 13 0x1 0x0 /* PD13 periph A */
+ 3 14 0x1 0x0 /* PD14 periph A */
+ 3 15 0x1 0x0>; /* PD15 periph A */
+ };
+ };
+
pioA: gpio@fffff200 {
compatible = "atmel,at91rm9200-gpio";
reg = <0xfffff200 0x200>;
@@ -425,6 +457,8 @@
compatible = "atmel,at91sam9g45-ssc";
reg = <0xfff9c000 0x4000>;
interrupts = <16 4 5>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
status = "disabled";
};
@@ -432,6 +466,8 @@
compatible = "atmel,at91sam9g45-ssc";
reg = <0xfffa0000 0x4000>;
interrupts = <17 4 5>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ssc1_tx &pinctrl_ssc1_rx>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index e9efb34f4379..80e29c605d4e 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -28,6 +28,7 @@
tcb1 = &tcb1;
i2c0 = &i2c0;
i2c1 = &i2c1;
+ ssc0 = &ssc0;
};
cpus {
cpu@0 {
@@ -244,6 +245,22 @@
};
};
+ ssc0 {
+ pinctrl_ssc0_tx: ssc0_tx-0 {
+ atmel,pins =
+ <0 24 0x2 0x0 /* PA24 periph B */
+ 0 25 0x2 0x0 /* PA25 periph B */
+ 0 26 0x2 0x0>; /* PA26 periph B */
+ };
+
+ pinctrl_ssc0_rx: ssc0_rx-0 {
+ atmel,pins =
+ <0 27 0x2 0x0 /* PA27 periph B */
+ 0 28 0x2 0x0 /* PA28 periph B */
+ 0 29 0x2 0x0>; /* PA29 periph B */
+ };
+ };
+
pioA: gpio@fffff400 {
compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
reg = <0xfffff400 0x200>;
@@ -294,6 +311,15 @@
status = "disabled";
};
+ ssc0: ssc@f0010000 {
+ compatible = "atmel,at91sam9g45-ssc";
+ reg = <0xf0010000 0x4000>;
+ interrupts = <28 4 5>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
+ status = "disabled";
+ };
+
usart0: serial@f801c000 {
compatible = "atmel,at91sam9260-usart";
reg = <0xf801c000 0x4000>;
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index 40ac3a4eb1ab..3a47cf952146 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -88,13 +88,6 @@
interrupts = <1 4 7>;
};
- ssc0: ssc@f0010000 {
- compatible = "atmel,at91sam9g45-ssc";
- reg = <0xf0010000 0x4000>;
- interrupts = <28 4 5>;
- status = "disabled";
- };
-
tcb0: timer@f8008000 {
compatible = "atmel,at91sam9x5-tcb";
reg = <0xf8008000 0x100>;
@@ -290,6 +283,22 @@
};
};
+ ssc0 {
+ pinctrl_ssc0_tx: ssc0_tx-0 {
+ atmel,pins =
+ <0 24 0x2 0x0 /* PA24 periph B */
+ 0 25 0x2 0x0 /* PA25 periph B */
+ 0 26 0x2 0x0>; /* PA26 periph B */
+ };
+
+ pinctrl_ssc0_rx: ssc0_rx-0 {
+ atmel,pins =
+ <0 27 0x2 0x0 /* PA27 periph B */
+ 0 28 0x2 0x0 /* PA28 periph B */
+ 0 29 0x2 0x0>; /* PA29 periph B */
+ };
+ };
+
pioA: gpio@fffff400 {
compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
reg = <0xfffff400 0x200>;
@@ -333,6 +342,15 @@
};
};
+ ssc0: ssc@f0010000 {
+ compatible = "atmel,at91sam9g45-ssc";
+ reg = <0xf0010000 0x4000>;
+ interrupts = <28 4 5>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
+ status = "disabled";
+ };
+
mmc0: mmc@f0008000 {
compatible = "atmel,hsmci";
reg = <0xf0008000 0x600>;
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 45ceeb0e93e0..842dd85e784f 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -40,3 +40,53 @@ config SHARP_PARAM
config SHARP_SCOOP
bool
+
+config FIQ_GLUE
+ bool
+ select FIQ
+
+config FIQ_DEBUGGER
+ bool "FIQ Mode Serial Debugger"
+ select FIQ
+ select FIQ_GLUE
+ default n
+ help
+ The FIQ serial debugger can accept commands even when the
+ kernel is unresponsive due to being stuck with interrupts
+ disabled.
+
+
+config FIQ_DEBUGGER_NO_SLEEP
+ bool "Keep serial debugger active"
+ depends on FIQ_DEBUGGER
+ default n
+ help
+ Enables the serial debugger at boot. Passing
+ fiq_debugger.no_sleep on the kernel commandline will
+ override this config option.
+
+config FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
+ bool "Don't disable wakeup IRQ when debugger is active"
+ depends on FIQ_DEBUGGER
+ default n
+ help
+ Don't disable the wakeup irq when enabling the uart clock. This will
+ cause extra interrupts, but it makes the serial debugger usable on
+ some MSM radio builds that ignore the uart clock request in power
+ collapse.
+
+config FIQ_DEBUGGER_CONSOLE
+ bool "Console on FIQ Serial Debugger port"
+ depends on FIQ_DEBUGGER
+ default n
+ help
+ Enables a console so that printk messages are displayed on
+ the debugger serial port as they occur.
+
+config FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
+ bool "Put the FIQ debugger into console mode by default"
+ depends on FIQ_DEBUGGER_CONSOLE
+ default n
+ help
+ If enabled, this puts the fiq debugger into console mode by default.
+ Otherwise, the fiq debugger will start out in debug mode.
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index e8a4e58f1b82..091576aabf0e 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -13,3 +13,5 @@ obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o
obj-$(CONFIG_SHARP_SCOOP) += scoop.o
obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o
obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o
+obj-$(CONFIG_FIQ_GLUE) += fiq_glue.o fiq_glue_setup.o
+obj-$(CONFIG_FIQ_DEBUGGER) += fiq_debugger.o
diff --git a/arch/arm/common/fiq_debugger.c b/arch/arm/common/fiq_debugger.c
new file mode 100644
index 000000000000..3f25a6a2f547
--- /dev/null
+++ b/arch/arm/common/fiq_debugger.c
@@ -0,0 +1,1370 @@
+/*
+ * arch/arm/common/fiq_debugger.c
+ *
+ * Serial Debugger Interface accessed through an FIQ interrupt.
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <stdarg.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/console.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/kernel_stat.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/timer.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/wakelock.h>
+#include <linux/kmsg_dump.h>
+
+#include <asm/fiq_debugger.h>
+#include <asm/fiq_glue.h>
+#include <asm/stacktrace.h>
+
+#include <linux/uaccess.h>
+
+#include "fiq_debugger_ringbuf.h"
+
+#define DEBUG_MAX 64
+#define MAX_UNHANDLED_FIQ_COUNT 1000000
+
+#define MAX_FIQ_DEBUGGER_PORTS 4
+
+#define THREAD_INFO(sp) ((struct thread_info *) \
+ ((unsigned long)(sp) & ~(THREAD_SIZE - 1)))
+
+struct fiq_debugger_state {
+ struct fiq_glue_handler handler;
+
+ int fiq;
+ int uart_irq;
+ int signal_irq;
+ int wakeup_irq;
+ bool wakeup_irq_no_set_wake;
+ struct clk *clk;
+ struct fiq_debugger_pdata *pdata;
+ struct platform_device *pdev;
+
+ char debug_cmd[DEBUG_MAX];
+ int debug_busy;
+ int debug_abort;
+
+ char debug_buf[DEBUG_MAX];
+ int debug_count;
+
+ bool no_sleep;
+ bool debug_enable;
+ bool ignore_next_wakeup_irq;
+ struct timer_list sleep_timer;
+ spinlock_t sleep_timer_lock;
+ bool uart_enabled;
+ struct wake_lock debugger_wake_lock;
+ bool console_enable;
+ int current_cpu;
+ atomic_t unhandled_fiq_count;
+ bool in_fiq;
+
+ struct work_struct work;
+ spinlock_t work_lock;
+ char work_cmd[DEBUG_MAX];
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+ spinlock_t console_lock;
+ struct console console;
+ struct tty_struct *tty;
+ int tty_open_count;
+ struct fiq_debugger_ringbuf *tty_rbuf;
+ bool syslog_dumping;
+#endif
+
+ unsigned int last_irqs[NR_IRQS];
+ unsigned int last_local_timer_irqs[NR_CPUS];
+};
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+struct tty_driver *fiq_tty_driver;
+#endif
+
+#ifdef CONFIG_FIQ_DEBUGGER_NO_SLEEP
+static bool initial_no_sleep = true;
+#else
+static bool initial_no_sleep;
+#endif
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
+static bool initial_debug_enable = true;
+static bool initial_console_enable = true;
+#else
+static bool initial_debug_enable;
+static bool initial_console_enable;
+#endif
+
+static bool fiq_kgdb_enable;
+
+module_param_named(no_sleep, initial_no_sleep, bool, 0644);
+module_param_named(debug_enable, initial_debug_enable, bool, 0644);
+module_param_named(console_enable, initial_console_enable, bool, 0644);
+module_param_named(kgdb_enable, fiq_kgdb_enable, bool, 0644);
+
+#ifdef CONFIG_FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
+static inline void enable_wakeup_irq(struct fiq_debugger_state *state) {}
+static inline void disable_wakeup_irq(struct fiq_debugger_state *state) {}
+#else
+static inline void enable_wakeup_irq(struct fiq_debugger_state *state)
+{
+ if (state->wakeup_irq < 0)
+ return;
+ enable_irq(state->wakeup_irq);
+ if (!state->wakeup_irq_no_set_wake)
+ enable_irq_wake(state->wakeup_irq);
+}
+static inline void disable_wakeup_irq(struct fiq_debugger_state *state)
+{
+ if (state->wakeup_irq < 0)
+ return;
+ disable_irq_nosync(state->wakeup_irq);
+ if (!state->wakeup_irq_no_set_wake)
+ disable_irq_wake(state->wakeup_irq);
+}
+#endif
+
+static inline bool debug_have_fiq(struct fiq_debugger_state *state)
+{
+ return (state->fiq >= 0);
+}
+
+static void debug_force_irq(struct fiq_debugger_state *state)
+{
+ unsigned int irq = state->signal_irq;
+
+ if (WARN_ON(!debug_have_fiq(state)))
+ return;
+ if (state->pdata->force_irq) {
+ state->pdata->force_irq(state->pdev, irq);
+ } else {
+ struct irq_chip *chip = irq_get_chip(irq);
+ if (chip && chip->irq_retrigger)
+ chip->irq_retrigger(irq_get_irq_data(irq));
+ }
+}
+
+static void debug_uart_enable(struct fiq_debugger_state *state)
+{
+ if (state->clk)
+ clk_enable(state->clk);
+ if (state->pdata->uart_enable)
+ state->pdata->uart_enable(state->pdev);
+}
+
+static void debug_uart_disable(struct fiq_debugger_state *state)
+{
+ if (state->pdata->uart_disable)
+ state->pdata->uart_disable(state->pdev);
+ if (state->clk)
+ clk_disable(state->clk);
+}
+
+static void debug_uart_flush(struct fiq_debugger_state *state)
+{
+ if (state->pdata->uart_flush)
+ state->pdata->uart_flush(state->pdev);
+}
+
+static void debug_putc(struct fiq_debugger_state *state, char c)
+{
+ state->pdata->uart_putc(state->pdev, c);
+}
+
+static void debug_puts(struct fiq_debugger_state *state, char *s)
+{
+ unsigned c;
+ while ((c = *s++)) {
+ if (c == '\n')
+ debug_putc(state, '\r');
+ debug_putc(state, c);
+ }
+}
+
+static void debug_prompt(struct fiq_debugger_state *state)
+{
+ debug_puts(state, "debug> ");
+}
+
+static char *mode_name(unsigned cpsr)
+{
+ switch (cpsr & MODE_MASK) {
+ case USR_MODE: return "USR";
+ case FIQ_MODE: return "FIQ";
+ case IRQ_MODE: return "IRQ";
+ case SVC_MODE: return "SVC";
+ case ABT_MODE: return "ABT";
+ case UND_MODE: return "UND";
+ case SYSTEM_MODE: return "SYS";
+ default: return "???";
+ }
+}
+
+static int debug_printf(void *cookie, const char *fmt, ...)
+{
+ struct fiq_debugger_state *state = cookie;
+ char buf[256];
+ va_list ap;
+
+ va_start(ap, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+
+ debug_puts(state, buf);
+ return state->debug_abort;
+}
+
+/* Safe outside fiq context */
+static int debug_printf_nfiq(void *cookie, const char *fmt, ...)
+{
+ struct fiq_debugger_state *state = cookie;
+ char buf[256];
+ va_list ap;
+ unsigned long irq_flags;
+
+ va_start(ap, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+
+ local_irq_save(irq_flags);
+ debug_puts(state, buf);
+ debug_uart_flush(state);
+ local_irq_restore(irq_flags);
+ return state->debug_abort;
+}
+
+static void dump_regs(struct fiq_debugger_state *state, unsigned *regs)
+{
+ debug_printf(state, " r0 %08x r1 %08x r2 %08x r3 %08x\n",
+ regs[0], regs[1], regs[2], regs[3]);
+ debug_printf(state, " r4 %08x r5 %08x r6 %08x r7 %08x\n",
+ regs[4], regs[5], regs[6], regs[7]);
+ debug_printf(state, " r8 %08x r9 %08x r10 %08x r11 %08x mode %s\n",
+ regs[8], regs[9], regs[10], regs[11],
+ mode_name(regs[16]));
+ if ((regs[16] & MODE_MASK) == USR_MODE)
+ debug_printf(state, " ip %08x sp %08x lr %08x pc %08x "
+ "cpsr %08x\n", regs[12], regs[13], regs[14],
+ regs[15], regs[16]);
+ else
+ debug_printf(state, " ip %08x sp %08x lr %08x pc %08x "
+ "cpsr %08x spsr %08x\n", regs[12], regs[13],
+ regs[14], regs[15], regs[16], regs[17]);
+}
+
+struct mode_regs {
+ unsigned long sp_svc;
+ unsigned long lr_svc;
+ unsigned long spsr_svc;
+
+ unsigned long sp_abt;
+ unsigned long lr_abt;
+ unsigned long spsr_abt;
+
+ unsigned long sp_und;
+ unsigned long lr_und;
+ unsigned long spsr_und;
+
+ unsigned long sp_irq;
+ unsigned long lr_irq;
+ unsigned long spsr_irq;
+
+ unsigned long r8_fiq;
+ unsigned long r9_fiq;
+ unsigned long r10_fiq;
+ unsigned long r11_fiq;
+ unsigned long r12_fiq;
+ unsigned long sp_fiq;
+ unsigned long lr_fiq;
+ unsigned long spsr_fiq;
+};
+
+void __naked get_mode_regs(struct mode_regs *regs)
+{
+ asm volatile (
+ "mrs r1, cpsr\n"
+ "msr cpsr_c, #0xd3 @(SVC_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+ "stmia r0!, {r13 - r14}\n"
+ "mrs r2, spsr\n"
+ "msr cpsr_c, #0xd7 @(ABT_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+ "stmia r0!, {r2, r13 - r14}\n"
+ "mrs r2, spsr\n"
+ "msr cpsr_c, #0xdb @(UND_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+ "stmia r0!, {r2, r13 - r14}\n"
+ "mrs r2, spsr\n"
+ "msr cpsr_c, #0xd2 @(IRQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+ "stmia r0!, {r2, r13 - r14}\n"
+ "mrs r2, spsr\n"
+ "msr cpsr_c, #0xd1 @(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+ "stmia r0!, {r2, r8 - r14}\n"
+ "mrs r2, spsr\n"
+ "stmia r0!, {r2}\n"
+ "msr cpsr_c, r1\n"
+ "bx lr\n");
+}
+
+
+static void dump_allregs(struct fiq_debugger_state *state, unsigned *regs)
+{
+ struct mode_regs mode_regs;
+ dump_regs(state, regs);
+ get_mode_regs(&mode_regs);
+ debug_printf(state, " svc: sp %08x lr %08x spsr %08x\n",
+ mode_regs.sp_svc, mode_regs.lr_svc, mode_regs.spsr_svc);
+ debug_printf(state, " abt: sp %08x lr %08x spsr %08x\n",
+ mode_regs.sp_abt, mode_regs.lr_abt, mode_regs.spsr_abt);
+ debug_printf(state, " und: sp %08x lr %08x spsr %08x\n",
+ mode_regs.sp_und, mode_regs.lr_und, mode_regs.spsr_und);
+ debug_printf(state, " irq: sp %08x lr %08x spsr %08x\n",
+ mode_regs.sp_irq, mode_regs.lr_irq, mode_regs.spsr_irq);
+ debug_printf(state, " fiq: r8 %08x r9 %08x r10 %08x r11 %08x "
+ "r12 %08x\n",
+ mode_regs.r8_fiq, mode_regs.r9_fiq, mode_regs.r10_fiq,
+ mode_regs.r11_fiq, mode_regs.r12_fiq);
+ debug_printf(state, " fiq: sp %08x lr %08x spsr %08x\n",
+ mode_regs.sp_fiq, mode_regs.lr_fiq, mode_regs.spsr_fiq);
+}
+
+static void dump_irqs(struct fiq_debugger_state *state)
+{
+ int n;
+
+ debug_printf(state, "irqnr total since-last status name\n");
+ for (n = 0; n < NR_IRQS; n++) {
+ struct irqaction *act = irq_desc[n].action;
+ if (!act && !kstat_irqs(n))
+ continue;
+ debug_printf(state, "%5d: %10u %11u %8x %s\n", n,
+ kstat_irqs(n),
+ kstat_irqs(n) - state->last_irqs[n],
+ irq_desc[n].status_use_accessors,
+ (act && act->name) ? act->name : "???");
+ state->last_irqs[n] = kstat_irqs(n);
+ }
+}
+
+struct stacktrace_state {
+ struct fiq_debugger_state *state;
+ unsigned int depth;
+};
+
+static int report_trace(struct stackframe *frame, void *d)
+{
+ struct stacktrace_state *sts = d;
+
+ if (sts->depth) {
+ debug_printf(sts->state,
+ " pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
+ frame->pc, frame->pc, frame->lr, frame->lr,
+ frame->sp, frame->fp);
+ sts->depth--;
+ return 0;
+ }
+ debug_printf(sts->state, " ...\n");
+
+ return sts->depth == 0;
+}
+
+struct frame_tail {
+ struct frame_tail *fp;
+ unsigned long sp;
+ unsigned long lr;
+} __attribute__((packed));
+
+static struct frame_tail *user_backtrace(struct fiq_debugger_state *state,
+ struct frame_tail *tail)
+{
+ struct frame_tail buftail[2];
+
+ /* Also check accessibility of one struct frame_tail beyond */
+ if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) {
+ debug_printf(state, " invalid frame pointer %p\n", tail);
+ return NULL;
+ }
+ if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail))) {
+ debug_printf(state,
+ " failed to copy frame pointer %p\n", tail);
+ return NULL;
+ }
+
+ debug_printf(state, " %p\n", buftail[0].lr);
+
+ /* frame pointers should strictly progress back up the stack
+ * (towards higher addresses) */
+ if (tail >= buftail[0].fp)
+ return NULL;
+
+ return buftail[0].fp-1;
+}
+
+void dump_stacktrace(struct fiq_debugger_state *state,
+ struct pt_regs * const regs, unsigned int depth, void *ssp)
+{
+ struct frame_tail *tail;
+ struct thread_info *real_thread_info = THREAD_INFO(ssp);
+ struct stacktrace_state sts;
+
+ sts.depth = depth;
+ sts.state = state;
+ *current_thread_info() = *real_thread_info;
+
+ if (!current)
+ debug_printf(state, "current NULL\n");
+ else
+ debug_printf(state, "pid: %d comm: %s\n",
+ current->pid, current->comm);
+ dump_regs(state, (unsigned *)regs);
+
+ if (!user_mode(regs)) {
+ struct stackframe frame;
+ frame.fp = regs->ARM_fp;
+ frame.sp = regs->ARM_sp;
+ frame.lr = regs->ARM_lr;
+ frame.pc = regs->ARM_pc;
+ debug_printf(state,
+ " pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
+ regs->ARM_pc, regs->ARM_pc, regs->ARM_lr, regs->ARM_lr,
+ regs->ARM_sp, regs->ARM_fp);
+ walk_stackframe(&frame, report_trace, &sts);
+ return;
+ }
+
+ tail = ((struct frame_tail *) regs->ARM_fp) - 1;
+ while (depth-- && tail && !((unsigned long) tail & 3))
+ tail = user_backtrace(state, tail);
+}
+
+static void do_ps(struct fiq_debugger_state *state)
+{
+ struct task_struct *g;
+ struct task_struct *p;
+ unsigned task_state;
+ static const char stat_nam[] = "RSDTtZX";
+
+ debug_printf(state, "pid ppid prio task pc\n");
+ read_lock(&tasklist_lock);
+ do_each_thread(g, p) {
+ task_state = p->state ? __ffs(p->state) + 1 : 0;
+ debug_printf(state,
+ "%5d %5d %4d ", p->pid, p->parent->pid, p->prio);
+ debug_printf(state, "%-13.13s %c", p->comm,
+ task_state >= sizeof(stat_nam) ? '?' : stat_nam[task_state]);
+ if (task_state == TASK_RUNNING)
+ debug_printf(state, " running\n");
+ else
+ debug_printf(state, " %08lx\n", thread_saved_pc(p));
+ } while_each_thread(g, p);
+ read_unlock(&tasklist_lock);
+}
+
+static void dump_kernel_log(struct fiq_debugger_state *state)
+{
+#ifdef CONFIG_PRINTK
+ char buf[512];
+ struct kmsg_dumper dumper = { .active = 1 };
+ size_t len;
+
+ kmsg_dump_rewind_nolock(&dumper);
+ while (kmsg_dump_get_line_nolock(&dumper, 1, buf, sizeof(buf), &len))
+ debug_printf(state, "%.*s\n", (int)len - 1, buf);
+#else
+ debug_puts("Unavailable without CONFIG_PRINTK.\n");
+#endif /* CONFIG_PRINTK */
+}
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+static void begin_syslog_dump(struct fiq_debugger_state *state)
+{
+ state->syslog_dumping = true;
+}
+
+static void end_syslog_dump(struct fiq_debugger_state *state)
+{
+ state->syslog_dumping = false;
+}
+#else
+extern int do_syslog(int type, char __user *bug, int count);
+static void begin_syslog_dump(struct fiq_debugger_state *state)
+{
+ do_syslog(5 /* clear */, NULL, 0);
+}
+
+static void end_syslog_dump(struct fiq_debugger_state *state)
+{
+ dump_kernel_log(state);
+}
+#endif
+
+static void do_sysrq(struct fiq_debugger_state *state, char rq)
+{
+ if ((rq == 'g' || rq == 'G') && !fiq_kgdb_enable) {
+ debug_printf(state, "sysrq-g blocked\n");
+ return;
+ }
+ begin_syslog_dump(state);
+ handle_sysrq(rq);
+ end_syslog_dump(state);
+}
+
+#ifdef CONFIG_KGDB
+static void do_kgdb(struct fiq_debugger_state *state)
+{
+ if (!fiq_kgdb_enable) {
+ debug_printf(state, "kgdb through fiq debugger not enabled\n");
+ return;
+ }
+
+ debug_printf(state, "enabling console and triggering kgdb\n");
+ state->console_enable = true;
+ handle_sysrq('g');
+}
+#endif
+
+static void debug_schedule_work(struct fiq_debugger_state *state, char *cmd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->work_lock, flags);
+ if (state->work_cmd[0] != '\0') {
+ debug_printf(state, "work command processor busy\n");
+ spin_unlock_irqrestore(&state->work_lock, flags);
+ return;
+ }
+
+ strlcpy(state->work_cmd, cmd, sizeof(state->work_cmd));
+ spin_unlock_irqrestore(&state->work_lock, flags);
+
+ schedule_work(&state->work);
+}
+
+static void debug_work(struct work_struct *work)
+{
+ struct fiq_debugger_state *state;
+ char work_cmd[DEBUG_MAX];
+ char *cmd;
+ unsigned long flags;
+
+ state = container_of(work, struct fiq_debugger_state, work);
+
+ spin_lock_irqsave(&state->work_lock, flags);
+
+ strlcpy(work_cmd, state->work_cmd, sizeof(work_cmd));
+ state->work_cmd[0] = '\0';
+
+ spin_unlock_irqrestore(&state->work_lock, flags);
+
+ cmd = work_cmd;
+ if (!strncmp(cmd, "reboot", 6)) {
+ cmd += 6;
+ while (*cmd == ' ')
+ cmd++;
+ if (*cmd != '\0')
+ kernel_restart(cmd);
+ else
+ kernel_restart(NULL);
+ } else {
+ debug_printf(state, "unknown work command '%s'\n", work_cmd);
+ }
+}
+
+/* This function CANNOT be called in FIQ context */
+static void debug_irq_exec(struct fiq_debugger_state *state, char *cmd)
+{
+ if (!strcmp(cmd, "ps"))
+ do_ps(state);
+ if (!strcmp(cmd, "sysrq"))
+ do_sysrq(state, 'h');
+ if (!strncmp(cmd, "sysrq ", 6))
+ do_sysrq(state, cmd[6]);
+#ifdef CONFIG_KGDB
+ if (!strcmp(cmd, "kgdb"))
+ do_kgdb(state);
+#endif
+ if (!strncmp(cmd, "reboot", 6))
+ debug_schedule_work(state, cmd);
+}
+
+static void debug_help(struct fiq_debugger_state *state)
+{
+ debug_printf(state, "FIQ Debugger commands:\n"
+ " pc PC status\n"
+ " regs Register dump\n"
+ " allregs Extended Register dump\n"
+ " bt Stack trace\n"
+ " reboot [<c>] Reboot with command <c>\n"
+ " reset [<c>] Hard reset with command <c>\n"
+ " irqs Interupt status\n"
+ " kmsg Kernel log\n"
+ " version Kernel version\n");
+ debug_printf(state, " sleep Allow sleep while in FIQ\n"
+ " nosleep Disable sleep while in FIQ\n"
+ " console Switch terminal to console\n"
+ " cpu Current CPU\n"
+ " cpu <number> Switch to CPU<number>\n");
+ debug_printf(state, " ps Process list\n"
+ " sysrq sysrq options\n"
+ " sysrq <param> Execute sysrq with <param>\n");
+#ifdef CONFIG_KGDB
+ debug_printf(state, " kgdb Enter kernel debugger\n");
+#endif
+}
+
+static void take_affinity(void *info)
+{
+ struct fiq_debugger_state *state = info;
+ struct cpumask cpumask;
+
+ cpumask_clear(&cpumask);
+ cpumask_set_cpu(get_cpu(), &cpumask);
+
+ irq_set_affinity(state->uart_irq, &cpumask);
+}
+
+static void switch_cpu(struct fiq_debugger_state *state, int cpu)
+{
+ if (!debug_have_fiq(state))
+ smp_call_function_single(cpu, take_affinity, state, false);
+ state->current_cpu = cpu;
+}
+
+static bool debug_fiq_exec(struct fiq_debugger_state *state,
+ const char *cmd, unsigned *regs, void *svc_sp)
+{
+ bool signal_helper = false;
+
+ if (!strcmp(cmd, "help") || !strcmp(cmd, "?")) {
+ debug_help(state);
+ } else if (!strcmp(cmd, "pc")) {
+ debug_printf(state, " pc %08x cpsr %08x mode %s\n",
+ regs[15], regs[16], mode_name(regs[16]));
+ } else if (!strcmp(cmd, "regs")) {
+ dump_regs(state, regs);
+ } else if (!strcmp(cmd, "allregs")) {
+ dump_allregs(state, regs);
+ } else if (!strcmp(cmd, "bt")) {
+ dump_stacktrace(state, (struct pt_regs *)regs, 100, svc_sp);
+ } else if (!strncmp(cmd, "reset", 5)) {
+ cmd += 5;
+ while (*cmd == ' ')
+ cmd++;
+ if (*cmd) {
+ char tmp_cmd[32];
+ strlcpy(tmp_cmd, cmd, sizeof(tmp_cmd));
+ machine_restart(tmp_cmd);
+ } else {
+ machine_restart(NULL);
+ }
+ } else if (!strcmp(cmd, "irqs")) {
+ dump_irqs(state);
+ } else if (!strcmp(cmd, "kmsg")) {
+ dump_kernel_log(state);
+ } else if (!strcmp(cmd, "version")) {
+ debug_printf(state, "%s\n", linux_banner);
+ } else if (!strcmp(cmd, "sleep")) {
+ state->no_sleep = false;
+ debug_printf(state, "enabling sleep\n");
+ } else if (!strcmp(cmd, "nosleep")) {
+ state->no_sleep = true;
+ debug_printf(state, "disabling sleep\n");
+ } else if (!strcmp(cmd, "console")) {
+ debug_printf(state, "console mode\n");
+ debug_uart_flush(state);
+ state->console_enable = true;
+ } else if (!strcmp(cmd, "cpu")) {
+ debug_printf(state, "cpu %d\n", state->current_cpu);
+ } else if (!strncmp(cmd, "cpu ", 4)) {
+ unsigned long cpu = 0;
+ if (strict_strtoul(cmd + 4, 10, &cpu) == 0)
+ switch_cpu(state, cpu);
+ else
+ debug_printf(state, "invalid cpu\n");
+ debug_printf(state, "cpu %d\n", state->current_cpu);
+ } else {
+ if (state->debug_busy) {
+ debug_printf(state,
+ "command processor busy. trying to abort.\n");
+ state->debug_abort = -1;
+ } else {
+ strcpy(state->debug_cmd, cmd);
+ state->debug_busy = 1;
+ }
+
+ return true;
+ }
+ if (!state->console_enable)
+ debug_prompt(state);
+
+ return signal_helper;
+}
+
+static void sleep_timer_expired(unsigned long data)
+{
+ struct fiq_debugger_state *state = (struct fiq_debugger_state *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->sleep_timer_lock, flags);
+ if (state->uart_enabled && !state->no_sleep) {
+ if (state->debug_enable && !state->console_enable) {
+ state->debug_enable = false;
+ debug_printf_nfiq(state, "suspending fiq debugger\n");
+ }
+ state->ignore_next_wakeup_irq = true;
+ debug_uart_disable(state);
+ state->uart_enabled = false;
+ enable_wakeup_irq(state);
+ }
+ wake_unlock(&state->debugger_wake_lock);
+ spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+}
+
+static void handle_wakeup(struct fiq_debugger_state *state)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->sleep_timer_lock, flags);
+ if (state->wakeup_irq >= 0 && state->ignore_next_wakeup_irq) {
+ state->ignore_next_wakeup_irq = false;
+ } else if (!state->uart_enabled) {
+ wake_lock(&state->debugger_wake_lock);
+ debug_uart_enable(state);
+ state->uart_enabled = true;
+ disable_wakeup_irq(state);
+ mod_timer(&state->sleep_timer, jiffies + HZ / 2);
+ }
+ spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+}
+
+static irqreturn_t wakeup_irq_handler(int irq, void *dev)
+{
+ struct fiq_debugger_state *state = dev;
+
+ if (!state->no_sleep)
+ debug_puts(state, "WAKEUP\n");
+ handle_wakeup(state);
+
+ return IRQ_HANDLED;
+}
+
+
+static void debug_handle_irq_context(struct fiq_debugger_state *state)
+{
+ if (!state->no_sleep) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->sleep_timer_lock, flags);
+ wake_lock(&state->debugger_wake_lock);
+ mod_timer(&state->sleep_timer, jiffies + HZ * 5);
+ spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+ }
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+ if (state->tty) {
+ int i;
+ int count = fiq_debugger_ringbuf_level(state->tty_rbuf);
+ for (i = 0; i < count; i++) {
+ int c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0);
+ tty_insert_flip_char(state->tty, c, TTY_NORMAL);
+ if (!fiq_debugger_ringbuf_consume(state->tty_rbuf, 1))
+ pr_warn("fiq tty failed to consume byte\n");
+ }
+ tty_flip_buffer_push(state->tty);
+ }
+#endif
+ if (state->debug_busy) {
+ debug_irq_exec(state, state->debug_cmd);
+ if (!state->console_enable)
+ debug_prompt(state);
+ state->debug_busy = 0;
+ }
+}
+
+static int debug_getc(struct fiq_debugger_state *state)
+{
+ return state->pdata->uart_getc(state->pdev);
+}
+
+static bool debug_handle_uart_interrupt(struct fiq_debugger_state *state,
+ int this_cpu, void *regs, void *svc_sp)
+{
+ int c;
+ static int last_c;
+ int count = 0;
+ bool signal_helper = false;
+
+ if (this_cpu != state->current_cpu) {
+ if (state->in_fiq)
+ return false;
+
+ if (atomic_inc_return(&state->unhandled_fiq_count) !=
+ MAX_UNHANDLED_FIQ_COUNT)
+ return false;
+
+ debug_printf(state, "fiq_debugger: cpu %d not responding, "
+ "reverting to cpu %d\n", state->current_cpu,
+ this_cpu);
+
+ atomic_set(&state->unhandled_fiq_count, 0);
+ switch_cpu(state, this_cpu);
+ return false;
+ }
+
+ state->in_fiq = true;
+
+ while ((c = debug_getc(state)) != FIQ_DEBUGGER_NO_CHAR) {
+ count++;
+ if (!state->debug_enable) {
+ if ((c == 13) || (c == 10)) {
+ state->debug_enable = true;
+ state->debug_count = 0;
+ debug_prompt(state);
+ }
+ } else if (c == FIQ_DEBUGGER_BREAK) {
+ state->console_enable = false;
+ debug_puts(state, "fiq debugger mode\n");
+ state->debug_count = 0;
+ debug_prompt(state);
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+ } else if (state->console_enable && state->tty_rbuf) {
+ fiq_debugger_ringbuf_push(state->tty_rbuf, c);
+ signal_helper = true;
+#endif
+ } else if ((c >= ' ') && (c < 127)) {
+ if (state->debug_count < (DEBUG_MAX - 1)) {
+ state->debug_buf[state->debug_count++] = c;
+ debug_putc(state, c);
+ }
+ } else if ((c == 8) || (c == 127)) {
+ if (state->debug_count > 0) {
+ state->debug_count--;
+ debug_putc(state, 8);
+ debug_putc(state, ' ');
+ debug_putc(state, 8);
+ }
+ } else if ((c == 13) || (c == 10)) {
+ if (c == '\r' || (c == '\n' && last_c != '\r')) {
+ debug_putc(state, '\r');
+ debug_putc(state, '\n');
+ }
+ if (state->debug_count) {
+ state->debug_buf[state->debug_count] = 0;
+ state->debug_count = 0;
+ signal_helper |=
+ debug_fiq_exec(state, state->debug_buf,
+ regs, svc_sp);
+ } else {
+ debug_prompt(state);
+ }
+ }
+ last_c = c;
+ }
+ if (!state->console_enable)
+ debug_uart_flush(state);
+ if (state->pdata->fiq_ack)
+ state->pdata->fiq_ack(state->pdev, state->fiq);
+
+ /* poke sleep timer if necessary */
+ if (state->debug_enable && !state->no_sleep)
+ signal_helper = true;
+
+ atomic_set(&state->unhandled_fiq_count, 0);
+ state->in_fiq = false;
+
+ return signal_helper;
+}
+
+static void debug_fiq(struct fiq_glue_handler *h, void *regs, void *svc_sp)
+{
+ struct fiq_debugger_state *state =
+ container_of(h, struct fiq_debugger_state, handler);
+ unsigned int this_cpu = THREAD_INFO(svc_sp)->cpu;
+ bool need_irq;
+
+ need_irq = debug_handle_uart_interrupt(state, this_cpu, regs, svc_sp);
+ if (need_irq)
+ debug_force_irq(state);
+}
+
+/*
+ * When not using FIQs, we only use this single interrupt as an entry point.
+ * This just effectively takes over the UART interrupt and does all the work
+ * in this context.
+ */
+static irqreturn_t debug_uart_irq(int irq, void *dev)
+{
+ struct fiq_debugger_state *state = dev;
+ bool not_done;
+
+ handle_wakeup(state);
+
+ /* handle the debugger irq in regular context */
+ not_done = debug_handle_uart_interrupt(state, smp_processor_id(),
+ get_irq_regs(),
+ current_thread_info());
+ if (not_done)
+ debug_handle_irq_context(state);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * If FIQs are used, not everything can happen in fiq context.
+ * FIQ handler does what it can and then signals this interrupt to finish the
+ * job in irq context.
+ */
+static irqreturn_t debug_signal_irq(int irq, void *dev)
+{
+ struct fiq_debugger_state *state = dev;
+
+ if (state->pdata->force_irq_ack)
+ state->pdata->force_irq_ack(state->pdev, state->signal_irq);
+
+ debug_handle_irq_context(state);
+
+ return IRQ_HANDLED;
+}
+
+static void debug_resume(struct fiq_glue_handler *h)
+{
+ struct fiq_debugger_state *state =
+ container_of(h, struct fiq_debugger_state, handler);
+ if (state->pdata->uart_resume)
+ state->pdata->uart_resume(state->pdev);
+}
+
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+struct tty_driver *debug_console_device(struct console *co, int *index)
+{
+ *index = co->index;
+ return fiq_tty_driver;
+}
+
+static void debug_console_write(struct console *co,
+ const char *s, unsigned int count)
+{
+ struct fiq_debugger_state *state;
+ unsigned long flags;
+
+ state = container_of(co, struct fiq_debugger_state, console);
+
+ if (!state->console_enable && !state->syslog_dumping)
+ return;
+
+ debug_uart_enable(state);
+ spin_lock_irqsave(&state->console_lock, flags);
+ while (count--) {
+ if (*s == '\n')
+ debug_putc(state, '\r');
+ debug_putc(state, *s++);
+ }
+ debug_uart_flush(state);
+ spin_unlock_irqrestore(&state->console_lock, flags);
+ debug_uart_disable(state);
+}
+
+static struct console fiq_debugger_console = {
+ .name = "ttyFIQ",
+ .device = debug_console_device,
+ .write = debug_console_write,
+ .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_ENABLED,
+};
+
+int fiq_tty_open(struct tty_struct *tty, struct file *filp)
+{
+ int line = tty->index;
+ struct fiq_debugger_state **states = tty->driver->driver_state;
+ struct fiq_debugger_state *state = states[line];
+ if (state->tty_open_count++)
+ return 0;
+
+ tty->driver_data = state;
+ state->tty = tty;
+ return 0;
+}
+
+void fiq_tty_close(struct tty_struct *tty, struct file *filp)
+{
+ struct fiq_debugger_state *state = tty->driver_data;
+ if (--state->tty_open_count)
+ return;
+ state->tty = NULL;
+}
+
+int fiq_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+ int i;
+ struct fiq_debugger_state *state = tty->driver_data;
+
+ if (!state->console_enable)
+ return count;
+
+ debug_uart_enable(state);
+ spin_lock_irq(&state->console_lock);
+ for (i = 0; i < count; i++)
+ debug_putc(state, *buf++);
+ spin_unlock_irq(&state->console_lock);
+ debug_uart_disable(state);
+
+ return count;
+}
+
+int fiq_tty_write_room(struct tty_struct *tty)
+{
+ return 16;
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+static int fiq_tty_poll_init(struct tty_driver *driver, int line, char *options)
+{
+ return 0;
+}
+
+static int fiq_tty_poll_get_char(struct tty_driver *driver, int line)
+{
+ struct fiq_debugger_state *state = driver->ttys[line]->driver_data;
+ int c = NO_POLL_CHAR;
+
+ debug_uart_enable(state);
+ if (debug_have_fiq(state)) {
+ int count = fiq_debugger_ringbuf_level(state->tty_rbuf);
+ if (count > 0) {
+ c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0);
+ fiq_debugger_ringbuf_consume(state->tty_rbuf, 1);
+ }
+ } else {
+ c = debug_getc(state);
+ if (c == FIQ_DEBUGGER_NO_CHAR)
+ c = NO_POLL_CHAR;
+ }
+ debug_uart_disable(state);
+
+ return c;
+}
+
+static void fiq_tty_poll_put_char(struct tty_driver *driver, int line, char ch)
+{
+ struct fiq_debugger_state *state = driver->ttys[line]->driver_data;
+ debug_uart_enable(state);
+ debug_putc(state, ch);
+ debug_uart_disable(state);
+}
+#endif
+
+static const struct tty_operations fiq_tty_driver_ops = {
+ .write = fiq_tty_write,
+ .write_room = fiq_tty_write_room,
+ .open = fiq_tty_open,
+ .close = fiq_tty_close,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_init = fiq_tty_poll_init,
+ .poll_get_char = fiq_tty_poll_get_char,
+ .poll_put_char = fiq_tty_poll_put_char,
+#endif
+};
+
+static int fiq_debugger_tty_init(void)
+{
+ int ret;
+ struct fiq_debugger_state **states = NULL;
+
+ states = kzalloc(sizeof(*states) * MAX_FIQ_DEBUGGER_PORTS, GFP_KERNEL);
+ if (!states) {
+ pr_err("Failed to allocate fiq debugger state structres\n");
+ return -ENOMEM;
+ }
+
+ fiq_tty_driver = alloc_tty_driver(MAX_FIQ_DEBUGGER_PORTS);
+ if (!fiq_tty_driver) {
+ pr_err("Failed to allocate fiq debugger tty\n");
+ ret = -ENOMEM;
+ goto err_free_state;
+ }
+
+ fiq_tty_driver->owner = THIS_MODULE;
+ fiq_tty_driver->driver_name = "fiq-debugger";
+ fiq_tty_driver->name = "ttyFIQ";
+ fiq_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ fiq_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ fiq_tty_driver->init_termios = tty_std_termios;
+ fiq_tty_driver->flags = TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV;
+ fiq_tty_driver->driver_state = states;
+
+ fiq_tty_driver->init_termios.c_cflag =
+ B115200 | CS8 | CREAD | HUPCL | CLOCAL;
+ fiq_tty_driver->init_termios.c_ispeed = 115200;
+ fiq_tty_driver->init_termios.c_ospeed = 115200;
+
+ tty_set_operations(fiq_tty_driver, &fiq_tty_driver_ops);
+
+ ret = tty_register_driver(fiq_tty_driver);
+ if (ret) {
+ pr_err("Failed to register fiq tty: %d\n", ret);
+ goto err_free_tty;
+ }
+
+ pr_info("Registered FIQ tty driver\n");
+ return 0;
+
+err_free_tty:
+ put_tty_driver(fiq_tty_driver);
+ fiq_tty_driver = NULL;
+err_free_state:
+ kfree(states);
+ return ret;
+}
+
+static int fiq_debugger_tty_init_one(struct fiq_debugger_state *state)
+{
+ int ret;
+ struct device *tty_dev;
+ struct fiq_debugger_state **states = fiq_tty_driver->driver_state;
+
+ states[state->pdev->id] = state;
+
+ state->tty_rbuf = fiq_debugger_ringbuf_alloc(1024);
+ if (!state->tty_rbuf) {
+ pr_err("Failed to allocate fiq debugger ringbuf\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ tty_dev = tty_register_device(fiq_tty_driver, state->pdev->id,
+ &state->pdev->dev);
+ if (IS_ERR(tty_dev)) {
+ pr_err("Failed to register fiq debugger tty device\n");
+ ret = PTR_ERR(tty_dev);
+ goto err;
+ }
+
+ device_set_wakeup_capable(tty_dev, 1);
+
+ pr_info("Registered fiq debugger ttyFIQ%d\n", state->pdev->id);
+
+ return 0;
+
+err:
+ fiq_debugger_ringbuf_free(state->tty_rbuf);
+ state->tty_rbuf = NULL;
+ return ret;
+}
+#else
+static int fiq_debugger_tty_init(void) { return 0; }
+#endif /* CONFIG_FIQ_DEBUGGER_CONSOLE */
+
+static int fiq_debugger_dev_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fiq_debugger_state *state = platform_get_drvdata(pdev);
+
+ if (state->pdata->uart_dev_suspend)
+ return state->pdata->uart_dev_suspend(pdev);
+ return 0;
+}
+
+static int fiq_debugger_dev_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fiq_debugger_state *state = platform_get_drvdata(pdev);
+
+ if (state->pdata->uart_dev_resume)
+ return state->pdata->uart_dev_resume(pdev);
+ return 0;
+}
+
+static int fiq_debugger_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct fiq_debugger_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct fiq_debugger_state *state;
+ int fiq;
+ int uart_irq;
+
+ if (pdev->id >= MAX_FIQ_DEBUGGER_PORTS)
+ return -EINVAL;
+
+ if (!pdata->uart_getc || !pdata->uart_putc)
+ return -EINVAL;
+ if ((pdata->uart_enable && !pdata->uart_disable) ||
+ (!pdata->uart_enable && pdata->uart_disable))
+ return -EINVAL;
+
+ fiq = platform_get_irq_byname(pdev, "fiq");
+ uart_irq = platform_get_irq_byname(pdev, "uart_irq");
+
+ /* uart_irq mode and fiq mode are mutually exclusive, but one of them
+ * is required */
+ if ((uart_irq < 0 && fiq < 0) || (uart_irq >= 0 && fiq >= 0))
+ return -EINVAL;
+ if (fiq >= 0 && !pdata->fiq_enable)
+ return -EINVAL;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ setup_timer(&state->sleep_timer, sleep_timer_expired,
+ (unsigned long)state);
+ state->pdata = pdata;
+ state->pdev = pdev;
+ state->no_sleep = initial_no_sleep;
+ state->debug_enable = initial_debug_enable;
+ state->console_enable = initial_console_enable;
+
+ state->fiq = fiq;
+ state->uart_irq = uart_irq;
+ state->signal_irq = platform_get_irq_byname(pdev, "signal");
+ state->wakeup_irq = platform_get_irq_byname(pdev, "wakeup");
+
+ INIT_WORK(&state->work, debug_work);
+ spin_lock_init(&state->work_lock);
+
+ platform_set_drvdata(pdev, state);
+
+ spin_lock_init(&state->sleep_timer_lock);
+
+ if (state->wakeup_irq < 0 && debug_have_fiq(state))
+ state->no_sleep = true;
+ state->ignore_next_wakeup_irq = !state->no_sleep;
+
+ wake_lock_init(&state->debugger_wake_lock,
+ WAKE_LOCK_SUSPEND, "serial-debug");
+
+ state->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(state->clk))
+ state->clk = NULL;
+
+ /* do not call pdata->uart_enable here since uart_init may still
+ * need to do some initialization before uart_enable can work.
+ * So, only try to manage the clock during init.
+ */
+ if (state->clk)
+ clk_enable(state->clk);
+
+ if (pdata->uart_init) {
+ ret = pdata->uart_init(pdev);
+ if (ret)
+ goto err_uart_init;
+ }
+
+ debug_printf_nfiq(state, "<hit enter %sto activate fiq debugger>\n",
+ state->no_sleep ? "" : "twice ");
+
+ if (debug_have_fiq(state)) {
+ state->handler.fiq = debug_fiq;
+ state->handler.resume = debug_resume;
+ ret = fiq_glue_register_handler(&state->handler);
+ if (ret) {
+ pr_err("%s: could not install fiq handler\n", __func__);
+ goto err_register_fiq;
+ }
+
+ pdata->fiq_enable(pdev, state->fiq, 1);
+ } else {
+ ret = request_irq(state->uart_irq, debug_uart_irq,
+ IRQF_NO_SUSPEND, "debug", state);
+ if (ret) {
+ pr_err("%s: could not install irq handler\n", __func__);
+ goto err_register_irq;
+ }
+
+ /* for irq-only mode, we want this irq to wake us up, if it
+ * can.
+ */
+ enable_irq_wake(state->uart_irq);
+ }
+
+ if (state->clk)
+ clk_disable(state->clk);
+
+ if (state->signal_irq >= 0) {
+ ret = request_irq(state->signal_irq, debug_signal_irq,
+ IRQF_TRIGGER_RISING, "debug-signal", state);
+ if (ret)
+ pr_err("serial_debugger: could not install signal_irq");
+ }
+
+ if (state->wakeup_irq >= 0) {
+ ret = request_irq(state->wakeup_irq, wakeup_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_DISABLED,
+ "debug-wakeup", state);
+ if (ret) {
+ pr_err("serial_debugger: "
+ "could not install wakeup irq\n");
+ state->wakeup_irq = -1;
+ } else {
+ ret = enable_irq_wake(state->wakeup_irq);
+ if (ret) {
+ pr_err("serial_debugger: "
+ "could not enable wakeup\n");
+ state->wakeup_irq_no_set_wake = true;
+ }
+ }
+ }
+ if (state->no_sleep)
+ handle_wakeup(state);
+
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+ spin_lock_init(&state->console_lock);
+ state->console = fiq_debugger_console;
+ state->console.index = pdev->id;
+ if (!console_set_on_cmdline)
+ add_preferred_console(state->console.name,
+ state->console.index, NULL);
+ register_console(&state->console);
+ fiq_debugger_tty_init_one(state);
+#endif
+ return 0;
+
+err_register_irq:
+err_register_fiq:
+ if (pdata->uart_free)
+ pdata->uart_free(pdev);
+err_uart_init:
+ if (state->clk)
+ clk_disable(state->clk);
+ if (state->clk)
+ clk_put(state->clk);
+ wake_lock_destroy(&state->debugger_wake_lock);
+ platform_set_drvdata(pdev, NULL);
+ kfree(state);
+ return ret;
+}
+
+static const struct dev_pm_ops fiq_debugger_dev_pm_ops = {
+ .suspend = fiq_debugger_dev_suspend,
+ .resume = fiq_debugger_dev_resume,
+};
+
+static struct platform_driver fiq_debugger_driver = {
+ .probe = fiq_debugger_probe,
+ .driver = {
+ .name = "fiq_debugger",
+ .pm = &fiq_debugger_dev_pm_ops,
+ },
+};
+
+static int __init fiq_debugger_init(void)
+{
+ fiq_debugger_tty_init();
+ return platform_driver_register(&fiq_debugger_driver);
+}
+
+postcore_initcall(fiq_debugger_init);
diff --git a/arch/arm/common/fiq_debugger_ringbuf.h b/arch/arm/common/fiq_debugger_ringbuf.h
new file mode 100644
index 000000000000..2649b5581088
--- /dev/null
+++ b/arch/arm/common/fiq_debugger_ringbuf.h
@@ -0,0 +1,94 @@
+/*
+ * arch/arm/common/fiq_debugger_ringbuf.h
+ *
+ * simple lockless ringbuffer
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+struct fiq_debugger_ringbuf {
+ int len;
+ int head;
+ int tail;
+ u8 buf[];
+};
+
+
+static inline struct fiq_debugger_ringbuf *fiq_debugger_ringbuf_alloc(int len)
+{
+ struct fiq_debugger_ringbuf *rbuf;
+
+ rbuf = kzalloc(sizeof(*rbuf) + len, GFP_KERNEL);
+ if (rbuf == NULL)
+ return NULL;
+
+ rbuf->len = len;
+ rbuf->head = 0;
+ rbuf->tail = 0;
+ smp_mb();
+
+ return rbuf;
+}
+
+static inline void fiq_debugger_ringbuf_free(struct fiq_debugger_ringbuf *rbuf)
+{
+ kfree(rbuf);
+}
+
+static inline int fiq_debugger_ringbuf_level(struct fiq_debugger_ringbuf *rbuf)
+{
+ int level = rbuf->head - rbuf->tail;
+
+ if (level < 0)
+ level = rbuf->len + level;
+
+ return level;
+}
+
+static inline int fiq_debugger_ringbuf_room(struct fiq_debugger_ringbuf *rbuf)
+{
+ return rbuf->len - fiq_debugger_ringbuf_level(rbuf) - 1;
+}
+
+static inline u8
+fiq_debugger_ringbuf_peek(struct fiq_debugger_ringbuf *rbuf, int i)
+{
+ return rbuf->buf[(rbuf->tail + i) % rbuf->len];
+}
+
+static inline int
+fiq_debugger_ringbuf_consume(struct fiq_debugger_ringbuf *rbuf, int count)
+{
+ count = min(count, fiq_debugger_ringbuf_level(rbuf));
+
+ rbuf->tail = (rbuf->tail + count) % rbuf->len;
+ smp_mb();
+
+ return count;
+}
+
+static inline int
+fiq_debugger_ringbuf_push(struct fiq_debugger_ringbuf *rbuf, u8 datum)
+{
+ if (fiq_debugger_ringbuf_room(rbuf) == 0)
+ return 0;
+
+ rbuf->buf[rbuf->head] = datum;
+ smp_mb();
+ rbuf->head = (rbuf->head + 1) % rbuf->len;
+ smp_mb();
+
+ return 1;
+}
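For reference, the single-producer/single-consumer pattern this ring buffer is written for is the one used by fiq_debugger.c above, condensed here (state, c, i and count are the driver's own variables):

/* producer, FIQ context: queue a received character for the TTY */
fiq_debugger_ringbuf_push(state->tty_rbuf, c);

/* consumer, IRQ context: drain everything queued so far */
count = fiq_debugger_ringbuf_level(state->tty_rbuf);
for (i = 0; i < count; i++) {
	c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0);
	tty_insert_flip_char(state->tty, c, TTY_NORMAL);
	fiq_debugger_ringbuf_consume(state->tty_rbuf, 1);
}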
diff --git a/arch/arm/common/fiq_glue.S b/arch/arm/common/fiq_glue.S
new file mode 100644
index 000000000000..9e3455a09f8f
--- /dev/null
+++ b/arch/arm/common/fiq_glue.S
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .text
+
+ .global fiq_glue_end
+
+ /* fiq stack: r0-r15,cpsr,spsr of interrupted mode */
+
+ENTRY(fiq_glue)
+ /* store pc, cpsr from previous mode */
+ mrs r12, spsr
+ sub r11, lr, #4
+ subs r10, #1
+ bne nested_fiq
+
+ stmfd sp!, {r11-r12, lr}
+
+ /* store r8-r14 from previous mode */
+ sub sp, sp, #(7 * 4)
+ stmia sp, {r8-r14}^
+ nop
+
+ /* store r0-r7 from previous mode */
+ stmfd sp!, {r0-r7}
+
+ /* setup func(data,regs) arguments */
+ mov r0, r9
+ mov r1, sp
+ mov r3, r8
+
+ mov r7, sp
+
+ /* Get sp and lr from non-user modes */
+ and r4, r12, #MODE_MASK
+ cmp r4, #USR_MODE
+ beq fiq_from_usr_mode
+
+ mov r7, sp
+ orr r4, r4, #(PSR_I_BIT | PSR_F_BIT)
+ msr cpsr_c, r4
+ str sp, [r7, #(4 * 13)]
+ str lr, [r7, #(4 * 14)]
+ mrs r5, spsr
+ str r5, [r7, #(4 * 17)]
+
+ cmp r4, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
+ /* use fiq stack if we reenter this mode */
+ subne sp, r7, #(4 * 3)
+
+fiq_from_usr_mode:
+ msr cpsr_c, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
+ mov r2, sp
+ sub sp, r7, #12
+ stmfd sp!, {r2, ip, lr}
+ /* call func(data,regs) */
+ blx r3
+ ldmfd sp, {r2, ip, lr}
+ mov sp, r2
+
+ /* restore/discard saved state */
+ cmp r4, #USR_MODE
+ beq fiq_from_usr_mode_exit
+
+ msr cpsr_c, r4
+ ldr sp, [r7, #(4 * 13)]
+ ldr lr, [r7, #(4 * 14)]
+ msr spsr_cxsf, r5
+
+fiq_from_usr_mode_exit:
+ msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
+
+ ldmfd sp!, {r0-r7}
+ add sp, sp, #(7 * 4)
+ ldmfd sp!, {r11-r12, lr}
+exit_fiq:
+ msr spsr_cxsf, r12
+ add r10, #1
+ movs pc, r11
+
+nested_fiq:
+ orr r12, r12, #(PSR_F_BIT)
+ b exit_fiq
+
+fiq_glue_end:
+
+ENTRY(fiq_glue_setup) /* func, data, sp */
+ mrs r3, cpsr
+ msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
+ movs r8, r0
+ mov r9, r1
+ mov sp, r2
+ moveq r10, #0
+ movne r10, #1
+ msr cpsr_c, r3
+ bx lr
+
diff --git a/arch/arm/common/fiq_glue_setup.c b/arch/arm/common/fiq_glue_setup.c
new file mode 100644
index 000000000000..4044c7db95c8
--- /dev/null
+++ b/arch/arm/common/fiq_glue_setup.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <asm/fiq.h>
+#include <asm/fiq_glue.h>
+
+extern unsigned char fiq_glue, fiq_glue_end;
+extern void fiq_glue_setup(void *func, void *data, void *sp);
+
+static struct fiq_handler fiq_debugger_fiq_handler = {
+ .name = "fiq_glue",
+};
+DEFINE_PER_CPU(void *, fiq_stack);
+static struct fiq_glue_handler *current_handler;
+static DEFINE_MUTEX(fiq_glue_lock);
+
+static void fiq_glue_setup_helper(void *info)
+{
+ struct fiq_glue_handler *handler = info;
+ fiq_glue_setup(handler->fiq, handler,
+ __get_cpu_var(fiq_stack) + THREAD_START_SP);
+}
+
+int fiq_glue_register_handler(struct fiq_glue_handler *handler)
+{
+ int ret;
+ int cpu;
+
+ if (!handler || !handler->fiq)
+ return -EINVAL;
+
+ mutex_lock(&fiq_glue_lock);
+ if (fiq_stack) {
+ ret = -EBUSY;
+ goto err_busy;
+ }
+
+ for_each_possible_cpu(cpu) {
+ void *stack;
+ stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+ if (WARN_ON(!stack)) {
+ ret = -ENOMEM;
+ goto err_alloc_fiq_stack;
+ }
+ per_cpu(fiq_stack, cpu) = stack;
+ }
+
+ ret = claim_fiq(&fiq_debbuger_fiq_handler);
+ if (WARN_ON(ret))
+ goto err_claim_fiq;
+
+ current_handler = handler;
+ on_each_cpu(fiq_glue_setup_helper, handler, true);
+ set_fiq_handler(&fiq_glue, &fiq_glue_end - &fiq_glue);
+
+ mutex_unlock(&fiq_glue_lock);
+ return 0;
+
+err_claim_fiq:
+err_alloc_fiq_stack:
+ for_each_possible_cpu(cpu) {
+ __free_pages(per_cpu(fiq_stack, cpu), THREAD_SIZE_ORDER);
+ per_cpu(fiq_stack, cpu) = NULL;
+ }
+err_busy:
+ mutex_unlock(&fiq_glue_lock);
+ return ret;
+}
+
+/**
+ * fiq_glue_resume - Restore fiqs after suspend or low power idle states
+ *
+ * This must be called before calling local_fiq_enable after returning from a
+ * power state where the fiq mode registers were lost. If a driver provided
+ * a resume hook when it registered the handler it will be called.
+ */
+
+void fiq_glue_resume(void)
+{
+ if (!current_handler)
+ return;
+ fiq_glue_setup(current_handler->fiq, current_handler,
+ __get_cpu_var(fiq_stack) + THREAD_START_SP);
+ if (current_handler->resume)
+ current_handler->resume(current_handler);
+}
+
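The ordering requirement in the fiq_glue_resume() comment can be illustrated with a hypothetical platform resume path (sketch only; my_platform_resume_from_lp is not a function from this patch):

static void my_platform_resume_from_lp(void)
{
	/* restore the FIQ mode registers and run the handler's resume hook */
	fiq_glue_resume();
	/* only after that is it safe to re-enable FIQs */
	local_fiq_enable();
}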
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index e1489c54cd12..4e8217b204af 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -16,6 +16,7 @@
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>
+#include <asm/rodata.h>
#define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
diff --git a/arch/arm/include/asm/fiq_debugger.h b/arch/arm/include/asm/fiq_debugger.h
new file mode 100644
index 000000000000..4d274883ba6a
--- /dev/null
+++ b/arch/arm/include/asm/fiq_debugger.h
@@ -0,0 +1,64 @@
+/*
+ * arch/arm/include/asm/fiq_debugger.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ASM_ARM_FIQ_DEBUGGER_H_
+#define _ASM_ARM_FIQ_DEBUGGER_H_
+
+#include <linux/serial_core.h>
+
+#define FIQ_DEBUGGER_NO_CHAR NO_POLL_CHAR
+#define FIQ_DEBUGGER_BREAK 0x00ff0100
+
+#define FIQ_DEBUGGER_FIQ_IRQ_NAME "fiq"
+#define FIQ_DEBUGGER_SIGNAL_IRQ_NAME "signal"
+#define FIQ_DEBUGGER_WAKEUP_IRQ_NAME "wakeup"
+
+/**
+ * struct fiq_debugger_pdata - fiq debugger platform data
+ * @uart_resume: used to restore uart state right before enabling
+ * the fiq.
+ * @uart_enable: Do the work necessary to communicate with the uart
+ * hw (enable clocks, etc.). This must be ref-counted.
+ * @uart_disable: Do the work necessary to disable the uart hw
+ * (disable clocks, etc.). This must be ref-counted.
+ * @uart_dev_suspend: called during PM suspend, generally not needed
+ * for real fiq mode debugger.
+ * @uart_dev_resume: called during PM resume, generally not needed
+ * for real fiq mode debugger.
+ */
+struct fiq_debugger_pdata {
+ int (*uart_init)(struct platform_device *pdev);
+ void (*uart_free)(struct platform_device *pdev);
+ int (*uart_resume)(struct platform_device *pdev);
+ int (*uart_getc)(struct platform_device *pdev);
+ void (*uart_putc)(struct platform_device *pdev, unsigned int c);
+ void (*uart_flush)(struct platform_device *pdev);
+ void (*uart_enable)(struct platform_device *pdev);
+ void (*uart_disable)(struct platform_device *pdev);
+
+ int (*uart_dev_suspend)(struct platform_device *pdev);
+ int (*uart_dev_resume)(struct platform_device *pdev);
+
+ void (*fiq_enable)(struct platform_device *pdev, unsigned int fiq,
+ bool enable);
+ void (*fiq_ack)(struct platform_device *pdev, unsigned int fiq);
+
+ void (*force_irq)(struct platform_device *pdev, unsigned int irq);
+ void (*force_irq_ack)(struct platform_device *pdev, unsigned int irq);
+};
+
+#endif
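For reference, a hedged sketch (not part of this patch) of how a platform might fill in the polled-I/O hooks above; the my_uart_* helpers stand in for SoC-specific register accessors and are hypothetical:

#include <linux/platform_device.h>
#include <asm/fiq_debugger.h>

/* hypothetical SoC-specific polled UART accessors */
extern bool my_uart_rx_ready(void);
extern int my_uart_read_char(void);
extern void my_uart_write_char(unsigned int c);

static int dbg_uart_getc(struct platform_device *pdev)
{
	if (!my_uart_rx_ready())
		return FIQ_DEBUGGER_NO_CHAR;
	return my_uart_read_char();
}

static void dbg_uart_putc(struct platform_device *pdev, unsigned int c)
{
	my_uart_write_char(c);
}

static struct fiq_debugger_pdata dbg_pdata = {
	.uart_getc = dbg_uart_getc,
	.uart_putc = dbg_uart_putc,
};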
diff --git a/arch/arm/include/asm/fiq_glue.h b/arch/arm/include/asm/fiq_glue.h
new file mode 100644
index 000000000000..d54c29db97a8
--- /dev/null
+++ b/arch/arm/include/asm/fiq_glue.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_FIQ_GLUE_H
+#define __ASM_FIQ_GLUE_H
+
+struct fiq_glue_handler {
+ void (*fiq)(struct fiq_glue_handler *h, void *regs, void *svc_sp);
+ void (*resume)(struct fiq_glue_handler *h);
+};
+
+int fiq_glue_register_handler(struct fiq_glue_handler *handler);
+
+#ifdef CONFIG_FIQ_GLUE
+void fiq_glue_resume(void);
+#else
+static inline void fiq_glue_resume(void) {}
+#endif
+
+#endif
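A minimal sketch (not part of this patch) of a client of this interface; the handler body is schematic, and it runs in FIQ mode on the per-CPU stack allocated by fiq_glue_register_handler(), so it must not sleep or take normal kernel locks:

#include <linux/init.h>
#include <asm/fiq_glue.h>

static void my_fiq(struct fiq_glue_handler *h, void *regs, void *svc_sp)
{
	/* acknowledge the FIQ source and record whatever state is needed */
}

static struct fiq_glue_handler my_fiq_handler = {
	.fiq = my_fiq,
};

static int __init my_fiq_init(void)
{
	return fiq_glue_register_handler(&my_fiq_handler);
}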
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 2740c2a2df63..3d7351c844aa 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -5,7 +5,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
-#define NR_IPI 6
+#define NR_IPI 7
typedef struct {
unsigned int __softirq_pending;
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
index 7ecd793b8f5a..dcf74d715f2b 100644
--- a/arch/arm/include/asm/hardware/coresight.h
+++ b/arch/arm/include/asm/hardware/coresight.h
@@ -17,15 +17,23 @@
#define TRACER_ACCESSED_BIT 0
#define TRACER_RUNNING_BIT 1
#define TRACER_CYCLE_ACC_BIT 2
+#define TRACER_TRACE_DATA_BIT 3
+#define TRACER_TIMESTAMP_BIT 4
+#define TRACER_BRANCHOUTPUT_BIT 5
+#define TRACER_RETURN_STACK_BIT 6
#define TRACER_ACCESSED BIT(TRACER_ACCESSED_BIT)
#define TRACER_RUNNING BIT(TRACER_RUNNING_BIT)
#define TRACER_CYCLE_ACC BIT(TRACER_CYCLE_ACC_BIT)
+#define TRACER_TRACE_DATA BIT(TRACER_TRACE_DATA_BIT)
+#define TRACER_TIMESTAMP BIT(TRACER_TIMESTAMP_BIT)
+#define TRACER_BRANCHOUTPUT BIT(TRACER_BRANCHOUTPUT_BIT)
+#define TRACER_RETURN_STACK BIT(TRACER_RETURN_STACK_BIT)
#define TRACER_TIMEOUT 10000
-#define etm_writel(t, v, x) \
- (__raw_writel((v), (t)->etm_regs + (x)))
-#define etm_readl(t, x) (__raw_readl((t)->etm_regs + (x)))
+#define etm_writel(t, id, v, x) \
+ (__raw_writel((v), (t)->etm_regs[(id)] + (x)))
+#define etm_readl(t, id, x) (__raw_readl((t)->etm_regs[(id)] + (x)))
/* CoreSight Management Registers */
#define CSMR_LOCKACCESS 0xfb0
@@ -43,7 +51,7 @@
#define ETMCTRL_POWERDOWN 1
#define ETMCTRL_PROGRAM (1 << 10)
#define ETMCTRL_PORTSEL (1 << 11)
-#define ETMCTRL_DO_CONTEXTID (3 << 14)
+#define ETMCTRL_CONTEXTIDSIZE(x) (((x) & 3) << 14)
#define ETMCTRL_PORTMASK1 (7 << 4)
#define ETMCTRL_PORTMASK2 (1 << 21)
#define ETMCTRL_PORTMASK (ETMCTRL_PORTMASK1 | ETMCTRL_PORTMASK2)
@@ -55,9 +63,12 @@
#define ETMCTRL_DATA_DO_BOTH (ETMCTRL_DATA_DO_DATA | ETMCTRL_DATA_DO_ADDR)
#define ETMCTRL_BRANCH_OUTPUT (1 << 8)
#define ETMCTRL_CYCLEACCURATE (1 << 12)
+#define ETMCTRL_TIMESTAMP_EN (1 << 28)
+#define ETMCTRL_RETURN_STACK_EN (1 << 29)
/* ETM configuration code register */
#define ETMR_CONFCODE (0x04)
+#define ETMCCR_ETMIDR_PRESENT BIT(31)
/* ETM trace start/stop resource control register */
#define ETMR_TRACESSCTRL (0x18)
@@ -113,10 +124,25 @@
#define ETMR_TRACEENCTRL 0x24
#define ETMTE_INCLEXCL BIT(24)
#define ETMR_TRACEENEVT 0x20
-#define ETMCTRL_OPTS (ETMCTRL_DO_CPRT | \
- ETMCTRL_DATA_DO_ADDR | \
- ETMCTRL_BRANCH_OUTPUT | \
- ETMCTRL_DO_CONTEXTID)
+
+#define ETMR_VIEWDATAEVT 0x30
+#define ETMR_VIEWDATACTRL1 0x34
+#define ETMR_VIEWDATACTRL2 0x38
+#define ETMR_VIEWDATACTRL3 0x3c
+#define ETMVDC3_EXCLONLY BIT(16)
+
+#define ETMCTRL_OPTS (ETMCTRL_DO_CPRT)
+
+#define ETMR_ID 0x1e4
+#define ETMIDR_VERSION(x) (((x) >> 4) & 0xff)
+#define ETMIDR_VERSION_3_1 0x21
+#define ETMIDR_VERSION_PFT_1_0 0x30
+
+#define ETMR_CCE 0x1e8
+#define ETMCCER_RETURN_STACK_IMPLEMENTED BIT(23)
+#define ETMCCER_TIMESTAMPING_IMPLEMENTED BIT(22)
+
+#define ETMR_TRACEIDR 0x200
/* ETM management registers, "ETM Architecture", 3.5.24 */
#define ETMMR_OSLAR 0x300
@@ -140,14 +166,16 @@
#define ETBFF_TRIGIN BIT(8)
#define ETBFF_TRIGEVT BIT(9)
#define ETBFF_TRIGFL BIT(10)
+#define ETBFF_STOPFL BIT(12)
#define etb_writel(t, v, x) \
(__raw_writel((v), (t)->etb_regs + (x)))
#define etb_readl(t, x) (__raw_readl((t)->etb_regs + (x)))
-#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
-#define etm_unlock(t) \
- do { etm_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
+#define etm_lock(t, id) \
+ do { etm_writel((t), (id), 0, CSMR_LOCKACCESS); } while (0)
+#define etm_unlock(t, id) \
+ do { etm_writel((t), (id), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
#define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0)
#define etb_unlock(t) \
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index 01169dd723f1..eef55ea9ef00 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -85,6 +85,9 @@ static inline void decode_ctrl_reg(u32 reg,
#define ARM_DSCR_HDBGEN (1 << 14)
#define ARM_DSCR_MDBGEN (1 << 15)
+/* OSLSR os lock model bits */
+#define ARM_OSLSR_OSLM0 (1 << 0)
+
/* opcode2 numbers for the co-processor instructions. */
#define ARM_OP2_BVR 4
#define ARM_OP2_BCR 5
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 35c21c375d81..3e0857a6248e 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -30,6 +30,9 @@ extern void asm_do_IRQ(unsigned int, struct pt_regs *);
void handle_IRQ(unsigned int, struct pt_regs *);
void init_IRQ(void);
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+
#endif
#endif
diff --git a/arch/arm/include/asm/mach/mmc.h b/arch/arm/include/asm/mach/mmc.h
new file mode 100644
index 000000000000..bca864ac945f
--- /dev/null
+++ b/arch/arm/include/asm/mach/mmc.h
@@ -0,0 +1,28 @@
+/*
+ * arch/arm/include/asm/mach/mmc.h
+ */
+#ifndef ASMARM_MACH_MMC_H
+#define ASMARM_MACH_MMC_H
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+
+struct embedded_sdio_data {
+ struct sdio_cis cis;
+ struct sdio_cccr cccr;
+ struct sdio_embedded_func *funcs;
+ int num_funcs;
+};
+
+struct mmc_platform_data {
+ unsigned int ocr_mask; /* available voltages */
+ int built_in; /* built-in device flag */
+ int card_present; /* card detect state */
+ u32 (*translate_vdd)(struct device *, unsigned int);
+ unsigned int (*status)(struct device *);
+ struct embedded_sdio_data *embedded_sdio;
+ int (*register_status_notify)(void (*callback)(int card_present, void *dev_id), void *dev_id);
+};
+
+#endif
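A hedged sketch (not part of this patch) of a board file describing a built-in SDIO WLAN function with this platform data; the vendor/device IDs and voltage mask are illustrative only:

#include <asm/mach/mmc.h>

static struct embedded_sdio_data wifi_emb_data = {
	.cis = {
		.vendor	= 0x1234,	/* illustrative IDs only */
		.device	= 0x5678,
	},
};

static struct mmc_platform_data wifi_mmc_data = {
	.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
	.built_in	= 1,
	.embedded_sdio	= &wifi_emb_data,
};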
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index f24edad26c70..0cd7824ca762 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -62,9 +62,19 @@ struct pmu_hw_events {
raw_spinlock_t pmu_lock;
};
+struct cpupmu_regs {
+ u32 pmc;
+ u32 pmcntenset;
+ u32 pmuseren;
+ u32 pmintenset;
+ u32 pmxevttype[8];
+ u32 pmxevtcnt[8];
+};
+
struct arm_pmu {
struct pmu pmu;
cpumask_t active_irqs;
+ cpumask_t valid_cpus;
char *name;
irqreturn_t (*handle_irq)(int irq_num, void *dev);
void (*enable)(struct perf_event *event);
@@ -81,6 +91,8 @@ struct arm_pmu {
int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
void (*free_irq)(struct arm_pmu *);
int (*map_event)(struct perf_event *event);
+ void (*save_regs)(struct arm_pmu *, struct cpupmu_regs *);
+ void (*restore_regs)(struct arm_pmu *, struct cpupmu_regs *);
int num_events;
atomic_t active_events;
struct mutex reserve_mutex;
diff --git a/arch/arm/include/asm/rodata.h b/arch/arm/include/asm/rodata.h
new file mode 100644
index 000000000000..8c8add87bbc5
--- /dev/null
+++ b/arch/arm/include/asm/rodata.h
@@ -0,0 +1,32 @@
+/*
+ * arch/arm/include/asm/rodata.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASMARM_RODATA_H
+#define _ASMARM_RODATA_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_DEBUG_RODATA
+
+int set_memory_rw(unsigned long virt, int numpages);
+int set_memory_ro(unsigned long virt, int numpages);
+
+void mark_rodata_ro(void);
+void set_kernel_text_rw(void);
+void set_kernel_text_ro(void);
+#else
+static inline void set_kernel_text_rw(void) { }
+static inline void set_kernel_text_ro(void) { }
+#endif
+
+#endif
+
+#endif
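A minimal sketch (not part of this patch), assuming CONFIG_DEBUG_RODATA, of how these helpers can bracket a kernel text modification, in the same spirit as the ftrace change below that uses set_kernel_text_rw()/set_kernel_text_ro(); patch_addr and new_insn are hypothetical:

#include <asm/cacheflush.h>	/* now also pulls in asm/rodata.h */

static void patch_text_word(unsigned long patch_addr, u32 new_insn)
{
	unsigned long page = patch_addr & PAGE_MASK;

	set_memory_rw(page, 1);			/* make the page writable */
	*(u32 *)patch_addr = new_insn;
	flush_icache_range(patch_addr, patch_addr + sizeof(new_insn));
	set_memory_ro(page, 1);			/* restore the protection */
}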
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index d3a22bebe6ce..c5aa088c0a8b 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -81,6 +81,8 @@ extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
+extern void smp_send_all_cpu_backtrace(void);
+
struct smp_operations {
#ifdef CONFIG_SMP
/*
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index 58b8b84adcd2..983fa7c153a2 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -26,11 +26,45 @@ extern struct cputopo_arm cpu_topology[NR_CPUS];
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
+int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask);
+
+#ifdef CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE
+/* Common values for CPUs */
+#ifndef SD_CPU_INIT
+#define SD_CPU_INIT (struct sched_domain) { \
+ .min_interval = 1, \
+ .max_interval = 4, \
+ .busy_factor = 64, \
+ .imbalance_pct = 125, \
+ .cache_nice_tries = 1, \
+ .busy_idx = 2, \
+ .idle_idx = 1, \
+ .newidle_idx = 0, \
+ .wake_idx = 0, \
+ .forkexec_idx = 0, \
+ \
+ .flags = 0*SD_LOAD_BALANCE \
+ | 1*SD_BALANCE_NEWIDLE \
+ | 1*SD_BALANCE_EXEC \
+ | 1*SD_BALANCE_FORK \
+ | 0*SD_BALANCE_WAKE \
+ | 1*SD_WAKE_AFFINE \
+ | 0*SD_SHARE_CPUPOWER \
+ | 0*SD_SHARE_PKG_RESOURCES \
+ | 0*SD_SERIALIZE \
+ , \
+ .last_balance = jiffies, \
+ .balance_interval = 1, \
+}
+#endif
+#endif /* CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE */
#else
static inline void init_cpu_topology(void) { }
static inline void store_cpu_topology(unsigned int cpuid) { }
+static inline int cluster_to_logical_mask(unsigned int socket_id,
+ cpumask_t *cluster_mask) { return -EINVAL; }
#endif
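A small sketch (not part of this patch) of the intended use of cluster_to_logical_mask(): resolve a cluster/socket id to its logical CPU mask and fall back to all CPUs when the id is unknown, which is the pattern the PMU probe further down follows:

#include <linux/cpumask.h>
#include <asm/topology.h>

static void cluster_cpus_or_all(unsigned int cluster, cpumask_t *mask)
{
	if (cluster_to_logical_mask(cluster, mask))
		cpumask_setall(mask);	/* unknown cluster: assume all CPUs */
}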
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
index 9b6de8c988f3..a8433bdee002 100644
--- a/arch/arm/kernel/etm.c
+++ b/arch/arm/kernel/etm.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/io.h>
+#include <linux/slab.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/clk.h>
@@ -37,26 +38,37 @@ MODULE_AUTHOR("Alexander Shishkin");
struct tracectx {
unsigned int etb_bufsz;
void __iomem *etb_regs;
- void __iomem *etm_regs;
+ void __iomem **etm_regs;
+ int etm_regs_count;
unsigned long flags;
int ncmppairs;
int etm_portsz;
+ int etm_contextid_size;
+ u32 etb_fc;
+ unsigned long range_start;
+ unsigned long range_end;
+ unsigned long data_range_start;
+ unsigned long data_range_end;
+ bool dump_initial_etb;
struct device *dev;
struct clk *emu_clk;
struct mutex mutex;
};
-static struct tracectx tracer;
+static struct tracectx tracer = {
+ .range_start = (unsigned long)_stext,
+ .range_end = (unsigned long)_etext,
+};
static inline bool trace_isrunning(struct tracectx *t)
{
return !!(t->flags & TRACER_RUNNING);
}
-static int etm_setup_address_range(struct tracectx *t, int n,
+static int etm_setup_address_range(struct tracectx *t, int id, int n,
unsigned long start, unsigned long end, int exclude, int data)
{
- u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_NSONLY | \
+ u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_IGNSECURITY |
ETMAAT_NOVALCMP;
if (n < 1 || n > t->ncmppairs)
@@ -72,95 +84,185 @@ static int etm_setup_address_range(struct tracectx *t, int n,
flags |= ETMAAT_IEXEC;
/* first comparator for the range */
- etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2));
- etm_writel(t, start, ETMR_COMP_VAL(n * 2));
+ etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2));
+ etm_writel(t, id, start, ETMR_COMP_VAL(n * 2));
/* second comparator is right next to it */
- etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1));
- etm_writel(t, end, ETMR_COMP_VAL(n * 2 + 1));
-
- flags = exclude ? ETMTE_INCLEXCL : 0;
- etm_writel(t, flags | (1 << n), ETMR_TRACEENCTRL);
+ etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1));
+ etm_writel(t, id, end, ETMR_COMP_VAL(n * 2 + 1));
+
+ if (data) {
+ flags = exclude ? ETMVDC3_EXCLONLY : 0;
+ if (exclude)
+ n += 8;
+ etm_writel(t, id, flags | BIT(n), ETMR_VIEWDATACTRL3);
+ } else {
+ flags = exclude ? ETMTE_INCLEXCL : 0;
+ etm_writel(t, id, flags | (1 << n), ETMR_TRACEENCTRL);
+ }
return 0;
}
-static int trace_start(struct tracectx *t)
+static int trace_start_etm(struct tracectx *t, int id)
{
u32 v;
unsigned long timeout = TRACER_TIMEOUT;
- etb_unlock(t);
-
- etb_writel(t, 0, ETBR_FORMATTERCTRL);
- etb_writel(t, 1, ETBR_CTRL);
-
- etb_lock(t);
-
- /* configure etm */
v = ETMCTRL_OPTS | ETMCTRL_PROGRAM | ETMCTRL_PORTSIZE(t->etm_portsz);
+ v |= ETMCTRL_CONTEXTIDSIZE(t->etm_contextid_size);
if (t->flags & TRACER_CYCLE_ACC)
v |= ETMCTRL_CYCLEACCURATE;
- etm_unlock(t);
+ if (t->flags & TRACER_BRANCHOUTPUT)
+ v |= ETMCTRL_BRANCH_OUTPUT;
+
+ if (t->flags & TRACER_TRACE_DATA)
+ v |= ETMCTRL_DATA_DO_ADDR;
+
+ if (t->flags & TRACER_TIMESTAMP)
+ v |= ETMCTRL_TIMESTAMP_EN;
+
+ if (t->flags & TRACER_RETURN_STACK)
+ v |= ETMCTRL_RETURN_STACK_EN;
- etm_writel(t, v, ETMR_CTRL);
+ etm_unlock(t, id);
- while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
+ etm_writel(t, id, v, ETMR_CTRL);
+
+ while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
;
if (!timeout) {
dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
- etm_lock(t);
+ etm_lock(t, id);
return -EFAULT;
}
- etm_setup_address_range(t, 1, (unsigned long)_stext,
- (unsigned long)_etext, 0, 0);
- etm_writel(t, 0, ETMR_TRACEENCTRL2);
- etm_writel(t, 0, ETMR_TRACESSCTRL);
- etm_writel(t, 0x6f, ETMR_TRACEENEVT);
+ if (t->range_start || t->range_end)
+ etm_setup_address_range(t, id, 1,
+ t->range_start, t->range_end, 0, 0);
+ else
+ etm_writel(t, id, ETMTE_INCLEXCL, ETMR_TRACEENCTRL);
+
+ etm_writel(t, id, 0, ETMR_TRACEENCTRL2);
+ etm_writel(t, id, 0, ETMR_TRACESSCTRL);
+ etm_writel(t, id, 0x6f, ETMR_TRACEENEVT);
+
+ etm_writel(t, id, 0, ETMR_VIEWDATACTRL1);
+ etm_writel(t, id, 0, ETMR_VIEWDATACTRL2);
+
+ if (t->data_range_start || t->data_range_end)
+ etm_setup_address_range(t, id, 2, t->data_range_start,
+ t->data_range_end, 0, 1);
+ else
+ etm_writel(t, id, ETMVDC3_EXCLONLY, ETMR_VIEWDATACTRL3);
+
+ etm_writel(t, id, 0x6f, ETMR_VIEWDATAEVT);
v &= ~ETMCTRL_PROGRAM;
v |= ETMCTRL_PORTSEL;
- etm_writel(t, v, ETMR_CTRL);
+ etm_writel(t, id, v, ETMR_CTRL);
timeout = TRACER_TIMEOUT;
- while (etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout)
+ while (etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout)
;
if (!timeout) {
dev_dbg(t->dev, "Waiting for progbit to deassert timed out\n");
- etm_lock(t);
+ etm_lock(t, id);
return -EFAULT;
}
- etm_lock(t);
+ etm_lock(t, id);
+ return 0;
+}
+
+static int trace_start(struct tracectx *t)
+{
+ int ret;
+ int id;
+ u32 etb_fc = t->etb_fc;
+
+ etb_unlock(t);
+
+ t->dump_initial_etb = false;
+ etb_writel(t, 0, ETBR_WRITEADDR);
+ etb_writel(t, etb_fc, ETBR_FORMATTERCTRL);
+ etb_writel(t, 1, ETBR_CTRL);
+
+ etb_lock(t);
+
+ /* configure etm(s) */
+ for (id = 0; id < t->etm_regs_count; id++) {
+ ret = trace_start_etm(t, id);
+ if (ret)
+ return ret;
+ }
t->flags |= TRACER_RUNNING;
return 0;
}
-static int trace_stop(struct tracectx *t)
+static int trace_stop_etm(struct tracectx *t, int id)
{
unsigned long timeout = TRACER_TIMEOUT;
- etm_unlock(t);
+ etm_unlock(t, id);
- etm_writel(t, 0x440, ETMR_CTRL);
- while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
+ etm_writel(t, id, 0x440, ETMR_CTRL);
+ while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
;
if (!timeout) {
- dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
- etm_lock(t);
+ dev_err(t->dev,
+ "etm%d: Waiting for progbit to assert timed out\n",
+ id);
+ etm_lock(t, id);
return -EFAULT;
}
- etm_lock(t);
+ etm_lock(t, id);
+ return 0;
+}
+
+static int trace_power_down_etm(struct tracectx *t, int id)
+{
+ unsigned long timeout = TRACER_TIMEOUT;
+ etm_unlock(t, id);
+ while (!(etm_readl(t, id, ETMR_STATUS) & ETMST_PROGBIT) && --timeout)
+ ;
+ if (!timeout) {
+ dev_err(t->dev, "etm%d: Waiting for status progbit to assert timed out\n",
+ id);
+ etm_lock(t, id);
+ return -EFAULT;
+ }
+
+ etm_writel(t, id, 0x441, ETMR_CTRL);
+
+ etm_lock(t, id);
+ return 0;
+}
+
+static int trace_stop(struct tracectx *t)
+{
+ int id;
+ unsigned long timeout = TRACER_TIMEOUT;
+ u32 etb_fc = t->etb_fc;
+
+ for (id = 0; id < t->etm_regs_count; id++)
+ trace_stop_etm(t, id);
+
+ for (id = 0; id < t->etm_regs_count; id++)
+ trace_power_down_etm(t, id);
etb_unlock(t);
- etb_writel(t, ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL);
+ if (etb_fc) {
+ etb_fc |= ETBFF_STOPFL;
+ etb_writel(t, t->etb_fc, ETBR_FORMATTERCTRL);
+ }
+ etb_writel(t, etb_fc | ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL);
timeout = TRACER_TIMEOUT;
while (etb_readl(t, ETBR_FORMATTERCTRL) &
@@ -185,24 +287,15 @@ static int trace_stop(struct tracectx *t)
static int etb_getdatalen(struct tracectx *t)
{
u32 v;
- int rp, wp;
+ int wp;
v = etb_readl(t, ETBR_STATUS);
if (v & 1)
return t->etb_bufsz;
- rp = etb_readl(t, ETBR_READADDR);
wp = etb_readl(t, ETBR_WRITEADDR);
-
- if (rp > wp) {
- etb_writel(t, 0, ETBR_READADDR);
- etb_writel(t, 0, ETBR_WRITEADDR);
-
- return 0;
- }
-
- return wp - rp;
+ return wp;
}
/* sysrq+v will always stop the running trace and leave it at that */
@@ -235,21 +328,18 @@ static void etm_dump(void)
printk("%08x", cpu_to_be32(etb_readl(t, ETBR_READMEM)));
printk(KERN_INFO "\n--- ETB buffer end ---\n");
- /* deassert the overflow bit */
- etb_writel(t, 1, ETBR_CTRL);
- etb_writel(t, 0, ETBR_CTRL);
-
- etb_writel(t, 0, ETBR_TRIGGERCOUNT);
- etb_writel(t, 0, ETBR_READADDR);
- etb_writel(t, 0, ETBR_WRITEADDR);
-
etb_lock(t);
}
static void sysrq_etm_dump(int key)
{
+ if (!mutex_trylock(&tracer.mutex)) {
+ printk(KERN_INFO "Tracing hardware busy\n");
+ return;
+ }
dev_dbg(tracer.dev, "Dumping ETB buffer\n");
etm_dump();
+ mutex_unlock(&tracer.mutex);
}
static struct sysrq_key_op sysrq_etm_op = {
@@ -276,6 +366,10 @@ static ssize_t etb_read(struct file *file, char __user *data,
struct tracectx *t = file->private_data;
u32 first = 0;
u32 *buf;
+ int wpos;
+ int skip;
+ long wlength;
+ loff_t pos = *ppos;
mutex_lock(&t->mutex);
@@ -287,31 +381,39 @@ static ssize_t etb_read(struct file *file, char __user *data,
etb_unlock(t);
total = etb_getdatalen(t);
+ if (total == 0 && t->dump_initial_etb)
+ total = t->etb_bufsz;
if (total == t->etb_bufsz)
first = etb_readl(t, ETBR_WRITEADDR);
+ if (pos > total * 4) {
+ skip = 0;
+ wpos = total;
+ } else {
+ skip = (int)pos % 4;
+ wpos = (int)pos / 4;
+ }
+ total -= wpos;
+ first = (first + wpos) % t->etb_bufsz;
+
etb_writel(t, first, ETBR_READADDR);
- length = min(total * 4, (int)len);
- buf = vmalloc(length);
+ wlength = min(total, DIV_ROUND_UP(skip + (int)len, 4));
+ length = min(total * 4 - skip, (int)len);
+ buf = vmalloc(wlength * 4);
- dev_dbg(t->dev, "ETB buffer length: %d\n", total);
+ dev_dbg(t->dev, "ETB read %ld bytes to %lld from %ld words at %d\n",
+ length, pos, wlength, first);
+ dev_dbg(t->dev, "ETB buffer length: %d\n", total + wpos);
dev_dbg(t->dev, "ETB status reg: %x\n", etb_readl(t, ETBR_STATUS));
- for (i = 0; i < length / 4; i++)
+ for (i = 0; i < wlength; i++)
buf[i] = etb_readl(t, ETBR_READMEM);
- /* the only way to deassert overflow bit in ETB status is this */
- etb_writel(t, 1, ETBR_CTRL);
- etb_writel(t, 0, ETBR_CTRL);
-
- etb_writel(t, 0, ETBR_WRITEADDR);
- etb_writel(t, 0, ETBR_READADDR);
- etb_writel(t, 0, ETBR_TRIGGERCOUNT);
-
etb_lock(t);
- length -= copy_to_user(data, buf, length);
+ length -= copy_to_user(data, (u8 *)buf + skip, length);
vfree(buf);
+ *ppos = pos + length;
out:
mutex_unlock(&t->mutex);
@@ -348,28 +450,17 @@ static int etb_probe(struct amba_device *dev, const struct amba_id *id)
if (ret)
goto out;
+ mutex_lock(&t->mutex);
t->etb_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
if (!t->etb_regs) {
ret = -ENOMEM;
goto out_release;
}
+ t->dev = &dev->dev;
+ t->dump_initial_etb = true;
amba_set_drvdata(dev, t);
- etb_miscdev.parent = &dev->dev;
-
- ret = misc_register(&etb_miscdev);
- if (ret)
- goto out_unmap;
-
- t->emu_clk = clk_get(&dev->dev, "emu_src_ck");
- if (IS_ERR(t->emu_clk)) {
- dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n");
- return -EFAULT;
- }
-
- clk_enable(t->emu_clk);
-
etb_unlock(t);
t->etb_bufsz = etb_readl(t, ETBR_DEPTH);
dev_dbg(&dev->dev, "Size: %x\n", t->etb_bufsz);
@@ -378,6 +469,20 @@ static int etb_probe(struct amba_device *dev, const struct amba_id *id)
etb_writel(t, 0, ETBR_CTRL);
etb_writel(t, 0x1000, ETBR_FORMATTERCTRL);
etb_lock(t);
+ mutex_unlock(&t->mutex);
+
+ etb_miscdev.parent = &dev->dev;
+
+ ret = misc_register(&etb_miscdev);
+ if (ret)
+ goto out_unmap;
+
+ /* Get optional clock. Currently used to select clock source on omap3 */
+ t->emu_clk = clk_get(&dev->dev, "emu_src_ck");
+ if (IS_ERR(t->emu_clk))
+ dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n");
+ else
+ clk_enable(t->emu_clk);
dev_dbg(&dev->dev, "ETB AMBA driver initialized.\n");
@@ -385,10 +490,13 @@ out:
return ret;
out_unmap:
+ mutex_lock(&t->mutex);
amba_set_drvdata(dev, NULL);
iounmap(t->etb_regs);
+ t->etb_regs = NULL;
out_release:
+ mutex_unlock(&t->mutex);
amba_release_regions(dev);
return ret;
@@ -403,8 +511,10 @@ static int etb_remove(struct amba_device *dev)
iounmap(t->etb_regs);
t->etb_regs = NULL;
- clk_disable(t->emu_clk);
- clk_put(t->emu_clk);
+ if (!IS_ERR(t->emu_clk)) {
+ clk_disable(t->emu_clk);
+ clk_put(t->emu_clk);
+ }
amba_release_regions(dev);
@@ -448,7 +558,10 @@ static ssize_t trace_running_store(struct kobject *kobj,
return -EINVAL;
mutex_lock(&tracer.mutex);
- ret = value ? trace_start(&tracer) : trace_stop(&tracer);
+ if (!tracer.etb_regs)
+ ret = -ENODEV;
+ else
+ ret = value ? trace_start(&tracer) : trace_stop(&tracer);
mutex_unlock(&tracer.mutex);
return ret ? : n;
@@ -463,36 +576,50 @@ static ssize_t trace_info_show(struct kobject *kobj,
{
u32 etb_wa, etb_ra, etb_st, etb_fc, etm_ctrl, etm_st;
int datalen;
+ int id;
+ int ret;
- etb_unlock(&tracer);
- datalen = etb_getdatalen(&tracer);
- etb_wa = etb_readl(&tracer, ETBR_WRITEADDR);
- etb_ra = etb_readl(&tracer, ETBR_READADDR);
- etb_st = etb_readl(&tracer, ETBR_STATUS);
- etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL);
- etb_lock(&tracer);
-
- etm_unlock(&tracer);
- etm_ctrl = etm_readl(&tracer, ETMR_CTRL);
- etm_st = etm_readl(&tracer, ETMR_STATUS);
- etm_lock(&tracer);
+ mutex_lock(&tracer.mutex);
+ if (tracer.etb_regs) {
+ etb_unlock(&tracer);
+ datalen = etb_getdatalen(&tracer);
+ etb_wa = etb_readl(&tracer, ETBR_WRITEADDR);
+ etb_ra = etb_readl(&tracer, ETBR_READADDR);
+ etb_st = etb_readl(&tracer, ETBR_STATUS);
+ etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL);
+ etb_lock(&tracer);
+ } else {
+ etb_wa = etb_ra = etb_st = etb_fc = ~0;
+ datalen = -1;
+ }
- return sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n"
+ ret = sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n"
"ETBR_WRITEADDR:\t%08x\n"
"ETBR_READADDR:\t%08x\n"
"ETBR_STATUS:\t%08x\n"
- "ETBR_FORMATTERCTRL:\t%08x\n"
- "ETMR_CTRL:\t%08x\n"
- "ETMR_STATUS:\t%08x\n",
+ "ETBR_FORMATTERCTRL:\t%08x\n",
datalen,
tracer.ncmppairs,
etb_wa,
etb_ra,
etb_st,
- etb_fc,
+ etb_fc
+ );
+
+ for (id = 0; id < tracer.etm_regs_count; id++) {
+ etm_unlock(&tracer, id);
+ etm_ctrl = etm_readl(&tracer, id, ETMR_CTRL);
+ etm_st = etm_readl(&tracer, id, ETMR_STATUS);
+ etm_lock(&tracer, id);
+ ret += sprintf(buf + ret, "ETMR_CTRL:\t%08x\n"
+ "ETMR_STATUS:\t%08x\n",
etm_ctrl,
etm_st
);
+ }
+ mutex_unlock(&tracer.mutex);
+
+ return ret;
}
static struct kobj_attribute trace_info_attr =
@@ -531,42 +658,260 @@ static ssize_t trace_mode_store(struct kobject *kobj,
static struct kobj_attribute trace_mode_attr =
__ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store);
+static ssize_t trace_contextid_size_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ /* 0: No context id tracing, 1: One byte, 2: Two bytes, 3: Four bytes */
+ return sprintf(buf, "%d\n", (1 << tracer.etm_contextid_size) >> 1);
+}
+
+static ssize_t trace_contextid_size_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned int contextid_size;
+
+ if (sscanf(buf, "%u", &contextid_size) != 1)
+ return -EINVAL;
+
+ if (contextid_size == 3 || contextid_size > 4)
+ return -EINVAL;
+
+ mutex_lock(&tracer.mutex);
+ tracer.etm_contextid_size = fls(contextid_size);
+ mutex_unlock(&tracer.mutex);
+
+ return n;
+}
+
+static struct kobj_attribute trace_contextid_size_attr =
+ __ATTR(trace_contextid_size, 0644,
+ trace_contextid_size_show, trace_contextid_size_store);
+
+static ssize_t trace_branch_output_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_BRANCHOUTPUT));
+}
+
+static ssize_t trace_branch_output_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned int branch_output;
+
+ if (sscanf(buf, "%u", &branch_output) != 1)
+ return -EINVAL;
+
+ mutex_lock(&tracer.mutex);
+ if (branch_output) {
+ tracer.flags |= TRACER_BRANCHOUTPUT;
+ /* Branch broadcasting is incompatible with the return stack */
+ tracer.flags &= ~TRACER_RETURN_STACK;
+ } else {
+ tracer.flags &= ~TRACER_BRANCHOUTPUT;
+ }
+ mutex_unlock(&tracer.mutex);
+
+ return n;
+}
+
+static struct kobj_attribute trace_branch_output_attr =
+ __ATTR(trace_branch_output, 0644,
+ trace_branch_output_show, trace_branch_output_store);
+
+static ssize_t trace_return_stack_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_RETURN_STACK));
+}
+
+static ssize_t trace_return_stack_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned int return_stack;
+
+ if (sscanf(buf, "%u", &return_stack) != 1)
+ return -EINVAL;
+
+ mutex_lock(&tracer.mutex);
+ if (return_stack) {
+ tracer.flags |= TRACER_RETURN_STACK;
+ /* Return stack is incompatible with branch broadcasting */
+ tracer.flags &= ~TRACER_BRANCHOUTPUT;
+ } else {
+ tracer.flags &= ~TRACER_RETURN_STACK;
+ }
+ mutex_unlock(&tracer.mutex);
+
+ return n;
+}
+
+static struct kobj_attribute trace_return_stack_attr =
+ __ATTR(trace_return_stack, 0644,
+ trace_return_stack_show, trace_return_stack_store);
+
+static ssize_t trace_timestamp_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_TIMESTAMP));
+}
+
+static ssize_t trace_timestamp_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned int timestamp;
+
+ if (sscanf(buf, "%u", &timestamp) != 1)
+ return -EINVAL;
+
+ mutex_lock(&tracer.mutex);
+ if (timestamp)
+ tracer.flags |= TRACER_TIMESTAMP;
+ else
+ tracer.flags &= ~TRACER_TIMESTAMP;
+ mutex_unlock(&tracer.mutex);
+
+ return n;
+}
+
+static struct kobj_attribute trace_timestamp_attr =
+ __ATTR(trace_timestamp, 0644,
+ trace_timestamp_show, trace_timestamp_store);
+
+static ssize_t trace_range_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%08lx %08lx\n",
+ tracer.range_start, tracer.range_end);
+}
+
+static ssize_t trace_range_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long range_start, range_end;
+
+ if (sscanf(buf, "%lx %lx", &range_start, &range_end) != 2)
+ return -EINVAL;
+
+ mutex_lock(&tracer.mutex);
+ tracer.range_start = range_start;
+ tracer.range_end = range_end;
+ mutex_unlock(&tracer.mutex);
+
+ return n;
+}
+
+
+static struct kobj_attribute trace_range_attr =
+ __ATTR(trace_range, 0644, trace_range_show, trace_range_store);
+
+static ssize_t trace_data_range_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long range_start;
+ u64 range_end;
+ mutex_lock(&tracer.mutex);
+ range_start = tracer.data_range_start;
+ range_end = tracer.data_range_end;
+ if (!range_end && (tracer.flags & TRACER_TRACE_DATA))
+ range_end = 0x100000000ULL;
+ mutex_unlock(&tracer.mutex);
+ return sprintf(buf, "%08lx %08llx\n", range_start, range_end);
+}
+
+static ssize_t trace_data_range_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long range_start;
+ u64 range_end;
+
+ if (sscanf(buf, "%lx %llx", &range_start, &range_end) != 2)
+ return -EINVAL;
+
+ mutex_lock(&tracer.mutex);
+ tracer.data_range_start = range_start;
+ tracer.data_range_end = (unsigned long)range_end;
+ if (range_end)
+ tracer.flags |= TRACER_TRACE_DATA;
+ else
+ tracer.flags &= ~TRACER_TRACE_DATA;
+ mutex_unlock(&tracer.mutex);
+
+ return n;
+}
+
+
+static struct kobj_attribute trace_data_range_attr =
+ __ATTR(trace_data_range, 0644,
+ trace_data_range_show, trace_data_range_store);
+
static int etm_probe(struct amba_device *dev, const struct amba_id *id)
{
struct tracectx *t = &tracer;
int ret = 0;
+ void __iomem **new_regs;
+ int new_count;
+ u32 etmccr;
+ u32 etmidr;
+ u32 etmccer = 0;
+ u8 etm_version = 0;
+
+ mutex_lock(&t->mutex);
+ new_count = t->etm_regs_count + 1;
+ new_regs = krealloc(t->etm_regs,
+ sizeof(t->etm_regs[0]) * new_count, GFP_KERNEL);
- if (t->etm_regs) {
- dev_dbg(&dev->dev, "ETM already initialized\n");
- ret = -EBUSY;
+ if (!new_regs) {
+ dev_dbg(&dev->dev, "Failed to allocate ETM register array\n");
+ ret = -ENOMEM;
goto out;
}
+ t->etm_regs = new_regs;
ret = amba_request_regions(dev, NULL);
if (ret)
goto out;
- t->etm_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
- if (!t->etm_regs) {
+ t->etm_regs[t->etm_regs_count] =
+ ioremap_nocache(dev->res.start, resource_size(&dev->res));
+ if (!t->etm_regs[t->etm_regs_count]) {
ret = -ENOMEM;
goto out_release;
}
- amba_set_drvdata(dev, t);
+ amba_set_drvdata(dev, t->etm_regs[t->etm_regs_count]);
- mutex_init(&t->mutex);
- t->dev = &dev->dev;
- t->flags = TRACER_CYCLE_ACC;
+ t->flags = TRACER_CYCLE_ACC | TRACER_TRACE_DATA | TRACER_BRANCHOUTPUT;
t->etm_portsz = 1;
+ t->etm_contextid_size = 3;
- etm_unlock(t);
- (void)etm_readl(t, ETMMR_PDSR);
+ etm_unlock(t, t->etm_regs_count);
+ (void)etm_readl(t, t->etm_regs_count, ETMMR_PDSR);
/* dummy first read */
- (void)etm_readl(&tracer, ETMMR_OSSRR);
-
- t->ncmppairs = etm_readl(t, ETMR_CONFCODE) & 0xf;
- etm_writel(t, 0x440, ETMR_CTRL);
- etm_lock(t);
+ (void)etm_readl(&tracer, t->etm_regs_count, ETMMR_OSSRR);
+
+ etmccr = etm_readl(t, t->etm_regs_count, ETMR_CONFCODE);
+ t->ncmppairs = etmccr & 0xf;
+ if (etmccr & ETMCCR_ETMIDR_PRESENT) {
+ etmidr = etm_readl(t, t->etm_regs_count, ETMR_ID);
+ etm_version = ETMIDR_VERSION(etmidr);
+ if (etm_version >= ETMIDR_VERSION_3_1)
+ etmccer = etm_readl(t, t->etm_regs_count, ETMR_CCE);
+ }
+ etm_writel(t, t->etm_regs_count, 0x441, ETMR_CTRL);
+ etm_writel(t, t->etm_regs_count, new_count, ETMR_TRACEIDR);
+ etm_lock(t, t->etm_regs_count);
ret = sysfs_create_file(&dev->dev.kobj,
&trace_running_attr.attr);
@@ -582,36 +927,101 @@ static int etm_probe(struct amba_device *dev, const struct amba_id *id)
if (ret)
dev_dbg(&dev->dev, "Failed to create trace_mode in sysfs\n");
- dev_dbg(t->dev, "ETM AMBA driver initialized.\n");
+ ret = sysfs_create_file(&dev->dev.kobj,
+ &trace_contextid_size_attr.attr);
+ if (ret)
+ dev_dbg(&dev->dev,
+ "Failed to create trace_contextid_size in sysfs\n");
+
+ ret = sysfs_create_file(&dev->dev.kobj,
+ &trace_branch_output_attr.attr);
+ if (ret)
+ dev_dbg(&dev->dev,
+ "Failed to create trace_branch_output in sysfs\n");
+
+ if (etmccer & ETMCCER_RETURN_STACK_IMPLEMENTED) {
+ ret = sysfs_create_file(&dev->dev.kobj,
+ &trace_return_stack_attr.attr);
+ if (ret)
+ dev_dbg(&dev->dev,
+ "Failed to create trace_return_stack in sysfs\n");
+ }
+
+ if (etmccer & ETMCCER_TIMESTAMPING_IMPLEMENTED) {
+ ret = sysfs_create_file(&dev->dev.kobj,
+ &trace_timestamp_attr.attr);
+ if (ret)
+ dev_dbg(&dev->dev,
+ "Failed to create trace_timestamp in sysfs\n");
+ }
+
+ ret = sysfs_create_file(&dev->dev.kobj, &trace_range_attr.attr);
+ if (ret)
+ dev_dbg(&dev->dev, "Failed to create trace_range in sysfs\n");
+
+ if (etm_version < ETMIDR_VERSION_PFT_1_0) {
+ ret = sysfs_create_file(&dev->dev.kobj,
+ &trace_data_range_attr.attr);
+ if (ret)
+ dev_dbg(&dev->dev,
+ "Failed to create trace_data_range in sysfs\n");
+ } else {
+ tracer.flags &= ~TRACER_TRACE_DATA;
+ }
+
+ dev_dbg(&dev->dev, "ETM AMBA driver initialized.\n");
+
+ /* Enable formatter if there are multiple trace sources */
+ if (new_count > 1)
+ t->etb_fc = ETBFF_ENFCONT | ETBFF_ENFTC;
+
+ t->etm_regs_count = new_count;
out:
+ mutex_unlock(&t->mutex);
return ret;
out_unmap:
amba_set_drvdata(dev, NULL);
- iounmap(t->etm_regs);
+ iounmap(t->etm_regs[t->etm_regs_count]);
out_release:
amba_release_regions(dev);
+ mutex_unlock(&t->mutex);
return ret;
}
static int etm_remove(struct amba_device *dev)
{
- struct tracectx *t = amba_get_drvdata(dev);
+ int i;
+ struct tracectx *t = &tracer;
+ void __iomem *etm_regs = amba_get_drvdata(dev);
+
+ sysfs_remove_file(&dev->dev.kobj, &trace_running_attr.attr);
+ sysfs_remove_file(&dev->dev.kobj, &trace_info_attr.attr);
+ sysfs_remove_file(&dev->dev.kobj, &trace_mode_attr.attr);
+ sysfs_remove_file(&dev->dev.kobj, &trace_range_attr.attr);
+ sysfs_remove_file(&dev->dev.kobj, &trace_data_range_attr.attr);
amba_set_drvdata(dev, NULL);
- iounmap(t->etm_regs);
- t->etm_regs = NULL;
+ mutex_lock(&t->mutex);
+ for (i = 0; i < t->etm_regs_count; i++)
+ if (t->etm_regs[i] == etm_regs)
+ break;
+ for (; i < t->etm_regs_count - 1; i++)
+ t->etm_regs[i] = t->etm_regs[i + 1];
+ t->etm_regs_count--;
+ if (!t->etm_regs_count) {
+ kfree(t->etm_regs);
+ t->etm_regs = NULL;
+ }
+ mutex_unlock(&t->mutex);
+ iounmap(etm_regs);
amba_release_regions(dev);
- sysfs_remove_file(&dev->dev.kobj, &trace_running_attr.attr);
- sysfs_remove_file(&dev->dev.kobj, &trace_info_attr.attr);
- sysfs_remove_file(&dev->dev.kobj, &trace_mode_attr.attr);
-
return 0;
}
@@ -620,6 +1030,10 @@ static struct amba_id etm_ids[] = {
.id = 0x0003b921,
.mask = 0x0007ffff,
},
+ {
+ .id = 0x0003b950,
+ .mask = 0x0007ffff,
+ },
{ 0, 0 },
};
@@ -637,6 +1051,8 @@ static int __init etm_init(void)
{
int retval;
+ mutex_init(&tracer.mutex);
+
retval = amba_driver_register(&etb_driver);
if (retval) {
printk(KERN_ERR "Failed to register etb\n");
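For reference, a userspace sketch (not part of this patch) of draining the trace buffer through the ETB misc device, relying on the *ppos handling added to etb_read() above; the /dev/tracebuf node name is an assumption about how that device is exposed:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/dev/tracebuf", O_RDONLY);

	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}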
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 38b670cea08c..b0505289b6ec 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -13,6 +13,7 @@
*/
#include <linux/ftrace.h>
+#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
@@ -64,6 +65,20 @@ static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
}
#endif
+int ftrace_arch_code_modify_prepare(void)
+{
+ set_kernel_text_rw();
+ set_all_modules_text_rw();
+ return 0;
+}
+
+int ftrace_arch_code_modify_post_process(void)
+{
+ set_all_modules_text_ro();
+ set_kernel_text_ro();
+ return 0;
+}
+
static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
return arm_gen_branch_link(pc, addr);
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 5ff2e77782b1..f031a4f82934 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -28,6 +28,7 @@
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/smp.h>
+#include <linux/cpu_pm.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
@@ -49,6 +50,9 @@ static int core_num_wrps;
/* Debug architecture version. */
static u8 debug_arch;
+/* Does debug architecture support OS Save and Restore? */
+static bool has_ossr;
+
/* Maximum supported watchpoint length. */
static u8 max_watchpoint_len;
@@ -903,6 +907,23 @@ static struct undef_hook debug_reg_hook = {
.fn = debug_reg_trap,
};
+/* Does this core support OS Save and Restore? */
+static bool core_has_os_save_restore(void)
+{
+ u32 oslsr;
+
+ switch (get_debug_arch()) {
+ case ARM_DEBUG_ARCH_V7_1:
+ return true;
+ case ARM_DEBUG_ARCH_V7_ECP14:
+ ARM_DBG_READ(c1, c1, 4, oslsr);
+ if (oslsr & ARM_OSLSR_OSLM0)
+ return true;
+ default:
+ return false;
+ }
+}
+
static void reset_ctrl_regs(void *unused)
{
int i, raw_num_brps, err = 0, cpu = smp_processor_id();
@@ -930,11 +951,7 @@ static void reset_ctrl_regs(void *unused)
if ((val & 0x1) == 0)
err = -EPERM;
- /*
- * Check whether we implement OS save and restore.
- */
- ARM_DBG_READ(c1, c1, 4, val);
- if ((val & 0x9) == 0)
+ if (!has_ossr)
goto clear_vcr;
break;
case ARM_DEBUG_ARCH_V7_1:
@@ -1015,6 +1032,31 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
.notifier_call = dbg_reset_notify,
};
+#ifdef CONFIG_CPU_PM
+static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,
+ void *v)
+{
+ if (action == CPU_PM_EXIT)
+ reset_ctrl_regs(NULL);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = {
+ .notifier_call = dbg_cpu_pm_notify,
+};
+
+static void __init pm_init(void)
+{
+ if (has_ossr)
+ cpu_pm_register_notifier(&dbg_cpu_pm_nb);
+}
+#else
+static inline void pm_init(void)
+{
+}
+#endif
+
static int __init arch_hw_breakpoint_init(void)
{
debug_arch = get_debug_arch();
@@ -1024,6 +1066,8 @@ static int __init arch_hw_breakpoint_init(void)
return 0;
}
+ has_ossr = core_has_os_save_restore();
+
/* Determine how many BRPs/WRPs are available. */
core_num_brps = get_num_brps();
core_num_wrps = get_num_wrps();
@@ -1064,6 +1108,8 @@ static int __init arch_hw_breakpoint_init(void)
/* Register hotplug notifier. */
register_cpu_notifier(&dbg_reset_nb);
+
+ pm_init();
return 0;
}
arch_initcall(arch_hw_breakpoint_init);
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index f9e8657dd241..82dc1522da10 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -12,6 +12,7 @@
*/
#define pr_fmt(fmt) "hw perfevents: " fmt
+#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -81,6 +82,9 @@ armpmu_map_event(struct perf_event *event,
return armpmu_map_cache_event(cache_map, config);
case PERF_TYPE_RAW:
return armpmu_map_raw_event(raw_event_mask, config);
+ default:
+ if (event->attr.type >= PERF_TYPE_MAX)
+ return armpmu_map_raw_event(raw_event_mask, config);
}
return -ENOENT;
@@ -164,6 +168,8 @@ armpmu_stop(struct perf_event *event, int flags)
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
+ return;
/*
* ARM pmu always has to update the counter, so ignore
* PERF_EF_UPDATE, see comments in armpmu_start().
@@ -180,6 +186,8 @@ static void armpmu_start(struct perf_event *event, int flags)
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
+ return;
/*
* ARM pmu always has to reprogram the period, so ignore
* PERF_EF_RELOAD, see the comment below.
@@ -207,6 +215,9 @@ armpmu_del(struct perf_event *event, int flags)
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
+ return;
+
WARN_ON(idx < 0);
armpmu_stop(event, PERF_EF_UPDATE);
@@ -225,6 +236,10 @@ armpmu_add(struct perf_event *event, int flags)
int idx;
int err = 0;
+ /* An event following a process won't be stopped earlier */
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
+ return 0;
+
perf_pmu_disable(event->pmu);
/* If we don't have a space for the counter then finish early. */
@@ -423,6 +438,10 @@ static int armpmu_event_init(struct perf_event *event)
int err = 0;
atomic_t *active_events = &armpmu->active_events;
+ if (event->cpu != -1 &&
+ !cpumask_test_cpu(event->cpu, &armpmu->valid_cpus))
+ return -ENOENT;
+
/* does not support taken branch sampling */
if (has_branch_stack(event))
return -EOPNOTSUPP;
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 5f6620684e25..f495bbcaf1b2 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -19,6 +19,7 @@
#define pr_fmt(fmt) "CPU PMU: " fmt
#include <linux/bitmap.h>
+#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of.h>
@@ -31,33 +32,36 @@
#include <asm/pmu.h>
/* Set at runtime when we know what CPU type we are. */
-static struct arm_pmu *cpu_pmu;
+static DEFINE_PER_CPU(struct arm_pmu *, cpu_pmu);
static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
+static DEFINE_PER_CPU(struct cpupmu_regs, cpu_pmu_regs);
+
/*
* Despite the names, these two functions are CPU-specific and are used
* by the OProfile/perf code.
*/
const char *perf_pmu_name(void)
{
- if (!cpu_pmu)
+ struct arm_pmu *pmu = per_cpu(cpu_pmu, 0);
+ if (!pmu)
return NULL;
- return cpu_pmu->name;
+ return pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);
int perf_num_counters(void)
{
- int max_events = 0;
+ struct arm_pmu *pmu = per_cpu(cpu_pmu, 0);
- if (cpu_pmu != NULL)
- max_events = cpu_pmu->num_events;
+ if (!pmu)
+ return 0;
- return max_events;
+ return pmu->num_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);
@@ -75,11 +79,13 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
int i, irq, irqs;
struct platform_device *pmu_device = cpu_pmu->plat_device;
+ int cpu = -1;
irqs = min(pmu_device->num_resources, num_possible_cpus());
for (i = 0; i < irqs; ++i) {
- if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
+ cpu = cpumask_next(cpu, &cpu_pmu->valid_cpus);
+ if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
continue;
irq = platform_get_irq(pmu_device, i);
if (irq >= 0)
@@ -91,6 +97,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
int i, err, irq, irqs;
struct platform_device *pmu_device = cpu_pmu->plat_device;
+ int cpu = -1;
if (!pmu_device)
return -ENODEV;
@@ -103,6 +110,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
for (i = 0; i < irqs; ++i) {
err = 0;
+ cpu = cpumask_next(cpu, &cpu_pmu->valid_cpus);
irq = platform_get_irq(pmu_device, i);
if (irq < 0)
continue;
@@ -112,7 +120,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
* assume that we're running on a uniprocessor machine and
* continue. Otherwise, continue without this interrupt.
*/
- if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+ if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
irq, i);
continue;
@@ -126,7 +134,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
return err;
}
- cpumask_set_cpu(i, &cpu_pmu->active_irqs);
+ cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
}
return 0;
@@ -135,7 +143,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
int cpu;
- for_each_possible_cpu(cpu) {
+ for_each_cpu_mask(cpu, cpu_pmu->valid_cpus) {
struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
events->events = per_cpu(hw_events, cpu);
events->used_mask = per_cpu(used_mask, cpu);
@@ -148,7 +156,7 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
/* Ensure the PMU has sane values out of reset. */
if (cpu_pmu && cpu_pmu->reset)
- on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);
+ on_each_cpu_mask(&cpu_pmu->valid_cpus, cpu_pmu->reset, cpu_pmu, 1);
}
/*
@@ -160,21 +168,46 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
unsigned long action, void *hcpu)
{
+ struct arm_pmu *pmu = per_cpu(cpu_pmu, (long)hcpu);
+
if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
return NOTIFY_DONE;
- if (cpu_pmu && cpu_pmu->reset)
- cpu_pmu->reset(cpu_pmu);
+ if (pmu && pmu->reset)
+ pmu->reset(pmu);
else
return NOTIFY_DONE;
return NOTIFY_OK;
}
+static int cpu_pmu_pm_notify(struct notifier_block *b,
+ unsigned long action, void *hcpu)
+{
+ int cpu = smp_processor_id();
+ struct arm_pmu *pmu = per_cpu(cpu_pmu, cpu);
+ struct cpupmu_regs *pmuregs = &per_cpu(cpu_pmu_regs, cpu);
+
+ if (!pmu)
+ return NOTIFY_DONE;
+
+ if (action == CPU_PM_ENTER && pmu->save_regs) {
+ pmu->save_regs(pmu, pmuregs);
+ } else if (action == CPU_PM_EXIT && pmu->restore_regs) {
+ pmu->restore_regs(pmu, pmuregs);
+ }
+
+ return NOTIFY_OK;
+}
+
static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
.notifier_call = cpu_pmu_notify,
};
+static struct notifier_block __cpuinitdata cpu_pmu_pm_notifier = {
+ .notifier_call = cpu_pmu_pm_notify,
+};
+
/*
* PMU platform driver and devicetree bindings.
*/
@@ -248,6 +281,9 @@ static int probe_current_pmu(struct arm_pmu *pmu)
}
}
+ /* assume the PMU supports all the CPUs in this case */
+ cpumask_setall(&pmu->valid_cpus);
+
put_cpu();
return ret;
}
@@ -255,15 +291,10 @@ static int probe_current_pmu(struct arm_pmu *pmu)
static int cpu_pmu_device_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
- int (*init_fn)(struct arm_pmu *);
struct device_node *node = pdev->dev.of_node;
struct arm_pmu *pmu;
- int ret = -ENODEV;
-
- if (cpu_pmu) {
- pr_info("attempt to register multiple PMU devices!");
- return -ENOSPC;
- }
+ int ret = 0;
+ int cpu;
pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
if (!pmu) {
@@ -272,8 +303,28 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
}
if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
- init_fn = of_id->data;
- ret = init_fn(pmu);
+ smp_call_func_t init_fn = (smp_call_func_t)of_id->data;
+ struct device_node *ncluster;
+ int cluster = -1;
+ cpumask_t sibling_mask;
+
+ ncluster = of_parse_phandle(node, "cluster", 0);
+ if (ncluster) {
+ int len;
+ const u32 *hwid;
+ hwid = of_get_property(ncluster, "reg", &len);
+ if (hwid && len == 4)
+ cluster = be32_to_cpup(hwid);
+ }
+ /* set sibling mask to all cpu mask if socket is not specified */
+ if (cluster == -1 ||
+ cluster_to_logical_mask(cluster, &sibling_mask))
+ cpumask_setall(&sibling_mask);
+
+ smp_call_function_any(&sibling_mask, init_fn, pmu, 1);
+
+ /* now set the valid_cpus after init */
+ cpumask_copy(&pmu->valid_cpus, &sibling_mask);
} else {
ret = probe_current_pmu(pmu);
}
@@ -284,10 +335,12 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
return ret;
}
- cpu_pmu = pmu;
- cpu_pmu->plat_device = pdev;
- cpu_pmu_init(cpu_pmu);
- armpmu_register(cpu_pmu, PERF_TYPE_RAW);
+ for_each_cpu_mask(cpu, pmu->valid_cpus)
+ per_cpu(cpu_pmu, cpu) = pmu;
+
+ pmu->plat_device = pdev;
+ cpu_pmu_init(pmu);
+ armpmu_register(pmu, -1);
return 0;
}
@@ -310,9 +363,17 @@ static int __init register_pmu_driver(void)
if (err)
return err;
+ err = cpu_pm_register_notifier(&cpu_pmu_pm_notifier);
+ if (err) {
+ unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);
+ return err;
+ }
+
err = platform_driver_register(&cpu_pmu_driver);
- if (err)
+ if (err) {
+ cpu_pm_unregister_notifier(&cpu_pmu_pm_notifier);
unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);
+ }
return err;
}
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4fbc757d9cff..37c78c484576 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -950,6 +950,51 @@ static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
}
#endif
+static void armv7pmu_save_regs(struct arm_pmu *cpu_pmu,
+ struct cpupmu_regs *regs)
+{
+ unsigned int cnt;
+ asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (regs->pmc));
+ if (!(regs->pmc & ARMV7_PMNC_E))
+ return;
+
+ asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (regs->pmcntenset));
+ asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r" (regs->pmuseren));
+ asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (regs->pmintenset));
+ asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (regs->pmxevtcnt[0]));
+ for (cnt = ARMV7_IDX_COUNTER0;
+ cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
+ armv7_pmnc_select_counter(cnt);
+ asm volatile("mrc p15, 0, %0, c9, c13, 1"
+ : "=r"(regs->pmxevttype[cnt]));
+ asm volatile("mrc p15, 0, %0, c9, c13, 2"
+ : "=r"(regs->pmxevtcnt[cnt]));
+ }
+ return;
+}
+
+static void armv7pmu_restore_regs(struct arm_pmu *cpu_pmu,
+ struct cpupmu_regs *regs)
+{
+ unsigned int cnt;
+ if (!(regs->pmc & ARMV7_PMNC_E))
+ return;
+
+ asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (regs->pmcntenset));
+ asm volatile("mcr p15, 0, %0, c9, c14, 0" : : "r" (regs->pmuseren));
+ asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (regs->pmintenset));
+ asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (regs->pmxevtcnt[0]));
+ for (cnt = ARMV7_IDX_COUNTER0;
+ cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
+ armv7_pmnc_select_counter(cnt);
+ asm volatile("mcr p15, 0, %0, c9, c13, 1"
+ : : "r"(regs->pmxevttype[cnt]));
+ asm volatile("mcr p15, 0, %0, c9, c13, 2"
+ : : "r"(regs->pmxevtcnt[cnt]));
+ }
+ asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (regs->pmc));
+}
+
static void armv7pmu_enable_event(struct perf_event *event)
{
unsigned long flags;
@@ -1223,6 +1268,8 @@ static void armv7pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->start = armv7pmu_start;
cpu_pmu->stop = armv7pmu_stop;
cpu_pmu->reset = armv7pmu_reset;
+ cpu_pmu->save_regs = armv7pmu_save_regs;
+ cpu_pmu->restore_regs = armv7pmu_restore_regs;
cpu_pmu->max_period = (1LLU << 32) - 1;
};
@@ -1240,7 +1287,7 @@ static u32 armv7_read_num_pmnc_events(void)
static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Cortex-A8";
+ cpu_pmu->name = "ARMv7_Cortex_A8";
cpu_pmu->map_event = armv7_a8_map_event;
cpu_pmu->num_events = armv7_read_num_pmnc_events();
return 0;
@@ -1249,7 +1296,7 @@ static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Cortex-A9";
+ cpu_pmu->name = "ARMv7_Cortex_A9";
cpu_pmu->map_event = armv7_a9_map_event;
cpu_pmu->num_events = armv7_read_num_pmnc_events();
return 0;
@@ -1258,7 +1305,7 @@ static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Cortex-A5";
+ cpu_pmu->name = "ARMv7_Cortex_A5";
cpu_pmu->map_event = armv7_a5_map_event;
cpu_pmu->num_events = armv7_read_num_pmnc_events();
return 0;
@@ -1267,7 +1314,7 @@ static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Cortex-A15";
+ cpu_pmu->name = "ARMv7_Cortex_A15";
cpu_pmu->map_event = armv7_a15_map_event;
cpu_pmu->num_events = armv7_read_num_pmnc_events();
cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
@@ -1277,7 +1324,7 @@ static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Cortex-A7";
+ cpu_pmu->name = "ARMv7_Cortex_A7";
cpu_pmu->map_event = armv7_a7_map_event;
cpu_pmu->num_events = armv7_read_num_pmnc_events();
cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index c6dec5fc20aa..33aafe49ff86 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -32,6 +32,7 @@
#include <linux/hw_breakpoint.h>
#include <linux/cpuidle.h>
#include <linux/leds.h>
+#include <linux/console.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
@@ -59,6 +60,18 @@ static const char *isa_modes[] = {
static volatile int hlt_counter;
+#ifdef CONFIG_SMP
+void arch_trigger_all_cpu_backtrace(void)
+{
+ smp_send_all_cpu_backtrace();
+}
+#else
+void arch_trigger_all_cpu_backtrace(void)
+{
+ dump_stack();
+}
+#endif
+
void disable_hlt(void)
{
hlt_counter++;
@@ -92,6 +105,31 @@ __setup("hlt", hlt_setup);
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
typedef void (*phys_reset_t)(unsigned long);
+#ifdef CONFIG_ARM_FLUSH_CONSOLE_ON_RESTART
+void arm_machine_flush_console(void)
+{
+ printk("\n");
+ pr_emerg("Restarting %s\n", linux_banner);
+ if (console_trylock()) {
+ console_unlock();
+ return;
+ }
+
+ mdelay(50);
+
+ local_irq_disable();
+ if (!console_trylock())
+ pr_emerg("arm_restart: Console was locked! Busting\n");
+ else
+ pr_emerg("arm_restart: Console was locked!\n");
+ console_unlock();
+}
+#else
+void arm_machine_flush_console(void)
+{
+}
+#endif
+
/*
* A temporary stack to use for CPU reset. This is static so that we
* don't clobber it with the identity mapping. When running with this
@@ -187,9 +225,9 @@ void cpu_idle(void)
/* endless idle loop with no priority at all */
while (1) {
+ idle_notifier_call_chain(IDLE_START);
tick_nohz_idle_enter();
rcu_idle_enter();
- ledtrig_cpu(CPU_LED_IDLE_START);
while (!need_resched()) {
#ifdef CONFIG_HOTPLUG_CPU
if (cpu_is_offline(smp_processor_id()))
@@ -220,9 +258,9 @@ void cpu_idle(void)
} else
local_irq_enable();
}
- ledtrig_cpu(CPU_LED_IDLE_END);
rcu_idle_exit();
tick_nohz_idle_exit();
+ idle_notifier_call_chain(IDLE_END);
schedule_preempt_disabled();
}
}
@@ -262,6 +300,10 @@ void machine_restart(char *cmd)
{
machine_shutdown();
+ /* Flush the console to make sure all the relevant messages make it
+ * out to the console drivers */
+ arm_machine_flush_console();
+
arm_pm_restart(reboot_mode, cmd);
/* Give a grace period for failure to restart of 1s */
@@ -273,6 +315,77 @@ void machine_restart(char *cmd)
while (1);
}
+/*
+ * dump a block of kernel memory from around the given address
+ */
+static void show_data(unsigned long addr, int nbytes, const char *name)
+{
+ int i, j;
+ int nlines;
+ u32 *p;
+
+ /*
+ * don't attempt to dump non-kernel addresses or
+ * values that are probably just small negative numbers
+ */
+ if (addr < PAGE_OFFSET || addr > -256UL)
+ return;
+
+ printk("\n%s: %#lx:\n", name, addr);
+
+ /*
+ * round address down to a 32 bit boundary
+ * and always dump a multiple of 32 bytes
+ */
+ p = (u32 *)(addr & ~(sizeof(u32) - 1));
+ nbytes += (addr & (sizeof(u32) - 1));
+ nlines = (nbytes + 31) / 32;
+
+
+ for (i = 0; i < nlines; i++) {
+ /*
+ * just display low 16 bits of address to keep
+ * each line of the dump < 80 characters
+ */
+ printk("%04lx ", (unsigned long)p & 0xffff);
+ for (j = 0; j < 8; j++) {
+ u32 data;
+ if (probe_kernel_address(p, data)) {
+ printk(" ********");
+ } else {
+ printk(" %08x", data);
+ }
+ ++p;
+ }
+ printk("\n");
+ }
+}
+
+static void show_extra_register_data(struct pt_regs *regs, int nbytes)
+{
+ mm_segment_t fs;
+
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ show_data(regs->ARM_pc - nbytes, nbytes * 2, "PC");
+ show_data(regs->ARM_lr - nbytes, nbytes * 2, "LR");
+ show_data(regs->ARM_sp - nbytes, nbytes * 2, "SP");
+ show_data(regs->ARM_ip - nbytes, nbytes * 2, "IP");
+ show_data(regs->ARM_fp - nbytes, nbytes * 2, "FP");
+ show_data(regs->ARM_r0 - nbytes, nbytes * 2, "R0");
+ show_data(regs->ARM_r1 - nbytes, nbytes * 2, "R1");
+ show_data(regs->ARM_r2 - nbytes, nbytes * 2, "R2");
+ show_data(regs->ARM_r3 - nbytes, nbytes * 2, "R3");
+ show_data(regs->ARM_r4 - nbytes, nbytes * 2, "R4");
+ show_data(regs->ARM_r5 - nbytes, nbytes * 2, "R5");
+ show_data(regs->ARM_r6 - nbytes, nbytes * 2, "R6");
+ show_data(regs->ARM_r7 - nbytes, nbytes * 2, "R7");
+ show_data(regs->ARM_r8 - nbytes, nbytes * 2, "R8");
+ show_data(regs->ARM_r9 - nbytes, nbytes * 2, "R9");
+ show_data(regs->ARM_r10 - nbytes, nbytes * 2, "R10");
+ set_fs(fs);
+}
+
void __show_regs(struct pt_regs *regs)
{
unsigned long flags;
@@ -332,6 +445,8 @@ void __show_regs(struct pt_regs *regs)
printk("Control: %08x%s\n", ctrl, buf);
}
#endif
+
+ show_extra_register_data(regs, 128);
}
void show_regs(struct pt_regs * regs)
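
Note: the show_data() guard above only dumps values that could plausibly be kernel pointers: anything below PAGE_OFFSET is rejected, and so is anything in the top 256 bytes of the address space, where small negative error codes land when held in a register. A standalone sketch of the same filter; the PAGE_OFFSET value assumes a 3G/1G split and is for illustration only:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_OFFSET 0xc0000000UL   /* assumed 3G/1G split, illustration only */

static bool worth_dumping(unsigned long addr)
{
	/* same filter as show_data(): kernel addresses only, and not the
	 * top 256 bytes, where values like (unsigned long)-EINVAL end up */
	return addr >= PAGE_OFFSET && addr <= -256UL;
}

int main(void)
{
	printf("%d\n", worth_dumping(0xc0123456UL));        /* 1: plausible kernel pointer */
	printf("%d\n", worth_dumping(0x00010000UL));        /* 0: user-space address       */
	printf("%d\n", worth_dumping((unsigned long)-22));  /* 0: -EINVAL in a register    */
	return 0;
}
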
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 84f4cbf652e5..c115ab3c27fb 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -66,6 +66,7 @@ enum ipi_msg_type {
IPI_CALL_FUNC,
IPI_CALL_FUNC_SINGLE,
IPI_CPU_STOP,
+ IPI_CPU_BACKTRACE,
};
static DECLARE_COMPLETION(cpu_running);
@@ -442,6 +443,7 @@ static const char *ipi_types[NR_IPI] = {
S(IPI_CALL_FUNC, "Function call interrupts"),
S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
S(IPI_CPU_STOP, "CPU stop interrupts"),
+ S(IPI_CPU_BACKTRACE, "CPU backtrace"),
};
void show_ipi_list(struct seq_file *p, int prec)
@@ -576,6 +578,58 @@ static void ipi_cpu_stop(unsigned int cpu)
cpu_relax();
}
+static cpumask_t backtrace_mask;
+static DEFINE_RAW_SPINLOCK(backtrace_lock);
+
+/* "in progress" flag of arch_trigger_all_cpu_backtrace */
+static unsigned long backtrace_flag;
+
+void smp_send_all_cpu_backtrace(void)
+{
+ unsigned int this_cpu = smp_processor_id();
+ int i;
+
+ if (test_and_set_bit(0, &backtrace_flag))
+ /*
+ * If there is already a trigger_all_cpu_backtrace() in progress
+ * (backtrace_flag == 1), don't output duplicate CPU dump info.
+ */
+ return;
+
+ cpumask_copy(&backtrace_mask, cpu_online_mask);
+ cpu_clear(this_cpu, backtrace_mask);
+
+ pr_info("Backtrace for cpu %d (current):\n", this_cpu);
+ dump_stack();
+
+ pr_info("\nsending IPI to all other CPUs:\n");
+ smp_cross_call(&backtrace_mask, IPI_CPU_BACKTRACE);
+
+ /* Wait for up to 10 seconds for all other CPUs to do the backtrace */
+ for (i = 0; i < 10 * 1000; i++) {
+ if (cpumask_empty(&backtrace_mask))
+ break;
+ mdelay(1);
+ }
+
+ clear_bit(0, &backtrace_flag);
+ smp_mb__after_clear_bit();
+}
+
+/*
+ * ipi_cpu_backtrace - handle IPI from smp_send_all_cpu_backtrace()
+ */
+static void ipi_cpu_backtrace(unsigned int cpu, struct pt_regs *regs)
+{
+ if (cpu_isset(cpu, backtrace_mask)) {
+ raw_spin_lock(&backtrace_lock);
+ pr_warning("IPI backtrace for cpu %d\n", cpu);
+ show_regs(regs);
+ raw_spin_unlock(&backtrace_lock);
+ cpu_clear(cpu, backtrace_mask);
+ }
+}
+
/*
* Main handler for inter-processor interrupts
*/
@@ -624,6 +678,10 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
irq_exit();
break;
+ case IPI_CPU_BACKTRACE:
+ ipi_cpu_backtrace(cpu, regs);
+ break;
+
default:
printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
cpu, ipinr);
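
Note: together with the arch_trigger_all_cpu_backtrace() hook added in process.c above, this gives ARM an NMI-style all-CPU dump: the caller backtraces itself with dump_stack(), kicks every other online CPU with IPI_CPU_BACKTRACE, and waits up to 10 seconds for them to print their registers. A minimal sketch of a hypothetical caller (the stall-reporting hook is illustrative and not part of this patch):

#include <linux/kernel.h>
#include <linux/printk.h>

/* Provided by arch/arm/kernel/process.c in this patch. */
extern void arch_trigger_all_cpu_backtrace(void);

/* Hypothetical debug hook, for illustration only. */
static void report_stall(const char *why)
{
	pr_err("stall detected: %s\n", why);
	/* Dumps the current CPU directly, then IPIs the rest. */
	arch_trigger_all_cpu_backtrace();
}
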
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 79282ebcd939..17a01a00d357 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -287,6 +287,126 @@ void store_cpu_topology(unsigned int cpuid)
cpu_topology[cpuid].socket_id, mpidr);
}
+
+#ifdef CONFIG_SCHED_HMP
+
+static const char * const little_cores[] = {
+ "arm,cortex-a7",
+ NULL,
+};
+
+static bool is_little_cpu(struct device_node *cn)
+{
+ const char * const *lc;
+ for (lc = little_cores; *lc; lc++)
+ if (of_device_is_compatible(cn, *lc))
+ return true;
+ return false;
+}
+
+void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
+ struct cpumask *slow)
+{
+ struct device_node *cn = NULL;
+ int cpu = 0;
+
+ cpumask_clear(fast);
+ cpumask_clear(slow);
+
+ /*
+ * Use the config options if they are given. This helps testing
+ * HMP scheduling on systems without a big.LITTLE architecture.
+ */
+ if (strlen(CONFIG_HMP_FAST_CPU_MASK) && strlen(CONFIG_HMP_SLOW_CPU_MASK)) {
+ if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, fast))
+ WARN(1, "Failed to parse HMP fast cpu mask!\n");
+ if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, slow))
+ WARN(1, "Failed to parse HMP slow cpu mask!\n");
+ return;
+ }
+
+ /*
+ * Else, parse device tree for little cores.
+ */
+ while ((cn = of_find_node_by_type(cn, "cpu"))) {
+
+ if (cpu >= num_possible_cpus())
+ break;
+
+ if (is_little_cpu(cn))
+ cpumask_set_cpu(cpu, slow);
+ else
+ cpumask_set_cpu(cpu, fast);
+
+ cpu++;
+ }
+
+ if (!cpumask_empty(fast) && !cpumask_empty(slow))
+ return;
+
+ /*
+ * We didn't find both big and little cores so let's call all cores
+ * fast as this will keep the system running, with all cores being
+ * treated equal.
+ */
+ cpumask_setall(fast);
+ cpumask_clear(slow);
+}
+
+void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
+{
+ struct cpumask hmp_fast_cpu_mask;
+ struct cpumask hmp_slow_cpu_mask;
+ struct hmp_domain *domain;
+
+ arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask);
+
+ /*
+ * Initialize hmp_domains
+ * Must be ordered with respect to compute capacity.
+ * Fastest domain at head of list.
+ */
+ if (!cpumask_empty(&hmp_slow_cpu_mask)) {
+ domain = (struct hmp_domain *)
+ kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
+ cpumask_copy(&domain->cpus, &hmp_slow_cpu_mask);
+ list_add(&domain->hmp_domains, hmp_domains_list);
+ }
+ domain = (struct hmp_domain *)
+ kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
+ cpumask_copy(&domain->cpus, &hmp_fast_cpu_mask);
+ list_add(&domain->hmp_domains, hmp_domains_list);
+}
+#endif /* CONFIG_SCHED_HMP */
+
+
+/*
+ * cluster_to_logical_mask - return cpu logical mask of CPUs in a cluster
+ * @socket_id: cluster HW identifier
+ * @cluster_mask: the cpumask location to be initialized, modified by the
+ * function only if return value == 0
+ *
+ * Return:
+ *
+ * 0 on success
+ * -EINVAL if cluster_mask is NULL or there is no record matching socket_id
+ */
+int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask)
+{
+ int cpu;
+
+ if (!cluster_mask)
+ return -EINVAL;
+
+ for_each_online_cpu(cpu)
+ if (socket_id == topology_physical_package_id(cpu)) {
+ cpumask_copy(cluster_mask, topology_core_cpumask(cpu));
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
/*
* init_cpu_topology is called at boot when only one cpu is running
* which prevent simultaneous write access to cpu_topology array
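
Note: cluster_to_logical_mask() turns a hardware cluster id into the logical cpumask of that cluster, as the kerneldoc above describes. A short usage sketch; the caller and the cluster id are hypothetical:

#include <linux/cpumask.h>
#include <linux/printk.h>

extern int cluster_to_logical_mask(unsigned int socket_id,
				   cpumask_t *cluster_mask);

static void print_cluster_cpus(unsigned int socket_id)
{
	cpumask_t mask;
	int cpu;

	if (cluster_to_logical_mask(socket_id, &mask)) {
		pr_warn("no online CPU found in cluster %u\n", socket_id);
		return;
	}

	for_each_cpu(cpu, &mask)
		pr_info("cluster %u contains CPU%d\n", socket_id, cpu);
}
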
diff --git a/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h b/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h
index a611ad3153c7..b6132aa95dc0 100644
--- a/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h
+++ b/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h
@@ -463,6 +463,9 @@
GPIO76_LCD_PCLK, \
GPIO77_LCD_BIAS
+/* these enable a work-around for a hw bug in pxa27x during ac97 warm reset */
+#define GPIO113_AC97_nRESET_GPIO_HIGH MFP_CFG_OUT(GPIO113, AF0, DEFAULT)
+#define GPIO95_AC97_nRESET_GPIO_HIGH MFP_CFG_OUT(GPIO95, AF0, DEFAULT)
extern int keypad_set_wake(unsigned int on);
#endif /* __ASM_ARCH_MFP_PXA27X_H */
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index 8047ee0effc5..616cb87b6179 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -47,9 +47,9 @@ void pxa27x_clear_otgph(void)
EXPORT_SYMBOL(pxa27x_clear_otgph);
static unsigned long ac97_reset_config[] = {
- GPIO113_GPIO,
+ GPIO113_AC97_nRESET_GPIO_HIGH,
GPIO113_AC97_nRESET,
- GPIO95_GPIO,
+ GPIO95_AC97_nRESET_GPIO_HIGH,
GPIO95_AC97_nRESET,
};
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 8a9c4cb50a93..9d32a253aac9 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -7,6 +7,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \
mmap.o pgd.o mmu.o vmregion.o
+obj-$(CONFIG_DEBUG_RODATA) += rodata.o
ifneq ($(CONFIG_MMU),y)
obj-y += nommu.o
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index d8fd4d4bd3d4..7a3d3d8d98d7 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -270,6 +270,11 @@ v6_dma_clean_range:
* - end - virtual end address of region
*/
ENTRY(v6_dma_flush_range)
+#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT
+ sub r2, r1, r0
+ cmp r2, #CONFIG_CACHE_FLUSH_RANGE_LIMIT
+ bhi v6_dma_flush_dcache_all
+#endif
#ifdef CONFIG_DMA_CACHE_RWFO
ldrb r2, [r0] @ read for ownership
strb r2, [r0] @ write for ownership
@@ -292,6 +297,18 @@ ENTRY(v6_dma_flush_range)
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
+#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT
+v6_dma_flush_dcache_all:
+ mov r0, #0
+#ifdef HARVARD_CACHE
+ mcr p15, 0, r0, c7, c14, 0 @ D cache clean+invalidate
+#else
+ mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate
+#endif
+ mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
+ mov pc, lr
+#endif
+
/*
* dma_map_area(start, size, dir)
* - start - kernel virtual start address
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9f0610243bd6..59eeb5c20bde 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -555,11 +555,25 @@ static void __init *early_alloc(unsigned long sz)
return early_alloc_aligned(sz, sz);
}
-static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
+static pte_t * __init early_pte_alloc(pmd_t *pmd)
+{
+ if (pmd_none(*pmd) || pmd_bad(*pmd))
+ return early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
+ return pmd_page_vaddr(*pmd);
+}
+
+static void __init early_pte_install(pmd_t *pmd, pte_t *pte, unsigned long prot)
+{
+ __pmd_populate(pmd, __pa(pte), prot);
+ BUG_ON(pmd_bad(*pmd));
+}
+
+static pte_t * __init early_pte_alloc_and_install(pmd_t *pmd,
+ unsigned long addr, unsigned long prot)
{
if (pmd_none(*pmd)) {
- pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
- __pmd_populate(pmd, __pa(pte), prot);
+ pte_t *pte = early_pte_alloc(pmd);
+ early_pte_install(pmd, pte, prot);
}
BUG_ON(pmd_bad(*pmd));
return pte_offset_kernel(pmd, addr);
@@ -569,16 +583,23 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
unsigned long end, unsigned long pfn,
const struct mem_type *type)
{
- pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
+ pte_t *start_pte = early_pte_alloc(pmd);
+ pte_t *pte = start_pte + pte_index(addr);
+
+ /* If replacing a section mapping, the whole section must be replaced */
+ BUG_ON(!pmd_none(*pmd) && pmd_bad(*pmd) && ((addr | end) & ~PMD_MASK));
+
do {
set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
+ early_pte_install(pmd, start_pte, type->prot_l1);
}
static void __init alloc_init_section(pud_t *pud, unsigned long addr,
unsigned long end, phys_addr_t phys,
- const struct mem_type *type)
+ const struct mem_type *type,
+ bool force_pages)
{
pmd_t *pmd = pmd_offset(pud, addr);
@@ -588,7 +609,8 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
* L1 entries, whereas PGDs refer to a group of L1 entries making
* up one logical pointer to an L2 table.
*/
- if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
+ if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0
+ && !force_pages) {
pmd_t *p = pmd;
#ifndef CONFIG_ARM_LPAE
@@ -611,15 +633,15 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
}
}
-static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
- unsigned long end, unsigned long phys, const struct mem_type *type)
+static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
+ unsigned long phys, const struct mem_type *type, bool force_pages)
{
pud_t *pud = pud_offset(pgd, addr);
unsigned long next;
do {
next = pud_addr_end(addr, end);
- alloc_init_section(pud, addr, next, phys, type);
+ alloc_init_section(pud, addr, next, phys, type, force_pages);
phys += next - addr;
} while (pud++, addr = next, addr != end);
}
@@ -693,7 +715,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
* offsets, and we take full advantage of sections and
* supersections.
*/
-static void __init create_mapping(struct map_desc *md)
+static void __init create_mapping(struct map_desc *md, bool force_pages)
{
unsigned long addr, length, end;
phys_addr_t phys;
@@ -743,7 +765,7 @@ static void __init create_mapping(struct map_desc *md)
do {
unsigned long next = pgd_addr_end(addr, end);
- alloc_init_pud(pgd, addr, next, phys, type);
+ alloc_init_pud(pgd, addr, next, phys, type, force_pages);
phys += next - addr;
addr = next;
@@ -764,7 +786,7 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
for (md = io_desc; nr; md++, nr--) {
- create_mapping(md);
+ create_mapping(md, false);
vm->addr = (void *)(md->virtual & PAGE_MASK);
vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
vm->phys_addr = __pfn_to_phys(md->pfn);
@@ -888,7 +910,7 @@ void __init debug_ll_io_init(void)
map.virtual &= PAGE_MASK;
map.length = PAGE_SIZE;
map.type = MT_DEVICE;
- create_mapping(&map);
+ create_mapping(&map, false);
}
#endif
@@ -1130,7 +1152,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
map.virtual = MODULES_VADDR;
map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
map.type = MT_ROM;
- create_mapping(&map);
+ create_mapping(&map, false);
#endif
/*
@@ -1141,14 +1163,14 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
map.virtual = FLUSH_BASE;
map.length = SZ_1M;
map.type = MT_CACHECLEAN;
- create_mapping(&map);
+ create_mapping(&map, false);
#endif
#ifdef FLUSH_BASE_MINICACHE
map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
map.virtual = FLUSH_BASE_MINICACHE;
map.length = SZ_1M;
map.type = MT_MINICLEAN;
- create_mapping(&map);
+ create_mapping(&map, false);
#endif
/*
@@ -1160,12 +1182,12 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
map.virtual = 0xffff0000;
map.length = PAGE_SIZE;
map.type = MT_HIGH_VECTORS;
- create_mapping(&map);
+ create_mapping(&map, false);
if (!vectors_high()) {
map.virtual = 0;
map.type = MT_LOW_VECTORS;
- create_mapping(&map);
+ create_mapping(&map, false);
}
/*
@@ -1191,20 +1213,23 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
- pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
+ pkmap_page_table = early_pte_alloc_and_install(pmd_off_k(PKMAP_BASE),
PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif
}
+
static void __init map_lowmem(void)
{
struct memblock_region *reg;
+ phys_addr_t start;
+ phys_addr_t end;
+ struct map_desc map;
/* Map all the lowmem memory banks. */
for_each_memblock(memory, reg) {
- phys_addr_t start = reg->base;
- phys_addr_t end = start + reg->size;
- struct map_desc map;
+ start = reg->base;
+ end = start + reg->size;
if (end > arm_lowmem_limit)
end = arm_lowmem_limit;
@@ -1216,8 +1241,20 @@ static void __init map_lowmem(void)
map.length = end - start;
map.type = MT_MEMORY;
- create_mapping(&map);
+ create_mapping(&map, false);
}
+
+#ifdef CONFIG_DEBUG_RODATA
+ start = __pa(_stext) & PMD_MASK;
+ end = ALIGN(__pa(__end_rodata), PMD_SIZE);
+
+ map.pfn = __phys_to_pfn(start);
+ map.virtual = __phys_to_virt(start);
+ map.length = end - start;
+ map.type = MT_MEMORY;
+
+ create_mapping(&map, true);
+#endif
}
/*
diff --git a/arch/arm/mm/rodata.c b/arch/arm/mm/rodata.c
new file mode 100644
index 000000000000..9a8eb841c428
--- /dev/null
+++ b/arch/arm/mm/rodata.c
@@ -0,0 +1,159 @@
+/*
+ * linux/arch/arm/mm/rodata.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * Based on x86 implementation in arch/x86/mm/init_32.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#include <asm/cache.h>
+#include <asm/pgtable.h>
+#include <asm/rodata.h>
+#include <asm/sections.h>
+#include <asm/tlbflush.h>
+
+#include "mm.h"
+
+static int kernel_set_to_readonly __read_mostly;
+
+#ifdef CONFIG_DEBUG_RODATA_TEST
+static const int rodata_test_data = 0xC3;
+
+static noinline void rodata_test(void)
+{
+ int result;
+
+ pr_info("%s: attempting to write to read-only section:\n", __func__);
+
+ if (*(volatile int *)&rodata_test_data != 0xC3) {
+ pr_err("read only data changed before test\n");
+ return;
+ }
+
+ /*
+ * Attempt to write to rodata_test_data, trapping the expected
+ * data abort. If the trap executed, result will be 1. If it didn't,
+ * result will be 0xFF.
+ */
+ asm volatile(
+ "0: str %[zero], [%[rodata_test_data]]\n"
+ " mov %[result], #0xFF\n"
+ " b 2f\n"
+ "1: mov %[result], #1\n"
+ "2:\n"
+
+ /* Exception fixup - if store at label 0 faults, jumps to 1 */
+ ".pushsection __ex_table, \"a\"\n"
+ " .long 0b, 1b\n"
+ ".popsection\n"
+
+ : [result] "=r" (result)
+ : [rodata_test_data] "r" (&rodata_test_data), [zero] "r" (0)
+ : "memory"
+ );
+
+ if (result == 1)
+ pr_info("write to read-only section trapped, success\n");
+ else
+ pr_err("write to read-only section NOT trapped, test failed\n");
+
+ if (*(volatile int *)&rodata_test_data != 0xC3)
+ pr_err("read only data changed during write\n");
+}
+#else
+static inline void rodata_test(void) { }
+#endif
+
+static int set_page_attributes(unsigned long virt, int numpages,
+ pte_t (*f)(pte_t))
+{
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long start = virt;
+ unsigned long end = virt + (numpages << PAGE_SHIFT);
+ unsigned long pmd_end;
+
+ while (virt < end) {
+ pmd = pmd_off_k(virt);
+ pmd_end = min(ALIGN(virt + 1, PMD_SIZE), end);
+
+ if ((pmd_val(*pmd) & PMD_TYPE_MASK) != PMD_TYPE_TABLE) {
+ pr_err("%s: pmd %p=%08lx for %08lx not page table\n",
+ __func__, pmd, pmd_val(*pmd), virt);
+ virt = pmd_end;
+ continue;
+ }
+
+ while (virt < pmd_end) {
+ pte = pte_offset_kernel(pmd, virt);
+ set_pte_ext(pte, f(*pte), 0);
+ virt += PAGE_SIZE;
+ }
+ }
+
+ flush_tlb_kernel_range(start, end);
+
+ return 0;
+}
+
+int set_memory_ro(unsigned long virt, int numpages)
+{
+ return set_page_attributes(virt, numpages, pte_wrprotect);
+}
+EXPORT_SYMBOL(set_memory_ro);
+
+int set_memory_rw(unsigned long virt, int numpages)
+{
+ return set_page_attributes(virt, numpages, pte_mkwrite);
+}
+EXPORT_SYMBOL(set_memory_rw);
+
+void set_kernel_text_rw(void)
+{
+ unsigned long start = PAGE_ALIGN((unsigned long)_text);
+ unsigned long size = PAGE_ALIGN((unsigned long)__end_rodata) - start;
+
+ if (!kernel_set_to_readonly)
+ return;
+
+ pr_debug("Set kernel text: %lx - %lx to read-write\n",
+ start, start + size);
+
+ set_memory_rw(start, size >> PAGE_SHIFT);
+}
+
+void set_kernel_text_ro(void)
+{
+ unsigned long start = PAGE_ALIGN((unsigned long)_text);
+ unsigned long size = PAGE_ALIGN((unsigned long)__end_rodata) - start;
+
+ if (!kernel_set_to_readonly)
+ return;
+
+ pr_info_once("Write protecting the kernel text section %lx - %lx\n",
+ start, start + size);
+
+ pr_debug("Set kernel text: %lx - %lx to read only\n",
+ start, start + size);
+
+ set_memory_ro(start, size >> PAGE_SHIFT);
+}
+
+void mark_rodata_ro(void)
+{
+ kernel_set_to_readonly = 1;
+
+ set_kernel_text_ro();
+
+ rodata_test();
+}
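
Note: once mark_rodata_ro() has run, the kernel text is write-protected; set_kernel_text_rw()/set_kernel_text_ro() exist so that code patching can temporarily lift that protection, which also relies on the map_lowmem() change above forcing page mappings for the text/rodata region. A minimal sketch of that pattern; the instruction-patching helper is hypothetical:

#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/rodata.h>

/* Hypothetical text-patching helper, for illustration only. */
static void patch_kernel_insn(unsigned long addr, u32 insn)
{
	set_kernel_text_rw();		/* no-op until mark_rodata_ro() has run */
	*(u32 *)addr = insn;
	flush_icache_range(addr, addr + sizeof(insn));
	set_kernel_text_ro();
}
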
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index cc926c985981..7788327044ff 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -28,6 +28,7 @@ ENTRY(do_vfp)
str r11, [r10, #TI_PREEMPT]
#endif
enable_irq
+ str r2, [sp, #S_PC] @ update regs->ARM_pc for Thumb 2 case
ldr r4, .LCvfp
ldr r11, [r10, #TI_CPU] @ CPU number
add r10, r10, #TI_VFPSTATE @ r10 = workspace
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index 801e2d7fcbc6..32ac0aef0068 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -1,4 +1,5 @@
targets += dtbs
+targets += $(dtb-y)
dtbs: $(addprefix $(obj)/, $(dtb-y))
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 64b133949502..e333a243bfcc 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -24,7 +24,8 @@
/*
* Software defined PTE bits definition.
*/
-#define PTE_VALID (_AT(pteval_t, 1) << 0) /* pte_present() check */
+#define PTE_VALID (_AT(pteval_t, 1) << 0)
+#define PTE_PROT_NONE (_AT(pteval_t, 1) << 1) /* only when !PTE_VALID */
#define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */
#define PTE_DIRTY (_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
@@ -60,9 +61,12 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
extern pgprot_t pgprot_default;
-#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
+#define __pgprot_modify(prot,mask,bits) \
+ __pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+
+#define _MOD_PROT(p, b) __pgprot_modify(p, 0, b)
-#define PAGE_NONE _MOD_PROT(pgprot_default, PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define PAGE_NONE __pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE)
#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
@@ -72,7 +76,7 @@ extern pgprot_t pgprot_default;
#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
-#define __PAGE_NONE __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE)
#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
@@ -125,16 +129,15 @@ extern struct page *empty_zero_page;
/*
* The following only work if pte_present(). Undefined behaviour otherwise.
*/
-#define pte_present(pte) (pte_val(pte) & PTE_VALID)
+#define pte_present(pte) (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))
#define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY)
#define pte_young(pte) (pte_val(pte) & PTE_AF)
#define pte_special(pte) (pte_val(pte) & PTE_SPECIAL)
#define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY))
#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
-#define pte_present_exec_user(pte) \
- ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == \
- (PTE_VALID | PTE_USER))
+#define pte_valid_user(pte) \
+ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
@@ -157,10 +160,13 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
- if (pte_present_exec_user(pte))
- __sync_icache_dcache(pte, addr);
- if (!pte_dirty(pte))
- pte = pte_wrprotect(pte);
+ if (pte_valid_user(pte)) {
+ if (pte_exec(pte))
+ __sync_icache_dcache(pte, addr);
+ if (!pte_dirty(pte))
+ pte = pte_wrprotect(pte);
+ }
+
set_pte(ptep, pte);
}
@@ -170,9 +176,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
#define pte_huge(pte) ((pte_val(pte) & PTE_TYPE_MASK) == PTE_TYPE_HUGEPAGE)
#define pte_mkhuge(pte) (__pte((pte_val(pte) & ~PTE_TYPE_MASK) | PTE_TYPE_HUGEPAGE))
-#define __pgprot_modify(prot,mask,bits) \
- __pgprot((pgprot_val(prot) & ~(mask)) | (bits))
-
#define __HAVE_ARCH_PTE_SPECIAL
/*
@@ -264,7 +267,8 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
- const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY;
+ const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
+ PTE_PROT_NONE | PTE_VALID;
pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
return pte;
}
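
Note: the effect of the new PTE_PROT_NONE bit is that a PROT_NONE mapping stays "present" to the kernel while remaining invalid to the hardware walker, and pte_modify() now carries both software bits across protection changes. A standalone illustration using the bit positions defined above; everything outside those two definitions is scaffolding:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;

#define PTE_VALID      ((pteval_t)1 << 0)
#define PTE_PROT_NONE  ((pteval_t)1 << 1)   /* only meaningful when !PTE_VALID */

static bool pte_present(pteval_t pte)
{
	return pte & (PTE_VALID | PTE_PROT_NONE);
}

int main(void)
{
	pteval_t prot_none_pte = PTE_PROT_NONE;   /* PTE_VALID deliberately clear */

	printf("kernel sees it as present: %d\n", pte_present(prot_none_pte));     /* 1 */
	printf("MMU sees it as valid:      %d\n", !!(prot_none_pte & PTE_VALID));  /* 0 */
	return 0;
}
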
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 58432625fdb3..5ef47ba3ed45 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -395,8 +395,13 @@ __SYSCALL(370, sys_name_to_handle_at)
__SYSCALL(371, compat_sys_open_by_handle_at)
__SYSCALL(372, compat_sys_clock_adjtime)
__SYSCALL(373, sys_syncfs)
+__SYSCALL(374, compat_sys_sendmmsg)
+__SYSCALL(375, sys_setns)
+__SYSCALL(376, compat_sys_process_vm_readv)
+__SYSCALL(377, compat_sys_process_vm_writev)
+__SYSCALL(378, sys_ni_syscall) /* 378 for kcmp */
-#define __NR_compat_syscalls 374
+#define __NR_compat_syscalls 379
/*
* Compat syscall numbers used by the AArch64 kernel.
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index c958cb84d75f..6a389dc1bd49 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -252,10 +252,6 @@ void update_vsyscall(struct timekeeper *tk)
void update_vsyscall_tz(void)
{
- ++vdso_data->tb_seq_count;
- smp_wmb();
vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
vdso_data->tz_dsttime = sys_tz.tz_dsttime;
- smp_wmb();
- ++vdso_data->tb_seq_count;
}
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index 8bf658d974f9..f0a6d10b5211 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -73,8 +73,6 @@ ENTRY(__kernel_gettimeofday)
/* If tz is NULL, return 0. */
cbz x1, 3f
ldp w4, w5, [vdso_data, #VDSO_TZ_MINWEST]
- seqcnt_read w9
- seqcnt_check w9, 1b
stp w4, w5, [x1, #TZ_MINWEST]
3:
mov x0, xzr
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index aa03f2e13385..e70001cfa05b 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -6,6 +6,7 @@ config MN10300
select ARCH_WANT_IPC_PARSE_VERSION
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_KGDB
+ select GENERIC_ATOMIC64
select HAVE_NMI_WATCHDOG if MN10300_WD_TIMER
select GENERIC_CLOCKEVENTS
select MODULES_USE_ELF_RELA
diff --git a/arch/powerpc/include/uapi/asm/kvm_para.h b/arch/powerpc/include/uapi/asm/kvm_para.h
index ed0e0254b47f..e3af3286a068 100644
--- a/arch/powerpc/include/uapi/asm/kvm_para.h
+++ b/arch/powerpc/include/uapi/asm/kvm_para.h
@@ -78,7 +78,7 @@ struct kvm_vcpu_arch_shared {
#define KVM_HCALL_TOKEN(num) _EV_HCALL_TOKEN(EV_KVM_VENDOR_ID, num)
-#include <uapi/asm/epapr_hcalls.h>
+#include <asm/epapr_hcalls.h>
#define KVM_FEATURE_MAGIC_PAGE 1
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index 35f3cf0269b3..a353c485808c 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -79,7 +79,9 @@ static void flush_tlb_power7(struct kvm_vcpu *vcpu)
static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
{
unsigned long srr1 = vcpu->arch.shregs.msr;
+#ifdef CONFIG_PPC_POWERNV
struct opal_machine_check_event *opal_evt;
+#endif
long handled = 1;
if (srr1 & SRR1_MC_LDSTERR) {
@@ -117,6 +119,7 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
handled = 0;
}
+#ifdef CONFIG_PPC_POWERNV
/*
* See if OPAL has already handled the condition.
* We assume that if the condition is recovered then OPAL
@@ -131,6 +134,7 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
if (handled)
opal_evt->in_use = 0;
+#endif
return handled;
}
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 4b8e08b56f49..7e3ce78d4290 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -24,8 +24,8 @@ CHECKFLAGS += -D__s390__ -msize-long
else
LD_BFD := elf64-s390
LDFLAGS := -m elf64_s390
-KBUILD_AFLAGS_MODULE += -fpic -D__PIC__
-KBUILD_CFLAGS_MODULE += -fpic -D__PIC__
+KBUILD_AFLAGS_MODULE += -fPIC
+KBUILD_CFLAGS_MODULE += -fPIC
KBUILD_CFLAGS += -m64
KBUILD_AFLAGS += -m64
UTS_MACHINE := s390x
diff --git a/arch/s390/include/asm/dma.h b/arch/s390/include/asm/dma.h
index de015d85e3e5..bb9bdcd20864 100644
--- a/arch/s390/include/asm/dma.h
+++ b/arch/s390/include/asm/dma.h
@@ -10,4 +10,10 @@
*/
#define MAX_DMA_ADDRESS 0x80000000
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy (0)
+#endif
+
#endif /* _ASM_S390_DMA_H */
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 16c3eb164f4f..27cb32185ce1 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -85,6 +85,11 @@ static inline void iounmap(volatile void __iomem *addr)
#define __raw_writel zpci_write_u32
#define __raw_writeq zpci_write_u64
+#define readb_relaxed readb
+#define readw_relaxed readw
+#define readl_relaxed readl
+#define readq_relaxed readq
+
#endif /* CONFIG_PCI */
#include <asm-generic/io.h>
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index e6972f85d2b0..7def77302d63 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -2,43 +2,61 @@
#define _ASM_IRQ_H
#include <linux/hardirq.h>
+#include <linux/percpu.h>
+#include <linux/cache.h>
#include <linux/types.h>
-enum interruption_class {
+enum interruption_main_class {
EXTERNAL_INTERRUPT,
IO_INTERRUPT,
- EXTINT_CLK,
- EXTINT_EXC,
- EXTINT_EMS,
- EXTINT_TMR,
- EXTINT_TLA,
- EXTINT_PFL,
- EXTINT_DSD,
- EXTINT_VRT,
- EXTINT_SCP,
- EXTINT_IUC,
- EXTINT_CMS,
- EXTINT_CMC,
- EXTINT_CMR,
- IOINT_CIO,
- IOINT_QAI,
- IOINT_DAS,
- IOINT_C15,
- IOINT_C70,
- IOINT_TAP,
- IOINT_VMR,
- IOINT_LCS,
- IOINT_CLW,
- IOINT_CTC,
- IOINT_APB,
- IOINT_ADM,
- IOINT_CSC,
- IOINT_PCI,
- IOINT_MSI,
+ NR_IRQS
+};
+
+enum interruption_class {
+ IRQEXT_CLK,
+ IRQEXT_EXC,
+ IRQEXT_EMS,
+ IRQEXT_TMR,
+ IRQEXT_TLA,
+ IRQEXT_PFL,
+ IRQEXT_DSD,
+ IRQEXT_VRT,
+ IRQEXT_SCP,
+ IRQEXT_IUC,
+ IRQEXT_CMS,
+ IRQEXT_CMC,
+ IRQEXT_CMR,
+ IRQIO_CIO,
+ IRQIO_QAI,
+ IRQIO_DAS,
+ IRQIO_C15,
+ IRQIO_C70,
+ IRQIO_TAP,
+ IRQIO_VMR,
+ IRQIO_LCS,
+ IRQIO_CLW,
+ IRQIO_CTC,
+ IRQIO_APB,
+ IRQIO_ADM,
+ IRQIO_CSC,
+ IRQIO_PCI,
+ IRQIO_MSI,
NMI_NMI,
- NR_IRQS,
+ CPU_RST,
+ NR_ARCH_IRQS
};
+struct irq_stat {
+ unsigned int irqs[NR_ARCH_IRQS];
+};
+
+DECLARE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
+
+static __always_inline void inc_irq_stat(enum interruption_class irq)
+{
+ __get_cpu_var(irq_stat).irqs[irq]++;
+}
+
struct ext_code {
unsigned short subcode;
unsigned short code;
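
Note: the per-CPU irq_stat array plus inc_irq_stat() replace the old kstat_cpu(smp_processor_id()).irqs[...]++ pattern, as the later hunks in nmi.c, perf_cpum_cf.c, time.c and pci.c show. A minimal sketch of an interrupt handler using the new helper; the handler itself is hypothetical:

#include <asm/irq.h>

/* Hypothetical DASD-style I/O interrupt handler, for illustration only. */
static void my_io_interrupt(void)
{
	inc_irq_stat(IRQIO_DAS);	/* bumps this CPU's counter, shown in
					   /proc/interrupts under "DAS" */
	/* ... actual device handling ... */
}
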
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index c928dc1938f2..c1d7930a82f4 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1387,10 +1387,7 @@ static inline int has_transparent_hugepage(void)
static inline unsigned long pmd_pfn(pmd_t pmd)
{
- if (pmd_trans_huge(pmd))
- return pmd_val(pmd) >> HPAGE_SHIFT;
- else
- return pmd_val(pmd) >> PAGE_SHIFT;
+ return pmd_val(pmd) >> PAGE_SHIFT;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index fba4d66788a2..4c060bb5b8ea 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -128,4 +128,32 @@ static inline unsigned long long get_clock_monotonic(void)
return get_clock_xt() - sched_clock_base_cc;
}
+/**
+ * tod_to_ns - convert a TOD format value to nanoseconds
+ * @todval: to be converted TOD format value
+ * Returns: number of nanoseconds that correspond to the TOD format value
+ *
+ * Converting a 64 bit TOD format value to nanoseconds means that the value
+ * must be divided by 4.096. To achieve that we multiply by 125 and
+ * divide by 512:
+ *
+ * ns = (todval * 125) >> 9;
+ *
+ * In order to avoid an overflow with the multiplication we can rewrite this.
+ * With a split todval == 2^32 * th + tl (th upper 32 bits, tl lower 32 bits)
+ * we end up with
+ *
+ * ns = ((2^32 * th + tl) * 125 ) >> 9;
+ * -> ns = (2^23 * th * 125) + ((tl * 125) >> 9);
+ *
+ */
+static inline unsigned long long tod_to_ns(unsigned long long todval)
+{
+ unsigned long long ns;
+
+ ns = ((todval >> 32) << 23) * 125;
+ ns += ((todval & 0xffffffff) * 125) >> 9;
+ return ns;
+}
+
#endif
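
Note: the split avoids overflowing 64 bits in todval * 125 while staying exact, because the high-word term is a multiple of 512 before the shift. A small user-space check of the identity against a 128-bit reference; this is standalone and not part of the patch:

#include <stdio.h>

typedef unsigned long long u64;

static u64 tod_to_ns(u64 todval)
{
	return ((todval >> 32) << 23) * 125 + (((todval & 0xffffffffULL) * 125) >> 9);
}

int main(void)
{
	u64 samples[] = { 0, 4096, 0x123456789abcdefULL, 0xffffffffffffffffULL };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		u64 tod = samples[i];
		u64 ref = (u64)(((unsigned __int128)tod * 125) >> 9);

		printf("%#llx -> %llu ns (reference %llu)\n", tod, tod_to_ns(tod), ref);
	}
	return 0;
}

Both columns agree for every sample, including the maximum TOD value, which stays well inside 64 bits after the split.
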
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index 63e6078699f1..864f693c237f 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -279,7 +279,8 @@
#define __NR_process_vm_writev 341
#define __NR_s390_runtime_instr 342
#define __NR_kcmp 343
-#define NR_syscalls 344
+#define __NR_finit_module 344
+#define NR_syscalls 345
/*
* There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 827e094a2f49..9b9a805656b5 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1659,3 +1659,9 @@ ENTRY(sys_kcmp_wrapper)
llgfr %r5,%r5 # unsigned long
llgfr %r6,%r6 # unsigned long
jg sys_kcmp
+
+ENTRY(sys_finit_module_wrapper)
+ lgfr %r2,%r2 # int
+ llgtr %r3,%r3 # const char __user *
+ lgfr %r4,%r4 # int
+ jg sys_finit_module
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index ba500d8dc392..4e8215e0d4b6 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -1127,13 +1127,14 @@ debug_register_view(debug_info_t * id, struct debug_view *view)
if (i == DEBUG_MAX_VIEWS) {
pr_err("Registering view %s/%s would exceed the maximum "
"number of views %i\n", id->name, view->name, i);
- debugfs_remove(pde);
rc = -1;
} else {
id->views[i] = view;
id->debugfs_entries[i] = pde;
}
spin_unlock_irqrestore(&id->lock, flags);
+ if (rc)
+ debugfs_remove(pde);
out:
return rc;
}
@@ -1146,9 +1147,9 @@ EXPORT_SYMBOL(debug_register_view);
int
debug_unregister_view(debug_info_t * id, struct debug_view *view)
{
- int rc = 0;
- int i;
+ struct dentry *dentry = NULL;
unsigned long flags;
+ int i, rc = 0;
if (!id)
goto out;
@@ -1160,10 +1161,12 @@ debug_unregister_view(debug_info_t * id, struct debug_view *view)
if (i == DEBUG_MAX_VIEWS)
rc = -1;
else {
- debugfs_remove(id->debugfs_entries[i]);
+ dentry = id->debugfs_entries[i];
id->views[i] = NULL;
+ id->debugfs_entries[i] = NULL;
}
spin_unlock_irqrestore(&id->lock, flags);
+ debugfs_remove(dentry);
out:
return rc;
}
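
Note: both debug.c hunks follow the same rule: debugfs_remove() can sleep, so it must not be called with the spinlock held; the entry is only unlinked from the bookkeeping arrays under the lock and the dentry is removed afterwards. The generic shape of that pattern, with illustrative names:

#include <linux/debugfs.h>
#include <linux/spinlock.h>

/* Illustrative only: unlink under the lock, remove outside it. */
static void drop_entry(struct dentry **slot, spinlock_t *lock)
{
	struct dentry *dentry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	dentry = *slot;			/* detach from the table while protected */
	*slot = NULL;
	spin_unlock_irqrestore(lock, flags);

	debugfs_remove(dentry);		/* may sleep; safe once the lock is dropped */
}
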
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index bf24293970ce..9df824ea1667 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -24,43 +24,65 @@
#include <asm/irq.h>
#include "entry.h"
+DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
+EXPORT_PER_CPU_SYMBOL_GPL(irq_stat);
+
struct irq_class {
char *name;
char *desc;
};
-static const struct irq_class intrclass_names[] = {
+/*
+ * The list of "main" irq classes on s390. This is the list of interrrupts
+ * that appear both in /proc/stat ("intr" line) and /proc/interrupts.
+ * Historically only external and I/O interrupts have been part of /proc/stat.
+ * We can't add the split external and I/O sub classes since the first field
+ * in the "intr" line in /proc/stat is supposed to be the sum of all other
+ * fields.
+ * Since the external and I/O interrupt fields are already sums we would end
+ * up with a sum that accounts for each interrupt twice.
+ */
+static const struct irq_class irqclass_main_desc[NR_IRQS] = {
[EXTERNAL_INTERRUPT] = {.name = "EXT"},
- [IO_INTERRUPT] = {.name = "I/O"},
- [EXTINT_CLK] = {.name = "CLK", .desc = "[EXT] Clock Comparator"},
- [EXTINT_EXC] = {.name = "EXC", .desc = "[EXT] External Call"},
- [EXTINT_EMS] = {.name = "EMS", .desc = "[EXT] Emergency Signal"},
- [EXTINT_TMR] = {.name = "TMR", .desc = "[EXT] CPU Timer"},
- [EXTINT_TLA] = {.name = "TAL", .desc = "[EXT] Timing Alert"},
- [EXTINT_PFL] = {.name = "PFL", .desc = "[EXT] Pseudo Page Fault"},
- [EXTINT_DSD] = {.name = "DSD", .desc = "[EXT] DASD Diag"},
- [EXTINT_VRT] = {.name = "VRT", .desc = "[EXT] Virtio"},
- [EXTINT_SCP] = {.name = "SCP", .desc = "[EXT] Service Call"},
- [EXTINT_IUC] = {.name = "IUC", .desc = "[EXT] IUCV"},
- [EXTINT_CMS] = {.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
- [EXTINT_CMC] = {.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
- [EXTINT_CMR] = {.name = "CMR", .desc = "[EXT] CPU-Measurement: RI"},
- [IOINT_CIO] = {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
- [IOINT_QAI] = {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
- [IOINT_DAS] = {.name = "DAS", .desc = "[I/O] DASD"},
- [IOINT_C15] = {.name = "C15", .desc = "[I/O] 3215"},
- [IOINT_C70] = {.name = "C70", .desc = "[I/O] 3270"},
- [IOINT_TAP] = {.name = "TAP", .desc = "[I/O] Tape"},
- [IOINT_VMR] = {.name = "VMR", .desc = "[I/O] Unit Record Devices"},
- [IOINT_LCS] = {.name = "LCS", .desc = "[I/O] LCS"},
- [IOINT_CLW] = {.name = "CLW", .desc = "[I/O] CLAW"},
- [IOINT_CTC] = {.name = "CTC", .desc = "[I/O] CTC"},
- [IOINT_APB] = {.name = "APB", .desc = "[I/O] AP Bus"},
- [IOINT_ADM] = {.name = "ADM", .desc = "[I/O] EADM Subchannel"},
- [IOINT_CSC] = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"},
- [IOINT_PCI] = {.name = "PCI", .desc = "[I/O] PCI Interrupt" },
- [IOINT_MSI] = {.name = "MSI", .desc = "[I/O] MSI Interrupt" },
+ [IO_INTERRUPT] = {.name = "I/O"}
+};
+
+/*
+ * The list of split external and I/O interrupts that appear only in
+ * /proc/interrupts.
+ * In addition this list contains non external / I/O events like NMIs.
+ */
+static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
+ [IRQEXT_CLK] = {.name = "CLK", .desc = "[EXT] Clock Comparator"},
+ [IRQEXT_EXC] = {.name = "EXC", .desc = "[EXT] External Call"},
+ [IRQEXT_EMS] = {.name = "EMS", .desc = "[EXT] Emergency Signal"},
+ [IRQEXT_TMR] = {.name = "TMR", .desc = "[EXT] CPU Timer"},
+ [IRQEXT_TLA] = {.name = "TAL", .desc = "[EXT] Timing Alert"},
+ [IRQEXT_PFL] = {.name = "PFL", .desc = "[EXT] Pseudo Page Fault"},
+ [IRQEXT_DSD] = {.name = "DSD", .desc = "[EXT] DASD Diag"},
+ [IRQEXT_VRT] = {.name = "VRT", .desc = "[EXT] Virtio"},
+ [IRQEXT_SCP] = {.name = "SCP", .desc = "[EXT] Service Call"},
+ [IRQEXT_IUC] = {.name = "IUC", .desc = "[EXT] IUCV"},
+ [IRQEXT_CMS] = {.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
+ [IRQEXT_CMC] = {.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
+ [IRQEXT_CMR] = {.name = "CMR", .desc = "[EXT] CPU-Measurement: RI"},
+ [IRQIO_CIO] = {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
+ [IRQIO_QAI] = {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
+ [IRQIO_DAS] = {.name = "DAS", .desc = "[I/O] DASD"},
+ [IRQIO_C15] = {.name = "C15", .desc = "[I/O] 3215"},
+ [IRQIO_C70] = {.name = "C70", .desc = "[I/O] 3270"},
+ [IRQIO_TAP] = {.name = "TAP", .desc = "[I/O] Tape"},
+ [IRQIO_VMR] = {.name = "VMR", .desc = "[I/O] Unit Record Devices"},
+ [IRQIO_LCS] = {.name = "LCS", .desc = "[I/O] LCS"},
+ [IRQIO_CLW] = {.name = "CLW", .desc = "[I/O] CLAW"},
+ [IRQIO_CTC] = {.name = "CTC", .desc = "[I/O] CTC"},
+ [IRQIO_APB] = {.name = "APB", .desc = "[I/O] AP Bus"},
+ [IRQIO_ADM] = {.name = "ADM", .desc = "[I/O] EADM Subchannel"},
+ [IRQIO_CSC] = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"},
+ [IRQIO_PCI] = {.name = "PCI", .desc = "[I/O] PCI Interrupt" },
+ [IRQIO_MSI] = {.name = "MSI", .desc = "[I/O] MSI Interrupt" },
[NMI_NMI] = {.name = "NMI", .desc = "[NMI] Machine Check"},
+ [CPU_RST] = {.name = "RST", .desc = "[CPU] CPU Restart"},
};
/*
@@ -68,30 +90,34 @@ static const struct irq_class intrclass_names[] = {
*/
int show_interrupts(struct seq_file *p, void *v)
{
- int i = *(loff_t *) v, j;
+ int irq = *(loff_t *) v;
+ int cpu;
get_online_cpus();
- if (i == 0) {
+ if (irq == 0) {
seq_puts(p, " ");
- for_each_online_cpu(j)
- seq_printf(p, "CPU%d ",j);
+ for_each_online_cpu(cpu)
+ seq_printf(p, "CPU%d ", cpu);
seq_putc(p, '\n');
}
-
- if (i < NR_IRQS) {
- seq_printf(p, "%s: ", intrclass_names[i].name);
-#ifndef CONFIG_SMP
- seq_printf(p, "%10u ", kstat_irqs(i));
-#else
- for_each_online_cpu(j)
- seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
-#endif
- if (intrclass_names[i].desc)
- seq_printf(p, " %s", intrclass_names[i].desc);
- seq_putc(p, '\n');
- }
+ if (irq < NR_IRQS) {
+ seq_printf(p, "%s: ", irqclass_main_desc[irq].name);
+ for_each_online_cpu(cpu)
+ seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[irq]);
+ seq_putc(p, '\n');
+ goto skip_arch_irqs;
+ }
+ for (irq = 0; irq < NR_ARCH_IRQS; irq++) {
+ seq_printf(p, "%s: ", irqclass_sub_desc[irq].name);
+ for_each_online_cpu(cpu)
+ seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).irqs[irq]);
+ if (irqclass_sub_desc[irq].desc)
+ seq_printf(p, " %s", irqclass_sub_desc[irq].desc);
+ seq_putc(p, '\n');
+ }
+skip_arch_irqs:
put_online_cpus();
- return 0;
+ return 0;
}
/*
@@ -222,7 +248,7 @@ void __irq_entry do_extint(struct pt_regs *regs, struct ext_code ext_code,
/* Serve timer interrupts first. */
clock_comparator_work();
}
- kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
+ kstat_incr_irqs_this_cpu(EXTERNAL_INTERRUPT, NULL);
if (ext_code.code != 0x1004)
__get_cpu_var(s390_idle).nohz_delay = 1;
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index a6daa5c5cdb0..7918fbea36bb 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -254,7 +254,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
int umode;
nmi_enter();
- kstat_cpu(smp_processor_id()).irqs[NMI_NMI]++;
+ inc_irq_stat(NMI_NMI);
mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
mcck = &__get_cpu_var(cpu_mcck);
umode = user_mode(regs);
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index c4e7269d4a09..86ec7447e1f5 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -229,7 +229,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
if (!(alert & CPU_MF_INT_CF_MASK))
return;
- kstat_cpu(smp_processor_id()).irqs[EXTINT_CMC]++;
+ inc_irq_stat(IRQEXT_CMC);
cpuhw = &__get_cpu_var(cpu_hw_events);
/* Measurement alerts are shared and might happen when the PMU
diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
index 61066f6f71a5..077a99389b07 100644
--- a/arch/s390/kernel/runtime_instr.c
+++ b/arch/s390/kernel/runtime_instr.c
@@ -71,7 +71,7 @@ static void runtime_instr_int_handler(struct ext_code ext_code,
if (!(param32 & CPU_MF_INT_RI_MASK))
return;
- kstat_cpu(smp_processor_id()).irqs[EXTINT_CMR]++;
+ inc_irq_stat(IRQEXT_CMR);
if (!current->thread.ri_cb)
return;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 2568590973ad..a5360de85ec7 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -16,7 +16,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/errno.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
@@ -289,6 +289,7 @@ void machine_power_off(void)
* Dummy power off function.
*/
void (*pm_power_off)(void) = machine_power_off;
+EXPORT_SYMBOL_GPL(pm_power_off);
static int __init early_parse_mem(char *p)
{
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 0b45baa55438..7433a2f9e5cc 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -433,9 +433,9 @@ static void do_ext_call_interrupt(struct ext_code ext_code,
cpu = smp_processor_id();
if (ext_code.code == 0x1202)
- kstat_cpu(cpu).irqs[EXTINT_EXC]++;
+ inc_irq_stat(IRQEXT_EXC);
else
- kstat_cpu(cpu).irqs[EXTINT_EMS]++;
+ inc_irq_stat(IRQEXT_EMS);
/*
* handle bit signal external calls
*/
@@ -623,9 +623,10 @@ static struct sclp_cpu_info *smp_get_cpu_info(void)
return info;
}
-static int smp_add_present_cpu(int cpu);
+static int __cpuinit smp_add_present_cpu(int cpu);
-static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
+static int __cpuinit __smp_rescan_cpus(struct sclp_cpu_info *info,
+ int sysfs_add)
{
struct pcpu *pcpu;
cpumask_t avail;
@@ -708,6 +709,7 @@ static void __cpuinit smp_start_secondary(void *cpuvoid)
pfault_init();
notify_cpu_starting(smp_processor_id());
set_cpu_online(smp_processor_id(), true);
+ inc_irq_stat(CPU_RST);
local_irq_enable();
/* cpu_idle will call schedule for us */
cpu_idle();
@@ -985,7 +987,7 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
return notifier_from_errno(err);
}
-static int smp_add_present_cpu(int cpu)
+static int __cpuinit smp_add_present_cpu(int cpu)
{
struct cpu *c = &pcpu_devices[cpu].cpu;
struct device *s = &c->dev;
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 48174850f3b0..6a6c61f94dd3 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -352,3 +352,4 @@ SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv_wr
SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev_wrapper)
SYSCALL(sys_ni_syscall,sys_s390_runtime_instr,sys_s390_runtime_instr_wrapper)
SYSCALL(sys_kcmp,sys_kcmp,sys_kcmp_wrapper)
+SYSCALL(sys_finit_module,sys_finit_module,sys_finit_module_wrapper)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 7fcd690d42c7..a5f4f5a1d24b 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
*/
unsigned long long notrace __kprobes sched_clock(void)
{
- return (get_clock_monotonic() * 125) >> 9;
+ return tod_to_ns(get_clock_monotonic());
}
/*
@@ -168,7 +168,7 @@ static void clock_comparator_interrupt(struct ext_code ext_code,
unsigned int param32,
unsigned long param64)
{
- kstat_cpu(smp_processor_id()).irqs[EXTINT_CLK]++;
+ inc_irq_stat(IRQEXT_CLK);
if (S390_lowcore.clock_comparator == -1ULL)
set_clock_comparator(S390_lowcore.clock_comparator);
}
@@ -179,7 +179,7 @@ static void stp_timing_alert(struct stp_irq_parm *);
static void timing_alert_interrupt(struct ext_code ext_code,
unsigned int param32, unsigned long param64)
{
- kstat_cpu(smp_processor_id()).irqs[EXTINT_TLA]++;
+ inc_irq_stat(IRQEXT_TLA);
if (param32 & 0x00c40000)
etr_timing_alert((struct etr_irq_parm *) &param32);
if (param32 & 0x00038000)
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index f1aba87cceb8..4b2e3e317004 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -10,6 +10,7 @@
#include <linux/bootmem.h>
#include <linux/cpuset.h>
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
@@ -42,6 +43,7 @@ static struct mask_info socket_info;
static struct mask_info book_info;
struct cpu_topology_s390 cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index c30615e605ac..82c481ddef76 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -408,7 +408,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
return 0;
}
- sltime = ((vcpu->arch.sie_block->ckc - now)*125)>>9;
+ sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index c9011bfaabbe..f090e819bf71 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -613,7 +613,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
kvm_s390_deliver_pending_interrupts(vcpu);
vcpu->arch.sie_block->icptcode = 0;
+ preempt_disable();
kvm_guest_enter();
+ preempt_enable();
VCPU_EVENT(vcpu, 6, "entering sie flags %x",
atomic_read(&vcpu->arch.sie_block->cpuflags));
trace_kvm_s390_sie_enter(vcpu,
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 42601d6e166f..2fb9e63b8fc4 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -569,7 +569,7 @@ static void pfault_interrupt(struct ext_code ext_code,
subcode = ext_code.subcode;
if ((subcode & 0xff00) != __SUBCODE_MASK)
return;
- kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
+ inc_irq_stat(IRQEXT_PFL);
/* Get the token (= pid of the affected task). */
pid = sizeof(void *) == 4 ? param32 : param64;
rcu_read_lock();
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index 0cb385da202c..b5b2916895e0 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -233,7 +233,7 @@ static void hws_ext_handler(struct ext_code ext_code,
if (!(param32 & CPU_MF_INT_SF_MASK))
return;
- kstat_cpu(smp_processor_id()).irqs[EXTINT_CMS]++;
+ inc_irq_stat(IRQEXT_CMS);
atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32);
if (hws_wq)
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index ff49427e9941..60e0372545d2 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -160,35 +160,6 @@ int pci_proc_domain(struct pci_bus *bus)
}
EXPORT_SYMBOL_GPL(pci_proc_domain);
-/* Store PCI function information block */
-static int zpci_store_fib(struct zpci_dev *zdev, u8 *fc)
-{
- struct zpci_fib *fib;
- u8 status, cc;
-
- fib = (void *) get_zeroed_page(GFP_KERNEL);
- if (!fib)
- return -ENOMEM;
-
- do {
- cc = __stpcifc(zdev->fh, 0, fib, &status);
- if (cc == 2) {
- msleep(ZPCI_INSN_BUSY_DELAY);
- memset(fib, 0, PAGE_SIZE);
- }
- } while (cc == 2);
-
- if (cc)
- pr_err_once("%s: cc: %u status: %u\n",
- __func__, cc, status);
-
- /* Return PCI function controls */
- *fc = fib->fc;
-
- free_page((unsigned long) fib);
- return (cc) ? -EIO : 0;
-}
-
/* Modify PCI: Register adapter interruptions */
static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
u64 aibv)
@@ -469,7 +440,7 @@ static void zpci_irq_handler(void *dont, void *need)
int rescan = 0, max = aisb_max;
struct zdev_irq_map *imap;
- kstat_cpu(smp_processor_id()).irqs[IOINT_PCI]++;
+ inc_irq_stat(IRQIO_PCI);
sbit = start;
scan:
@@ -481,7 +452,7 @@ scan:
/* find vector bit */
imap = bucket->imap[sbit];
for_each_set_bit_left(mbit, &imap->aibv, imap->msi_vecs) {
- kstat_cpu(smp_processor_id()).irqs[IOINT_MSI]++;
+ inc_irq_stat(IRQIO_MSI);
clear_bit(63 - mbit, &imap->aibv);
spin_lock(&imap->lock);
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 6138468b420f..a547419907c3 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -13,8 +13,6 @@
#include <linux/pci.h>
#include <asm/pci_dma.h>
-static enum zpci_ioat_dtype zpci_ioat_dt = ZPCI_IOTA_RTTO;
-
static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 3fede4556c91..a0fa5791cd44 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -70,6 +70,16 @@
* OFF-ON : MMC
*/
+/*
+ * FSI - DA7210
+ *
+ * playback requires the following amixer settings:
+ *
+ * amixer set 'HeadPhone' 80
+ * amixer set 'Out Mixer Left DAC Left' on
+ * amixer set 'Out Mixer Right DAC Right' on
+ */
+
/* Heartbeat */
static unsigned char led_pos[] = { 0, 1, 2, 3 };
diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h
index 37924afa8d8a..bf9f44f17c29 100644
--- a/arch/sh/include/asm/elf.h
+++ b/arch/sh/include/asm/elf.h
@@ -203,9 +203,9 @@ extern void __kernel_vsyscall;
if (vdso_enabled) \
NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \
else \
- NEW_AUX_ENT(AT_IGNORE, 0);
+ NEW_AUX_ENT(AT_IGNORE, 0)
#else
-#define VSYSCALL_AUX_ENT
+#define VSYSCALL_AUX_ENT NEW_AUX_ENT(AT_IGNORE, 0)
#endif /* CONFIG_VSYSCALL */
#ifdef CONFIG_SH_FPU
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index b1320d55ca30..e699a12cdcca 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -39,7 +39,7 @@
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
-#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
+#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
/*
* Bit of SR register
diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h
index 1ee8946f0952..1cc7d3197143 100644
--- a/arch/sh/include/asm/processor_64.h
+++ b/arch/sh/include/asm/processor_64.h
@@ -47,7 +47,7 @@ pc; })
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
-#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
+#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
/*
* Bit of SR register
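
Note: TASK_SIZE / 3 is generally not page aligned, so the old definition handed mmap a misaligned search base; wrapping it in PAGE_ALIGN rounds it up to the next page boundary. Worked example assuming a 32-bit TASK_SIZE of 0x7c000000 and 4 KiB pages (the concrete TASK_SIZE is an assumption for illustration):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define TASK_SIZE	0x7c000000UL	/* assumed value, illustration only */

int main(void)
{
	unsigned long raw = TASK_SIZE / 3;		/* 0x29555555 - not aligned  */
	unsigned long aligned = PAGE_ALIGN(raw);	/* 0x29556000 - next page up */

	printf("raw     = %#lx\n", raw);
	printf("aligned = %#lx\n", aligned);
	return 0;
}
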
diff --git a/arch/sh/include/uapi/asm/unistd_32.h b/arch/sh/include/uapi/asm/unistd_32.h
index 9e465f246dc1..d13a1d623736 100644
--- a/arch/sh/include/uapi/asm/unistd_32.h
+++ b/arch/sh/include/uapi/asm/unistd_32.h
@@ -379,7 +379,8 @@
#define __NR_process_vm_readv 365
#define __NR_process_vm_writev 366
#define __NR_kcmp 367
+#define __NR_finit_module 368
-#define NR_syscalls 368
+#define NR_syscalls 369
#endif /* __ASM_SH_UNISTD_32_H */
diff --git a/arch/sh/include/uapi/asm/unistd_64.h b/arch/sh/include/uapi/asm/unistd_64.h
index 8e3a2edd284e..e6820c86e8c7 100644
--- a/arch/sh/include/uapi/asm/unistd_64.h
+++ b/arch/sh/include/uapi/asm/unistd_64.h
@@ -399,7 +399,8 @@
#define __NR_process_vm_readv 376
#define __NR_process_vm_writev 377
#define __NR_kcmp 378
+#define __NR_finit_module 379
-#define NR_syscalls 379
+#define NR_syscalls 380
#endif /* __ASM_SH_UNISTD_64_H */
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index fe97ae5e56f1..734234be2f01 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -385,3 +385,4 @@ ENTRY(sys_call_table)
.long sys_process_vm_readv /* 365 */
.long sys_process_vm_writev
.long sys_kcmp
+ .long sys_finit_module
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index 5c7b1c67bdc1..579fcb9a896b 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -405,3 +405,4 @@ sys_call_table:
.long sys_process_vm_readv
.long sys_process_vm_writev
.long sys_kcmp
+ .long sys_finit_module
diff --git a/arch/sh/lib/mcount.S b/arch/sh/lib/mcount.S
index 60164e65d665..52aa2011d753 100644
--- a/arch/sh/lib/mcount.S
+++ b/arch/sh/lib/mcount.S
@@ -294,6 +294,8 @@ stack_panic:
.align 2
.L_init_thread_union:
.long init_thread_union
+.L_ebss:
+ .long __bss_stop
.Lpanic:
.long panic
.Lpanic_s:
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
index cac719d1bc5c..62ced589bcf7 100644
--- a/arch/sparc/include/uapi/asm/unistd.h
+++ b/arch/sparc/include/uapi/asm/unistd.h
@@ -407,8 +407,9 @@
#define __NR_process_vm_writev 339
#define __NR_kern_features 340
#define __NR_kcmp 341
+#define __NR_finit_module 342
-#define NR_syscalls 342
+#define NR_syscalls 343
/* Bitmask values returned from kern_features system call. */
#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 04bacce76fe6..baf4366e2d6a 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -378,7 +378,8 @@ static void apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p)
/* Cook up fake bus resources for SUNW,simba PCI bridges which lack
* a proper 'ranges' property.
*/
-static void apb_fake_ranges(struct pci_dev *dev, struct pci_bus *bus,
+static void apb_fake_ranges(struct pci_dev *dev,
+ struct pci_bus *bus,
struct pci_pbm_info *pbm)
{
struct pci_bus_region region;
@@ -403,13 +404,15 @@ static void apb_fake_ranges(struct pci_dev *dev, struct pci_bus *bus,
pcibios_bus_to_resource(dev, res, &region);
}
-static void pci_of_scan_bus(struct pci_pbm_info *pbm, struct device_node *node,
+static void pci_of_scan_bus(struct pci_pbm_info *pbm,
+ struct device_node *node,
struct pci_bus *bus);
#define GET_64BIT(prop, i) ((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1])
static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
- struct device_node *node, struct pci_dev *dev)
+ struct device_node *node,
+ struct pci_dev *dev)
{
struct pci_bus *bus;
const u32 *busrange, *ranges;
@@ -500,7 +503,8 @@ after_ranges:
pci_of_scan_bus(pbm, node, bus);
}
-static void pci_of_scan_bus(struct pci_pbm_info *pbm, struct device_node *node,
+static void pci_of_scan_bus(struct pci_pbm_info *pbm,
+ struct device_node *node,
struct pci_bus *bus)
{
struct device_node *child;
diff --git a/arch/sparc/kernel/pci_psycho.c b/arch/sparc/kernel/pci_psycho.c
index b85238289717..c647634ead2b 100644
--- a/arch/sparc/kernel/pci_psycho.c
+++ b/arch/sparc/kernel/pci_psycho.c
@@ -366,7 +366,8 @@ static void pbm_config_busmastering(struct pci_pbm_info *pbm)
pci_config_write8(addr, 64);
}
-static void psycho_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
+static void psycho_scan_bus(struct pci_pbm_info *pbm,
+ struct device *parent)
{
pbm_config_busmastering(pbm);
pbm->is_66mhz_capable = 0;
diff --git a/arch/sparc/kernel/pci_sabre.c b/arch/sparc/kernel/pci_sabre.c
index 531186d7c9ab..6f00d27e8dac 100644
--- a/arch/sparc/kernel/pci_sabre.c
+++ b/arch/sparc/kernel/pci_sabre.c
@@ -442,7 +442,8 @@ static void sabre_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
sabre_register_error_handlers(pbm);
}
-static void sabre_pbm_init(struct pci_pbm_info *pbm, struct platform_device *op)
+static void sabre_pbm_init(struct pci_pbm_info *pbm,
+ struct platform_device *op)
{
psycho_pbm_init_common(pbm, op, "SABRE", PBM_CHIP_TYPE_SABRE);
pbm->pci_afsr = pbm->controller_regs + SABRE_PIOAFSR;
diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c
index 29e888158ae6..8f76f23dac38 100644
--- a/arch/sparc/kernel/pci_schizo.c
+++ b/arch/sparc/kernel/pci_schizo.c
@@ -1306,8 +1306,9 @@ static void schizo_pbm_hw_init(struct pci_pbm_info *pbm)
}
}
-static int schizo_pbm_init(struct pci_pbm_info *pbm, struct platform_device *op,
- u32 portid, int chip_type)
+static int schizo_pbm_init(struct pci_pbm_info *pbm,
+ struct platform_device *op, u32 portid,
+ int chip_type)
{
const struct linux_prom64_registers *regs;
struct device_node *dp = op->dev.of_node;
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 5147f574f125..6ac43c36bbbf 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -85,4 +85,4 @@ sys_call_table:
/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
-/*340*/ .long sys_ni_syscall, sys_kcmp
+/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index cdbd9b817751..1009ecb92678 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -86,7 +86,7 @@ sys_call_table32:
.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
/*330*/ .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
.word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
-/*340*/ .word sys_kern_features, sys_kcmp
+/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module
#endif /* CONFIG_COMPAT */
@@ -164,4 +164,4 @@ sys_call_table:
.word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
.word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
-/*340*/ .word sys_kern_features, sys_kcmp
+/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module
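Both sparc syscall tables gain an entry for the new finit_module() call (number 342 here), matching the sh hunks earlier in this series. Below is a hedged userspace sketch of exercising the new entry once libc headers expose SYS_finit_module; the error handling and usage message are illustrative only.

/* Hedged userspace sketch: loading a module through the new finit_module
 * syscall.  Requires a libc that defines SYS_finit_module; a raw
 * syscall(2) is used so no wrapper is assumed.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(int argc, char **argv)
{
        if (argc < 2) {
                fprintf(stderr, "usage: %s <module.ko>\n", argv[0]);
                return 1;
        }

        int fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* finit_module(fd, param_values, flags) */
        long ret = syscall(SYS_finit_module, fd, "", 0);
        if (ret)
                perror("finit_module");

        close(fd);
        return ret ? 1 : 0;
}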
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index b1942e222768..18e329ca108e 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -302,7 +302,7 @@ static efi_status_t setup_efi_pci(struct boot_params *params)
if (status != EFI_SUCCESS)
continue;
- if (!attributes & EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM)
+ if (!(attributes & EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM))
continue;
if (!pci->romimage || !pci->romsize)
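The eboot.c change is a pure operator-precedence fix: `!` binds tighter than `&`, so the old test reduced to `(!attributes) & ROM_BIT` and never skipped devices whose embedded-ROM attribute was clear. A small standalone sketch of the two forms; FLAG_ROM is a made-up stand-in for EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM.

/* Standalone illustration of the precedence bug fixed above.  Compilers
 * will warn about the intentionally buggy form (-Wlogical-not-parentheses).
 */
#include <stdio.h>

#define FLAG_ROM 0x4000

int main(void)
{
        unsigned long attributes = 0x0001;      /* some other bit set, ROM bit clear */

        /* Buggy form: (!attributes) is 0 for any non-zero value, so
         * 0 & FLAG_ROM is always 0 and the "no ROM" branch is never taken. */
        if (!attributes & FLAG_ROM)
                printf("buggy test says: no ROM\n");
        else
                printf("buggy test says: ROM present\n");

        /* Fixed form: test the bit first, then negate. */
        if (!(attributes & FLAG_ROM))
                printf("fixed test says: no ROM\n");
        else
                printf("fixed test says: ROM present\n");

        return 0;
}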
diff --git a/arch/x86/include/asm/idle.h b/arch/x86/include/asm/idle.h
index c5d1785373ed..02bab09707f2 100644
--- a/arch/x86/include/asm/idle.h
+++ b/arch/x86/include/asm/idle.h
@@ -1,13 +1,6 @@
#ifndef _ASM_X86_IDLE_H
#define _ASM_X86_IDLE_H
-#define IDLE_START 1
-#define IDLE_END 2
-
-struct notifier_block;
-void idle_notifier_register(struct notifier_block *n);
-void idle_notifier_unregister(struct notifier_block *n);
-
#ifdef CONFIG_X86_64
void enter_idle(void);
void exit_idle(void);
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 08b973f64032..9c2bd8bd4b4c 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -43,6 +43,7 @@
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>
+#include <asm/context_tracking.h>
static int kvmapf = 1;
@@ -121,6 +122,8 @@ void kvm_async_pf_task_wait(u32 token)
struct kvm_task_sleep_node n, *e;
DEFINE_WAIT(wait);
+ rcu_irq_enter();
+
spin_lock(&b->lock);
e = _find_apf_task(b, token);
if (e) {
@@ -128,6 +131,8 @@ void kvm_async_pf_task_wait(u32 token)
hlist_del(&e->link);
kfree(e);
spin_unlock(&b->lock);
+
+ rcu_irq_exit();
return;
}
@@ -152,13 +157,16 @@ void kvm_async_pf_task_wait(u32 token)
/*
* We cannot reschedule. So halt.
*/
+ rcu_irq_exit();
native_safe_halt();
+ rcu_irq_enter();
local_irq_disable();
}
}
if (!n.halted)
finish_wait(&n.wq, &wait);
+ rcu_irq_exit();
return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
@@ -252,10 +260,10 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
break;
case KVM_PV_REASON_PAGE_NOT_PRESENT:
/* page is swapped out by the host. */
- rcu_irq_enter();
+ exception_enter(regs);
exit_idle();
kvm_async_pf_task_wait((u32)read_cr2());
- rcu_irq_exit();
+ exception_exit(regs);
break;
case KVM_PV_REASON_PAGE_READY:
rcu_irq_enter();
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 2ed787f15bf0..5c38b7a70f81 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -40,19 +40,6 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
#ifdef CONFIG_X86_64
static DEFINE_PER_CPU(unsigned char, is_idle);
-static ATOMIC_NOTIFIER_HEAD(idle_notifier);
-
-void idle_notifier_register(struct notifier_block *n)
-{
- atomic_notifier_chain_register(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_register);
-
-void idle_notifier_unregister(struct notifier_block *n)
-{
- atomic_notifier_chain_unregister(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_unregister);
#endif
struct kmem_cache *task_xstate_cachep;
@@ -287,14 +274,14 @@ static inline void play_dead(void)
void enter_idle(void)
{
this_cpu_write(is_idle, 1);
- atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
+ idle_notifier_call_chain(IDLE_START);
}
static void __exit_idle(void)
{
if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
return;
- atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+ idle_notifier_call_chain(IDLE_END);
}
/* Called from interrupts to signify idle end */
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 23ddd558fbd5..00f6c1472b85 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -610,6 +610,83 @@ static __init void reserve_ibft_region(void)
static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
+static bool __init snb_gfx_workaround_needed(void)
+{
+#ifdef CONFIG_PCI
+ int i;
+ u16 vendor, devid;
+ static const __initconst u16 snb_ids[] = {
+ 0x0102,
+ 0x0112,
+ 0x0122,
+ 0x0106,
+ 0x0116,
+ 0x0126,
+ 0x010a,
+ };
+
+ /* Assume no workaround is needed if something weird is going on with PCI */
+ if (!early_pci_allowed())
+ return false;
+
+ vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
+ if (vendor != 0x8086)
+ return false;
+
+ devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
+ for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
+ if (devid == snb_ids[i])
+ return true;
+#endif
+
+ return false;
+}
+
+/*
+ * Sandy Bridge graphics has trouble with certain ranges, exclude
+ * them from allocation.
+ */
+static void __init trim_snb_memory(void)
+{
+ static const __initconst unsigned long bad_pages[] = {
+ 0x20050000,
+ 0x20110000,
+ 0x20130000,
+ 0x20138000,
+ 0x40004000,
+ };
+ int i;
+
+ if (!snb_gfx_workaround_needed())
+ return;
+
+ printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");
+
+ /*
+ * Reserve all memory below the 1 MB mark that has not
+ * already been reserved.
+ */
+ memblock_reserve(0, 1<<20);
+
+ for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
+ if (memblock_reserve(bad_pages[i], PAGE_SIZE))
+ printk(KERN_WARNING "failed to reserve 0x%08lx\n",
+ bad_pages[i]);
+ }
+}
+
+/*
+ * Here we put platform-specific memory range workarounds, i.e.
+ * memory known to be corrupt or that otherwise needs to be reserved on
+ * specific platforms.
+ *
+ * If this gets used more widely it could use a real dispatch mechanism.
+ */
+static void __init trim_platform_memory_ranges(void)
+{
+ trim_snb_memory();
+}
+
static void __init trim_bios_range(void)
{
/*
@@ -630,6 +707,7 @@ static void __init trim_bios_range(void)
* take them out.
*/
e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
+
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
}
@@ -908,6 +986,8 @@ void __init setup_arch(char **cmdline_p)
setup_real_mode();
+ trim_platform_memory_ranges();
+
init_gbpages();
/* max_pfn_mapped is updated here */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 76f54461f7cb..c243b81e3c74 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -120,7 +120,7 @@ struct kvm_shared_msrs {
};
static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
-static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
+static struct kvm_shared_msrs __percpu *shared_msrs;
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "pf_fixed", VCPU_STAT(pf_fixed) },
@@ -191,10 +191,10 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
static void shared_msr_update(unsigned slot, u32 msr)
{
- struct kvm_shared_msrs *smsr;
u64 value;
+ unsigned int cpu = smp_processor_id();
+ struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
- smsr = &__get_cpu_var(shared_msrs);
/* only read, and nobody should modify it at this time,
* so don't need lock */
if (slot >= shared_msrs_global.nr) {
@@ -226,7 +226,8 @@ static void kvm_shared_msr_cpu_online(void)
void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{
- struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
+ unsigned int cpu = smp_processor_id();
+ struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
if (((value ^ smsr->values[slot].curr) & mask) == 0)
return;
@@ -242,7 +243,8 @@ EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
static void drop_user_return_notifiers(void *ignore)
{
- struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
+ unsigned int cpu = smp_processor_id();
+ struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
if (smsr->registered)
kvm_on_user_return(&smsr->urn);
@@ -5233,9 +5235,16 @@ int kvm_arch_init(void *opaque)
goto out;
}
+ r = -ENOMEM;
+ shared_msrs = alloc_percpu(struct kvm_shared_msrs);
+ if (!shared_msrs) {
+ printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
+ goto out;
+ }
+
r = kvm_mmu_module_init();
if (r)
- goto out;
+ goto out_free_percpu;
kvm_set_mmio_spte_mask();
kvm_init_msr_list();
@@ -5258,6 +5267,8 @@ int kvm_arch_init(void *opaque)
return 0;
+out_free_percpu:
+ free_percpu(shared_msrs);
out:
return r;
}
@@ -5275,6 +5286,7 @@ void kvm_arch_exit(void)
#endif
kvm_x86_ops = NULL;
kvm_mmu_module_exit();
+ free_percpu(shared_msrs);
}
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
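The shared_msrs conversion swaps a statically sized DEFINE_PER_CPU() area for one obtained with alloc_percpu(), so the memory no longer sits in the per-CPU section for the lifetime of the kernel and can be released in kvm_arch_exit(). Below is a minimal module-style sketch of the same dynamic per-CPU pattern; the names are made up and it is not the KVM code itself.

/* Minimal sketch of the alloc_percpu()/per_cpu_ptr()/free_percpu() pattern
 * used above, written as a stand-alone module with made-up names.
 */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/smp.h>

struct demo_counter {
        unsigned long hits;
};

static struct demo_counter __percpu *demo_counters;

static int __init percpu_demo_init(void)
{
        int cpu;

        demo_counters = alloc_percpu(struct demo_counter);
        if (!demo_counters)
                return -ENOMEM;

        /* Touch each CPU's private copy once. */
        for_each_possible_cpu(cpu)
                per_cpu_ptr(demo_counters, cpu)->hits = 0;

        return 0;
}

static void __exit percpu_demo_exit(void)
{
        /* Unlike DEFINE_PER_CPU data, this memory can be returned. */
        free_percpu(demo_counters);
}

module_init(percpu_demo_init);
module_exit(percpu_demo_exit);
MODULE_LICENSE("GPL");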
diff --git a/block/genhd.c b/block/genhd.c
index 9a289d7c84bb..32469daeb5c1 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1103,6 +1103,22 @@ static void disk_release(struct device *dev)
blk_put_queue(disk->queue);
kfree(disk);
}
+
+static int disk_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+ struct disk_part_iter piter;
+ struct hd_struct *part;
+ int cnt = 0;
+
+ disk_part_iter_init(&piter, disk, 0);
+ while ((part = disk_part_iter_next(&piter)))
+ cnt++;
+ disk_part_iter_exit(&piter);
+ add_uevent_var(env, "NPARTS=%u", cnt);
+ return 0;
+}
+
struct class block_class = {
.name = "block",
};
@@ -1121,6 +1137,7 @@ static struct device_type disk_type = {
.groups = disk_attr_groups,
.release = disk_release,
.devnode = block_devnode,
+ .uevent = disk_uevent,
};
#ifdef CONFIG_PROC_FS
diff --git a/block/partition-generic.c b/block/partition-generic.c
index f1d14519cc04..229ae3fecae1 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -216,10 +216,21 @@ static void part_release(struct device *dev)
kfree(p);
}
+static int part_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct hd_struct *part = dev_to_part(dev);
+
+ add_uevent_var(env, "PARTN=%u", part->partno);
+ if (part->info && part->info->volname[0])
+ add_uevent_var(env, "PARTNAME=%s", part->info->volname);
+ return 0;
+}
+
struct device_type part_type = {
.name = "partition",
.groups = part_attr_groups,
.release = part_release,
+ .uevent = part_uevent,
};
static void delete_partition_rcu_cb(struct rcu_head *head)
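Together with the disk_uevent() hook above, partitions now advertise PARTN and, when a label exists, PARTNAME directly in their uevent environment, so userspace can pick them up without rescanning sysfs. A hedged userspace sketch that simply dumps a partition's uevent file; the default path is an example only.

/* Hedged sketch: print the KEY=VALUE pairs a block device exports through
 * its sysfs uevent file, including the PARTN/PARTNAME variables added
 * above.  The default path is an example; pass a real one as argv[1].
 */
#include <stdio.h>

int main(int argc, char **argv)
{
        const char *path = argc > 1 ? argv[1]
                                    : "/sys/class/block/sda1/uevent";
        char line[256];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return 1;
        }

        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);    /* e.g. PARTN=1, PARTNAME=boot */

        fclose(f);
        return 0;
}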
diff --git a/drivers/Kconfig b/drivers/Kconfig
index f5fb0722a63a..97e63de3e659 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -98,6 +98,8 @@ source "drivers/memstick/Kconfig"
source "drivers/leds/Kconfig"
+source "drivers/switch/Kconfig"
+
source "drivers/accessibility/Kconfig"
source "drivers/infiniband/Kconfig"
@@ -158,4 +160,6 @@ source "drivers/irqchip/Kconfig"
source "drivers/ipack/Kconfig"
+source "drivers/gator/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 7863b9fee50b..51810fc3ff98 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -108,6 +108,7 @@ obj-$(CONFIG_CPU_IDLE) += cpuidle/
obj-y += mmc/
obj-$(CONFIG_MEMSTICK) += memstick/
obj-y += leds/
+obj-$(CONFIG_SWITCH) += switch/
obj-$(CONFIG_INFINIBAND) += infiniband/
obj-$(CONFIG_SGI_SN) += sn/
obj-y += firmware/
@@ -146,3 +147,5 @@ obj-$(CONFIG_MEMORY) += memory/
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_IPACK_BUS) += ipack/
+
+obj-$(CONFIG_GATOR) += gator/
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 95af6f674a6c..35da18113216 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -297,7 +297,7 @@ static int acpi_platform_notify(struct device *dev)
if (!ret) {
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- acpi_get_name(dev->acpi_handle, ACPI_FULL_PATHNAME, &buffer);
+ acpi_get_name(ACPI_HANDLE(dev), ACPI_FULL_PATHNAME, &buffer);
DBG("Device %s -> %s\n", dev_name(dev), (char *)buffer.pointer);
kfree(buffer.pointer);
} else
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index c8b453939da2..2f5f34c9d630 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -281,4 +281,30 @@ config CMA_AREAS
endif
+config SYNC
+ bool "Synchronization framework"
+ default n
+ select ANON_INODES
+ help
+ This option enables the framework for synchronization between multiple
+ drivers. Sync implementations can take advantage of hardware
+ synchronization built into devices like GPUs.
+
+config SW_SYNC
+ bool "Software synchronization objects"
+ default n
+ depends on SYNC
+ help
+ A sync object driver that uses a 32-bit counter to coordinate
+ synchronization. Useful when there is no hardware primitive backing
+ the synchronization.
+
+config SW_SYNC_USER
+ bool "Userspace API for SW_SYNC"
+ default n
+ depends on SW_SYNC
+ help
+ Provides a user space API to the sw sync object.
+ *WARNING* improper use of this can result in deadlocking kernel
+ drivers from userspace.
endmenu
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 5aa2d703d19f..1128a612e24d 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -22,5 +22,8 @@ obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
obj-$(CONFIG_REGMAP) += regmap/
obj-$(CONFIG_SOC_BUS) += soc.o
+obj-$(CONFIG_SYNC) += sync.o
+obj-$(CONFIG_SW_SYNC) += sw_sync.o
+
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 63452943abd1..fb10728f6372 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -224,7 +224,7 @@ static void cpu_device_release(struct device *dev)
* by the cpu device.
*
* Never copy this way of doing things, or you too will be made fun of
- * on the linux-kerenl list, you have been warned.
+ * on the linux-kernel list, you have been warned.
*/
}
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index d81460309182..b392b353be39 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -305,7 +305,7 @@ static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf
char *buf;
size = fw_file_size(file);
- if (size < 0)
+ if (size <= 0)
return false;
buf = vmalloc(size);
if (!buf)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 2b7f77d3fcb0..72c4939678dd 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -29,6 +29,8 @@
#include <linux/async.h>
#include <linux/suspend.h>
#include <linux/cpuidle.h>
+#include <linux/timer.h>
+
#include "../base.h"
#include "power.h"
@@ -54,6 +56,12 @@ struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;
+static void dpm_drv_timeout(unsigned long data);
+struct dpm_drv_wd_data {
+ struct device *dev;
+ struct task_struct *tsk;
+};
+
static int async_error;
/**
@@ -665,6 +673,30 @@ static bool is_async(struct device *dev)
}
/**
+ * dpm_drv_timeout - Driver suspend / resume watchdog handler
+ * @data: the struct dpm_drv_wd_data of the device that timed out
+ *
+ * Called when a driver has timed out suspending or resuming.
+ * There is not much we can do here to recover, so
+ * BUG() out for a crash dump.
+ *
+ */
+static void dpm_drv_timeout(unsigned long data)
+{
+ struct dpm_drv_wd_data *wd_data = (void *)data;
+ struct device *dev = wd_data->dev;
+ struct task_struct *tsk = wd_data->tsk;
+
+ printk(KERN_EMERG "**** DPM device timeout: %s (%s)\n", dev_name(dev),
+ (dev->driver ? dev->driver->name : "no driver"));
+
+ printk(KERN_EMERG "dpm suspend stack:\n");
+ show_stack(tsk, NULL);
+
+ BUG();
+}
+
+/**
* dpm_resume - Execute "resume" callbacks for non-sysdev devices.
* @state: PM transition of the system being carried out.
*
@@ -1055,6 +1087,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
pm_callback_t callback = NULL;
char *info = NULL;
int error = 0;
+ struct timer_list timer;
+ struct dpm_drv_wd_data data;
dpm_wait_for_children(dev, async);
@@ -1078,6 +1112,14 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (dev->power.syscore)
goto Complete;
+ data.dev = dev;
+ data.tsk = get_current();
+ init_timer_on_stack(&timer);
+ timer.expires = jiffies + HZ * 12;
+ timer.function = dpm_drv_timeout;
+ timer.data = (unsigned long)&data;
+ add_timer(&timer);
+
device_lock(dev);
if (dev->pm_domain) {
@@ -1133,6 +1175,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
device_unlock(dev);
+ del_timer_sync(&timer);
+ destroy_timer_on_stack(&timer);
Complete:
complete_all(&dev->power.completion);
if (error)
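__device_suspend() now arms a 12-second on-stack watchdog around each device's suspend callback; if the callback stalls, dpm_drv_timeout() names the device, dumps the suspending task's stack and BUG()s so a crash dump is captured. Below is a hedged sketch of that on-stack timer pattern with made-up names and a shorter timeout, assuming the pre-4.15 timer API (.function/.data) used by the code above.

/* Hedged sketch of the on-stack watchdog pattern used above; names and
 * the 5-second timeout are illustrative.  Assumes the old timer API with
 * a .function(unsigned long data) callback, as in the patch.
 */
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/bug.h>

struct watchdog_ctx {
        struct task_struct *tsk;
        const char *what;
};

static void watchdog_fired(unsigned long data)
{
        struct watchdog_ctx *ctx = (struct watchdog_ctx *)data;

        pr_emerg("%s timed out\n", ctx->what);
        show_stack(ctx->tsk, NULL);
        BUG();
}

static void guarded_slow_operation(void (*op)(void), const char *what)
{
        struct watchdog_ctx ctx = { .tsk = current, .what = what };
        struct timer_list timer;

        init_timer_on_stack(&timer);
        timer.expires = jiffies + 5 * HZ;       /* illustrative timeout */
        timer.function = watchdog_fired;
        timer.data = (unsigned long)&ctx;
        add_timer(&timer);

        op();                           /* the operation being watched */

        del_timer_sync(&timer);
        destroy_timer_on_stack(&timer);
}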
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 07aad786f817..46a213a596e2 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -56,6 +56,19 @@ static const struct file_operations regmap_name_fops = {
.llseek = default_llseek,
};
+static void regmap_debugfs_free_dump_cache(struct regmap *map)
+{
+ struct regmap_debugfs_off_cache *c;
+
+ while (!list_empty(&map->debugfs_off_cache)) {
+ c = list_first_entry(&map->debugfs_off_cache,
+ struct regmap_debugfs_off_cache,
+ list);
+ list_del(&c->list);
+ kfree(c);
+ }
+}
+
/*
* Work out where the start offset maps into register numbers, bearing
* in mind that we suppress hidden registers.
@@ -91,8 +104,10 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
/* No cache entry? Start a new one */
if (!c) {
c = kzalloc(sizeof(*c), GFP_KERNEL);
- if (!c)
- break;
+ if (!c) {
+ regmap_debugfs_free_dump_cache(map);
+ return base;
+ }
c->min = p;
c->base_reg = i;
}
@@ -101,14 +116,34 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
}
}
+ /* Close the last entry off if we didn't scan beyond it */
+ if (c) {
+ c->max = p - 1;
+ list_add_tail(&c->list,
+ &map->debugfs_off_cache);
+ } else {
+ return base;
+ }
+
+ /*
+ * This should never happen; we return above if we fail to
+ * allocate and we should never be in this code if there are
+ * no registers at all.
+ */
+ if (list_empty(&map->debugfs_off_cache)) {
+ WARN_ON(list_empty(&map->debugfs_off_cache));
+ return base;
+ }
+
/* Find the relevant block */
list_for_each_entry(c, &map->debugfs_off_cache, list) {
- if (*pos >= c->min && *pos <= c->max) {
+ if (from >= c->min && from <= c->max) {
*pos = c->min;
return c->base_reg;
}
- ret = c->max;
+ *pos = c->min;
+ ret = c->base_reg;
}
return ret;
@@ -387,16 +422,8 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
void regmap_debugfs_exit(struct regmap *map)
{
- struct regmap_debugfs_off_cache *c;
-
debugfs_remove_recursive(map->debugfs);
- while (!list_empty(&map->debugfs_off_cache)) {
- c = list_first_entry(&map->debugfs_off_cache,
- struct regmap_debugfs_off_cache,
- list);
- list_del(&c->list);
- kfree(c);
- }
+ regmap_debugfs_free_dump_cache(map);
kfree(map->debugfs_name);
}
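The regmap-debugfs change factors the cache teardown into regmap_debugfs_free_dump_cache() so that a mid-scan allocation failure frees everything built so far instead of leaving a half-populated cache for the exit path. A standalone sketch of the same drain-on-failure idiom using a toy singly linked list; all names are made up.

/* Standalone sketch of the "free the whole cache when one allocation
 * fails" idiom from the hunk above, using a toy list so it runs in
 * userspace.
 */
#include <stdio.h>
#include <stdlib.h>

struct cache_entry {
        unsigned int base;
        struct cache_entry *next;
};

static struct cache_entry *cache_head;

static void free_cache(void)
{
        while (cache_head) {
                struct cache_entry *c = cache_head;

                cache_head = c->next;
                free(c);
        }
}

static int build_cache(unsigned int n)
{
        unsigned int i;

        for (i = 0; i < n; i++) {
                struct cache_entry *c = malloc(sizeof(*c));

                if (!c) {
                        /* Failure mid-scan: drop everything built so far
                         * rather than leaving a half-populated cache. */
                        free_cache();
                        return -1;
                }
                c->base = i * 0x100;
                c->next = cache_head;
                cache_head = c;
        }
        return 0;
}

int main(void)
{
        if (build_cache(16))
                return 1;
        printf("cache built\n");
        free_cache();
        return 0;
}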
diff --git a/drivers/base/sw_sync.c b/drivers/base/sw_sync.c
new file mode 100644
index 000000000000..b4d8529ee892
--- /dev/null
+++ b/drivers/base/sw_sync.c
@@ -0,0 +1,262 @@
+/*
+ * drivers/base/sw_sync.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/sw_sync.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+
+static int sw_sync_cmp(u32 a, u32 b)
+{
+ if (a == b)
+ return 0;
+
+ return ((s32)a - (s32)b) < 0 ? -1 : 1;
+}
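sw_sync_cmp() compares two 32-bit sequence numbers through a signed cast, so the ordering stays correct even after the timeline counter wraps past 0xffffffff. A standalone check of that wrap-safe comparison (written here with an unsigned subtract, which yields the same ordering):

/* Standalone check of the wrap-safe 32-bit comparison used by
 * sw_sync_cmp() above: 0x00000002 is "after" 0xfffffffe even though it
 * is numerically smaller.  Returns -1 if a is before b, 1 if after.
 */
#include <stdio.h>
#include <stdint.h>

static int seq_cmp(uint32_t a, uint32_t b)
{
        if (a == b)
                return 0;
        return ((int32_t)(a - b)) < 0 ? -1 : 1;
}

int main(void)
{
        printf("cmp(5, 3)                   = %d\n", seq_cmp(5, 3));
        printf("cmp(3, 5)                   = %d\n", seq_cmp(3, 5));
        printf("cmp(0x00000002, 0xfffffffe) = %d\n",
               seq_cmp(0x00000002u, 0xfffffffeu));
        return 0;
}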
+
+struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value)
+{
+ struct sw_sync_pt *pt;
+
+ pt = (struct sw_sync_pt *)
+ sync_pt_create(&obj->obj, sizeof(struct sw_sync_pt));
+
+ pt->value = value;
+
+ return (struct sync_pt *)pt;
+}
+EXPORT_SYMBOL(sw_sync_pt_create);
+
+static struct sync_pt *sw_sync_pt_dup(struct sync_pt *sync_pt)
+{
+ struct sw_sync_pt *pt = (struct sw_sync_pt *) sync_pt;
+ struct sw_sync_timeline *obj =
+ (struct sw_sync_timeline *)sync_pt->parent;
+
+ return (struct sync_pt *) sw_sync_pt_create(obj, pt->value);
+}
+
+static int sw_sync_pt_has_signaled(struct sync_pt *sync_pt)
+{
+ struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
+ struct sw_sync_timeline *obj =
+ (struct sw_sync_timeline *)sync_pt->parent;
+
+ return sw_sync_cmp(obj->value, pt->value) >= 0;
+}
+
+static int sw_sync_pt_compare(struct sync_pt *a, struct sync_pt *b)
+{
+ struct sw_sync_pt *pt_a = (struct sw_sync_pt *)a;
+ struct sw_sync_pt *pt_b = (struct sw_sync_pt *)b;
+
+ return sw_sync_cmp(pt_a->value, pt_b->value);
+}
+
+static int sw_sync_fill_driver_data(struct sync_pt *sync_pt,
+ void *data, int size)
+{
+ struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
+
+ if (size < sizeof(pt->value))
+ return -ENOMEM;
+
+ memcpy(data, &pt->value, sizeof(pt->value));
+
+ return sizeof(pt->value);
+}
+
+static void sw_sync_timeline_value_str(struct sync_timeline *sync_timeline,
+ char *str, int size)
+{
+ struct sw_sync_timeline *timeline =
+ (struct sw_sync_timeline *)sync_timeline;
+ snprintf(str, size, "%d", timeline->value);
+}
+
+static void sw_sync_pt_value_str(struct sync_pt *sync_pt,
+ char *str, int size)
+{
+ struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
+ snprintf(str, size, "%d", pt->value);
+}
+
+struct sync_timeline_ops sw_sync_timeline_ops = {
+ .driver_name = "sw_sync",
+ .dup = sw_sync_pt_dup,
+ .has_signaled = sw_sync_pt_has_signaled,
+ .compare = sw_sync_pt_compare,
+ .fill_driver_data = sw_sync_fill_driver_data,
+ .timeline_value_str = sw_sync_timeline_value_str,
+ .pt_value_str = sw_sync_pt_value_str,
+};
+
+
+struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
+{
+ struct sw_sync_timeline *obj = (struct sw_sync_timeline *)
+ sync_timeline_create(&sw_sync_timeline_ops,
+ sizeof(struct sw_sync_timeline),
+ name);
+
+ return obj;
+}
+EXPORT_SYMBOL(sw_sync_timeline_create);
+
+void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
+{
+ obj->value += inc;
+
+ sync_timeline_signal(&obj->obj);
+}
+EXPORT_SYMBOL(sw_sync_timeline_inc);
+
+#ifdef CONFIG_SW_SYNC_USER
+/* *WARNING*
+ *
+ * improper use of this can result in deadlocking kernel drivers from userspace.
+ */
+
+/* opening sw_sync creates a new sync obj */
+int sw_sync_open(struct inode *inode, struct file *file)
+{
+ struct sw_sync_timeline *obj;
+ char task_comm[TASK_COMM_LEN];
+
+ get_task_comm(task_comm, current);
+
+ obj = sw_sync_timeline_create(task_comm);
+ if (obj == NULL)
+ return -ENOMEM;
+
+ file->private_data = obj;
+
+ return 0;
+}
+
+int sw_sync_release(struct inode *inode, struct file *file)
+{
+ struct sw_sync_timeline *obj = file->private_data;
+ sync_timeline_destroy(&obj->obj);
+ return 0;
+}
+
+long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj, unsigned long arg)
+{
+ int fd = get_unused_fd();
+ int err;
+ struct sync_pt *pt;
+ struct sync_fence *fence;
+ struct sw_sync_create_fence_data data;
+
+ if (fd < 0)
+ return fd;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+ err = -EFAULT;
+ goto err;
+ }
+
+ pt = sw_sync_pt_create(obj, data.value);
+ if (pt == NULL) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ data.name[sizeof(data.name) - 1] = '\0';
+ fence = sync_fence_create(data.name, pt);
+ if (fence == NULL) {
+ sync_pt_free(pt);
+ err = -ENOMEM;
+ goto err;
+ }
+
+ data.fence = fd;
+ if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+ sync_fence_put(fence);
+ err = -EFAULT;
+ goto err;
+ }
+
+ sync_fence_install(fence, fd);
+
+ return 0;
+
+err:
+ put_unused_fd(fd);
+ return err;
+}
+
+long sw_sync_ioctl_inc(struct sw_sync_timeline *obj, unsigned long arg)
+{
+ u32 value;
+
+ if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
+ return -EFAULT;
+
+ sw_sync_timeline_inc(obj, value);
+
+ return 0;
+}
+
+long sw_sync_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct sw_sync_timeline *obj = file->private_data;
+
+ switch (cmd) {
+ case SW_SYNC_IOC_CREATE_FENCE:
+ return sw_sync_ioctl_create_fence(obj, arg);
+
+ case SW_SYNC_IOC_INC:
+ return sw_sync_ioctl_inc(obj, arg);
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+static const struct file_operations sw_sync_fops = {
+ .owner = THIS_MODULE,
+ .open = sw_sync_open,
+ .release = sw_sync_release,
+ .unlocked_ioctl = sw_sync_ioctl,
+};
+
+static struct miscdevice sw_sync_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "sw_sync",
+ .fops = &sw_sync_fops,
+};
+
+int __init sw_sync_device_init(void)
+{
+ return misc_register(&sw_sync_dev);
+}
+
+void __exit sw_sync_device_remove(void)
+{
+ misc_deregister(&sw_sync_dev);
+}
+
+module_init(sw_sync_device_init);
+module_exit(sw_sync_device_remove);
+
+#endif /* CONFIG_SW_SYNC_USER */
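With CONFIG_SW_SYNC_USER, each open of /dev/sw_sync is its own timeline: SW_SYNC_IOC_CREATE_FENCE returns a fence fd that signals once the counter reaches the requested value, and SW_SYNC_IOC_INC advances the counter. Below is a hedged userspace sketch of that flow; the struct layout and ioctl numbers are assumptions mirroring what the matching linux/sw_sync.h uapi header is expected to define, and should be checked against the header actually shipped with this series.

/* Hedged userspace sketch of the /dev/sw_sync API added above.  The
 * structure layout and ioctl numbers below are assumptions copied from
 * what linux/sw_sync.h is expected to contain; verify them against the
 * installed header before relying on this.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

struct sw_sync_create_fence_data {
        uint32_t value;
        char     name[32];
        int32_t  fence;         /* fd of the new fence (output) */
};

#define SW_SYNC_IOC_MAGIC        'W'
#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0, \
                                       struct sw_sync_create_fence_data)
#define SW_SYNC_IOC_INC          _IOW(SW_SYNC_IOC_MAGIC, 1, uint32_t)

int main(void)
{
        int timeline = open("/dev/sw_sync", O_RDWR);
        struct sw_sync_create_fence_data data = { .value = 1 };
        struct pollfd pfd;
        uint32_t inc = 1;

        if (timeline < 0) {
                perror("/dev/sw_sync");
                return 1;
        }

        strcpy(data.name, "demo_fence");
        if (ioctl(timeline, SW_SYNC_IOC_CREATE_FENCE, &data)) {
                perror("SW_SYNC_IOC_CREATE_FENCE");
                return 1;
        }

        /* Not signaled yet: the timeline counter is still 0. */
        pfd.fd = data.fence;
        pfd.events = POLLIN;
        printf("before inc: poll = %d\n", poll(&pfd, 1, 0));

        /* Advance the timeline to 1; the fence should now signal. */
        ioctl(timeline, SW_SYNC_IOC_INC, &inc);
        printf("after inc:  poll = %d\n", poll(&pfd, 1, -1));

        close(data.fence);
        close(timeline);
        return 0;
}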
diff --git a/drivers/base/sync.c b/drivers/base/sync.c
new file mode 100644
index 000000000000..809d02b21e08
--- /dev/null
+++ b/drivers/base/sync.c
@@ -0,0 +1,1009 @@
+/*
+ * drivers/base/sync.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/sync.h>
+#include <linux/uaccess.h>
+
+#include <linux/anon_inodes.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/sync.h>
+
+static void sync_fence_signal_pt(struct sync_pt *pt);
+static int _sync_pt_has_signaled(struct sync_pt *pt);
+static void sync_fence_free(struct kref *kref);
+static void sync_dump(void);
+
+static LIST_HEAD(sync_timeline_list_head);
+static DEFINE_SPINLOCK(sync_timeline_list_lock);
+
+static LIST_HEAD(sync_fence_list_head);
+static DEFINE_SPINLOCK(sync_fence_list_lock);
+
+struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
+ int size, const char *name)
+{
+ struct sync_timeline *obj;
+ unsigned long flags;
+
+ if (size < sizeof(struct sync_timeline))
+ return NULL;
+
+ obj = kzalloc(size, GFP_KERNEL);
+ if (obj == NULL)
+ return NULL;
+
+ kref_init(&obj->kref);
+ obj->ops = ops;
+ strlcpy(obj->name, name, sizeof(obj->name));
+
+ INIT_LIST_HEAD(&obj->child_list_head);
+ spin_lock_init(&obj->child_list_lock);
+
+ INIT_LIST_HEAD(&obj->active_list_head);
+ spin_lock_init(&obj->active_list_lock);
+
+ spin_lock_irqsave(&sync_timeline_list_lock, flags);
+ list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
+ spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+
+ return obj;
+}
+EXPORT_SYMBOL(sync_timeline_create);
+
+static void sync_timeline_free(struct kref *kref)
+{
+ struct sync_timeline *obj =
+ container_of(kref, struct sync_timeline, kref);
+ unsigned long flags;
+
+ if (obj->ops->release_obj)
+ obj->ops->release_obj(obj);
+
+ spin_lock_irqsave(&sync_timeline_list_lock, flags);
+ list_del(&obj->sync_timeline_list);
+ spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+
+ kfree(obj);
+}
+
+void sync_timeline_destroy(struct sync_timeline *obj)
+{
+ obj->destroyed = true;
+
+ /*
+ * If this is not the last reference, signal any children
+ * that their parent is going away.
+ */
+
+ if (!kref_put(&obj->kref, sync_timeline_free))
+ sync_timeline_signal(obj);
+}
+EXPORT_SYMBOL(sync_timeline_destroy);
+
+static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
+{
+ unsigned long flags;
+
+ pt->parent = obj;
+
+ spin_lock_irqsave(&obj->child_list_lock, flags);
+ list_add_tail(&pt->child_list, &obj->child_list_head);
+ spin_unlock_irqrestore(&obj->child_list_lock, flags);
+}
+
+static void sync_timeline_remove_pt(struct sync_pt *pt)
+{
+ struct sync_timeline *obj = pt->parent;
+ unsigned long flags;
+
+ spin_lock_irqsave(&obj->active_list_lock, flags);
+ if (!list_empty(&pt->active_list))
+ list_del_init(&pt->active_list);
+ spin_unlock_irqrestore(&obj->active_list_lock, flags);
+
+ spin_lock_irqsave(&obj->child_list_lock, flags);
+ if (!list_empty(&pt->child_list)) {
+ list_del_init(&pt->child_list);
+ }
+ spin_unlock_irqrestore(&obj->child_list_lock, flags);
+}
+
+void sync_timeline_signal(struct sync_timeline *obj)
+{
+ unsigned long flags;
+ LIST_HEAD(signaled_pts);
+ struct list_head *pos, *n;
+
+ trace_sync_timeline(obj);
+
+ spin_lock_irqsave(&obj->active_list_lock, flags);
+
+ list_for_each_safe(pos, n, &obj->active_list_head) {
+ struct sync_pt *pt =
+ container_of(pos, struct sync_pt, active_list);
+
+ if (_sync_pt_has_signaled(pt)) {
+ list_del_init(pos);
+ list_add(&pt->signaled_list, &signaled_pts);
+ kref_get(&pt->fence->kref);
+ }
+ }
+
+ spin_unlock_irqrestore(&obj->active_list_lock, flags);
+
+ list_for_each_safe(pos, n, &signaled_pts) {
+ struct sync_pt *pt =
+ container_of(pos, struct sync_pt, signaled_list);
+
+ list_del_init(pos);
+ sync_fence_signal_pt(pt);
+ kref_put(&pt->fence->kref, sync_fence_free);
+ }
+}
+EXPORT_SYMBOL(sync_timeline_signal);
+
+struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
+{
+ struct sync_pt *pt;
+
+ if (size < sizeof(struct sync_pt))
+ return NULL;
+
+ pt = kzalloc(size, GFP_KERNEL);
+ if (pt == NULL)
+ return NULL;
+
+ INIT_LIST_HEAD(&pt->active_list);
+ kref_get(&parent->kref);
+ sync_timeline_add_pt(parent, pt);
+
+ return pt;
+}
+EXPORT_SYMBOL(sync_pt_create);
+
+void sync_pt_free(struct sync_pt *pt)
+{
+ if (pt->parent->ops->free_pt)
+ pt->parent->ops->free_pt(pt);
+
+ sync_timeline_remove_pt(pt);
+
+ kref_put(&pt->parent->kref, sync_timeline_free);
+
+ kfree(pt);
+}
+EXPORT_SYMBOL(sync_pt_free);
+
+/* call with pt->parent->active_list_lock held */
+static int _sync_pt_has_signaled(struct sync_pt *pt)
+{
+ int old_status = pt->status;
+
+ if (!pt->status)
+ pt->status = pt->parent->ops->has_signaled(pt);
+
+ if (!pt->status && pt->parent->destroyed)
+ pt->status = -ENOENT;
+
+ if (pt->status != old_status)
+ pt->timestamp = ktime_get();
+
+ return pt->status;
+}
+
+static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
+{
+ return pt->parent->ops->dup(pt);
+}
+
+/* Adds a sync pt to the active queue. Called when added to a fence */
+static void sync_pt_activate(struct sync_pt *pt)
+{
+ struct sync_timeline *obj = pt->parent;
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&obj->active_list_lock, flags);
+
+ err = _sync_pt_has_signaled(pt);
+ if (err != 0)
+ goto out;
+
+ list_add_tail(&pt->active_list, &obj->active_list_head);
+
+out:
+ spin_unlock_irqrestore(&obj->active_list_lock, flags);
+}
+
+static int sync_fence_release(struct inode *inode, struct file *file);
+static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
+static long sync_fence_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+
+
+static const struct file_operations sync_fence_fops = {
+ .release = sync_fence_release,
+ .poll = sync_fence_poll,
+ .unlocked_ioctl = sync_fence_ioctl,
+};
+
+static struct sync_fence *sync_fence_alloc(const char *name)
+{
+ struct sync_fence *fence;
+ unsigned long flags;
+
+ fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
+ if (fence == NULL)
+ return NULL;
+
+ fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
+ fence, 0);
+ if (fence->file == NULL)
+ goto err;
+
+ kref_init(&fence->kref);
+ strlcpy(fence->name, name, sizeof(fence->name));
+
+ INIT_LIST_HEAD(&fence->pt_list_head);
+ INIT_LIST_HEAD(&fence->waiter_list_head);
+ spin_lock_init(&fence->waiter_list_lock);
+
+ init_waitqueue_head(&fence->wq);
+
+ spin_lock_irqsave(&sync_fence_list_lock, flags);
+ list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
+ spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+
+ return fence;
+
+err:
+ kfree(fence);
+ return NULL;
+}
+
+/* TODO: implement a create which takes more than one sync_pt */
+struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
+{
+ struct sync_fence *fence;
+
+ if (pt->fence)
+ return NULL;
+
+ fence = sync_fence_alloc(name);
+ if (fence == NULL)
+ return NULL;
+
+ pt->fence = fence;
+ list_add(&pt->pt_list, &fence->pt_list_head);
+ sync_pt_activate(pt);
+
+ /*
+ * signal the fence in case pt had already signaled before
+ * sync_pt_activate(pt) was called
+ */
+ sync_fence_signal_pt(pt);
+
+ return fence;
+}
+EXPORT_SYMBOL(sync_fence_create);
+
+static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
+{
+ struct list_head *pos;
+
+ list_for_each(pos, &src->pt_list_head) {
+ struct sync_pt *orig_pt =
+ container_of(pos, struct sync_pt, pt_list);
+ struct sync_pt *new_pt = sync_pt_dup(orig_pt);
+
+ if (new_pt == NULL)
+ return -ENOMEM;
+
+ new_pt->fence = dst;
+ list_add(&new_pt->pt_list, &dst->pt_list_head);
+ sync_pt_activate(new_pt);
+ }
+
+ return 0;
+}
+
+static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
+{
+ struct list_head *src_pos, *dst_pos, *n;
+
+ list_for_each(src_pos, &src->pt_list_head) {
+ struct sync_pt *src_pt =
+ container_of(src_pos, struct sync_pt, pt_list);
+ bool collapsed = false;
+
+ list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
+ struct sync_pt *dst_pt =
+ container_of(dst_pos, struct sync_pt, pt_list);
+ /* collapse two sync_pts on the same timeline
+ * to a single sync_pt that will signal at
+ * the later of the two
+ */
+ if (dst_pt->parent == src_pt->parent) {
+ if (dst_pt->parent->ops->compare(dst_pt, src_pt) == -1) {
+ struct sync_pt *new_pt =
+ sync_pt_dup(src_pt);
+ if (new_pt == NULL)
+ return -ENOMEM;
+
+ new_pt->fence = dst;
+ list_replace(&dst_pt->pt_list,
+ &new_pt->pt_list);
+ sync_pt_activate(new_pt);
+ sync_pt_free(dst_pt);
+ }
+ collapsed = true;
+ break;
+ }
+ }
+
+ if (!collapsed) {
+ struct sync_pt *new_pt = sync_pt_dup(src_pt);
+
+ if (new_pt == NULL)
+ return -ENOMEM;
+
+ new_pt->fence = dst;
+ list_add(&new_pt->pt_list, &dst->pt_list_head);
+ sync_pt_activate(new_pt);
+ }
+ }
+
+ return 0;
+}
+
+static void sync_fence_detach_pts(struct sync_fence *fence)
+{
+ struct list_head *pos, *n;
+
+ list_for_each_safe(pos, n, &fence->pt_list_head) {
+ struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
+ sync_timeline_remove_pt(pt);
+ }
+}
+
+static void sync_fence_free_pts(struct sync_fence *fence)
+{
+ struct list_head *pos, *n;
+
+ list_for_each_safe(pos, n, &fence->pt_list_head) {
+ struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
+ sync_pt_free(pt);
+ }
+}
+
+struct sync_fence *sync_fence_fdget(int fd)
+{
+ struct file *file = fget(fd);
+
+ if (file == NULL)
+ return NULL;
+
+ if (file->f_op != &sync_fence_fops)
+ goto err;
+
+ return file->private_data;
+
+err:
+ fput(file);
+ return NULL;
+}
+EXPORT_SYMBOL(sync_fence_fdget);
+
+void sync_fence_put(struct sync_fence *fence)
+{
+ fput(fence->file);
+}
+EXPORT_SYMBOL(sync_fence_put);
+
+void sync_fence_install(struct sync_fence *fence, int fd)
+{
+ fd_install(fd, fence->file);
+}
+EXPORT_SYMBOL(sync_fence_install);
+
+static int sync_fence_get_status(struct sync_fence *fence)
+{
+ struct list_head *pos;
+ int status = 1;
+
+ list_for_each(pos, &fence->pt_list_head) {
+ struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
+ int pt_status = pt->status;
+
+ if (pt_status < 0) {
+ status = pt_status;
+ break;
+ } else if (status == 1) {
+ status = pt_status;
+ }
+ }
+
+ return status;
+}
+
+struct sync_fence *sync_fence_merge(const char *name,
+ struct sync_fence *a, struct sync_fence *b)
+{
+ struct sync_fence *fence;
+ int err;
+
+ fence = sync_fence_alloc(name);
+ if (fence == NULL)
+ return NULL;
+
+ err = sync_fence_copy_pts(fence, a);
+ if (err < 0)
+ goto err;
+
+ err = sync_fence_merge_pts(fence, b);
+ if (err < 0)
+ goto err;
+
+ /*
+ * signal the fence in case one of its pts was signaled before
+ * it was activated
+ */
+ sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
+ struct sync_pt,
+ pt_list));
+
+ return fence;
+err:
+ sync_fence_free_pts(fence);
+ kfree(fence);
+ return NULL;
+}
+EXPORT_SYMBOL(sync_fence_merge);
+
+static void sync_fence_signal_pt(struct sync_pt *pt)
+{
+ LIST_HEAD(signaled_waiters);
+ struct sync_fence *fence = pt->fence;
+ struct list_head *pos;
+ struct list_head *n;
+ unsigned long flags;
+ int status;
+
+ status = sync_fence_get_status(fence);
+
+ spin_lock_irqsave(&fence->waiter_list_lock, flags);
+ /*
+ * this should protect against two threads racing on the signaled
+ * false -> true transition
+ */
+ if (status && !fence->status) {
+ list_for_each_safe(pos, n, &fence->waiter_list_head)
+ list_move(pos, &signaled_waiters);
+
+ fence->status = status;
+ } else {
+ status = 0;
+ }
+ spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
+
+ if (status) {
+ list_for_each_safe(pos, n, &signaled_waiters) {
+ struct sync_fence_waiter *waiter =
+ container_of(pos, struct sync_fence_waiter,
+ waiter_list);
+
+ list_del(pos);
+ waiter->callback(fence, waiter);
+ }
+ wake_up(&fence->wq);
+ }
+}
+
+int sync_fence_wait_async(struct sync_fence *fence,
+ struct sync_fence_waiter *waiter)
+{
+ unsigned long flags;
+ int err = 0;
+
+ spin_lock_irqsave(&fence->waiter_list_lock, flags);
+
+ if (fence->status) {
+ err = fence->status;
+ goto out;
+ }
+
+ list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
+out:
+ spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
+
+ return err;
+}
+EXPORT_SYMBOL(sync_fence_wait_async);
+
+int sync_fence_cancel_async(struct sync_fence *fence,
+ struct sync_fence_waiter *waiter)
+{
+ struct list_head *pos;
+ struct list_head *n;
+ unsigned long flags;
+ int ret = -ENOENT;
+
+ spin_lock_irqsave(&fence->waiter_list_lock, flags);
+ /*
+ * Make sure waiter is still in waiter_list because it is possible for
+ * the waiter to be removed from the list while the callback is still
+ * pending.
+ */
+ list_for_each_safe(pos, n, &fence->waiter_list_head) {
+ struct sync_fence_waiter *list_waiter =
+ container_of(pos, struct sync_fence_waiter,
+ waiter_list);
+ if (list_waiter == waiter) {
+ list_del(pos);
+ ret = 0;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(sync_fence_cancel_async);
+
+static bool sync_fence_check(struct sync_fence *fence)
+{
+ /*
+ * Make sure that reads to fence->status are ordered with the
+ * wait queue event triggering
+ */
+ smp_rmb();
+ return fence->status != 0;
+}
+
+int sync_fence_wait(struct sync_fence *fence, long timeout)
+{
+ int err = 0;
+ struct sync_pt *pt;
+
+ trace_sync_wait(fence, 1);
+ list_for_each_entry(pt, &fence->pt_list_head, pt_list)
+ trace_sync_pt(pt);
+
+ if (timeout > 0) {
+ timeout = msecs_to_jiffies(timeout);
+ err = wait_event_interruptible_timeout(fence->wq,
+ sync_fence_check(fence),
+ timeout);
+ } else if (timeout < 0) {
+ err = wait_event_interruptible(fence->wq,
+ sync_fence_check(fence));
+ }
+ trace_sync_wait(fence, 0);
+
+ if (err < 0)
+ return err;
+
+ if (fence->status < 0) {
+ pr_info("fence error %d on [%p]\n", fence->status, fence);
+ sync_dump();
+ return fence->status;
+ }
+
+ if (fence->status == 0) {
+ pr_info("fence timeout on [%p] after %dms\n", fence,
+ jiffies_to_msecs(timeout));
+ sync_dump();
+ return -ETIME;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sync_fence_wait);
+
+static void sync_fence_free(struct kref *kref)
+{
+ struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
+
+ sync_fence_free_pts(fence);
+
+ kfree(fence);
+}
+
+static int sync_fence_release(struct inode *inode, struct file *file)
+{
+ struct sync_fence *fence = file->private_data;
+ unsigned long flags;
+
+ /*
+ * We need to remove all ways to access this fence before dropping
+ * our ref.
+ *
+ * start with its membership in the global fence list
+ */
+ spin_lock_irqsave(&sync_fence_list_lock, flags);
+ list_del(&fence->sync_fence_list);
+ spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+
+ /*
+ * remove its pts from their parents so that sync_timeline_signal()
+ * can't reference the fence.
+ */
+ sync_fence_detach_pts(fence);
+
+ kref_put(&fence->kref, sync_fence_free);
+
+ return 0;
+}
+
+static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
+{
+ struct sync_fence *fence = file->private_data;
+
+ poll_wait(file, &fence->wq, wait);
+
+ /*
+ * Make sure that reads to fence->status are ordered with the
+ * wait queue event triggering
+ */
+ smp_rmb();
+
+ if (fence->status == 1)
+ return POLLIN;
+ else if (fence->status < 0)
+ return POLLERR;
+ else
+ return 0;
+}
+
+static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
+{
+ __s32 value;
+
+ if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
+ return -EFAULT;
+
+ return sync_fence_wait(fence, value);
+}
+
+static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
+{
+ int fd = get_unused_fd();
+ int err;
+ struct sync_fence *fence2, *fence3;
+ struct sync_merge_data data;
+
+ if (fd < 0)
+ return fd;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+ err = -EFAULT;
+ goto err_put_fd;
+ }
+
+ fence2 = sync_fence_fdget(data.fd2);
+ if (fence2 == NULL) {
+ err = -ENOENT;
+ goto err_put_fd;
+ }
+
+ data.name[sizeof(data.name) - 1] = '\0';
+ fence3 = sync_fence_merge(data.name, fence, fence2);
+ if (fence3 == NULL) {
+ err = -ENOMEM;
+ goto err_put_fence2;
+ }
+
+ data.fence = fd;
+ if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+ err = -EFAULT;
+ goto err_put_fence3;
+ }
+
+ sync_fence_install(fence3, fd);
+ sync_fence_put(fence2);
+ return 0;
+
+err_put_fence3:
+ sync_fence_put(fence3);
+
+err_put_fence2:
+ sync_fence_put(fence2);
+
+err_put_fd:
+ put_unused_fd(fd);
+ return err;
+}
+
+static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
+{
+ struct sync_pt_info *info = data;
+ int ret;
+
+ if (size < sizeof(struct sync_pt_info))
+ return -ENOMEM;
+
+ info->len = sizeof(struct sync_pt_info);
+
+ if (pt->parent->ops->fill_driver_data) {
+ ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
+ size - sizeof(*info));
+ if (ret < 0)
+ return ret;
+
+ info->len += ret;
+ }
+
+ strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
+ strlcpy(info->driver_name, pt->parent->ops->driver_name,
+ sizeof(info->driver_name));
+ info->status = pt->status;
+ info->timestamp_ns = ktime_to_ns(pt->timestamp);
+
+ return info->len;
+}
+
+static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
+ unsigned long arg)
+{
+ struct sync_fence_info_data *data;
+ struct list_head *pos;
+ __u32 size;
+ __u32 len = 0;
+ int ret;
+
+ if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
+ return -EFAULT;
+
+ if (size < sizeof(struct sync_fence_info_data))
+ return -EINVAL;
+
+ if (size > 4096)
+ size = 4096;
+
+ data = kzalloc(size, GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ strlcpy(data->name, fence->name, sizeof(data->name));
+ data->status = fence->status;
+ len = sizeof(struct sync_fence_info_data);
+
+ list_for_each(pos, &fence->pt_list_head) {
+ struct sync_pt *pt =
+ container_of(pos, struct sync_pt, pt_list);
+
+ ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);
+
+ if (ret < 0)
+ goto out;
+
+ len += ret;
+ }
+
+ data->len = len;
+
+ if (copy_to_user((void __user *)arg, data, len))
+ ret = -EFAULT;
+ else
+ ret = 0;
+
+out:
+ kfree(data);
+
+ return ret;
+}
+
+static long sync_fence_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct sync_fence *fence = file->private_data;
+ switch (cmd) {
+ case SYNC_IOC_WAIT:
+ return sync_fence_ioctl_wait(fence, arg);
+
+ case SYNC_IOC_MERGE:
+ return sync_fence_ioctl_merge(fence, arg);
+
+ case SYNC_IOC_FENCE_INFO:
+ return sync_fence_ioctl_fence_info(fence, arg);
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+static const char *sync_status_str(int status)
+{
+ if (status > 0)
+ return "signaled";
+ else if (status == 0)
+ return "active";
+ else
+ return "error";
+}
+
+static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
+{
+ int status = pt->status;
+ seq_printf(s, " %s%spt %s",
+ fence ? pt->parent->name : "",
+ fence ? "_" : "",
+ sync_status_str(status));
+ if (pt->status) {
+ struct timeval tv = ktime_to_timeval(pt->timestamp);
+ seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
+ }
+
+ if (pt->parent->ops->timeline_value_str &&
+ pt->parent->ops->pt_value_str) {
+ char value[64];
+ pt->parent->ops->pt_value_str(pt, value, sizeof(value));
+ seq_printf(s, ": %s", value);
+ if (fence) {
+ pt->parent->ops->timeline_value_str(pt->parent, value,
+ sizeof(value));
+ seq_printf(s, " / %s", value);
+ }
+ } else if (pt->parent->ops->print_pt) {
+ seq_printf(s, ": ");
+ pt->parent->ops->print_pt(s, pt);
+ }
+
+ seq_printf(s, "\n");
+}
+
+static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
+{
+ struct list_head *pos;
+ unsigned long flags;
+
+ seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
+
+ if (obj->ops->timeline_value_str) {
+ char value[64];
+ obj->ops->timeline_value_str(obj, value, sizeof(value));
+ seq_printf(s, ": %s", value);
+ } else if (obj->ops->print_obj) {
+ seq_printf(s, ": ");
+ obj->ops->print_obj(s, obj);
+ }
+
+ seq_printf(s, "\n");
+
+ spin_lock_irqsave(&obj->child_list_lock, flags);
+ list_for_each(pos, &obj->child_list_head) {
+ struct sync_pt *pt =
+ container_of(pos, struct sync_pt, child_list);
+ sync_print_pt(s, pt, false);
+ }
+ spin_unlock_irqrestore(&obj->child_list_lock, flags);
+}
+
+static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
+{
+ struct list_head *pos;
+ unsigned long flags;
+
+ seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
+ sync_status_str(fence->status));
+
+ list_for_each(pos, &fence->pt_list_head) {
+ struct sync_pt *pt =
+ container_of(pos, struct sync_pt, pt_list);
+ sync_print_pt(s, pt, true);
+ }
+
+ spin_lock_irqsave(&fence->waiter_list_lock, flags);
+ list_for_each(pos, &fence->waiter_list_head) {
+ struct sync_fence_waiter *waiter =
+ container_of(pos, struct sync_fence_waiter,
+ waiter_list);
+
+ seq_printf(s, "waiter %pF\n", waiter->callback);
+ }
+ spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
+}
+
+static int sync_debugfs_show(struct seq_file *s, void *unused)
+{
+ unsigned long flags;
+ struct list_head *pos;
+
+ seq_printf(s, "objs:\n--------------\n");
+
+ spin_lock_irqsave(&sync_timeline_list_lock, flags);
+ list_for_each(pos, &sync_timeline_list_head) {
+ struct sync_timeline *obj =
+ container_of(pos, struct sync_timeline,
+ sync_timeline_list);
+
+ sync_print_obj(s, obj);
+ seq_printf(s, "\n");
+ }
+ spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+
+ seq_printf(s, "fences:\n--------------\n");
+
+ spin_lock_irqsave(&sync_fence_list_lock, flags);
+ list_for_each(pos, &sync_fence_list_head) {
+ struct sync_fence *fence =
+ container_of(pos, struct sync_fence, sync_fence_list);
+
+ sync_print_fence(s, fence);
+ seq_printf(s, "\n");
+ }
+ spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+ return 0;
+}
+
+static int sync_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sync_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations sync_debugfs_fops = {
+ .open = sync_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static __init int sync_debugfs_init(void)
+{
+ debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
+ return 0;
+}
+late_initcall(sync_debugfs_init);
+
+#define DUMP_CHUNK 256
+static char sync_dump_buf[64 * 1024];
+static void sync_dump(void)
+{
+ struct seq_file s = {
+ .buf = sync_dump_buf,
+ .size = sizeof(sync_dump_buf) - 1,
+ };
+ int i;
+
+ sync_debugfs_show(&s, NULL);
+
+ for (i = 0; i < s.count; i += DUMP_CHUNK) {
+ if ((s.count - i) > DUMP_CHUNK) {
+ char c = s.buf[i + DUMP_CHUNK];
+ s.buf[i + DUMP_CHUNK] = 0;
+ pr_cont("%s", s.buf + i);
+ s.buf[i + DUMP_CHUNK] = c;
+ } else {
+ s.buf[s.count] = 0;
+ pr_cont("%s", s.buf + i);
+ }
+ }
+}
+#else
+static void sync_dump(void)
+{
+}
+#endif
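sync_dump() renders the whole debugfs state into a static 64 KiB buffer and then emits it through pr_cont() in 256-byte pieces, temporarily NUL-terminating each piece so the print routine never sees an over-long string. A standalone sketch of that chunked-emit trick on an ordinary buffer (chunk size shrunk for the demo):

/* Standalone sketch of the DUMP_CHUNK trick used by sync_dump() above:
 * emit a large buffer in fixed-size pieces, temporarily terminating each
 * piece so the printing routine only ever sees short strings.
 */
#include <stdio.h>
#include <string.h>

#define DUMP_CHUNK 16           /* 256 in the driver; small for the demo */

static void dump_in_chunks(char *buf, size_t count)
{
        size_t i;

        for (i = 0; i < count; i += DUMP_CHUNK) {
                if (count - i > DUMP_CHUNK) {
                        char c = buf[i + DUMP_CHUNK];

                        buf[i + DUMP_CHUNK] = '\0';
                        fputs(buf + i, stdout);
                        buf[i + DUMP_CHUNK] = c;        /* restore */
                } else {
                        buf[count] = '\0';
                        fputs(buf + i, stdout);
                }
        }
}

int main(void)
{
        char msg[128] = "a fairly long message that will be emitted "
                        "in sixteen-byte chunks\n";

        dump_in_chunks(msg, strlen(msg));
        return 0;
}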
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 72bedad6bf8c..f382896fb2b9 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -6,6 +6,19 @@ menu "Character devices"
source "drivers/tty/Kconfig"
+config DEVMEM
+ bool "Memory device driver"
+ default y
+ help
+ The memory driver provides two character devices, mem and kmem, which
+ provide access to the system's memory. The mem device is a view of
+ physical memory, and each byte in the device corresponds to the
+ matching physical address. The kmem device is the same as mem, but
+ the addresses correspond to the kernel's virtual address space rather
+ than physical memory. These devices are standard parts of a Linux
+ system and most users should say Y here. You might say N if you are
+ very security conscious or memory is tight.
+
config DEVKMEM
bool "/dev/kmem virtual device support"
default y
@@ -583,6 +596,10 @@ config DEVPORT
depends on ISA || PCI
default y
+config DCC_TTY
+ tristate "DCC tty driver"
+ depends on ARM
+
source "drivers/s390/char/Kconfig"
config MSM_SMD_PKT
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 7ff1d0d208a7..e0047ed1e74c 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_PCMCIA) += pcmcia/
obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o
obj-$(CONFIG_TCG_TPM) += tpm/
+obj-$(CONFIG_DCC_TTY) += dcc_tty.o
obj-$(CONFIG_PS3_FLASH) += ps3flash.o
obj-$(CONFIG_JS_RTC) += js-rtc.o
diff --git a/drivers/char/dcc_tty.c b/drivers/char/dcc_tty.c
new file mode 100644
index 000000000000..a787accdcb14
--- /dev/null
+++ b/drivers/char/dcc_tty.c
@@ -0,0 +1,326 @@
+/* drivers/char/dcc_tty.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/console.h>
+#include <linux/hrtimer.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+
+MODULE_DESCRIPTION("DCC TTY Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+
+static DEFINE_SPINLOCK(g_dcc_tty_lock);
+static struct hrtimer g_dcc_timer;
+static char g_dcc_buffer[16];
+static int g_dcc_buffer_head;
+static int g_dcc_buffer_count;
+static unsigned g_dcc_write_delay_usecs = 1;
+static struct tty_driver *g_dcc_tty_driver;
+static struct tty_struct *g_dcc_tty;
+static int g_dcc_tty_open_count;
+
+static void dcc_poll_locked(void)
+{
+ char ch;
+ int rch;
+ int written;
+
+ while (g_dcc_buffer_count) {
+ ch = g_dcc_buffer[g_dcc_buffer_head];
+ asm(
+ "mrc 14, 0, r15, c0, c1, 0\n"
+ "mcrcc 14, 0, %1, c0, c5, 0\n"
+ "movcc %0, #1\n"
+ "movcs %0, #0\n"
+ : "=r" (written)
+ : "r" (ch)
+ );
+ if (written) {
+ if (ch == '\n')
+ g_dcc_buffer[g_dcc_buffer_head] = '\r';
+ else {
+ g_dcc_buffer_head = (g_dcc_buffer_head + 1) % ARRAY_SIZE(g_dcc_buffer);
+ g_dcc_buffer_count--;
+ if (g_dcc_tty)
+ tty_wakeup(g_dcc_tty);
+ }
+ g_dcc_write_delay_usecs = 1;
+ } else {
+ if (g_dcc_write_delay_usecs > 0x100)
+ break;
+ g_dcc_write_delay_usecs <<= 1;
+ udelay(g_dcc_write_delay_usecs);
+ }
+ }
+
+ if (g_dcc_tty && !test_bit(TTY_THROTTLED, &g_dcc_tty->flags)) {
+ asm(
+ "mrc 14, 0, %0, c0, c1, 0\n"
+ "tst %0, #(1 << 30)\n"
+ "moveq %0, #-1\n"
+ "mrcne 14, 0, %0, c0, c5, 0\n"
+ : "=r" (rch)
+ );
+ if (rch >= 0) {
+ ch = rch;
+ tty_insert_flip_string(g_dcc_tty, &ch, 1);
+ tty_flip_buffer_push(g_dcc_tty);
+ }
+ }
+
+
+ if (g_dcc_buffer_count)
+ hrtimer_start(&g_dcc_timer, ktime_set(0, g_dcc_write_delay_usecs * NSEC_PER_USEC), HRTIMER_MODE_REL);
+ else
+ hrtimer_start(&g_dcc_timer, ktime_set(0, 20 * NSEC_PER_MSEC), HRTIMER_MODE_REL);
+}
+
+static int dcc_tty_open(struct tty_struct * tty, struct file * filp)
+{
+ int ret;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+ if (g_dcc_tty == NULL || g_dcc_tty == tty) {
+ g_dcc_tty = tty;
+ g_dcc_tty_open_count++;
+ ret = 0;
+ } else
+ ret = -EBUSY;
+ spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+
+ printk("dcc_tty_open, tty %p, f_flags %x, returned %d\n", tty, filp->f_flags, ret);
+
+ return ret;
+}
+
+static void dcc_tty_close(struct tty_struct * tty, struct file * filp)
+{
+ printk("dcc_tty_close, tty %p, f_flags %x\n", tty, filp->f_flags);
+ if (g_dcc_tty == tty) {
+ if (--g_dcc_tty_open_count == 0)
+ g_dcc_tty = NULL;
+ }
+}
+
+static int dcc_write(const unsigned char *buf_start, int count)
+{
+ const unsigned char *buf = buf_start;
+ unsigned long irq_flags;
+ int copy_len;
+ int space_left;
+ int tail;
+
+ if (count < 1)
+ return 0;
+
+ spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+ do {
+ tail = (g_dcc_buffer_head + g_dcc_buffer_count) % ARRAY_SIZE(g_dcc_buffer);
+ copy_len = ARRAY_SIZE(g_dcc_buffer) - tail;
+ space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
+ if (copy_len > space_left)
+ copy_len = space_left;
+ if (copy_len > count)
+ copy_len = count;
+ memcpy(&g_dcc_buffer[tail], buf, copy_len);
+ g_dcc_buffer_count += copy_len;
+ buf += copy_len;
+ count -= copy_len;
+ if (copy_len < count && copy_len < space_left) {
+ space_left -= copy_len;
+ copy_len = count;
+ if (copy_len > space_left) {
+ copy_len = space_left;
+ }
+ memcpy(g_dcc_buffer, buf, copy_len);
+ buf += copy_len;
+ count -= copy_len;
+ g_dcc_buffer_count += copy_len;
+ }
+ dcc_poll_locked();
+ space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
+ } while(count && space_left);
+ spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+ return buf - buf_start;
+}
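dcc_write() queues data into a small circular buffer in at most two segments per pass: from the current tail to the end of the array, then wrapping to the start if data and space remain. Below is a standalone sketch of the same copy with a toy 16-byte ring; all names are made up, and the wrap-around is handled by a second pass of the loop rather than the explicit second memcpy above.

/* Standalone sketch of the ring-buffer write performed by dcc_write().
 * A 16-byte ring keeps the demo small.
 */
#include <stdio.h>
#include <string.h>

#define RING_SIZE 16

static char ring[RING_SIZE];
static int ring_head;           /* oldest queued byte */
static int ring_count;          /* bytes currently queued */

static int ring_write(const char *buf, int count)
{
        int written = 0;

        while (count > 0 && ring_count < RING_SIZE) {
                int tail = (ring_head + ring_count) % RING_SIZE;
                /* Segment from the tail to the end of the array ... */
                int copy = RING_SIZE - tail;

                /* ... clamped to the free space and the caller's data. */
                if (copy > RING_SIZE - ring_count)
                        copy = RING_SIZE - ring_count;
                if (copy > count)
                        copy = count;

                memcpy(&ring[tail], buf, copy);
                ring_count += copy;
                buf += copy;
                count -= copy;
                written += copy;
                /* A further pass handles the wrap-around segment. */
        }
        return written;
}

int main(void)
{
        int n = ring_write("hello, ring buffer!", 19);

        printf("queued %d of 19 bytes\n", n);   /* 16 with a 16-byte ring */
        return 0;
}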
+
+static int dcc_tty_write(struct tty_struct * tty, const unsigned char *buf, int count)
+{
+ int ret;
+ /* printk("dcc_tty_write %p, %d\n", buf, count); */
+ ret = dcc_write(buf, count);
+ if (ret != count)
+ printk("dcc_tty_write %p, %d, returned %d\n", buf, count, ret);
+ return ret;
+}
+
+static int dcc_tty_write_room(struct tty_struct *tty)
+{
+ int space_left;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+ space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
+ spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+ return space_left;
+}
+
+static int dcc_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ int ret;
+ asm(
+ "mrc 14, 0, %0, c0, c1, 0\n"
+ "mov %0, %0, LSR #30\n"
+ "and %0, %0, #1\n"
+ : "=r" (ret)
+ );
+ return ret;
+}
+
+static void dcc_tty_unthrottle(struct tty_struct * tty)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+ dcc_poll_locked();
+ spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+}
+
+static enum hrtimer_restart dcc_tty_timer_func(struct hrtimer *timer)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+ dcc_poll_locked();
+ spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+ return HRTIMER_NORESTART;
+}
+
+void dcc_console_write(struct console *co, const char *b, unsigned count)
+{
+#if 1
+ dcc_write(b, count);
+#else
+ /* blocking printk */
+ while (count > 0) {
+ int written;
+ written = dcc_write(b, count);
+ if (written) {
+ b += written;
+ count -= written;
+ }
+ }
+#endif
+}
+
+static struct tty_driver *dcc_console_device(struct console *c, int *index)
+{
+ *index = 0;
+ return g_dcc_tty_driver;
+}
+
+static int __init dcc_console_setup(struct console *co, char *options)
+{
+ if (co->index != 0)
+ return -ENODEV;
+ return 0;
+}
+
+
+static struct console dcc_console =
+{
+ .name = "ttyDCC",
+ .write = dcc_console_write,
+ .device = dcc_console_device,
+ .setup = dcc_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+static struct tty_operations dcc_tty_ops = {
+ .open = dcc_tty_open,
+ .close = dcc_tty_close,
+ .write = dcc_tty_write,
+ .write_room = dcc_tty_write_room,
+ .chars_in_buffer = dcc_tty_chars_in_buffer,
+ .unthrottle = dcc_tty_unthrottle,
+};
+
+static int __init dcc_tty_init(void)
+{
+ int ret;
+
+ hrtimer_init(&g_dcc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ g_dcc_timer.function = dcc_tty_timer_func;
+
+ g_dcc_tty_driver = alloc_tty_driver(1);
+ if (!g_dcc_tty_driver) {
+ printk(KERN_ERR "dcc_tty_probe: alloc_tty_driver failed\n");
+ ret = -ENOMEM;
+ goto err_alloc_tty_driver_failed;
+ }
+ g_dcc_tty_driver->owner = THIS_MODULE;
+ g_dcc_tty_driver->driver_name = "dcc";
+ g_dcc_tty_driver->name = "ttyDCC";
+ g_dcc_tty_driver->major = 0; /* auto assign */
+ g_dcc_tty_driver->minor_start = 0;
+ g_dcc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ g_dcc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ g_dcc_tty_driver->init_termios = tty_std_termios;
+ g_dcc_tty_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ tty_set_operations(g_dcc_tty_driver, &dcc_tty_ops);
+ ret = tty_register_driver(g_dcc_tty_driver);
+ if (ret) {
+ printk(KERN_ERR "dcc_tty_probe: tty_register_driver failed, %d\n", ret);
+ goto err_tty_register_driver_failed;
+ }
+ tty_register_device(g_dcc_tty_driver, 0, NULL);
+
+ register_console(&dcc_console);
+ hrtimer_start(&g_dcc_timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+
+ return 0;
+
+err_tty_register_driver_failed:
+ put_tty_driver(g_dcc_tty_driver);
+ g_dcc_tty_driver = NULL;
+err_alloc_tty_driver_failed:
+ return ret;
+}
+
+static void __exit dcc_tty_exit(void)
+{
+ int ret;
+
+ tty_unregister_device(g_dcc_tty_driver, 0);
+ ret = tty_unregister_driver(g_dcc_tty_driver);
+ if (ret < 0) {
+ printk(KERN_ERR "dcc_tty_remove: tty_unregister_driver failed, %d\n", ret);
+ } else {
+ put_tty_driver(g_dcc_tty_driver);
+ }
+ g_dcc_tty_driver = NULL;
+}
+
+module_init(dcc_tty_init);
+module_exit(dcc_tty_exit);
+
+
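The wrap-around copy in dcc_write() above is easiest to see in isolation. Below is a
minimal user-space sketch of the same ring-buffer write; the buffer size, the variable
names and the absence of locking are assumptions made purely for illustration:

  #include <string.h>

  #define RING_SIZE 1024
  static char ring[RING_SIZE];
  static int ring_head;   /* next byte to drain */
  static int ring_count;  /* bytes currently queued */

  /* Queue up to len bytes, splitting the copy at the end of the array. */
  static int ring_write(const char *src, int len)
  {
          int written = 0;

          while (len > 0 && ring_count < RING_SIZE) {
                  int tail = (ring_head + ring_count) % RING_SIZE;
                  int chunk = RING_SIZE - tail;           /* room before wrap  */
                  int space = RING_SIZE - ring_count;     /* free space total  */

                  if (chunk > space)
                          chunk = space;
                  if (chunk > len)
                          chunk = len;
                  memcpy(&ring[tail], src, chunk);
                  ring_count += chunk;
                  src += chunk;
                  len -= chunk;
                  written += chunk;
          }
          return written;
  }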
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index c6fa3bc2baa8..67238abc3776 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -59,6 +59,7 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
}
#endif
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
#ifdef CONFIG_STRICT_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
@@ -84,7 +85,9 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
return 1;
}
#endif
+#endif
+#ifdef CONFIG_DEVMEM
void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}
@@ -211,6 +214,9 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
*ppos += written;
return written;
}
+#endif /* CONFIG_DEVMEM */
+
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
int __weak phys_mem_access_prot_allowed(struct file *file,
unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
@@ -332,6 +338,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
}
return 0;
}
+#endif /* CONFIG_DEVMEM */
#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
@@ -696,6 +703,8 @@ static loff_t null_lseek(struct file *file, loff_t offset, int orig)
return file->f_pos = 0;
}
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
+
/*
* The memory devices use the full 32/64 bits of the offset, and so we cannot
* check against negative addresses: they are ok. The return value is weird,
@@ -729,10 +738,14 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
return ret;
}
+#endif
+
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
static int open_port(struct inode * inode, struct file * filp)
{
return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}
+#endif
#define zero_lseek null_lseek
#define full_lseek null_lseek
@@ -742,6 +755,7 @@ static int open_port(struct inode * inode, struct file * filp)
#define open_kmem open_mem
#define open_oldmem open_mem
+#ifdef CONFIG_DEVMEM
static const struct file_operations mem_fops = {
.llseek = memory_lseek,
.read = read_mem,
@@ -750,6 +764,7 @@ static const struct file_operations mem_fops = {
.open = open_mem,
.get_unmapped_area = get_unmapped_area_mem,
};
+#endif
#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
@@ -815,7 +830,9 @@ static const struct memdev {
const struct file_operations *fops;
struct backing_dev_info *dev_info;
} devlist[] = {
+#ifdef CONFIG_DEVMEM
[1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
+#endif
#ifdef CONFIG_DEVKMEM
[2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
#endif
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index e0a899f25e37..7083716d142f 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -102,6 +102,16 @@ config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
Be aware that not all cpufreq drivers support the conservative
governor. If unsure have a look at the help section of the
driver. Fallback governor will be the performance governor.
+
+config CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+ bool "interactive"
+ select CPU_FREQ_GOV_INTERACTIVE
+ help
+ Use the CPUFreq governor 'interactive' as default. This allows
+ you to get a fully dynamic CPU frequency capable system by simply
+ loading your cpufreq low-level hardware driver and using the
+ 'interactive' governor for latency-sensitive workloads.
+
endchoice
config CPU_FREQ_GOV_PERFORMANCE
@@ -160,6 +170,23 @@ config CPU_FREQ_GOV_ONDEMAND
If in doubt, say N.
+config CPU_FREQ_GOV_INTERACTIVE
+ tristate "'interactive' cpufreq policy governor"
+ help
+ 'interactive' - This driver adds a dynamic cpufreq policy governor
+ designed for latency-sensitive workloads.
+
+ This governor attempts to reduce the latency of clock
+ increases so that the system is more responsive to
+ interactive workloads.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cpufreq_interactive.
+
+ For details, take a look at linux/Documentation/cpu-freq.
+
+ If in doubt, say N.
+
config CPU_FREQ_GOV_CONSERVATIVE
tristate "'conservative' cpufreq governor"
depends on CPU_FREQ
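Once CPU_FREQ_GOV_INTERACTIVE is enabled (built in, or as the cpufreq_interactive
module), the governor is normally selected per policy at run time by writing the
string "interactive" to the scaling_governor attribute under
/sys/devices/system/cpu/cpu0/cpufreq/; the exact sysfs layout varies by platform and
kernel version, so treat that path as indicative rather than fixed.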
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index fadc4d496e2f..ce4ffd77ffa1 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o
obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o
obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o
obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
+obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o
obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1f93dbd72355..5f72ad486746 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -47,6 +47,9 @@ static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif
static DEFINE_SPINLOCK(cpufreq_driver_lock);
+/* Used when we unregister cpufreq driver */
+static struct cpumask cpufreq_online_mask;
+
/*
* cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
* all cpufreq/hotplug/workqueue/etc related lock issues.
@@ -700,87 +703,6 @@ static struct kobj_type ktype_cpufreq = {
.release = cpufreq_sysfs_release,
};
-/*
- * Returns:
- * Negative: Failure
- * 0: Success
- * Positive: When we have a managed CPU and the sysfs got symlinked
- */
-static int cpufreq_add_dev_policy(unsigned int cpu,
- struct cpufreq_policy *policy,
- struct device *dev)
-{
- int ret = 0;
-#ifdef CONFIG_SMP
- unsigned long flags;
- unsigned int j;
-#ifdef CONFIG_HOTPLUG_CPU
- struct cpufreq_governor *gov;
-
- gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
- if (gov) {
- policy->governor = gov;
- pr_debug("Restoring governor %s for cpu %d\n",
- policy->governor->name, cpu);
- }
-#endif
-
- for_each_cpu(j, policy->cpus) {
- struct cpufreq_policy *managed_policy;
-
- if (cpu == j)
- continue;
-
- /* Check for existing affected CPUs.
- * They may not be aware of it due to CPU Hotplug.
- * cpufreq_cpu_put is called when the device is removed
- * in __cpufreq_remove_dev()
- */
- managed_policy = cpufreq_cpu_get(j);
- if (unlikely(managed_policy)) {
-
- /* Set proper policy_cpu */
- unlock_policy_rwsem_write(cpu);
- per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;
-
- if (lock_policy_rwsem_write(cpu) < 0) {
- /* Should not go through policy unlock path */
- if (cpufreq_driver->exit)
- cpufreq_driver->exit(policy);
- cpufreq_cpu_put(managed_policy);
- return -EBUSY;
- }
-
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
- cpumask_copy(managed_policy->cpus, policy->cpus);
- per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
- pr_debug("CPU already managed, adding link\n");
- ret = sysfs_create_link(&dev->kobj,
- &managed_policy->kobj,
- "cpufreq");
- if (ret)
- cpufreq_cpu_put(managed_policy);
- /*
- * Success. We only needed to be added to the mask.
- * Call driver->exit() because only the cpu parent of
- * the kobj needed to call init().
- */
- if (cpufreq_driver->exit)
- cpufreq_driver->exit(policy);
-
- if (!ret)
- return 1;
- else
- return ret;
- }
- }
-#endif
- return ret;
-}
-
-
/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(unsigned int cpu,
struct cpufreq_policy *policy)
@@ -885,6 +807,42 @@ err_out_kobj_put:
return ret;
}
+#ifdef CONFIG_HOTPLUG_CPU
+static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
+ struct device *dev)
+{
+ struct cpufreq_policy *policy;
+ int ret = 0;
+ unsigned long flags;
+
+ policy = cpufreq_cpu_get(sibling);
+ WARN_ON(!policy);
+
+ per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
+
+ lock_policy_rwsem_write(cpu);
+
+ __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ cpumask_set_cpu(cpu, policy->cpus);
+ per_cpu(cpufreq_cpu_data, cpu) = policy;
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ __cpufreq_governor(policy, CPUFREQ_GOV_START);
+ __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+
+ unlock_policy_rwsem_write(cpu);
+
+ ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
+ if (ret) {
+ cpufreq_cpu_put(policy);
+ return ret;
+ }
+
+ return 0;
+}
+#endif
/**
* cpufreq_add_dev - add a CPU device
@@ -897,12 +855,12 @@ err_out_kobj_put:
*/
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
- unsigned int cpu = dev->id;
- int ret = 0, found = 0;
+ unsigned int j, cpu = dev->id;
+ int ret = -ENOMEM, found = 0;
struct cpufreq_policy *policy;
unsigned long flags;
- unsigned int j;
#ifdef CONFIG_HOTPLUG_CPU
+ struct cpufreq_governor *gov;
int sibling;
#endif
@@ -919,6 +877,15 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
cpufreq_cpu_put(policy);
return 0;
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+ /* Check if this cpu was hot-unplugged earlier and has siblings */
+ for_each_online_cpu(sibling) {
+ struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
+ if (cp && cpumask_test_cpu(cpu, cp->related_cpus))
+ return cpufreq_add_policy_cpu(cpu, sibling, dev);
+ }
+#endif
#endif
if (!try_module_get(cpufreq_driver->owner)) {
@@ -926,7 +893,6 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
goto module_out;
}
- ret = -ENOMEM;
policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
if (!policy)
goto nomem_out;
@@ -970,20 +936,28 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
pr_debug("initialization failed\n");
goto err_unlock_policy;
}
+
+ /*
+ * affected cpus must always be the ones that are online. We aren't
+ * managing offline cpus here.
+ */
+ cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
+ cpumask_and(policy->cpus, policy->cpus, &cpufreq_online_mask);
+
policy->user_policy.min = policy->min;
policy->user_policy.max = policy->max;
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_START, policy);
- ret = cpufreq_add_dev_policy(cpu, policy, dev);
- if (ret) {
- if (ret > 0)
- /* This is a managed cpu, symlink created,
- exit with 0 */
- ret = 0;
- goto err_unlock_policy;
+#ifdef CONFIG_HOTPLUG_CPU
+ gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
+ if (gov) {
+ policy->governor = gov;
+ pr_debug("Restoring governor %s for cpu %d\n",
+ policy->governor->name, cpu);
}
+#endif
ret = cpufreq_add_dev_interface(cpu, policy, dev);
if (ret)
@@ -997,7 +971,6 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
return 0;
-
err_out_unregister:
spin_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_cpu(j, policy->cpus)
@@ -1020,6 +993,25 @@ module_out:
return ret;
}
+static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
+{
+ int j;
+
+ policy->last_cpu = policy->cpu;
+ policy->cpu = cpu;
+
+ for_each_cpu(j, policy->cpus) {
+ if (!cpu_online(j))
+ continue;
+ per_cpu(cpufreq_policy_cpu, j) = cpu;
+ }
+
+#ifdef CONFIG_CPU_FREQ_TABLE
+ cpufreq_frequency_table_update_policy_cpu(policy);
+#endif
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_UPDATE_POLICY_CPU, policy);
+}
/**
* __cpufreq_remove_dev - remove a CPU device
@@ -1030,128 +1022,92 @@ module_out:
*/
static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
- unsigned int cpu = dev->id;
+ unsigned int cpu = dev->id, ret, cpus;
unsigned long flags;
struct cpufreq_policy *data;
struct kobject *kobj;
struct completion *cmp;
-#ifdef CONFIG_SMP
struct device *cpu_dev;
- unsigned int j;
-#endif
- pr_debug("unregistering CPU %u\n", cpu);
+ pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
spin_lock_irqsave(&cpufreq_driver_lock, flags);
data = per_cpu(cpufreq_cpu_data, cpu);
if (!data) {
+ pr_debug("%s: No cpu_data found\n", __func__);
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
unlock_policy_rwsem_write(cpu);
return -EINVAL;
}
- per_cpu(cpufreq_cpu_data, cpu) = NULL;
-
-#ifdef CONFIG_SMP
- /* if this isn't the CPU which is the parent of the kobj, we
- * only need to unlink, put and exit
- */
- if (unlikely(cpu != data->cpu)) {
- pr_debug("removing link\n");
- cpumask_clear_cpu(cpu, data->cpus);
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- kobj = &dev->kobj;
- cpufreq_cpu_put(data);
- unlock_policy_rwsem_write(cpu);
- sysfs_remove_link(kobj, "cpufreq");
- return 0;
- }
-#endif
-
-#ifdef CONFIG_SMP
+ if (cpufreq_driver->target)
+ __cpufreq_governor(data, CPUFREQ_GOV_STOP);
#ifdef CONFIG_HOTPLUG_CPU
strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
CPUFREQ_NAME_LEN);
#endif
- /* if we have other CPUs still registered, we need to unlink them,
- * or else wait_for_completion below will lock up. Clean the
- * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
- * the sysfs links afterwards.
- */
- if (unlikely(cpumask_weight(data->cpus) > 1)) {
- for_each_cpu(j, data->cpus) {
- if (j == cpu)
- continue;
- per_cpu(cpufreq_cpu_data, j) = NULL;
- }
- }
-
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ per_cpu(cpufreq_cpu_data, cpu) = NULL;
+ cpus = cpumask_weight(data->cpus);
+ cpumask_clear_cpu(cpu, data->cpus);
- if (unlikely(cpumask_weight(data->cpus) > 1)) {
- for_each_cpu(j, data->cpus) {
- if (j == cpu)
- continue;
- pr_debug("removing link for cpu %u\n", j);
-#ifdef CONFIG_HOTPLUG_CPU
- strncpy(per_cpu(cpufreq_cpu_governor, j),
- data->governor->name, CPUFREQ_NAME_LEN);
-#endif
- cpu_dev = get_cpu_device(j);
- kobj = &cpu_dev->kobj;
+ if (unlikely((cpu == data->cpu) && (cpus > 1))) {
+ /* first sibling now owns the new sysfs dir */
+ cpu_dev = get_cpu_device(cpumask_first(data->cpus));
+ sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
+ ret = kobject_move(&data->kobj, &cpu_dev->kobj);
+ if (ret) {
+ pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
+ cpumask_set_cpu(cpu, data->cpus);
+ ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
+ "cpufreq");
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
unlock_policy_rwsem_write(cpu);
- sysfs_remove_link(kobj, "cpufreq");
- lock_policy_rwsem_write(cpu);
- cpufreq_cpu_put(data);
+ return -EINVAL;
}
+
+ update_policy_cpu(data, cpu_dev->id);
+ pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
+ __func__, cpu_dev->id, cpu);
}
-#else
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-#endif
- if (cpufreq_driver->target)
- __cpufreq_governor(data, CPUFREQ_GOV_STOP);
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- kobj = &data->kobj;
- cmp = &data->kobj_unregister;
+ pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
+ cpufreq_cpu_put(data);
unlock_policy_rwsem_write(cpu);
- kobject_put(kobj);
-
- /* we need to make sure that the underlying kobj is actually
- * not referenced anymore by anybody before we proceed with
- * unloading.
- */
- pr_debug("waiting for dropping of refcount\n");
- wait_for_completion(cmp);
- pr_debug("wait complete\n");
+ sysfs_remove_link(&dev->kobj, "cpufreq");
- lock_policy_rwsem_write(cpu);
- if (cpufreq_driver->exit)
- cpufreq_driver->exit(data);
- unlock_policy_rwsem_write(cpu);
+ /* If cpu is last user of policy, free policy */
+ if (cpus == 1) {
+ lock_policy_rwsem_write(cpu);
+ kobj = &data->kobj;
+ cmp = &data->kobj_unregister;
+ unlock_policy_rwsem_write(cpu);
+ kobject_put(kobj);
-#ifdef CONFIG_HOTPLUG_CPU
- /* when the CPU which is the parent of the kobj is hotplugged
- * offline, check for siblings, and create cpufreq sysfs interface
- * and symlinks
- */
- if (unlikely(cpumask_weight(data->cpus) > 1)) {
- /* first sibling now owns the new sysfs dir */
- cpumask_clear_cpu(cpu, data->cpus);
- cpufreq_add_dev(get_cpu_device(cpumask_first(data->cpus)), NULL);
+ /* we need to make sure that the underlying kobj is actually
+ * not referenced anymore by anybody before we proceed with
+ * unloading.
+ */
+ pr_debug("waiting for dropping of refcount\n");
+ wait_for_completion(cmp);
+ pr_debug("wait complete\n");
- /* finally remove our own symlink */
lock_policy_rwsem_write(cpu);
- __cpufreq_remove_dev(dev, sif);
- }
-#endif
+ if (cpufreq_driver->exit)
+ cpufreq_driver->exit(data);
+ unlock_policy_rwsem_write(cpu);
- free_cpumask_var(data->related_cpus);
- free_cpumask_var(data->cpus);
- kfree(data);
+ free_cpumask_var(data->related_cpus);
+ free_cpumask_var(data->cpus);
+ kfree(data);
+ } else if (cpufreq_driver->target) {
+ __cpufreq_governor(data, CPUFREQ_GOV_START);
+ __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
+ }
return 0;
}
@@ -1168,6 +1124,7 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
if (unlikely(lock_policy_rwsem_write(cpu)))
BUG();
+ cpumask_clear_cpu(cpu, &cpufreq_online_mask);
retval = __cpufreq_remove_dev(dev, sif);
return retval;
}
@@ -1886,6 +1843,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
cpufreq_driver = driver_data;
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ cpumask_setall(&cpufreq_online_mask);
+
ret = subsys_interface_register(&cpufreq_interface);
if (ret)
goto err_null_driver;
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
new file mode 100644
index 000000000000..7d1952c5cb16
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -0,0 +1,1066 @@
+/*
+ * drivers/cpufreq/cpufreq_interactive.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com)
+ *
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <asm/cputime.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/cpufreq_interactive.h>
+
+static int active_count;
+
+struct cpufreq_interactive_cpuinfo {
+ struct timer_list cpu_timer;
+ struct timer_list cpu_slack_timer;
+ spinlock_t load_lock; /* protects the next 4 fields */
+ u64 time_in_idle;
+ u64 time_in_idle_timestamp;
+ u64 cputime_speedadj;
+ u64 cputime_speedadj_timestamp;
+ struct cpufreq_policy *policy;
+ struct cpufreq_frequency_table *freq_table;
+ unsigned int target_freq;
+ unsigned int floor_freq;
+ u64 floor_validate_time;
+ u64 hispeed_validate_time;
+ struct rw_semaphore enable_sem;
+ int governor_enabled;
+};
+
+static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
+
+/* realtime thread handles frequency scaling */
+static struct task_struct *speedchange_task;
+static cpumask_t speedchange_cpumask;
+static spinlock_t speedchange_cpumask_lock;
+static struct mutex gov_lock;
+
+/* Hi speed to bump to from lo speed when a load burst occurs (default max) */
+static unsigned int hispeed_freq;
+
+/* Go to hi speed when CPU load at or above this value. */
+#define DEFAULT_GO_HISPEED_LOAD 99
+static unsigned long go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
+
+/* Target load. Lower values result in higher CPU speeds. */
+#define DEFAULT_TARGET_LOAD 90
+static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
+static spinlock_t target_loads_lock;
+static unsigned int *target_loads = default_target_loads;
+static int ntarget_loads = ARRAY_SIZE(default_target_loads);
+
+/*
+ * The minimum amount of time to spend at a frequency before we can ramp down.
+ */
+#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
+static unsigned long min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
+
+/*
+ * The sample rate of the timer used to increase frequency
+ */
+#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
+static unsigned long timer_rate = DEFAULT_TIMER_RATE;
+
+/*
+ * Wait this long before raising speed above hispeed, by default a single
+ * timer interval.
+ */
+#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
+static unsigned long above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
+
+/* Non-zero means indefinite speed boost active */
+static int boost_val;
+/* Duration of a boost pulse in usecs */
+static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
+/* End time of boost pulse in ktime converted to usecs */
+static u64 boostpulse_endtime;
+
+/*
+ * Max additional time to wait in idle, beyond timer_rate, at speeds above
+ * minimum before wakeup to reduce speed, or -1 if unnecessary.
+ */
+#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
+static int timer_slack_val = DEFAULT_TIMER_SLACK;
+
+static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
+ unsigned int event);
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+static
+#endif
+struct cpufreq_governor cpufreq_gov_interactive = {
+ .name = "interactive",
+ .governor = cpufreq_governor_interactive,
+ .max_transition_latency = 10000000,
+ .owner = THIS_MODULE,
+};
+
+static void cpufreq_interactive_timer_resched(
+ struct cpufreq_interactive_cpuinfo *pcpu)
+{
+ unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
+ unsigned long flags;
+
+ mod_timer_pinned(&pcpu->cpu_timer, expires);
+ if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
+ expires += usecs_to_jiffies(timer_slack_val);
+ mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
+ }
+
+ spin_lock_irqsave(&pcpu->load_lock, flags);
+ pcpu->time_in_idle =
+ get_cpu_idle_time_us(smp_processor_id(),
+ &pcpu->time_in_idle_timestamp);
+ pcpu->cputime_speedadj = 0;
+ pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
+ spin_unlock_irqrestore(&pcpu->load_lock, flags);
+}
+
+static unsigned int freq_to_targetload(unsigned int freq)
+{
+ int i;
+ unsigned int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&target_loads_lock, flags);
+
+ for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
+ ;
+
+ ret = target_loads[i];
+ spin_unlock_irqrestore(&target_loads_lock, flags);
+ return ret;
+}
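target_loads is an interleaved array of the form {load, freq, load, freq, ..., load}:
each load applies from the preceding frequency breakpoint up to (but not including)
the next one. A tiny self-contained sketch of the same lookup, using made-up values,
illustrates the stride-2 scan above:

  /* {load, kHz, load, kHz, load}; the numbers are examples only. */
  static unsigned int tl[] = { 85, 1000000, 90, 1500000, 98 };

  static unsigned int lookup(unsigned int freq)
  {
          int i;

          for (i = 0; i < 5 - 1 && freq >= tl[i + 1]; i += 2)
                  ;
          return tl[i];   /* 800000 -> 85, 1200000 -> 90, 1500000 -> 98 */
  }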
+
+/*
+ * If increasing frequencies never map to a lower target load then
+ * choose_freq() will find the minimum frequency that does not exceed its
+ * target load given the current load.
+ */
+
+static unsigned int choose_freq(
+ struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
+{
+ unsigned int freq = pcpu->policy->cur;
+ unsigned int prevfreq, freqmin, freqmax;
+ unsigned int tl;
+ int index;
+
+ freqmin = 0;
+ freqmax = UINT_MAX;
+
+ do {
+ prevfreq = freq;
+ tl = freq_to_targetload(freq);
+
+ /*
+ * Find the lowest frequency where the computed load is less
+ * than or equal to the target load.
+ */
+
+ cpufreq_frequency_table_target(
+ pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
+ CPUFREQ_RELATION_L, &index);
+ freq = pcpu->freq_table[index].frequency;
+
+ if (freq > prevfreq) {
+ /* The previous frequency is too low. */
+ freqmin = prevfreq;
+
+ if (freq >= freqmax) {
+ /*
+ * Find the highest frequency that is less
+ * than freqmax.
+ */
+ cpufreq_frequency_table_target(
+ pcpu->policy, pcpu->freq_table,
+ freqmax - 1, CPUFREQ_RELATION_H,
+ &index);
+ freq = pcpu->freq_table[index].frequency;
+
+ if (freq == freqmin) {
+ /*
+ * The first frequency below freqmax
+ * has already been found to be too
+ * low. freqmax is the lowest speed
+ * we found that is fast enough.
+ */
+ freq = freqmax;
+ break;
+ }
+ }
+ } else if (freq < prevfreq) {
+ /* The previous frequency is high enough. */
+ freqmax = prevfreq;
+
+ if (freq <= freqmin) {
+ /*
+ * Find the lowest frequency that is higher
+ * than freqmin.
+ */
+ cpufreq_frequency_table_target(
+ pcpu->policy, pcpu->freq_table,
+ freqmin + 1, CPUFREQ_RELATION_L,
+ &index);
+ freq = pcpu->freq_table[index].frequency;
+
+ /*
+ * If freqmax is the first frequency above
+ * freqmin then we have already found that
+ * this speed is fast enough.
+ */
+ if (freq == freqmax)
+ break;
+ }
+ }
+
+ /* If same frequency chosen as previous then done. */
+ } while (freq != prevfreq);
+
+ return freq;
+}
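A concrete walk through the numbers may help: if the busy-time-weighted average speed
over the last sample works out to 600,000 kHz (so loadadjfreq is 60,000,000) and the
target load at the candidate frequency is 90, choose_freq() asks the frequency table
for the lowest entry at or above 60,000,000 / 90, roughly 667 MHz, and then repeats
the check at that entry in case its own target load differs; the freqmin/freqmax
bounds guarantee the loop terminates even when the per-frequency target loads are not
monotonic. (The figures are illustrative, not taken from any particular platform.)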
+
+static u64 update_load(int cpu)
+{
+ struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
+ u64 now;
+ u64 now_idle;
+ unsigned int delta_idle;
+ unsigned int delta_time;
+ u64 active_time;
+
+ now_idle = get_cpu_idle_time_us(cpu, &now);
+ delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
+ delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
+ active_time = delta_time - delta_idle;
+ pcpu->cputime_speedadj += active_time * pcpu->policy->cur;
+
+ pcpu->time_in_idle = now_idle;
+ pcpu->time_in_idle_timestamp = now;
+ return now;
+}
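The accumulation above is what later becomes loadadjfreq in the timer. A stand-alone
sketch with made-up sample numbers follows; the units (kHz and microseconds) match
what the governor uses internally:

  #include <stdio.h>

  int main(void)
  {
          /* one 20 ms sample window: 12 ms busy at 1,000,000 kHz */
          unsigned int delta_time = 20000;     /* wall time, us */
          unsigned int delta_idle = 8000;      /* idle time, us */
          unsigned int cur_freq = 1000000;     /* kHz           */
          unsigned long long cputime_speedadj = 0;
          unsigned long long loadadjfreq;

          cputime_speedadj += (unsigned long long)(delta_time - delta_idle) * cur_freq;

          /* average busy-weighted speed over the window, scaled by 100 */
          loadadjfreq = cputime_speedadj / delta_time * 100;
          printf("loadadjfreq = %llu\n", loadadjfreq);   /* 60,000,000 */
          /* divided by the current target frequency this gives cpu_load,
             here 60, i.e. the equivalent of 60% of a 1 GHz target */
          return 0;
  }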
+
+static void cpufreq_interactive_timer(unsigned long data)
+{
+ u64 now;
+ unsigned int delta_time;
+ u64 cputime_speedadj;
+ int cpu_load;
+ struct cpufreq_interactive_cpuinfo *pcpu =
+ &per_cpu(cpuinfo, data);
+ unsigned int new_freq;
+ unsigned int loadadjfreq;
+ unsigned int index;
+ unsigned long flags;
+ bool boosted;
+
+ if (!down_read_trylock(&pcpu->enable_sem))
+ return;
+ if (!pcpu->governor_enabled)
+ goto exit;
+
+ spin_lock_irqsave(&pcpu->load_lock, flags);
+ now = update_load(data);
+ delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
+ cputime_speedadj = pcpu->cputime_speedadj;
+ spin_unlock_irqrestore(&pcpu->load_lock, flags);
+
+ if (WARN_ON_ONCE(!delta_time))
+ goto rearm;
+
+ do_div(cputime_speedadj, delta_time);
+ loadadjfreq = (unsigned int)cputime_speedadj * 100;
+ cpu_load = loadadjfreq / pcpu->target_freq;
+ boosted = boost_val || now < boostpulse_endtime;
+
+ if (cpu_load >= go_hispeed_load || boosted) {
+ if (pcpu->target_freq < hispeed_freq) {
+ new_freq = hispeed_freq;
+ } else {
+ new_freq = choose_freq(pcpu, loadadjfreq);
+
+ if (new_freq < hispeed_freq)
+ new_freq = hispeed_freq;
+ }
+ } else {
+ new_freq = choose_freq(pcpu, loadadjfreq);
+ }
+
+ if (pcpu->target_freq >= hispeed_freq &&
+ new_freq > pcpu->target_freq &&
+ now - pcpu->hispeed_validate_time < above_hispeed_delay_val) {
+ trace_cpufreq_interactive_notyet(
+ data, cpu_load, pcpu->target_freq,
+ pcpu->policy->cur, new_freq);
+ goto rearm;
+ }
+
+ pcpu->hispeed_validate_time = now;
+
+ if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
+ new_freq, CPUFREQ_RELATION_L,
+ &index)) {
+ pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
+ (int) data);
+ goto rearm;
+ }
+
+ new_freq = pcpu->freq_table[index].frequency;
+
+ /*
+ * Do not scale below floor_freq unless we have been at or above the
+ * floor frequency for the minimum sample time since last validated.
+ */
+ if (new_freq < pcpu->floor_freq) {
+ if (now - pcpu->floor_validate_time < min_sample_time) {
+ trace_cpufreq_interactive_notyet(
+ data, cpu_load, pcpu->target_freq,
+ pcpu->policy->cur, new_freq);
+ goto rearm;
+ }
+ }
+
+ /*
+ * Update the timestamp for checking whether speed has been held at
+ * or above the selected frequency for a minimum of min_sample_time,
+ * if not boosted to hispeed_freq. If boosted to hispeed_freq then we
+ * allow the speed to drop as soon as the boostpulse duration expires
+ * (or the indefinite boost is turned off).
+ */
+
+ if (!boosted || new_freq > hispeed_freq) {
+ pcpu->floor_freq = new_freq;
+ pcpu->floor_validate_time = now;
+ }
+
+ if (pcpu->target_freq == new_freq) {
+ trace_cpufreq_interactive_already(
+ data, cpu_load, pcpu->target_freq,
+ pcpu->policy->cur, new_freq);
+ goto rearm_if_notmax;
+ }
+
+ trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
+ pcpu->policy->cur, new_freq);
+
+ pcpu->target_freq = new_freq;
+ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+ cpumask_set_cpu(data, &speedchange_cpumask);
+ spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+ wake_up_process(speedchange_task);
+
+rearm_if_notmax:
+ /*
+ * Already set max speed and don't see a need to change that,
+ * wait until next idle to re-evaluate, don't need timer.
+ */
+ if (pcpu->target_freq == pcpu->policy->max)
+ goto exit;
+
+rearm:
+ if (!timer_pending(&pcpu->cpu_timer))
+ cpufreq_interactive_timer_resched(pcpu);
+
+exit:
+ up_read(&pcpu->enable_sem);
+ return;
+}
+
+static void cpufreq_interactive_idle_start(void)
+{
+ struct cpufreq_interactive_cpuinfo *pcpu =
+ &per_cpu(cpuinfo, smp_processor_id());
+ int pending;
+
+ if (!down_read_trylock(&pcpu->enable_sem))
+ return;
+ if (!pcpu->governor_enabled) {
+ up_read(&pcpu->enable_sem);
+ return;
+ }
+
+ pending = timer_pending(&pcpu->cpu_timer);
+
+ if (pcpu->target_freq != pcpu->policy->min) {
+ /*
+ * Entering idle while not at lowest speed. On some
+ * platforms this can hold the other CPU(s) at that speed
+ * even though the CPU is idle. Set a timer to re-evaluate
+ * speed so this idle CPU doesn't hold the other CPUs above
+ * min indefinitely. This should probably be a quirk of
+ * the CPUFreq driver.
+ */
+ if (!pending)
+ cpufreq_interactive_timer_resched(pcpu);
+ }
+
+ up_read(&pcpu->enable_sem);
+}
+
+static void cpufreq_interactive_idle_end(void)
+{
+ struct cpufreq_interactive_cpuinfo *pcpu =
+ &per_cpu(cpuinfo, smp_processor_id());
+
+ if (!down_read_trylock(&pcpu->enable_sem))
+ return;
+ if (!pcpu->governor_enabled) {
+ up_read(&pcpu->enable_sem);
+ return;
+ }
+
+ /* Arm the timer for 1-2 ticks later if not already. */
+ if (!timer_pending(&pcpu->cpu_timer)) {
+ cpufreq_interactive_timer_resched(pcpu);
+ } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
+ del_timer(&pcpu->cpu_timer);
+ del_timer(&pcpu->cpu_slack_timer);
+ cpufreq_interactive_timer(smp_processor_id());
+ }
+
+ up_read(&pcpu->enable_sem);
+}
+
+static int cpufreq_interactive_speedchange_task(void *data)
+{
+ unsigned int cpu;
+ cpumask_t tmp_mask;
+ unsigned long flags;
+ struct cpufreq_interactive_cpuinfo *pcpu;
+
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+
+ if (cpumask_empty(&speedchange_cpumask)) {
+ spin_unlock_irqrestore(&speedchange_cpumask_lock,
+ flags);
+ schedule();
+
+ if (kthread_should_stop())
+ break;
+
+ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+ }
+
+ set_current_state(TASK_RUNNING);
+ tmp_mask = speedchange_cpumask;
+ cpumask_clear(&speedchange_cpumask);
+ spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+
+ for_each_cpu(cpu, &tmp_mask) {
+ unsigned int j;
+ unsigned int max_freq = 0;
+
+ pcpu = &per_cpu(cpuinfo, cpu);
+ if (!down_read_trylock(&pcpu->enable_sem))
+ continue;
+ if (!pcpu->governor_enabled) {
+ up_read(&pcpu->enable_sem);
+ continue;
+ }
+
+ for_each_cpu(j, pcpu->policy->cpus) {
+ struct cpufreq_interactive_cpuinfo *pjcpu =
+ &per_cpu(cpuinfo, j);
+
+ if (pjcpu->target_freq > max_freq)
+ max_freq = pjcpu->target_freq;
+ }
+
+ if (max_freq != pcpu->policy->cur)
+ __cpufreq_driver_target(pcpu->policy,
+ max_freq,
+ CPUFREQ_RELATION_H);
+ trace_cpufreq_interactive_setspeed(cpu,
+ pcpu->target_freq,
+ pcpu->policy->cur);
+
+ up_read(&pcpu->enable_sem);
+ }
+ }
+
+ return 0;
+}
+
+static void cpufreq_interactive_boost(void)
+{
+ int i;
+ int anyboost = 0;
+ unsigned long flags;
+ struct cpufreq_interactive_cpuinfo *pcpu;
+
+ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+
+ for_each_online_cpu(i) {
+ pcpu = &per_cpu(cpuinfo, i);
+
+ if (pcpu->target_freq < hispeed_freq) {
+ pcpu->target_freq = hispeed_freq;
+ cpumask_set_cpu(i, &speedchange_cpumask);
+ pcpu->hispeed_validate_time =
+ ktime_to_us(ktime_get());
+ anyboost = 1;
+ }
+
+ /*
+ * Set floor freq and (re)start timer for when last
+ * validated.
+ */
+
+ pcpu->floor_freq = hispeed_freq;
+ pcpu->floor_validate_time = ktime_to_us(ktime_get());
+ }
+
+ spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+
+ if (anyboost)
+ wake_up_process(speedchange_task);
+}
+
+static int cpufreq_interactive_notifier(
+ struct notifier_block *nb, unsigned long val, void *data)
+{
+ struct cpufreq_freqs *freq = data;
+ struct cpufreq_interactive_cpuinfo *pcpu;
+ int cpu;
+ unsigned long flags;
+
+ if (val == CPUFREQ_POSTCHANGE) {
+ pcpu = &per_cpu(cpuinfo, freq->cpu);
+ if (!down_read_trylock(&pcpu->enable_sem))
+ return 0;
+ if (!pcpu->governor_enabled) {
+ up_read(&pcpu->enable_sem);
+ return 0;
+ }
+
+ for_each_cpu(cpu, pcpu->policy->cpus) {
+ struct cpufreq_interactive_cpuinfo *pjcpu =
+ &per_cpu(cpuinfo, cpu);
+ spin_lock_irqsave(&pjcpu->load_lock, flags);
+ update_load(cpu);
+ spin_unlock_irqrestore(&pjcpu->load_lock, flags);
+ }
+
+ up_read(&pcpu->enable_sem);
+ }
+ return 0;
+}
+
+static struct notifier_block cpufreq_notifier_block = {
+ .notifier_call = cpufreq_interactive_notifier,
+};
+
+static ssize_t show_target_loads(
+ struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ int i;
+ ssize_t ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&target_loads_lock, flags);
+
+ for (i = 0; i < ntarget_loads; i++)
+ ret += sprintf(buf + ret, "%u%s", target_loads[i],
+ i & 0x1 ? ":" : " ");
+
+ ret += sprintf(buf + ret, "\n");
+ spin_unlock_irqrestore(&target_loads_lock, flags);
+ return ret;
+}
+
+static ssize_t store_target_loads(
+ struct kobject *kobj, struct attribute *attr, const char *buf,
+ size_t count)
+{
+ int ret;
+ const char *cp;
+ unsigned int *new_target_loads = NULL;
+ int ntokens = 1;
+ int i;
+ unsigned long flags;
+
+ cp = buf;
+ while ((cp = strpbrk(cp + 1, " :")))
+ ntokens++;
+
+ if (!(ntokens & 0x1))
+ goto err_inval;
+
+ new_target_loads = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
+ if (!new_target_loads) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ cp = buf;
+ i = 0;
+ while (i < ntokens) {
+ if (sscanf(cp, "%u", &new_target_loads[i++]) != 1)
+ goto err_inval;
+
+ cp = strpbrk(cp, " :");
+ if (!cp)
+ break;
+ cp++;
+ }
+
+ if (i != ntokens)
+ goto err_inval;
+
+ spin_lock_irqsave(&target_loads_lock, flags);
+ if (target_loads != default_target_loads)
+ kfree(target_loads);
+ target_loads = new_target_loads;
+ ntarget_loads = ntokens;
+ spin_unlock_irqrestore(&target_loads_lock, flags);
+ return count;
+
+err_inval:
+ ret = -EINVAL;
+err:
+ kfree(new_target_loads);
+ return ret;
+}
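As a usage illustration (the numbers are invented), writing the string
"85 1000000:90 1500000:98" to the interactive/target_loads attribute asks for an 85%
target load below 1 GHz, 90% from 1 GHz, and 98% at 1.5 GHz and above; any mix of
spaces and colons separates the tokens, and the count must be odd so the list both
starts and ends with a load value.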
+
+static struct global_attr target_loads_attr =
+ __ATTR(target_loads, S_IRUGO | S_IWUSR,
+ show_target_loads, store_target_loads);
+
+static ssize_t show_hispeed_freq(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", hispeed_freq);
+}
+
+static ssize_t store_hispeed_freq(struct kobject *kobj,
+ struct attribute *attr, const char *buf,
+ size_t count)
+{
+ int ret;
+ long unsigned int val;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ hispeed_freq = val;
+ return count;
+}
+
+static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
+ show_hispeed_freq, store_hispeed_freq);
+
+
+static ssize_t show_go_hispeed_load(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", go_hispeed_load);
+}
+
+static ssize_t store_go_hispeed_load(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ go_hispeed_load = val;
+ return count;
+}
+
+static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
+ show_go_hispeed_load, store_go_hispeed_load);
+
+static ssize_t show_min_sample_time(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", min_sample_time);
+}
+
+static ssize_t store_min_sample_time(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ min_sample_time = val;
+ return count;
+}
+
+static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
+ show_min_sample_time, store_min_sample_time);
+
+static ssize_t show_above_hispeed_delay(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", above_hispeed_delay_val);
+}
+
+static ssize_t store_above_hispeed_delay(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ above_hispeed_delay_val = val;
+ return count;
+}
+
+define_one_global_rw(above_hispeed_delay);
+
+static ssize_t show_timer_rate(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", timer_rate);
+}
+
+static ssize_t store_timer_rate(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ timer_rate = val;
+ return count;
+}
+
+static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
+ show_timer_rate, store_timer_rate);
+
+static ssize_t show_timer_slack(
+ struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", timer_slack_val);
+}
+
+static ssize_t store_timer_slack(
+ struct kobject *kobj, struct attribute *attr, const char *buf,
+ size_t count)
+{
+ int ret;
+ long val;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ timer_slack_val = val;
+ return count;
+}
+
+define_one_global_rw(timer_slack);
+
+static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", boost_val);
+}
+
+static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ boost_val = val;
+
+ if (boost_val) {
+ trace_cpufreq_interactive_boost("on");
+ cpufreq_interactive_boost();
+ } else {
+ trace_cpufreq_interactive_unboost("off");
+ }
+
+ return count;
+}
+
+define_one_global_rw(boost);
+
+static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val;
+ trace_cpufreq_interactive_boost("pulse");
+ cpufreq_interactive_boost();
+ return count;
+}
+
+static struct global_attr boostpulse =
+ __ATTR(boostpulse, 0200, NULL, store_boostpulse);
+
+static ssize_t show_boostpulse_duration(
+ struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", boostpulse_duration_val);
+}
+
+static ssize_t store_boostpulse_duration(
+ struct kobject *kobj, struct attribute *attr, const char *buf,
+ size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ boostpulse_duration_val = val;
+ return count;
+}
+
+define_one_global_rw(boostpulse_duration);
+
+static struct attribute *interactive_attributes[] = {
+ &target_loads_attr.attr,
+ &hispeed_freq_attr.attr,
+ &go_hispeed_load_attr.attr,
+ &above_hispeed_delay.attr,
+ &min_sample_time_attr.attr,
+ &timer_rate_attr.attr,
+ &timer_slack.attr,
+ &boost.attr,
+ &boostpulse.attr,
+ &boostpulse_duration.attr,
+ NULL,
+};
+
+static struct attribute_group interactive_attr_group = {
+ .attrs = interactive_attributes,
+ .name = "interactive",
+};
+
+static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
+ unsigned long val,
+ void *data)
+{
+ switch (val) {
+ case IDLE_START:
+ cpufreq_interactive_idle_start();
+ break;
+ case IDLE_END:
+ cpufreq_interactive_idle_end();
+ break;
+ }
+
+ return 0;
+}
+
+static struct notifier_block cpufreq_interactive_idle_nb = {
+ .notifier_call = cpufreq_interactive_idle_notifier,
+};
+
+static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
+ unsigned int event)
+{
+ int rc;
+ unsigned int j;
+ struct cpufreq_interactive_cpuinfo *pcpu;
+ struct cpufreq_frequency_table *freq_table;
+
+ switch (event) {
+ case CPUFREQ_GOV_START:
+ if (!cpu_online(policy->cpu))
+ return -EINVAL;
+
+ mutex_lock(&gov_lock);
+
+ freq_table =
+ cpufreq_frequency_get_table(policy->cpu);
+ if (!hispeed_freq)
+ hispeed_freq = policy->max;
+
+ for_each_cpu(j, policy->cpus) {
+ unsigned long expires;
+
+ pcpu = &per_cpu(cpuinfo, j);
+ pcpu->policy = policy;
+ pcpu->target_freq = policy->cur;
+ pcpu->freq_table = freq_table;
+ pcpu->floor_freq = pcpu->target_freq;
+ pcpu->floor_validate_time =
+ ktime_to_us(ktime_get());
+ pcpu->hispeed_validate_time =
+ pcpu->floor_validate_time;
+ down_write(&pcpu->enable_sem);
+ expires = jiffies + usecs_to_jiffies(timer_rate);
+ pcpu->cpu_timer.expires = expires;
+ add_timer_on(&pcpu->cpu_timer, j);
+ if (timer_slack_val >= 0) {
+ expires += usecs_to_jiffies(timer_slack_val);
+ pcpu->cpu_slack_timer.expires = expires;
+ add_timer_on(&pcpu->cpu_slack_timer, j);
+ }
+ pcpu->governor_enabled = 1;
+ up_write(&pcpu->enable_sem);
+ }
+
+ /*
+ * Do not register the idle hook and create sysfs
+ * entries if we have already done so.
+ */
+ if (++active_count > 1) {
+ mutex_unlock(&gov_lock);
+ return 0;
+ }
+
+ rc = sysfs_create_group(cpufreq_global_kobject,
+ &interactive_attr_group);
+ if (rc) {
+ mutex_unlock(&gov_lock);
+ return rc;
+ }
+
+ idle_notifier_register(&cpufreq_interactive_idle_nb);
+ cpufreq_register_notifier(
+ &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
+ mutex_unlock(&gov_lock);
+ break;
+
+ case CPUFREQ_GOV_STOP:
+ mutex_lock(&gov_lock);
+ for_each_cpu(j, policy->cpus) {
+ pcpu = &per_cpu(cpuinfo, j);
+ down_write(&pcpu->enable_sem);
+ pcpu->governor_enabled = 0;
+ del_timer_sync(&pcpu->cpu_timer);
+ del_timer_sync(&pcpu->cpu_slack_timer);
+ up_write(&pcpu->enable_sem);
+ }
+
+ if (--active_count > 0) {
+ mutex_unlock(&gov_lock);
+ return 0;
+ }
+
+ cpufreq_unregister_notifier(
+ &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
+ idle_notifier_unregister(&cpufreq_interactive_idle_nb);
+ sysfs_remove_group(cpufreq_global_kobject,
+ &interactive_attr_group);
+ mutex_unlock(&gov_lock);
+
+ break;
+
+ case CPUFREQ_GOV_LIMITS:
+ if (policy->max < policy->cur)
+ __cpufreq_driver_target(policy,
+ policy->max, CPUFREQ_RELATION_H);
+ else if (policy->min > policy->cur)
+ __cpufreq_driver_target(policy,
+ policy->min, CPUFREQ_RELATION_L);
+ break;
+ }
+ return 0;
+}
+
+static void cpufreq_interactive_nop_timer(unsigned long data)
+{
+}
+
+static int __init cpufreq_interactive_init(void)
+{
+ unsigned int i;
+ struct cpufreq_interactive_cpuinfo *pcpu;
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+
+ /* Initialize per-cpu timers */
+ for_each_possible_cpu(i) {
+ pcpu = &per_cpu(cpuinfo, i);
+ init_timer_deferrable(&pcpu->cpu_timer);
+ pcpu->cpu_timer.function = cpufreq_interactive_timer;
+ pcpu->cpu_timer.data = i;
+ init_timer(&pcpu->cpu_slack_timer);
+ pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
+ spin_lock_init(&pcpu->load_lock);
+ init_rwsem(&pcpu->enable_sem);
+ }
+
+ spin_lock_init(&target_loads_lock);
+ spin_lock_init(&speedchange_cpumask_lock);
+ mutex_init(&gov_lock);
+ speedchange_task =
+ kthread_create(cpufreq_interactive_speedchange_task, NULL,
+ "cfinteractive");
+ if (IS_ERR(speedchange_task))
+ return PTR_ERR(speedchange_task);
+
+ sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
+ get_task_struct(speedchange_task);
+
+ /* NB: wake up so the thread does not look hung to the freezer */
+ wake_up_process(speedchange_task);
+
+ return cpufreq_register_governor(&cpufreq_gov_interactive);
+}
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+fs_initcall(cpufreq_interactive_init);
+#else
+module_init(cpufreq_interactive_init);
+#endif
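Registering through fs_initcall() when this governor is the build-time default makes
it available earlier in boot than a plain module_init() would, so a cpufreq driver
probing during device initialisation can already find its default governor; that
ordering concern is the usual reason for this pattern, and is an inference here
rather than something stated in the patch itself.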
+
+static void __exit cpufreq_interactive_exit(void)
+{
+ cpufreq_unregister_governor(&cpufreq_gov_interactive);
+ kthread_stop(speedchange_task);
+ put_task_struct(speedchange_task);
+}
+
+module_exit(cpufreq_interactive_exit);
+
+MODULE_AUTHOR("Mike Chan <mike@android.com>");
+MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
+ "Latency sensitive workloads");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 9d7732b81044..68b0c0c57c16 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -170,11 +170,13 @@ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
static void cpufreq_stats_free_table(unsigned int cpu)
{
struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
+
if (stat) {
+ pr_debug("%s: Free stat table\n", __func__);
kfree(stat->time_in_state);
kfree(stat);
+ per_cpu(cpufreq_stats_table, cpu) = NULL;
}
- per_cpu(cpufreq_stats_table, cpu) = NULL;
}
/* must be called early in the CPU removal sequence (before
@@ -183,8 +185,10 @@ static void cpufreq_stats_free_table(unsigned int cpu)
static void cpufreq_stats_free_sysfs(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- if (policy && policy->cpu == cpu)
+ if (policy && (cpumask_weight(policy->cpus) == 1)) {
+ pr_debug("%s: Free sysfs stat\n", __func__);
sysfs_remove_group(&policy->kobj, &stats_attr_group);
+ }
if (policy)
cpufreq_cpu_put(policy);
}
@@ -262,6 +266,19 @@ error_get_fail:
return ret;
}
+static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
+{
+ struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
+ policy->last_cpu);
+
+ pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
+ policy->cpu, policy->last_cpu);
+ per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
+ policy->last_cpu);
+ per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
+ stat->cpu = policy->cpu;
+}
+
static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
unsigned long val, void *data)
{
@@ -269,6 +286,12 @@ static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
struct cpufreq_policy *policy = data;
struct cpufreq_frequency_table *table;
unsigned int cpu = policy->cpu;
+
+ if (val == CPUFREQ_UPDATE_POLICY_CPU) {
+ cpufreq_stats_update_policy_cpu(policy);
+ return 0;
+ }
+
if (val != CPUFREQ_NOTIFY)
return 0;
table = cpufreq_frequency_get_table(cpu);
@@ -316,6 +339,27 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
return 0;
}
+static int cpufreq_stats_create_table_cpu(unsigned int cpu)
+{
+ struct cpufreq_policy *policy;
+ struct cpufreq_frequency_table *table;
+ int ret = -ENODEV;
+
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return -ENODEV;
+
+ table = cpufreq_frequency_get_table(cpu);
+ if (!table)
+ goto out;
+
+ ret = cpufreq_stats_create_table(policy, table);
+
+out:
+ cpufreq_cpu_put(policy);
+ return ret;
+}
+
static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
@@ -335,6 +379,10 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
case CPU_DEAD_FROZEN:
cpufreq_stats_free_table(cpu);
break;
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
+ cpufreq_stats_create_table_cpu(cpu);
+ break;
}
return NOTIFY_OK;
}
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 49cda256efb2..aa5bd39d129e 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -227,6 +227,15 @@ void cpufreq_frequency_table_put_attr(unsigned int cpu)
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
+void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy)
+{
+ pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n",
+ policy->cpu, policy->last_cpu);
+ per_cpu(cpufreq_show_table, policy->cpu) = per_cpu(cpufreq_show_table,
+ policy->last_cpu);
+ per_cpu(cpufreq_show_table, policy->last_cpu) = NULL;
+}
+
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
{
return per_cpu(cpufreq_show_table, cpu);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index fb4a7dd57f94..e1f6860e069c 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -69,24 +69,15 @@ int cpuidle_play_dead(void)
{
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
- int i, dead_state = -1;
- int power_usage = INT_MAX;
+ int i;
if (!drv)
return -ENODEV;
/* Find lowest-power state that supports long-term idle */
- for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
- struct cpuidle_state *s = &drv->states[i];
-
- if (s->power_usage < power_usage && s->enter_dead) {
- power_usage = s->power_usage;
- dead_state = i;
- }
- }
-
- if (dead_state != -1)
- return drv->states[dead_state].enter_dead(dev, dead_state);
+ for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--)
+ if (drv->states[i].enter_dead)
+ return drv->states[i].enter_dead(dev, i);
return -ENODEV;
}
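With the per-state power_usage bookkeeping gone (see the driver.c and menu.c hunks
below), play_dead simply walks the state table from the deepest state downwards and
takes the first one that provides an enter_dead() hook, relying on the long-standing
convention that deeper C-states consume less power.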
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index c2b281afe0ed..422c7b69ba7c 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -19,34 +19,9 @@ DEFINE_SPINLOCK(cpuidle_driver_lock);
static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu);
static struct cpuidle_driver * __cpuidle_get_cpu_driver(int cpu);
-static void set_power_states(struct cpuidle_driver *drv)
-{
- int i;
-
- /*
- * cpuidle driver should set the drv->power_specified bit
- * before registering if the driver provides
- * power_usage numbers.
- *
- * If power_specified is not set,
- * we fill in power_usage with decreasing values as the
- * cpuidle code has an implicit assumption that state Cn
- * uses less power than C(n-1).
- *
- * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
- * an power value of -1. So we use -2, -3, etc, for other
- * c-states.
- */
- for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++)
- drv->states[i].power_usage = -1 - i;
-}
-
static void __cpuidle_driver_init(struct cpuidle_driver *drv)
{
drv->refcnt = 0;
-
- if (!drv->power_specified)
- set_power_states(drv);
}
static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu)
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 20ea33afdda1..c42a8a11b860 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -187,7 +187,12 @@ static inline int performance_multiplier(void)
/* for higher loadavg, we are more reluctant */
- mult += 2 * get_loadavg();
+ /*
+ * this doesn't work as intended - it is almost always 0, but can
+ * sometimes, depending on workload, spike very high into the hundreds
+ * even when the average cpu load is under 10%.
+ */
+ /* mult += 2 * get_loadavg(); */
/* for IO wait tasks (per cpu!) we add 5x each */
mult += 10 * nr_iowait_cpu(smp_processor_id());
@@ -312,7 +317,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
- int power_usage = INT_MAX;
int i;
int multiplier;
struct timespec t;
@@ -383,11 +387,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
if (s->exit_latency * multiplier > data->predicted_us)
continue;
- if (s->power_usage < power_usage) {
- power_usage = s->power_usage;
- data->last_state_idx = i;
- data->exit_us = s->exit_latency;
- }
+ data->last_state_idx = i;
+ data->exit_us = s->exit_latency;
}
/* not deepest C-state chosen for low predicted residency */
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 340942946106..428754af6236 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -374,7 +374,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device);
/* state statistics */
- for (i = 0; i < drv->state_count; i++) {
+ for (i = 0; i < device->state_count; i++) {
kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL);
if (!kobj)
goto error_state;
diff --git a/drivers/gator/Kconfig b/drivers/gator/Kconfig
new file mode 100644
index 000000000000..8ff2ca15566d
--- /dev/null
+++ b/drivers/gator/Kconfig
@@ -0,0 +1,33 @@
+config GATOR
+ tristate "Gator module for ARM's Streamline Performance Analyzer"
+ default m if ARM
+ depends on PROFILING
+ depends on HIGH_RES_TIMERS
+ depends on LOCAL_TIMERS || !(ARM && SMP)
+ select TRACING
+
+config GATOR_WITH_MALI_SUPPORT
+ bool
+
+choice
+ prompt "Enable Mali GPU support in Gator"
+ depends on GATOR
+ optional
+
+config GATOR_MALI_400MP
+ bool "Mali-400MP"
+ select GATOR_WITH_MALI_SUPPORT
+
+config GATOR_MALI_T6XX
+ bool "Mali-T604 or Mali-T658"
+ select GATOR_WITH_MALI_SUPPORT
+
+endchoice
+
+config GATOR_MALI_PATH
+ string "Path to Mali driver"
+ depends on GATOR_WITH_MALI_SUPPORT
+ default "drivers/gpu/arm/mali400mp"
+ help
+ The gator code adds this to its include path so it can get the Mali
+ trace headers with: #include "linux/mali_linux_trace.h"
diff --git a/drivers/gator/LICENSE b/drivers/gator/LICENSE
new file mode 100644
index 000000000000..d159169d1050
--- /dev/null
+++ b/drivers/gator/LICENSE
@@ -0,0 +1,339 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/drivers/gator/Makefile b/drivers/gator/Makefile
new file mode 100644
index 000000000000..60849fc56edd
--- /dev/null
+++ b/drivers/gator/Makefile
@@ -0,0 +1,71 @@
+ifneq ($(KERNELRELEASE),)
+
+# Uncomment the following line to enable kernel stack unwinding within gator, or update gator_backtrace.c
+# EXTRA_CFLAGS += -DGATOR_KERNEL_STACK_UNWINDING
+
+obj-$(CONFIG_GATOR) := gator.o
+
+gator-y := gator_main.o \
+ gator_events_irq.o \
+ gator_events_sched.o \
+ gator_events_net.o \
+ gator_events_block.o \
+ gator_events_meminfo.o \
+ gator_events_perf_pmu.o
+
+gator-y += gator_events_mmaped.o
+
+ifeq ($(CONFIG_GATOR_WITH_MALI_SUPPORT),y)
+
+ifeq ($(CONFIG_GATOR_MALI_T6XX),y)
+gator-y += gator_events_mali_t6xx.o \
+ gator_events_mali_t6xx_hw.o
+include $(M)/mali_t6xx.mk
+else
+gator-y += gator_events_mali_400.o
+endif
+gator-y += gator_events_mali_common.o
+
+ccflags-y += -I$(CONFIG_GATOR_MALI_PATH)
+ccflags-$(CONFIG_GATOR_MALI_400MP) += -DMALI_SUPPORT=MALI_400
+ccflags-$(CONFIG_GATOR_MALI_T6XX) += -DMALI_SUPPORT=MALI_T6xx
+endif
+
+# GATOR_TEST controls whether to include (=1) or exclude (=0) test code.
+GATOR_TEST ?= 0
+EXTRA_CFLAGS += -DGATOR_TEST=$(GATOR_TEST)
+
+gator-$(CONFIG_ARM) += gator_events_armv6.o \
+ gator_events_armv7.o \
+ gator_events_l2c-310.o \
+ gator_events_scorpion.o
+
+$(obj)/gator_main.o: $(obj)/gator_events.h
+
+clean-files := gator_events.h
+
+# Note, in the recipe below we use "cd $(srctree) && cd $(src)" rather than
+# "cd $(srctree)/$(src)" because under DKMS $(src) is an absolute path, and we
+# can't just use $(src) because for normal kernel builds this is relative to
+# $(srctree)
+
+ chk_events.h = :
+ quiet_chk_events.h = echo ' CHK $@'
+silent_chk_events.h = :
+$(obj)/gator_events.h: FORCE
+ @$($(quiet)chk_events.h)
+ $(Q)cd $(srctree) && cd $(src) ; $(CONFIG_SHELL) gator_events.sh $(abspath $@)
+
+else
+
+all:
+ @echo
+ @echo "usage:"
+ @echo " make -C <kernel_build_dir> M=\`pwd\` ARCH=arm CROSS_COMPILE=<...> modules"
+ @echo
+ $(error)
+
+clean:
+ rm -f *.o .*.cmd gator_events.h modules.order Module.symvers gator.ko gator.mod.c
+
+endif
diff --git a/drivers/gator/gator.h b/drivers/gator/gator.h
new file mode 100644
index 000000000000..9a4617b2822a
--- /dev/null
+++ b/drivers/gator/gator.h
@@ -0,0 +1,121 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef GATOR_H_
+#define GATOR_H_
+
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+
+#define GATOR_PERF_SUPPORT LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
+#define GATOR_PERF_PMU_SUPPORT GATOR_PERF_SUPPORT && defined(CONFIG_PERF_EVENTS) && (!(defined(__arm__) || defined(__aarch64__)) || defined(CONFIG_HW_PERF_EVENTS))
+#define GATOR_NO_PERF_SUPPORT (!(GATOR_PERF_SUPPORT))
+#define GATOR_CPU_FREQ_SUPPORT (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)) && defined(CONFIG_CPU_FREQ)
+
+// cpu ids
+#define ARM1136 0xb36
+#define ARM1156 0xb56
+#define ARM1176 0xb76
+#define ARM11MPCORE 0xb02
+#define CORTEX_A5 0xc05
+#define CORTEX_A7 0xc07
+#define CORTEX_A8 0xc08
+#define CORTEX_A9 0xc09
+#define CORTEX_A15 0xc0f
+#define SCORPION 0x00f
+#define SCORPIONMP 0x02d
+#define KRAITSIM 0x049
+#define KRAIT 0x04d
+#define KRAIT_S4_PRO 0x06f
+#define CORTEX_A53 0xd03
+#define CORTEX_A57 0xd07
+#define AARCH64 0xd0f
+#define OTHER 0xfff
+
+#define MAXSIZE_CORE_NAME 32
+
+struct gator_cpu {
+ const int cpuid;
+ const char core_name[MAXSIZE_CORE_NAME];
+ const char * const pmnc_name;
+ const int pmnc_counters;
+ const int ccnt;
+};
+
+extern struct gator_cpu gator_cpus[];
+
+/******************************************************************************
+ * Filesystem
+ ******************************************************************************/
+int gatorfs_create_file_perm(struct super_block *sb, struct dentry *root,
+ char const *name,
+ const struct file_operations *fops, int perm);
+
+struct dentry *gatorfs_mkdir(struct super_block *sb, struct dentry *root,
+ char const *name);
+
+int gatorfs_create_ulong(struct super_block *sb, struct dentry *root,
+ char const *name, unsigned long *val);
+
+int gatorfs_create_ro_ulong(struct super_block *sb, struct dentry *root,
+ char const *name, unsigned long *val);
+
+void gator_op_create_files(struct super_block *sb, struct dentry *root);
+
+/******************************************************************************
+ * Tracepoints
+ ******************************************************************************/
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
+# error Kernels prior to 2.6.32 not supported
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
+# define GATOR_DEFINE_PROBE(probe_name, proto) \
+ static void probe_##probe_name(PARAMS(proto))
+# define GATOR_REGISTER_TRACE(probe_name) \
+ register_trace_##probe_name(probe_##probe_name)
+# define GATOR_UNREGISTER_TRACE(probe_name) \
+ unregister_trace_##probe_name(probe_##probe_name)
+#else
+# define GATOR_DEFINE_PROBE(probe_name, proto) \
+ static void probe_##probe_name(void *data, PARAMS(proto))
+# define GATOR_REGISTER_TRACE(probe_name) \
+ register_trace_##probe_name(probe_##probe_name, NULL)
+# define GATOR_UNREGISTER_TRACE(probe_name) \
+ unregister_trace_##probe_name(probe_##probe_name, NULL)
+#endif
+
+/******************************************************************************
+ * Events
+ ******************************************************************************/
+struct gator_interface {
+ void (*shutdown)(void); // Complementary function to init
+ int (*create_files)(struct super_block *sb, struct dentry *root);
+ int (*start)(void);
+ void (*stop)(void); // Complementary function to start
+ int (*online)(int **buffer);
+ int (*offline)(int **buffer);
+ void (*online_dispatch)(int cpu); // called in process context but may not be running on core 'cpu'
+ void (*offline_dispatch)(int cpu); // called in process context but may not be running on core 'cpu'
+ int (*read)(int **buffer);
+ int (*read64)(long long **buffer);
+ struct list_head list;
+};
+
+// gator_events_init is used as a search term in gator_events.sh
+#define gator_events_init(initfn) \
+ static inline int __gator_events_init_test(void) \
+ { return initfn(); }
+
+int gator_events_install(struct gator_interface *interface);
+int gator_events_get_key(void);
+u32 gator_cpuid(void);
+
+void gator_backtrace_handler(struct pt_regs *const regs);
+
+#endif // GATOR_H_
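
For orientation, the declarations above are everything an event source needs: it fills in a struct gator_interface, registers it with gator_events_install(), exposes its controls through gatorfs, and announces its init function with the gator_events_init() macro so gator_events.sh can pick it up. Below is a minimal sketch built only on these declarations; the "example" names and the single fixed counter are illustrative assumptions, not part of this patch.

/* Hypothetical minimal gator event plugin, using only the gator.h API above. */
#include "gator.h"

static unsigned long example_enabled;	/* toggled from user space via gatorfs */
static unsigned long example_key;	/* assigned by gator_events_get_key() */
static int example_buffer[2];		/* one {key, value} pair handed back to gator */

static int example_create_files(struct super_block *sb, struct dentry *root)
{
	struct dentry *dir = gatorfs_mkdir(sb, root, "example_counter");

	if (!dir)
		return -1;
	gatorfs_create_ulong(sb, dir, "enabled", &example_enabled);
	gatorfs_create_ro_ulong(sb, dir, "key", &example_key);
	return 0;
}

static int example_read(int **buffer)
{
	if (!example_enabled)
		return 0;
	example_buffer[0] = example_key;
	example_buffer[1] = 42;		/* sample value; a real plugin reads hardware or kernel state */
	if (buffer)
		*buffer = example_buffer;
	return 2;			/* number of ints written to the buffer */
}

static struct gator_interface example_interface = {
	.create_files = example_create_files,
	.read = example_read,
};

int gator_events_example_init(void)
{
	example_key = gator_events_get_key();
	return gator_events_install(&example_interface);
}

gator_events_init(gator_events_example_init);

The real event sources later in this patch (gator_events_armv6.c and friends) follow exactly this shape, with per-CPU buffers and hardware counter reads in place of the fixed value.
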
diff --git a/drivers/gator/gator_annotate.c b/drivers/gator/gator_annotate.c
new file mode 100644
index 000000000000..42f9951ebed1
--- /dev/null
+++ b/drivers/gator/gator_annotate.c
@@ -0,0 +1,168 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <asm/uaccess.h>
+#include <asm/current.h>
+#include <linux/spinlock.h>
+
+static DEFINE_SPINLOCK(annotate_lock);
+static bool collect_annotations = false;
+
+static int annotate_copy(struct file *file, char const __user *buf, size_t count)
+{
+ int cpu = 0;
+ int write = per_cpu(gator_buffer_write, cpu)[ANNOTATE_BUF];
+
+ if (file == NULL) {
+ // copy from kernel
+ memcpy(&per_cpu(gator_buffer, cpu)[ANNOTATE_BUF][write], buf, count);
+ } else {
+ // copy from user space
+ if (copy_from_user(&per_cpu(gator_buffer, cpu)[ANNOTATE_BUF][write], buf, count) != 0)
+ return -1;
+ }
+ per_cpu(gator_buffer_write, cpu)[ANNOTATE_BUF] = (write + count) & gator_buffer_mask[ANNOTATE_BUF];
+
+ return 0;
+}
+
+static ssize_t annotate_write(struct file *file, char const __user *buf, size_t count_orig, loff_t *offset)
+{
+ int pid, cpu, header_size, available, contiguous, length1, length2, size, count = count_orig & 0x7fffffff;
+
+ if (*offset) {
+ return -EINVAL;
+ }
+
+ // Annotations are not supported in interrupt context
+ if (in_interrupt()) {
+ printk(KERN_WARNING "gator: Annotations are not supported in interrupt context\n");
+ return -EINVAL;
+ }
+
+ // synchronize between cores and with collect_annotations
+ spin_lock(&annotate_lock);
+
+ if (!collect_annotations) {
+ // Not collecting annotations, tell the caller everything was written
+ size = count_orig;
+ goto annotate_write_out;
+ }
+
+ // Annotations use only a single per-cpu buffer (cpu 0's) because the data must reach the engine in order
+ cpu = 0;
+
+ if (current == NULL) {
+ pid = 0;
+ } else {
+ pid = current->pid;
+ }
+
+ // determine total size of the payload
+ header_size = MAXSIZE_PACK32 * 3 + MAXSIZE_PACK64;
+ available = buffer_bytes_available(cpu, ANNOTATE_BUF) - header_size;
+ size = count < available ? count : available;
+
+ if (size <= 0) {
+ // Buffer is full but don't return an error. Instead return 0 so the
+ // caller knows nothing was written and they can try again.
+ size = 0;
+ goto annotate_write_out;
+ }
+
+ // write the annotation header followed by the payload into the annotation buffer
+ if (per_cpu(gator_buffer, cpu)[ANNOTATE_BUF]) {
+ gator_buffer_write_packed_int(cpu, ANNOTATE_BUF, smp_processor_id());
+ gator_buffer_write_packed_int(cpu, ANNOTATE_BUF, pid);
+ gator_buffer_write_packed_int64(cpu, ANNOTATE_BUF, gator_get_time());
+ gator_buffer_write_packed_int(cpu, ANNOTATE_BUF, size);
+
+ // determine the sizes to capture, length1 + length2 will equal size
+ contiguous = contiguous_space_available(cpu, ANNOTATE_BUF);
+ if (size < contiguous) {
+ length1 = size;
+ length2 = 0;
+ } else {
+ length1 = contiguous;
+ length2 = size - contiguous;
+ }
+
+ if (annotate_copy(file, buf, length1) != 0) {
+ size = -EINVAL;
+ goto annotate_write_out;
+ }
+
+ if (length2 > 0 && annotate_copy(file, &buf[length1], length2) != 0) {
+ size = -EINVAL;
+ goto annotate_write_out;
+ }
+
+ // Check and commit; commit is set to occur once buffer is 3/4 full
+ buffer_check(cpu, ANNOTATE_BUF);
+ }
+
+annotate_write_out:
+ spin_unlock(&annotate_lock);
+
+ // return the number of bytes written
+ return size;
+}
+
+#include "gator_annotate_kernel.c"
+
+static int annotate_release(struct inode *inode, struct file *file)
+{
+ int cpu = 0;
+
+ // synchronize between cores
+ spin_lock(&annotate_lock);
+
+ if (per_cpu(gator_buffer, cpu)[ANNOTATE_BUF] && buffer_check_space(cpu, ANNOTATE_BUF, MAXSIZE_PACK64 + 3 * MAXSIZE_PACK32)) {
+ uint32_t pid = current->pid;
+ gator_buffer_write_packed_int(cpu, ANNOTATE_BUF, smp_processor_id());
+ gator_buffer_write_packed_int(cpu, ANNOTATE_BUF, pid);
+ gator_buffer_write_packed_int64(cpu, ANNOTATE_BUF, 0); // time
+ gator_buffer_write_packed_int(cpu, ANNOTATE_BUF, 0); // size
+ }
+
+ // Check and commit; commit is set to occur once buffer is 3/4 full
+ buffer_check(cpu, ANNOTATE_BUF);
+
+ spin_unlock(&annotate_lock);
+
+ return 0;
+}
+
+static const struct file_operations annotate_fops = {
+ .write = annotate_write,
+ .release = annotate_release
+};
+
+static int gator_annotate_create_files(struct super_block *sb, struct dentry *root)
+{
+ return gatorfs_create_file_perm(sb, root, "annotate", &annotate_fops, 0666);
+}
+
+static int gator_annotate_start(void)
+{
+ collect_annotations = true;
+ return 0;
+}
+
+static void gator_annotate_stop(void)
+{
+ // the spinlock here will ensure that when this function exits, we are not in the middle of an annotation
+ spin_lock(&annotate_lock);
+ collect_annotations = false;
+ spin_unlock(&annotate_lock);
+}
diff --git a/drivers/gator/gator_annotate_kernel.c b/drivers/gator/gator_annotate_kernel.c
new file mode 100644
index 000000000000..67d2d6cc09f6
--- /dev/null
+++ b/drivers/gator/gator_annotate_kernel.c
@@ -0,0 +1,157 @@
+/**
+ * Copyright (C) ARM Limited 2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define ESCAPE_CODE 0x1c
+#define STRING_ANNOTATION 0x06
+#define NAME_CHANNEL_ANNOTATION 0x07
+#define NAME_GROUP_ANNOTATION 0x08
+#define VISUAL_ANNOTATION 0x04
+#define MARKER_ANNOTATION 0x05
+
+static void kannotate_write(const char *ptr, unsigned int size)
+{
+ int retval;
+ int pos = 0;
+ loff_t offset = 0;
+ while (pos < size) {
+ retval = annotate_write(NULL, &ptr[pos], size - pos, &offset);
+ if (retval < 0) {
+ printk(KERN_WARNING "gator: kannotate_write failed with return value %d\n", retval);
+ return;
+ }
+ pos += retval;
+ }
+}
+
+void gator_annotate_channel(int channel, const char *str)
+{
+ int str_size = strlen(str) & 0xffff;
+ long long header = ESCAPE_CODE | (STRING_ANNOTATION << 8) | (channel << 16) | ((long long)str_size << 48);
+ kannotate_write((char *)&header, sizeof(header));
+ kannotate_write(str, str_size);
+}
+
+EXPORT_SYMBOL(gator_annotate_channel);
+
+void gator_annotate(const char *str)
+{
+ gator_annotate_channel(0, str);
+}
+
+EXPORT_SYMBOL(gator_annotate);
+
+void gator_annotate_channel_color(int channel, int color, const char *str)
+{
+ int str_size = (strlen(str) + 4) & 0xffff;
+ char header[12];
+ header[0] = ESCAPE_CODE;
+ header[1] = STRING_ANNOTATION;
+ *(u32 *)(&header[2]) = channel;
+ *(u16 *)(&header[6]) = str_size;
+ *(u32 *)(&header[8]) = color;
+ kannotate_write((char *)&header, sizeof(header));
+ kannotate_write(str, str_size - 4);
+}
+
+EXPORT_SYMBOL(gator_annotate_channel_color);
+
+void gator_annotate_color(int color, const char *str)
+{
+ gator_annotate_channel_color(0, color, str);
+}
+
+EXPORT_SYMBOL(gator_annotate_color);
+
+void gator_annotate_channel_end(int channel)
+{
+ long long header = ESCAPE_CODE | (STRING_ANNOTATION << 8) | (channel << 16);
+ kannotate_write((char *)&header, sizeof(header));
+}
+
+EXPORT_SYMBOL(gator_annotate_channel_end);
+
+void gator_annotate_end(void)
+{
+ gator_annotate_channel_end(0);
+}
+
+EXPORT_SYMBOL(gator_annotate_end);
+
+void gator_annotate_name_channel(int channel, int group, const char* str)
+{
+ int str_size = strlen(str) & 0xffff;
+ char header[12];
+ header[0] = ESCAPE_CODE;
+ header[1] = NAME_CHANNEL_ANNOTATION;
+ *(u32 *)(&header[2]) = channel;
+ *(u32 *)(&header[6]) = group;
+ *(u16 *)(&header[10]) = str_size;
+ kannotate_write((char *)&header, sizeof(header));
+ kannotate_write(str, str_size);
+}
+
+EXPORT_SYMBOL(gator_annotate_name_channel);
+
+void gator_annotate_name_group(int group, const char* str)
+{
+ int str_size = strlen(str) & 0xffff;
+ long long header = ESCAPE_CODE | (NAME_GROUP_ANNOTATION << 8) | (group << 16) | ((long long)str_size << 48);
+ kannotate_write((char *)&header, sizeof(header));
+ kannotate_write(str, str_size);
+}
+
+EXPORT_SYMBOL(gator_annotate_name_group);
+
+void gator_annotate_visual(const char *data, unsigned int length, const char *str)
+{
+ int str_size = strlen(str) & 0xffff;
+ int visual_annotation = ESCAPE_CODE | (VISUAL_ANNOTATION << 8) | (str_size << 16);
+ kannotate_write((char *)&visual_annotation, sizeof(visual_annotation));
+ kannotate_write(str, str_size);
+ kannotate_write((char *)&length, sizeof(length));
+ kannotate_write(data, length);
+}
+
+EXPORT_SYMBOL(gator_annotate_visual);
+
+void gator_annotate_marker(void)
+{
+ int header = ESCAPE_CODE | (MARKER_ANNOTATION << 8);
+ kannotate_write((char *)&header, sizeof(header));
+}
+
+EXPORT_SYMBOL(gator_annotate_marker);
+
+void gator_annotate_marker_str(const char *str)
+{
+ int str_size = strlen(str) & 0xffff;
+ int header = ESCAPE_CODE | (MARKER_ANNOTATION << 8) | (str_size << 16);
+ kannotate_write((char *)&header, sizeof(header));
+ kannotate_write(str, str_size);
+}
+
+EXPORT_SYMBOL(gator_annotate_marker_str);
+
+void gator_annotate_marker_color(int color)
+{
+ long long header = (ESCAPE_CODE | (MARKER_ANNOTATION << 8) | 0x00040000 | ((long long)color << 32));
+ kannotate_write((char *)&header, sizeof(header));
+}
+
+EXPORT_SYMBOL(gator_annotate_marker_color);
+
+void gator_annotate_marker_color_str(int color, const char *str)
+{
+ int str_size = (strlen(str) + 4) & 0xffff;
+ long long header = ESCAPE_CODE | (MARKER_ANNOTATION << 8) | (str_size << 16) | ((long long)color << 32);
+ kannotate_write((char *)&header, sizeof(header));
+ kannotate_write(str, str_size - 4);
+}
+
+EXPORT_SYMBOL(gator_annotate_marker_color_str);
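
The EXPORT_SYMBOL()'d functions above form the in-kernel annotation API: other kernel code can drop markers and string annotations onto the Streamline timeline, and annotate_write() simply reports the bytes as written when no capture is running, so the calls are cheap outside a session. A hypothetical caller is sketched below; the extern prototypes are restated locally for the sketch, and a real caller would use whatever header declares them.

/* Hypothetical in-kernel user of the gator annotation API defined above.
 * Must be called from process context; annotate_write() rejects interrupt context. */
extern void gator_annotate(const char *str);
extern void gator_annotate_end(void);
extern void gator_annotate_marker_str(const char *str);

static void example_do_work(void)
{
	gator_annotate_marker_str("request: start");	/* named marker on the timeline */
	gator_annotate("processing request");		/* open a channel-0 string annotation */
	/* ... the work being measured ... */
	gator_annotate_end();				/* close the channel-0 annotation */
}
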
diff --git a/drivers/gator/gator_backtrace.c b/drivers/gator/gator_backtrace.c
new file mode 100644
index 000000000000..e6125b34605a
--- /dev/null
+++ b/drivers/gator/gator_backtrace.c
@@ -0,0 +1,127 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+/*
+ * EABI backtrace stores {fp,lr} on the stack.
+ */
+struct frame_tail_eabi {
+ unsigned long fp; // points to prev_lr
+ unsigned long lr;
+};
+
+static void arm_backtrace_eabi(int cpu, struct pt_regs *const regs, unsigned int depth)
+{
+#if defined(__arm__) || defined(__aarch64__)
+ struct frame_tail_eabi *tail;
+ struct frame_tail_eabi *next;
+ struct frame_tail_eabi *ptrtail;
+ struct frame_tail_eabi buftail;
+#if defined(__arm__)
+ unsigned long fp = regs->ARM_fp;
+ unsigned long sp = regs->ARM_sp;
+ unsigned long lr = regs->ARM_lr;
+ const int frame_offset = 4;
+#else
+ unsigned long fp = regs->regs[29];
+ unsigned long sp = regs->sp;
+ unsigned long lr = regs->regs[30];
+ const int frame_offset = 0;
+#endif
+ int is_user_mode = user_mode(regs);
+
+ if (!is_user_mode) {
+ return;
+ }
+
+ /* entry preamble may not have executed */
+ gator_add_trace(cpu, lr);
+
+ /* check tail is valid */
+ if (fp == 0 || fp < sp) {
+ return;
+ }
+
+ tail = (struct frame_tail_eabi *)(fp - frame_offset);
+
+ while (depth-- && tail && !((unsigned long)tail & 3)) {
+ /* Also check accessibility of one struct frame_tail beyond */
+ if (!access_ok(VERIFY_READ, tail, sizeof(struct frame_tail_eabi)))
+ return;
+ if (__copy_from_user_inatomic(&buftail, tail, sizeof(struct frame_tail_eabi)))
+ return;
+ ptrtail = &buftail;
+
+ lr = ptrtail[0].lr;
+ gator_add_trace(cpu, lr);
+
+ /* frame pointers should progress back up the stack, towards higher addresses */
+ next = (struct frame_tail_eabi *)(lr - frame_offset);
+ if (tail >= next || lr == 0) {
+ fp = ptrtail[0].fp;
+ next = (struct frame_tail_eabi *)(fp - frame_offset);
+ /* check tail is valid */
+ if (tail >= next || fp == 0) {
+ return;
+ }
+ }
+
+ tail = next;
+ }
+#endif
+}
+
+#if defined(__arm__) || defined(__aarch64__)
+static int report_trace(struct stackframe *frame, void *d)
+{
+ struct module *mod;
+ unsigned int *depth = d, cookie = NO_COOKIE, cpu = smp_processor_id();
+ unsigned long addr = frame->pc;
+
+ if (*depth) {
+ mod = __module_address(addr);
+ if (mod) {
+ cookie = get_cookie(cpu, current, mod->name, false);
+ addr = addr - (unsigned long)mod->module_core;
+ }
+ marshal_backtrace(addr & ~1, cookie);
+ (*depth)--;
+ }
+
+ return *depth == 0;
+}
+#endif
+
+// Uncomment the following line to enable kernel stack unwinding within gator; note that it can also be defined from the Makefile
+// #define GATOR_KERNEL_STACK_UNWINDING
+static void kernel_backtrace(int cpu, struct pt_regs *const regs)
+{
+#if defined(__arm__) || defined(__aarch64__)
+#ifdef GATOR_KERNEL_STACK_UNWINDING
+ int depth = gator_backtrace_depth;
+#else
+ int depth = 1;
+#endif
+ struct stackframe frame;
+ if (depth == 0)
+ depth = 1;
+#if defined(__arm__)
+ frame.fp = regs->ARM_fp;
+ frame.sp = regs->ARM_sp;
+ frame.lr = regs->ARM_lr;
+ frame.pc = regs->ARM_pc;
+#else
+ frame.fp = regs->regs[29];
+ frame.sp = regs->sp;
+ frame.pc = regs->pc;
+#endif
+ walk_stackframe(&frame, report_trace, &depth);
+#else
+ marshal_backtrace(PC_REG & ~1, NO_COOKIE);
+#endif
+}
diff --git a/drivers/gator/gator_cookies.c b/drivers/gator/gator_cookies.c
new file mode 100644
index 000000000000..bb401bba8275
--- /dev/null
+++ b/drivers/gator/gator_cookies.c
@@ -0,0 +1,397 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define COOKIEMAP_ENTRIES 1024 /* must be power of 2 */
+#define TRANSLATE_SIZE 256
+#define MAX_COLLISIONS 2
+
+static uint32_t *gator_crc32_table;
+static unsigned int translate_buffer_mask;
+
+static DEFINE_PER_CPU(char *, translate_text);
+static DEFINE_PER_CPU(uint32_t, cookie_next_key);
+static DEFINE_PER_CPU(uint64_t *, cookie_keys);
+static DEFINE_PER_CPU(uint32_t *, cookie_values);
+static DEFINE_PER_CPU(int, translate_buffer_read);
+static DEFINE_PER_CPU(int, translate_buffer_write);
+static DEFINE_PER_CPU(void **, translate_buffer);
+
+static inline uint32_t get_cookie(int cpu, struct task_struct *task, const char *text, bool from_wq);
+static void wq_cookie_handler(struct work_struct *unused);
+DECLARE_WORK(cookie_work, wq_cookie_handler);
+static struct timer_list app_process_wake_up_timer;
+static void app_process_wake_up_handler(unsigned long unused_data);
+
+static uint32_t cookiemap_code(uint64_t value64)
+{
+ uint32_t value = (uint32_t)((value64 >> 32) + value64);
+ uint32_t cookiecode = (value >> 24) & 0xff;
+ cookiecode = cookiecode * 31 + ((value >> 16) & 0xff);
+ cookiecode = cookiecode * 31 + ((value >> 8) & 0xff);
+ cookiecode = cookiecode * 31 + ((value >> 0) & 0xff);
+ cookiecode &= (COOKIEMAP_ENTRIES - 1);
+ return cookiecode * MAX_COLLISIONS;
+}
+
+static uint32_t gator_chksum_crc32(const char *data)
+{
+ register unsigned long crc;
+ const unsigned char *block = data;
+ int i, length = strlen(data);
+
+ crc = 0xFFFFFFFF;
+ for (i = 0; i < length; i++) {
+ crc = ((crc >> 8) & 0x00FFFFFF) ^ gator_crc32_table[(crc ^ *block++) & 0xFF];
+ }
+
+ return (crc ^ 0xFFFFFFFF);
+}
+
+/*
+ * Exists
+ * Pre: [0][1][v][3]..[n-1]
+ * Post: [v][0][1][3]..[n-1]
+ */
+static uint32_t cookiemap_exists(uint64_t key)
+{
+ unsigned long x, flags, retval = 0;
+ int cpu = smp_processor_id();
+ uint32_t cookiecode = cookiemap_code(key);
+ uint64_t *keys = &(per_cpu(cookie_keys, cpu)[cookiecode]);
+ uint32_t *values = &(per_cpu(cookie_values, cpu)[cookiecode]);
+
+ // Can be called from interrupt handler or from work queue
+ local_irq_save(flags);
+ for (x = 0; x < MAX_COLLISIONS; x++) {
+ if (keys[x] == key) {
+ uint32_t value = values[x];
+ for (; x > 0; x--) {
+ keys[x] = keys[x - 1];
+ values[x] = values[x - 1];
+ }
+ keys[0] = key;
+ values[0] = value;
+ retval = value;
+ break;
+ }
+ }
+ local_irq_restore(flags);
+
+ return retval;
+}
+
+/*
+ * Add
+ * Pre: [0][1][2][3]..[n-1]
+ * Post: [v][0][1][2]..[n-2]
+ */
+static void cookiemap_add(uint64_t key, uint32_t value)
+{
+ int cpu = smp_processor_id();
+ int cookiecode = cookiemap_code(key);
+ uint64_t *keys = &(per_cpu(cookie_keys, cpu)[cookiecode]);
+ uint32_t *values = &(per_cpu(cookie_values, cpu)[cookiecode]);
+ int x;
+
+ for (x = MAX_COLLISIONS - 1; x > 0; x--) {
+ keys[x] = keys[x - 1];
+ values[x] = values[x - 1];
+ }
+ keys[0] = key;
+ values[0] = value;
+}
+
+static void translate_buffer_write_ptr(int cpu, void *x)
+{
+ per_cpu(translate_buffer, cpu)[per_cpu(translate_buffer_write, cpu)++] = x;
+ per_cpu(translate_buffer_write, cpu) &= translate_buffer_mask;
+}
+
+static void *translate_buffer_read_ptr(int cpu)
+{
+ void *value = per_cpu(translate_buffer, cpu)[per_cpu(translate_buffer_read, cpu)++];
+ per_cpu(translate_buffer_read, cpu) &= translate_buffer_mask;
+ return value;
+}
+
+static void wq_cookie_handler(struct work_struct *unused)
+{
+ struct task_struct *task;
+ char *text;
+ int cpu = smp_processor_id();
+ unsigned int commit;
+
+ mutex_lock(&start_mutex);
+
+ if (gator_started != 0) {
+ commit = per_cpu(translate_buffer_write, cpu);
+ while (per_cpu(translate_buffer_read, cpu) != commit) {
+ task = (struct task_struct *)translate_buffer_read_ptr(cpu);
+ text = (char *)translate_buffer_read_ptr(cpu);
+ get_cookie(cpu, task, text, true);
+ }
+ }
+
+ mutex_unlock(&start_mutex);
+}
+
+static void app_process_wake_up_handler(unsigned long unused_data)
+{
+ // had to delay scheduling work as attempting to schedule work during the context switch is illegal in kernel versions 3.5 and greater
+ schedule_work(&cookie_work);
+}
+
+// Retrieve full name from proc/pid/cmdline for java processes on Android
+static int translate_app_process(const char **text, int cpu, struct task_struct *task, bool from_wq)
+{
+ void *maddr;
+ unsigned int len;
+ unsigned long addr;
+ struct mm_struct *mm;
+ struct page *page = NULL;
+ struct vm_area_struct *page_vma;
+ int bytes, offset, retval = 0, ptr;
+ char *buf = per_cpu(translate_text, cpu);
+
+ // Push work into a work queue if in atomic context as the kernel functions below might sleep
+ // Rely on the in_interrupt variable rather than in_irq() or in_interrupt() kernel functions, as the value of these functions seems
+ // inconsistent during a context switch between android/linux versions
+ if (!from_wq) {
+ // Check if already in buffer
+ ptr = per_cpu(translate_buffer_read, cpu);
+ while (ptr != per_cpu(translate_buffer_write, cpu)) {
+ if (per_cpu(translate_buffer, cpu)[ptr] == (void *)task)
+ goto out;
+ ptr = (ptr + 2) & translate_buffer_mask;
+ }
+
+ translate_buffer_write_ptr(cpu, (void *)task);
+ translate_buffer_write_ptr(cpu, (void *)*text);
+
+ mod_timer(&app_process_wake_up_timer, jiffies + 1);
+ goto out;
+ }
+
+ mm = get_task_mm(task);
+ if (!mm)
+ goto out;
+ if (!mm->arg_end)
+ goto outmm;
+ addr = mm->arg_start;
+ len = mm->arg_end - mm->arg_start;
+
+ if (len > TRANSLATE_SIZE)
+ len = TRANSLATE_SIZE;
+
+ down_read(&mm->mmap_sem);
+ while (len) {
+ if (get_user_pages(task, mm, addr, 1, 0, 1, &page, &page_vma) <= 0)
+ goto outsem;
+
+ maddr = kmap(page);
+ offset = addr & (PAGE_SIZE - 1);
+ bytes = len;
+ if (bytes > PAGE_SIZE - offset)
+ bytes = PAGE_SIZE - offset;
+
+ copy_from_user_page(page_vma, page, addr, buf, maddr + offset, bytes);
+
+ kunmap(page); // release page allocated by get_user_pages()
+ page_cache_release(page);
+
+ len -= bytes;
+ buf += bytes;
+ addr += bytes;
+
+ *text = per_cpu(translate_text, cpu);
+ retval = 1;
+ }
+
+ // On app_process startup, /proc/pid/cmdline is initially "zygote" then "<pre-initialized>" but changes after an initial startup period
+ if (strcmp(*text, "zygote") == 0 || strcmp(*text, "<pre-initialized>") == 0)
+ retval = 0;
+
+outsem:
+ up_read(&mm->mmap_sem);
+outmm:
+ mmput(mm);
+out:
+ return retval;
+}
+
+static inline uint32_t get_cookie(int cpu, struct task_struct *task, const char *text, bool from_wq)
+{
+ unsigned long flags, cookie;
+ uint64_t key;
+
+ key = gator_chksum_crc32(text);
+ key = (key << 32) | (uint32_t)task->tgid;
+
+ cookie = cookiemap_exists(key);
+ if (cookie) {
+ return cookie;
+ }
+
+ if (strcmp(text, "app_process") == 0) {
+ if (!translate_app_process(&text, cpu, task, from_wq))
+ return INVALID_COOKIE;
+ }
+
+ // Can be called from interrupt handler or from work queue or from scheduler trace
+ local_irq_save(flags);
+
+ cookie = INVALID_COOKIE;
+ if (marshal_cookie_header(text)) {
+ cookie = per_cpu(cookie_next_key, cpu) += nr_cpu_ids;
+ cookiemap_add(key, cookie);
+ marshal_cookie(cookie, text);
+ }
+
+ local_irq_restore(flags);
+
+ return cookie;
+}
+
+static int get_exec_cookie(int cpu, struct task_struct *task)
+{
+ struct mm_struct *mm = task->mm;
+ const char *text;
+
+ // kernel threads have no address space
+ if (!mm)
+ return NO_COOKIE;
+
+ if (task && task->mm && task->mm->exe_file) {
+ text = task->mm->exe_file->f_path.dentry->d_name.name;
+ return get_cookie(cpu, task, text, false);
+ }
+
+ return INVALID_COOKIE;
+}
+
+static unsigned long get_address_cookie(int cpu, struct task_struct *task, unsigned long addr, off_t *offset)
+{
+ unsigned long cookie = NO_COOKIE;
+ struct mm_struct *mm = task->mm;
+ struct vm_area_struct *vma;
+ const char *text;
+
+ if (!mm)
+ return cookie;
+
+ for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
+ if (addr < vma->vm_start || addr >= vma->vm_end)
+ continue;
+
+ if (vma->vm_file) {
+ text = vma->vm_file->f_path.dentry->d_name.name;
+ cookie = get_cookie(cpu, task, text, false);
+ *offset = (vma->vm_pgoff << PAGE_SHIFT) + addr - vma->vm_start;
+ } else {
+ /* must be an anonymous map */
+ *offset = addr;
+ }
+
+ break;
+ }
+
+ if (!vma)
+ cookie = INVALID_COOKIE;
+
+ return cookie;
+}
+
+static int cookies_initialize(void)
+{
+ uint32_t crc, poly;
+ int i, j, cpu, size, err = 0;
+
+ int translate_buffer_size = 512; // must be a power of 2
+ translate_buffer_mask = translate_buffer_size / sizeof(per_cpu(translate_buffer, 0)[0]) - 1;
+
+ for_each_present_cpu(cpu) {
+ per_cpu(cookie_next_key, cpu) = nr_cpu_ids + cpu;
+
+ size = COOKIEMAP_ENTRIES * MAX_COLLISIONS * sizeof(uint64_t);
+ per_cpu(cookie_keys, cpu) = (uint64_t *)kmalloc(size, GFP_KERNEL);
+ if (!per_cpu(cookie_keys, cpu)) {
+ err = -ENOMEM;
+ goto cookie_setup_error;
+ }
+ memset(per_cpu(cookie_keys, cpu), 0, size);
+
+ size = COOKIEMAP_ENTRIES * MAX_COLLISIONS * sizeof(uint32_t);
+ per_cpu(cookie_values, cpu) = (uint32_t *)kmalloc(size, GFP_KERNEL);
+ if (!per_cpu(cookie_values, cpu)) {
+ err = -ENOMEM;
+ goto cookie_setup_error;
+ }
+ memset(per_cpu(cookie_values, cpu), 0, size);
+
+ per_cpu(translate_buffer, cpu) = (void **)kmalloc(translate_buffer_size, GFP_KERNEL);
+ if (!per_cpu(translate_buffer, cpu)) {
+ err = -ENOMEM;
+ goto cookie_setup_error;
+ }
+
+ per_cpu(translate_buffer_write, cpu) = 0;
+ per_cpu(translate_buffer_read, cpu) = 0;
+
+ per_cpu(translate_text, cpu) = (char *)kmalloc(TRANSLATE_SIZE, GFP_KERNEL);
+ if (!per_cpu(translate_text, cpu)) {
+ err = -ENOMEM;
+ goto cookie_setup_error;
+ }
+ }
+
+ // build CRC32 table
+ poly = 0x04c11db7;
+ gator_crc32_table = (uint32_t *)kmalloc(256 * sizeof(uint32_t), GFP_KERNEL);
+ for (i = 0; i < 256; i++) {
+ crc = i;
+ for (j = 8; j > 0; j--) {
+ if (crc & 1) {
+ crc = (crc >> 1) ^ poly;
+ } else {
+ crc >>= 1;
+ }
+ }
+ gator_crc32_table[i] = crc;
+ }
+
+ setup_timer(&app_process_wake_up_timer, app_process_wake_up_handler, 0);
+
+cookie_setup_error:
+ return err;
+}
+
+static void cookies_release(void)
+{
+ int cpu;
+
+ for_each_present_cpu(cpu) {
+ kfree(per_cpu(cookie_keys, cpu));
+ per_cpu(cookie_keys, cpu) = NULL;
+
+ kfree(per_cpu(cookie_values, cpu));
+ per_cpu(cookie_values, cpu) = NULL;
+
+ kfree(per_cpu(translate_buffer, cpu));
+ per_cpu(translate_buffer, cpu) = NULL;
+ per_cpu(translate_buffer_read, cpu) = 0;
+ per_cpu(translate_buffer_write, cpu) = 0;
+
+ kfree(per_cpu(translate_text, cpu));
+ per_cpu(translate_text, cpu) = NULL;
+ }
+
+ del_timer_sync(&app_process_wake_up_timer);
+ kfree(gator_crc32_table);
+ gator_crc32_table = NULL;
+}
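
The cookie map above keys each binary name by a 64-bit value that combines a CRC32 of the name with the task's tgid; cookiemap_code() folds that key into one of COOKIEMAP_ENTRIES buckets, each holding MAX_COLLISIONS entries kept in most-recently-used order. A small stand-alone illustration of the bucket computation follows (user space, with a made-up CRC value and pid).

/* Stand-alone demonstration of the cookie bucket hashing used above. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define COOKIEMAP_ENTRIES 1024	/* must be power of 2 */
#define MAX_COLLISIONS 2

static uint32_t cookiemap_code(uint64_t value64)
{
	uint32_t value = (uint32_t)((value64 >> 32) + value64);
	uint32_t code = (value >> 24) & 0xff;

	code = code * 31 + ((value >> 16) & 0xff);
	code = code * 31 + ((value >> 8) & 0xff);
	code = code * 31 + ((value >> 0) & 0xff);
	code &= (COOKIEMAP_ENTRIES - 1);
	return code * MAX_COLLISIONS;	/* base index of the bucket's MAX_COLLISIONS slots */
}

int main(void)
{
	/* stand-in values: 0xdeadbeef for crc32 of the binary name, tgid 1234 */
	uint64_t key = ((uint64_t)0xdeadbeef << 32) | 1234u;

	printf("bucket base index: %" PRIu32 "\n", cookiemap_code(key));
	return 0;
}
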
diff --git a/drivers/gator/gator_events.sh b/drivers/gator/gator_events.sh
new file mode 100644
index 000000000000..5467dd6d17d6
--- /dev/null
+++ b/drivers/gator/gator_events.sh
@@ -0,0 +1,19 @@
+#!/bin/sh
+
+EVENTS=`grep gator_events_init *.c | sed 's/.\+gator_events_init(\(.\+\)).\+/\1/'`
+
+(
+ echo /\* This file is auto generated \*/
+ echo
+ for EVENT in $EVENTS; do
+ echo __weak int $EVENT\(void\)\;
+ done
+ echo
+ echo static int \(*gator_events_list[]\)\(void\) = {
+ for EVENT in $EVENTS; do
+ echo \ $EVENT,
+ done
+ echo }\;
+) > $1.tmp
+
+cmp -s $1 $1.tmp && rm $1.tmp || mv $1.tmp $1
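
gator_events.sh scans the gator sources for gator_events_init() registrations and regenerates gator_events.h, replacing it only when the contents actually change (the cmp/mv at the end) so an unchanged header does not needlessly dirty the gator_main.o dependency. For a build containing, say, only the ARMv6 and ARMv7 event sources below, the generated header would look roughly like this; the actual list depends on which gator_events_*.c files are present.

/* This file is auto generated */

__weak int gator_events_armv6_init(void);
__weak int gator_events_armv7_init(void);

static int (*gator_events_list[])(void) = {
 gator_events_armv6_init,
 gator_events_armv7_init,
};
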
diff --git a/drivers/gator/gator_events_armv6.c b/drivers/gator/gator_events_armv6.c
new file mode 100644
index 000000000000..ee36dd0772d6
--- /dev/null
+++ b/drivers/gator/gator_events_armv6.c
@@ -0,0 +1,244 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "gator.h"
+
+// gator_events_perf_pmu.c is used if perf is supported
+#if GATOR_NO_PERF_SUPPORT
+
+static const char *pmnc_name;
+
+/*
+ * Per-CPU PMCR
+ */
+#define PMCR_E (1 << 0) /* Enable */
+#define PMCR_P (1 << 1) /* Count reset */
+#define PMCR_C (1 << 2) /* Cycle counter reset */
+#define PMCR_OFL_PMN0 (1 << 8) /* Count reg 0 overflow */
+#define PMCR_OFL_PMN1 (1 << 9) /* Count reg 1 overflow */
+#define PMCR_OFL_CCNT (1 << 10) /* Cycle counter overflow */
+
+#define PMN0 0
+#define PMN1 1
+#define CCNT 2
+#define CNTMAX (CCNT+1)
+
+static int pmnc_counters = 0;
+static unsigned long pmnc_enabled[CNTMAX];
+static unsigned long pmnc_event[CNTMAX];
+static unsigned long pmnc_key[CNTMAX];
+
+static DEFINE_PER_CPU(int[CNTMAX * 2], perfCnt);
+
+static inline void armv6_pmnc_write(u32 val)
+{
+ /* upper 4bits and 7, 11 are write-as-0 */
+ val &= 0x0ffff77f;
+ asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r" (val));
+}
+
+static inline u32 armv6_pmnc_read(void)
+{
+ u32 val;
+ asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r" (val));
+ return val;
+}
+
+static void armv6_pmnc_reset_counter(unsigned int cnt)
+{
+ u32 val = 0;
+ switch (cnt) {
+ case CCNT:
+ asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r" (val));
+ break;
+ case PMN0:
+ asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r" (val));
+ break;
+ case PMN1:
+ asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r" (val));
+ break;
+ }
+}
+
+int gator_events_armv6_create_files(struct super_block *sb, struct dentry *root)
+{
+ struct dentry *dir;
+ int i;
+
+ pmnc_counters = 3;
+
+ for (i = PMN0; i <= CCNT; i++) {
+ char buf[40];
+ if (i == CCNT) {
+ snprintf(buf, sizeof buf, "ARM_%s_ccnt", pmnc_name);
+ } else {
+ snprintf(buf, sizeof buf, "ARM_%s_cnt%d", pmnc_name, i);
+ }
+ dir = gatorfs_mkdir(sb, root, buf);
+ if (!dir) {
+ return -1;
+ }
+ gatorfs_create_ulong(sb, dir, "enabled", &pmnc_enabled[i]);
+ gatorfs_create_ro_ulong(sb, dir, "key", &pmnc_key[i]);
+ if (i != CCNT) {
+ gatorfs_create_ulong(sb, dir, "event", &pmnc_event[i]);
+ }
+ }
+
+ return 0;
+}
+
+static int gator_events_armv6_online(int **buffer)
+{
+ unsigned int cnt, len = 0, cpu = smp_processor_id();
+ u32 pmnc;
+
+ if (armv6_pmnc_read() & PMCR_E) {
+ armv6_pmnc_write(armv6_pmnc_read() & ~PMCR_E);
+ }
+
+ /* initialize PMNC, reset overflow, D bit, C bit and P bit. */
+ armv6_pmnc_write(PMCR_OFL_PMN0 | PMCR_OFL_PMN1 | PMCR_OFL_CCNT |
+ PMCR_C | PMCR_P);
+
+ /* configure control register */
+ for (pmnc = 0, cnt = PMN0; cnt <= CCNT; cnt++) {
+ unsigned long event;
+
+ if (!pmnc_enabled[cnt])
+ continue;
+
+ event = pmnc_event[cnt] & 255;
+
+ // Set event (if destined for PMNx counters)
+ if (cnt == PMN0) {
+ pmnc |= event << 20;
+ } else if (cnt == PMN1) {
+ pmnc |= event << 12;
+ }
+
+ // Reset counter
+ armv6_pmnc_reset_counter(cnt);
+ }
+ armv6_pmnc_write(pmnc | PMCR_E);
+
+ // return zero values, no need to read as the counters were just reset
+ for (cnt = PMN0; cnt <= CCNT; cnt++) {
+ if (pmnc_enabled[cnt]) {
+ per_cpu(perfCnt, cpu)[len++] = pmnc_key[cnt];
+ per_cpu(perfCnt, cpu)[len++] = 0;
+ }
+ }
+
+ if (buffer)
+ *buffer = per_cpu(perfCnt, cpu);
+
+ return len;
+}
+
+static int gator_events_armv6_offline(int **buffer)
+{
+ unsigned int cnt;
+
+ armv6_pmnc_write(armv6_pmnc_read() & ~PMCR_E);
+ for (cnt = PMN0; cnt <= CCNT; cnt++) {
+ armv6_pmnc_reset_counter(cnt);
+ }
+
+ return 0;
+}
+
+static void gator_events_armv6_stop(void)
+{
+ unsigned int cnt;
+
+ for (cnt = PMN0; cnt <= CCNT; cnt++) {
+ pmnc_enabled[cnt] = 0;
+ pmnc_event[cnt] = 0;
+ }
+}
+
+static int gator_events_armv6_read(int **buffer)
+{
+ int cnt, len = 0;
+ int cpu = smp_processor_id();
+
+ // a context switch may occur before the online hotplug event, so check that the PMU is enabled
+ if (!(armv6_pmnc_read() & PMCR_E)) {
+ return 0;
+ }
+
+ for (cnt = PMN0; cnt <= CCNT; cnt++) {
+ if (pmnc_enabled[cnt]) {
+ u32 value = 0;
+ switch (cnt) {
+ case CCNT:
+ asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r" (value));
+ break;
+ case PMN0:
+ asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r" (value));
+ break;
+ case PMN1:
+ asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r" (value));
+ break;
+ }
+ armv6_pmnc_reset_counter(cnt);
+
+ per_cpu(perfCnt, cpu)[len++] = pmnc_key[cnt];
+ per_cpu(perfCnt, cpu)[len++] = value;
+ }
+ }
+
+ if (buffer)
+ *buffer = per_cpu(perfCnt, cpu);
+
+ return len;
+}
+
+static struct gator_interface gator_events_armv6_interface = {
+ .create_files = gator_events_armv6_create_files,
+ .stop = gator_events_armv6_stop,
+ .online = gator_events_armv6_online,
+ .offline = gator_events_armv6_offline,
+ .read = gator_events_armv6_read,
+};
+
+int gator_events_armv6_init(void)
+{
+ unsigned int cnt;
+
+ switch (gator_cpuid()) {
+ case ARM1136:
+ case ARM1156:
+ case ARM1176:
+ pmnc_name = "ARM11";
+ break;
+ case ARM11MPCORE:
+ pmnc_name = "ARM11MPCore";
+ break;
+ default:
+ return -1;
+ }
+
+ for (cnt = PMN0; cnt <= CCNT; cnt++) {
+ pmnc_enabled[cnt] = 0;
+ pmnc_event[cnt] = 0;
+ pmnc_key[cnt] = gator_events_get_key();
+ }
+
+ return gator_events_install(&gator_events_armv6_interface);
+}
+
+gator_events_init(gator_events_armv6_init);
+
+#else
+int gator_events_armv6_init(void)
+{
+ return -1;
+}
+#endif
diff --git a/drivers/gator/gator_events_armv7.c b/drivers/gator/gator_events_armv7.c
new file mode 100644
index 000000000000..212b17b5d29c
--- /dev/null
+++ b/drivers/gator/gator_events_armv7.c
@@ -0,0 +1,319 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Disabling interrupts
+ * Many of the functions below disable interrupts via local_irq_save(). This disabling of interrupts is done to prevent any race conditions
+ * between multiple entities (e.g. hrtimer interrupts and event based interrupts) calling the same functions. As accessing the pmu involves
+ * several steps (disable, select, read, enable), these steps must be performed atomically. Normal synchronization routines cannot be used
+ * as these functions are being called from interrupt context.
+ */
+
+#include "gator.h"
+
+// gator_events_perf_pmu.c is used if perf is supported
+#if GATOR_NO_PERF_SUPPORT
+
+// Per-CPU PMNC: config reg
+#define PMNC_E (1 << 0) /* Enable all counters */
+#define PMNC_P (1 << 1) /* Reset all counters */
+#define PMNC_C (1 << 2) /* Cycle counter reset */
+#define PMNC_MASK 0x3f /* Mask for writable bits */
+
+// ccnt reg
+#define CCNT_REG (1 << 31)
+
+#define CCNT 0
+#define CNT0 1
+#define CNTMAX (6+1)
+
+static const char *pmnc_name;
+static int pmnc_counters;
+
+static unsigned long pmnc_enabled[CNTMAX];
+static unsigned long pmnc_event[CNTMAX];
+static unsigned long pmnc_key[CNTMAX];
+
+static DEFINE_PER_CPU(int[CNTMAX * 2], perfCnt);
+
+inline void armv7_pmnc_write(u32 val)
+{
+ val &= PMNC_MASK;
+ asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (val));
+}
+
+inline u32 armv7_pmnc_read(void)
+{
+ u32 val;
+ asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
+ return val;
+}
+
+inline u32 armv7_ccnt_read(u32 reset_value)
+{
+ unsigned long flags;
+ u32 newval = -reset_value;
+ u32 den = CCNT_REG;
+ u32 val;
+
+ local_irq_save(flags);
+ asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (den)); // disable
+ asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); // read
+ asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (newval)); // new value
+ asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (den)); // enable
+ local_irq_restore(flags);
+
+ return val;
+}
+
+inline u32 armv7_cntn_read(unsigned int cnt, u32 reset_value)
+{
+ unsigned long flags;
+ u32 newval = -reset_value;
+ u32 sel = (cnt - CNT0);
+ u32 den = 1 << sel;
+ u32 oldval;
+
+ local_irq_save(flags);
+ asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (den)); // disable
+ asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (sel)); // select
+ asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (oldval)); // read
+ asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (newval)); // new value
+ asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (den)); // enable
+ local_irq_restore(flags);
+
+ return oldval;
+}
+
+static inline void armv7_pmnc_disable_interrupt(unsigned int cnt)
+{
+ u32 val = cnt ? (1 << (cnt - CNT0)) : (1 << 31);
+ asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
+}
+
+inline u32 armv7_pmnc_reset_interrupt(void)
+{
+ // Get and reset overflow status flags
+ u32 flags;
+ asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (flags));
+ flags &= 0x8000003f;
+ asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (flags));
+ return flags;
+}
+
+static inline u32 armv7_pmnc_enable_counter(unsigned int cnt)
+{
+ u32 val = cnt ? (1 << (cnt - CNT0)) : CCNT_REG;
+ asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
+ return cnt;
+}
+
+static inline u32 armv7_pmnc_disable_counter(unsigned int cnt)
+{
+ u32 val = cnt ? (1 << (cnt - CNT0)) : CCNT_REG;
+ asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
+ return cnt;
+}
+
+static inline int armv7_pmnc_select_counter(unsigned int cnt)
+{
+ u32 val = (cnt - CNT0);
+ asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
+ return cnt;
+}
+
+static inline void armv7_pmnc_write_evtsel(unsigned int cnt, u32 val)
+{
+ if (armv7_pmnc_select_counter(cnt) == cnt) {
+ asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
+ }
+}
+
+static int gator_events_armv7_create_files(struct super_block *sb, struct dentry *root)
+{
+ struct dentry *dir;
+ int i;
+
+ for (i = 0; i < pmnc_counters; i++) {
+ char buf[40];
+ if (i == 0) {
+ snprintf(buf, sizeof buf, "ARM_%s_ccnt", pmnc_name);
+ } else {
+ snprintf(buf, sizeof buf, "ARM_%s_cnt%d", pmnc_name, i - 1);
+ }
+ dir = gatorfs_mkdir(sb, root, buf);
+ if (!dir) {
+ return -1;
+ }
+ gatorfs_create_ulong(sb, dir, "enabled", &pmnc_enabled[i]);
+ gatorfs_create_ro_ulong(sb, dir, "key", &pmnc_key[i]);
+ if (i > 0) {
+ gatorfs_create_ulong(sb, dir, "event", &pmnc_event[i]);
+ }
+ }
+
+ return 0;
+}
+
+static int gator_events_armv7_online(int **buffer)
+{
+ unsigned int cnt, len = 0, cpu = smp_processor_id();
+
+ if (armv7_pmnc_read() & PMNC_E) {
+ armv7_pmnc_write(armv7_pmnc_read() & ~PMNC_E);
+ }
+
+ // Initialize & Reset PMNC: C bit and P bit
+ armv7_pmnc_write(PMNC_P | PMNC_C);
+
+ // Reset overflow flags
+ armv7_pmnc_reset_interrupt();
+
+ for (cnt = CCNT; cnt < CNTMAX; cnt++) {
+ unsigned long event;
+
+ if (!pmnc_enabled[cnt])
+ continue;
+
+ // Disable counter
+ armv7_pmnc_disable_counter(cnt);
+
+ event = pmnc_event[cnt] & 255;
+
+ // Set the event for PMNx counters; no event selection is needed for the cycle counter
+ if (cnt != CCNT)
+ armv7_pmnc_write_evtsel(cnt, event);
+
+ armv7_pmnc_disable_interrupt(cnt);
+
+ // Reset counter
+ cnt ? armv7_cntn_read(cnt, 0) : armv7_ccnt_read(0);
+
+ // Enable counter
+ armv7_pmnc_enable_counter(cnt);
+ }
+
+ // enable
+ armv7_pmnc_write(armv7_pmnc_read() | PMNC_E);
+
+ // return zero values, no need to read as the counters were just reset
+ for (cnt = 0; cnt < pmnc_counters; cnt++) {
+ if (pmnc_enabled[cnt]) {
+ per_cpu(perfCnt, cpu)[len++] = pmnc_key[cnt];
+ per_cpu(perfCnt, cpu)[len++] = 0;
+ }
+ }
+
+ if (buffer)
+ *buffer = per_cpu(perfCnt, cpu);
+
+ return len;
+}
+
+static int gator_events_armv7_offline(int **buffer)
+{
+ // disable all counters, including PMCCNTR; overflow IRQs will not be signaled
+ armv7_pmnc_write(armv7_pmnc_read() & ~PMNC_E);
+
+ return 0;
+}
+
+static void gator_events_armv7_stop(void)
+{
+ unsigned int cnt;
+
+ for (cnt = CCNT; cnt < CNTMAX; cnt++) {
+ pmnc_enabled[cnt] = 0;
+ pmnc_event[cnt] = 0;
+ }
+}
+
+static int gator_events_armv7_read(int **buffer)
+{
+ int cnt, len = 0;
+ int cpu = smp_processor_id();
+
+ // a context switch may occur before the online hotplug event, so we need to check that the PMU is enabled
+ if (!(armv7_pmnc_read() & PMNC_E)) {
+ return 0;
+ }
+
+ for (cnt = 0; cnt < pmnc_counters; cnt++) {
+ if (pmnc_enabled[cnt]) {
+ int value;
+ if (cnt == CCNT) {
+ value = armv7_ccnt_read(0);
+ } else {
+ value = armv7_cntn_read(cnt, 0);
+ }
+ per_cpu(perfCnt, cpu)[len++] = pmnc_key[cnt];
+ per_cpu(perfCnt, cpu)[len++] = value;
+ }
+ }
+
+ if (buffer)
+ *buffer = per_cpu(perfCnt, cpu);
+
+ return len;
+}
+
+static struct gator_interface gator_events_armv7_interface = {
+ .create_files = gator_events_armv7_create_files,
+ .stop = gator_events_armv7_stop,
+ .online = gator_events_armv7_online,
+ .offline = gator_events_armv7_offline,
+ .read = gator_events_armv7_read,
+};
+
+int gator_events_armv7_init(void)
+{
+ unsigned int cnt;
+
+ switch (gator_cpuid()) {
+ case CORTEX_A5:
+ pmnc_name = "Cortex-A5";
+ pmnc_counters = 2;
+ break;
+ case CORTEX_A7:
+ pmnc_name = "Cortex-A7";
+ pmnc_counters = 4;
+ break;
+ case CORTEX_A8:
+ pmnc_name = "Cortex-A8";
+ pmnc_counters = 4;
+ break;
+ case CORTEX_A9:
+ pmnc_name = "Cortex-A9";
+ pmnc_counters = 6;
+ break;
+ case CORTEX_A15:
+ pmnc_name = "Cortex-A15";
+ pmnc_counters = 6;
+ break;
+ default:
+ return -1;
+ }
+
+ pmnc_counters++; // CNT[n] + CCNT
+
+ for (cnt = CCNT; cnt < CNTMAX; cnt++) {
+ pmnc_enabled[cnt] = 0;
+ pmnc_event[cnt] = 0;
+ pmnc_key[cnt] = gator_events_get_key();
+ }
+
+ return gator_events_install(&gator_events_armv7_interface);
+}
+
+gator_events_init(gator_events_armv7_init);
+
+#else
+int gator_events_armv7_init(void)
+{
+ return -1;
+}
+#endif
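
Editor's note: in the ARMv7 code above, counter index 0 is the cycle counter and maps to bit 31 of the PMU enable-set/enable-clear registers, while event counters 1..N map to bits 0..N-1. Below is a small, hypothetical userspace sketch (not part of the patch) that reproduces the mask calculation used by armv7_pmnc_enable_counter() and armv7_pmnc_disable_counter():

/* Hypothetical userspace sketch, not part of the patch: the counter-to-bit
 * mapping used by armv7_pmnc_enable_counter()/armv7_pmnc_disable_counter(). */
#include <stdio.h>
#include <stdint.h>

#define CCNT_REG (1u << 31)	/* cycle counter bit */
#define CNT0 1			/* first event counter index */

static uint32_t counter_mask(unsigned int cnt)
{
	return cnt ? (1u << (cnt - CNT0)) : CCNT_REG;
}

int main(void)
{
	unsigned int cnt;

	for (cnt = 0; cnt <= 6; cnt++)
		printf("counter %u -> enable/disable mask 0x%08x\n",
		       cnt, counter_mask(cnt));
	return 0;
}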
diff --git a/drivers/gator/gator_events_block.c b/drivers/gator/gator_events_block.c
new file mode 100644
index 000000000000..f512b135e761
--- /dev/null
+++ b/drivers/gator/gator_events_block.c
@@ -0,0 +1,160 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include "gator.h"
+#include <trace/events/block.h>
+
+#define BLOCK_RQ_WR 0
+#define BLOCK_RQ_RD 1
+
+#define BLOCK_TOTAL (BLOCK_RQ_RD+1)
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
+#define EVENTWRITE REQ_RW
+#else
+#define EVENTWRITE REQ_WRITE
+#endif
+
+static ulong block_rq_wr_enabled;
+static ulong block_rq_rd_enabled;
+static ulong block_rq_wr_key;
+static ulong block_rq_rd_key;
+static atomic_t blockCnt[BLOCK_TOTAL];
+static int blockGet[BLOCK_TOTAL * 4];
+
+GATOR_DEFINE_PROBE(block_rq_complete, TP_PROTO(struct request_queue *q, struct request *rq))
+{
+ unsigned long flags;
+ int write, size;
+
+ if (!rq)
+ return;
+
+ write = rq->cmd_flags & EVENTWRITE;
+ size = rq->resid_len;
+
+ if (!size)
+ return;
+
+ // disable interrupts to synchronize with gator_events_block_read()
+ // no spinlock is needed since the counts are atomic_t
+ local_irq_save(flags);
+ if (write) {
+ if (block_rq_wr_enabled) {
+ atomic_add(size, &blockCnt[BLOCK_RQ_WR]);
+ }
+ } else {
+ if (block_rq_rd_enabled) {
+ atomic_add(size, &blockCnt[BLOCK_RQ_RD]);
+ }
+ }
+ local_irq_restore(flags);
+}
+
+static int gator_events_block_create_files(struct super_block *sb, struct dentry *root)
+{
+ struct dentry *dir;
+
+ /* block_complete_wr */
+ dir = gatorfs_mkdir(sb, root, "Linux_block_rq_wr");
+ if (!dir) {
+ return -1;
+ }
+ gatorfs_create_ulong(sb, dir, "enabled", &block_rq_wr_enabled);
+ gatorfs_create_ro_ulong(sb, dir, "key", &block_rq_wr_key);
+
+ /* block_complete_rd */
+ dir = gatorfs_mkdir(sb, root, "Linux_block_rq_rd");
+ if (!dir) {
+ return -1;
+ }
+ gatorfs_create_ulong(sb, dir, "enabled", &block_rq_rd_enabled);
+ gatorfs_create_ro_ulong(sb, dir, "key", &block_rq_rd_key);
+
+ return 0;
+}
+
+static int gator_events_block_start(void)
+{
+ // register tracepoints
+ if (block_rq_wr_enabled || block_rq_rd_enabled)
+ if (GATOR_REGISTER_TRACE(block_rq_complete))
+ goto fail_block_rq_exit;
+ pr_debug("gator: registered block event tracepoints\n");
+
+ return 0;
+
+ // report the error; no tracepoints were registered
+fail_block_rq_exit:
+ pr_err("gator: block event tracepoints failed to activate, please verify that tracepoints are enabled in the linux kernel\n");
+
+ return -1;
+}
+
+static void gator_events_block_stop(void)
+{
+ if (block_rq_wr_enabled || block_rq_rd_enabled)
+ GATOR_UNREGISTER_TRACE(block_rq_complete);
+ pr_debug("gator: unregistered block event tracepoints\n");
+
+ block_rq_wr_enabled = 0;
+ block_rq_rd_enabled = 0;
+}
+
+static int gator_events_block_read(int **buffer)
+{
+ int len, value, data = 0;
+
+ if (smp_processor_id() != 0) {
+ return 0;
+ }
+
+ len = 0;
+ if (block_rq_wr_enabled && (value = atomic_read(&blockCnt[BLOCK_RQ_WR])) > 0) {
+ atomic_sub(value, &blockCnt[BLOCK_RQ_WR]);
+ blockGet[len++] = block_rq_wr_key;
+ blockGet[len++] = 0; // indicates to Streamline that value bytes were written now, not since the last message
+ blockGet[len++] = block_rq_wr_key;
+ blockGet[len++] = value;
+ data += value;
+ }
+ if (block_rq_rd_enabled && (value = atomic_read(&blockCnt[BLOCK_RQ_RD])) > 0) {
+ atomic_sub(value, &blockCnt[BLOCK_RQ_RD]);
+ blockGet[len++] = block_rq_rd_key;
+ blockGet[len++] = 0; // indicates to Streamline that value bytes were read now, not since the last message
+ blockGet[len++] = block_rq_rd_key;
+ blockGet[len++] = value;
+ data += value;
+ }
+
+ if (buffer)
+ *buffer = blockGet;
+
+ return len;
+}
+
+static struct gator_interface gator_events_block_interface = {
+ .create_files = gator_events_block_create_files,
+ .start = gator_events_block_start,
+ .stop = gator_events_block_stop,
+ .read = gator_events_block_read,
+};
+
+int gator_events_block_init(void)
+{
+ block_rq_wr_enabled = 0;
+ block_rq_rd_enabled = 0;
+
+ block_rq_wr_key = gator_events_get_key();
+ block_rq_rd_key = gator_events_get_key();
+
+ return gator_events_install(&gator_events_block_interface);
+}
+
+gator_events_init(gator_events_block_init);
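
Editor's note: gator_events_block_read() above drains whatever the tracepoint handler has accumulated since the last sample by reading the atomic counter and subtracting only the amount it reported, so completions arriving concurrently are not lost. A hypothetical userspace sketch of the same accumulate-then-drain pattern using C11 atomics (function names invented; not part of the patch):

/* Hypothetical userspace sketch, not part of the patch: the accumulate-then-
 * drain pattern used by gator_events_block_read(), with C11 atomics. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int block_bytes;

static void on_request_complete(int bytes)	/* stands in for the tracepoint */
{
	atomic_fetch_add(&block_bytes, bytes);
}

static int drain(void)				/* stands in for read() */
{
	int value = atomic_load(&block_bytes);

	if (value > 0)
		atomic_fetch_sub(&block_bytes, value);	/* subtract only what was reported */
	return value;
}

int main(void)
{
	on_request_complete(4096);
	on_request_complete(512);
	printf("sample 1: %d bytes\n", drain());
	printf("sample 2: %d bytes\n", drain());
	return 0;
}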
diff --git a/drivers/gator/gator_events_irq.c b/drivers/gator/gator_events_irq.c
new file mode 100644
index 000000000000..1221372e330e
--- /dev/null
+++ b/drivers/gator/gator_events_irq.c
@@ -0,0 +1,189 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include "gator.h"
+#include <trace/events/irq.h>
+
+#define HARDIRQ 0
+#define SOFTIRQ 1
+#define TOTALIRQ (SOFTIRQ+1)
+
+static ulong hardirq_enabled;
+static ulong softirq_enabled;
+static ulong hardirq_key;
+static ulong softirq_key;
+static DEFINE_PER_CPU(int[TOTALIRQ], irqCnt);
+static DEFINE_PER_CPU(int[TOTALIRQ * 2], irqGet);
+
+GATOR_DEFINE_PROBE(irq_handler_exit,
+ TP_PROTO(int irq, struct irqaction *action, int ret))
+{
+ unsigned long flags;
+
+ // disable interrupts to synchronize with gator_events_irq_read()
+ // spinlocks not needed since percpu buffers are used
+ local_irq_save(flags);
+ per_cpu(irqCnt, smp_processor_id())[HARDIRQ]++;
+ local_irq_restore(flags);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37)
+GATOR_DEFINE_PROBE(softirq_exit, TP_PROTO(struct softirq_action *h, struct softirq_action *vec))
+#else
+GATOR_DEFINE_PROBE(softirq_exit, TP_PROTO(unsigned int vec_nr))
+#endif
+{
+ unsigned long flags;
+
+ // disable interrupts to synchronize with gator_events_irq_read()
+ // spinlocks not needed since percpu buffers are used
+ local_irq_save(flags);
+ per_cpu(irqCnt, smp_processor_id())[SOFTIRQ]++;
+ local_irq_restore(flags);
+}
+
+static int gator_events_irq_create_files(struct super_block *sb, struct dentry *root)
+{
+ struct dentry *dir;
+
+ /* irq */
+ dir = gatorfs_mkdir(sb, root, "Linux_irq_irq");
+ if (!dir) {
+ return -1;
+ }
+ gatorfs_create_ulong(sb, dir, "enabled", &hardirq_enabled);
+ gatorfs_create_ro_ulong(sb, dir, "key", &hardirq_key);
+
+ /* soft irq */
+ dir = gatorfs_mkdir(sb, root, "Linux_irq_softirq");
+ if (!dir) {
+ return -1;
+ }
+ gatorfs_create_ulong(sb, dir, "enabled", &softirq_enabled);
+ gatorfs_create_ro_ulong(sb, dir, "key", &softirq_key);
+
+ return 0;
+}
+
+static int gator_events_irq_online(int **buffer)
+{
+ int len = 0, cpu = smp_processor_id();
+ unsigned long flags; // not necessary as we are in interrupt context anyway, but doesn't hurt
+
+ // synchronization with the irq_exit functions is not necessary as the values are being reset
+ if (hardirq_enabled) {
+ local_irq_save(flags);
+ per_cpu(irqCnt, cpu)[HARDIRQ] = 0;
+ local_irq_restore(flags);
+ per_cpu(irqGet, cpu)[len++] = hardirq_key;
+ per_cpu(irqGet, cpu)[len++] = 0;
+ }
+
+ if (softirq_enabled) {
+ local_irq_save(flags);
+ per_cpu(irqCnt, cpu)[SOFTIRQ] = 0;
+ local_irq_restore(flags);
+ per_cpu(irqGet, cpu)[len++] = softirq_key;
+ per_cpu(irqGet, cpu)[len++] = 0;
+ }
+
+ if (buffer)
+ *buffer = per_cpu(irqGet, cpu);
+
+ return len;
+}
+
+static int gator_events_irq_start(void)
+{
+ // register tracepoints
+ if (hardirq_enabled)
+ if (GATOR_REGISTER_TRACE(irq_handler_exit))
+ goto fail_hardirq_exit;
+ if (softirq_enabled)
+ if (GATOR_REGISTER_TRACE(softirq_exit))
+ goto fail_softirq_exit;
+ pr_debug("gator: registered irq tracepoints\n");
+
+ return 0;
+
+ // unregister tracepoints on error
+fail_softirq_exit:
+ if (hardirq_enabled)
+ GATOR_UNREGISTER_TRACE(irq_handler_exit);
+fail_hardirq_exit:
+ pr_err("gator: irq tracepoints failed to activate, please verify that tracepoints are enabled in the linux kernel\n");
+
+ return -1;
+}
+
+static void gator_events_irq_stop(void)
+{
+ if (hardirq_enabled)
+ GATOR_UNREGISTER_TRACE(irq_handler_exit);
+ if (softirq_enabled)
+ GATOR_UNREGISTER_TRACE(softirq_exit);
+ pr_debug("gator: unregistered irq tracepoints\n");
+
+ hardirq_enabled = 0;
+ softirq_enabled = 0;
+}
+
+static int gator_events_irq_read(int **buffer)
+{
+ unsigned long flags; // not necessary as we are in interrupt context anyway, but doesn't hurt
+ int len, value;
+ int cpu = smp_processor_id();
+
+ len = 0;
+ if (hardirq_enabled) {
+ local_irq_save(flags);
+ value = per_cpu(irqCnt, cpu)[HARDIRQ];
+ per_cpu(irqCnt, cpu)[HARDIRQ] = 0;
+ local_irq_restore(flags);
+
+ per_cpu(irqGet, cpu)[len++] = hardirq_key;
+ per_cpu(irqGet, cpu)[len++] = value;
+ }
+
+ if (softirq_enabled) {
+ local_irq_save(flags);
+ value = per_cpu(irqCnt, cpu)[SOFTIRQ];
+ per_cpu(irqCnt, cpu)[SOFTIRQ] = 0;
+ local_irq_restore(flags);
+
+ per_cpu(irqGet, cpu)[len++] = softirq_key;
+ per_cpu(irqGet, cpu)[len++] = value;
+ }
+
+ if (buffer)
+ *buffer = per_cpu(irqGet, cpu);
+
+ return len;
+}
+
+static struct gator_interface gator_events_irq_interface = {
+ .create_files = gator_events_irq_create_files,
+ .online = gator_events_irq_online,
+ .start = gator_events_irq_start,
+ .stop = gator_events_irq_stop,
+ .read = gator_events_irq_read,
+};
+
+int gator_events_irq_init(void)
+{
+ hardirq_key = gator_events_get_key();
+ softirq_key = gator_events_get_key();
+
+ hardirq_enabled = 0;
+ softirq_enabled = 0;
+
+ return gator_events_install(&gator_events_irq_interface);
+}
+
+gator_events_init(gator_events_irq_init);
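
Editor's note: gator_events_irq_start() above registers the two tracepoints in order and, if the second registration fails, unwinds only the one that succeeded before reporting the error. A hypothetical stand-alone sketch of that goto-based unwind pattern (stand-in function names, not part of the patch):

/* Hypothetical sketch, not part of the patch: the register-then-unwind error
 * handling used by gator_events_irq_start(), with stand-in functions. */
#include <stdio.h>

static int register_hardirq_probe(void) { return 0; }	/* 0 on success */
static int register_softirq_probe(void) { return -1; }	/* simulate a failure */
static void unregister_hardirq_probe(void) { puts("unregistered hardirq probe"); }

static int start(void)
{
	if (register_hardirq_probe())
		goto fail_hardirq;
	if (register_softirq_probe())
		goto fail_softirq;
	return 0;

fail_softirq:
	unregister_hardirq_probe();	/* undo the step that did succeed */
fail_hardirq:
	puts("start failed");
	return -1;
}

int main(void)
{
	return start() ? 1 : 0;
}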
diff --git a/drivers/gator/gator_events_l2c-310.c b/drivers/gator/gator_events_l2c-310.c
new file mode 100644
index 000000000000..197af040f07c
--- /dev/null
+++ b/drivers/gator/gator_events_l2c-310.c
@@ -0,0 +1,177 @@
+/**
+ * l2c310 (L2 Cache Controller) event counters for gator
+ *
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <asm/hardware/cache-l2x0.h>
+
+#include "gator.h"
+
+#define L2C310_COUNTERS_NUM 2
+
+static struct {
+ unsigned long enabled;
+ unsigned long event;
+ unsigned long key;
+} l2c310_counters[L2C310_COUNTERS_NUM];
+
+static int l2c310_buffer[L2C310_COUNTERS_NUM * 2];
+
+static void __iomem *l2c310_base;
+
+static void gator_events_l2c310_reset_counters(void)
+{
+ u32 val = readl(l2c310_base + L2X0_EVENT_CNT_CTRL);
+
+ val |= ((1 << L2C310_COUNTERS_NUM) - 1) << 1;
+
+ writel(val, l2c310_base + L2X0_EVENT_CNT_CTRL);
+}
+
+static int gator_events_l2c310_create_files(struct super_block *sb,
+ struct dentry *root)
+{
+ int i;
+
+ for (i = 0; i < L2C310_COUNTERS_NUM; i++) {
+ char buf[16];
+ struct dentry *dir;
+
+ snprintf(buf, sizeof(buf), "L2C-310_cnt%d", i);
+ dir = gatorfs_mkdir(sb, root, buf);
+ if (WARN_ON(!dir))
+ return -1;
+ gatorfs_create_ulong(sb, dir, "enabled",
+ &l2c310_counters[i].enabled);
+ gatorfs_create_ulong(sb, dir, "event",
+ &l2c310_counters[i].event);
+ gatorfs_create_ro_ulong(sb, dir, "key",
+ &l2c310_counters[i].key);
+ }
+
+ return 0;
+}
+
+static int gator_events_l2c310_start(void)
+{
+ static const unsigned long l2x0_event_cntx_cfg[L2C310_COUNTERS_NUM] = {
+ L2X0_EVENT_CNT0_CFG,
+ L2X0_EVENT_CNT1_CFG,
+ };
+ int i;
+
+ /* Counter event sources */
+ for (i = 0; i < L2C310_COUNTERS_NUM; i++)
+ writel((l2c310_counters[i].event & 0xf) << 2,
+ l2c310_base + l2x0_event_cntx_cfg[i]);
+
+ gator_events_l2c310_reset_counters();
+
+ /* Event counter enable */
+ writel(1, l2c310_base + L2X0_EVENT_CNT_CTRL);
+
+ return 0;
+}
+
+static void gator_events_l2c310_stop(void)
+{
+ /* Event counter disable */
+ writel(0, l2c310_base + L2X0_EVENT_CNT_CTRL);
+}
+
+static int gator_events_l2c310_read(int **buffer)
+{
+ static const unsigned long l2x0_event_cntx_val[L2C310_COUNTERS_NUM] = {
+ L2X0_EVENT_CNT0_VAL,
+ L2X0_EVENT_CNT1_VAL,
+ };
+ int i;
+ int len = 0;
+
+ if (smp_processor_id())
+ return 0;
+
+ for (i = 0; i < L2C310_COUNTERS_NUM; i++) {
+ if (l2c310_counters[i].enabled) {
+ l2c310_buffer[len++] = l2c310_counters[i].key;
+ l2c310_buffer[len++] = readl(l2c310_base +
+ l2x0_event_cntx_val[i]);
+ }
+ }
+
+ /* l2c310 counters are saturating, not wrapping in case of overflow */
+ gator_events_l2c310_reset_counters();
+
+ if (buffer)
+ *buffer = l2c310_buffer;
+
+ return len;
+}
+
+static struct gator_interface gator_events_l2c310_interface = {
+ .create_files = gator_events_l2c310_create_files,
+ .start = gator_events_l2c310_start,
+ .stop = gator_events_l2c310_stop,
+ .read = gator_events_l2c310_read,
+};
+
+static void __maybe_unused gator_events_l2c310_probe(unsigned long phys)
+{
+ if (l2c310_base)
+ return;
+
+ l2c310_base = ioremap(phys, SZ_4K);
+ if (l2c310_base) {
+ u32 cache_id = readl(l2c310_base + L2X0_CACHE_ID);
+
+ if ((cache_id & 0xff0003c0) != 0x410000c0) {
+ iounmap(l2c310_base);
+ l2c310_base = NULL;
+ }
+ }
+}
+
+int gator_events_l2c310_init(void)
+{
+ int i;
+
+ if (gator_cpuid() != CORTEX_A5 && gator_cpuid() != CORTEX_A9)
+ return -1;
+
+#if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_ARCH_S5PV310)
+ gator_events_l2c310_probe(0x10502000);
+#endif
+#if defined(CONFIG_ARCH_OMAP4)
+ gator_events_l2c310_probe(0x48242000);
+#endif
+#if defined(CONFIG_ARCH_TEGRA)
+ gator_events_l2c310_probe(0x50043000);
+#endif
+#if defined(CONFIG_ARCH_U8500)
+ gator_events_l2c310_probe(0xa0412000);
+#endif
+#if defined(CONFIG_ARCH_VEXPRESS)
+ // A9x4 core tile (HBI-0191)
+ gator_events_l2c310_probe(0x1e00a000);
+ // New memory map tiles
+ gator_events_l2c310_probe(0x2c0f0000);
+#endif
+ if (!l2c310_base)
+ return -1;
+
+ for (i = 0; i < L2C310_COUNTERS_NUM; i++) {
+ l2c310_counters[i].enabled = 0;
+ l2c310_counters[i].key = gator_events_get_key();
+ }
+
+ return gator_events_install(&gator_events_l2c310_interface);
+}
+
+gator_events_init(gator_events_l2c310_init);
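
Editor's note: the L2C-310 driver above programs each event counter by writing the event number into bits [5:2] of the corresponding L2X0_EVENT_CNTx_CFG register, and it resets the saturating counters after every read. A hypothetical sketch of the configuration-value encoding (not part of the patch):

/* Hypothetical sketch, not part of the patch: the event-source encoding
 * written to the L2X0_EVENT_CNTx_CFG registers by gator_events_l2c310_start()
 * (event number placed in bits [5:2]). */
#include <stdio.h>
#include <stdint.h>

static uint32_t l2c310_cnt_cfg(unsigned long event)
{
	return (uint32_t)((event & 0xf) << 2);
}

int main(void)
{
	printf("event 0x3 -> CFG 0x%02x\n", l2c310_cnt_cfg(0x3));	/* 0x0c */
	printf("event 0xf -> CFG 0x%02x\n", l2c310_cnt_cfg(0xf));	/* 0x3c */
	return 0;
}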
diff --git a/drivers/gator/gator_events_mali_400.c b/drivers/gator/gator_events_mali_400.c
new file mode 100644
index 000000000000..4888b54e2008
--- /dev/null
+++ b/drivers/gator/gator_events_mali_400.c
@@ -0,0 +1,738 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "gator.h"
+
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/math64.h>
+
+#include "linux/mali_linux_trace.h"
+
+#include "gator_events_mali_common.h"
+#include "gator_events_mali_400.h"
+
+#if !defined(GATOR_MALI_INTERFACE_STYLE)
+/*
+ * At the moment, we only have users with the old style interface, so
+ * make our life easier by making it the default here; this takes
+ * precedence over the style-3 default selected further below.
+ */
+#define GATOR_MALI_INTERFACE_STYLE (2)
+#endif
+
+/*
+ * There are (currently) three different variants of the comms between gator and Mali:
+ * 1 (deprecated): No software counter support
+ * 2 (deprecated): Tracepoint called for each separate s/w counter value as it appears
+ * 3 (default): Single tracepoint for all s/w counters in a bundle.
+ * Interface style 3 is the default if no other is specified. 1 and 2 will be eliminated when
+ * existing Mali DDKs are upgraded.
+ */
+
+#if !defined(GATOR_MALI_INTERFACE_STYLE)
+#define GATOR_MALI_INTERFACE_STYLE (3)
+#endif
+
+/*
+ * List of possible actions allowing DDK to be controlled by Streamline.
+ * The following numbers are used by DDK to control the frame buffer dumping.
+ */
+#define FBDUMP_CONTROL_ENABLE (1)
+#define FBDUMP_CONTROL_RATE (2)
+#define SW_EVENTS_ENABLE (3)
+#define FBDUMP_CONTROL_RESIZE_FACTOR (4)
+
+/*
+ * Check that the MALI_SUPPORT define is set to one of the allowable device codes.
+ */
+#if (MALI_SUPPORT != MALI_400)
+#error MALI_SUPPORT set to an invalid device code: expecting MALI_400
+#endif
+
+/*
+ * The number of fragment processors. Update to suit your hardware implementation.
+ */
+#define NUM_FP_UNITS (4)
+
+enum counters {
+ /* Timeline activity */
+ ACTIVITY_VP = 0,
+ ACTIVITY_FP0,
+ ACTIVITY_FP1,
+ ACTIVITY_FP2,
+ ACTIVITY_FP3,
+
+ /* L2 cache counters */
+ COUNTER_L2_C0,
+ COUNTER_L2_C1,
+
+ /* Vertex processor counters */
+ COUNTER_VP_C0,
+ COUNTER_VP_C1,
+
+ /* Fragment processor counters */
+ COUNTER_FP0_C0,
+ COUNTER_FP0_C1,
+ COUNTER_FP1_C0,
+ COUNTER_FP1_C1,
+ COUNTER_FP2_C0,
+ COUNTER_FP2_C1,
+ COUNTER_FP3_C0,
+ COUNTER_FP3_C1,
+
+ /* EGL Software Counters */
+ COUNTER_EGL_BLIT_TIME,
+
+ /* GLES Software Counters */
+ COUNTER_GLES_DRAW_ELEMENTS_CALLS,
+ COUNTER_GLES_DRAW_ELEMENTS_NUM_INDICES,
+ COUNTER_GLES_DRAW_ELEMENTS_NUM_TRANSFORMED,
+ COUNTER_GLES_DRAW_ARRAYS_CALLS,
+ COUNTER_GLES_DRAW_ARRAYS_NUM_TRANSFORMED,
+ COUNTER_GLES_DRAW_POINTS,
+ COUNTER_GLES_DRAW_LINES,
+ COUNTER_GLES_DRAW_LINE_LOOP,
+ COUNTER_GLES_DRAW_LINE_STRIP,
+ COUNTER_GLES_DRAW_TRIANGLES,
+ COUNTER_GLES_DRAW_TRIANGLE_STRIP,
+ COUNTER_GLES_DRAW_TRIANGLE_FAN,
+ COUNTER_GLES_NON_VBO_DATA_COPY_TIME,
+ COUNTER_GLES_UNIFORM_BYTES_COPIED_TO_MALI,
+ COUNTER_GLES_UPLOAD_TEXTURE_TIME,
+ COUNTER_GLES_UPLOAD_VBO_TIME,
+ COUNTER_GLES_NUM_FLUSHES,
+ COUNTER_GLES_NUM_VSHADERS_GENERATED,
+ COUNTER_GLES_NUM_FSHADERS_GENERATED,
+ COUNTER_GLES_VSHADER_GEN_TIME,
+ COUNTER_GLES_FSHADER_GEN_TIME,
+ COUNTER_GLES_INPUT_TRIANGLES,
+ COUNTER_GLES_VXCACHE_HIT,
+ COUNTER_GLES_VXCACHE_MISS,
+ COUNTER_GLES_VXCACHE_COLLISION,
+ COUNTER_GLES_CULLED_TRIANGLES,
+ COUNTER_GLES_CULLED_LINES,
+ COUNTER_GLES_BACKFACE_TRIANGLES,
+ COUNTER_GLES_GBCLIP_TRIANGLES,
+ COUNTER_GLES_GBCLIP_LINES,
+ COUNTER_GLES_TRIANGLES_DRAWN,
+ COUNTER_GLES_DRAWCALL_TIME,
+ COUNTER_GLES_TRIANGLES_COUNT,
+ COUNTER_GLES_INDEPENDENT_TRIANGLES_COUNT,
+ COUNTER_GLES_STRIP_TRIANGLES_COUNT,
+ COUNTER_GLES_FAN_TRIANGLES_COUNT,
+ COUNTER_GLES_LINES_COUNT,
+ COUNTER_GLES_INDEPENDENT_LINES_COUNT,
+ COUNTER_GLES_STRIP_LINES_COUNT,
+ COUNTER_GLES_LOOP_LINES_COUNT,
+
+ COUNTER_FILMSTRIP,
+ COUNTER_FREQUENCY,
+ COUNTER_VOLTAGE,
+
+ NUMBER_OF_EVENTS
+};
+
+#define FIRST_ACTIVITY_EVENT ACTIVITY_VP
+#define LAST_ACTIVITY_EVENT ACTIVITY_FP3
+
+#define FIRST_HW_COUNTER COUNTER_L2_C0
+#define LAST_HW_COUNTER COUNTER_FP3_C1
+
+#define FIRST_SW_COUNTER COUNTER_EGL_BLIT_TIME
+#define LAST_SW_COUNTER COUNTER_GLES_LOOP_LINES_COUNT
+
+#define FIRST_SPECIAL_COUNTER COUNTER_FILMSTRIP
+#define LAST_SPECIAL_COUNTER COUNTER_VOLTAGE
+
+/* gatorfs variables for counter enable state,
+ * the event the counter should count and the
+ * 'key' (a unique id set by gatord and returned
+ * by gator.ko)
+ */
+static unsigned long counter_enabled[NUMBER_OF_EVENTS];
+static unsigned long counter_event[NUMBER_OF_EVENTS];
+static unsigned long counter_key[NUMBER_OF_EVENTS];
+
+/* The data we have recorded */
+static u32 counter_data[NUMBER_OF_EVENTS];
+/* The address to sample (or 0 if samples are sent to us) */
+static u32 *counter_address[NUMBER_OF_EVENTS];
+
+/* An array used to return the data we recorded
+ * as key,value pairs hence the *2
+ */
+static unsigned long counter_dump[NUMBER_OF_EVENTS * 2];
+static unsigned long counter_prev[NUMBER_OF_EVENTS];
+
+/* Note whether tracepoints have been registered */
+static int trace_registered;
+
+/**
+ * Calculate the difference and handle the overflow.
+ */
+static u32 get_difference(u32 start, u32 end)
+{
+ // Mali counters are unsigned 32 bit values that wrap; unsigned
+ // subtraction gives the correct difference even across a wrap.
+ return start - end;
+}
+
+/**
+ * Returns non-zero if the given counter ID is an activity counter.
+ */
+static inline int is_activity_counter(unsigned int event_id)
+{
+ return (event_id >= FIRST_ACTIVITY_EVENT &&
+ event_id <= LAST_ACTIVITY_EVENT);
+}
+
+/**
+ * Returns non-zero if the given counter ID is a hardware counter.
+ */
+static inline int is_hw_counter(unsigned int event_id)
+{
+ return (event_id >= FIRST_HW_COUNTER && event_id <= LAST_HW_COUNTER);
+}
+
+#if GATOR_MALI_INTERFACE_STYLE == 2
+/**
+ * Returns non-zero if the given counter ID is a software counter.
+ */
+static inline int is_sw_counter(unsigned int event_id)
+{
+ return (event_id >= FIRST_SW_COUNTER && event_id <= LAST_SW_COUNTER);
+}
+#endif
+
+#if GATOR_MALI_INTERFACE_STYLE == 2
+/*
+ * The Mali DDK uses s64 types to contain software counter values, but gator
+ * can only use a maximum of 32 bits. This function scales a software counter
+ * to an appropriate range.
+ */
+static u32 scale_sw_counter_value(unsigned int event_id, signed long long value)
+{
+ u32 scaled_value;
+
+ switch (event_id) {
+ case COUNTER_GLES_UPLOAD_TEXTURE_TIME:
+ case COUNTER_GLES_UPLOAD_VBO_TIME:
+ scaled_value = (u32)div_s64(value, 1000000);
+ break;
+ default:
+ scaled_value = (u32)value;
+ break;
+ }
+
+ return scaled_value;
+}
+#endif
+
+/* Probe for continuously sampled counter */
+#if 0 //WE_DONT_CURRENTLY_USE_THIS_SO_SUPPRESS_WARNING
+GATOR_DEFINE_PROBE(mali_sample_address, TP_PROTO(unsigned int event_id, u32 *addr))
+{
+ /* Turning on too many pr_debug statements in frequently called functions
+ * can cause stability and/or performance problems
+ */
+ //pr_debug("gator: mali_sample_address %d %d\n", event_id, addr);
+ if (event_id >= ACTIVITY_VP && event_id <= COUNTER_FP3_C1) {
+ counter_address[event_id] = addr;
+ }
+}
+#endif
+
+/* Probe for hardware counter events */
+GATOR_DEFINE_PROBE(mali_hw_counter, TP_PROTO(unsigned int event_id, unsigned int value))
+{
+ /* Turning on too many pr_debug statements in frequently called functions
+ * can cause stability and/or performance problems
+ */
+ //pr_debug("gator: mali_hw_counter %d %d\n", event_id, value);
+ if (is_hw_counter(event_id)) {
+ counter_data[event_id] = value;
+ }
+}
+
+#if GATOR_MALI_INTERFACE_STYLE == 2
+GATOR_DEFINE_PROBE(mali_sw_counter, TP_PROTO(unsigned int event_id, signed long long value))
+{
+ if (is_sw_counter(event_id)) {
+ counter_data[event_id] = scale_sw_counter_value(event_id, value);
+ }
+}
+#endif /* GATOR_MALI_INTERFACE_STYLE == 2 */
+
+#if GATOR_MALI_INTERFACE_STYLE == 3
+GATOR_DEFINE_PROBE(mali_sw_counters, TP_PROTO(pid_t pid, pid_t tid, void *surface_id, unsigned int *counters))
+{
+ u32 i;
+
+ /* Copy over the values for those counters which are enabled. */
+ for (i = FIRST_SW_COUNTER; i <= LAST_SW_COUNTER; i++) {
+ if (counter_enabled[i]) {
+ counter_data[i] = (u32)(counters[i - FIRST_SW_COUNTER]);
+ }
+ }
+}
+#endif /* GATOR_MALI_INTERFACE_STYLE == 3 */
+
+static int create_files(struct super_block *sb, struct dentry *root)
+{
+ struct dentry *dir;
+ int event;
+ int n_fp = NUM_FP_UNITS;
+
+ const char *mali_name = gator_mali_get_mali_name();
+
+ /*
+ * Create the filesystem entries for vertex processor, fragment processor
+ * and L2 cache timeline and hardware counters. Software counters get
+ * special handling after this block.
+ */
+ for (event = FIRST_ACTIVITY_EVENT; event <= LAST_HW_COUNTER; event++) {
+ char buf[40];
+
+ /*
+ * We can skip this event if it's for a non-existent fragment
+ * processor.
+ */
+ if (((event - ACTIVITY_FP0 >= n_fp) && (event < COUNTER_L2_C0))
+ || (((event - COUNTER_FP0_C0) / 2 >= n_fp))) {
+ continue;
+ }
+
+ /* Otherwise, set up the filesystem entry for this event. */
+ switch (event) {
+ case ACTIVITY_VP:
+ snprintf(buf, sizeof buf, "ARM_%s_VP_active", mali_name);
+ break;
+ case ACTIVITY_FP0:
+ case ACTIVITY_FP1:
+ case ACTIVITY_FP2:
+ case ACTIVITY_FP3:
+ snprintf(buf, sizeof buf, "ARM_%s_FP%d_active",
+ mali_name, event - ACTIVITY_FP0);
+ break;
+ case COUNTER_L2_C0:
+ case COUNTER_L2_C1:
+ snprintf(buf, sizeof buf, "ARM_%s_L2_cnt%d",
+ mali_name, event - COUNTER_L2_C0);
+ break;
+ case COUNTER_VP_C0:
+ case COUNTER_VP_C1:
+ snprintf(buf, sizeof buf, "ARM_%s_VP_cnt%d",
+ mali_name, event - COUNTER_VP_C0);
+ break;
+ case COUNTER_FP0_C0:
+ case COUNTER_FP0_C1:
+ case COUNTER_FP1_C0:
+ case COUNTER_FP1_C1:
+ case COUNTER_FP2_C0:
+ case COUNTER_FP2_C1:
+ case COUNTER_FP3_C0:
+ case COUNTER_FP3_C1:
+ snprintf(buf, sizeof buf, "ARM_%s_FP%d_cnt%d",
+ mali_name, (event - COUNTER_FP0_C0) / 2,
+ (event - COUNTER_FP0_C0) % 2);
+ break;
+ default:
+ printk("gator: trying to create file for non-existent counter (%d)\n", event);
+ continue;
+ }
+
+ dir = gatorfs_mkdir(sb, root, buf);
+
+ if (!dir) {
+ return -1;
+ }
+
+ gatorfs_create_ulong(sb, dir, "enabled", &counter_enabled[event]);
+
+ /* Only create an event node for counters that can change what they count */
+ if (event >= COUNTER_L2_C0) {
+ gatorfs_create_ulong(sb, dir, "event", &counter_event[event]);
+ }
+
+ gatorfs_create_ro_ulong(sb, dir, "key", &counter_key[event]);
+ }
+
+ /* Now set up the software counter entries */
+ for (event = FIRST_SW_COUNTER; event <= LAST_SW_COUNTER; event++) {
+ char buf[40];
+
+ snprintf(buf, sizeof(buf), "ARM_%s_SW_%d", mali_name, event);
+
+ dir = gatorfs_mkdir(sb, root, buf);
+
+ if (!dir) {
+ return -1;
+ }
+
+ gatorfs_create_ulong(sb, dir, "enabled", &counter_enabled[event]);
+ gatorfs_create_ro_ulong(sb, dir, "key", &counter_key[event]);
+ }
+
+ /* Now set up the special counter entries */
+ for (event = FIRST_SPECIAL_COUNTER; event <= LAST_SPECIAL_COUNTER; event++) {
+ char buf[40];
+
+ switch (event) {
+ case COUNTER_FILMSTRIP:
+ snprintf(buf, sizeof(buf), "ARM_%s_Filmstrip_cnt0", mali_name);
+ break;
+
+ case COUNTER_FREQUENCY:
+ snprintf(buf, sizeof(buf), "ARM_%s_Frequency", mali_name);
+ break;
+
+ case COUNTER_VOLTAGE:
+ snprintf(buf, sizeof(buf), "ARM_%s_Voltage", mali_name);
+ break;
+
+ default:
+ break;
+ }
+
+ dir = gatorfs_mkdir(sb, root, buf);
+
+ if (!dir) {
+ return -1;
+ }
+
+ gatorfs_create_ulong(sb, dir, "event", &counter_event[event]);
+ gatorfs_create_ulong(sb, dir, "enabled", &counter_enabled[event]);
+ gatorfs_create_ro_ulong(sb, dir, "key", &counter_key[event]);
+ }
+
+ return 0;
+}
+
+/*
+ * Local store for the get_counters entry point into the DDK.
+ * This is stored here since it is used very regularly.
+ */
+static mali_profiling_get_counters_type *mali_get_counters = NULL;
+
+/*
+ * Examine list of software counters and determine if any one is enabled.
+ * Returns 1 if any counter is enabled, 0 if none is.
+ */
+static int is_any_sw_counter_enabled(void)
+{
+ unsigned int i;
+
+ for (i = FIRST_SW_COUNTER; i <= LAST_SW_COUNTER; i++) {
+ if (counter_enabled[i]) {
+ return 1; /* At least one counter is enabled */
+ }
+ }
+
+ return 0; /* No s/w counters enabled */
+}
+
+static void mali_counter_initialize(void)
+{
+ /* If a Mali driver is present and exporting the appropriate symbol
+ * then we can request the HW counters (of which there are only 2)
+ * be configured to count the desired events
+ */
+ mali_profiling_set_event_type *mali_set_hw_event;
+ mali_osk_fb_control_set_type *mali_set_fb_event;
+ mali_profiling_control_type *mali_control;
+
+ mali_set_hw_event = symbol_get(_mali_profiling_set_event);
+
+ if (mali_set_hw_event) {
+ int i;
+
+ pr_debug("gator: mali online _mali_profiling_set_event symbol @ %p\n", mali_set_hw_event);
+
+ for (i = FIRST_HW_COUNTER; i <= LAST_HW_COUNTER; i++) {
+ if (counter_enabled[i]) {
+ mali_set_hw_event(i, counter_event[i]);
+ } else {
+ mali_set_hw_event(i, 0xFFFFFFFF);
+ }
+ }
+
+ symbol_put(_mali_profiling_set_event);
+ } else {
+ printk("gator: mali online _mali_profiling_set_event symbol not found\n");
+ }
+
+ mali_set_fb_event = symbol_get(_mali_osk_fb_control_set);
+
+ if (mali_set_fb_event) {
+ pr_debug("gator: mali online _mali_osk_fb_control_set symbol @ %p\n", mali_set_fb_event);
+
+ mali_set_fb_event(0, (counter_enabled[COUNTER_FILMSTRIP] ? 1 : 0));
+
+ symbol_put(_mali_osk_fb_control_set);
+ } else {
+ printk("gator: mali online _mali_osk_fb_control_set symbol not found\n");
+ }
+
+ /* Generic control interface for Mali DDK. */
+ mali_control = symbol_get(_mali_profiling_control);
+ if (mali_control) {
+ /* The event attribute in the XML file keeps the actual frame rate. */
+ unsigned int rate = counter_event[COUNTER_FILMSTRIP] & 0xff;
+ unsigned int resize_factor = (counter_event[COUNTER_FILMSTRIP] >> 8) & 0xff;
+
+ pr_debug("gator: mali online _mali_profiling_control symbol @ %p\n", mali_control);
+
+ mali_control(SW_EVENTS_ENABLE, (is_any_sw_counter_enabled() ? 1 : 0));
+ mali_control(FBDUMP_CONTROL_ENABLE, (counter_enabled[COUNTER_FILMSTRIP] ? 1 : 0));
+ mali_control(FBDUMP_CONTROL_RATE, rate);
+ mali_control(FBDUMP_CONTROL_RESIZE_FACTOR, resize_factor);
+
+ pr_debug("gator: sent mali_control enabled=%d, rate=%d\n", (counter_enabled[COUNTER_FILMSTRIP] ? 1 : 0), rate);
+
+ symbol_put(_mali_profiling_control);
+ } else {
+ printk("gator: mali online _mali_profiling_control symbol not found\n");
+ }
+
+ mali_get_counters = symbol_get(_mali_profiling_get_counters);
+ if (mali_get_counters) {
+ pr_debug("gator: mali online _mali_profiling_get_counters symbol @ %p\n", mali_get_counters);
+ counter_prev[COUNTER_L2_C0] = 0;
+ counter_prev[COUNTER_L2_C1] = 0;
+ } else {
+ pr_debug("gator WARNING: mali _mali_profiling_get_counters symbol not defined");
+ }
+}
+
+static void mali_counter_deinitialize(void)
+{
+ mali_profiling_set_event_type *mali_set_hw_event;
+ mali_osk_fb_control_set_type *mali_set_fb_event;
+ mali_profiling_control_type *mali_control;
+
+ mali_set_hw_event = symbol_get(_mali_profiling_set_event);
+
+ if (mali_set_hw_event) {
+ int i;
+
+ pr_debug("gator: mali offline _mali_profiling_set_event symbol @ %p\n", mali_set_hw_event);
+ for (i = FIRST_HW_COUNTER; i <= LAST_HW_COUNTER; i++) {
+ mali_set_hw_event(i, 0xFFFFFFFF);
+ }
+
+ symbol_put(_mali_profiling_set_event);
+ } else {
+ printk("gator: mali offline _mali_profiling_set_event symbol not found\n");
+ }
+
+ mali_set_fb_event = symbol_get(_mali_osk_fb_control_set);
+
+ if (mali_set_fb_event) {
+ pr_debug("gator: mali offline _mali_osk_fb_control_set symbol @ %p\n", mali_set_fb_event);
+
+ mali_set_fb_event(0, 0);
+
+ symbol_put(_mali_osk_fb_control_set);
+ } else {
+ printk("gator: mali offline _mali_osk_fb_control_set symbol not found\n");
+ }
+
+ /* Generic control interface for Mali DDK. */
+ mali_control = symbol_get(_mali_profiling_control);
+
+ if (mali_control) {
+ pr_debug("gator: mali offline _mali_profiling_control symbol @ %p\n", mali_set_fb_event);
+
+ /* Reset the DDK state - disable counter collection */
+ mali_control(SW_EVENTS_ENABLE, 0);
+
+ mali_control(FBDUMP_CONTROL_ENABLE, 0);
+
+ symbol_put(_mali_profiling_control);
+ } else {
+ printk("gator: mali offline _mali_profiling_control symbol not found\n");
+ }
+
+ if (mali_get_counters) {
+ symbol_put(_mali_profiling_get_counters);
+ }
+
+}
+
+static int start(void)
+{
+ // register tracepoints
+ if (GATOR_REGISTER_TRACE(mali_hw_counter)) {
+ printk("gator: mali_hw_counter tracepoint failed to activate\n");
+ return -1;
+ }
+
+#if GATOR_MALI_INTERFACE_STYLE == 1
+ /* None. */
+#elif GATOR_MALI_INTERFACE_STYLE == 2
+ /* For patched Mali driver. */
+ if (GATOR_REGISTER_TRACE(mali_sw_counter)) {
+ printk("gator: mali_sw_counter tracepoint failed to activate\n");
+ return -1;
+ }
+#elif GATOR_MALI_INTERFACE_STYLE == 3
+/* For Mali drivers with built-in support. */
+ if (GATOR_REGISTER_TRACE(mali_sw_counters)) {
+ printk("gator: mali_sw_counters tracepoint failed to activate\n");
+ return -1;
+ }
+#else
+#error Unknown GATOR_MALI_INTERFACE_STYLE option.
+#endif
+
+ trace_registered = 1;
+
+ mali_counter_initialize();
+ return 0;
+}
+
+static void stop(void)
+{
+ unsigned int cnt;
+
+ pr_debug("gator: mali stop\n");
+
+ if (trace_registered) {
+ GATOR_UNREGISTER_TRACE(mali_hw_counter);
+
+#if GATOR_MALI_INTERFACE_STYLE == 1
+ /* None. */
+#elif GATOR_MALI_INTERFACE_STYLE == 2
+ /* For patched Mali driver. */
+ GATOR_UNREGISTER_TRACE(mali_sw_counter);
+#elif GATOR_MALI_INTERFACE_STYLE == 3
+ /* For Mali drivers with built-in support. */
+ GATOR_UNREGISTER_TRACE(mali_sw_counters);
+#else
+#error Unknown GATOR_MALI_INTERFACE_STYLE option.
+#endif
+
+ pr_debug("gator: mali timeline tracepoint deactivated\n");
+
+ trace_registered = 0;
+ }
+
+ for (cnt = FIRST_ACTIVITY_EVENT; cnt < NUMBER_OF_EVENTS; cnt++) {
+ counter_enabled[cnt] = 0;
+ counter_event[cnt] = 0;
+ counter_address[cnt] = NULL;
+ }
+
+ mali_counter_deinitialize();
+}
+
+static int read(int **buffer)
+{
+ int cnt, len = 0;
+
+ if (smp_processor_id())
+ return 0;
+
+ // Read the L2 C0 and C1 here.
+ if (counter_enabled[COUNTER_L2_C0] || counter_enabled[COUNTER_L2_C1]) {
+ u32 src0 = 0;
+ u32 val0 = 0;
+ u32 src1 = 0;
+ u32 val1 = 0;
+
+ // Poke the driver to get the counter values
+ if (mali_get_counters) {
+ mali_get_counters(&src0, &val0, &src1, &val1);
+ }
+
+ if (counter_enabled[COUNTER_L2_C0]) {
+ // Calculate and save src0's counter val0
+ counter_dump[len++] = counter_key[COUNTER_L2_C0];
+ counter_dump[len++] = get_difference(val0, counter_prev[COUNTER_L2_C0]);
+ }
+
+ if (counter_enabled[COUNTER_L2_C1]) {
+ // Calculate and save src1's counter val1
+ counter_dump[len++] = counter_key[COUNTER_L2_C1];
+ counter_dump[len++] = get_difference(val1, counter_prev[COUNTER_L2_C1]);
+ }
+
+ // Save the previous values for the counters.
+ counter_prev[COUNTER_L2_C0] = val0;
+ counter_prev[COUNTER_L2_C1] = val1;
+ }
+
+ // Process other (non-timeline) counters.
+ for (cnt = COUNTER_VP_C0; cnt <= LAST_SW_COUNTER; cnt++) {
+ if (counter_enabled[cnt]) {
+ counter_dump[len++] = counter_key[cnt];
+ counter_dump[len++] = counter_data[cnt];
+
+ counter_data[cnt] = 0;
+ }
+ }
+
+ /*
+ * Add in the voltage and frequency counters if enabled. Note that, since these are
+ * actually passed as events, the counter value should not be cleared.
+ */
+ cnt = COUNTER_FREQUENCY;
+ if (counter_enabled[cnt]) {
+ counter_dump[len++] = counter_key[cnt];
+ counter_dump[len++] = counter_data[cnt];
+ }
+
+ cnt = COUNTER_VOLTAGE;
+ if (counter_enabled[cnt]) {
+ counter_dump[len++] = counter_key[cnt];
+ counter_dump[len++] = counter_data[cnt];
+ }
+
+ if (buffer) {
+ *buffer = (int *)counter_dump;
+ }
+
+ return len;
+}
+
+static struct gator_interface gator_events_mali_interface = {
+ .create_files = create_files,
+ .start = start,
+ .stop = stop,
+ .read = read,
+};
+
+extern void gator_events_mali_log_dvfs_event(unsigned int frequency_mhz, unsigned int voltage_mv)
+{
+ counter_data[COUNTER_FREQUENCY] = frequency_mhz;
+ counter_data[COUNTER_VOLTAGE] = voltage_mv;
+}
+
+int gator_events_mali_init(void)
+{
+ unsigned int cnt;
+
+ pr_debug("gator: mali init\n");
+
+ for (cnt = FIRST_ACTIVITY_EVENT; cnt < NUMBER_OF_EVENTS; cnt++) {
+ counter_enabled[cnt] = 0;
+ counter_event[cnt] = 0;
+ counter_key[cnt] = gator_events_get_key();
+ counter_address[cnt] = NULL;
+ counter_data[cnt] = 0;
+ }
+
+ trace_registered = 0;
+
+ return gator_events_install(&gator_events_mali_interface);
+}
+
+gator_events_init(gator_events_mali_init);
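
Editor's note: the Mali-400 L2 cache counters read via _mali_profiling_get_counters() are free-running 32-bit values, so read() above reports the delta from the previous sample through get_difference(), relying on unsigned wrap-around. A hypothetical worked sketch (not part of the patch):

/* Hypothetical sketch, not part of the patch: delta of a free-running 32-bit
 * counter across a wrap, as computed by get_difference() above. */
#include <stdio.h>
#include <stdint.h>

static uint32_t delta(uint32_t current, uint32_t previous)
{
	return current - previous;	/* unsigned subtraction handles the wrap */
}

int main(void)
{
	printf("%u\n", delta(150u, 100u));		/* 50 */
	printf("%u\n", delta(5u, 0xFFFFFFF0u));		/* 21: counter wrapped */
	return 0;
}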
diff --git a/drivers/gator/gator_events_mali_400.h b/drivers/gator/gator_events_mali_400.h
new file mode 100644
index 000000000000..a09757eb5024
--- /dev/null
+++ b/drivers/gator/gator_events_mali_400.h
@@ -0,0 +1,18 @@
+/**
+ * Copyright (C) ARM Limited 2011-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+/*
+ * Header contains common definitions for the Mali-400 processors.
+ */
+#if !defined(GATOR_EVENTS_MALI_400_H)
+#define GATOR_EVENTS_MALI_400_H
+
+extern void gator_events_mali_log_dvfs_event(unsigned int frequency_mhz, unsigned int voltage_mv);
+
+#endif /* GATOR_EVENTS_MALI_400_H */
diff --git a/drivers/gator/gator_events_mali_common.c b/drivers/gator/gator_events_mali_common.c
new file mode 100644
index 000000000000..2186eee9125c
--- /dev/null
+++ b/drivers/gator/gator_events_mali_common.c
@@ -0,0 +1,74 @@
+/**
+ * Copyright (C) ARM Limited 2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include "gator_events_mali_common.h"
+
+static u32 gator_mali_get_id(void)
+{
+ return MALI_SUPPORT;
+}
+
+extern const char *gator_mali_get_mali_name(void)
+{
+ u32 id = gator_mali_get_id();
+
+ switch (id) {
+ case MALI_T6xx:
+ return "Mali-T6xx";
+ case MALI_400:
+ return "Mali-400";
+ default:
+ pr_debug("gator: Mali-T6xx: unknown Mali ID (%d)\n", id);
+ return "Mali-Unknown";
+ }
+}
+
+extern int gator_mali_create_file_system(const char *mali_name, const char *event_name, struct super_block *sb, struct dentry *root, mali_counter *counter)
+{
+ int err;
+ char buf[255];
+ struct dentry *dir;
+
+ /* If the counter name is empty ignore it */
+ if (strlen(event_name) != 0) {
+ /* Set up the filesystem entry for this event. */
+ snprintf(buf, sizeof(buf), "ARM_%s_%s", mali_name, event_name);
+
+ dir = gatorfs_mkdir(sb, root, buf);
+
+ if (dir == NULL) {
+ pr_debug("gator: Mali-T6xx: error creating file system for: %s (%s)", event_name, buf);
+ return -1;
+ }
+
+ err = gatorfs_create_ulong(sb, dir, "enabled", &counter->enabled);
+ if (err != 0) {
+ pr_debug("gator: Mali-T6xx: error calling gatorfs_create_ulong for: %s (%s)", event_name, buf);
+ return -1;
+ }
+ err = gatorfs_create_ro_ulong(sb, dir, "key", &counter->key);
+ if (err != 0) {
+ pr_debug("gator: Mali-T6xx: error calling gatorfs_create_ro_ulong for: %s (%s)", event_name, buf);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+extern void gator_mali_initialise_counters(mali_counter counters[], unsigned int n_counters)
+{
+ unsigned int cnt;
+
+ for (cnt = 0; cnt < n_counters; cnt++) {
+ mali_counter *counter = &counters[cnt];
+
+ counter->key = gator_events_get_key();
+ counter->enabled = 0;
+ }
+}
diff --git a/drivers/gator/gator_events_mali_common.h b/drivers/gator/gator_events_mali_common.h
new file mode 100644
index 000000000000..8e33edf765a4
--- /dev/null
+++ b/drivers/gator/gator_events_mali_common.h
@@ -0,0 +1,88 @@
+/**
+ * Copyright (C) ARM Limited 2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#if !defined(GATOR_EVENTS_MALI_COMMON_H)
+#define GATOR_EVENTS_MALI_COMMON_H
+
+#include "gator.h"
+
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/math64.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+
+/* Device codes for each known GPU */
+#define MALI_400 (0x0b07)
+#define MALI_T6xx (0x0056)
+
+/* Ensure that MALI_SUPPORT has been defined to something. */
+#ifndef MALI_SUPPORT
+#error MALI_SUPPORT not defined!
+#endif
+
+/* Values for the supported activity event types */
+#define ACTIVITY_START (1)
+#define ACTIVITY_STOP (2)
+
+/*
+ * Runtime state information for a counter.
+ */
+typedef struct {
+ unsigned long key; /* 'key' (a unique id set by gatord and returned by gator.ko) */
+ unsigned long enabled; /* counter enable state */
+} mali_counter;
+
+/*
+ * Mali-400
+ */
+typedef void mali_profiling_set_event_type(unsigned int, unsigned int);
+typedef void mali_osk_fb_control_set_type(unsigned int, unsigned int);
+typedef void mali_profiling_control_type(unsigned int, unsigned int);
+typedef void mali_profiling_get_counters_type(unsigned int *, unsigned int *, unsigned int *, unsigned int *);
+
+/*
+ * Driver entry points for functions called directly by gator.
+ */
+extern void _mali_profiling_set_event(unsigned int, unsigned int);
+extern void _mali_osk_fb_control_set(unsigned int, unsigned int);
+extern void _mali_profiling_control(unsigned int, unsigned int);
+extern void _mali_profiling_get_counters(unsigned int *, unsigned int *, unsigned int *, unsigned int *);
+
+/**
+ * Returns a name which identifies the GPU type (eg Mali-400, Mali-T6xx).
+ *
+ * @return The name as a constant string.
+ */
+extern const char *gator_mali_get_mali_name(void);
+
+/**
+ * Creates a filesystem entry under /dev/gator relating to the specified event name and key, and
+ * associates the counter's key/enabled values with this entry point.
+ *
+ * @param mali_name A name related to the type of GPU, obtained from a call to gator_mali_get_mali_name()
+ * @param event_name The name of the event.
+ * @param sb Linux super block
+ * @param root Directory under which the entry will be created.
+ * @param counter Ptr to the counter state (key and enable flag) to associate with this entry.
+ *
+ * @return 0 if entry point was created, non-zero if not.
+ */
+extern int gator_mali_create_file_system(const char *mali_name, const char *event_name, struct super_block *sb, struct dentry *root, mali_counter *counter);
+
+/**
+ * Initializes the counter array.
+ *
+ * @param counters The array of counters
+ * @param n_counters The number of entries in each of the arrays.
+ */
+extern void gator_mali_initialise_counters(mali_counter counters[], unsigned int n_counters);
+
+#endif /* GATOR_EVENTS_MALI_COMMON_H */
diff --git a/drivers/gator/gator_events_mali_t6xx.c b/drivers/gator/gator_events_mali_t6xx.c
new file mode 100644
index 000000000000..1b3a53d84ac4
--- /dev/null
+++ b/drivers/gator/gator_events_mali_t6xx.c
@@ -0,0 +1,512 @@
+/**
+ * Copyright (C) ARM Limited 2011-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include "gator.h"
+
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/math64.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+
+#include "linux/mali_linux_trace.h"
+
+#include "gator_events_mali_common.h"
+
+/*
+ * Check that the MALI_SUPPORT define is set to one of the allowable device codes.
+ */
+#if (MALI_SUPPORT != MALI_T6xx)
+#error MALI_SUPPORT set to an invalid device code: expecting MALI_T6xx
+#endif
+
+/* Counters for Mali-T6xx:
+ *
+ * - Timeline events
+ * They are tracepoints, but instead of reporting a number they report a START/STOP event.
+ * They are reported in Streamline as number of microseconds while that particular counter was active.
+ *
+ * - SW counters
+ * They are tracepoints reporting a particular number.
+ * They are accumulated in sw_counter_data array until they are passed to Streamline, then they are zeroed.
+ *
+ * - Accumulators
+ * They are the same as software counters but their value is not zeroed.
+ */
+
+/* Timeline (start/stop) activity */
+static const char *timeline_event_names[] = {
+ "PM_SHADER_0",
+ "PM_SHADER_1",
+ "PM_SHADER_2",
+ "PM_SHADER_3",
+ "PM_SHADER_4",
+ "PM_SHADER_5",
+ "PM_SHADER_6",
+ "PM_SHADER_7",
+ "PM_TILER_0",
+ "PM_L2_0",
+ "PM_L2_1",
+ "MMU_AS_0",
+ "MMU_AS_1",
+ "MMU_AS_2",
+ "MMU_AS_3"
+};
+
+enum {
+ PM_SHADER_0 = 0,
+ PM_SHADER_1,
+ PM_SHADER_2,
+ PM_SHADER_3,
+ PM_SHADER_4,
+ PM_SHADER_5,
+ PM_SHADER_6,
+ PM_SHADER_7,
+ PM_TILER_0,
+ PM_L2_0,
+ PM_L2_1,
+ MMU_AS_0,
+ MMU_AS_1,
+ MMU_AS_2,
+ MMU_AS_3
+};
+/* The number of shader blocks in the enum above */
+#define NUM_PM_SHADER (8)
+
+/* Software Counters */
+static const char *software_counter_names[] = {
+ "MMU_PAGE_FAULT_0",
+ "MMU_PAGE_FAULT_1",
+ "MMU_PAGE_FAULT_2",
+ "MMU_PAGE_FAULT_3"
+};
+
+enum {
+ MMU_PAGE_FAULT_0 = 0,
+ MMU_PAGE_FAULT_1,
+ MMU_PAGE_FAULT_2,
+ MMU_PAGE_FAULT_3
+};
+
+/* Accumulators */
+static const char *accumulators_names[] = {
+ "TOTAL_ALLOC_PAGES"
+};
+
+enum {
+ TOTAL_ALLOC_PAGES = 0
+};
+
+#define FIRST_TIMELINE_EVENT (0)
+#define NUMBER_OF_TIMELINE_EVENTS (sizeof(timeline_event_names) / sizeof(timeline_event_names[0]))
+#define FIRST_SOFTWARE_COUNTER (FIRST_TIMELINE_EVENT + NUMBER_OF_TIMELINE_EVENTS)
+#define NUMBER_OF_SOFTWARE_COUNTERS (sizeof(software_counter_names) / sizeof(software_counter_names[0]))
+#define FIRST_ACCUMULATOR (FIRST_SOFTWARE_COUNTER + NUMBER_OF_SOFTWARE_COUNTERS)
+#define NUMBER_OF_ACCUMULATORS (sizeof(accumulators_names) / sizeof(accumulators_names[0]))
+#define NUMBER_OF_EVENTS (NUMBER_OF_TIMELINE_EVENTS + NUMBER_OF_SOFTWARE_COUNTERS + NUMBER_OF_ACCUMULATORS)
+
+/*
+ * gatorfs variables for counter enable state
+ */
+static mali_counter counters[NUMBER_OF_EVENTS];
+
+/* An array used to return the data we recorded
+ * as key,value pairs hence the *2
+ */
+static unsigned long counter_dump[NUMBER_OF_EVENTS * 2];
+
+/*
+ * Array holding counter start times (in ns) for each counter. A zero here
+ * indicates that the activity monitored by this counter is not running.
+ */
+static struct timespec timeline_event_starttime[NUMBER_OF_TIMELINE_EVENTS];
+
+/* The data we have recorded */
+static unsigned int timeline_data[NUMBER_OF_TIMELINE_EVENTS];
+static unsigned int sw_counter_data[NUMBER_OF_SOFTWARE_COUNTERS];
+static unsigned int accumulators_data[NUMBER_OF_ACCUMULATORS];
+
+/* Hold the previous timestamp, used to calculate the sample interval. */
+static struct timespec prev_timestamp;
+
+/**
+ * Returns the timespan (in microseconds) between the two specified timestamps.
+ *
+ * @param start Ptr to the start timestamp
+ * @param end Ptr to the end timestamp
+ *
+ * @return Number of microseconds between the two timestamps (can be negative if start follows end).
+ */
+static inline long get_duration_us(const struct timespec *start, const struct timespec *end)
+{
+ long event_duration_us = (end->tv_nsec - start->tv_nsec) / 1000;
+ event_duration_us += (end->tv_sec - start->tv_sec) * 1000000;
+
+ return event_duration_us;
+}
+
+static void record_timeline_event(unsigned int timeline_index, unsigned int type)
+{
+ struct timespec event_timestamp;
+ struct timespec *event_start = &timeline_event_starttime[timeline_index];
+
+ switch (type) {
+ case ACTIVITY_START:
+ /* Get the event time... */
+ getnstimeofday(&event_timestamp);
+
+ /* Remember the start time if the activity is not already started */
+ if (event_start->tv_sec == 0) {
+ *event_start = event_timestamp; /* Structure copy */
+ }
+ break;
+
+ case ACTIVITY_STOP:
+ /* if the counter was started... */
+ if (event_start->tv_sec != 0) {
+ /* Get the event time... */
+ getnstimeofday(&event_timestamp);
+
+ /* Accumulate the duration in us */
+ timeline_data[timeline_index] += get_duration_us(event_start, &event_timestamp);
+
+ /* Reset the start time to indicate the activity is stopped. */
+ event_start->tv_sec = 0;
+ }
+ break;
+
+ default:
+ /* Other activity events are ignored. */
+ break;
+ }
+}
+
+/*
+ * Documentation about the following tracepoints is in mali_linux_trace.h
+ */
+
+GATOR_DEFINE_PROBE(mali_pm_status, TP_PROTO(unsigned int event_id, unsigned long long value))
+{
+#define SHADER_PRESENT_LO 0x100 /* (RO) Shader core present bitmap, low word */
+#define TILER_PRESENT_LO 0x110 /* (RO) Tiler core present bitmap, low word */
+#define L2_PRESENT_LO 0x120 /* (RO) Level 2 cache present bitmap, low word */
+#define BIT_AT(value, pos) ((value >> pos) & 1)
+
+ static unsigned long long previous_shader_bitmask = 0;
+ static unsigned long long previous_tiler_bitmask = 0;
+ static unsigned long long previous_l2_bitmask = 0;
+
+ switch (event_id) {
+ case SHADER_PRESENT_LO:
+ {
+ unsigned long long changed_bitmask = previous_shader_bitmask ^ value;
+ int pos;
+
+ for (pos = 0; pos < NUM_PM_SHADER; ++pos) {
+ if (BIT_AT(changed_bitmask, pos)) {
+ record_timeline_event(PM_SHADER_0 + pos, BIT_AT(value, pos) ? ACTIVITY_START : ACTIVITY_STOP);
+ }
+ }
+
+ previous_shader_bitmask = value;
+ break;
+ }
+
+ case TILER_PRESENT_LO:
+ {
+ unsigned long long changed = previous_tiler_bitmask ^ value;
+
+ if (BIT_AT(changed, 0)) {
+ record_timeline_event(PM_TILER_0, BIT_AT(value, 0) ? ACTIVITY_START : ACTIVITY_STOP);
+ }
+
+ previous_tiler_bitmask = value;
+ break;
+ }
+
+ case L2_PRESENT_LO:
+ {
+ unsigned long long changed = previous_l2_bitmask ^ value;
+
+ if (BIT_AT(changed, 0)) {
+ record_timeline_event(PM_L2_0, BIT_AT(value, 0) ? ACTIVITY_START : ACTIVITY_STOP);
+ }
+ if (BIT_AT(changed, 4)) {
+ record_timeline_event(PM_L2_1, BIT_AT(value, 4) ? ACTIVITY_START : ACTIVITY_STOP);
+ }
+
+ previous_l2_bitmask = value;
+ break;
+ }
+
+ default:
+ /* No other blocks are supported at present */
+ break;
+ }
+
+#undef SHADER_PRESENT_LO
+#undef TILER_PRESENT_LO
+#undef L2_PRESENT_LO
+#undef BIT_AT
+}
+
+GATOR_DEFINE_PROBE(mali_page_fault_insert_pages, TP_PROTO(int event_id, unsigned long value))
+{
+ /* We add to the previous since we may receive many tracepoints in one sample period */
+ sw_counter_data[MMU_PAGE_FAULT_0 + event_id] += value;
+}
+
+GATOR_DEFINE_PROBE(mali_mmu_as_in_use, TP_PROTO(int event_id))
+{
+ record_timeline_event(MMU_AS_0 + event_id, ACTIVITY_START);
+}
+
+GATOR_DEFINE_PROBE(mali_mmu_as_released, TP_PROTO(int event_id))
+{
+ record_timeline_event(MMU_AS_0 + event_id, ACTIVITY_STOP);
+}
+
+GATOR_DEFINE_PROBE(mali_total_alloc_pages_change, TP_PROTO(long long int event_id))
+{
+ accumulators_data[TOTAL_ALLOC_PAGES] = event_id;
+}
+
+static int create_files(struct super_block *sb, struct dentry *root)
+{
+ int event;
+ /*
+ * Create the filesystem for all events
+ */
+ int counter_index = 0;
+ const char *mali_name = gator_mali_get_mali_name();
+
+ for (event = FIRST_TIMELINE_EVENT; event < FIRST_TIMELINE_EVENT + NUMBER_OF_TIMELINE_EVENTS; event++) {
+ if (gator_mali_create_file_system(mali_name, timeline_event_names[counter_index], sb, root, &counters[event]) != 0) {
+ return -1;
+ }
+ counter_index++;
+ }
+ counter_index = 0;
+ for (event = FIRST_SOFTWARE_COUNTER; event < FIRST_SOFTWARE_COUNTER + NUMBER_OF_SOFTWARE_COUNTERS; event++) {
+ if (gator_mali_create_file_system(mali_name, software_counter_names[counter_index], sb, root, &counters[event]) != 0) {
+ return -1;
+ }
+ counter_index++;
+ }
+ counter_index = 0;
+ for (event = FIRST_ACCUMULATOR; event < FIRST_ACCUMULATOR + NUMBER_OF_ACCUMULATORS; event++) {
+ if (gator_mali_create_file_system(mali_name, accumulators_names[counter_index], sb, root, &counters[event]) != 0) {
+ return -1;
+ }
+ counter_index++;
+ }
+
+ return 0;
+}
+
+static int register_tracepoints(void)
+{
+ if (GATOR_REGISTER_TRACE(mali_pm_status)) {
+ pr_debug("gator: Mali-T6xx: mali_pm_status tracepoint failed to activate\n");
+ return 0;
+ }
+
+ if (GATOR_REGISTER_TRACE(mali_page_fault_insert_pages)) {
+ pr_debug("gator: Mali-T6xx: mali_page_fault_insert_pages tracepoint failed to activate\n");
+ return 0;
+ }
+
+ if (GATOR_REGISTER_TRACE(mali_mmu_as_in_use)) {
+ pr_debug("gator: Mali-T6xx: mali_mmu_as_in_use tracepoint failed to activate\n");
+ return 0;
+ }
+
+ if (GATOR_REGISTER_TRACE(mali_mmu_as_released)) {
+ pr_debug("gator: Mali-T6xx: mali_mmu_as_released tracepoint failed to activate\n");
+ return 0;
+ }
+
+ if (GATOR_REGISTER_TRACE(mali_total_alloc_pages_change)) {
+ pr_debug("gator: Mali-T6xx: mali_total_alloc_pages_change tracepoint failed to activate\n");
+ return 0;
+ }
+
+ pr_debug("gator: Mali-T6xx: start\n");
+ pr_debug("gator: Mali-T6xx: mali_pm_status probe is at %p\n", &probe_mali_pm_status);
+ pr_debug("gator: Mali-T6xx: mali_page_fault_insert_pages probe is at %p\n", &probe_mali_page_fault_insert_pages);
+ pr_debug("gator: Mali-T6xx: mali_mmu_as_in_use probe is at %p\n", &probe_mali_mmu_as_in_use);
+ pr_debug("gator: Mali-T6xx: mali_mmu_as_released probe is at %p\n", &probe_mali_mmu_as_released);
+ pr_debug("gator: Mali-T6xx: mali_total_alloc_pages_change probe is at %p\n", &probe_mali_total_alloc_pages_change);
+
+ return 1;
+}
+
+static int start(void)
+{
+ unsigned int cnt;
+
+ /* Clean all data for the next capture */
+ for (cnt = 0; cnt < NUMBER_OF_TIMELINE_EVENTS; cnt++) {
+ timeline_event_starttime[cnt].tv_sec = timeline_event_starttime[cnt].tv_nsec = 0;
+ timeline_data[cnt] = 0;
+ }
+
+ for (cnt = 0; cnt < NUMBER_OF_SOFTWARE_COUNTERS; cnt++) {
+ sw_counter_data[cnt] = 0;
+ }
+
+ for (cnt = 0; cnt < NUMBER_OF_ACCUMULATORS; cnt++) {
+ accumulators_data[cnt] = 0;
+ }
+
+ /* Register tracepoints */
+ if (register_tracepoints() == 0) {
+ return -1;
+ }
+
+ /*
+ * Set the first timestamp for calculating the sample interval. The first interval could be quite long,
+ * since it will be the time between 'start' and the first 'read'.
+ * This means that timeline values will be divided by a big number for the first sample.
+ */
+ getnstimeofday(&prev_timestamp);
+
+ return 0;
+}
+
+static void stop(void)
+{
+ pr_debug("gator: Mali-T6xx: stop\n");
+
+ /*
+ * It is safe to unregister traces even if they were not successfully
+ * registered, so no need to check.
+ */
+ GATOR_UNREGISTER_TRACE(mali_pm_status);
+ pr_debug("gator: Mali-T6xx: mali_pm_status tracepoint deactivated\n");
+
+ GATOR_UNREGISTER_TRACE(mali_page_fault_insert_pages);
+ pr_debug("gator: Mali-T6xx: mali_page_fault_insert_pages tracepoint deactivated\n");
+
+ GATOR_UNREGISTER_TRACE(mali_mmu_as_in_use);
+ pr_debug("gator: Mali-T6xx: mali_mmu_as_in_use tracepoint deactivated\n");
+
+ GATOR_UNREGISTER_TRACE(mali_mmu_as_released);
+ pr_debug("gator: Mali-T6xx: mali_mmu_as_released tracepoint deactivated\n");
+
+ GATOR_UNREGISTER_TRACE(mali_total_alloc_pages_change);
+ pr_debug("gator: Mali-T6xx: mali_total_alloc_pages_change tracepoint deactivated\n");
+}
+
+static int read(int **buffer)
+{
+ int cnt;
+ int len = 0;
+ long sample_interval_us = 0;
+ struct timespec read_timestamp;
+
+ if (smp_processor_id() != 0) {
+ return 0;
+ }
+
+ /* Get the start of this sample period. */
+ getnstimeofday(&read_timestamp);
+
+ /*
+ * Calculate the sample interval if the previous sample time is valid.
+ * We use tv_sec since it will not be 0.
+ */
+ if (prev_timestamp.tv_sec != 0) {
+ sample_interval_us = get_duration_us(&prev_timestamp, &read_timestamp);
+ }
+
+ /* Structure copy. Update the previous timestamp. */
+ prev_timestamp = read_timestamp;
+
+ /*
+ * Report the timeline counters (ACTIVITY_START/STOP)
+ */
+ for (cnt = FIRST_TIMELINE_EVENT; cnt < (FIRST_TIMELINE_EVENT + NUMBER_OF_TIMELINE_EVENTS); cnt++) {
+ mali_counter *counter = &counters[cnt];
+ if (counter->enabled) {
+ const int index = cnt - FIRST_TIMELINE_EVENT;
+ unsigned int value;
+
+			/* If the activity is still running, add the time it has run up to the end of this sample
+			 * period onto the count, then reset its start time to the start of the next sample period. */
+ if (timeline_event_starttime[index].tv_sec != 0) {
+ const long event_duration = get_duration_us(&timeline_event_starttime[index], &read_timestamp);
+ timeline_data[index] += event_duration;
+ timeline_event_starttime[index] = read_timestamp; /* Activity is still running. */
+ }
+
+ if (sample_interval_us != 0) {
+ /* Convert the counter to a percent-of-sample value */
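+				/* For example, an activity active for 5000us of a 10000us sample reports a value of 50. */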
+ value = (timeline_data[index] * 100) / sample_interval_us;
+ } else {
+ pr_debug("gator: Mali-T6xx: setting value to zero\n");
+ value = 0;
+ }
+
+ /* Clear the counter value ready for the next sample. */
+ timeline_data[index] = 0;
+
+ counter_dump[len++] = counter->key;
+ counter_dump[len++] = value;
+ }
+ }
+
+ /* Report the software counters */
+ for (cnt = FIRST_SOFTWARE_COUNTER; cnt < (FIRST_SOFTWARE_COUNTER + NUMBER_OF_SOFTWARE_COUNTERS); cnt++) {
+ const mali_counter *counter = &counters[cnt];
+ if (counter->enabled) {
+ const int index = cnt - FIRST_SOFTWARE_COUNTER;
+ counter_dump[len++] = counter->key;
+ counter_dump[len++] = sw_counter_data[index];
+ /* Set the value to zero for the next time */
+ sw_counter_data[index] = 0;
+ }
+ }
+
+ /* Report the accumulators */
+ for (cnt = FIRST_ACCUMULATOR; cnt < (FIRST_ACCUMULATOR + NUMBER_OF_ACCUMULATORS); cnt++) {
+ const mali_counter *counter = &counters[cnt];
+ if (counter->enabled) {
+ const int index = cnt - FIRST_ACCUMULATOR;
+ counter_dump[len++] = counter->key;
+ counter_dump[len++] = accumulators_data[index];
+ /* Do not zero the accumulator */
+ }
+ }
+
+ /* Update the buffer */
+ if (buffer) {
+ *buffer = (int *)counter_dump;
+ }
+
+ return len;
+}
+
+static struct gator_interface gator_events_mali_t6xx_interface = {
+ .create_files = create_files,
+ .start = start,
+ .stop = stop,
+ .read = read
+};
+
+extern int gator_events_mali_t6xx_init(void)
+{
+ pr_debug("gator: Mali-T6xx: sw_counters init\n");
+
+ gator_mali_initialise_counters(counters, NUMBER_OF_EVENTS);
+
+ return gator_events_install(&gator_events_mali_t6xx_interface);
+}
+
+gator_events_init(gator_events_mali_t6xx_init);
diff --git a/drivers/gator/gator_events_mali_t6xx_hw.c b/drivers/gator/gator_events_mali_t6xx_hw.c
new file mode 100644
index 000000000000..72498c8fdbc6
--- /dev/null
+++ b/drivers/gator/gator_events_mali_t6xx_hw.c
@@ -0,0 +1,722 @@
+/**
+ * Copyright (C) ARM Limited 2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include "gator.h"
+
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/math64.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+
+/* Mali T6xx DDK includes */
+#include "linux/mali_linux_trace.h"
+#include "kbase/src/common/mali_kbase.h"
+#include "kbase/src/linux/mali_kbase_mem_linux.h"
+
+#include "gator_events_mali_common.h"
+
+/*
+ * Mali-T6xx
+ */
+typedef struct kbase_device *kbase_find_device_type(int);
+typedef kbase_context *kbase_create_context_type(kbase_device *);
+typedef void kbase_destroy_context_type(kbase_context *);
+typedef void *kbase_va_alloc_type(kbase_context *, u32);
+typedef void kbase_va_free_type(kbase_context *, void *);
+typedef mali_error kbase_instr_hwcnt_enable_type(kbase_context *, kbase_uk_hwcnt_setup *);
+typedef mali_error kbase_instr_hwcnt_disable_type(kbase_context *);
+typedef mali_error kbase_instr_hwcnt_clear_type(kbase_context *);
+typedef mali_error kbase_instr_hwcnt_dump_irq_type(kbase_context *);
+typedef mali_bool kbase_instr_hwcnt_dump_complete_type(kbase_context *, mali_bool *);
+
+static kbase_find_device_type *kbase_find_device_symbol;
+static kbase_create_context_type *kbase_create_context_symbol;
+static kbase_va_alloc_type *kbase_va_alloc_symbol;
+static kbase_instr_hwcnt_enable_type *kbase_instr_hwcnt_enable_symbol;
+static kbase_instr_hwcnt_clear_type *kbase_instr_hwcnt_clear_symbol;
+static kbase_instr_hwcnt_dump_irq_type *kbase_instr_hwcnt_dump_irq_symbol;
+static kbase_instr_hwcnt_dump_complete_type *kbase_instr_hwcnt_dump_complete_symbol;
+static kbase_instr_hwcnt_disable_type *kbase_instr_hwcnt_disable_symbol;
+static kbase_va_free_type *kbase_va_free_symbol;
+static kbase_destroy_context_type *kbase_destroy_context_symbol;
+
+/** The interval between reads, in ns.
+ *
+ * A 'hold off for 1ms after the last read' was originally introduced to resolve MIDBASE-2178 and MALINE-724.
+ * However, the 1ms hold-off is too long when no context switches occur, because it races with the tick of
+ * the read clock in gator, which is also 1ms. If the current read is 'missed', the counter values are
+ * effectively 'spread' over 2ms and the values seen are half what they should be (since Streamline averages
+ * over the sample time). In the presence of context switches this spread can vary and markedly affect the
+ * counters. Currently there is no 'proper' solution to this, but empirically we have found that reducing
+ * the minimum read interval to 950us makes the counts much more stable.
+ */
+static const int READ_INTERVAL_NSEC = 950000;
+
+#if GATOR_TEST
+#include "gator_events_mali_t6xx_hw_test.c"
+#endif
+
+/* Blocks for HW counters */
+enum {
+ JM_BLOCK = 0,
+ TILER_BLOCK,
+ SHADER_BLOCK,
+ MMU_BLOCK
+};
+
+/* Counters for Mali-T6xx:
+ *
+ * - HW counters, 4 blocks
+ * For HW counters we need strings to create /dev/gator/events files.
+ * Enums are not needed because the position of the HW counter name in the array is the same
+ * as the position of the corresponding value in the block of memory received from the driver.
+ * HW counters are requested by calculating a bitmask, which is then passed to the driver.
+ * Every millisecond a HW counter dump is requested; once the previous dump has completed, the counters are read.
+ */
+
+/* Hardware Counters */
+static const char *const hardware_counter_names[] = {
+ /* Job Manager */
+ "",
+ "",
+ "",
+ "",
+ "MESSAGES_SENT",
+ "MESSAGES_RECEIVED",
+ "GPU_ACTIVE", /* 6 */
+ "IRQ_ACTIVE",
+ "JS0_JOBS",
+ "JS0_TASKS",
+ "JS0_ACTIVE",
+ "",
+ "JS0_WAIT_READ",
+ "JS0_WAIT_ISSUE",
+ "JS0_WAIT_DEPEND",
+ "JS0_WAIT_FINISH",
+ "JS1_JOBS",
+ "JS1_TASKS",
+ "JS1_ACTIVE",
+ "",
+ "JS1_WAIT_READ",
+ "JS1_WAIT_ISSUE",
+ "JS1_WAIT_DEPEND",
+ "JS1_WAIT_FINISH",
+ "JS2_JOBS",
+ "JS2_TASKS",
+ "JS2_ACTIVE",
+ "",
+ "JS2_WAIT_READ",
+ "JS2_WAIT_ISSUE",
+ "JS2_WAIT_DEPEND",
+ "JS2_WAIT_FINISH",
+ "JS3_JOBS",
+ "JS3_TASKS",
+ "JS3_ACTIVE",
+ "",
+ "JS3_WAIT_READ",
+ "JS3_WAIT_ISSUE",
+ "JS3_WAIT_DEPEND",
+ "JS3_WAIT_FINISH",
+ "JS4_JOBS",
+ "JS4_TASKS",
+ "JS4_ACTIVE",
+ "",
+ "JS4_WAIT_READ",
+ "JS4_WAIT_ISSUE",
+ "JS4_WAIT_DEPEND",
+ "JS4_WAIT_FINISH",
+ "JS5_JOBS",
+ "JS5_TASKS",
+ "JS5_ACTIVE",
+ "",
+ "JS5_WAIT_READ",
+ "JS5_WAIT_ISSUE",
+ "JS5_WAIT_DEPEND",
+ "JS5_WAIT_FINISH",
+ "JS6_JOBS",
+ "JS6_TASKS",
+ "JS6_ACTIVE",
+ "",
+ "JS6_WAIT_READ",
+ "JS6_WAIT_ISSUE",
+ "JS6_WAIT_DEPEND",
+ "JS6_WAIT_FINISH",
+
+	/* Tiler */
+ "",
+ "",
+ "",
+ "JOBS_PROCESSED",
+ "TRIANGLES",
+ "QUADS",
+ "POLYGONS",
+ "POINTS",
+ "LINES",
+ "VCACHE_HIT",
+ "VCACHE_MISS",
+ "FRONT_FACING",
+ "BACK_FACING",
+ "PRIM_VISIBLE",
+ "PRIM_CULLED",
+ "PRIM_CLIPPED",
+ "LEVEL0",
+ "LEVEL1",
+ "LEVEL2",
+ "LEVEL3",
+ "LEVEL4",
+ "LEVEL5",
+ "LEVEL6",
+ "LEVEL7",
+ "COMMAND_1",
+ "COMMAND_2",
+ "COMMAND_3",
+ "COMMAND_4",
+ "COMMAND_4_7",
+ "COMMAND_8_15",
+ "COMMAND_16_63",
+ "COMMAND_64",
+ "COMPRESS_IN",
+ "COMPRESS_OUT",
+ "COMPRESS_FLUSH",
+ "TIMESTAMPS",
+ "PCACHE_HIT",
+ "PCACHE_MISS",
+ "PCACHE_LINE",
+ "PCACHE_STALL",
+ "WRBUF_HIT",
+ "WRBUF_MISS",
+ "WRBUF_LINE",
+ "WRBUF_PARTIAL",
+ "WRBUF_STALL",
+ "ACTIVE",
+ "LOADING_DESC",
+ "INDEX_WAIT",
+ "INDEX_RANGE_WAIT",
+ "VERTEX_WAIT",
+ "PCACHE_WAIT",
+ "WRBUF_WAIT",
+ "BUS_READ",
+ "BUS_WRITE",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "UTLB_STALL",
+ "UTLB_REPLAY_MISS",
+ "UTLB_REPLAY_FULL",
+ "UTLB_NEW_MISS",
+ "UTLB_HIT",
+
+ /* Shader Core */
+ "",
+ "",
+ "",
+ "SHADER_CORE_ACTIVE",
+ "FRAG_ACTIVE",
+ "FRAG_PRIMATIVES",
+ "FRAG_PRIMATIVES_DROPPED",
+ "FRAG_CYCLE_DESC",
+ "FRAG_CYCLES_PLR",
+ "FRAG_CYCLES_VERT",
+ "FRAG_CYCLES_TRISETUP",
+ "FRAG_CYCLES_RAST",
+ "FRAG_THREADS",
+ "FRAG_DUMMY_THREADS",
+ "FRAG_QUADS_RAST",
+ "FRAG_QUADS_EZS_TEST",
+ "FRAG_QUADS_EZS_KILLED",
+ "FRAG_QUADS_LZS_TEST",
+ "FRAG_QUADS_LZS_KILLED",
+ "FRAG_CYCLE_NO_TILE",
+ "FRAG_NUM_TILES",
+ "FRAG_TRANS_ELIM",
+ "COMPUTE_ACTIVE",
+ "COMPUTE_TASKS",
+ "COMPUTE_THREADS",
+ "COMPUTE_CYCLES_DESC",
+ "TRIPIPE_ACTIVE",
+ "ARITH_WORDS",
+ "ARITH_CYCLES_REG",
+ "ARITH_CYCLES_L0",
+ "ARITH_FRAG_DEPEND",
+ "LS_WORDS",
+ "LS_ISSUES",
+ "LS_RESTARTS",
+ "LS_REISSUES_MISS",
+ "LS_REISSUES_VD",
+ "LS_REISSUE_ATTRIB_MISS",
+ "LS_NO_WB",
+ "TEX_WORDS",
+ "TEX_BUBBLES",
+ "TEX_WORDS_L0",
+ "TEX_WORDS_DESC",
+ "TEX_THREADS",
+ "TEX_RECIRC_FMISS",
+ "TEX_RECIRC_DESC",
+ "TEX_RECIRC_MULTI",
+ "TEX_RECIRC_PMISS",
+ "TEX_RECIRC_CONF",
+ "LSC_READ_HITS",
+ "LSC_READ_MISSES",
+ "LSC_WRITE_HITS",
+ "LSC_WRITE_MISSES",
+ "LSC_ATOMIC_HITS",
+ "LSC_ATOMIC_MISSES",
+ "LSC_LINE_FETCHES",
+ "LSC_DIRTY_LINE",
+ "LSC_SNOOPS",
+ "AXI_TLB_STALL",
+ "AXI_TLB_MIESS",
+ "AXI_TLB_TRANSACTION",
+ "LS_TLB_MISS",
+ "LS_TLB_HIT",
+ "AXI_BEATS_READ",
+ "AXI_BEATS_WRITTEN",
+
+	/* L2 and MMU */
+ "",
+ "",
+ "",
+ "",
+ "MMU_TABLE_WALK",
+ "MMU_REPLAY_MISS",
+ "MMU_REPLAY_FULL",
+ "MMU_NEW_MISS",
+ "MMU_HIT",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "UTLB_STALL",
+ "UTLB_REPLAY_MISS",
+ "UTLB_REPLAY_FULL",
+ "UTLB_NEW_MISS",
+ "UTLB_HIT",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "L2_WRITE_BEATS",
+ "L2_READ_BEATS",
+ "L2_ANY_LOOKUP",
+ "L2_READ_LOOKUP",
+ "L2_SREAD_LOOKUP",
+ "L2_READ_REPLAY",
+ "L2_READ_SNOOP",
+ "L2_READ_HIT",
+ "L2_CLEAN_MISS",
+ "L2_WRITE_LOOKUP",
+ "L2_SWRITE_LOOKUP",
+ "L2_WRITE_REPLAY",
+ "L2_WRITE_SNOOP",
+ "L2_WRITE_HIT",
+ "L2_EXT_READ_FULL",
+ "L2_EXT_READ_HALF",
+ "L2_EXT_WRITE_FULL",
+ "L2_EXT_WRITE_HALF",
+ "L2_EXT_READ",
+ "L2_EXT_READ_LINE",
+ "L2_EXT_WRITE",
+ "L2_EXT_WRITE_LINE",
+ "L2_EXT_WRITE_SMALL",
+ "L2_EXT_BARRIER",
+ "L2_EXT_AR_STALL",
+ "L2_EXT_R_BUF_FULL",
+ "L2_EXT_RD_BUF_FULL",
+ "L2_EXT_R_RAW",
+ "L2_EXT_W_STALL",
+ "L2_EXT_W_BUF_FULL",
+ "L2_EXT_R_W_HAZARD",
+ "L2_TAG_HAZARD",
+ "L2_SNOOP_FULL",
+ "L2_REPLAY_FULL"
+};
+
+#define NUMBER_OF_HARDWARE_COUNTERS (sizeof(hardware_counter_names) / sizeof(hardware_counter_names[0]))
+
+#define GET_HW_BLOCK(c) (((c) >> 6) & 0x3)
+#define GET_COUNTER_OFFSET(c) ((c) & 0x3f)
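+/*
+ * Worked example: counter index 6 ("GPU_ACTIVE") is in block (6 >> 6) & 0x3 = 0 (the job
+ * manager block) at offset 6 & 0x3f = 6, so start() sets enable bit 6 / 4 = 1 in
+ * bitmask[JM_BLOCK]. Each block therefore occupies 64 consecutive indices in
+ * hardware_counter_names[].
+ */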
+
+/* Memory to dump hardware counters into */
+static void *kernel_dump_buffer;
+
+/* kbase context and device */
+static kbase_context *kbcontext = NULL;
+static struct kbase_device *kbdevice = NULL;
+
+/*
+ * The following function has no external prototype in older DDK revisions. When the DDK
+ * is updated, this prototype should be removed.
+ */
+struct kbase_device *kbase_find_device(int minor);
+
+static volatile bool kbase_device_busy = false;
+static unsigned int num_hardware_counters_enabled;
+
+/*
+ * gatorfs variables for counter enable state
+ */
+static mali_counter counters[NUMBER_OF_HARDWARE_COUNTERS];
+
+/* An array used to return the data we recorded
+ * as key/value pairs, hence the * 2.
+ */
+static unsigned long counter_dump[NUMBER_OF_HARDWARE_COUNTERS * 2];
+
+#define SYMBOL_GET(FUNCTION, ERROR_COUNT) \
+ if(FUNCTION ## _symbol) \
+ { \
+ printk("gator: mali " #FUNCTION " symbol was already registered\n"); \
+ (ERROR_COUNT)++; \
+ } \
+ else \
+ { \
+ FUNCTION ## _symbol = symbol_get(FUNCTION); \
+ if(! FUNCTION ## _symbol) \
+ { \
+ printk("gator: mali online " #FUNCTION " symbol not found\n"); \
+ (ERROR_COUNT)++; \
+ } \
+ }
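+/*
+ * For example, SYMBOL_GET(kbase_find_device, error_count) takes a reference on kbase_find_device
+ * via symbol_get() and stores it in kbase_find_device_symbol, incrementing error_count if the
+ * symbol was already registered or cannot be found.
+ */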
+
+#define SYMBOL_CLEANUP(FUNCTION) \
+ if(FUNCTION ## _symbol) \
+ { \
+ symbol_put(FUNCTION); \
+ FUNCTION ## _symbol = NULL; \
+ }
+
+/**
+ * Execute symbol_get for all the Mali symbols and check for success.
+ * @return the number of symbols not loaded.
+ */
+static int init_symbols(void)
+{
+ int error_count = 0;
+ SYMBOL_GET(kbase_find_device, error_count);
+ SYMBOL_GET(kbase_create_context, error_count);
+ SYMBOL_GET(kbase_va_alloc, error_count);
+ SYMBOL_GET(kbase_instr_hwcnt_enable, error_count);
+ SYMBOL_GET(kbase_instr_hwcnt_clear, error_count);
+ SYMBOL_GET(kbase_instr_hwcnt_dump_irq, error_count);
+ SYMBOL_GET(kbase_instr_hwcnt_dump_complete, error_count);
+ SYMBOL_GET(kbase_instr_hwcnt_disable, error_count);
+ SYMBOL_GET(kbase_va_free, error_count);
+ SYMBOL_GET(kbase_destroy_context, error_count);
+
+ return error_count;
+}
+
+/**
+ * Execute symbol_put for all the registered Mali symbols.
+ */
+static void clean_symbols(void)
+{
+ SYMBOL_CLEANUP(kbase_find_device);
+ SYMBOL_CLEANUP(kbase_create_context);
+ SYMBOL_CLEANUP(kbase_va_alloc);
+ SYMBOL_CLEANUP(kbase_instr_hwcnt_enable);
+ SYMBOL_CLEANUP(kbase_instr_hwcnt_clear);
+ SYMBOL_CLEANUP(kbase_instr_hwcnt_dump_irq);
+ SYMBOL_CLEANUP(kbase_instr_hwcnt_dump_complete);
+ SYMBOL_CLEANUP(kbase_instr_hwcnt_disable);
+ SYMBOL_CLEANUP(kbase_va_free);
+ SYMBOL_CLEANUP(kbase_destroy_context);
+}
+
+/**
+ * Determines whether a read should take place
+ * @param current_time The current time, obtained from getnstimeofday()
+ * @param prev_time_s The number of seconds at the previous read attempt.
+ * @param next_read_time_ns The time (in ns) when the next read should be allowed.
+ *
+ * Note that this function has been separated out here to allow it to be tested.
+ */
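+/*
+ * Worked example (mirroring gator_events_mali_t6xx_hw_test.c): with prev_time_s = 100 and
+ * next_read_time_ns = 999999999 + 500, a read attempted at {tv_sec = 101, tv_nsec = 600} first
+ * rolls next_read_time_ns back by NSEC_PER_SEC to 499; since 600 >= 499 the read proceeds and
+ * the next read time becomes 600 + READ_INTERVAL_NSEC.
+ */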
+static int is_read_scheduled(const struct timespec *current_time, u32 *prev_time_s, s32 *next_read_time_ns)
+{
+ /* If the current ns count rolls over a second, roll the next read time too. */
+ if (current_time->tv_sec != *prev_time_s) {
+ *next_read_time_ns = *next_read_time_ns - NSEC_PER_SEC;
+ }
+
+ /* Abort the read if the next read time has not arrived. */
+ if (current_time->tv_nsec < *next_read_time_ns) {
+ return 0;
+ }
+
+ /* Set the next read some fixed time after this one, and update the read timestamp. */
+ *next_read_time_ns = current_time->tv_nsec + READ_INTERVAL_NSEC;
+
+ *prev_time_s = current_time->tv_sec;
+ return 1;
+}
+
+static int start(void)
+{
+ kbase_uk_hwcnt_setup setup;
+ mali_error err;
+ int cnt;
+ u16 bitmask[] = { 0, 0, 0, 0 };
+
+ /* Setup HW counters */
+ num_hardware_counters_enabled = 0;
+
+ if (NUMBER_OF_HARDWARE_COUNTERS != 256) {
+ pr_debug("Unexpected number of hardware counters defined: expecting 256, got %d\n", NUMBER_OF_HARDWARE_COUNTERS);
+ }
+
+ /* Calculate enable bitmasks based on counters_enabled array */
+ for (cnt = 0; cnt < NUMBER_OF_HARDWARE_COUNTERS; cnt++) {
+ const mali_counter *counter = &counters[cnt];
+ if (counter->enabled) {
+ int block = GET_HW_BLOCK(cnt);
+ int enable_bit = GET_COUNTER_OFFSET(cnt) / 4;
+ bitmask[block] |= (1 << enable_bit);
+ pr_debug("gator: Mali-T6xx: hardware counter %s selected [%d]\n", hardware_counter_names[cnt], cnt);
+ num_hardware_counters_enabled++;
+ }
+ }
+
+ /* Create a kbase context for HW counters */
+ if (num_hardware_counters_enabled > 0) {
+ if (init_symbols() > 0) {
+ clean_symbols();
+ /* No Mali driver code entrypoints found - not a fault. */
+ return 0;
+ }
+
+ kbdevice = kbase_find_device_symbol(-1);
+
+ /* If we already got a context, fail */
+ if (kbcontext) {
+ pr_debug("gator: Mali-T6xx: error context already present\n");
+ goto out;
+ }
+
+ /* kbcontext will only be valid after all the Mali symbols are loaded successfully */
+ kbcontext = kbase_create_context_symbol(kbdevice);
+ if (!kbcontext) {
+ pr_debug("gator: Mali-T6xx: error creating kbase context\n");
+ goto out;
+ }
+
+ /*
+ * The amount of memory needed to store the dump (bytes)
+ * DUMP_SIZE = number of core groups
+ * * number of blocks (always 8 for midgard)
+ * * number of counters per block (always 64 for midgard)
+ * * number of bytes per counter (always 4 in midgard)
+		 * For a Mali-T6xx with a single core group: 1 * 8 * 64 * 4 = 2048 bytes
+ */
+ kernel_dump_buffer = kbase_va_alloc_symbol(kbcontext, 2048);
+ if (!kernel_dump_buffer) {
+ pr_debug("gator: Mali-T6xx: error trying to allocate va\n");
+ goto destroy_context;
+ }
+
+ setup.dump_buffer = (uintptr_t)kernel_dump_buffer;
+ setup.jm_bm = bitmask[JM_BLOCK];
+ setup.tiler_bm = bitmask[TILER_BLOCK];
+ setup.shader_bm = bitmask[SHADER_BLOCK];
+ setup.mmu_l2_bm = bitmask[MMU_BLOCK];
+ /* These counters do not exist on Mali-T60x */
+ setup.l3_cache_bm = 0;
+
+ /* Use kbase API to enable hardware counters and provide dump buffer */
+ err = kbase_instr_hwcnt_enable_symbol(kbcontext, &setup);
+ if (err != MALI_ERROR_NONE) {
+ pr_debug("gator: Mali-T6xx: can't setup hardware counters\n");
+ goto free_buffer;
+ }
+ pr_debug("gator: Mali-T6xx: hardware counters enabled\n");
+ kbase_instr_hwcnt_clear_symbol(kbcontext);
+		pr_debug("gator: Mali-T6xx: hardware counters cleared\n");
+
+ kbase_device_busy = false;
+ }
+
+ return 0;
+
+free_buffer:
+ kbase_va_free_symbol(kbcontext, kernel_dump_buffer);
+
+destroy_context:
+ kbase_destroy_context_symbol(kbcontext);
+
+out:
+ clean_symbols();
+ return -1;
+}
+
+static void stop(void)
+{
+ unsigned int cnt;
+ kbase_context *temp_kbcontext;
+
+ pr_debug("gator: Mali-T6xx: stop\n");
+
+ /* Set all counters as disabled */
+ for (cnt = 0; cnt < NUMBER_OF_HARDWARE_COUNTERS; cnt++) {
+ counters[cnt].enabled = 0;
+ }
+
+ /* Destroy the context for HW counters */
+ if (num_hardware_counters_enabled > 0 && kbcontext != NULL) {
+ /*
+ * Set the global variable to NULL before destroying it, because
+		 * other functions check it before using it.
+ */
+ temp_kbcontext = kbcontext;
+ kbcontext = NULL;
+
+ kbase_instr_hwcnt_disable_symbol(temp_kbcontext);
+ kbase_va_free_symbol(temp_kbcontext, kernel_dump_buffer);
+ kbase_destroy_context_symbol(temp_kbcontext);
+
+ pr_debug("gator: Mali-T6xx: hardware counters stopped\n");
+
+ clean_symbols();
+ }
+}
+
+static int read(int **buffer)
+{
+ int cnt;
+ int len = 0;
+ u32 value = 0;
+ mali_bool success;
+
+ struct timespec current_time;
+ static u32 prev_time_s = 0;
+ static s32 next_read_time_ns = 0;
+
+ if (smp_processor_id() != 0) {
+ return 0;
+ }
+
+ getnstimeofday(&current_time);
+
+ /*
+ * Discard reads unless a respectable time has passed. This reduces the load on the GPU without sacrificing
+ * accuracy on the Streamline display.
+ */
+ if (!is_read_scheduled(&current_time, &prev_time_s, &next_read_time_ns)) {
+ return 0;
+ }
+
+ /*
+ * Report the HW counters
+ * Only process hardware counters if at least one of the hardware counters is enabled.
+ */
+ if (num_hardware_counters_enabled > 0) {
+ const unsigned int vithar_blocks[] = {
+ 0x700, /* VITHAR_JOB_MANAGER, Block 0 */
+ 0x400, /* VITHAR_TILER, Block 1 */
+ 0x000, /* VITHAR_SHADER_CORE, Block 2 */
+ 0x500 /* VITHAR_MEMORY_SYSTEM, Block 3 */
+ };
+
+ if (!kbcontext) {
+ return -1;
+ }
+
+ /* Mali symbols can be called safely since a kbcontext is valid */
+ if (kbase_instr_hwcnt_dump_complete_symbol(kbcontext, &success) == MALI_TRUE) {
+ kbase_device_busy = false;
+
+ if (success == MALI_TRUE) {
+ for (cnt = 0; cnt < NUMBER_OF_HARDWARE_COUNTERS; cnt++) {
+ const mali_counter *counter = &counters[cnt];
+ if (counter->enabled) {
+ const int block = GET_HW_BLOCK(cnt);
+ const int counter_offset = GET_COUNTER_OFFSET(cnt);
+ const u32 *counter_block = (u32 *) ((uintptr_t)kernel_dump_buffer + vithar_blocks[block]);
+ const u32 *counter_address = counter_block + counter_offset;
+
+ value = *counter_address;
+
+ if (block == SHADER_BLOCK) {
+							/* (counter_address + 0x000) has already been accounted for above. */
+ value += *(counter_address + 0x100);
+ value += *(counter_address + 0x200);
+ value += *(counter_address + 0x300);
+ }
+
+ counter_dump[len++] = counter->key;
+ counter_dump[len++] = value;
+ }
+ }
+ }
+ }
+
+ if (!kbase_device_busy) {
+ kbase_device_busy = true;
+ kbase_instr_hwcnt_dump_irq_symbol(kbcontext);
+ }
+ }
+
+ /* Update the buffer */
+ if (buffer) {
+ *buffer = (int *)counter_dump;
+ }
+
+ return len;
+}
+
+static int create_files(struct super_block *sb, struct dentry *root)
+{
+ unsigned int event;
+ /*
+ * Create the filesystem for all events
+ */
+ int counter_index = 0;
+ const char *mali_name = gator_mali_get_mali_name();
+
+ for (event = 0; event < NUMBER_OF_HARDWARE_COUNTERS; event++) {
+ if (gator_mali_create_file_system(mali_name, hardware_counter_names[counter_index], sb, root, &counters[event]) != 0)
+ return -1;
+ counter_index++;
+ }
+
+ return 0;
+}
+
+static struct gator_interface gator_events_mali_t6xx_interface = {
+ .create_files = create_files,
+ .start = start,
+ .stop = stop,
+ .read = read
+};
+
+int gator_events_mali_t6xx_hw_init(void)
+{
+ pr_debug("gator: Mali-T6xx: sw_counters init\n");
+
+#if GATOR_TEST
+ test_all_is_read_scheduled();
+#endif
+
+ gator_mali_initialise_counters(counters, NUMBER_OF_HARDWARE_COUNTERS);
+
+ return gator_events_install(&gator_events_mali_t6xx_interface);
+}
+
+gator_events_init(gator_events_mali_t6xx_hw_init);
diff --git a/drivers/gator/gator_events_mali_t6xx_hw_test.c b/drivers/gator/gator_events_mali_t6xx_hw_test.c
new file mode 100644
index 000000000000..eb77110dc900
--- /dev/null
+++ b/drivers/gator/gator_events_mali_t6xx_hw_test.c
@@ -0,0 +1,55 @@
+/**
+ * Copyright (C) ARM Limited 2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+/**
+ * Test functions for the mali_t6xx_hw code.
+ */
+
+static int is_read_scheduled(const struct timespec *current_time, u32 *prev_time_s, s32 *next_read_time_ns);
+
+static int test_is_read_scheduled(u32 s, u32 ns, u32 prev_s, s32 next_ns, int expected_result, s32 expected_next_ns)
+{
+ struct timespec current_time;
+ u32 prev_time_s = prev_s;
+ s32 next_read_time_ns = next_ns;
+
+ current_time.tv_sec = s;
+ current_time.tv_nsec = ns;
+
+ if (is_read_scheduled(&current_time, &prev_time_s, &next_read_time_ns) != expected_result) {
+ printk("Failed do_read(%u, %u, %u, %d): expected %d\n", s, ns, prev_s, next_ns, expected_result);
+ return 0;
+ }
+
+ if (next_read_time_ns != expected_next_ns) {
+ printk("Failed: next_read_ns expected=%d, actual=%d\n", expected_next_ns, next_read_time_ns);
+ return 0;
+ }
+
+ return 1;
+}
+
+static void test_all_is_read_scheduled(void)
+{
+ const int HIGHEST_NS = 999999999;
+ int n_tests_passed = 0;
+
+ printk("gator: running tests on %s\n", __FILE__);
+
+ n_tests_passed += test_is_read_scheduled(0, 0, 0, 0, 1, READ_INTERVAL_NSEC); /* Null time */
+ n_tests_passed += test_is_read_scheduled(100, 1000, 0, 0, 1, READ_INTERVAL_NSEC + 1000); /* Initial values */
+
+ n_tests_passed += test_is_read_scheduled(100, HIGHEST_NS, 100, HIGHEST_NS + 500, 0, HIGHEST_NS + 500);
+ n_tests_passed += test_is_read_scheduled(101, 0001, 100, HIGHEST_NS + 500, 0, HIGHEST_NS + 500 - NSEC_PER_SEC);
+ n_tests_passed += test_is_read_scheduled(101, 600, 100, HIGHEST_NS + 500 - NSEC_PER_SEC, 1, 600 + READ_INTERVAL_NSEC);
+
+ n_tests_passed += test_is_read_scheduled(101, 600, 100, HIGHEST_NS + 500, 1, 600 + READ_INTERVAL_NSEC);
+
+ printk("gator: %d tests passed\n", n_tests_passed);
+}
diff --git a/drivers/gator/gator_events_meminfo.c b/drivers/gator/gator_events_meminfo.c
new file mode 100644
index 000000000000..fd063b25a247
--- /dev/null
+++ b/drivers/gator/gator_events_meminfo.c
@@ -0,0 +1,240 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include "gator.h"
+#include <linux/workqueue.h>
+#include <trace/events/kmem.h>
+#include <linux/hardirq.h>
+
+#define MEMINFO_MEMFREE 0
+#define MEMINFO_MEMUSED 1
+#define MEMINFO_BUFFERRAM 2
+#define MEMINFO_TOTAL 3
+
+static ulong meminfo_global_enabled;
+static ulong meminfo_enabled[MEMINFO_TOTAL];
+static ulong meminfo_key[MEMINFO_TOTAL];
+static unsigned long long meminfo_buffer[MEMINFO_TOTAL * 2];
+static int meminfo_length = 0;
+static unsigned int mem_event = 0;
+static bool new_data_avail;
+
+static void wq_sched_handler(struct work_struct *wsptr);
+DECLARE_WORK(work, wq_sched_handler);
+static struct timer_list meminfo_wake_up_timer;
+static void meminfo_wake_up_handler(unsigned long unused_data);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
+GATOR_DEFINE_PROBE(mm_page_free_direct, TP_PROTO(struct page *page, unsigned int order))
+#else
+GATOR_DEFINE_PROBE(mm_page_free, TP_PROTO(struct page *page, unsigned int order))
+#endif
+{
+ mem_event++;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
+GATOR_DEFINE_PROBE(mm_pagevec_free, TP_PROTO(struct page *page, int cold))
+#else
+GATOR_DEFINE_PROBE(mm_page_free_batched, TP_PROTO(struct page *page, int cold))
+#endif
+{
+ mem_event++;
+}
+
+GATOR_DEFINE_PROBE(mm_page_alloc, TP_PROTO(struct page *page, unsigned int order, gfp_t gfp_flags, int migratetype))
+{
+ mem_event++;
+}
+
+static int gator_events_meminfo_create_files(struct super_block *sb, struct dentry *root)
+{
+ struct dentry *dir;
+ int i;
+
+ for (i = 0; i < MEMINFO_TOTAL; i++) {
+ switch (i) {
+ case MEMINFO_MEMFREE:
+ dir = gatorfs_mkdir(sb, root, "Linux_meminfo_memfree");
+ break;
+ case MEMINFO_MEMUSED:
+ dir = gatorfs_mkdir(sb, root, "Linux_meminfo_memused");
+ break;
+ case MEMINFO_BUFFERRAM:
+ dir = gatorfs_mkdir(sb, root, "Linux_meminfo_bufferram");
+ break;
+ default:
+ return -1;
+ }
+ if (!dir) {
+ return -1;
+ }
+ gatorfs_create_ulong(sb, dir, "enabled", &meminfo_enabled[i]);
+ gatorfs_create_ro_ulong(sb, dir, "key", &meminfo_key[i]);
+ }
+
+ return 0;
+}
+
+static int gator_events_meminfo_start(void)
+{
+ int i;
+
+ new_data_avail = true;
+ for (i = 0; i < MEMINFO_TOTAL; i++) {
+ if (meminfo_enabled[i]) {
+ meminfo_global_enabled = 1;
+ }
+ }
+
+ if (meminfo_global_enabled == 0)
+ return 0;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
+ if (GATOR_REGISTER_TRACE(mm_page_free_direct))
+#else
+ if (GATOR_REGISTER_TRACE(mm_page_free))
+#endif
+ goto mm_page_free_exit;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
+ if (GATOR_REGISTER_TRACE(mm_pagevec_free))
+#else
+ if (GATOR_REGISTER_TRACE(mm_page_free_batched))
+#endif
+ goto mm_page_free_batched_exit;
+ if (GATOR_REGISTER_TRACE(mm_page_alloc))
+ goto mm_page_alloc_exit;
+
+ setup_timer(&meminfo_wake_up_timer, meminfo_wake_up_handler, 0);
+ return 0;
+
+mm_page_alloc_exit:
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
+ GATOR_UNREGISTER_TRACE(mm_pagevec_free);
+#else
+ GATOR_UNREGISTER_TRACE(mm_page_free_batched);
+#endif
+mm_page_free_batched_exit:
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
+ GATOR_UNREGISTER_TRACE(mm_page_free_direct);
+#else
+ GATOR_UNREGISTER_TRACE(mm_page_free);
+#endif
+mm_page_free_exit:
+ return -1;
+}
+
+static void gator_events_meminfo_stop(void)
+{
+ int i;
+
+ if (meminfo_global_enabled) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
+ GATOR_UNREGISTER_TRACE(mm_page_free_direct);
+ GATOR_UNREGISTER_TRACE(mm_pagevec_free);
+#else
+ GATOR_UNREGISTER_TRACE(mm_page_free);
+ GATOR_UNREGISTER_TRACE(mm_page_free_batched);
+#endif
+ GATOR_UNREGISTER_TRACE(mm_page_alloc);
+
+ del_timer_sync(&meminfo_wake_up_timer);
+ }
+
+ meminfo_global_enabled = 0;
+ for (i = 0; i < MEMINFO_TOTAL; i++) {
+ meminfo_enabled[i] = 0;
+ }
+}
+
+// Must be run in process context as the kernel function si_meminfo() can sleep
+static void wq_sched_handler(struct work_struct *wsptr)
+{
+ struct sysinfo info;
+ int i, len;
+ unsigned long long value;
+
+ meminfo_length = len = 0;
+
+ si_meminfo(&info);
+ for (i = 0; i < MEMINFO_TOTAL; i++) {
+ if (meminfo_enabled[i]) {
+ switch (i) {
+ case MEMINFO_MEMFREE:
+ value = info.freeram * PAGE_SIZE;
+ break;
+ case MEMINFO_MEMUSED:
+ value = (info.totalram - info.freeram) * PAGE_SIZE;
+ break;
+ case MEMINFO_BUFFERRAM:
+ value = info.bufferram * PAGE_SIZE;
+ break;
+ default:
+ value = 0;
+ break;
+ }
+ meminfo_buffer[len++] = (unsigned long long)meminfo_key[i];
+ meminfo_buffer[len++] = value;
+ }
+ }
+
+ meminfo_length = len;
+ new_data_avail = true;
+}
+
+static void meminfo_wake_up_handler(unsigned long unused_data)
+{
+	// Work is scheduled from this timer handler because scheduling work directly during the context switch is illegal in kernel versions 3.5 and later
+ schedule_work(&work);
+}
+
+static int gator_events_meminfo_read(long long **buffer)
+{
+ static unsigned int last_mem_event = 0;
+
+ if (smp_processor_id() || !meminfo_global_enabled)
+ return 0;
+
+ if (last_mem_event != mem_event) {
+ last_mem_event = mem_event;
+ mod_timer(&meminfo_wake_up_timer, jiffies + 1);
+ }
+
+ if (!new_data_avail)
+ return 0;
+
+ new_data_avail = false;
+
+ if (buffer)
+ *buffer = meminfo_buffer;
+
+ return meminfo_length;
+}
+
+static struct gator_interface gator_events_meminfo_interface = {
+ .create_files = gator_events_meminfo_create_files,
+ .start = gator_events_meminfo_start,
+ .stop = gator_events_meminfo_stop,
+ .read64 = gator_events_meminfo_read,
+};
+
+int gator_events_meminfo_init(void)
+{
+ int i;
+
+ meminfo_global_enabled = 0;
+ for (i = 0; i < MEMINFO_TOTAL; i++) {
+ meminfo_enabled[i] = 0;
+ meminfo_key[i] = gator_events_get_key();
+ }
+
+ return gator_events_install(&gator_events_meminfo_interface);
+}
+
+gator_events_init(gator_events_meminfo_init);
diff --git a/drivers/gator/gator_events_mmaped.c b/drivers/gator/gator_events_mmaped.c
new file mode 100644
index 000000000000..c4cb44f25620
--- /dev/null
+++ b/drivers/gator/gator_events_mmaped.c
@@ -0,0 +1,229 @@
+/*
+ * Example events provider
+ *
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Similar entries to those below must be present in the events.xml file.
+ * To add them to the events.xml, create an events-mmap.xml with the
+ * following contents and rebuild gatord:
+ *
+ * <counter_set name="mmaped_cnt" count="3"/>
+ * <category name="mmaped" counter_set="mmaped_cnt" per_cpu="no">
+ * <event event="0x0" title="Simulated" name="Sine" display="maximum" average_selection="yes" description="Sort-of-sine"/>
+ * <event event="0x1" title="Simulated" name="Triangle" display="maximum" average_selection="yes" description="Triangular wave"/>
+ * <event event="0x2" title="Simulated" name="PWM" display="maximum" average_selection="yes" description="PWM Signal"/>
+ * </category>
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ratelimit.h>
+
+#include "gator.h"
+
+#define MMAPED_COUNTERS_NUM 3
+
+static struct {
+ unsigned long enabled;
+ unsigned long event;
+ unsigned long key;
+} mmaped_counters[MMAPED_COUNTERS_NUM];
+
+static int mmaped_buffer[MMAPED_COUNTERS_NUM * 2];
+
+#ifdef TODO
+static void __iomem *mmaped_base;
+#endif
+
+#ifndef TODO
+static s64 prev_time;
+#endif
+
+/* Adds mmaped_cntX directories and enabled, event, and key files to /dev/gator/events */
+static int gator_events_mmaped_create_files(struct super_block *sb,
+ struct dentry *root)
+{
+ int i;
+
+ for (i = 0; i < MMAPED_COUNTERS_NUM; i++) {
+ char buf[16];
+ struct dentry *dir;
+
+ snprintf(buf, sizeof(buf), "mmaped_cnt%d", i);
+ dir = gatorfs_mkdir(sb, root, buf);
+ if (WARN_ON(!dir))
+ return -1;
+ gatorfs_create_ulong(sb, dir, "enabled",
+ &mmaped_counters[i].enabled);
+ gatorfs_create_ulong(sb, dir, "event",
+ &mmaped_counters[i].event);
+ gatorfs_create_ro_ulong(sb, dir, "key",
+ &mmaped_counters[i].key);
+ }
+
+ return 0;
+}
+
+static int gator_events_mmaped_start(void)
+{
+#ifdef TODO
+ for (i = 0; i < MMAPED_COUNTERS_NUM; i++)
+ writel(mmaped_counters[i].event,
+ mmaped_base + COUNTERS_CONFIG_OFFSET[i]);
+
+ writel(ENABLED, COUNTERS_CONTROL_OFFSET);
+#endif
+
+#ifndef TODO
+ struct timespec ts;
+ getnstimeofday(&ts);
+ prev_time = timespec_to_ns(&ts);
+#endif
+
+ return 0;
+}
+
+static void gator_events_mmaped_stop(void)
+{
+#ifdef TODO
+ writel(DISABLED, COUNTERS_CONTROL_OFFSET);
+#endif
+}
+
+#ifndef TODO
+/* This function "simulates" counters, generating values of fancy
+ * functions like sine or triangle... */
+static int mmaped_simulate(int counter, int delta_in_us)
+{
+ int result = 0;
+
+ switch (counter) {
+ case 0: /* sort-of-sine */
+ {
+ static int t = 0;
+ int x;
+
+ t += delta_in_us;
+ if (t > 2048000)
+ t = 0;
+
+ if (t % 1024000 < 512000)
+ x = 512000 - (t % 512000);
+ else
+ x = t % 512000;
+
+ result = 32 * x / 512000;
+ result = result * result;
+
+ if (t < 1024000)
+ result = 1922 - result;
+ }
+ break;
+ case 1: /* triangle */
+ {
+ static int v, d = 1;
+
+ v = v + d * delta_in_us;
+ if (v < 0) {
+ v = 0;
+ d = 1;
+ } else if (v > 1000000) {
+ v = 1000000;
+ d = -1;
+ }
+
+ result = v;
+ }
+ break;
+ case 2: /* PWM signal */
+ {
+ static int t, dc, x;
+
+ t += delta_in_us;
+ if (t > 1000000)
+ t = 0;
+ if (x / 1000000 != (x + delta_in_us) / 1000000)
+ dc = (dc + 100000) % 1000000;
+ x += delta_in_us;
+
+ result = t < dc ? 0 : 10;
+ }
+ break;
+ }
+
+ return result;
+}
+#endif
+
+static int gator_events_mmaped_read(int **buffer)
+{
+ int i;
+ int len = 0;
+#ifndef TODO
+ int delta_in_us;
+ struct timespec ts;
+ s64 time;
+#endif
+
+ /* System wide counters - read from one core only */
+ if (smp_processor_id())
+ return 0;
+
+#ifndef TODO
+ getnstimeofday(&ts);
+ time = timespec_to_ns(&ts);
+ delta_in_us = (int)(time - prev_time) / 1000;
+ prev_time = time;
+#endif
+
+ for (i = 0; i < MMAPED_COUNTERS_NUM; i++) {
+ if (mmaped_counters[i].enabled) {
+ mmaped_buffer[len++] = mmaped_counters[i].key;
+#ifdef TODO
+ mmaped_buffer[len++] =
+ readl(mmaped_base + COUNTERS_VALUE_OFFSET[i]);
+#else
+ mmaped_buffer[len++] =
+ mmaped_simulate(mmaped_counters[i].event,
+ delta_in_us);
+#endif
+ }
+ }
+
+ if (buffer)
+ *buffer = mmaped_buffer;
+
+ return len;
+}
+
+static struct gator_interface gator_events_mmaped_interface = {
+ .create_files = gator_events_mmaped_create_files,
+ .start = gator_events_mmaped_start,
+ .stop = gator_events_mmaped_stop,
+ .read = gator_events_mmaped_read,
+};
+
+/* Must not be static! */
+int __init gator_events_mmaped_init(void)
+{
+ int i;
+
+#ifdef TODO
+ mmaped_base = ioremap(COUNTERS_PHYS_ADDR, SZ_4K);
+ if (!mmaped_base)
+ return -ENOMEM;
+#endif
+
+ for (i = 0; i < MMAPED_COUNTERS_NUM; i++) {
+ mmaped_counters[i].enabled = 0;
+ mmaped_counters[i].key = gator_events_get_key();
+ }
+
+ return gator_events_install(&gator_events_mmaped_interface);
+}
+
+gator_events_init(gator_events_mmaped_init);
diff --git a/drivers/gator/gator_events_net.c b/drivers/gator/gator_events_net.c
new file mode 100644
index 000000000000..b6ce06aabb43
--- /dev/null
+++ b/drivers/gator/gator_events_net.c
@@ -0,0 +1,171 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include "gator.h"
+#include <linux/netdevice.h>
+#include <linux/hardirq.h>
+
+#define NETRX 0
+#define NETTX 1
+#define TOTALNET 2
+
+static ulong netrx_enabled;
+static ulong nettx_enabled;
+static ulong netrx_key;
+static ulong nettx_key;
+static int rx_total, tx_total;
+static ulong netPrev[TOTALNET];
+static int netGet[TOTALNET * 4];
+
+static struct timer_list net_wake_up_timer;
+
+// Must be run in process context as the kernel function dev_get_stats() can sleep
+static void get_network_stats(struct work_struct *wsptr)
+{
+ int rx = 0, tx = 0;
+ struct net_device *dev;
+
+ for_each_netdev(&init_net, dev) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
+ const struct net_device_stats *stats = dev_get_stats(dev);
+#else
+ struct rtnl_link_stats64 temp;
+ const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
+#endif
+ rx += stats->rx_bytes;
+ tx += stats->tx_bytes;
+ }
+ rx_total = rx;
+ tx_total = tx;
+}
+
+DECLARE_WORK(wq_get_stats, get_network_stats);
+
+static void net_wake_up_handler(unsigned long unused_data)
+{
+	// Work is scheduled from this timer handler because scheduling work directly during the context switch is illegal in kernel versions 3.5 and later
+ schedule_work(&wq_get_stats);
+}
+
+static void calculate_delta(int *rx, int *tx)
+{
+ int rx_calc, tx_calc;
+
+ rx_calc = (int)(rx_total - netPrev[NETRX]);
+ if (rx_calc < 0)
+ rx_calc = 0;
+ netPrev[NETRX] += rx_calc;
+
+ tx_calc = (int)(tx_total - netPrev[NETTX]);
+ if (tx_calc < 0)
+ tx_calc = 0;
+ netPrev[NETTX] += tx_calc;
+
+ *rx = rx_calc;
+ *tx = tx_calc;
+}
+
+static int gator_events_net_create_files(struct super_block *sb, struct dentry *root)
+{
+ struct dentry *dir;
+
+ dir = gatorfs_mkdir(sb, root, "Linux_net_rx");
+ if (!dir) {
+ return -1;
+ }
+ gatorfs_create_ulong(sb, dir, "enabled", &netrx_enabled);
+ gatorfs_create_ro_ulong(sb, dir, "key", &netrx_key);
+
+ dir = gatorfs_mkdir(sb, root, "Linux_net_tx");
+ if (!dir) {
+ return -1;
+ }
+ gatorfs_create_ulong(sb, dir, "enabled", &nettx_enabled);
+ gatorfs_create_ro_ulong(sb, dir, "key", &nettx_key);
+
+ return 0;
+}
+
+static int gator_events_net_start(void)
+{
+ get_network_stats(0);
+ netPrev[NETRX] = rx_total;
+ netPrev[NETTX] = tx_total;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
+ setup_timer(&net_wake_up_timer, net_wake_up_handler, 0);
+#else
+ setup_deferrable_timer_on_stack(&net_wake_up_timer, net_wake_up_handler, 0);
+#endif
+ return 0;
+}
+
+static void gator_events_net_stop(void)
+{
+ del_timer_sync(&net_wake_up_timer);
+ netrx_enabled = 0;
+ nettx_enabled = 0;
+}
+
+static int gator_events_net_read(int **buffer)
+{
+ int len, rx_delta, tx_delta;
+ static int last_rx_delta = 0, last_tx_delta = 0;
+
+ if (smp_processor_id() != 0)
+ return 0;
+
+ if (!netrx_enabled && !nettx_enabled)
+ return 0;
+
+ mod_timer(&net_wake_up_timer, jiffies + 1);
+
+ calculate_delta(&rx_delta, &tx_delta);
+
+ len = 0;
+ if (netrx_enabled && last_rx_delta != rx_delta) {
+ last_rx_delta = rx_delta;
+ netGet[len++] = netrx_key;
+		netGet[len++] = 0;	// indicates to Streamline that rx_delta bytes were received now, not since the last message
+ netGet[len++] = netrx_key;
+ netGet[len++] = rx_delta;
+ }
+
+ if (nettx_enabled && last_tx_delta != tx_delta) {
+ last_tx_delta = tx_delta;
+ netGet[len++] = nettx_key;
+ netGet[len++] = 0; // indicates to Streamline that tx_delta bytes were transmitted now, not since the last message
+ netGet[len++] = nettx_key;
+ netGet[len++] = tx_delta;
+ }
+
+ if (buffer)
+ *buffer = netGet;
+
+ return len;
+}
+
+static struct gator_interface gator_events_net_interface = {
+ .create_files = gator_events_net_create_files,
+ .start = gator_events_net_start,
+ .stop = gator_events_net_stop,
+ .read = gator_events_net_read,
+};
+
+int gator_events_net_init(void)
+{
+ netrx_key = gator_events_get_key();
+ nettx_key = gator_events_get_key();
+
+ netrx_enabled = 0;
+ nettx_enabled = 0;
+
+ return gator_events_install(&gator_events_net_interface);
+}
+
+gator_events_init(gator_events_net_init);
diff --git a/drivers/gator/gator_events_perf_pmu.c b/drivers/gator/gator_events_perf_pmu.c
new file mode 100644
index 000000000000..ce3a40fc6591
--- /dev/null
+++ b/drivers/gator/gator_events_perf_pmu.c
@@ -0,0 +1,298 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/perf_event.h>
+#include "gator.h"
+
+// gator_events_armvX.c is used for Linux 2.6.x
+#if GATOR_PERF_PMU_SUPPORT
+
+static const char *pmnc_name;
+int pmnc_counters;
+int ccnt = 0;
+
+#define CNTMAX (6+1)
+
+static DEFINE_MUTEX(perf_mutex);
+
+unsigned long pmnc_enabled[CNTMAX];
+unsigned long pmnc_event[CNTMAX];
+unsigned long pmnc_count[CNTMAX];
+unsigned long pmnc_key[CNTMAX];
+
+static DEFINE_PER_CPU(int[CNTMAX], perfCurr);
+static DEFINE_PER_CPU(int[CNTMAX], perfPrev);
+static DEFINE_PER_CPU(int[CNTMAX], perfPrevDelta);
+static DEFINE_PER_CPU(int[CNTMAX * 2], perfCnt);
+static DEFINE_PER_CPU(struct perf_event *[CNTMAX], pevent);
+static DEFINE_PER_CPU(struct perf_event_attr *[CNTMAX], pevent_attr);
+
+static void gator_events_perf_pmu_stop(void);
+
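+/* Creates a <pmnc_name>_ccnt directory and <pmnc_name>_cnt<n> directories under the gator events
+ * filesystem root (/dev/gator/events), each holding enabled, count and key files (plus an event
+ * file for the event counters). */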
+static int gator_events_perf_pmu_create_files(struct super_block *sb, struct dentry *root)
+{
+ struct dentry *dir;
+ int i;
+
+ for (i = 0; i < pmnc_counters; i++) {
+ char buf[40];
+ if (i == 0) {
+ snprintf(buf, sizeof buf, "%s_ccnt", pmnc_name);
+ } else {
+ snprintf(buf, sizeof buf, "%s_cnt%d", pmnc_name, i - 1);
+ }
+ dir = gatorfs_mkdir(sb, root, buf);
+ if (!dir) {
+ return -1;
+ }
+ gatorfs_create_ulong(sb, dir, "enabled", &pmnc_enabled[i]);
+ gatorfs_create_ulong(sb, dir, "count", &pmnc_count[i]);
+ gatorfs_create_ro_ulong(sb, dir, "key", &pmnc_key[i]);
+ if (i > 0) {
+ gatorfs_create_ulong(sb, dir, "event", &pmnc_event[i]);
+ }
+ }
+
+ return 0;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)
+static void ebs_overflow_handler(struct perf_event *event, int unused, struct perf_sample_data *data, struct pt_regs *regs)
+#else
+static void ebs_overflow_handler(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs)
+#endif
+{
+ gator_backtrace_handler(regs);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)
+static void dummy_handler(struct perf_event *event, int unused, struct perf_sample_data *data, struct pt_regs *regs)
+#else
+static void dummy_handler(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs)
+#endif
+{
+// Required as perf_event_create_kernel_counter() requires an overflow handler, even though all we do is poll
+}
+
+static int gator_events_perf_pmu_online(int **buffer)
+{
+ int cnt, len = 0, cpu = smp_processor_id();
+
+ // read the counters and toss the invalid data, return zero instead
+ for (cnt = 0; cnt < pmnc_counters; cnt++) {
+ struct perf_event *ev = per_cpu(pevent, cpu)[cnt];
+ if (ev != NULL && ev->state == PERF_EVENT_STATE_ACTIVE) {
+ ev->pmu->read(ev);
+ per_cpu(perfPrev, cpu)[cnt] = per_cpu(perfCurr, cpu)[cnt] = local64_read(&ev->count);
+ per_cpu(perfPrevDelta, cpu)[cnt] = 0;
+ per_cpu(perfCnt, cpu)[len++] = pmnc_key[cnt];
+ per_cpu(perfCnt, cpu)[len++] = 0;
+ }
+ }
+
+ if (buffer)
+ *buffer = per_cpu(perfCnt, cpu);
+
+ return len;
+}
+
+static void gator_events_perf_pmu_online_dispatch(int cpu)
+{
+ int cnt;
+ perf_overflow_handler_t handler;
+
+ for (cnt = 0; cnt < pmnc_counters; cnt++) {
+ if (per_cpu(pevent, cpu)[cnt] != NULL || per_cpu(pevent_attr, cpu)[cnt] == 0)
+ continue;
+
+ if (pmnc_count[cnt] > 0) {
+ handler = ebs_overflow_handler;
+ } else {
+ handler = dummy_handler;
+ }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)
+ per_cpu(pevent, cpu)[cnt] = perf_event_create_kernel_counter(per_cpu(pevent_attr, cpu)[cnt], cpu, 0, handler);
+#else
+ per_cpu(pevent, cpu)[cnt] = perf_event_create_kernel_counter(per_cpu(pevent_attr, cpu)[cnt], cpu, 0, handler, 0);
+#endif
+ if (IS_ERR(per_cpu(pevent, cpu)[cnt])) {
+ pr_debug("gator: unable to online a counter on cpu %d\n", cpu);
+ per_cpu(pevent, cpu)[cnt] = NULL;
+ continue;
+ }
+
+ if (per_cpu(pevent, cpu)[cnt]->state != PERF_EVENT_STATE_ACTIVE) {
+ pr_debug("gator: inactive counter on cpu %d\n", cpu);
+ perf_event_release_kernel(per_cpu(pevent, cpu)[cnt]);
+ per_cpu(pevent, cpu)[cnt] = NULL;
+ continue;
+ }
+ }
+}
+
+static void gator_events_perf_pmu_offline_dispatch(int cpu)
+{
+ int cnt;
+ struct perf_event *pe;
+
+ for (cnt = 0; cnt < pmnc_counters; cnt++) {
+ pe = NULL;
+ mutex_lock(&perf_mutex);
+ if (per_cpu(pevent, cpu)[cnt]) {
+ pe = per_cpu(pevent, cpu)[cnt];
+ per_cpu(pevent, cpu)[cnt] = NULL;
+ }
+ mutex_unlock(&perf_mutex);
+
+ if (pe) {
+ perf_event_release_kernel(pe);
+ }
+ }
+}
+
+static int gator_events_perf_pmu_start(void)
+{
+ int cnt, cpu;
+ u32 size = sizeof(struct perf_event_attr);
+ int found_ebs = false;
+
+ for (cnt = 0; cnt < pmnc_counters; cnt++) {
+ if (pmnc_count[cnt] > 0) {
+ if (!found_ebs) {
+ found_ebs = true;
+ } else {
+ // Only one ebs counter is allowed
+ return -1;
+ }
+ }
+ }
+
+ for_each_present_cpu(cpu) {
+ for (cnt = 0; cnt < pmnc_counters; cnt++) {
+ per_cpu(pevent, cpu)[cnt] = NULL;
+ if (!pmnc_enabled[cnt]) // Skip disabled counters
+ continue;
+
+ per_cpu(perfPrev, cpu)[cnt] = 0;
+ per_cpu(perfCurr, cpu)[cnt] = 0;
+ per_cpu(perfPrevDelta, cpu)[cnt] = 0;
+ per_cpu(pevent_attr, cpu)[cnt] = kmalloc(size, GFP_KERNEL);
+ if (!per_cpu(pevent_attr, cpu)[cnt]) {
+ gator_events_perf_pmu_stop();
+ return -1;
+ }
+
+ memset(per_cpu(pevent_attr, cpu)[cnt], 0, size);
+ per_cpu(pevent_attr, cpu)[cnt]->type = PERF_TYPE_RAW;
+ per_cpu(pevent_attr, cpu)[cnt]->size = size;
+ per_cpu(pevent_attr, cpu)[cnt]->config = pmnc_event[cnt];
+ per_cpu(pevent_attr, cpu)[cnt]->sample_period = pmnc_count[cnt];
+ per_cpu(pevent_attr, cpu)[cnt]->pinned = 1;
+
+ // handle special case for ccnt
+ if (cnt == ccnt) {
+ per_cpu(pevent_attr, cpu)[cnt]->type = PERF_TYPE_HARDWARE;
+ per_cpu(pevent_attr, cpu)[cnt]->config = PERF_COUNT_HW_CPU_CYCLES;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void gator_events_perf_pmu_stop(void)
+{
+ unsigned int cnt, cpu;
+
+ for_each_present_cpu(cpu) {
+ for (cnt = 0; cnt < pmnc_counters; cnt++) {
+ if (per_cpu(pevent_attr, cpu)[cnt]) {
+ kfree(per_cpu(pevent_attr, cpu)[cnt]);
+ per_cpu(pevent_attr, cpu)[cnt] = NULL;
+ }
+ }
+ }
+
+ for (cnt = 0; cnt < pmnc_counters; cnt++) {
+ pmnc_enabled[cnt] = 0;
+ pmnc_event[cnt] = 0;
+ pmnc_count[cnt] = 0;
+ }
+}
+
+static int gator_events_perf_pmu_read(int **buffer)
+{
+ int cnt, delta, len = 0;
+ int cpu = smp_processor_id();
+
+ for (cnt = 0; cnt < pmnc_counters; cnt++) {
+ struct perf_event *ev = per_cpu(pevent, cpu)[cnt];
+ if (ev != NULL && ev->state == PERF_EVENT_STATE_ACTIVE) {
+ ev->pmu->read(ev);
+ per_cpu(perfCurr, cpu)[cnt] = local64_read(&ev->count);
+ delta = per_cpu(perfCurr, cpu)[cnt] - per_cpu(perfPrev, cpu)[cnt];
+ if (delta != 0 || delta != per_cpu(perfPrevDelta, cpu)[cnt]) {
+ per_cpu(perfPrevDelta, cpu)[cnt] = delta;
+ per_cpu(perfPrev, cpu)[cnt] = per_cpu(perfCurr, cpu)[cnt];
+ per_cpu(perfCnt, cpu)[len++] = pmnc_key[cnt];
+ if (delta < 0)
+ delta *= -1;
+ per_cpu(perfCnt, cpu)[len++] = delta;
+ }
+ }
+ }
+
+ if (buffer)
+ *buffer = per_cpu(perfCnt, cpu);
+
+ return len;
+}
+
+static struct gator_interface gator_events_perf_pmu_interface = {
+ .create_files = gator_events_perf_pmu_create_files,
+ .start = gator_events_perf_pmu_start,
+ .stop = gator_events_perf_pmu_stop,
+ .online = gator_events_perf_pmu_online,
+ .online_dispatch = gator_events_perf_pmu_online_dispatch,
+ .offline_dispatch = gator_events_perf_pmu_offline_dispatch,
+ .read = gator_events_perf_pmu_read,
+};
+
+int gator_events_perf_pmu_init(void)
+{
+ unsigned int cnt;
+ const u32 cpuid = gator_cpuid();
+
+ for (cnt = 0; gator_cpus[cnt].cpuid != 0; ++cnt) {
+ if (gator_cpus[cnt].cpuid == cpuid) {
+ pmnc_name = gator_cpus[cnt].pmnc_name;
+ pmnc_counters = gator_cpus[cnt].pmnc_counters;
+ ccnt = gator_cpus[cnt].ccnt;
+ break;
+ }
+ }
+ if (gator_cpus[cnt].cpuid == 0) {
+ return -1;
+ }
+
+ pmnc_counters++; // CNT[n] + CCNT
+
+ for (cnt = 0; cnt < CNTMAX; cnt++) {
+ pmnc_enabled[cnt] = 0;
+ pmnc_event[cnt] = 0;
+ pmnc_count[cnt] = 0;
+ pmnc_key[cnt] = gator_events_get_key();
+ }
+
+ return gator_events_install(&gator_events_perf_pmu_interface);
+}
+
+gator_events_init(gator_events_perf_pmu_init);
+#endif
diff --git a/drivers/gator/gator_events_sched.c b/drivers/gator/gator_events_sched.c
new file mode 100644
index 000000000000..ba6744d130cb
--- /dev/null
+++ b/drivers/gator/gator_events_sched.c
@@ -0,0 +1,115 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include "gator.h"
+#include <trace/events/sched.h>
+
+#define SCHED_SWITCH 0
+#define SCHED_TOTAL (SCHED_SWITCH+1)
+
+static ulong sched_switch_enabled;
+static ulong sched_switch_key;
+static DEFINE_PER_CPU(int[SCHED_TOTAL], schedCnt);
+static DEFINE_PER_CPU(int[SCHED_TOTAL * 2], schedGet);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
+GATOR_DEFINE_PROBE(sched_switch, TP_PROTO(struct rq *rq, struct task_struct *prev, struct task_struct *next))
+#else
+GATOR_DEFINE_PROBE(sched_switch, TP_PROTO(struct task_struct *prev, struct task_struct *next))
+#endif
+{
+ unsigned long flags;
+
+ // disable interrupts to synchronize with gator_events_sched_read()
+ // spinlocks not needed since percpu buffers are used
+ local_irq_save(flags);
+ per_cpu(schedCnt, smp_processor_id())[SCHED_SWITCH]++;
+ local_irq_restore(flags);
+}
+
+static int gator_events_sched_create_files(struct super_block *sb, struct dentry *root)
+{
+ struct dentry *dir;
+
+ /* switch */
+ dir = gatorfs_mkdir(sb, root, "Linux_sched_switch");
+ if (!dir) {
+ return -1;
+ }
+ gatorfs_create_ulong(sb, dir, "enabled", &sched_switch_enabled);
+ gatorfs_create_ro_ulong(sb, dir, "key", &sched_switch_key);
+
+ return 0;
+}
+
+static int gator_events_sched_start(void)
+{
+ // register tracepoints
+ if (sched_switch_enabled)
+ if (GATOR_REGISTER_TRACE(sched_switch))
+ goto sched_switch_exit;
+ pr_debug("gator: registered scheduler event tracepoints\n");
+
+ return 0;
+
+ // unregister tracepoints on error
+sched_switch_exit:
+	pr_err("gator: scheduler event tracepoints failed to activate, please verify that tracepoints are enabled in the Linux kernel\n");
+
+ return -1;
+}
+
+static void gator_events_sched_stop(void)
+{
+ if (sched_switch_enabled)
+ GATOR_UNREGISTER_TRACE(sched_switch);
+ pr_debug("gator: unregistered scheduler event tracepoints\n");
+
+ sched_switch_enabled = 0;
+}
+
+static int gator_events_sched_read(int **buffer)
+{
+ unsigned long flags;
+ int len, value;
+ int cpu = smp_processor_id();
+
+ len = 0;
+ if (sched_switch_enabled) {
+ local_irq_save(flags);
+ value = per_cpu(schedCnt, cpu)[SCHED_SWITCH];
+ per_cpu(schedCnt, cpu)[SCHED_SWITCH] = 0;
+ local_irq_restore(flags);
+ per_cpu(schedGet, cpu)[len++] = sched_switch_key;
+ per_cpu(schedGet, cpu)[len++] = value;
+ }
+
+ if (buffer)
+ *buffer = per_cpu(schedGet, cpu);
+
+ return len;
+}
+
+static struct gator_interface gator_events_sched_interface = {
+ .create_files = gator_events_sched_create_files,
+ .start = gator_events_sched_start,
+ .stop = gator_events_sched_stop,
+ .read = gator_events_sched_read,
+};
+
+int gator_events_sched_init(void)
+{
+ sched_switch_enabled = 0;
+
+ sched_switch_key = gator_events_get_key();
+
+ return gator_events_install(&gator_events_sched_interface);
+}
+
+gator_events_init(gator_events_sched_init);
diff --git a/drivers/gator/gator_events_scorpion.c b/drivers/gator/gator_events_scorpion.c
new file mode 100644
index 000000000000..5ffc63ab5cb1
--- /dev/null
+++ b/drivers/gator/gator_events_scorpion.c
@@ -0,0 +1,676 @@
+/**
+ * Copyright (C) ARM Limited 2011-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "gator.h"
+
+// gator_events_perf_pmu.c is used if perf is supported
+#if GATOR_NO_PERF_SUPPORT
+
+static const char *pmnc_name;
+static int pmnc_counters;
+
+// Per-CPU PMNC: config reg
+#define PMNC_E (1 << 0) /* Enable all counters */
+#define PMNC_P (1 << 1) /* Reset all counters */
+#define PMNC_C (1 << 2) /* Cycle counter reset */
+#define PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */
+#define PMNC_X (1 << 4) /* Export to ETM */
+#define PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug */
+#define PMNC_MASK 0x3f /* Mask for writable bits */
+
+// ccnt reg
+#define CCNT_REG (1 << 31)
+
+#define CCNT 0
+#define CNT0 1
+#define CNTMAX (4+1)
+
+static unsigned long pmnc_enabled[CNTMAX];
+static unsigned long pmnc_event[CNTMAX];
+static unsigned long pmnc_key[CNTMAX];
+
+static DEFINE_PER_CPU(int[CNTMAX * 2], perfCnt);
+
+enum scorpion_perf_types {
+ SCORPION_ICACHE_EXPL_INV = 0x4c,
+ SCORPION_ICACHE_MISS = 0x4d,
+ SCORPION_ICACHE_ACCESS = 0x4e,
+ SCORPION_ICACHE_CACHEREQ_L2 = 0x4f,
+ SCORPION_ICACHE_NOCACHE_L2 = 0x50,
+ SCORPION_HIQUP_NOPED = 0x51,
+ SCORPION_DATA_ABORT = 0x52,
+ SCORPION_IRQ = 0x53,
+ SCORPION_FIQ = 0x54,
+ SCORPION_ALL_EXCPT = 0x55,
+ SCORPION_UNDEF = 0x56,
+ SCORPION_SVC = 0x57,
+ SCORPION_SMC = 0x58,
+ SCORPION_PREFETCH_ABORT = 0x59,
+ SCORPION_INDEX_CHECK = 0x5a,
+ SCORPION_NULL_CHECK = 0x5b,
+ SCORPION_EXPL_ICIALLU = 0x5c,
+ SCORPION_IMPL_ICIALLU = 0x5d,
+ SCORPION_NONICIALLU_BTAC_INV = 0x5e,
+ SCORPION_ICIMVAU_IMPL_ICIALLU = 0x5f,
+ SCORPION_SPIPE_ONLY_CYCLES = 0x60,
+ SCORPION_XPIPE_ONLY_CYCLES = 0x61,
+ SCORPION_DUAL_CYCLES = 0x62,
+ SCORPION_DISPATCH_ANY_CYCLES = 0x63,
+ SCORPION_FIFO_FULLBLK_CMT = 0x64,
+ SCORPION_FAIL_COND_INST = 0x65,
+ SCORPION_PASS_COND_INST = 0x66,
+ SCORPION_ALLOW_VU_CLK = 0x67,
+ SCORPION_VU_IDLE = 0x68,
+ SCORPION_ALLOW_L2_CLK = 0x69,
+ SCORPION_L2_IDLE = 0x6a,
+ SCORPION_DTLB_IMPL_INV_SCTLR_DACR = 0x6b,
+ SCORPION_DTLB_EXPL_INV = 0x6c,
+ SCORPION_DTLB_MISS = 0x6d,
+ SCORPION_DTLB_ACCESS = 0x6e,
+ SCORPION_ITLB_MISS = 0x6f,
+ SCORPION_ITLB_IMPL_INV = 0x70,
+ SCORPION_ITLB_EXPL_INV = 0x71,
+ SCORPION_UTLB_D_MISS = 0x72,
+ SCORPION_UTLB_D_ACCESS = 0x73,
+ SCORPION_UTLB_I_MISS = 0x74,
+ SCORPION_UTLB_I_ACCESS = 0x75,
+ SCORPION_UTLB_INV_ASID = 0x76,
+ SCORPION_UTLB_INV_MVA = 0x77,
+ SCORPION_UTLB_INV_ALL = 0x78,
+ SCORPION_S2_HOLD_RDQ_UNAVAIL = 0x79,
+ SCORPION_S2_HOLD = 0x7a,
+ SCORPION_S2_HOLD_DEV_OP = 0x7b,
+ SCORPION_S2_HOLD_ORDER = 0x7c,
+ SCORPION_S2_HOLD_BARRIER = 0x7d,
+ SCORPION_VIU_DUAL_CYCLE = 0x7e,
+ SCORPION_VIU_SINGLE_CYCLE = 0x7f,
+ SCORPION_VX_PIPE_WAR_STALL_CYCLES = 0x80,
+ SCORPION_VX_PIPE_WAW_STALL_CYCLES = 0x81,
+ SCORPION_VX_PIPE_RAW_STALL_CYCLES = 0x82,
+ SCORPION_VX_PIPE_LOAD_USE_STALL = 0x83,
+ SCORPION_VS_PIPE_WAR_STALL_CYCLES = 0x84,
+ SCORPION_VS_PIPE_WAW_STALL_CYCLES = 0x85,
+ SCORPION_VS_PIPE_RAW_STALL_CYCLES = 0x86,
+ SCORPION_EXCEPTIONS_INV_OPERATION = 0x87,
+ SCORPION_EXCEPTIONS_DIV_BY_ZERO = 0x88,
+ SCORPION_COND_INST_FAIL_VX_PIPE = 0x89,
+ SCORPION_COND_INST_FAIL_VS_PIPE = 0x8a,
+ SCORPION_EXCEPTIONS_OVERFLOW = 0x8b,
+ SCORPION_EXCEPTIONS_UNDERFLOW = 0x8c,
+ SCORPION_EXCEPTIONS_DENORM = 0x8d,
+#ifdef CONFIG_ARCH_MSM_SCORPIONMP
+ SCORPIONMP_NUM_BARRIERS = 0x8e,
+ SCORPIONMP_BARRIER_CYCLES = 0x8f,
+#else
+ SCORPION_BANK_AB_HIT = 0x8e,
+ SCORPION_BANK_AB_ACCESS = 0x8f,
+ SCORPION_BANK_CD_HIT = 0x90,
+ SCORPION_BANK_CD_ACCESS = 0x91,
+ SCORPION_BANK_AB_DSIDE_HIT = 0x92,
+ SCORPION_BANK_AB_DSIDE_ACCESS = 0x93,
+ SCORPION_BANK_CD_DSIDE_HIT = 0x94,
+ SCORPION_BANK_CD_DSIDE_ACCESS = 0x95,
+ SCORPION_BANK_AB_ISIDE_HIT = 0x96,
+ SCORPION_BANK_AB_ISIDE_ACCESS = 0x97,
+ SCORPION_BANK_CD_ISIDE_HIT = 0x98,
+ SCORPION_BANK_CD_ISIDE_ACCESS = 0x99,
+ SCORPION_ISIDE_RD_WAIT = 0x9a,
+ SCORPION_DSIDE_RD_WAIT = 0x9b,
+ SCORPION_BANK_BYPASS_WRITE = 0x9c,
+ SCORPION_BANK_AB_NON_CASTOUT = 0x9d,
+ SCORPION_BANK_AB_L2_CASTOUT = 0x9e,
+ SCORPION_BANK_CD_NON_CASTOUT = 0x9f,
+ SCORPION_BANK_CD_L2_CASTOUT = 0xa0,
+#endif
+ MSM_MAX_EVT
+};
+
+struct scorp_evt {
+ u32 evt_type;
+ u32 val;
+ u8 grp;
+ u32 evt_type_act;
+};
+
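+// Each entry maps a Scorpion event ID to the value programmed into its local
+// performance monitor (LPM) register ('val'), the LPM register group it belongs to
+// ('grp', used as an index into scor_func[] below), and the column event code
+// written to the standard event select register ('evt_type_act').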
+static const struct scorp_evt sc_evt[] = {
+ {SCORPION_ICACHE_EXPL_INV, 0x80000500, 0, 0x4d},
+ {SCORPION_ICACHE_MISS, 0x80050000, 0, 0x4e},
+ {SCORPION_ICACHE_ACCESS, 0x85000000, 0, 0x4f},
+ {SCORPION_ICACHE_CACHEREQ_L2, 0x86000000, 0, 0x4f},
+ {SCORPION_ICACHE_NOCACHE_L2, 0x87000000, 0, 0x4f},
+ {SCORPION_HIQUP_NOPED, 0x80080000, 0, 0x4e},
+ {SCORPION_DATA_ABORT, 0x8000000a, 0, 0x4c},
+ {SCORPION_IRQ, 0x80000a00, 0, 0x4d},
+ {SCORPION_FIQ, 0x800a0000, 0, 0x4e},
+ {SCORPION_ALL_EXCPT, 0x8a000000, 0, 0x4f},
+ {SCORPION_UNDEF, 0x8000000b, 0, 0x4c},
+ {SCORPION_SVC, 0x80000b00, 0, 0x4d},
+ {SCORPION_SMC, 0x800b0000, 0, 0x4e},
+ {SCORPION_PREFETCH_ABORT, 0x8b000000, 0, 0x4f},
+ {SCORPION_INDEX_CHECK, 0x8000000c, 0, 0x4c},
+ {SCORPION_NULL_CHECK, 0x80000c00, 0, 0x4d},
+ {SCORPION_EXPL_ICIALLU, 0x8000000d, 0, 0x4c},
+ {SCORPION_IMPL_ICIALLU, 0x80000d00, 0, 0x4d},
+ {SCORPION_NONICIALLU_BTAC_INV, 0x800d0000, 0, 0x4e},
+ {SCORPION_ICIMVAU_IMPL_ICIALLU, 0x8d000000, 0, 0x4f},
+
+ {SCORPION_SPIPE_ONLY_CYCLES, 0x80000600, 1, 0x51},
+ {SCORPION_XPIPE_ONLY_CYCLES, 0x80060000, 1, 0x52},
+ {SCORPION_DUAL_CYCLES, 0x86000000, 1, 0x53},
+ {SCORPION_DISPATCH_ANY_CYCLES, 0x89000000, 1, 0x53},
+ {SCORPION_FIFO_FULLBLK_CMT, 0x8000000d, 1, 0x50},
+ {SCORPION_FAIL_COND_INST, 0x800d0000, 1, 0x52},
+ {SCORPION_PASS_COND_INST, 0x8d000000, 1, 0x53},
+ {SCORPION_ALLOW_VU_CLK, 0x8000000e, 1, 0x50},
+ {SCORPION_VU_IDLE, 0x80000e00, 1, 0x51},
+ {SCORPION_ALLOW_L2_CLK, 0x800e0000, 1, 0x52},
+ {SCORPION_L2_IDLE, 0x8e000000, 1, 0x53},
+
+ {SCORPION_DTLB_IMPL_INV_SCTLR_DACR, 0x80000001, 2, 0x54},
+ {SCORPION_DTLB_EXPL_INV, 0x80000100, 2, 0x55},
+ {SCORPION_DTLB_MISS, 0x80010000, 2, 0x56},
+ {SCORPION_DTLB_ACCESS, 0x81000000, 2, 0x57},
+ {SCORPION_ITLB_MISS, 0x80000200, 2, 0x55},
+ {SCORPION_ITLB_IMPL_INV, 0x80020000, 2, 0x56},
+ {SCORPION_ITLB_EXPL_INV, 0x82000000, 2, 0x57},
+ {SCORPION_UTLB_D_MISS, 0x80000003, 2, 0x54},
+ {SCORPION_UTLB_D_ACCESS, 0x80000300, 2, 0x55},
+ {SCORPION_UTLB_I_MISS, 0x80030000, 2, 0x56},
+ {SCORPION_UTLB_I_ACCESS, 0x83000000, 2, 0x57},
+ {SCORPION_UTLB_INV_ASID, 0x80000400, 2, 0x55},
+ {SCORPION_UTLB_INV_MVA, 0x80040000, 2, 0x56},
+ {SCORPION_UTLB_INV_ALL, 0x84000000, 2, 0x57},
+ {SCORPION_S2_HOLD_RDQ_UNAVAIL, 0x80000800, 2, 0x55},
+ {SCORPION_S2_HOLD, 0x88000000, 2, 0x57},
+ {SCORPION_S2_HOLD_DEV_OP, 0x80000900, 2, 0x55},
+ {SCORPION_S2_HOLD_ORDER, 0x80090000, 2, 0x56},
+ {SCORPION_S2_HOLD_BARRIER, 0x89000000, 2, 0x57},
+
+ {SCORPION_VIU_DUAL_CYCLE, 0x80000001, 4, 0x5c},
+ {SCORPION_VIU_SINGLE_CYCLE, 0x80000100, 4, 0x5d},
+ {SCORPION_VX_PIPE_WAR_STALL_CYCLES, 0x80000005, 4, 0x5c},
+ {SCORPION_VX_PIPE_WAW_STALL_CYCLES, 0x80000500, 4, 0x5d},
+ {SCORPION_VX_PIPE_RAW_STALL_CYCLES, 0x80050000, 4, 0x5e},
+ {SCORPION_VX_PIPE_LOAD_USE_STALL, 0x80000007, 4, 0x5c},
+ {SCORPION_VS_PIPE_WAR_STALL_CYCLES, 0x80000008, 4, 0x5c},
+ {SCORPION_VS_PIPE_WAW_STALL_CYCLES, 0x80000800, 4, 0x5d},
+ {SCORPION_VS_PIPE_RAW_STALL_CYCLES, 0x80080000, 4, 0x5e},
+ {SCORPION_EXCEPTIONS_INV_OPERATION, 0x8000000b, 4, 0x5c},
+ {SCORPION_EXCEPTIONS_DIV_BY_ZERO, 0x80000b00, 4, 0x5d},
+ {SCORPION_COND_INST_FAIL_VX_PIPE, 0x800b0000, 4, 0x5e},
+ {SCORPION_COND_INST_FAIL_VS_PIPE, 0x8b000000, 4, 0x5f},
+ {SCORPION_EXCEPTIONS_OVERFLOW, 0x8000000c, 4, 0x5c},
+ {SCORPION_EXCEPTIONS_UNDERFLOW, 0x80000c00, 4, 0x5d},
+ {SCORPION_EXCEPTIONS_DENORM, 0x8c000000, 4, 0x5f},
+
+#ifdef CONFIG_ARCH_MSM_SCORPIONMP
+ {SCORPIONMP_NUM_BARRIERS, 0x80000e00, 3, 0x59},
+ {SCORPIONMP_BARRIER_CYCLES, 0x800e0000, 3, 0x5a},
+#else
+ {SCORPION_BANK_AB_HIT, 0x80000001, 3, 0x58},
+ {SCORPION_BANK_AB_ACCESS, 0x80000100, 3, 0x59},
+ {SCORPION_BANK_CD_HIT, 0x80010000, 3, 0x5a},
+ {SCORPION_BANK_CD_ACCESS, 0x81000000, 3, 0x5b},
+ {SCORPION_BANK_AB_DSIDE_HIT, 0x80000002, 3, 0x58},
+ {SCORPION_BANK_AB_DSIDE_ACCESS, 0x80000200, 3, 0x59},
+ {SCORPION_BANK_CD_DSIDE_HIT, 0x80020000, 3, 0x5a},
+ {SCORPION_BANK_CD_DSIDE_ACCESS, 0x82000000, 3, 0x5b},
+ {SCORPION_BANK_AB_ISIDE_HIT, 0x80000003, 3, 0x58},
+ {SCORPION_BANK_AB_ISIDE_ACCESS, 0x80000300, 3, 0x59},
+ {SCORPION_BANK_CD_ISIDE_HIT, 0x80030000, 3, 0x5a},
+ {SCORPION_BANK_CD_ISIDE_ACCESS, 0x83000000, 3, 0x5b},
+ {SCORPION_ISIDE_RD_WAIT, 0x80000009, 3, 0x58},
+ {SCORPION_DSIDE_RD_WAIT, 0x80090000, 3, 0x5a},
+ {SCORPION_BANK_BYPASS_WRITE, 0x8000000a, 3, 0x58},
+ {SCORPION_BANK_AB_NON_CASTOUT, 0x8000000c, 3, 0x58},
+ {SCORPION_BANK_AB_L2_CASTOUT, 0x80000c00, 3, 0x59},
+ {SCORPION_BANK_CD_NON_CASTOUT, 0x800c0000, 3, 0x5a},
+ {SCORPION_BANK_CD_L2_CASTOUT, 0x8c000000, 3, 0x5b},
+#endif
+};
+
+static inline void scorpion_pmnc_write(u32 val)
+{
+ val &= PMNC_MASK;
+ asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (val));
+}
+
+static inline u32 scorpion_pmnc_read(void)
+{
+ u32 val;
+ asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
+ return val;
+}
+
+static inline u32 scorpion_ccnt_read(void)
+{
+ u32 val;
+ asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
+ return val;
+}
+
+static inline u32 scorpion_cntn_read(void)
+{
+ u32 val;
+ asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
+ return val;
+}
+
+static inline u32 scorpion_pmnc_enable_counter(unsigned int cnt)
+{
+ u32 val;
+
+ if (cnt >= CNTMAX) {
+ pr_err("gator: CPU%u enabling wrong PMNC counter %d\n", smp_processor_id(), cnt);
+ return -1;
+ }
+
+ if (cnt == CCNT)
+ val = CCNT_REG;
+ else
+ val = (1 << (cnt - CNT0));
+
+ asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
+
+ return cnt;
+}
+
+static inline u32 scorpion_pmnc_disable_counter(unsigned int cnt)
+{
+ u32 val;
+
+ if (cnt >= CNTMAX) {
+ pr_err("gator: CPU%u disabling wrong PMNC counter %d\n", smp_processor_id(), cnt);
+ return -1;
+ }
+
+ if (cnt == CCNT)
+ val = CCNT_REG;
+ else
+ val = (1 << (cnt - CNT0));
+
+ asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
+
+ return cnt;
+}
+
+static inline int scorpion_pmnc_select_counter(unsigned int cnt)
+{
+ u32 val;
+
+ if ((cnt == CCNT) || (cnt >= CNTMAX)) {
+ pr_err("gator: CPU%u selecting wrong PMNC counter %d\n", smp_processor_id(), cnt);
+ return -1;
+ }
+
+ val = (cnt - CNT0);
+ asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
+
+ return cnt;
+}
+
+static u32 scorpion_read_lpm0(void)
+{
+ u32 val;
+ asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val));
+ return val;
+}
+
+static void scorpion_write_lpm0(u32 val)
+{
+ asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val));
+}
+
+static u32 scorpion_read_lpm1(void)
+{
+ u32 val;
+ asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
+ return val;
+}
+
+static void scorpion_write_lpm1(u32 val)
+{
+ asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
+}
+
+static u32 scorpion_read_lpm2(void)
+{
+ u32 val;
+ asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val));
+ return val;
+}
+
+static void scorpion_write_lpm2(u32 val)
+{
+ asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val));
+}
+
+static u32 scorpion_read_l2lpm(void)
+{
+ u32 val;
+ asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val));
+ return val;
+}
+
+static void scorpion_write_l2lpm(u32 val)
+{
+ asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val));
+}
+
+static u32 scorpion_read_vlpm(void)
+{
+ u32 val;
+ asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
+ return val;
+}
+
+static void scorpion_write_vlpm(u32 val)
+{
+ asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
+}
+
+struct scorpion_access_funcs {
+ u32(*read)(void);
+ void (*write)(u32);
+};
+
+struct scorpion_access_funcs scor_func[] = {
+ {scorpion_read_lpm0, scorpion_write_lpm0},
+ {scorpion_read_lpm1, scorpion_write_lpm1},
+ {scorpion_read_lpm2, scorpion_write_lpm2},
+ {scorpion_read_l2lpm, scorpion_write_l2lpm},
+ {scorpion_read_vlpm, scorpion_write_vlpm},
+};
+
+u32 venum_orig_val;
+u32 fp_orig_val;
+
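+// The VeNum (group 4) LPM register lives in coprocessor p10, so CP10 access in CPACR
+// and the FPEXC enable bit must be set before touching it and restored afterwards.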
+static void scorpion_pre_vlpm(void)
+{
+ u32 venum_new_val;
+ u32 fp_new_val;
+
+ /* CPACR Enable CP10 access */
+ asm volatile("mrc p15, 0, %0, c1, c0, 2" : "=r" (venum_orig_val));
+ venum_new_val = venum_orig_val | 0x00300000;
+ asm volatile("mcr p15, 0, %0, c1, c0, 2" : : "r" (venum_new_val));
+ /* Enable FPEXC */
+ asm volatile("mrc p10, 7, %0, c8, c0, 0" : "=r" (fp_orig_val));
+ fp_new_val = fp_orig_val | 0x40000000;
+ asm volatile("mcr p10, 7, %0, c8, c0, 0" : : "r" (fp_new_val));
+}
+
+static void scorpion_post_vlpm(void)
+{
+ /* Restore FPEXC */
+ asm volatile("mcr p10, 7, %0, c8, c0, 0" : : "r" (fp_orig_val));
+ /* Restore CPACR */
+ asm volatile("mcr p15, 0, %0, c1, c0, 2" : : "r" (venum_orig_val));
+}
+
+#define COLMN0MASK 0x000000ff
+#define COLMN1MASK 0x0000ff00
+#define COLMN2MASK 0x00ff0000
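+// Returns a mask that clears the byte column occupied by setval so the new event
+// code can be OR-ed into the LPM register, leaving the other columns untouched.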
+static u32 scorpion_get_columnmask(u32 setval)
+{
+ if (setval & COLMN0MASK)
+ return 0xffffff00;
+ else if (setval & COLMN1MASK)
+ return 0xffff00ff;
+ else if (setval & COLMN2MASK)
+ return 0xff00ffff;
+ else
+ return 0x80ffffff;
+}
+
+static void scorpion_evt_setup(u32 gr, u32 setval)
+{
+ u32 val;
+ if (gr == 4)
+ scorpion_pre_vlpm();
+ val = scorpion_get_columnmask(setval) & scor_func[gr].read();
+ val = val | setval;
+ scor_func[gr].write(val);
+ if (gr == 4)
+ scorpion_post_vlpm();
+}
+
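+// Looks up the LPM programming for an implementation-specific event. This relies on
+// sc_evt[] being laid out in the same order as the scorpion_perf_types enum, so
+// (evt_type - 0x4c) indexes straight into the table; the evt_type check guards
+// against any mismatch.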
+static int get_scorpion_evtinfo(unsigned int evt_type, struct scorp_evt *evtinfo)
+{
+ u32 idx;
+ if ((evt_type < 0x4c) || (evt_type >= MSM_MAX_EVT))
+ return 0;
+ idx = evt_type - 0x4c;
+ if (sc_evt[idx].evt_type == evt_type) {
+ evtinfo->val = sc_evt[idx].val;
+ evtinfo->grp = sc_evt[idx].grp;
+ evtinfo->evt_type_act = sc_evt[idx].evt_type_act;
+ return 1;
+ }
+ return 0;
+}
+
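+// Event codes below 0x40 are standard PMNC events and are written directly to the
+// event select register; larger codes are Scorpion implementation-specific events
+// that are routed through the LPM registers via get_scorpion_evtinfo() and
+// scorpion_evt_setup().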
+static inline void scorpion_pmnc_write_evtsel(unsigned int cnt, u32 val)
+{
+ if (scorpion_pmnc_select_counter(cnt) == cnt) {
+ if (val < 0x40) {
+ asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
+ } else {
+ u32 zero = 0;
+ struct scorp_evt evtinfo;
+ // extract evtinfo.grp and evtinfo.evt_type_act from val
+ if (get_scorpion_evtinfo(val, &evtinfo) == 0)
+ return;
+ asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (evtinfo.evt_type_act));
+ asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (zero));
+ scorpion_evt_setup(evtinfo.grp, val);
+ }
+ }
+}
+
+static void scorpion_pmnc_reset_counter(unsigned int cnt)
+{
+ u32 val = 0;
+
+ if (cnt == CCNT) {
+ scorpion_pmnc_disable_counter(cnt);
+
+ asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (val));
+
+ if (pmnc_enabled[cnt] != 0)
+ scorpion_pmnc_enable_counter(cnt);
+
+ } else if (cnt >= CNTMAX) {
+ pr_err("gator: CPU%u resetting wrong PMNC counter %d\n", smp_processor_id(), cnt);
+ } else {
+ scorpion_pmnc_disable_counter(cnt);
+
+ if (scorpion_pmnc_select_counter(cnt) == cnt)
+ asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (val));
+
+ if (pmnc_enabled[cnt] != 0)
+ scorpion_pmnc_enable_counter(cnt);
+ }
+}
+
+static int gator_events_scorpion_create_files(struct super_block *sb, struct dentry *root)
+{
+ struct dentry *dir;
+ int i;
+
+ for (i = 0; i < pmnc_counters; i++) {
+ char buf[40];
+ if (i == 0) {
+ snprintf(buf, sizeof buf, "%s_ccnt", pmnc_name);
+ } else {
+ snprintf(buf, sizeof buf, "%s_cnt%d", pmnc_name, i - 1);
+ }
+ dir = gatorfs_mkdir(sb, root, buf);
+ if (!dir) {
+ return -1;
+ }
+ gatorfs_create_ulong(sb, dir, "enabled", &pmnc_enabled[i]);
+ gatorfs_create_ro_ulong(sb, dir, "key", &pmnc_key[i]);
+ if (i > 0) {
+ gatorfs_create_ulong(sb, dir, "event", &pmnc_event[i]);
+ }
+ }
+
+ return 0;
+}
+
+static int gator_events_scorpion_online(int **buffer)
+{
+ unsigned int cnt, len = 0, cpu = smp_processor_id();
+
+ if (scorpion_pmnc_read() & PMNC_E) {
+ scorpion_pmnc_write(scorpion_pmnc_read() & ~PMNC_E);
+ }
+
+ /* Initialize & Reset PMNC: C bit and P bit */
+ scorpion_pmnc_write(PMNC_P | PMNC_C);
+
+ for (cnt = CCNT; cnt < CNTMAX; cnt++) {
+ unsigned long event;
+
+ if (!pmnc_enabled[cnt])
+ continue;
+
+ // disable counter
+ scorpion_pmnc_disable_counter(cnt);
+
+ event = pmnc_event[cnt] & 255;
+
+ // Set event (if destined for PMNx counters); the event does not need to be set for the cycle counter
+ if (cnt != CCNT)
+ scorpion_pmnc_write_evtsel(cnt, event);
+
+ // reset counter
+ scorpion_pmnc_reset_counter(cnt);
+
+ // Enable counter, do not enable interrupt for this counter
+ scorpion_pmnc_enable_counter(cnt);
+ }
+
+ // enable
+ scorpion_pmnc_write(scorpion_pmnc_read() | PMNC_E);
+
+ // read the counters and toss the invalid data, return zero instead
+ for (cnt = 0; cnt < pmnc_counters; cnt++) {
+ if (pmnc_enabled[cnt]) {
+ if (cnt == CCNT) {
+ scorpion_ccnt_read();
+ } else if (scorpion_pmnc_select_counter(cnt) == cnt) {
+ scorpion_cntn_read();
+ }
+ scorpion_pmnc_reset_counter(cnt);
+
+ per_cpu(perfCnt, cpu)[len++] = pmnc_key[cnt];
+ per_cpu(perfCnt, cpu)[len++] = 0;
+ }
+ }
+
+ if (buffer)
+ *buffer = per_cpu(perfCnt, cpu);
+
+ return len;
+}
+
+static int gator_events_scorpion_offline(int **buffer)
+{
+ scorpion_pmnc_write(scorpion_pmnc_read() & ~PMNC_E);
+ return 0;
+}
+
+static void gator_events_scorpion_stop(void)
+{
+ unsigned int cnt;
+
+ for (cnt = CCNT; cnt < CNTMAX; cnt++) {
+ pmnc_enabled[cnt] = 0;
+ pmnc_event[cnt] = 0;
+ }
+}
+
+static int gator_events_scorpion_read(int **buffer)
+{
+ int cnt, len = 0;
+ int cpu = smp_processor_id();
+
+ // A context switch may occur before the online hotplug event, so check that the PMU is enabled before reading
+ if (!(scorpion_pmnc_read() & PMNC_E)) {
+ return 0;
+ }
+
+ for (cnt = 0; cnt < pmnc_counters; cnt++) {
+ if (pmnc_enabled[cnt]) {
+ int value;
+ if (cnt == CCNT) {
+ value = scorpion_ccnt_read();
+ } else if (scorpion_pmnc_select_counter(cnt) == cnt) {
+ value = scorpion_cntn_read();
+ } else {
+ value = 0;
+ }
+ scorpion_pmnc_reset_counter(cnt);
+
+ per_cpu(perfCnt, cpu)[len++] = pmnc_key[cnt];
+ per_cpu(perfCnt, cpu)[len++] = value;
+ }
+ }
+
+ if (buffer)
+ *buffer = per_cpu(perfCnt, cpu);
+
+ return len;
+}
+
+static struct gator_interface gator_events_scorpion_interface = {
+ .create_files = gator_events_scorpion_create_files,
+ .stop = gator_events_scorpion_stop,
+ .online = gator_events_scorpion_online,
+ .offline = gator_events_scorpion_offline,
+ .read = gator_events_scorpion_read,
+};
+
+int gator_events_scorpion_init(void)
+{
+ unsigned int cnt;
+
+ switch (gator_cpuid()) {
+ case SCORPION:
+ pmnc_name = "Scorpion";
+ pmnc_counters = 4;
+ break;
+ case SCORPIONMP:
+ pmnc_name = "ScorpionMP";
+ pmnc_counters = 4;
+ break;
+ default:
+ return -1;
+ }
+
+ pmnc_counters++; // CNT[n] + CCNT
+
+ for (cnt = CCNT; cnt < CNTMAX; cnt++) {
+ pmnc_enabled[cnt] = 0;
+ pmnc_event[cnt] = 0;
+ pmnc_key[cnt] = gator_events_get_key();
+ }
+
+ return gator_events_install(&gator_events_scorpion_interface);
+}
+
+gator_events_init(gator_events_scorpion_init);
+
+#else
+int gator_events_scorpion_init(void)
+{
+ return -1;
+}
+#endif
diff --git a/drivers/gator/gator_fs.c b/drivers/gator/gator_fs.c
new file mode 100644
index 000000000000..9ff118b0266d
--- /dev/null
+++ b/drivers/gator/gator_fs.c
@@ -0,0 +1,295 @@
+/**
+ * @file gatorfs.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon
+ *
+ * A simple filesystem for configuration and
+ * access of oprofile.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <asm/uaccess.h>
+
+#define gatorfs_MAGIC 0x24051020
+#define TMPBUFSIZE 50
+DEFINE_SPINLOCK(gatorfs_lock);
+
+static struct inode *gatorfs_get_inode(struct super_block *sb, int mode)
+{
+ struct inode *inode = new_inode(sb);
+
+ if (inode) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
+ inode->i_ino = get_next_ino();
+#endif
+ inode->i_mode = mode;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ }
+ return inode;
+}
+
+static const struct super_operations s_ops = {
+ .statfs = simple_statfs,
+ .drop_inode = generic_delete_inode,
+};
+
+ssize_t gatorfs_str_to_user(char const *str, char __user *buf, size_t count, loff_t *offset)
+{
+ return simple_read_from_buffer(buf, count, offset, str, strlen(str));
+}
+
+ssize_t gatorfs_ulong_to_user(unsigned long val, char __user *buf, size_t count, loff_t *offset)
+{
+ char tmpbuf[TMPBUFSIZE];
+ size_t maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", val);
+ if (maxlen > TMPBUFSIZE)
+ maxlen = TMPBUFSIZE;
+ return simple_read_from_buffer(buf, count, offset, tmpbuf, maxlen);
+}
+
+int gatorfs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
+{
+ char tmpbuf[TMPBUFSIZE];
+ unsigned long flags;
+
+ if (!count)
+ return 0;
+
+ if (count > TMPBUFSIZE - 1)
+ return -EINVAL;
+
+ memset(tmpbuf, 0x0, TMPBUFSIZE);
+
+ if (copy_from_user(tmpbuf, buf, count))
+ return -EFAULT;
+
+ spin_lock_irqsave(&gatorfs_lock, flags);
+ *val = simple_strtoul(tmpbuf, NULL, 0);
+ spin_unlock_irqrestore(&gatorfs_lock, flags);
+ return 0;
+}
+
+static ssize_t ulong_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
+{
+ unsigned long *val = file->private_data;
+ return gatorfs_ulong_to_user(*val, buf, count, offset);
+}
+
+static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset)
+{
+ unsigned long *value = file->private_data;
+ int retval;
+
+ if (*offset)
+ return -EINVAL;
+
+ retval = gatorfs_ulong_from_user(value, buf, count);
+
+ if (retval)
+ return retval;
+ return count;
+}
+
+static int default_open(struct inode *inode, struct file *filp)
+{
+ if (inode->i_private)
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+static const struct file_operations ulong_fops = {
+ .read = ulong_read_file,
+ .write = ulong_write_file,
+ .open = default_open,
+};
+
+static const struct file_operations ulong_ro_fops = {
+ .read = ulong_read_file,
+ .open = default_open,
+};
+
+static struct dentry *__gatorfs_create_file(struct super_block *sb,
+ struct dentry *root,
+ char const *name,
+ const struct file_operations *fops,
+ int perm)
+{
+ struct dentry *dentry;
+ struct inode *inode;
+
+ dentry = d_alloc_name(root, name);
+ if (!dentry)
+ return NULL;
+ inode = gatorfs_get_inode(sb, S_IFREG | perm);
+ if (!inode) {
+ dput(dentry);
+ return NULL;
+ }
+ inode->i_fop = fops;
+ d_add(dentry, inode);
+ return dentry;
+}
+
+int gatorfs_create_ulong(struct super_block *sb, struct dentry *root,
+ char const *name, unsigned long *val)
+{
+ struct dentry *d = __gatorfs_create_file(sb, root, name,
+ &ulong_fops, 0644);
+ if (!d)
+ return -EFAULT;
+
+ d->d_inode->i_private = val;
+ return 0;
+}
+
+int gatorfs_create_ro_ulong(struct super_block *sb, struct dentry *root,
+ char const *name, unsigned long *val)
+{
+ struct dentry *d = __gatorfs_create_file(sb, root, name,
+ &ulong_ro_fops, 0444);
+ if (!d)
+ return -EFAULT;
+
+ d->d_inode->i_private = val;
+ return 0;
+}
+
+static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
+{
+ atomic_t *val = file->private_data;
+ return gatorfs_ulong_to_user(atomic_read(val), buf, count, offset);
+}
+
+static const struct file_operations atomic_ro_fops = {
+ .read = atomic_read_file,
+ .open = default_open,
+};
+
+int gatorfs_create_ro_atomic(struct super_block *sb, struct dentry *root,
+ char const *name, atomic_t *val)
+{
+ struct dentry *d = __gatorfs_create_file(sb, root, name,
+ &atomic_ro_fops, 0444);
+ if (!d)
+ return -EFAULT;
+
+ d->d_inode->i_private = val;
+ return 0;
+}
+
+int gatorfs_create_file(struct super_block *sb, struct dentry *root,
+ char const *name, const struct file_operations *fops)
+{
+ if (!__gatorfs_create_file(sb, root, name, fops, 0644))
+ return -EFAULT;
+ return 0;
+}
+
+int gatorfs_create_file_perm(struct super_block *sb, struct dentry *root,
+ char const *name,
+ const struct file_operations *fops, int perm)
+{
+ if (!__gatorfs_create_file(sb, root, name, fops, perm))
+ return -EFAULT;
+ return 0;
+}
+
+struct dentry *gatorfs_mkdir(struct super_block *sb,
+ struct dentry *root, char const *name)
+{
+ struct dentry *dentry;
+ struct inode *inode;
+
+ dentry = d_alloc_name(root, name);
+ if (!dentry)
+ return NULL;
+ inode = gatorfs_get_inode(sb, S_IFDIR | 0755);
+ if (!inode) {
+ dput(dentry);
+ return NULL;
+ }
+ inode->i_op = &simple_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ d_add(dentry, inode);
+ return dentry;
+}
+
+static int gatorfs_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct inode *root_inode;
+ struct dentry *root_dentry;
+
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_magic = gatorfs_MAGIC;
+ sb->s_op = &s_ops;
+ sb->s_time_gran = 1;
+
+ root_inode = gatorfs_get_inode(sb, S_IFDIR | 0755);
+ if (!root_inode)
+ return -ENOMEM;
+ root_inode->i_op = &simple_dir_inode_operations;
+ root_inode->i_fop = &simple_dir_operations;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)
+ root_dentry = d_alloc_root(root_inode);
+#else
+ root_dentry = d_make_root(root_inode);
+#endif
+
+ if (!root_dentry) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)
+ iput(root_inode);
+#endif
+ return -ENOMEM;
+ }
+
+ sb->s_root = root_dentry;
+
+ gator_op_create_files(sb, root_dentry);
+
+ return 0;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
+static int gatorfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data,
+ struct vfsmount *mnt)
+{
+ return get_sb_single(fs_type, flags, data, gatorfs_fill_super, mnt);
+}
+#else
+static struct dentry *gatorfs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+{
+ return mount_nodev(fs_type, flags, data, gatorfs_fill_super);
+}
+#endif
+
+static struct file_system_type gatorfs_type = {
+ .owner = THIS_MODULE,
+ .name = "gatorfs",
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
+ .get_sb = gatorfs_get_sb,
+#else
+ .mount = gatorfs_mount,
+#endif
+
+ .kill_sb = kill_litter_super,
+};
+
+int __init gatorfs_register(void)
+{
+ return register_filesystem(&gatorfs_type);
+}
+
+void gatorfs_unregister(void)
+{
+ unregister_filesystem(&gatorfs_type);
+}
diff --git a/drivers/gator/gator_hrtimer_gator.c b/drivers/gator/gator_hrtimer_gator.c
new file mode 100644
index 000000000000..846fba44cff2
--- /dev/null
+++ b/drivers/gator/gator_hrtimer_gator.c
@@ -0,0 +1,97 @@
+/**
+ * Copyright (C) ARM Limited 2011-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+// gator_hrtimer_perf.c is used if perf is supported
+// Update: gator_hrtimer_gator.c is always used until the issues with perf hrtimers are resolved
+#if 1
+
+void (*callback)(void);
+DEFINE_PER_CPU(struct hrtimer, percpu_hrtimer);
+DEFINE_PER_CPU(int, hrtimer_is_active);
+static ktime_t profiling_interval;
+static void gator_hrtimer_online(int cpu);
+static void gator_hrtimer_offline(int cpu);
+
+static enum hrtimer_restart gator_hrtimer_notify(struct hrtimer *hrtimer)
+{
+ hrtimer_forward_now(hrtimer, profiling_interval);
+ (*callback)();
+ return HRTIMER_RESTART;
+}
+
+static void gator_hrtimer_switch_cpus_online(void *unused)
+{
+ gator_hrtimer_online(smp_processor_id());
+}
+
+static void gator_hrtimer_online(int cpu)
+{
+ struct hrtimer *hrtimer = &per_cpu(percpu_hrtimer, cpu);
+
+ if (cpu != smp_processor_id()) {
+ smp_call_function_single(cpu, gator_hrtimer_switch_cpus_online, NULL, 1);
+ return;
+ }
+
+ if (per_cpu(hrtimer_is_active, cpu) || profiling_interval.tv64 == 0)
+ return;
+
+ per_cpu(hrtimer_is_active, cpu) = 1;
+ hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer->function = gator_hrtimer_notify;
+ hrtimer_start(hrtimer, profiling_interval, HRTIMER_MODE_REL_PINNED);
+}
+
+static void gator_hrtimer_switch_cpus_offline(void *unused)
+{
+ gator_hrtimer_offline(smp_processor_id());
+}
+
+static void gator_hrtimer_offline(int cpu)
+{
+ struct hrtimer *hrtimer = &per_cpu(percpu_hrtimer, cpu);
+
+ if (cpu != smp_processor_id()) {
+ smp_call_function_single(cpu, gator_hrtimer_switch_cpus_offline, NULL, 1);
+ return;
+ }
+
+ if (!per_cpu(hrtimer_is_active, cpu))
+ return;
+
+ per_cpu(hrtimer_is_active, cpu) = 0;
+ hrtimer_cancel(hrtimer);
+}
+
+static int gator_hrtimer_init(int interval, void (*func)(void))
+{
+ int cpu;
+
+ callback = func;
+
+ for_each_present_cpu(cpu) {
+ per_cpu(hrtimer_is_active, cpu) = 0;
+ }
+
+ // calculate profiling interval
+ if (interval > 0) {
+ profiling_interval = ns_to_ktime(1000000000UL / interval);
+ } else {
+ profiling_interval.tv64 = 0;
+ }
+
+ return 0;
+}
+
+static void gator_hrtimer_shutdown(void)
+{
+ /* empty */
+}
+
+#endif
diff --git a/drivers/gator/gator_hrtimer_perf.c b/drivers/gator/gator_hrtimer_perf.c
new file mode 100644
index 000000000000..7c0333f64453
--- /dev/null
+++ b/drivers/gator/gator_hrtimer_perf.c
@@ -0,0 +1,113 @@
+/**
+ * Copyright (C) ARM Limited 2011-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+// gator_hrtimer_gator.c is used if perf is not supported
+// Update: gator_hrtimer_gator.c is always used until the issues with perf hrtimers are resolved
+#if 0
+
+// Note: perf Cortex support was added in 2.6.35, and PERF_COUNT_SW_CPU_CLOCK/hrtimer is broken on 2.6.35 and 2.6.36.
+// Not relevant as this code is not active until 3.0.0, but the issue is documented here for reference.
+
+void (*callback)(void);
+static int profiling_interval;
+static DEFINE_PER_CPU(struct perf_event *, perf_hrtimer);
+static DEFINE_PER_CPU(struct perf_event_attr *, perf_hrtimer_attr);
+
+static void gator_hrtimer_shutdown(void);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)
+static void hrtimer_overflow_handler(struct perf_event *event, int unused, struct perf_sample_data *data, struct pt_regs *regs)
+#else
+static void hrtimer_overflow_handler(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs)
+#endif
+{
+ (*callback)();
+}
+
+static int gator_online_single_hrtimer(int cpu)
+{
+ if (per_cpu(perf_hrtimer, cpu) != 0 || per_cpu(perf_hrtimer_attr, cpu) == 0)
+ return 0;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)
+ per_cpu(perf_hrtimer, cpu) = perf_event_create_kernel_counter(per_cpu(perf_hrtimer_attr, cpu), cpu, 0, hrtimer_overflow_handler);
+#else
+ per_cpu(perf_hrtimer, cpu) = perf_event_create_kernel_counter(per_cpu(perf_hrtimer_attr, cpu), cpu, 0, hrtimer_overflow_handler, 0);
+#endif
+ if (IS_ERR(per_cpu(perf_hrtimer, cpu))) {
+ per_cpu(perf_hrtimer, cpu) = NULL;
+ return -1;
+ }
+
+ if (per_cpu(perf_hrtimer, cpu)->state != PERF_EVENT_STATE_ACTIVE) {
+ perf_event_release_kernel(per_cpu(perf_hrtimer, cpu));
+ per_cpu(perf_hrtimer, cpu) = NULL;
+ return -1;
+ }
+
+ return 0;
+}
+
+static void gator_hrtimer_online(int cpu)
+{
+ if (gator_online_single_hrtimer(cpu) < 0) {
+ pr_debug("gator: unable to online the hrtimer on cpu%d\n", cpu);
+ }
+}
+
+static void gator_hrtimer_offline(int cpu)
+{
+ if (per_cpu(perf_hrtimer, cpu)) {
+ perf_event_release_kernel(per_cpu(perf_hrtimer, cpu));
+ per_cpu(perf_hrtimer, cpu) = NULL;
+ }
+}
+
+static int gator_hrtimer_init(int interval, void (*func)(void))
+{
+ u32 size = sizeof(struct perf_event_attr);
+ int cpu;
+
+ callback = func;
+
+ // calculate profiling interval
+ profiling_interval = 1000000000 / interval;
+
+ for_each_present_cpu(cpu) {
+ per_cpu(perf_hrtimer, cpu) = 0;
+ per_cpu(perf_hrtimer_attr, cpu) = kmalloc(size, GFP_KERNEL);
+ if (per_cpu(perf_hrtimer_attr, cpu) == 0) {
+ gator_hrtimer_shutdown();
+ return -1;
+ }
+
+ memset(per_cpu(perf_hrtimer_attr, cpu), 0, size);
+ per_cpu(perf_hrtimer_attr, cpu)->type = PERF_TYPE_SOFTWARE;
+ per_cpu(perf_hrtimer_attr, cpu)->size = size;
+ per_cpu(perf_hrtimer_attr, cpu)->config = PERF_COUNT_SW_CPU_CLOCK;
+ per_cpu(perf_hrtimer_attr, cpu)->sample_period = profiling_interval;
+ per_cpu(perf_hrtimer_attr, cpu)->pinned = 1;
+ }
+
+ return 0;
+}
+
+static void gator_hrtimer_shutdown(void)
+{
+ int cpu;
+
+ for_each_present_cpu(cpu) {
+ if (per_cpu(perf_hrtimer_attr, cpu)) {
+ kfree(per_cpu(perf_hrtimer_attr, cpu));
+ per_cpu(perf_hrtimer_attr, cpu) = NULL;
+ }
+ }
+}
+
+#endif
diff --git a/drivers/gator/gator_main.c b/drivers/gator/gator_main.c
new file mode 100644
index 000000000000..8c35caa8ab5a
--- /dev/null
+++ b/drivers/gator/gator_main.c
@@ -0,0 +1,1254 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+static unsigned long gator_protocol_version = 12;
+
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <linux/sched.h>
+#include <linux/irq.h>
+#include <linux/vmalloc.h>
+#include <linux/hardirq.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/suspend.h>
+#include <linux/module.h>
+#include <linux/perf_event.h>
+#include <asm/stacktrace.h>
+#include <asm/uaccess.h>
+
+#include "gator.h"
+#include "gator_events.h"
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
+#error kernels prior to 2.6.32 are not supported
+#endif
+
+#if !defined(CONFIG_GENERIC_TRACER) && !defined(CONFIG_TRACING)
+#error gator requires the kernel to have CONFIG_GENERIC_TRACER or CONFIG_TRACING defined
+#endif
+
+#ifndef CONFIG_PROFILING
+#error gator requires the kernel to have CONFIG_PROFILING defined
+#endif
+
+#ifndef CONFIG_HIGH_RES_TIMERS
+#error gator requires the kernel to have CONFIG_HIGH_RES_TIMERS defined to support PC sampling
+#endif
+
+#if defined(__arm__) && defined(CONFIG_SMP) && !defined(CONFIG_LOCAL_TIMERS)
+#error gator requires the kernel to have CONFIG_LOCAL_TIMERS defined on SMP systems
+#endif
+
+#if (GATOR_PERF_SUPPORT) && (!(GATOR_PERF_PMU_SUPPORT))
+#ifndef CONFIG_PERF_EVENTS
+#warning gator requires the kernel to have CONFIG_PERF_EVENTS defined to support pmu hardware counters
+#elif !defined CONFIG_HW_PERF_EVENTS
+#warning gator requires the kernel to have CONFIG_HW_PERF_EVENTS defined to support pmu hardware counters
+#endif
+#endif
+
+/******************************************************************************
+ * DEFINES
+ ******************************************************************************/
+#define SUMMARY_BUFFER_SIZE (1*1024)
+#define BACKTRACE_BUFFER_SIZE (128*1024)
+#define NAME_BUFFER_SIZE (64*1024)
+#define COUNTER_BUFFER_SIZE (64*1024) // counters have the core as part of the data and the core value in the frame header may be discarded
+#define BLOCK_COUNTER_BUFFER_SIZE (128*1024)
+#define ANNOTATE_BUFFER_SIZE (64*1024) // annotate counters have the core as part of the data and the core value in the frame header may be discarded
+#define SCHED_TRACE_BUFFER_SIZE (128*1024)
+#define GPU_TRACE_BUFFER_SIZE (64*1024) // gpu trace counters have the core as part of the data and the core value in the frame header may be discarded
+#define IDLE_BUFFER_SIZE (32*1024) // idle counters have the core as part of the data and the core value in the frame header may be discarded
+
+#define NO_COOKIE 0U
+#define INVALID_COOKIE ~0U
+
+#define FRAME_SUMMARY 1
+#define FRAME_BACKTRACE 2
+#define FRAME_NAME 3
+#define FRAME_COUNTER 4
+#define FRAME_BLOCK_COUNTER 5
+#define FRAME_ANNOTATE 6
+#define FRAME_SCHED_TRACE 7
+#define FRAME_GPU_TRACE 8
+#define FRAME_IDLE 9
+
+#define MESSAGE_END_BACKTRACE 1
+
+#define MESSAGE_COOKIE 1
+#define MESSAGE_THREAD_NAME 2
+#define HRTIMER_CORE_NAME 3
+
+#define MESSAGE_GPU_START 1
+#define MESSAGE_GPU_STOP 2
+
+#define MESSAGE_SCHED_SWITCH 1
+#define MESSAGE_SCHED_EXIT 2
+
+#define MAXSIZE_PACK32 5
+#define MAXSIZE_PACK64 10
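+// Worst-case encoded sizes, in bytes, of the packed integers written by
+// gator_buffer_write_packed_int{,64} in gator_pack.c (presumably a 7-bits-per-byte
+// varint-style encoding, given the 5- and 10-byte maxima).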
+
+#if defined(__arm__)
+#define PC_REG regs->ARM_pc
+#elif defined(__aarch64__)
+#define PC_REG regs->pc
+#else
+#define PC_REG regs->ip
+#endif
+
+enum {
+ SUMMARY_BUF,
+ BACKTRACE_BUF,
+ NAME_BUF,
+ COUNTER_BUF,
+ BLOCK_COUNTER_BUF,
+ ANNOTATE_BUF,
+ SCHED_TRACE_BUF,
+ GPU_TRACE_BUF,
+ IDLE_BUF,
+ NUM_GATOR_BUFS
+};
+
+/******************************************************************************
+ * Globals
+ ******************************************************************************/
+static unsigned long gator_cpu_cores;
+// Size of the largest buffer. Effectively constant, set in gator_op_create_files
+static unsigned long userspace_buffer_size;
+static unsigned long gator_backtrace_depth;
+
+static unsigned long gator_started;
+static uint64_t monotonic_started;
+static unsigned long gator_buffer_opened;
+static unsigned long gator_timer_count;
+static unsigned long gator_response_type;
+static DEFINE_MUTEX(start_mutex);
+static DEFINE_MUTEX(gator_buffer_mutex);
+
+static DECLARE_WAIT_QUEUE_HEAD(gator_buffer_wait);
+static struct timer_list gator_buffer_wake_up_timer;
+static LIST_HEAD(gator_events);
+
+/******************************************************************************
+ * Prototypes
+ ******************************************************************************/
+static void buffer_check(int cpu, int buftype);
+static int buffer_bytes_available(int cpu, int buftype);
+static bool buffer_check_space(int cpu, int buftype, int bytes);
+static int contiguous_space_available(int cpu, int buftype);
+static void gator_buffer_write_packed_int(int cpu, int buftype, unsigned int x);
+static void gator_buffer_write_packed_int64(int cpu, int buftype, unsigned long long x);
+static void gator_buffer_write_bytes(int cpu, int buftype, const char *x, int len);
+static void gator_buffer_write_string(int cpu, int buftype, const char *x);
+static void gator_add_trace(int cpu, unsigned long address);
+static void gator_add_sample(int cpu, struct pt_regs *const regs);
+static uint64_t gator_get_time(void);
+
+// Size of the buffer, must be a power of 2. Effectively constant, set in gator_op_setup.
+static uint32_t gator_buffer_size[NUM_GATOR_BUFS];
+// gator_buffer_size - 1, bitwise and with pos to get offset into the array. Effectively constant, set in gator_op_setup.
+static uint32_t gator_buffer_mask[NUM_GATOR_BUFS];
+// Read position in the buffer. Initialized to zero in gator_op_setup and incremented after bytes are read by userspace in userspace_buffer_read
+static DEFINE_PER_CPU(int[NUM_GATOR_BUFS], gator_buffer_read);
+// Write position in the buffer. Initialized to zero in gator_op_setup and incremented after bytes are written to the buffer
+static DEFINE_PER_CPU(int[NUM_GATOR_BUFS], gator_buffer_write);
+// Commit position in the buffer. Initialized to zero in gator_op_setup and incremented after a frame is ready to be read by userspace
+static DEFINE_PER_CPU(int[NUM_GATOR_BUFS], gator_buffer_commit);
+// If set to false, decreases the number of bytes returned by buffer_bytes_available. Set in buffer_check_space if no space is remaining. Initialized to true in gator_op_setup
+// This means that if we run out of space, continue to report that no space is available until bytes are read by userspace
+static DEFINE_PER_CPU(int[NUM_GATOR_BUFS], buffer_space_available);
+// The buffer. Allocated in gator_op_setup
+static DEFINE_PER_CPU(char *[NUM_GATOR_BUFS], gator_buffer);
+
+/******************************************************************************
+ * Application Includes
+ ******************************************************************************/
+#include "gator_marshaling.c"
+#include "gator_hrtimer_perf.c"
+#include "gator_hrtimer_gator.c"
+#include "gator_cookies.c"
+#include "gator_trace_sched.c"
+#include "gator_trace_power.c"
+#include "gator_trace_gpu.c"
+#include "gator_backtrace.c"
+#include "gator_annotate.c"
+#include "gator_fs.c"
+#include "gator_pack.c"
+
+/******************************************************************************
+ * Misc
+ ******************************************************************************/
+
+struct gator_cpu gator_cpus[] = {
+ {
+ .cpuid = ARM1136,
+ .core_name = "ARM1136",
+ .pmnc_name = "ARM_ARM11",
+ .pmnc_counters = 3,
+ .ccnt = 2,
+ },
+ {
+ .cpuid = ARM1156,
+ .core_name = "ARM1156",
+ .pmnc_name = "ARM_ARM11",
+ .pmnc_counters = 3,
+ .ccnt = 2,
+ },
+ {
+ .cpuid = ARM1176,
+ .core_name = "ARM1176",
+ .pmnc_name = "ARM_ARM11",
+ .pmnc_counters = 3,
+ .ccnt = 2,
+ },
+ {
+ .cpuid = ARM11MPCORE,
+ .core_name = "ARM11MPCore",
+ .pmnc_name = "ARM_ARM11MPCore",
+ .pmnc_counters = 3,
+ },
+ {
+ .cpuid = CORTEX_A5,
+ .core_name = "Cortex-A5",
+ .pmnc_name = "ARM_Cortex-A5",
+ .pmnc_counters = 2,
+ },
+ {
+ .cpuid = CORTEX_A7,
+ .core_name = "Cortex-A7",
+ .pmnc_name = "ARM_Cortex-A7",
+ .pmnc_counters = 4,
+ },
+ {
+ .cpuid = CORTEX_A8,
+ .core_name = "Cortex-A8",
+ .pmnc_name = "ARM_Cortex-A8",
+ .pmnc_counters = 4,
+ },
+ {
+ .cpuid = CORTEX_A9,
+ .core_name = "Cortex-A9",
+ .pmnc_name = "ARM_Cortex-A9",
+ .pmnc_counters = 6,
+ },
+ {
+ .cpuid = CORTEX_A15,
+ .core_name = "Cortex-A15",
+ .pmnc_name = "ARM_Cortex-A15",
+ .pmnc_counters = 6,
+ },
+ {
+ .cpuid = SCORPION,
+ .core_name = "Scorpion",
+ .pmnc_name = "Scorpion",
+ .pmnc_counters = 4,
+ },
+ {
+ .cpuid = SCORPIONMP,
+ .core_name = "ScorpionMP",
+ .pmnc_name = "ScorpionMP",
+ .pmnc_counters = 4,
+ },
+ {
+ .cpuid = KRAITSIM,
+ .core_name = "KraitSIM",
+ .pmnc_name = "Krait",
+ .pmnc_counters = 4,
+ },
+ {
+ .cpuid = KRAIT,
+ .core_name = "Krait",
+ .pmnc_name = "Krait",
+ .pmnc_counters = 4,
+ },
+ {
+ .cpuid = KRAIT_S4_PRO,
+ .core_name = "Krait S4 Pro",
+ .pmnc_name = "Krait",
+ .pmnc_counters = 4,
+ },
+ {
+ .cpuid = CORTEX_A53,
+ .core_name = "Cortex-A53",
+ .pmnc_name = "ARM_Cortex-A53",
+ .pmnc_counters = 6,
+ },
+ {
+ .cpuid = CORTEX_A57,
+ .core_name = "Cortex-A57",
+ .pmnc_name = "ARM_Cortex-A57",
+ .pmnc_counters = 6,
+ },
+ {
+ .cpuid = AARCH64,
+ .core_name = "AArch64",
+ .pmnc_name = "ARM_AArch64",
+ .pmnc_counters = 6,
+ },
+ {
+ .cpuid = OTHER,
+ .core_name = "Other",
+ .pmnc_name = "Other",
+ .pmnc_counters = 6,
+ },
+ {}
+};
+
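+// Returns bits [15:4] of MIDR (the primary part number), which is what the cpuid
+// fields in gator_cpus[] above are matched against.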
+u32 gator_cpuid(void)
+{
+#if defined(__arm__) || defined(__aarch64__)
+ u32 val;
+#if !defined(__aarch64__)
+ asm volatile("mrc p15, 0, %0, c0, c0, 0" : "=r" (val));
+#else
+ asm volatile("mrs %0, midr_el1" : "=r" (val));
+#endif
+ return (val >> 4) & 0xfff;
+#else
+ return OTHER;
+#endif
+}
+
+static void gator_buffer_wake_up(unsigned long data)
+{
+ wake_up(&gator_buffer_wait);
+}
+
+/******************************************************************************
+ * Commit interface
+ ******************************************************************************/
+static bool buffer_commit_ready(int *cpu, int *buftype)
+{
+ int cpu_x, x;
+ for_each_present_cpu(cpu_x) {
+ for (x = 0; x < NUM_GATOR_BUFS; x++)
+ if (per_cpu(gator_buffer_commit, cpu_x)[x] != per_cpu(gator_buffer_read, cpu_x)[x]) {
+ *cpu = cpu_x;
+ *buftype = x;
+ return true;
+ }
+ }
+ return false;
+}
+
+/******************************************************************************
+ * Buffer management
+ ******************************************************************************/
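+// The per-cpu read/write/commit indices implement a ring buffer: a negative
+// difference between two indices means the write position has wrapped, so the
+// buffer size is added back to recover the true byte count.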
+static int buffer_bytes_available(int cpu, int buftype)
+{
+ int remaining, filled;
+
+ filled = per_cpu(gator_buffer_write, cpu)[buftype] - per_cpu(gator_buffer_read, cpu)[buftype];
+ if (filled < 0) {
+ filled += gator_buffer_size[buftype];
+ }
+
+ remaining = gator_buffer_size[buftype] - filled;
+
+ if (per_cpu(buffer_space_available, cpu)[buftype]) {
+ // Give some extra room; also allows space to insert the overflow error packet
+ remaining -= 200;
+ } else {
+ // Hysteresis, prevents multiple overflow messages
+ remaining -= 2000;
+ }
+
+ return remaining;
+}
+
+static int contiguous_space_available(int cpu, int buftype)
+{
+ int remaining = buffer_bytes_available(cpu, buftype);
+ int contiguous = gator_buffer_size[buftype] - per_cpu(gator_buffer_write, cpu)[buftype];
+ if (remaining < contiguous)
+ return remaining;
+ else
+ return contiguous;
+}
+
+static bool buffer_check_space(int cpu, int buftype, int bytes)
+{
+ int remaining = buffer_bytes_available(cpu, buftype);
+
+ if (remaining < bytes) {
+ per_cpu(buffer_space_available, cpu)[buftype] = false;
+ } else {
+ per_cpu(buffer_space_available, cpu)[buftype] = true;
+ }
+
+ return per_cpu(buffer_space_available, cpu)[buftype];
+}
+
+static void gator_buffer_write_bytes(int cpu, int buftype, const char *x, int len)
+{
+ int i;
+ u32 write = per_cpu(gator_buffer_write, cpu)[buftype];
+ u32 mask = gator_buffer_mask[buftype];
+ char *buffer = per_cpu(gator_buffer, cpu)[buftype];
+
+ for (i = 0; i < len; i++) {
+ buffer[write] = x[i];
+ write = (write + 1) & mask;
+ }
+
+ per_cpu(gator_buffer_write, cpu)[buftype] = write;
+}
+
+static void gator_buffer_write_string(int cpu, int buftype, const char *x)
+{
+ int len = strlen(x);
+ gator_buffer_write_packed_int(cpu, buftype, len);
+ gator_buffer_write_bytes(cpu, buftype, x, len);
+}
+
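+// Frame layout in the buffer is: [optional 1-byte response type][4-byte little-endian
+// payload length, patched in below][payload]; the length excludes both the type byte
+// and the length field itself.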
+static void gator_commit_buffer(int cpu, int buftype)
+{
+ int type_length, commit, length, byte;
+
+ if (!per_cpu(gator_buffer, cpu)[buftype])
+ return;
+
+ // post-populate the length, which includes neither the response type nor the length field itself, i.e. only the length of the payload
+ type_length = gator_response_type ? 1 : 0;
+ commit = per_cpu(gator_buffer_commit, cpu)[buftype];
+ length = per_cpu(gator_buffer_write, cpu)[buftype] - commit;
+ if (length < 0) {
+ length += gator_buffer_size[buftype];
+ }
+ length = length - type_length - sizeof(int);
+ for (byte = 0; byte < sizeof(int); byte++) {
+ per_cpu(gator_buffer, cpu)[buftype][(commit + type_length + byte) & gator_buffer_mask[buftype]] = (length >> byte * 8) & 0xFF;
+ }
+
+ per_cpu(gator_buffer_commit, cpu)[buftype] = per_cpu(gator_buffer_write, cpu)[buftype];
+ marshal_frame(cpu, buftype);
+
+ // Scheduling work is deferred via a timer because scheduling work directly during a context switch is illegal in kernel versions 3.5 and later
+ mod_timer(&gator_buffer_wake_up_timer, jiffies + 1);
+}
+
+static void buffer_check(int cpu, int buftype)
+{
+ int filled = per_cpu(gator_buffer_write, cpu)[buftype] - per_cpu(gator_buffer_commit, cpu)[buftype];
+ if (filled < 0) {
+ filled += gator_buffer_size[buftype];
+ }
+ if (filled >= ((gator_buffer_size[buftype] * 3) / 4)) {
+ gator_commit_buffer(cpu, buftype);
+ }
+}
+
+static void gator_add_trace(int cpu, unsigned long address)
+{
+ off_t offset = 0;
+ unsigned long cookie = get_address_cookie(cpu, current, address & ~1, &offset);
+
+ if (cookie == NO_COOKIE || cookie == INVALID_COOKIE) {
+ offset = address;
+ }
+
+ marshal_backtrace(offset & ~1, cookie);
+}
+
+static void gator_add_sample(int cpu, struct pt_regs *const regs)
+{
+ bool inKernel;
+ unsigned long exec_cookie;
+
+ if (!regs)
+ return;
+
+ inKernel = !user_mode(regs);
+ exec_cookie = get_exec_cookie(cpu, current);
+
+ if (!marshal_backtrace_header(exec_cookie, current->tgid, current->pid, inKernel))
+ return;
+
+ if (inKernel) {
+ kernel_backtrace(cpu, regs);
+ } else {
+ // Cookie+PC
+ gator_add_trace(cpu, PC_REG);
+
+ // Backtrace
+ if (gator_backtrace_depth)
+ arm_backtrace_eabi(cpu, regs, gator_backtrace_depth);
+ }
+
+ marshal_backtrace_footer();
+}
+
+/******************************************************************************
+ * hrtimer interrupt processing
+ ******************************************************************************/
+static void gator_timer_interrupt(void)
+{
+ struct pt_regs *const regs = get_irq_regs();
+ gator_backtrace_handler(regs);
+}
+
+void gator_backtrace_handler(struct pt_regs *const regs)
+{
+ int cpu = smp_processor_id();
+
+ // Output backtrace
+ gator_add_sample(cpu, regs);
+
+ // Collect counters
+ if (!per_cpu(collecting, cpu)) {
+ collect_counters();
+ }
+}
+
+static int gator_running;
+
+// This function runs in interrupt context and on the appropriate core
+static void gator_timer_offline(void *unused)
+{
+ struct gator_interface *gi;
+ int i, len, cpu = smp_processor_id();
+ int *buffer;
+
+ gator_trace_sched_offline();
+ gator_trace_power_offline();
+
+ gator_hrtimer_offline(cpu);
+
+ // Offline any events and output counters
+ if (marshal_event_header()) {
+ list_for_each_entry(gi, &gator_events, list) {
+ if (gi->offline) {
+ len = gi->offline(&buffer);
+ marshal_event(len, buffer);
+ }
+ }
+ }
+
+ // Flush all buffers on this core
+ for (i = 0; i < NUM_GATOR_BUFS; i++)
+ gator_commit_buffer(cpu, i);
+}
+
+// This function runs in process context and may be running on a core other than core 'cpu'
+static void gator_timer_offline_dispatch(int cpu)
+{
+ struct gator_interface *gi;
+
+ list_for_each_entry(gi, &gator_events, list)
+ if (gi->offline_dispatch)
+ gi->offline_dispatch(cpu);
+}
+
+static void gator_timer_stop(void)
+{
+ int cpu;
+
+ if (gator_running) {
+ on_each_cpu(gator_timer_offline, NULL, 1);
+ for_each_online_cpu(cpu) {
+ gator_timer_offline_dispatch(cpu);
+ }
+
+ gator_running = 0;
+ gator_hrtimer_shutdown();
+ }
+}
+
+// This function runs in interrupt context and on the appropriate core
+static void gator_timer_online(void *unused)
+{
+ struct gator_interface *gi;
+ int len, cpu = smp_processor_id();
+ int *buffer;
+
+ gator_trace_power_online();
+
+ // online any events and output counters
+ if (marshal_event_header()) {
+ list_for_each_entry(gi, &gator_events, list) {
+ if (gi->online) {
+ len = gi->online(&buffer);
+ marshal_event(len, buffer);
+ }
+ }
+ }
+
+ gator_hrtimer_online(cpu);
+#if defined(__arm__) || defined(__aarch64__)
+ {
+ const char *core_name = "Unknown";
+ const u32 cpuid = gator_cpuid();
+ int i;
+
+ for (i = 0; gator_cpus[i].cpuid != 0; ++i) {
+ if (gator_cpus[i].cpuid == cpuid) {
+ core_name = gator_cpus[i].core_name;
+ break;
+ }
+ }
+
+ marshal_core_name(core_name);
+ }
+#endif
+}
+
+// This function runs in process context and may be running on a core other than core 'cpu'
+static void gator_timer_online_dispatch(int cpu)
+{
+ struct gator_interface *gi;
+
+ list_for_each_entry(gi, &gator_events, list)
+ if (gi->online_dispatch)
+ gi->online_dispatch(cpu);
+}
+
+int gator_timer_start(unsigned long sample_rate)
+{
+ int cpu;
+
+ if (gator_running) {
+ pr_notice("gator: already running\n");
+ return 0;
+ }
+
+ gator_running = 1;
+
+ if (gator_hrtimer_init(sample_rate, gator_timer_interrupt) == -1)
+ return -1;
+
+ for_each_online_cpu(cpu) {
+ gator_timer_online_dispatch(cpu);
+ }
+ on_each_cpu(gator_timer_online, NULL, 1);
+
+ return 0;
+}
+
+static uint64_t gator_get_time(void)
+{
+ struct timespec ts;
+ uint64_t timestamp;
+
+ //getnstimeofday(&ts);
+ do_posix_clock_monotonic_gettime(&ts);
+ timestamp = timespec_to_ns(&ts) - monotonic_started;
+
+ return timestamp;
+}
+
+/******************************************************************************
+ * cpu hotplug and pm notifiers
+ ******************************************************************************/
+static int __cpuinit gator_hotcpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
+{
+ long cpu = (long)hcpu;
+
+ switch (action) {
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ smp_call_function_single(cpu, gator_timer_offline, NULL, 1);
+ gator_timer_offline_dispatch(cpu);
+ break;
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ gator_timer_online_dispatch(cpu);
+ smp_call_function_single(cpu, gator_timer_online, NULL, 1);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __refdata gator_hotcpu_notifier = {
+ .notifier_call = gator_hotcpu_notify,
+};
+
+// N.B. on_each_cpu() only runs on CPUs that are online
+// Registered Linux events are not disabled, so their counters will continue to collect
+static int gator_pm_notify(struct notifier_block *nb, unsigned long event, void *dummy)
+{
+ int cpu;
+
+ switch (event) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ unregister_hotcpu_notifier(&gator_hotcpu_notifier);
+ unregister_scheduler_tracepoints();
+ on_each_cpu(gator_timer_offline, NULL, 1);
+ for_each_online_cpu(cpu) {
+ gator_timer_offline_dispatch(cpu);
+ }
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ for_each_online_cpu(cpu) {
+ gator_timer_online_dispatch(cpu);
+ }
+ on_each_cpu(gator_timer_online, NULL, 1);
+ register_scheduler_tracepoints();
+ register_hotcpu_notifier(&gator_hotcpu_notifier);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block gator_pm_notifier = {
+ .notifier_call = gator_pm_notify,
+};
+
+static int gator_notifier_start(void)
+{
+ int retval;
+ retval = register_hotcpu_notifier(&gator_hotcpu_notifier);
+ if (retval == 0)
+ retval = register_pm_notifier(&gator_pm_notifier);
+ return retval;
+}
+
+static void gator_notifier_stop(void)
+{
+ unregister_pm_notifier(&gator_pm_notifier);
+ unregister_hotcpu_notifier(&gator_hotcpu_notifier);
+}
+
+/******************************************************************************
+ * Main
+ ******************************************************************************/
+static void gator_summary(void)
+{
+ uint64_t timestamp;
+ struct timespec uptime_ts;
+
+ timestamp = gator_get_time();
+
+ do_posix_clock_monotonic_gettime(&uptime_ts);
+ monotonic_started = timespec_to_ns(&uptime_ts);
+
+ marshal_summary(timestamp, monotonic_started);
+}
+
+int gator_events_install(struct gator_interface *interface)
+{
+ list_add_tail(&interface->list, &gator_events);
+
+ return 0;
+}
+
+int gator_events_get_key(void)
+{
+ // key of zero is reserved as a timestamp
+ static int key = 1;
+
+ return key++;
+}
+
+static int gator_init(void)
+{
+ int i;
+
+ // event sources (gator_events.h, generated by gator_events.sh)
+ for (i = 0; i < ARRAY_SIZE(gator_events_list); i++)
+ if (gator_events_list[i])
+ gator_events_list[i]();
+
+ gator_trace_power_init();
+
+ return 0;
+}
+
+static void gator_exit(void)
+{
+ struct gator_interface *gi;
+
+ list_for_each_entry(gi, &gator_events, list)
+ if (gi->shutdown)
+ gi->shutdown();
+}
+
+static int gator_start(void)
+{
+ unsigned long cpu, i;
+ struct gator_interface *gi;
+
+ // Initialize the buffer with the frame type and core
+ for_each_present_cpu(cpu) {
+ for (i = 0; i < NUM_GATOR_BUFS; i++) {
+ marshal_frame(cpu, i);
+ }
+ }
+
+ // Capture the start time
+ gator_summary();
+
+ // start all events
+ list_for_each_entry(gi, &gator_events, list) {
+ if (gi->start && gi->start() != 0) {
+ struct list_head *ptr = gi->list.prev;
+
+ while (ptr != &gator_events) {
+ gi = list_entry(ptr, struct gator_interface, list);
+
+ if (gi->stop)
+ gi->stop();
+
+ ptr = ptr->prev;
+ }
+ goto events_failure;
+ }
+ }
+
+ // cookies shall be initialized before trace_sched_start() and gator_timer_start()
+ if (cookies_initialize())
+ goto cookies_failure;
+ if (gator_annotate_start())
+ goto annotate_failure;
+ if (gator_trace_sched_start())
+ goto sched_failure;
+ if (gator_trace_power_start())
+ goto power_failure;
+ if (gator_trace_gpu_start())
+ goto gpu_failure;
+ if (gator_timer_start(gator_timer_count))
+ goto timer_failure;
+ if (gator_notifier_start())
+ goto notifier_failure;
+
+ return 0;
+
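+ // Error unwind: each label tears down only the subsystems started before the failing
+ // step, in reverse order, ending with stopping all event sources.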
+notifier_failure:
+ gator_timer_stop();
+timer_failure:
+ gator_trace_gpu_stop();
+gpu_failure:
+ gator_trace_power_stop();
+power_failure:
+ gator_trace_sched_stop();
+sched_failure:
+ gator_annotate_stop();
+annotate_failure:
+ cookies_release();
+cookies_failure:
+ // stop all events
+ list_for_each_entry(gi, &gator_events, list)
+ if (gi->stop)
+ gi->stop();
+events_failure:
+
+ return -1;
+}
+
+static void gator_stop(void)
+{
+ struct gator_interface *gi;
+
+ gator_annotate_stop();
+ gator_trace_sched_stop();
+ gator_trace_power_stop();
+ gator_trace_gpu_stop();
+
+ // stop all interrupt callback reads before tearing down other interfaces
+ gator_notifier_stop(); // should be called before gator_timer_stop to avoid re-enabling the hrtimer after it has been offlined
+ gator_timer_stop();
+
+ // stop all events
+ list_for_each_entry(gi, &gator_events, list)
+ if (gi->stop)
+ gi->stop();
+}
+
+/******************************************************************************
+ * Filesystem
+ ******************************************************************************/
+/* fopen("buffer") */
+static int gator_op_setup(void)
+{
+ int err = 0;
+ int cpu, i;
+
+ mutex_lock(&start_mutex);
+
+ gator_buffer_size[SUMMARY_BUF] = SUMMARY_BUFFER_SIZE;
+ gator_buffer_mask[SUMMARY_BUF] = SUMMARY_BUFFER_SIZE - 1;
+
+ gator_buffer_size[BACKTRACE_BUF] = BACKTRACE_BUFFER_SIZE;
+ gator_buffer_mask[BACKTRACE_BUF] = BACKTRACE_BUFFER_SIZE - 1;
+
+ gator_buffer_size[NAME_BUF] = NAME_BUFFER_SIZE;
+ gator_buffer_mask[NAME_BUF] = NAME_BUFFER_SIZE - 1;
+
+ gator_buffer_size[COUNTER_BUF] = COUNTER_BUFFER_SIZE;
+ gator_buffer_mask[COUNTER_BUF] = COUNTER_BUFFER_SIZE - 1;
+
+ gator_buffer_size[BLOCK_COUNTER_BUF] = BLOCK_COUNTER_BUFFER_SIZE;
+ gator_buffer_mask[BLOCK_COUNTER_BUF] = BLOCK_COUNTER_BUFFER_SIZE - 1;
+
+ gator_buffer_size[ANNOTATE_BUF] = ANNOTATE_BUFFER_SIZE;
+ gator_buffer_mask[ANNOTATE_BUF] = ANNOTATE_BUFFER_SIZE - 1;
+
+ gator_buffer_size[SCHED_TRACE_BUF] = SCHED_TRACE_BUFFER_SIZE;
+ gator_buffer_mask[SCHED_TRACE_BUF] = SCHED_TRACE_BUFFER_SIZE - 1;
+
+ gator_buffer_size[GPU_TRACE_BUF] = GPU_TRACE_BUFFER_SIZE;
+ gator_buffer_mask[GPU_TRACE_BUF] = GPU_TRACE_BUFFER_SIZE - 1;
+
+ gator_buffer_size[IDLE_BUF] = IDLE_BUFFER_SIZE;
+ gator_buffer_mask[IDLE_BUF] = IDLE_BUFFER_SIZE - 1;
+
+ // Initialize percpu per buffer variables
+ for (i = 0; i < NUM_GATOR_BUFS; i++) {
+ // Verify buffers are a power of 2
+ if (gator_buffer_size[i] & (gator_buffer_size[i] - 1)) {
+ err = -ENOEXEC;
+ goto setup_error;
+ }
+
+ for_each_present_cpu(cpu) {
+ per_cpu(gator_buffer_read, cpu)[i] = 0;
+ per_cpu(gator_buffer_write, cpu)[i] = 0;
+ per_cpu(gator_buffer_commit, cpu)[i] = 0;
+ per_cpu(buffer_space_available, cpu)[i] = true;
+
+ // Annotation is a special case that only uses a single buffer
+ if (cpu > 0 && i == ANNOTATE_BUF) {
+ per_cpu(gator_buffer, cpu)[i] = NULL;
+ continue;
+ }
+
+ per_cpu(gator_buffer, cpu)[i] = vmalloc(gator_buffer_size[i]);
+ if (!per_cpu(gator_buffer, cpu)[i]) {
+ err = -ENOMEM;
+ goto setup_error;
+ }
+ }
+ }
+
+setup_error:
+ mutex_unlock(&start_mutex);
+ return err;
+}
+
+/* Actually start profiling (echo 1>/dev/gator/enable) */
+static int gator_op_start(void)
+{
+ int err = 0;
+
+ mutex_lock(&start_mutex);
+
+ if (gator_started || gator_start())
+ err = -EINVAL;
+ else
+ gator_started = 1;
+
+ mutex_unlock(&start_mutex);
+
+ return err;
+}
+
+/* echo 0>/dev/gator/enable */
+static void gator_op_stop(void)
+{
+ mutex_lock(&start_mutex);
+
+ if (gator_started) {
+ gator_stop();
+
+ mutex_lock(&gator_buffer_mutex);
+
+ gator_started = 0;
+ cookies_release();
+ wake_up(&gator_buffer_wait);
+
+ mutex_unlock(&gator_buffer_mutex);
+ }
+
+ mutex_unlock(&start_mutex);
+}
+
+static void gator_shutdown(void)
+{
+ int cpu, i;
+
+ mutex_lock(&start_mutex);
+
+ for_each_present_cpu(cpu) {
+ mutex_lock(&gator_buffer_mutex);
+ for (i = 0; i < NUM_GATOR_BUFS; i++) {
+ vfree(per_cpu(gator_buffer, cpu)[i]);
+ per_cpu(gator_buffer, cpu)[i] = NULL;
+ per_cpu(gator_buffer_read, cpu)[i] = 0;
+ per_cpu(gator_buffer_write, cpu)[i] = 0;
+ per_cpu(gator_buffer_commit, cpu)[i] = 0;
+ per_cpu(buffer_space_available, cpu)[i] = true;
+ }
+ mutex_unlock(&gator_buffer_mutex);
+ }
+
+ mutex_unlock(&start_mutex);
+}
+
+static int gator_set_backtrace(unsigned long val)
+{
+ int err = 0;
+
+ mutex_lock(&start_mutex);
+
+ if (gator_started)
+ err = -EBUSY;
+ else
+ gator_backtrace_depth = val;
+
+ mutex_unlock(&start_mutex);
+
+ return err;
+}
+
+static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
+{
+ return gatorfs_ulong_to_user(gator_started, buf, count, offset);
+}
+
+static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
+{
+ unsigned long val;
+ int retval;
+
+ if (*offset)
+ return -EINVAL;
+
+ retval = gatorfs_ulong_from_user(&val, buf, count);
+ if (retval)
+ return retval;
+
+ if (val)
+ retval = gator_op_start();
+ else
+ gator_op_stop();
+
+ if (retval)
+ return retval;
+ return count;
+}
+
+static const struct file_operations enable_fops = {
+ .read = enable_read,
+ .write = enable_write,
+};
+
+static int userspace_buffer_open(struct inode *inode, struct file *file)
+{
+ int err = -EPERM;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (test_and_set_bit_lock(0, &gator_buffer_opened))
+ return -EBUSY;
+
+ if ((err = gator_op_setup()))
+ goto fail;
+
+ /* NB: the actual start happens from userspace
+ * echo 1 >/dev/gator/enable
+ */
+
+ return 0;
+
+fail:
+ __clear_bit_unlock(0, &gator_buffer_opened);
+ return err;
+}
+
+static int userspace_buffer_release(struct inode *inode, struct file *file)
+{
+ gator_op_stop();
+ gator_shutdown();
+ __clear_bit_unlock(0, &gator_buffer_opened);
+ return 0;
+}
+
+static ssize_t userspace_buffer_read(struct file *file, char __user *buf,
+ size_t count, loff_t *offset)
+{
+ int retval = -EINVAL;
+ int commit = 0, length1, length2, read;
+ char *buffer1;
+ char *buffer2 = NULL;
+ int cpu, buftype;
+
+ /* do not handle partial reads */
+ if (count != userspace_buffer_size || *offset)
+ return -EINVAL;
+
+ // sleep until the condition is true or a signal is received
+ // the condition is checked each time gator_buffer_wait is woken up
+ buftype = cpu = -1;
+ wait_event_interruptible(gator_buffer_wait, buffer_commit_ready(&cpu, &buftype) || !gator_started);
+
+ if (signal_pending(current))
+ return -EINTR;
+
+ length2 = 0;
+ retval = -EFAULT;
+
+ mutex_lock(&gator_buffer_mutex);
+
+ if (buftype == -1 || cpu == -1) {
+ retval = 0;
+ goto out;
+ }
+
+ read = per_cpu(gator_buffer_read, cpu)[buftype];
+ commit = per_cpu(gator_buffer_commit, cpu)[buftype];
+
+ /* May happen if the buffer is freed during pending reads. */
+ if (!per_cpu(gator_buffer, cpu)[buftype]) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ /* Determine the sizes of the two halves: a negative length1 means the
+ * write position has wrapped, so the data is returned as the tail of the
+ * buffer followed by its head. */
+ length1 = commit - read;
+ buffer1 = &(per_cpu(gator_buffer, cpu)[buftype][read]);
+ buffer2 = &(per_cpu(gator_buffer, cpu)[buftype][0]);
+ if (length1 < 0) {
+ length1 = gator_buffer_size[buftype] - read;
+ length2 = commit;
+ }
+
+ /* start, middle or end */
+ if (length1 > 0) {
+ if (copy_to_user(&buf[0], buffer1, length1)) {
+ goto out;
+ }
+ }
+
+ /* possible wrap around */
+ if (length2 > 0) {
+ if (copy_to_user(&buf[length1], buffer2, length2)) {
+ goto out;
+ }
+ }
+
+ per_cpu(gator_buffer_read, cpu)[buftype] = commit;
+ retval = length1 + length2;
+
+ /* kick just in case we've lost an SMP event */
+ wake_up(&gator_buffer_wait);
+
+out:
+ mutex_unlock(&gator_buffer_mutex);
+ return retval;
+}
+
+const struct file_operations gator_event_buffer_fops = {
+ .open = userspace_buffer_open,
+ .release = userspace_buffer_release,
+ .read = userspace_buffer_read,
+};
+
+static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
+{
+ return gatorfs_ulong_to_user(gator_backtrace_depth, buf, count, offset);
+}
+
+static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
+{
+ unsigned long val;
+ int retval;
+
+ if (*offset)
+ return -EINVAL;
+
+ retval = gatorfs_ulong_from_user(&val, buf, count);
+ if (retval)
+ return retval;
+
+ retval = gator_set_backtrace(val);
+
+ if (retval)
+ return retval;
+ return count;
+}
+
+static const struct file_operations depth_fops = {
+ .read = depth_read,
+ .write = depth_write
+};
+
+void gator_op_create_files(struct super_block *sb, struct dentry *root)
+{
+ struct dentry *dir;
+ struct gator_interface *gi;
+ int cpu;
+
+ /* reinitialize default values */
+ gator_cpu_cores = 0;
+ for_each_present_cpu(cpu) {
+ gator_cpu_cores++;
+ }
+ userspace_buffer_size = BACKTRACE_BUFFER_SIZE;
+ gator_response_type = 1;
+
+ gatorfs_create_file(sb, root, "enable", &enable_fops);
+ gatorfs_create_file(sb, root, "buffer", &gator_event_buffer_fops);
+ gatorfs_create_file(sb, root, "backtrace_depth", &depth_fops);
+ gatorfs_create_ro_ulong(sb, root, "cpu_cores", &gator_cpu_cores);
+ gatorfs_create_ro_ulong(sb, root, "buffer_size", &userspace_buffer_size);
+ gatorfs_create_ulong(sb, root, "tick", &gator_timer_count);
+ gatorfs_create_ulong(sb, root, "response_type", &gator_response_type);
+ gatorfs_create_ro_ulong(sb, root, "version", &gator_protocol_version);
+
+ // Annotate interface
+ gator_annotate_create_files(sb, root);
+
+ // Linux Events
+ dir = gatorfs_mkdir(sb, root, "events");
+ list_for_each_entry(gi, &gator_events, list)
+ if (gi->create_files)
+ gi->create_files(sb, dir);
+
+ // Power interface
+ gator_trace_power_create_files(sb, dir);
+}
+
+/******************************************************************************
+ * Module
+ ******************************************************************************/
+static int __init gator_module_init(void)
+{
+ if (gatorfs_register()) {
+ return -1;
+ }
+
+ if (gator_init()) {
+ gatorfs_unregister();
+ return -1;
+ }
+
+ setup_timer(&gator_buffer_wake_up_timer, gator_buffer_wake_up, 0);
+
+ return 0;
+}
+
+static void __exit gator_module_exit(void)
+{
+ del_timer_sync(&gator_buffer_wake_up_timer);
+ tracepoint_synchronize_unregister();
+ gator_exit();
+ gatorfs_unregister();
+}
+
+module_init(gator_module_init);
+module_exit(gator_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("ARM Ltd");
+MODULE_DESCRIPTION("Gator system profiler");
diff --git a/drivers/gator/gator_marshaling.c b/drivers/gator/gator_marshaling.c
new file mode 100644
index 000000000000..b2efdd2a0fa5
--- /dev/null
+++ b/drivers/gator/gator_marshaling.c
@@ -0,0 +1,338 @@
+/**
+ * Copyright (C) ARM Limited 2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+static void marshal_summary(long long timestamp, long long uptime)
+{
+ int cpu = 0;
+ gator_buffer_write_packed_int64(cpu, SUMMARY_BUF, timestamp);
+ gator_buffer_write_packed_int64(cpu, SUMMARY_BUF, uptime);
+ buffer_check(cpu, SUMMARY_BUF);
+}
+
+static bool marshal_cookie_header(const char *text)
+{
+ int cpu = smp_processor_id();
+ return buffer_check_space(cpu, NAME_BUF, strlen(text) + 3 * MAXSIZE_PACK32);
+}
+
+static void marshal_cookie(int cookie, const char *text)
+{
+ int cpu = smp_processor_id();
+ // buffer_check_space already called by marshal_cookie_header
+ gator_buffer_write_packed_int(cpu, NAME_BUF, MESSAGE_COOKIE);
+ gator_buffer_write_packed_int(cpu, NAME_BUF, cookie);
+ gator_buffer_write_string(cpu, NAME_BUF, text);
+ buffer_check(cpu, NAME_BUF);
+}
+
+static void marshal_thread_name(int pid, char *name)
+{
+ unsigned long flags, cpu;
+ local_irq_save(flags);
+ cpu = smp_processor_id();
+ if (buffer_check_space(cpu, NAME_BUF, TASK_COMM_LEN + 3 * MAXSIZE_PACK32 + MAXSIZE_PACK64)) {
+ gator_buffer_write_packed_int(cpu, NAME_BUF, MESSAGE_THREAD_NAME);
+ gator_buffer_write_packed_int64(cpu, NAME_BUF, gator_get_time());
+ gator_buffer_write_packed_int(cpu, NAME_BUF, pid);
+ gator_buffer_write_string(cpu, NAME_BUF, name);
+ }
+ buffer_check(cpu, NAME_BUF);
+ local_irq_restore(flags);
+}
+
+static bool marshal_backtrace_header(int exec_cookie, int tgid, int pid, int inKernel)
+{
+ int cpu = smp_processor_id();
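+ // Reserve room for the header fields plus the worst-case backtrace that
+ // marshal_backtrace() appends afterwards (one cookie and one address per frame)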
+ if (buffer_check_space(cpu, BACKTRACE_BUF, MAXSIZE_PACK64 + 5 * MAXSIZE_PACK32 + gator_backtrace_depth * 2 * MAXSIZE_PACK32)) {
+ gator_buffer_write_packed_int64(cpu, BACKTRACE_BUF, gator_get_time());
+ gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, exec_cookie);
+ gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, tgid);
+ gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, pid);
+ gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, inKernel);
+ return true;
+ }
+
+ // Check and commit; commit is set to occur once buffer is 3/4 full
+ buffer_check(cpu, BACKTRACE_BUF);
+
+ return false;
+}
+
+static void marshal_backtrace(unsigned long address, int cookie)
+{
+ int cpu = smp_processor_id();
+ gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, cookie);
+ gator_buffer_write_packed_int64(cpu, BACKTRACE_BUF, address);
+}
+
+static void marshal_backtrace_footer(void)
+{
+ int cpu = smp_processor_id();
+ gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, MESSAGE_END_BACKTRACE);
+
+ // Check and commit; commit is set to occur once buffer is 3/4 full
+ buffer_check(cpu, BACKTRACE_BUF);
+}
+
+static bool marshal_event_header(void)
+{
+ unsigned long flags, cpu = smp_processor_id();
+ bool retval = false;
+
+ local_irq_save(flags);
+ if (buffer_check_space(cpu, BLOCK_COUNTER_BUF, MAXSIZE_PACK32 + MAXSIZE_PACK64)) {
+ gator_buffer_write_packed_int(cpu, BLOCK_COUNTER_BUF, 0); // key of zero indicates a timestamp
+ gator_buffer_write_packed_int64(cpu, BLOCK_COUNTER_BUF, gator_get_time());
+ retval = true;
+ }
+ // Check and commit; commit is set to occur once buffer is 3/4 full
+ buffer_check(cpu, BLOCK_COUNTER_BUF);
+ local_irq_restore(flags);
+
+ return retval;
+}
+
+static void marshal_event(int len, int *buffer)
+{
+ unsigned long i, flags, cpu = smp_processor_id();
+
+ if (len <= 0)
+ return;
+
+ // the length must be even since the data is a sequence of (key, value) pairs
+ if (len & 0x1) {
+ pr_err("gator: invalid counter data detected and discarded\n");
+ return;
+ }
+
+ // events must be written in key,value pairs
+ local_irq_save(flags);
+ for (i = 0; i < len; i += 2) {
+ if (!buffer_check_space(cpu, BLOCK_COUNTER_BUF, 2 * MAXSIZE_PACK32)) {
+ break;
+ }
+ gator_buffer_write_packed_int(cpu, BLOCK_COUNTER_BUF, buffer[i]);
+ gator_buffer_write_packed_int(cpu, BLOCK_COUNTER_BUF, buffer[i + 1]);
+ }
+ // Check and commit; commit is set to occur once buffer is 3/4 full
+ buffer_check(cpu, BLOCK_COUNTER_BUF);
+ local_irq_restore(flags);
+}
+
+static void marshal_event64(int len, long long *buffer64)
+{
+ unsigned long i, flags, cpu = smp_processor_id();
+
+ if (len <= 0)
+ return;
+
+ // the length must be even since the data is a sequence of (key, value) pairs
+ if (len & 0x1) {
+ pr_err("gator: invalid counter data detected and discarded\n");
+ return;
+ }
+
+ // events must be written in key,value pairs
+ local_irq_save(flags);
+ for (i = 0; i < len; i += 2) {
+ if (!buffer_check_space(cpu, BLOCK_COUNTER_BUF, 2 * MAXSIZE_PACK64)) {
+ break;
+ }
+ gator_buffer_write_packed_int64(cpu, BLOCK_COUNTER_BUF, buffer64[i]);
+ gator_buffer_write_packed_int64(cpu, BLOCK_COUNTER_BUF, buffer64[i + 1]);
+ }
+ // Check and commit; commit is set to occur once buffer is 3/4 full
+ buffer_check(cpu, BLOCK_COUNTER_BUF);
+ local_irq_restore(flags);
+}
+
+#if GATOR_CPU_FREQ_SUPPORT
+static void marshal_event_single(int core, int key, int value)
+{
+ unsigned long flags, cpu;
+
+ local_irq_save(flags);
+ cpu = smp_processor_id();
+ if (buffer_check_space(cpu, COUNTER_BUF, MAXSIZE_PACK64 + 3 * MAXSIZE_PACK32)) {
+ gator_buffer_write_packed_int64(cpu, COUNTER_BUF, gator_get_time());
+ gator_buffer_write_packed_int(cpu, COUNTER_BUF, core);
+ gator_buffer_write_packed_int(cpu, COUNTER_BUF, key);
+ gator_buffer_write_packed_int(cpu, COUNTER_BUF, value);
+ }
+ // Check and commit; commit is set to occur once buffer is 3/4 full
+ buffer_check(cpu, COUNTER_BUF);
+ local_irq_restore(flags);
+}
+#endif
+
+static void marshal_sched_gpu_start(int unit, int core, int tgid, int pid)
+{
+ unsigned long cpu = smp_processor_id(), flags;
+
+ if (!per_cpu(gator_buffer, cpu)[GPU_TRACE_BUF])
+ return;
+
+ local_irq_save(flags);
+ if (buffer_check_space(cpu, GPU_TRACE_BUF, MAXSIZE_PACK64 + 5 * MAXSIZE_PACK32)) {
+ gator_buffer_write_packed_int(cpu, GPU_TRACE_BUF, MESSAGE_GPU_START);
+ gator_buffer_write_packed_int64(cpu, GPU_TRACE_BUF, gator_get_time());
+ gator_buffer_write_packed_int(cpu, GPU_TRACE_BUF, unit);
+ gator_buffer_write_packed_int(cpu, GPU_TRACE_BUF, core);
+ gator_buffer_write_packed_int(cpu, GPU_TRACE_BUF, tgid);
+ gator_buffer_write_packed_int(cpu, GPU_TRACE_BUF, pid);
+ }
+ // Check and commit; commit is set to occur once buffer is 3/4 full
+ buffer_check(cpu, GPU_TRACE_BUF);
+ local_irq_restore(flags);
+}
+
+static void marshal_sched_gpu_stop(int unit, int core)
+{
+ unsigned long cpu = smp_processor_id(), flags;
+
+ if (!per_cpu(gator_buffer, cpu)[GPU_TRACE_BUF])
+ return;
+
+ local_irq_save(flags);
+ if (buffer_check_space(cpu, GPU_TRACE_BUF, MAXSIZE_PACK64 + 3 * MAXSIZE_PACK32)) {
+ gator_buffer_write_packed_int(cpu, GPU_TRACE_BUF, MESSAGE_GPU_STOP);
+ gator_buffer_write_packed_int64(cpu, GPU_TRACE_BUF, gator_get_time());
+ gator_buffer_write_packed_int(cpu, GPU_TRACE_BUF, unit);
+ gator_buffer_write_packed_int(cpu, GPU_TRACE_BUF, core);
+ }
+ // Check and commit; commit is set to occur once buffer is 3/4 full
+ buffer_check(cpu, GPU_TRACE_BUF);
+ local_irq_restore(flags);
+}
+
+static void marshal_sched_trace_switch(int tgid, int pid, int cookie, int state)
+{
+ unsigned long cpu = smp_processor_id(), flags;
+
+ if (!per_cpu(gator_buffer, cpu)[SCHED_TRACE_BUF])
+ return;
+
+ local_irq_save(flags);
+ if (buffer_check_space(cpu, SCHED_TRACE_BUF, MAXSIZE_PACK64 + 5 * MAXSIZE_PACK32)) {
+ gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, MESSAGE_SCHED_SWITCH);
+ gator_buffer_write_packed_int64(cpu, SCHED_TRACE_BUF, gator_get_time());
+ gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, tgid);
+ gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, pid);
+ gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, cookie);
+ gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, state);
+ }
+ // Check and commit; commit is set to occur once buffer is 3/4 full
+ buffer_check(cpu, SCHED_TRACE_BUF);
+ local_irq_restore(flags);
+}
+
+static void marshal_sched_trace_exit(int tgid, int pid)
+{
+ unsigned long cpu = smp_processor_id(), flags;
+
+ if (!per_cpu(gator_buffer, cpu)[SCHED_TRACE_BUF])
+ return;
+
+ local_irq_save(flags);
+ if (buffer_check_space(cpu, SCHED_TRACE_BUF, MAXSIZE_PACK64 + 2 * MAXSIZE_PACK32)) {
+ gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, MESSAGE_SCHED_EXIT);
+ gator_buffer_write_packed_int64(cpu, SCHED_TRACE_BUF, gator_get_time());
+ gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, pid);
+ }
+ // Check and commit; commit is set to occur once buffer is 3/4 full
+ buffer_check(cpu, SCHED_TRACE_BUF);
+ local_irq_restore(flags);
+}
+
+#if GATOR_CPU_FREQ_SUPPORT
+static void marshal_idle(int core, int state)
+{
+ unsigned long flags, cpu;
+
+ local_irq_save(flags);
+ cpu = smp_processor_id();
+ if (buffer_check_space(cpu, IDLE_BUF, MAXSIZE_PACK64 + 2 * MAXSIZE_PACK32)) {
+ gator_buffer_write_packed_int(cpu, IDLE_BUF, state);
+ gator_buffer_write_packed_int64(cpu, IDLE_BUF, gator_get_time());
+ gator_buffer_write_packed_int(cpu, IDLE_BUF, core);
+ }
+ // Check and commit; commit is set to occur once buffer is 3/4 full
+ buffer_check(cpu, IDLE_BUF);
+ local_irq_restore(flags);
+}
+#endif
+
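+/*
+ * Write the header for a new frame in the given buffer: the optional response
+ * type, a 4-byte placeholder for the unpacked frame length, the frame type and
+ * the core that produced the data.
+ */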
+static void marshal_frame(int cpu, int buftype)
+{
+ int frame;
+
+ if (!per_cpu(gator_buffer, cpu)[buftype]) {
+ return;
+ }
+
+ switch (buftype) {
+ case SUMMARY_BUF:
+ frame = FRAME_SUMMARY;
+ break;
+ case BACKTRACE_BUF:
+ frame = FRAME_BACKTRACE;
+ break;
+ case NAME_BUF:
+ frame = FRAME_NAME;
+ break;
+ case COUNTER_BUF:
+ frame = FRAME_COUNTER;
+ break;
+ case BLOCK_COUNTER_BUF:
+ frame = FRAME_BLOCK_COUNTER;
+ break;
+ case ANNOTATE_BUF:
+ frame = FRAME_ANNOTATE;
+ break;
+ case SCHED_TRACE_BUF:
+ frame = FRAME_SCHED_TRACE;
+ break;
+ case GPU_TRACE_BUF:
+ frame = FRAME_GPU_TRACE;
+ break;
+ case IDLE_BUF:
+ frame = FRAME_IDLE;
+ break;
+ default:
+ frame = -1;
+ break;
+ }
+
+ // add response type
+ if (gator_response_type > 0) {
+ gator_buffer_write_packed_int(cpu, buftype, gator_response_type);
+ }
+
+ // leave space for 4-byte unpacked length
+ per_cpu(gator_buffer_write, cpu)[buftype] = (per_cpu(gator_buffer_write, cpu)[buftype] + 4) & gator_buffer_mask[buftype];
+
+ // add frame type and core number
+ gator_buffer_write_packed_int(cpu, buftype, frame);
+ gator_buffer_write_packed_int(cpu, buftype, cpu);
+}
+
+#if defined(__arm__) || defined(__aarch64__)
+static void marshal_core_name(const char *name)
+{
+ int cpu = smp_processor_id();
+ unsigned long flags;
+ local_irq_save(flags);
+ if (buffer_check_space(cpu, NAME_BUF, MAXSIZE_PACK32 + MAXSIZE_CORE_NAME)) {
+ gator_buffer_write_packed_int(cpu, NAME_BUF, HRTIMER_CORE_NAME);
+ gator_buffer_write_string(cpu, NAME_BUF, name);
+ }
+ buffer_check(cpu, NAME_BUF);
+ local_irq_restore(flags);
+}
+#endif
diff --git a/drivers/gator/gator_pack.c b/drivers/gator/gator_pack.c
new file mode 100644
index 000000000000..119746befb09
--- /dev/null
+++ b/drivers/gator/gator_pack.c
@@ -0,0 +1,185 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
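+/*
+ * Values are stored in a little-endian base-128 ("varint") format, similar to
+ * unsigned LEB128: each byte holds seven bits of the value and bit 7 is set on
+ * every byte except the last. For example, 0x1234 is emitted as the two bytes
+ * 0xb4, 0x24, since 0x34 + (0x24 << 7) == 0x1234.
+ */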
+static void gator_buffer_write_packed_int(int cpu, int buftype, unsigned int x)
+{
+ uint32_t write = per_cpu(gator_buffer_write, cpu)[buftype];
+ uint32_t mask = gator_buffer_mask[buftype];
+ char *buffer = per_cpu(gator_buffer, cpu)[buftype];
+ int write0 = (write + 0) & mask;
+ int write1 = (write + 1) & mask;
+
+ if ((x & 0xffffff80) == 0) {
+ buffer[write0] = x & 0x7f;
+ per_cpu(gator_buffer_write, cpu)[buftype] = write1;
+ } else if ((x & 0xffffc000) == 0) {
+ int write2 = (write + 2) & mask;
+ buffer[write0] = x | 0x80;
+ buffer[write1] = (x >> 7) & 0x7f;
+ per_cpu(gator_buffer_write, cpu)[buftype] = write2;
+ } else if ((x & 0xffe00000) == 0) {
+ int write2 = (write + 2) & mask;
+ int write3 = (write + 3) & mask;
+ buffer[write0] = x | 0x80;
+ buffer[write1] = (x >> 7) | 0x80;
+ buffer[write2] = (x >> 14) & 0x7f;
+ per_cpu(gator_buffer_write, cpu)[buftype] = write3;
+ } else if ((x & 0xf0000000) == 0) {
+ int write2 = (write + 2) & mask;
+ int write3 = (write + 3) & mask;
+ int write4 = (write + 4) & mask;
+ buffer[write0] = x | 0x80;
+ buffer[write1] = (x >> 7) | 0x80;
+ buffer[write2] = (x >> 14) | 0x80;
+ buffer[write3] = (x >> 21) & 0x7f;
+ per_cpu(gator_buffer_write, cpu)[buftype] = write4;
+ } else {
+ int write2 = (write + 2) & mask;
+ int write3 = (write + 3) & mask;
+ int write4 = (write + 4) & mask;
+ int write5 = (write + 5) & mask;
+ buffer[write0] = x | 0x80;
+ buffer[write1] = (x >> 7) | 0x80;
+ buffer[write2] = (x >> 14) | 0x80;
+ buffer[write3] = (x >> 21) | 0x80;
+ buffer[write4] = (x >> 28) & 0x0f;
+ per_cpu(gator_buffer_write, cpu)[buftype] = write5;
+ }
+}
+
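+/* Same seven-bits-per-byte scheme as above, extended to 64-bit values (at most ten bytes) */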
+static void gator_buffer_write_packed_int64(int cpu, int buftype, unsigned long long x)
+{
+ uint32_t write = per_cpu(gator_buffer_write, cpu)[buftype];
+ uint32_t mask = gator_buffer_mask[buftype];
+ char *buffer = per_cpu(gator_buffer, cpu)[buftype];
+ int write0 = (write + 0) & mask;
+ int write1 = (write + 1) & mask;
+
+ if ((x & 0xffffffffffffff80LL) == 0) {
+ buffer[write0] = x & 0x7f;
+ per_cpu(gator_buffer_write, cpu)[buftype] = write1;
+ } else if ((x & 0xffffffffffffc000LL) == 0) {
+ int write2 = (write + 2) & mask;
+ buffer[write0] = x | 0x80;
+ buffer[write1] = (x >> 7) & 0x7f;
+ per_cpu(gator_buffer_write, cpu)[buftype] = write2;
+ } else if ((x & 0xffffffffffe00000LL) == 0) {
+ int write2 = (write + 2) & mask;
+ int write3 = (write + 3) & mask;
+ buffer[write0] = x | 0x80;
+ buffer[write1] = (x >> 7) | 0x80;
+ buffer[write2] = (x >> 14) & 0x7f;
+ per_cpu(gator_buffer_write, cpu)[buftype] = write3;
+ } else if ((x & 0xfffffffff0000000LL) == 0) {
+ int write2 = (write + 2) & mask;
+ int write3 = (write + 3) & mask;
+ int write4 = (write + 4) & mask;
+ buffer[write0] = x | 0x80;
+ buffer[write1] = (x >> 7) | 0x80;
+ buffer[write2] = (x >> 14) | 0x80;
+ buffer[write3] = (x >> 21) & 0x7f;
+ per_cpu(gator_buffer_write, cpu)[buftype] = write4;
+ } else if ((x & 0xfffffff800000000LL) == 0) {
+ int write2 = (write + 2) & mask;
+ int write3 = (write + 3) & mask;
+ int write4 = (write + 4) & mask;
+ int write5 = (write + 5) & mask;
+ buffer[write0] = x | 0x80;
+ buffer[write1] = (x >> 7) | 0x80;
+ buffer[write2] = (x >> 14) | 0x80;
+ buffer[write3] = (x >> 21) | 0x80;
+ buffer[write4] = (x >> 28) & 0x7f;
+ per_cpu(gator_buffer_write, cpu)[buftype] = write5;
+ } else if ((x & 0xfffffc0000000000LL) == 0) {
+ int write2 = (write + 2) & mask;
+ int write3 = (write + 3) & mask;
+ int write4 = (write + 4) & mask;
+ int write5 = (write + 5) & mask;
+ int write6 = (write + 6) & mask;
+ buffer[write0] = x | 0x80;
+ buffer[write1] = (x >> 7) | 0x80;
+ buffer[write2] = (x >> 14) | 0x80;
+ buffer[write3] = (x >> 21) | 0x80;
+ buffer[write4] = (x >> 28) | 0x80;
+ buffer[write5] = (x >> 35) & 0x7f;
+ per_cpu(gator_buffer_write, cpu)[buftype] = write6;
+ } else if ((x & 0xfffe000000000000LL) == 0) {
+ int write2 = (write + 2) & mask;
+ int write3 = (write + 3) & mask;
+ int write4 = (write + 4) & mask;
+ int write5 = (write + 5) & mask;
+ int write6 = (write + 6) & mask;
+ int write7 = (write + 7) & mask;
+ buffer[write0] = x | 0x80;
+ buffer[write1] = (x >> 7) | 0x80;
+ buffer[write2] = (x >> 14) | 0x80;
+ buffer[write3] = (x >> 21) | 0x80;
+ buffer[write4] = (x >> 28) | 0x80;
+ buffer[write5] = (x >> 35) | 0x80;
+ buffer[write6] = (x >> 42) & 0x7f;
+ per_cpu(gator_buffer_write, cpu)[buftype] = write7;
+ } else if ((x & 0xff00000000000000LL) == 0) {
+ int write2 = (write + 2) & mask;
+ int write3 = (write + 3) & mask;
+ int write4 = (write + 4) & mask;
+ int write5 = (write + 5) & mask;
+ int write6 = (write + 6) & mask;
+ int write7 = (write + 7) & mask;
+ int write8 = (write + 8) & mask;
+ buffer[write0] = x | 0x80;
+ buffer[write1] = (x >> 7) | 0x80;
+ buffer[write2] = (x >> 14) | 0x80;
+ buffer[write3] = (x >> 21) | 0x80;
+ buffer[write4] = (x >> 28) | 0x80;
+ buffer[write5] = (x >> 35) | 0x80;
+ buffer[write6] = (x >> 42) | 0x80;
+ buffer[write7] = (x >> 49) & 0x7f;
+ per_cpu(gator_buffer_write, cpu)[buftype] = write8;
+ } else if ((x & 0x8000000000000000LL) == 0) {
+ int write2 = (write + 2) & mask;
+ int write3 = (write + 3) & mask;
+ int write4 = (write + 4) & mask;
+ int write5 = (write + 5) & mask;
+ int write6 = (write + 6) & mask;
+ int write7 = (write + 7) & mask;
+ int write8 = (write + 8) & mask;
+ int write9 = (write + 9) & mask;
+ buffer[write0] = x | 0x80;
+ buffer[write1] = (x >> 7) | 0x80;
+ buffer[write2] = (x >> 14) | 0x80;
+ buffer[write3] = (x >> 21) | 0x80;
+ buffer[write4] = (x >> 28) | 0x80;
+ buffer[write5] = (x >> 35) | 0x80;
+ buffer[write6] = (x >> 42) | 0x80;
+ buffer[write7] = (x >> 49) | 0x80;
+ buffer[write8] = (x >> 56) & 0x7f;
+ per_cpu(gator_buffer_write, cpu)[buftype] = write9;
+ } else {
+ int write2 = (write + 2) & mask;
+ int write3 = (write + 3) & mask;
+ int write4 = (write + 4) & mask;
+ int write5 = (write + 5) & mask;
+ int write6 = (write + 6) & mask;
+ int write7 = (write + 7) & mask;
+ int write8 = (write + 8) & mask;
+ int write9 = (write + 9) & mask;
+ int write10 = (write + 10) & mask;
+ buffer[write0] = x | 0x80;
+ buffer[write1] = (x >> 7) | 0x80;
+ buffer[write2] = (x >> 14) | 0x80;
+ buffer[write3] = (x >> 21) | 0x80;
+ buffer[write4] = (x >> 28) | 0x80;
+ buffer[write5] = (x >> 35) | 0x80;
+ buffer[write6] = (x >> 42) | 0x80;
+ buffer[write7] = (x >> 49) | 0x80;
+ buffer[write8] = (x >> 56) | 0x80;
+ buffer[write9] = (x >> 63) & 0x7f;
+ per_cpu(gator_buffer_write, cpu)[buftype] = write10;
+ }
+}
diff --git a/drivers/gator/gator_trace_gpu.c b/drivers/gator/gator_trace_gpu.c
new file mode 100644
index 000000000000..9fc488bf1e17
--- /dev/null
+++ b/drivers/gator/gator_trace_gpu.c
@@ -0,0 +1,219 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "gator.h"
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/math64.h>
+
+#ifdef MALI_SUPPORT
+#include "linux/mali_linux_trace.h"
+#endif
+#include "gator_trace_gpu.h"
+
+/*
+ * Taken from MALI_PROFILING_EVENT_TYPE_* items in Mali DDK.
+ */
+#define EVENT_TYPE_SINGLE 0
+#define EVENT_TYPE_START 1
+#define EVENT_TYPE_STOP 2
+#define EVENT_TYPE_SUSPEND 3
+#define EVENT_TYPE_RESUME 4
+
+/* Note whether tracepoints have been registered */
+static int mali_timeline_trace_registered;
+static int mali_job_slots_trace_registered;
+static int gpu_trace_registered;
+
+#define GPU_UNIT_NONE 0
+#define GPU_UNIT_VP 1
+#define GPU_UNIT_FP 2
+#define GPU_UNIT_CL 3
+
+#define MALI_400 (0x0b07)
+#define MALI_T6xx (0x0056)
+
+#if defined(MALI_SUPPORT) && (MALI_SUPPORT != MALI_T6xx)
+#include "gator_events_mali_400.h"
+
+/*
+ * Taken from MALI_PROFILING_EVENT_CHANNEL_* in Mali DDK.
+ */
+enum {
+ EVENT_CHANNEL_SOFTWARE = 0,
+ EVENT_CHANNEL_VP0 = 1,
+ EVENT_CHANNEL_FP0 = 5,
+ EVENT_CHANNEL_FP1,
+ EVENT_CHANNEL_FP2,
+ EVENT_CHANNEL_FP3,
+ EVENT_CHANNEL_FP4,
+ EVENT_CHANNEL_FP5,
+ EVENT_CHANNEL_FP6,
+ EVENT_CHANNEL_FP7,
+ EVENT_CHANNEL_GPU = 21
+};
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from the GPU channel
+ */
+enum {
+ EVENT_REASON_SINGLE_GPU_NONE = 0,
+ EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE = 1,
+};
+
+GATOR_DEFINE_PROBE(mali_timeline_event, TP_PROTO(unsigned int event_id, unsigned int d0, unsigned int d1, unsigned int d2, unsigned int d3, unsigned int d4))
+{
+ unsigned int component, state;
+
+ // do as much work as possible before disabling interrupts
+ component = (event_id >> 16) & 0xFF; // component is an 8-bit field
+ state = (event_id >> 24) & 0xF; // state is a 4-bit field
+
+ switch (state) {
+ case EVENT_TYPE_START:
+ if (component == EVENT_CHANNEL_VP0) {
+ /* tgid = d0; pid = d1; */
+ marshal_sched_gpu_start(GPU_UNIT_VP, 0, d0, d1);
+ } else if (component >= EVENT_CHANNEL_FP0 && component <= EVENT_CHANNEL_FP7) {
+ /* tgid = d0; pid = d1; */
+ marshal_sched_gpu_start(GPU_UNIT_FP, component - EVENT_CHANNEL_FP0, d0, d1);
+ }
+ break;
+
+ case EVENT_TYPE_STOP:
+ if (component == EVENT_CHANNEL_VP0) {
+ marshal_sched_gpu_stop(GPU_UNIT_VP, 0);
+ } else if (component >= EVENT_CHANNEL_FP0 && component <= EVENT_CHANNEL_FP7) {
+ marshal_sched_gpu_stop(GPU_UNIT_FP, component - EVENT_CHANNEL_FP0);
+ }
+ break;
+
+ case EVENT_TYPE_SINGLE:
+ if (component == EVENT_CHANNEL_GPU) {
+ unsigned int reason = (event_id & 0xffff);
+
+ if (reason == EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE) {
+ gator_events_mali_log_dvfs_event(d0, d1);
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+#endif
+
+#if defined(MALI_SUPPORT) && (MALI_SUPPORT == MALI_T6xx)
+GATOR_DEFINE_PROBE(mali_job_slots_event, TP_PROTO(unsigned int event_id, unsigned int tgid, unsigned int pid))
+{
+ unsigned int component, state, unit;
+
+ component = (event_id >> 16) & 0xFF; // component is an 8-bit field
+ state = (event_id >> 24) & 0xF; // state is a 4-bit field
+
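+ // job slot (component): 0 = fragment unit, 1 = vertex unit, 2 = OpenCL unit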
+ switch (component) {
+ case 0:
+ unit = GPU_UNIT_FP;
+ break;
+ case 1:
+ unit = GPU_UNIT_VP;
+ break;
+ case 2:
+ unit = GPU_UNIT_CL;
+ break;
+ default:
+ unit = GPU_UNIT_NONE;
+ }
+
+ if (unit != GPU_UNIT_NONE) {
+ switch (state) {
+ case EVENT_TYPE_START:
+ marshal_sched_gpu_start(unit, 0, tgid, (pid != 0 ? pid : tgid));
+ break;
+ case EVENT_TYPE_STOP:
+ marshal_sched_gpu_stop(unit, 0);
+ break;
+ default:
+ /*
+ * Some jobs can be soft-stopped, so ensure that this terminates the activity trace.
+ */
+ marshal_sched_gpu_stop(unit, 0);
+ }
+ }
+}
+#endif
+
+GATOR_DEFINE_PROBE(gpu_activity_start, TP_PROTO(int gpu_unit, int gpu_core, struct task_struct *p))
+{
+ marshal_sched_gpu_start(gpu_unit, gpu_core, (int)p->tgid, (int)p->pid);
+}
+
+GATOR_DEFINE_PROBE(gpu_activity_stop, TP_PROTO(int gpu_unit, int gpu_core))
+{
+ marshal_sched_gpu_stop(gpu_unit, gpu_core);
+}
+
+int gator_trace_gpu_start(void)
+{
+ /*
+ * Returns nonzero if installation failed.
+ * The absence of GPU tracepoints is not an error.
+ */
+
+ gpu_trace_registered = mali_timeline_trace_registered = mali_job_slots_trace_registered = 0;
+
+#if defined(MALI_SUPPORT) && (MALI_SUPPORT != MALI_T6xx)
+ if (!GATOR_REGISTER_TRACE(mali_timeline_event)) {
+ mali_timeline_trace_registered = 1;
+ }
+#endif
+
+#if defined(MALI_SUPPORT) && (MALI_SUPPORT == MALI_T6xx)
+ if (!GATOR_REGISTER_TRACE(mali_job_slots_event)) {
+ mali_job_slots_trace_registered = 1;
+ }
+#endif
+
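+ // Fall back to the generic gpu_activity tracepoints when the Mali timeline
+ // tracepoint is not available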
+ if (!mali_timeline_trace_registered) {
+ if (GATOR_REGISTER_TRACE(gpu_activity_start)) {
+ return 0;
+ }
+ if (GATOR_REGISTER_TRACE(gpu_activity_stop)) {
+ GATOR_UNREGISTER_TRACE(gpu_activity_start);
+ return 0;
+ }
+ gpu_trace_registered = 1;
+ }
+
+ return 0;
+}
+
+void gator_trace_gpu_stop(void)
+{
+#if defined(MALI_SUPPORT) && (MALI_SUPPORT != MALI_T6xx)
+ if (mali_timeline_trace_registered) {
+ GATOR_UNREGISTER_TRACE(mali_timeline_event);
+ }
+#endif
+
+#if defined(MALI_SUPPORT) && (MALI_SUPPORT == MALI_T6xx)
+ if (mali_job_slots_trace_registered) {
+ GATOR_UNREGISTER_TRACE(mali_job_slots_event);
+ }
+#endif
+
+ if (gpu_trace_registered) {
+ GATOR_UNREGISTER_TRACE(gpu_activity_stop);
+ GATOR_UNREGISTER_TRACE(gpu_activity_start);
+ }
+
+ gpu_trace_registered = mali_timeline_trace_registered = mali_job_slots_trace_registered = 0;
+}
diff --git a/drivers/gator/gator_trace_gpu.h b/drivers/gator/gator_trace_gpu.h
new file mode 100644
index 000000000000..efb47c67a13a
--- /dev/null
+++ b/drivers/gator/gator_trace_gpu.h
@@ -0,0 +1,79 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#undef TRACE_GPU
+#define TRACE_GPU gpu
+
+#if !defined(_TRACE_GPU_H)
+#define _TRACE_GPU_H
+
+#include <linux/tracepoint.h>
+
+/*
+ * UNIT - the GPU processor type
+ * 1 = Vertex Processor
+ * 2 = Fragment Processor
+ *
+ * CORE - the GPU processor core number
+ * this is not the CPU core number
+ */
+
+/*
+ * Tracepoint for calling GPU unit start activity on core
+ */
+TRACE_EVENT(gpu_activity_start,
+
+ TP_PROTO(int gpu_unit, int gpu_core, struct task_struct *p),
+
+ TP_ARGS(gpu_unit, gpu_core, p),
+
+ TP_STRUCT__entry(
+ __field(int, gpu_unit)
+ __field(int, gpu_core)
+ __array(char, comm, TASK_COMM_LEN)
+ __field(pid_t, pid)
+ ),
+
+ TP_fast_assign(
+ __entry->gpu_unit = gpu_unit;
+ __entry->gpu_core = gpu_core;
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ ),
+
+ TP_printk("unit=%d core=%d comm=%s pid=%d",
+ __entry->gpu_unit, __entry->gpu_core, __entry->comm,
+ __entry->pid)
+ );
+
+/*
+ * Tracepoint for calling GPU unit stop activity on core
+ */
+TRACE_EVENT(gpu_activity_stop,
+
+ TP_PROTO(int gpu_unit, int gpu_core),
+
+ TP_ARGS(gpu_unit, gpu_core),
+
+ TP_STRUCT__entry(
+ __field(int, gpu_unit)
+ __field(int, gpu_core)
+ ),
+
+ TP_fast_assign(
+ __entry->gpu_unit = gpu_unit;
+ __entry->gpu_core = gpu_core;
+ ),
+
+ TP_printk("unit=%d core=%d", __entry->gpu_unit, __entry->gpu_core)
+ );
+
+#endif /* _TRACE_GPU_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/gator/gator_trace_power.c b/drivers/gator/gator_trace_power.c
new file mode 100644
index 000000000000..79fa13c126dc
--- /dev/null
+++ b/drivers/gator/gator_trace_power.c
@@ -0,0 +1,188 @@
+/**
+ * Copyright (C) ARM Limited 2011-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/cpufreq.h>
+#include <trace/events/power.h>
+
+#if defined(__arm__)
+
+#include <asm/mach-types.h>
+
+#define implements_wfi() (!machine_is_omap3_beagle())
+
+#else
+
+#define implements_wfi() false
+
+#endif
+
+// cpu_frequency and cpu_idle trace points were introduced in Linux kernel v2.6.38
+// the now deprecated power_frequency trace point was available prior to 2.6.38, but only for x86
+#if GATOR_CPU_FREQ_SUPPORT
+enum {
+ POWER_CPU_FREQ,
+ POWER_CPU_IDLE,
+ POWER_TOTAL
+};
+
+static DEFINE_PER_CPU(ulong, idle_prev_state);
+static ulong power_cpu_enabled[POWER_TOTAL];
+static ulong power_cpu_key[POWER_TOTAL];
+
+static int gator_trace_power_create_files(struct super_block *sb, struct dentry *root)
+{
+ struct dentry *dir;
+
+ // cpu_frequency
+ dir = gatorfs_mkdir(sb, root, "Linux_power_cpu_freq");
+ if (!dir) {
+ return -1;
+ }
+ gatorfs_create_ulong(sb, dir, "enabled", &power_cpu_enabled[POWER_CPU_FREQ]);
+ gatorfs_create_ro_ulong(sb, dir, "key", &power_cpu_key[POWER_CPU_FREQ]);
+
+ // cpu_idle
+ dir = gatorfs_mkdir(sb, root, "Linux_power_cpu_idle");
+ if (!dir) {
+ return -1;
+ }
+ gatorfs_create_ulong(sb, dir, "enabled", &power_cpu_enabled[POWER_CPU_IDLE]);
+ gatorfs_create_ro_ulong(sb, dir, "key", &power_cpu_key[POWER_CPU_IDLE]);
+
+ return 0;
+}
+
+// 'cpu' may not equal smp_processor_id(); i.e., the probe may not be running on the core whose frequency/idle state is changing
+GATOR_DEFINE_PROBE(cpu_frequency, TP_PROTO(unsigned int frequency, unsigned int cpu))
+{
+ marshal_event_single(cpu, power_cpu_key[POWER_CPU_FREQ], frequency * 1000);
+}
+
+#define WFI_EXIT 2
+#define WFI_ENTER 1
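+// WFI (wait-for-interrupt) enter/exit markers written to IDLE_BUF via marshal_idle()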
+GATOR_DEFINE_PROBE(cpu_idle, TP_PROTO(unsigned int state, unsigned int cpu))
+{
+ if (state == per_cpu(idle_prev_state, cpu)) {
+ return;
+ }
+
+ if (implements_wfi()) {
+ if (state == PWR_EVENT_EXIT) {
+ // transition from wfi to non-wfi
+ marshal_idle(cpu, WFI_EXIT);
+ } else {
+ // transition from non-wfi to wfi
+ marshal_idle(cpu, WFI_ENTER);
+ }
+ }
+
+ per_cpu(idle_prev_state, cpu) = state;
+
+ if (power_cpu_enabled[POWER_CPU_IDLE]) {
+ // Increment state so that no negative numbers are sent
+ marshal_event_single(cpu, power_cpu_key[POWER_CPU_IDLE], state + 1);
+ }
+}
+
+static void gator_trace_power_online(void)
+{
+ int cpu = smp_processor_id();
+ if (power_cpu_enabled[POWER_CPU_FREQ]) {
+ marshal_event_single(cpu, power_cpu_key[POWER_CPU_FREQ], cpufreq_quick_get(cpu) * 1000);
+ }
+}
+
+static void gator_trace_power_offline(void)
+{
+ // Set frequency to zero on an offline
+ int cpu = smp_processor_id();
+ if (power_cpu_enabled[POWER_CPU_FREQ]) {
+ marshal_event_single(cpu, power_cpu_key[POWER_CPU_FREQ], 0);
+ }
+}
+
+static int gator_trace_power_start(void)
+{
+ int cpu;
+
+ // register tracepoints
+ if (power_cpu_enabled[POWER_CPU_FREQ])
+ if (GATOR_REGISTER_TRACE(cpu_frequency))
+ goto fail_cpu_frequency_exit;
+
+ // Always register cpu_idle to detect WFI, independent of power_cpu_enabled[POWER_CPU_IDLE]
+ if (GATOR_REGISTER_TRACE(cpu_idle))
+ goto fail_cpu_idle_exit;
+ pr_debug("gator: registered power event tracepoints\n");
+
+ for_each_present_cpu(cpu) {
+ per_cpu(idle_prev_state, cpu) = 0;
+ }
+
+ return 0;
+
+ // unregister tracepoints on error
+fail_cpu_idle_exit:
+ if (power_cpu_enabled[POWER_CPU_FREQ])
+ GATOR_UNREGISTER_TRACE(cpu_frequency);
+fail_cpu_frequency_exit:
+ pr_err("gator: power event tracepoints failed to activate, please verify that tracepoints are enabled in the linux kernel\n");
+
+ return -1;
+}
+
+static void gator_trace_power_stop(void)
+{
+ int i;
+
+ if (power_cpu_enabled[POWER_CPU_FREQ])
+ GATOR_UNREGISTER_TRACE(cpu_frequency);
+ GATOR_UNREGISTER_TRACE(cpu_idle);
+ pr_debug("gator: unregistered power event tracepoints\n");
+
+ for (i = 0; i < POWER_TOTAL; i++) {
+ power_cpu_enabled[i] = 0;
+ }
+}
+
+void gator_trace_power_init(void)
+{
+ int i;
+ for (i = 0; i < POWER_TOTAL; i++) {
+ power_cpu_enabled[i] = 0;
+ power_cpu_key[i] = gator_events_get_key();
+ }
+}
+#else
+static int gator_trace_power_create_files(struct super_block *sb, struct dentry *root)
+{
+ return 0;
+}
+
+static void gator_trace_power_online(void)
+{
+}
+
+static void gator_trace_power_offline(void)
+{
+}
+
+static int gator_trace_power_start(void)
+{
+ return 0;
+}
+
+static void gator_trace_power_stop(void)
+{
+}
+
+void gator_trace_power_init(void)
+{
+}
+#endif
diff --git a/drivers/gator/gator_trace_sched.c b/drivers/gator/gator_trace_sched.c
new file mode 100644
index 000000000000..d0336f9ca0e0
--- /dev/null
+++ b/drivers/gator/gator_trace_sched.c
@@ -0,0 +1,208 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <trace/events/sched.h>
+#include "gator.h"
+
+#define SCHED_SWITCH 1
+#define SCHED_PROCESS_EXIT 2
+
+#define TASK_MAP_ENTRIES 1024 /* must be power of 2 */
+#define TASK_MAX_COLLISIONS 2
+
+static DEFINE_PER_CPU(uint64_t *, taskname_keys);
+static DEFINE_PER_CPU(int, collecting);
+
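+/* Reason the previous task stopped running, reported as the state field of the
+ * sched switch message (see probe_sched_write) */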
+enum {
+ STATE_WAIT_ON_OTHER = 0,
+ STATE_CONTENTION,
+ STATE_WAIT_ON_IO,
+ STATE_WAIT_ON_MUTEX,
+};
+
+void emit_pid_name(struct task_struct *task)
+{
+ bool found = false;
+ char taskcomm[TASK_COMM_LEN + 3];
+ unsigned long x, cpu = smp_processor_id();
+ uint64_t *keys = &(per_cpu(taskname_keys, cpu)[(task->pid & 0xFF) * TASK_MAX_COLLISIONS]);
+ uint64_t value;
+
+ value = gator_chksum_crc32(task->comm);
+ value = (value << 32) | (uint32_t)task->pid;
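+ // composite key: crc32 of the task name in the upper 32 bits, pid in the lower
+ // 32; the low 8 bits of the pid select a bucket of TASK_MAX_COLLISIONS slots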
+
+ // determine if the thread name was emitted already
+ for (x = 0; x < TASK_MAX_COLLISIONS; x++) {
+ if (keys[x] == value) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ // shift values, new value always in front
+ uint64_t oldv, newv = value;
+ for (x = 0; x < TASK_MAX_COLLISIONS; x++) {
+ oldv = keys[x];
+ keys[x] = newv;
+ newv = oldv;
+ }
+
+ // emit the pid name; get_task_comm cannot be used as it is not exported on all kernel versions
+ if (strlcpy(taskcomm, task->comm, TASK_COMM_LEN) == TASK_COMM_LEN - 1) {
+ // append ellipses if task->comm has length of TASK_COMM_LEN - 1
+ // append an ellipsis if task->comm has a length of TASK_COMM_LEN - 1
+ }
+
+ marshal_thread_name(task->pid, taskcomm);
+ }
+}
+
+static void collect_counters(void)
+{
+ int *buffer, len;
+ long long *buffer64;
+ struct gator_interface *gi;
+
+ if (marshal_event_header()) {
+ list_for_each_entry(gi, &gator_events, list) {
+ if (gi->read) {
+ len = gi->read(&buffer);
+ marshal_event(len, buffer);
+ } else if (gi->read64) {
+ len = gi->read64(&buffer64);
+ marshal_event64(len, buffer64);
+ }
+ }
+ }
+}
+
+static void probe_sched_write(int type, struct task_struct *task, struct task_struct *old_task)
+{
+ int cookie = 0, state = 0;
+ int cpu = smp_processor_id();
+ int tgid = task->tgid;
+ int pid = task->pid;
+
+ if (type == SCHED_SWITCH) {
+ // do as much work as possible before disabling interrupts
+ cookie = get_exec_cookie(cpu, task);
+ emit_pid_name(task);
+ if (old_task->state == TASK_RUNNING) {
+ state = STATE_CONTENTION;
+ } else if (old_task->in_iowait) {
+ state = STATE_WAIT_ON_IO;
+#ifdef CONFIG_DEBUG_MUTEXES
+ } else if (old_task->blocked_on) {
+ state = STATE_WAIT_ON_MUTEX;
+#endif
+ } else {
+ state = STATE_WAIT_ON_OTHER;
+ }
+
+ per_cpu(collecting, cpu) = 1;
+ collect_counters();
+ per_cpu(collecting, cpu) = 0;
+ }
+
+ // marshal_sched_trace_*() disables interrupts because a free could fire
+ // while a switch is writing to the buffer; disabling preemption alone is
+ // not sufficient.
+ // TODO: is disabling interrupts still necessary now that exit is used instead of free?
+ if (type == SCHED_SWITCH) {
+ marshal_sched_trace_switch(tgid, pid, cookie, state);
+ } else {
+ marshal_sched_trace_exit(tgid, pid);
+ }
+}
+
+// special case used during a suspend of the system
+static void trace_sched_insert_idle(void)
+{
+ marshal_sched_trace_switch(0, 0, 0, 0);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
+GATOR_DEFINE_PROBE(sched_switch, TP_PROTO(struct rq *rq, struct task_struct *prev, struct task_struct *next))
+#else
+GATOR_DEFINE_PROBE(sched_switch, TP_PROTO(struct task_struct *prev, struct task_struct *next))
+#endif
+{
+ probe_sched_write(SCHED_SWITCH, next, prev);
+}
+
+GATOR_DEFINE_PROBE(sched_process_exit, TP_PROTO(struct task_struct *p))
+{
+ probe_sched_write(SCHED_PROCESS_EXIT, p, NULL);
+}
+
+static void do_nothing(void *info)
+{
+ // Intentionally do nothing
+ (void)info;
+}
+
+static int register_scheduler_tracepoints(void)
+{
+ // register tracepoints
+ if (GATOR_REGISTER_TRACE(sched_switch))
+ goto fail_sched_switch;
+ if (GATOR_REGISTER_TRACE(sched_process_exit))
+ goto fail_sched_process_exit;
+ pr_debug("gator: registered tracepoints\n");
+
+ // Now that the scheduler tracepoint is registered, force a context switch
+ // on all cpus to capture what is currently running.
+ on_each_cpu(do_nothing, NULL, 0);
+
+ return 0;
+
+ // unregister tracepoints on error
+fail_sched_process_exit:
+ GATOR_UNREGISTER_TRACE(sched_switch);
+fail_sched_switch:
+ pr_err("gator: tracepoints failed to activate, please verify that tracepoints are enabled in the linux kernel\n");
+
+ return -1;
+}
+
+int gator_trace_sched_start(void)
+{
+ int cpu, size;
+
+ for_each_present_cpu(cpu) {
+ size = TASK_MAP_ENTRIES * TASK_MAX_COLLISIONS * sizeof(uint64_t);
+ per_cpu(taskname_keys, cpu) = (uint64_t *)kmalloc(size, GFP_KERNEL);
+ if (!per_cpu(taskname_keys, cpu))
+ return -1;
+ memset(per_cpu(taskname_keys, cpu), 0, size);
+ }
+
+ return register_scheduler_tracepoints();
+}
+
+void gator_trace_sched_offline(void)
+{
+ trace_sched_insert_idle();
+}
+
+static void unregister_scheduler_tracepoints(void)
+{
+ GATOR_UNREGISTER_TRACE(sched_switch);
+ GATOR_UNREGISTER_TRACE(sched_process_exit);
+ pr_debug("gator: unregistered tracepoints\n");
+}
+
+void gator_trace_sched_stop(void)
+{
+ int cpu;
+ unregister_scheduler_tracepoints();
+
+ for_each_present_cpu(cpu) {
+ kfree(per_cpu(taskname_keys, cpu));
+ }
+}
diff --git a/drivers/gator/mali_t6xx.mk b/drivers/gator/mali_t6xx.mk
new file mode 100644
index 000000000000..2cc64113ea2c
--- /dev/null
+++ b/drivers/gator/mali_t6xx.mk
@@ -0,0 +1,25 @@
+# Defines for Mali-T6xx driver
+EXTRA_CFLAGS += -DMALI_USE_UMP=1 \
+ -DMALI_LICENSE_IS_GPL=1 \
+ -DMALI_BASE_TRACK_MEMLEAK=0 \
+ -DMALI_DEBUG=0 \
+ -DMALI_ERROR_INJECT_ON=0 \
+ -DMALI_CUSTOMER_RELEASE=1 \
+ -DMALI_UNIT_TEST=0 \
+ -DMALI_BACKEND_KERNEL=1 \
+ -DMALI_NO_MALI=0
+
+KBASE_DIR = $(DDK_DIR)/kernel/drivers/gpu/arm/t6xx/kbase
+OSK_DIR = $(DDK_DIR)/kernel/drivers/gpu/arm/t6xx/kbase/osk
+UMP_DIR = $(DDK_DIR)/kernel/include/linux
+
+# Include directories in the DDK
+EXTRA_CFLAGS += -I$(DDK_DIR) \
+ -I$(KBASE_DIR)/.. \
+ -I$(OSK_DIR)/.. \
+ -I$(UMP_DIR)/.. \
+ -I$(DDK_DIR)/kernel/include \
+ -I$(KBASE_DIR)/osk/src/linux/include \
+ -I$(KBASE_DIR)/platform_dummy \
+ -I$(KBASE_DIR)/src
+
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index cc9277885dd0..ca2d3b34dbf5 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1 +1 @@
-obj-y += drm/ vga/ stub/
+obj-y += drm/ vga/ stub/ ion/
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 2bf9670ba29b..2aa331499f81 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -221,11 +221,13 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
BUG_ON(!hole_node->hole_follows || node->allocated);
- if (mm->color_adjust)
- mm->color_adjust(hole_node, color, &adj_start, &adj_end);
-
if (adj_start < start)
adj_start = start;
+ if (adj_end > end)
+ adj_end = end;
+
+ if (mm->color_adjust)
+ mm->color_adjust(hole_node, color, &adj_start, &adj_end);
if (alignment) {
unsigned tmp = adj_start % alignment;
@@ -506,7 +508,7 @@ void drm_mm_init_scan(struct drm_mm *mm,
mm->scan_size = size;
mm->scanned_blocks = 0;
mm->scan_hit_start = 0;
- mm->scan_hit_size = 0;
+ mm->scan_hit_end = 0;
mm->scan_check_range = 0;
mm->prev_scanned_node = NULL;
}
@@ -533,7 +535,7 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm,
mm->scan_size = size;
mm->scanned_blocks = 0;
mm->scan_hit_start = 0;
- mm->scan_hit_size = 0;
+ mm->scan_hit_end = 0;
mm->scan_start = start;
mm->scan_end = end;
mm->scan_check_range = 1;
@@ -552,8 +554,7 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
unsigned long hole_start, hole_end;
- unsigned long adj_start;
- unsigned long adj_end;
+ unsigned long adj_start, adj_end;
mm->scanned_blocks++;
@@ -570,14 +571,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
node->node_list.next = &mm->prev_scanned_node->node_list;
mm->prev_scanned_node = node;
- hole_start = drm_mm_hole_node_start(prev_node);
- hole_end = drm_mm_hole_node_end(prev_node);
-
- adj_start = hole_start;
- adj_end = hole_end;
-
- if (mm->color_adjust)
- mm->color_adjust(prev_node, mm->scan_color, &adj_start, &adj_end);
+ adj_start = hole_start = drm_mm_hole_node_start(prev_node);
+ adj_end = hole_end = drm_mm_hole_node_end(prev_node);
if (mm->scan_check_range) {
if (adj_start < mm->scan_start)
@@ -586,11 +581,14 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
adj_end = mm->scan_end;
}
+ if (mm->color_adjust)
+ mm->color_adjust(prev_node, mm->scan_color,
+ &adj_start, &adj_end);
+
if (check_free_hole(adj_start, adj_end,
mm->scan_size, mm->scan_alignment)) {
mm->scan_hit_start = hole_start;
- mm->scan_hit_size = hole_end;
-
+ mm->scan_hit_end = hole_end;
return 1;
}
@@ -626,19 +624,10 @@ int drm_mm_scan_remove_block(struct drm_mm_node *node)
node_list);
prev_node->hole_follows = node->scanned_preceeds_hole;
- INIT_LIST_HEAD(&node->node_list);
list_add(&node->node_list, &prev_node->node_list);
- /* Only need to check for containement because start&size for the
- * complete resulting free block (not just the desired part) is
- * stored. */
- if (node->start >= mm->scan_hit_start &&
- node->start + node->size
- <= mm->scan_hit_start + mm->scan_hit_size) {
- return 1;
- }
-
- return 0;
+ return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
+ node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index da3c82e301b1..8febea6daa08 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1717,7 +1717,8 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
}
static long
-i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
+ bool purgeable_only)
{
struct drm_i915_gem_object *obj, *next;
long count = 0;
@@ -1725,7 +1726,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
list_for_each_entry_safe(obj, next,
&dev_priv->mm.unbound_list,
gtt_list) {
- if (i915_gem_object_is_purgeable(obj) &&
+ if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
i915_gem_object_put_pages(obj) == 0) {
count += obj->base.size >> PAGE_SHIFT;
if (count >= target)
@@ -1736,7 +1737,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
list_for_each_entry_safe(obj, next,
&dev_priv->mm.inactive_list,
mm_list) {
- if (i915_gem_object_is_purgeable(obj) &&
+ if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
i915_gem_object_unbind(obj) == 0 &&
i915_gem_object_put_pages(obj) == 0) {
count += obj->base.size >> PAGE_SHIFT;
@@ -1748,6 +1749,12 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
return count;
}
+static long
+i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+{
+ return __i915_gem_shrink(dev_priv, target, true);
+}
+
static void
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
@@ -3522,14 +3529,15 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
goto out;
}
- obj->user_pin_count++;
- obj->pin_filp = file;
- if (obj->user_pin_count == 1) {
+ if (obj->user_pin_count == 0) {
ret = i915_gem_object_pin(obj, args->alignment, true, false);
if (ret)
goto out;
}
+ obj->user_pin_count++;
+ obj->pin_filp = file;
+
/* XXX - flush the CPU caches for pinned objects
* as the X server doesn't manage domains yet
*/
@@ -4395,6 +4403,9 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
if (nr_to_scan) {
nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
if (nr_to_scan > 0)
+ nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
+ false);
+ if (nr_to_scan > 0)
i915_gem_shrink_all(dev_priv);
}
@@ -4402,7 +4413,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
if (obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+ list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a9fb046b94a1..da1ad9c80bb5 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8598,19 +8598,30 @@ int intel_framebuffer_init(struct drm_device *dev,
{
int ret;
- if (obj->tiling_mode == I915_TILING_Y)
+ if (obj->tiling_mode == I915_TILING_Y) {
+ DRM_DEBUG("hardware does not support tiling Y\n");
return -EINVAL;
+ }
- if (mode_cmd->pitches[0] & 63)
+ if (mode_cmd->pitches[0] & 63) {
+ DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
+ mode_cmd->pitches[0]);
return -EINVAL;
+ }
/* FIXME <= Gen4 stride limits are bit unclear */
- if (mode_cmd->pitches[0] > 32768)
+ if (mode_cmd->pitches[0] > 32768) {
+ DRM_DEBUG("pitch (%d) must be less than 32768\n",
+ mode_cmd->pitches[0]);
return -EINVAL;
+ }
if (obj->tiling_mode != I915_TILING_NONE &&
- mode_cmd->pitches[0] != obj->stride)
+ mode_cmd->pitches[0] != obj->stride) {
+ DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
+ mode_cmd->pitches[0], obj->stride);
return -EINVAL;
+ }
/* Reject formats not supported by any plane early. */
switch (mode_cmd->pixel_format) {
@@ -8621,8 +8632,10 @@ int intel_framebuffer_init(struct drm_device *dev,
break;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_ARGB1555:
- if (INTEL_INFO(dev)->gen > 3)
+ if (INTEL_INFO(dev)->gen > 3) {
+ DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
return -EINVAL;
+ }
break;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
@@ -8630,18 +8643,22 @@ int intel_framebuffer_init(struct drm_device *dev,
case DRM_FORMAT_ARGB2101010:
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_ABGR2101010:
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_INFO(dev)->gen < 4) {
+ DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
return -EINVAL;
+ }
break;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_VYUY:
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_INFO(dev)->gen < 5) {
+ DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
return -EINVAL;
+ }
break;
default:
- DRM_DEBUG_KMS("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
+ DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b9a660a53677..17aee74258ad 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -776,14 +776,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
{
.callback = intel_no_lvds_dmi_callback,
- .ident = "ZOTAC ZBOXSD-ID12/ID13",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"),
- DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
- },
- },
- {
- .callback = intel_no_lvds_dmi_callback,
.ident = "Gigabyte GA-D525TUD",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index e6f54ffab3ba..e83a11794172 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -44,6 +44,14 @@
* i915.i915_enable_fbc parameter
*/
+static bool intel_crtc_active(struct drm_crtc *crtc)
+{
+ /* Be paranoid as we can arrive here with only partial
+ * state retrieved from the hardware during setup.
+ */
+ return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
+}
+
static void i8xx_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -405,9 +413,8 @@ void intel_update_fbc(struct drm_device *dev)
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
- if (to_intel_crtc(tmp_crtc)->active &&
- !to_intel_crtc(tmp_crtc)->primary_disabled &&
- tmp_crtc->fb) {
+ if (intel_crtc_active(tmp_crtc) &&
+ !to_intel_crtc(tmp_crtc)->primary_disabled) {
if (crtc) {
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
@@ -992,7 +999,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
struct drm_crtc *crtc, *enabled = NULL;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (to_intel_crtc(crtc)->active && crtc->fb) {
+ if (intel_crtc_active(crtc)) {
if (enabled)
return NULL;
enabled = crtc;
@@ -1086,7 +1093,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
int entries, tlb_miss;
crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) {
+ if (!intel_crtc_active(crtc)) {
*cursor_wm = cursor->guard_size;
*plane_wm = display->guard_size;
return false;
@@ -1215,7 +1222,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
int entries;
crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !to_intel_crtc(crtc)->active)
+ if (!intel_crtc_active(crtc))
return false;
clock = crtc->mode.clock; /* VESA DOT Clock */
@@ -1476,7 +1483,7 @@ static void i9xx_update_wm(struct drm_device *dev)
fifo_size = dev_priv->display.get_fifo_size(dev, 0);
crtc = intel_get_crtc_for_plane(dev, 0);
- if (to_intel_crtc(crtc)->active && crtc->fb) {
+ if (intel_crtc_active(crtc)) {
int cpp = crtc->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
@@ -1490,7 +1497,7 @@ static void i9xx_update_wm(struct drm_device *dev)
fifo_size = dev_priv->display.get_fifo_size(dev, 1);
crtc = intel_get_crtc_for_plane(dev, 1);
- if (to_intel_crtc(crtc)->active && crtc->fb) {
+ if (intel_crtc_active(crtc)) {
int cpp = crtc->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
@@ -2044,7 +2051,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
int entries, tlb_miss;
crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) {
+ if (!intel_crtc_active(crtc)) {
*sprite_wm = display->guard_size;
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 827dcd4edf1c..d7b060e0a231 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -120,11 +120,10 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
- linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ linear_offset = y * fb->pitches[0] + x * pixel_size;
sprsurf_offset =
intel_gen4_compute_offset_xtiled(&x, &y,
- fb->bits_per_pixel / 8,
- fb->pitches[0]);
+ pixel_size, fb->pitches[0]);
linear_offset -= sprsurf_offset;
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
@@ -286,11 +285,10 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
- linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ linear_offset = y * fb->pitches[0] + x * pixel_size;
dvssurf_offset =
intel_gen4_compute_offset_xtiled(&x, &y,
- fb->bits_per_pixel / 8,
- fb->pitches[0]);
+ pixel_size, fb->pitches[0]);
linear_offset -= dvssurf_offset;
if (obj->tiling_mode != I915_TILING_NONE)
diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/core/core/client.c
index c617f0480071..8bbb58f94a19 100644
--- a/drivers/gpu/drm/nouveau/core/core/client.c
+++ b/drivers/gpu/drm/nouveau/core/core/client.c
@@ -66,10 +66,8 @@ nouveau_client_create_(const char *name, u64 devname, const char *cfg,
ret = nouveau_handle_create(nv_object(client), ~0, ~0,
nv_object(client), &client->root);
- if (ret) {
- nouveau_namedb_destroy(&client->base);
+ if (ret)
return ret;
- }
/* prevent init/fini being called, the OS is in charge of this */
atomic_set(&nv_object(client)->usecount, 2);
diff --git a/drivers/gpu/drm/nouveau/core/core/handle.c b/drivers/gpu/drm/nouveau/core/core/handle.c
index b8d2cbf8a7a7..264c2b338ac3 100644
--- a/drivers/gpu/drm/nouveau/core/core/handle.c
+++ b/drivers/gpu/drm/nouveau/core/core/handle.c
@@ -109,7 +109,7 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
while (!nv_iclass(namedb, NV_NAMEDB_CLASS))
namedb = namedb->parent;
- handle = *phandle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return -ENOMEM;
@@ -146,6 +146,9 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
}
hprintk(handle, TRACE, "created\n");
+
+ *phandle = handle;
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 0f09af135415..ca1a7d76a95b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -851,20 +851,23 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
- if (nv_device(priv)->chipset < 0x90 ||
- nv_device(priv)->chipset == 0x92 ||
- nv_device(priv)->chipset == 0xa0) {
- for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
- ctrl = nv_rd32(priv, 0x610b74 + (i * 8));
- i += 3;
- } else {
- for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
- ctrl = nv_rd32(priv, 0x610798 + (i * 8));
- i += 3;
+ if (!(ctrl & (1 << head))) {
+ if (nv_device(priv)->chipset < 0x90 ||
+ nv_device(priv)->chipset == 0x92 ||
+ nv_device(priv)->chipset == 0xa0) {
+ for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
+ ctrl = nv_rd32(priv, 0x610b74 + (i * 8));
+ i += 4;
+ } else {
+ for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
+ ctrl = nv_rd32(priv, 0x610798 + (i * 8));
+ i += 4;
+ }
}
if (!(ctrl & (1 << head)))
return false;
+ i--;
data = exec_lookup(priv, head, i, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
if (data) {
@@ -898,20 +901,23 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
- if (nv_device(priv)->chipset < 0x90 ||
- nv_device(priv)->chipset == 0x92 ||
- nv_device(priv)->chipset == 0xa0) {
- for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
- ctrl = nv_rd32(priv, 0x610b70 + (i * 8));
- i += 3;
- } else {
- for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
- ctrl = nv_rd32(priv, 0x610794 + (i * 8));
- i += 3;
+ if (!(ctrl & (1 << head))) {
+ if (nv_device(priv)->chipset < 0x90 ||
+ nv_device(priv)->chipset == 0x92 ||
+ nv_device(priv)->chipset == 0xa0) {
+ for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
+ ctrl = nv_rd32(priv, 0x610b70 + (i * 8));
+ i += 4;
+ } else {
+ for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
+ ctrl = nv_rd32(priv, 0x610794 + (i * 8));
+ i += 4;
+ }
}
if (!(ctrl & (1 << head)))
return 0x0000;
+ i--;
data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1);
if (!data)
diff --git a/drivers/gpu/drm/nouveau/core/include/core/client.h b/drivers/gpu/drm/nouveau/core/include/core/client.h
index 0193532ceac9..63acc0346ff2 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/client.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/client.h
@@ -36,6 +36,9 @@ nouveau_client(void *obj)
int nouveau_client_create_(const char *name, u64 device, const char *cfg,
const char *dbg, int, void **);
+#define nouveau_client_destroy(p) \
+ nouveau_namedb_destroy(&(p)->base)
+
int nouveau_client_init(struct nouveau_client *);
int nouveau_client_fini(struct nouveau_client *, bool suspend);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
index c345097592f2..b2f3d4d0aa49 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
@@ -38,6 +38,8 @@ enum nvbios_pll_type {
PLL_UNK42 = 0x42,
PLL_VPLL0 = 0x80,
PLL_VPLL1 = 0x81,
+ PLL_VPLL2 = 0x82,
+ PLL_VPLL3 = 0x83,
PLL_MAX = 0xff
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 2917d552689b..690ed438b2ad 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -1534,7 +1534,6 @@ init_io(struct nvbios_init *init)
mdelay(10);
init_wr32(init, 0x614100, 0x10000018);
init_wr32(init, 0x614900, 0x10000018);
- return;
}
value = init_rdport(init, port) & mask;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
index f6962c9b6c36..7c9626258a46 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
@@ -52,6 +52,8 @@ nvc0_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
switch (info.type) {
case PLL_VPLL0:
case PLL_VPLL1:
+ case PLL_VPLL2:
+ case PLL_VPLL3:
nv_mask(priv, info.reg + 0x0c, 0x00000000, 0x00000100);
nv_wr32(priv, info.reg + 0x04, (P << 16) | (N << 8) | M);
nv_wr32(priv, info.reg + 0x10, fN << 16);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index 306bdf121452..7606ed15b6fa 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -145,14 +145,14 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
mem->memtype = type;
mem->size = size;
- mutex_lock(&mm->mutex);
+ mutex_lock(&pfb->base.mutex);
do {
if (back)
ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r);
else
ret = nouveau_mm_head(mm, 1, size, ncmin, align, &r);
if (ret) {
- mutex_unlock(&mm->mutex);
+ mutex_unlock(&pfb->base.mutex);
pfb->ram.put(pfb, &mem);
return ret;
}
@@ -160,7 +160,7 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
list_add_tail(&r->rl_entry, &mem->regions);
size -= r->length;
} while (size);
- mutex_unlock(&mm->mutex);
+ mutex_unlock(&pfb->base.mutex);
r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
mem->offset = (u64)r->offset << 12;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
index 1188227ca6aa..6565f3dbbe04 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
@@ -40,15 +40,21 @@ nouveau_instobj_create_(struct nouveau_object *parent,
if (ret)
return ret;
+ mutex_lock(&imem->base.mutex);
list_add(&iobj->head, &imem->list);
+ mutex_unlock(&imem->base.mutex);
return 0;
}
void
nouveau_instobj_destroy(struct nouveau_instobj *iobj)
{
- if (iobj->head.prev)
- list_del(&iobj->head);
+ struct nouveau_subdev *subdev = nv_subdev(iobj->base.engine);
+
+ mutex_lock(&subdev->mutex);
+ list_del(&iobj->head);
+ mutex_unlock(&subdev->mutex);
+
return nouveau_object_destroy(&iobj->base);
}
@@ -88,6 +94,8 @@ nouveau_instmem_init(struct nouveau_instmem *imem)
if (ret)
return ret;
+ mutex_lock(&imem->base.mutex);
+
list_for_each_entry(iobj, &imem->list, head) {
if (iobj->suspend) {
for (i = 0; i < iobj->size; i += 4)
@@ -97,6 +105,8 @@ nouveau_instmem_init(struct nouveau_instmem *imem)
}
}
+ mutex_unlock(&imem->base.mutex);
+
return 0;
}
@@ -104,17 +114,26 @@ int
nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend)
{
struct nouveau_instobj *iobj;
- int i;
+ int i, ret = 0;
if (suspend) {
+ mutex_lock(&imem->base.mutex);
+
list_for_each_entry(iobj, &imem->list, head) {
iobj->suspend = vmalloc(iobj->size);
- if (iobj->suspend) {
- for (i = 0; i < iobj->size; i += 4)
- iobj->suspend[i / 4] = nv_ro32(iobj, i);
- } else
- return -ENOMEM;
+ if (!iobj->suspend) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ for (i = 0; i < iobj->size; i += 4)
+ iobj->suspend[i / 4] = nv_ro32(iobj, i);
}
+
+ mutex_unlock(&imem->base.mutex);
+
+ if (ret)
+ return ret;
}
return nouveau_subdev_fini(&imem->base, suspend);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
index 082c11b75acb..77c67fc970e6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -352,7 +352,7 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
u64 mm_length = (offset + length) - mm_offset;
int ret;
- vm = *pvm = kzalloc(sizeof(*vm), GFP_KERNEL);
+ vm = kzalloc(sizeof(*vm), GFP_KERNEL);
if (!vm)
return -ENOMEM;
@@ -376,6 +376,8 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
return ret;
}
+ *pvm = vm;
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index ac340ba32017..e620ba8271b4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -127,12 +127,26 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
struct nouveau_encoder **pnv_encoder)
{
struct drm_device *dev = connector->dev;
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
- int i;
+ struct nouveau_i2c_port *port = NULL;
+ int i, panel = -ENODEV;
+
+ /* eDP panels need powering on by us (if the VBIOS doesn't default it
+ * to on) before doing any AUX channel transactions. LVDS panel power
+ * is handled by the SOR itself, and not required for LVDS DDC.
+ */
+ if (nv_connector->type == DCB_CONNECTOR_eDP) {
+ panel = gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
+ if (panel == 0) {
+ gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
+ msleep(300);
+ }
+ }
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- struct nouveau_i2c_port *port = NULL;
struct nouveau_encoder *nv_encoder;
struct drm_mode_object *obj;
int id;
@@ -150,11 +164,19 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
port = i2c->find(i2c, nv_encoder->dcb->i2c_index);
if (port && nv_probe_i2c(port, 0x50)) {
*pnv_encoder = nv_encoder;
- return port;
+ break;
}
+
+ port = NULL;
}
- return NULL;
+ /* eDP panel not detected, restore panel power GPIO to previous
+ * state to avoid confusing the SOR for other output types.
+ */
+ if (!port && panel == 0)
+ gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
+
+ return port;
}
static struct nouveau_encoder *
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index e4188f24fc75..508b00a2ce0d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -225,15 +225,6 @@ nouveau_display_init(struct drm_device *dev)
if (ret)
return ret;
- /* power on internal panel if it's not already. the init tables of
- * some vbios default this to off for some reason, causing the
- * panel to not work after resume
- */
- if (gpio && gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff) == 0) {
- gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
- msleep(300);
- }
-
/* enable polling for external displays */
drm_kms_helper_poll_enable(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 180a45e3b525..8b090f1eb51d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -84,11 +84,16 @@ nouveau_cli_create(struct pci_dev *pdev, const char *name,
struct nouveau_cli *cli;
int ret;
+ *pcli = NULL;
ret = nouveau_client_create_(name, nouveau_name(pdev), nouveau_config,
nouveau_debug, size, pcli);
cli = *pcli;
- if (ret)
+ if (ret) {
+ if (cli)
+ nouveau_client_destroy(&cli->base);
+ *pcli = NULL;
return ret;
+ }
mutex_init(&cli->mutex);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index bedafd1c9539..cdb83acdffe2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -60,6 +60,7 @@ u32 nv10_fence_read(struct nouveau_channel *);
void nv10_fence_context_del(struct nouveau_channel *);
void nv10_fence_destroy(struct nouveau_drm *);
int nv10_fence_create(struct nouveau_drm *);
+void nv17_fence_resume(struct nouveau_drm *drm);
int nv50_fence_create(struct nouveau_drm *);
int nv84_fence_create(struct nouveau_drm *);
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 184cdf806761..39ffc07f906b 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -505,7 +505,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
static inline bool is_powersaving_dpms(int mode)
{
- return (mode != DRM_MODE_DPMS_ON);
+ return mode != DRM_MODE_DPMS_ON && mode != NV_DPMS_CLEARED;
}
static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 7ae7f97a6d4d..03017f24d593 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -162,6 +162,13 @@ nv10_fence_destroy(struct nouveau_drm *drm)
kfree(priv);
}
+void nv17_fence_resume(struct nouveau_drm *drm)
+{
+ struct nv10_fence_priv *priv = drm->fence;
+
+ nouveau_bo_wr32(priv->bo, 0, priv->sequence);
+}
+
int
nv10_fence_create(struct nouveau_drm *drm)
{
@@ -197,6 +204,7 @@ nv10_fence_create(struct nouveau_drm *drm)
if (ret == 0) {
nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
priv->base.sync = nv17_fence_sync;
+ priv->base.resume = nv17_fence_resume;
}
}
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index c20f2727ea0b..d889f3ac0d41 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -122,6 +122,7 @@ nv50_fence_create(struct nouveau_drm *drm)
if (ret == 0) {
nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
priv->base.sync = nv17_fence_sync;
+ priv->base.resume = nv17_fence_resume;
}
if (ret)
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 03191a56eb44..69ec24ab8d63 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -2476,8 +2476,10 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
kfree(parser->relocs);
for (i = 0; i < parser->nchunks; i++) {
kfree(parser->chunks[i].kdata);
- kfree(parser->chunks[i].kpage[0]);
- kfree(parser->chunks[i].kpage[1]);
+ if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
+ kfree(parser->chunks[i].kpage[0]);
+ kfree(parser->chunks[i].kpage[1]);
+ }
}
kfree(parser->chunks);
kfree(parser->chunks_array);
@@ -2561,16 +2563,16 @@ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_chunk *relocs_chunk;
unsigned idx;
+ *cs_reloc = NULL;
if (p->chunk_relocs_idx == -1) {
DRM_ERROR("No relocation chunk !\n");
return -EINVAL;
}
- *cs_reloc = NULL;
relocs_chunk = &p->chunks[p->chunk_relocs_idx];
idx = p->dma_reloc_idx;
- if (idx >= relocs_chunk->length_dw) {
+ if (idx >= p->nrelocs) {
DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
- idx, relocs_chunk->length_dw);
+ idx, p->nrelocs);
return -EINVAL;
}
*cs_reloc = p->relocs_ptr[idx];
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 396baba0141a..469661fd1903 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -279,13 +279,13 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->chunks[p->chunk_ib_idx].length_dw);
return -EINVAL;
}
- if ((p->rdev->flags & RADEON_IS_AGP)) {
+ if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
- kfree(p->chunks[i].kpage[0]);
- kfree(p->chunks[i].kpage[1]);
+ kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
+ kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
return -ENOMEM;
}
}
@@ -583,7 +583,8 @@ static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
int i;
int size = PAGE_SIZE;
- bool copy1 = (p->rdev->flags & RADEON_IS_AGP) ? false : true;
+ bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ?
+ false : true;
for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index f5ba2241dacc..62cd512f5c8d 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -640,6 +640,14 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
enum drm_connector_status found = connector_status_disconnected;
bool color = true;
+ /* just don't bother on RN50: those chips are often connected to remoting
+ * console hw and load detection often fails on them. So to make
+ * everyone happy, report the encoder as always connected.
+ */
+ if (ASIC_IS_RN50(rdev)) {
+ return connector_status_connected;
+ }
+
/* save the regs we need */
vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 512f44add89f..fe5cdbcf2636 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -22,13 +22,17 @@
static u8 *udl_get_edid(struct udl_device *udl)
{
u8 *block;
- char rbuf[3];
+ char *rbuf;
int ret, i;
block = kmalloc(EDID_LENGTH, GFP_KERNEL);
if (block == NULL)
return NULL;
+ rbuf = kmalloc(2, GFP_KERNEL);
+ if (rbuf == NULL)
+ goto error;
+
for (i = 0; i < EDID_LENGTH; i++) {
ret = usb_control_msg(udl->ddev->usbdev,
usb_rcvctrlpipe(udl->ddev->usbdev, 0), (0x02),
@@ -36,16 +40,17 @@ static u8 *udl_get_edid(struct udl_device *udl)
HZ);
if (ret < 1) {
DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
- i--;
goto error;
}
block[i] = rbuf[1];
}
+ kfree(rbuf);
return block;
error:
kfree(block);
+ kfree(rbuf);
return NULL;
}
@@ -57,6 +62,14 @@ static int udl_get_modes(struct drm_connector *connector)
edid = (struct edid *)udl_get_edid(udl);
+ /*
+ * We only read the main block, but if the monitor reports extension
+ * blocks then the drm edid code expects them to be present, so patch
+ * the extension count to 0.
+ */
+ edid->checksum += edid->extensions;
+ edid->extensions = 0;
+
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
diff --git a/drivers/gpu/ion/Kconfig b/drivers/gpu/ion/Kconfig
new file mode 100644
index 000000000000..b5bfdb47fd09
--- /dev/null
+++ b/drivers/gpu/ion/Kconfig
@@ -0,0 +1,13 @@
+menuconfig ION
+ tristate "Ion Memory Manager"
+ select GENERIC_ALLOCATOR
+ select DMA_SHARED_BUFFER
+ help
+ Choose this option to enable the ION Memory Manager.
+
+config ION_TEGRA
+ tristate "Ion for Tegra"
+ depends on ARCH_TEGRA && ION
+ help
+ Choose this option if you wish to use ion on an nVidia Tegra.
+
diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile
new file mode 100644
index 000000000000..d1ddebb74a3f
--- /dev/null
+++ b/drivers/gpu/ion/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_ION) += ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
+ ion_carveout_heap.o
+obj-$(CONFIG_ION_TEGRA) += tegra/
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
new file mode 100644
index 000000000000..c30b8fb41a79
--- /dev/null
+++ b/drivers/gpu/ion/ion.c
@@ -0,0 +1,1358 @@
+/*
+ * drivers/gpu/ion/ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/ion.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/miscdevice.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+
+#include "ion_priv.h"
+
+/**
+ * struct ion_device - the metadata of the ion device node
+ * @dev: the actual misc device
+ * @buffers: an rb tree of all the existing buffers
+ * @buffer_lock: lock protecting the tree of buffers
+ * @lock: rwsem protecting the tree of heaps and clients
+ * @heaps: rb tree of all the heaps in the system
+ * @custom_ioctl: arch/device specific ioctl hook
+ * @clients: rb tree of all the clients created against this device
+ * @debug_root: debugfs root directory for ion
+ */
+struct ion_device {
+ struct miscdevice dev;
+ struct rb_root buffers;
+ struct mutex buffer_lock;
+ struct rw_semaphore lock;
+ struct rb_root heaps;
+ long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
+ unsigned long arg);
+ struct rb_root clients;
+ struct dentry *debug_root;
+};
+
+/**
+ * struct ion_client - a process/hw block local address space
+ * @node: node in the tree of all clients
+ * @dev: backpointer to ion device
+ * @handles: an rb tree of all the handles in this client
+ * @lock: lock protecting the tree of handles
+ * @heap_mask: mask of all supported heaps
+ * @name: used for debugging
+ * @task: used for debugging
+ *
+ * A client represents a list of buffers this client may access.
+ * The mutex stored here is used to protect both the tree of handles
+ * and the handles themselves, and should be held while modifying either.
+ */
+struct ion_client {
+ struct rb_node node;
+ struct ion_device *dev;
+ struct rb_root handles;
+ struct mutex lock;
+ unsigned int heap_mask;
+ const char *name;
+ struct task_struct *task;
+ pid_t pid;
+ struct dentry *debug_root;
+};
+
+/**
+ * ion_handle - a client local reference to a buffer
+ * @ref: reference count
+ * @client: back pointer to the client the buffer resides in
+ * @buffer: pointer to the buffer
+ * @node: node in the client's handle rbtree
+ * @kmap_cnt: count of times this client has mapped to kernel
+ *
+ * Modifications to node, kmap_cnt or buffer should be protected by the
+ * lock in the client. Other fields are never changed after initialization.
+ */
+struct ion_handle {
+ struct kref ref;
+ struct ion_client *client;
+ struct ion_buffer *buffer;
+ struct rb_node node;
+ unsigned int kmap_cnt;
+};
+
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
+{
+ return ((buffer->flags & ION_FLAG_CACHED) &&
+ !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
+}
+
+bool ion_buffer_cached(struct ion_buffer *buffer)
+{
+ return !!(buffer->flags & ION_FLAG_CACHED);
+}
+
+/* this function should only be called while dev->lock is held */
+static void ion_buffer_add(struct ion_device *dev,
+ struct ion_buffer *buffer)
+{
+ struct rb_node **p = &dev->buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_buffer *entry;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_buffer, node);
+
+ if (buffer < entry) {
+ p = &(*p)->rb_left;
+ } else if (buffer > entry) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_err("%s: buffer already found.", __func__);
+ BUG();
+ }
+ }
+
+ rb_link_node(&buffer->node, parent, p);
+ rb_insert_color(&buffer->node, &dev->buffers);
+}
+
+static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);
+
+/* this function should only be called while dev->lock is held */
+static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
+ struct ion_device *dev,
+ unsigned long len,
+ unsigned long align,
+ unsigned long flags)
+{
+ struct ion_buffer *buffer;
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int i, ret;
+
+ buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
+ if (!buffer)
+ return ERR_PTR(-ENOMEM);
+
+ buffer->heap = heap;
+ buffer->flags = flags;
+ kref_init(&buffer->ref);
+
+ ret = heap->ops->allocate(heap, buffer, len, align, flags);
+ if (ret) {
+ kfree(buffer);
+ return ERR_PTR(ret);
+ }
+
+ buffer->dev = dev;
+ buffer->size = len;
+
+ table = heap->ops->map_dma(heap, buffer);
+ if (IS_ERR_OR_NULL(table)) {
+ heap->ops->free(buffer);
+ kfree(buffer);
+ return ERR_PTR(PTR_ERR(table));
+ }
+ buffer->sg_table = table;
+ if (ion_buffer_fault_user_mappings(buffer)) {
+ for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
+ i) {
+ if (sg_dma_len(sg) == PAGE_SIZE)
+ continue;
+ pr_err("%s: cached mappings that will be faulted in "
+ "must have pagewise sg_lists\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = ion_buffer_alloc_dirty(buffer);
+ if (ret)
+ goto err;
+ }
+
+ buffer->dev = dev;
+ buffer->size = len;
+ INIT_LIST_HEAD(&buffer->vmas);
+ mutex_init(&buffer->lock);
+ /* this will set up dma addresses for the sglist -- it is not
+ technically correct as per the dma api -- a specific
+ device isn't really taking ownership here. However, in practice on
+ our systems the only dma_address space is physical addresses.
+ Additionally, we can't afford the overhead of invalidating every
+ allocation via dma_map_sg. The implicit contract here is that
+ memory coming from the heaps is ready for dma, i.e. if it has a
+ cached mapping that mapping has been invalidated */
+ for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
+ sg_dma_address(sg) = sg_phys(sg);
+ mutex_lock(&dev->buffer_lock);
+ ion_buffer_add(dev, buffer);
+ mutex_unlock(&dev->buffer_lock);
+ return buffer;
+
+err:
+ heap->ops->unmap_dma(heap, buffer);
+ heap->ops->free(buffer);
+ kfree(buffer);
+ return ERR_PTR(ret);
+}
+
+static void ion_buffer_destroy(struct kref *kref)
+{
+ struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
+ struct ion_device *dev = buffer->dev;
+
+ if (WARN_ON(buffer->kmap_cnt > 0))
+ buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+ buffer->heap->ops->unmap_dma(buffer->heap, buffer);
+ buffer->heap->ops->free(buffer);
+ mutex_lock(&dev->buffer_lock);
+ rb_erase(&buffer->node, &dev->buffers);
+ mutex_unlock(&dev->buffer_lock);
+ if (buffer->flags & ION_FLAG_CACHED)
+ kfree(buffer->dirty);
+ kfree(buffer);
+}
+
+static void ion_buffer_get(struct ion_buffer *buffer)
+{
+ kref_get(&buffer->ref);
+}
+
+static int ion_buffer_put(struct ion_buffer *buffer)
+{
+ return kref_put(&buffer->ref, ion_buffer_destroy);
+}
+
+static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
+{
+ mutex_lock(&buffer->lock);
+ buffer->handle_count++;
+ mutex_unlock(&buffer->lock);
+}
+
+static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
+{
+ /*
+ * when a buffer is removed from a handle, if it is not in
+ * any other handles, copy the taskcomm and the pid of the
+ * process it's being removed from into the buffer. At this
+ * point there will be no way to track what processes this buffer is
+ * being used by, it only exists as a dma_buf file descriptor.
+ * The taskcomm and pid can provide a debug hint as to where this fd
+ * is in the system
+ */
+ mutex_lock(&buffer->lock);
+ buffer->handle_count--;
+ BUG_ON(buffer->handle_count < 0);
+ if (!buffer->handle_count) {
+ struct task_struct *task;
+
+ task = current->group_leader;
+ get_task_comm(buffer->task_comm, task);
+ buffer->pid = task_pid_nr(task);
+ }
+ mutex_unlock(&buffer->lock);
+}
+
+static struct ion_handle *ion_handle_create(struct ion_client *client,
+ struct ion_buffer *buffer)
+{
+ struct ion_handle *handle;
+
+ handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
+ if (!handle)
+ return ERR_PTR(-ENOMEM);
+ kref_init(&handle->ref);
+ RB_CLEAR_NODE(&handle->node);
+ handle->client = client;
+ ion_buffer_get(buffer);
+ ion_buffer_add_to_handle(buffer);
+ handle->buffer = buffer;
+
+ return handle;
+}
+
+static void ion_handle_kmap_put(struct ion_handle *);
+
+static void ion_handle_destroy(struct kref *kref)
+{
+ struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
+ struct ion_client *client = handle->client;
+ struct ion_buffer *buffer = handle->buffer;
+
+ mutex_lock(&buffer->lock);
+ while (handle->kmap_cnt)
+ ion_handle_kmap_put(handle);
+ mutex_unlock(&buffer->lock);
+
+ if (!RB_EMPTY_NODE(&handle->node))
+ rb_erase(&handle->node, &client->handles);
+
+ ion_buffer_remove_from_handle(buffer);
+ ion_buffer_put(buffer);
+
+ kfree(handle);
+}
+
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
+{
+ return handle->buffer;
+}
+
+static void ion_handle_get(struct ion_handle *handle)
+{
+ kref_get(&handle->ref);
+}
+
+static int ion_handle_put(struct ion_handle *handle)
+{
+ return kref_put(&handle->ref, ion_handle_destroy);
+}
+
+static struct ion_handle *ion_handle_lookup(struct ion_client *client,
+ struct ion_buffer *buffer)
+{
+ struct rb_node *n;
+
+ for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+ struct ion_handle *handle = rb_entry(n, struct ion_handle,
+ node);
+ if (handle->buffer == buffer)
+ return handle;
+ }
+ return NULL;
+}
+
+static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
+{
+ struct rb_node *n = client->handles.rb_node;
+
+ while (n) {
+ struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
+ node);
+ if (handle < handle_node)
+ n = n->rb_left;
+ else if (handle > handle_node)
+ n = n->rb_right;
+ else
+ return true;
+ }
+ return false;
+}
+
+static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
+{
+ struct rb_node **p = &client->handles.rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_handle *entry;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_handle, node);
+
+ if (handle < entry)
+ p = &(*p)->rb_left;
+ else if (handle > entry)
+ p = &(*p)->rb_right;
+ else
+ WARN(1, "%s: handle already found.", __func__);
+ }
+
+ rb_link_node(&handle->node, parent, p);
+ rb_insert_color(&handle->node, &client->handles);
+}
+
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+ size_t align, unsigned int heap_mask,
+ unsigned int flags)
+{
+ struct rb_node *n;
+ struct ion_handle *handle;
+ struct ion_device *dev = client->dev;
+ struct ion_buffer *buffer = NULL;
+
+ pr_debug("%s: len %d align %d heap_mask %u flags %x\n", __func__, len,
+ align, heap_mask, flags);
+ /*
+ * traverse the list of heaps available in this system in priority
+ * order. If the heap type is supported by the client, and matches the
+ * request of the caller allocate from it. Repeat until allocate has
+ * succeeded or all heaps have been tried
+ */
+ if (WARN_ON(!len))
+ return ERR_PTR(-EINVAL);
+
+ len = PAGE_ALIGN(len);
+
+ down_read(&dev->lock);
+ for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
+ struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
+ /* if the client doesn't support this heap type */
+ if (!((1 << heap->type) & client->heap_mask))
+ continue;
+ /* if the caller didn't specify this heap type */
+ if (!((1 << heap->id) & heap_mask))
+ continue;
+ buffer = ion_buffer_create(heap, dev, len, align, flags);
+ if (!IS_ERR_OR_NULL(buffer))
+ break;
+ }
+ up_read(&dev->lock);
+
+ if (buffer == NULL)
+ return ERR_PTR(-ENODEV);
+
+ if (IS_ERR(buffer))
+ return ERR_PTR(PTR_ERR(buffer));
+
+ handle = ion_handle_create(client, buffer);
+
+ /*
+ * ion_buffer_create will create a buffer with a ref_cnt of 1,
+ * and ion_handle_create will take a second reference, drop one here
+ */
+ ion_buffer_put(buffer);
+
+ if (!IS_ERR(handle)) {
+ mutex_lock(&client->lock);
+ ion_handle_add(client, handle);
+ mutex_unlock(&client->lock);
+ }
+
+
+ return handle;
+}
+EXPORT_SYMBOL(ion_alloc);
+
+void ion_free(struct ion_client *client, struct ion_handle *handle)
+{
+ bool valid_handle;
+
+ BUG_ON(client != handle->client);
+
+ mutex_lock(&client->lock);
+ valid_handle = ion_handle_validate(client, handle);
+
+ if (!valid_handle) {
+ WARN(1, "%s: invalid handle passed to free.\n", __func__);
+ mutex_unlock(&client->lock);
+ return;
+ }
+ ion_handle_put(handle);
+ mutex_unlock(&client->lock);
+}
+EXPORT_SYMBOL(ion_free);
+
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct ion_buffer *buffer;
+ int ret;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ mutex_unlock(&client->lock);
+ return -EINVAL;
+ }
+
+ buffer = handle->buffer;
+
+ if (!buffer->heap->ops->phys) {
+ pr_err("%s: ion_phys is not implemented by this heap.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return -ENODEV;
+ }
+ mutex_unlock(&client->lock);
+ ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
+ return ret;
+}
+EXPORT_SYMBOL(ion_phys);
+
+static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
+{
+ void *vaddr;
+
+ if (buffer->kmap_cnt) {
+ buffer->kmap_cnt++;
+ return buffer->vaddr;
+ }
+ vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
+ if (IS_ERR_OR_NULL(vaddr))
+ return vaddr;
+ buffer->vaddr = vaddr;
+ buffer->kmap_cnt++;
+ return vaddr;
+}
+
+static void *ion_handle_kmap_get(struct ion_handle *handle)
+{
+ struct ion_buffer *buffer = handle->buffer;
+ void *vaddr;
+
+ if (handle->kmap_cnt) {
+ handle->kmap_cnt++;
+ return buffer->vaddr;
+ }
+ vaddr = ion_buffer_kmap_get(buffer);
+ if (IS_ERR_OR_NULL(vaddr))
+ return vaddr;
+ handle->kmap_cnt++;
+ return vaddr;
+}
+
+static void ion_buffer_kmap_put(struct ion_buffer *buffer)
+{
+ buffer->kmap_cnt--;
+ if (!buffer->kmap_cnt) {
+ buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+ buffer->vaddr = NULL;
+ }
+}
+
+static void ion_handle_kmap_put(struct ion_handle *handle)
+{
+ struct ion_buffer *buffer = handle->buffer;
+
+ handle->kmap_cnt--;
+ if (!handle->kmap_cnt)
+ ion_buffer_kmap_put(buffer);
+}
+
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+ void *vaddr;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ pr_err("%s: invalid handle passed to map_kernel.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-EINVAL);
+ }
+
+ buffer = handle->buffer;
+
+ if (!handle->buffer->heap->ops->map_kernel) {
+ pr_err("%s: map_kernel is not implemented by this heap.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-ENODEV);
+ }
+
+ mutex_lock(&buffer->lock);
+ vaddr = ion_handle_kmap_get(handle);
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
+ return vaddr;
+}
+EXPORT_SYMBOL(ion_map_kernel);
+
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+
+ mutex_lock(&client->lock);
+ buffer = handle->buffer;
+ mutex_lock(&buffer->lock);
+ ion_handle_kmap_put(handle);
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
+}
+EXPORT_SYMBOL(ion_unmap_kernel);
+
+static int ion_debug_client_show(struct seq_file *s, void *unused)
+{
+ struct ion_client *client = s->private;
+ struct rb_node *n;
+ size_t sizes[ION_NUM_HEAPS] = {0};
+ const char *names[ION_NUM_HEAPS] = {0};
+ int i;
+
+ mutex_lock(&client->lock);
+ for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+ struct ion_handle *handle = rb_entry(n, struct ion_handle,
+ node);
+ enum ion_heap_type type = handle->buffer->heap->type;
+
+ if (!names[type])
+ names[type] = handle->buffer->heap->name;
+ sizes[type] += handle->buffer->size;
+ }
+ mutex_unlock(&client->lock);
+
+ seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
+ for (i = 0; i < ION_NUM_HEAPS; i++) {
+ if (!names[i])
+ continue;
+ seq_printf(s, "%16.16s: %16u\n", names[i], sizes[i]);
+ }
+ return 0;
+}
+
+static int ion_debug_client_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ion_debug_client_show, inode->i_private);
+}
+
+static const struct file_operations debug_client_fops = {
+ .open = ion_debug_client_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+struct ion_client *ion_client_create(struct ion_device *dev,
+ unsigned int heap_mask,
+ const char *name)
+{
+ struct ion_client *client;
+ struct task_struct *task;
+ struct rb_node **p;
+ struct rb_node *parent = NULL;
+ struct ion_client *entry;
+ char debug_name[64];
+ pid_t pid;
+
+ get_task_struct(current->group_leader);
+ task_lock(current->group_leader);
+ pid = task_pid_nr(current->group_leader);
+ /* don't bother to store task struct for kernel threads,
+ they can't be killed anyway */
+ if (current->group_leader->flags & PF_KTHREAD) {
+ put_task_struct(current->group_leader);
+ task = NULL;
+ } else {
+ task = current->group_leader;
+ }
+ task_unlock(current->group_leader);
+
+ client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
+ if (!client) {
+ if (task)
+ put_task_struct(current->group_leader);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ client->dev = dev;
+ client->handles = RB_ROOT;
+ mutex_init(&client->lock);
+ client->name = name;
+ client->heap_mask = heap_mask;
+ client->task = task;
+ client->pid = pid;
+
+ down_write(&dev->lock);
+ p = &dev->clients.rb_node;
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_client, node);
+
+ if (client < entry)
+ p = &(*p)->rb_left;
+ else if (client > entry)
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&client->node, parent, p);
+ rb_insert_color(&client->node, &dev->clients);
+
+ snprintf(debug_name, 64, "%u", client->pid);
+ client->debug_root = debugfs_create_file(debug_name, 0664,
+ dev->debug_root, client,
+ &debug_client_fops);
+ up_write(&dev->lock);
+
+ return client;
+}
+
+void ion_client_destroy(struct ion_client *client)
+{
+ struct ion_device *dev = client->dev;
+ struct rb_node *n;
+
+ pr_debug("%s: %d\n", __func__, __LINE__);
+ while ((n = rb_first(&client->handles))) {
+ struct ion_handle *handle = rb_entry(n, struct ion_handle,
+ node);
+ ion_handle_destroy(&handle->ref);
+ }
+ down_write(&dev->lock);
+ if (client->task)
+ put_task_struct(client->task);
+ rb_erase(&client->node, &dev->clients);
+ debugfs_remove_recursive(client->debug_root);
+ up_write(&dev->lock);
+
+ kfree(client);
+}
+EXPORT_SYMBOL(ion_client_destroy);
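
For orientation, a minimal sketch of how an in-kernel user might drive the client API exported above (ion_client_create, ion_alloc, ion_map_kernel, ion_unmap_kernel, ion_free, ion_client_destroy). It is not part of the patch; the <linux/ion.h> include location and the example heap mask/flags values are assumptions.

/* Hypothetical in-kernel caller; values marked "assumed" are illustrative only. */
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/ion.h>			/* assumed location of the client API declarations */

static int example_ion_use(struct ion_device *idev)
{
	struct ion_client *client;
	struct ion_handle *handle;
	void *vaddr;
	int ret = 0;

	/* heap_mask of -1: the client may allocate from any heap type */
	client = ion_client_create(idev, -1, "example");
	if (IS_ERR_OR_NULL(client))
		return client ? PTR_ERR(client) : -ENOMEM;

	/* one page, page aligned, any heap id, uncached (no ION_FLAG_CACHED) */
	handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE, ~0u, 0);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_client;
	}

	vaddr = ion_map_kernel(client, handle);
	if (IS_ERR_OR_NULL(vaddr)) {
		ret = vaddr ? PTR_ERR(vaddr) : -ENOMEM;
		goto out_handle;
	}

	memset(vaddr, 0, PAGE_SIZE);	/* ... use the buffer ... */
	ion_unmap_kernel(client, handle);

out_handle:
	ion_free(client, handle);
out_client:
	ion_client_destroy(client);
	return ret;
}
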
+
+struct sg_table *ion_sg_table(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+ struct sg_table *table;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ pr_err("%s: invalid handle passed to map_dma.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-EINVAL);
+ }
+ buffer = handle->buffer;
+ table = buffer->sg_table;
+ mutex_unlock(&client->lock);
+ return table;
+}
+EXPORT_SYMBOL(ion_sg_table);
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+ struct device *dev,
+ enum dma_data_direction direction);
+
+static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct dma_buf *dmabuf = attachment->dmabuf;
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ ion_buffer_sync_for_device(buffer, attachment->dev, direction);
+ return buffer->sg_table;
+}
+
+static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
+}
+
+static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
+{
+ unsigned long pages = buffer->sg_table->nents;
+ unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;
+
+ buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
+ if (!buffer->dirty)
+ return -ENOMEM;
+ return 0;
+}
+
+struct ion_vma_list {
+ struct list_head list;
+ struct vm_area_struct *vma;
+};
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+ struct device *dev,
+ enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+ struct ion_vma_list *vma_list;
+
+ pr_debug("%s: syncing for device %s\n", __func__,
+ dev ? dev_name(dev) : "null");
+
+ if (!ion_buffer_fault_user_mappings(buffer))
+ return;
+
+ mutex_lock(&buffer->lock);
+ for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
+ if (!test_bit(i, buffer->dirty))
+ continue;
+ dma_sync_sg_for_device(dev, sg, 1, dir);
+ clear_bit(i, buffer->dirty);
+ }
+ list_for_each_entry(vma_list, &buffer->vmas, list) {
+ struct vm_area_struct *vma = vma_list->vma;
+
+ zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
+ NULL);
+ }
+ mutex_unlock(&buffer->lock);
+}
+
+int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct ion_buffer *buffer = vma->vm_private_data;
+ struct scatterlist *sg;
+ int i;
+
+ mutex_lock(&buffer->lock);
+ set_bit(vmf->pgoff, buffer->dirty);
+
+ for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
+ if (i != vmf->pgoff)
+ continue;
+ dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
+ vm_insert_page(vma, (unsigned long)vmf->virtual_address,
+ sg_page(sg));
+ break;
+ }
+ mutex_unlock(&buffer->lock);
+ return VM_FAULT_NOPAGE;
+}
+
+static void ion_vm_open(struct vm_area_struct *vma)
+{
+ struct ion_buffer *buffer = vma->vm_private_data;
+ struct ion_vma_list *vma_list;
+
+ vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
+ if (!vma_list)
+ return;
+ vma_list->vma = vma;
+ mutex_lock(&buffer->lock);
+ list_add(&vma_list->list, &buffer->vmas);
+ mutex_unlock(&buffer->lock);
+ pr_debug("%s: adding %p\n", __func__, vma);
+}
+
+static void ion_vm_close(struct vm_area_struct *vma)
+{
+ struct ion_buffer *buffer = vma->vm_private_data;
+ struct ion_vma_list *vma_list, *tmp;
+
+ pr_debug("%s\n", __func__);
+ mutex_lock(&buffer->lock);
+ list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
+ if (vma_list->vma != vma)
+ continue;
+ list_del(&vma_list->list);
+ kfree(vma_list);
+ pr_debug("%s: deleting %p\n", __func__, vma);
+ break;
+ }
+ mutex_unlock(&buffer->lock);
+}
+
+struct vm_operations_struct ion_vma_ops = {
+ .open = ion_vm_open,
+ .close = ion_vm_close,
+ .fault = ion_vm_fault,
+};
+
+static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ int ret = 0;
+
+ if (!buffer->heap->ops->map_user) {
+ pr_err("%s: this heap does not define a method for mapping "
+ "to userspace\n", __func__);
+ return -EINVAL;
+ }
+
+ if (ion_buffer_fault_user_mappings(buffer)) {
+ vma->vm_private_data = buffer;
+ vma->vm_ops = &ion_vma_ops;
+ ion_vm_open(vma);
+ return 0;
+ }
+
+ if (!(buffer->flags & ION_FLAG_CACHED))
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ mutex_lock(&buffer->lock);
+ /* now map it to userspace */
+ ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
+ mutex_unlock(&buffer->lock);
+
+ if (ret)
+ pr_err("%s: failure mapping buffer to userspace\n",
+ __func__);
+
+ return ret;
+}
+
+static void ion_dma_buf_release(struct dma_buf *dmabuf)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ ion_buffer_put(buffer);
+}
+
+static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ return buffer->vaddr + offset * PAGE_SIZE;
+}
+
+static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
+ void *ptr)
+{
+ return;
+}
+
+static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
+ size_t len,
+ enum dma_data_direction direction)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ void *vaddr;
+
+ if (!buffer->heap->ops->map_kernel) {
+ pr_err("%s: map kernel is not implemented by this heap.\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&buffer->lock);
+ vaddr = ion_buffer_kmap_get(buffer);
+ mutex_unlock(&buffer->lock);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+ if (!vaddr)
+ return -ENOMEM;
+ return 0;
+}
+
+static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
+ size_t len,
+ enum dma_data_direction direction)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ ion_buffer_kmap_put(buffer);
+ mutex_unlock(&buffer->lock);
+}
+
+struct dma_buf_ops dma_buf_ops = {
+ .map_dma_buf = ion_map_dma_buf,
+ .unmap_dma_buf = ion_unmap_dma_buf,
+ .mmap = ion_mmap,
+ .release = ion_dma_buf_release,
+ .begin_cpu_access = ion_dma_buf_begin_cpu_access,
+ .end_cpu_access = ion_dma_buf_end_cpu_access,
+ .kmap_atomic = ion_dma_buf_kmap,
+ .kunmap_atomic = ion_dma_buf_kunmap,
+ .kmap = ion_dma_buf_kmap,
+ .kunmap = ion_dma_buf_kunmap,
+};
+
+int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+ struct dma_buf *dmabuf;
+ bool valid_handle;
+ int fd;
+
+ mutex_lock(&client->lock);
+ valid_handle = ion_handle_validate(client, handle);
+ mutex_unlock(&client->lock);
+ if (!valid_handle) {
+ WARN(1, "%s: invalid handle passed to share.\n", __func__);
+ return -EINVAL;
+ }
+
+ buffer = handle->buffer;
+ ion_buffer_get(buffer);
+ dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
+ if (IS_ERR(dmabuf)) {
+ ion_buffer_put(buffer);
+ return PTR_ERR(dmabuf);
+ }
+ fd = dma_buf_fd(dmabuf, O_CLOEXEC);
+ if (fd < 0)
+ dma_buf_put(dmabuf);
+
+ return fd;
+}
+EXPORT_SYMBOL(ion_share_dma_buf);
+
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
+{
+ struct dma_buf *dmabuf;
+ struct ion_buffer *buffer;
+ struct ion_handle *handle;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR_OR_NULL(dmabuf))
+ return ERR_PTR(PTR_ERR(dmabuf));
+ /* if this memory came from ion */
+
+ if (dmabuf->ops != &dma_buf_ops) {
+ pr_err("%s: can not import dmabuf from another exporter\n",
+ __func__);
+ dma_buf_put(dmabuf);
+ return ERR_PTR(-EINVAL);
+ }
+ buffer = dmabuf->priv;
+
+ mutex_lock(&client->lock);
+ /* if a handle exists for this buffer just take a reference to it */
+ handle = ion_handle_lookup(client, buffer);
+ if (!IS_ERR_OR_NULL(handle)) {
+ ion_handle_get(handle);
+ goto end;
+ }
+ handle = ion_handle_create(client, buffer);
+ if (IS_ERR_OR_NULL(handle))
+ goto end;
+ ion_handle_add(client, handle);
+end:
+ mutex_unlock(&client->lock);
+ dma_buf_put(dmabuf);
+ return handle;
+}
+EXPORT_SYMBOL(ion_import_dma_buf);
+
+static int ion_sync_for_device(struct ion_client *client, int fd)
+{
+ struct dma_buf *dmabuf;
+ struct ion_buffer *buffer;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR_OR_NULL(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ /* if this memory came from ion */
+ if (dmabuf->ops != &dma_buf_ops) {
+ pr_err("%s: can not sync dmabuf from another exporter\n",
+ __func__);
+ dma_buf_put(dmabuf);
+ return -EINVAL;
+ }
+ buffer = dmabuf->priv;
+
+ dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+ buffer->sg_table->nents, DMA_BIDIRECTIONAL);
+ dma_buf_put(dmabuf);
+ return 0;
+}
+
+static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct ion_client *client = filp->private_data;
+
+ switch (cmd) {
+ case ION_IOC_ALLOC:
+ {
+ struct ion_allocation_data data;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+ return -EFAULT;
+ data.handle = ion_alloc(client, data.len, data.align,
+ data.heap_mask, data.flags);
+
+ if (IS_ERR(data.handle))
+ return PTR_ERR(data.handle);
+
+ if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+ ion_free(client, data.handle);
+ return -EFAULT;
+ }
+ break;
+ }
+ case ION_IOC_FREE:
+ {
+ struct ion_handle_data data;
+ bool valid;
+
+ if (copy_from_user(&data, (void __user *)arg,
+ sizeof(struct ion_handle_data)))
+ return -EFAULT;
+ mutex_lock(&client->lock);
+ valid = ion_handle_validate(client, data.handle);
+ mutex_unlock(&client->lock);
+ if (!valid)
+ return -EINVAL;
+ ion_free(client, data.handle);
+ break;
+ }
+ case ION_IOC_SHARE:
+ {
+ struct ion_fd_data data;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+ return -EFAULT;
+ data.fd = ion_share_dma_buf(client, data.handle);
+ if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+ return -EFAULT;
+ if (data.fd < 0)
+ return data.fd;
+ break;
+ }
+ case ION_IOC_IMPORT:
+ {
+ struct ion_fd_data data;
+ int ret = 0;
+ if (copy_from_user(&data, (void __user *)arg,
+ sizeof(struct ion_fd_data)))
+ return -EFAULT;
+ data.handle = ion_import_dma_buf(client, data.fd);
+ if (IS_ERR(data.handle)) {
+ ret = PTR_ERR(data.handle);
+ data.handle = NULL;
+ }
+ if (copy_to_user((void __user *)arg, &data,
+ sizeof(struct ion_fd_data)))
+ return -EFAULT;
+ if (ret < 0)
+ return ret;
+ break;
+ }
+ case ION_IOC_SYNC:
+ {
+ struct ion_fd_data data;
+ if (copy_from_user(&data, (void __user *)arg,
+ sizeof(struct ion_fd_data)))
+ return -EFAULT;
+ ion_sync_for_device(client, data.fd);
+ break;
+ }
+ case ION_IOC_CUSTOM:
+ {
+ struct ion_device *dev = client->dev;
+ struct ion_custom_data data;
+
+ if (!dev->custom_ioctl)
+ return -ENOTTY;
+ if (copy_from_user(&data, (void __user *)arg,
+ sizeof(struct ion_custom_data)))
+ return -EFAULT;
+ return dev->custom_ioctl(client, data.cmd, data.arg);
+ }
+ default:
+ return -ENOTTY;
+ }
+ return 0;
+}
+
+static int ion_release(struct inode *inode, struct file *file)
+{
+ struct ion_client *client = file->private_data;
+
+ pr_debug("%s: %d\n", __func__, __LINE__);
+ ion_client_destroy(client);
+ return 0;
+}
+
+static int ion_open(struct inode *inode, struct file *file)
+{
+ struct miscdevice *miscdev = file->private_data;
+ struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
+ struct ion_client *client;
+
+ pr_debug("%s: %d\n", __func__, __LINE__);
+ client = ion_client_create(dev, -1, "user");
+ if (IS_ERR_OR_NULL(client))
+ return PTR_ERR(client);
+ file->private_data = client;
+
+ return 0;
+}
+
+static const struct file_operations ion_fops = {
+ .owner = THIS_MODULE,
+ .open = ion_open,
+ .release = ion_release,
+ .unlocked_ioctl = ion_ioctl,
+};
+
+static size_t ion_debug_heap_total(struct ion_client *client,
+ enum ion_heap_type type)
+{
+ size_t size = 0;
+ struct rb_node *n;
+
+ mutex_lock(&client->lock);
+ for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+ struct ion_handle *handle = rb_entry(n,
+ struct ion_handle,
+ node);
+ if (handle->buffer->heap->type == type)
+ size += handle->buffer->size;
+ }
+ mutex_unlock(&client->lock);
+ return size;
+}
+
+static int ion_debug_heap_show(struct seq_file *s, void *unused)
+{
+ struct ion_heap *heap = s->private;
+ struct ion_device *dev = heap->dev;
+ struct rb_node *n;
+ size_t total_size = 0;
+ size_t total_orphaned_size = 0;
+
+ seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
+ seq_printf(s, "----------------------------------------------------\n");
+
+ for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
+ struct ion_client *client = rb_entry(n, struct ion_client,
+ node);
+ size_t size = ion_debug_heap_total(client, heap->type);
+ if (!size)
+ continue;
+ if (client->task) {
+ char task_comm[TASK_COMM_LEN];
+
+ get_task_comm(task_comm, client->task);
+ seq_printf(s, "%16.s %16u %16u\n", task_comm,
+ client->pid, size);
+ } else {
+ seq_printf(s, "%16.s %16u %16u\n", client->name,
+ client->pid, size);
+ }
+ }
+ seq_printf(s, "----------------------------------------------------\n");
+ seq_printf(s, "orphaned allocations (info is from last known client):"
+ "\n");
+ mutex_lock(&dev->buffer_lock);
+ for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
+ struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
+ node);
+ if (buffer->heap->type != heap->type)
+ continue;
+ total_size += buffer->size;
+ if (!buffer->handle_count) {
+ seq_printf(s, "%16.s %16u %16u %d %d\n", buffer->task_comm,
+ buffer->pid, buffer->size, buffer->kmap_cnt,
+ atomic_read(&buffer->ref.refcount));
+ total_orphaned_size += buffer->size;
+ }
+ }
+ mutex_unlock(&dev->buffer_lock);
+ seq_printf(s, "----------------------------------------------------\n");
+ seq_printf(s, "%16.s %16u\n", "total orphaned",
+ total_orphaned_size);
+ seq_printf(s, "%16.s %16u\n", "total ", total_size);
+ seq_printf(s, "----------------------------------------------------\n");
+
+ if (heap->debug_show)
+ heap->debug_show(heap, s, unused);
+
+ return 0;
+}
+
+static int ion_debug_heap_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ion_debug_heap_show, inode->i_private);
+}
+
+static const struct file_operations debug_heap_fops = {
+ .open = ion_debug_heap_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
+{
+ struct rb_node **p = &dev->heaps.rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_heap *entry;
+
+ if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
+ !heap->ops->unmap_dma)
+ pr_err("%s: can not add heap with invalid ops struct.\n",
+ __func__);
+
+ heap->dev = dev;
+ down_write(&dev->lock);
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_heap, node);
+
+ if (heap->id < entry->id) {
+ p = &(*p)->rb_left;
+ } else if (heap->id > entry->id ) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_err("%s: can not insert multiple heaps with "
+ "id %d\n", __func__, heap->id);
+ goto end;
+ }
+ }
+
+ rb_link_node(&heap->node, parent, p);
+ rb_insert_color(&heap->node, &dev->heaps);
+ debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
+ &debug_heap_fops);
+end:
+ up_write(&dev->lock);
+}
+
+struct ion_device *ion_device_create(long (*custom_ioctl)
+ (struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg))
+{
+ struct ion_device *idev;
+ int ret;
+
+ idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
+ if (!idev)
+ return ERR_PTR(-ENOMEM);
+
+ idev->dev.minor = MISC_DYNAMIC_MINOR;
+ idev->dev.name = "ion";
+ idev->dev.fops = &ion_fops;
+ idev->dev.parent = NULL;
+ ret = misc_register(&idev->dev);
+ if (ret) {
+ pr_err("ion: failed to register misc device.\n");
+ return ERR_PTR(ret);
+ }
+
+ idev->debug_root = debugfs_create_dir("ion", NULL);
+ if (IS_ERR_OR_NULL(idev->debug_root))
+ pr_err("ion: failed to create debug files.\n");
+
+ idev->custom_ioctl = custom_ioctl;
+ idev->buffers = RB_ROOT;
+ mutex_init(&idev->buffer_lock);
+ init_rwsem(&idev->lock);
+ idev->heaps = RB_ROOT;
+ idev->clients = RB_ROOT;
+ return idev;
+}
+
+void ion_device_destroy(struct ion_device *dev)
+{
+ misc_deregister(&dev->dev);
+ /* XXX need to free the heaps and clients ? */
+ kfree(dev);
+}
+
+void __init ion_reserve(struct ion_platform_data *data)
+{
+ int i, ret;
+
+ for (i = 0; i < data->nr; i++) {
+ if (data->heaps[i].size == 0)
+ continue;
+ ret = memblock_reserve(data->heaps[i].base,
+ data->heaps[i].size);
+ if (ret)
+ pr_err("memblock reserve of %x@%lx failed\n",
+ data->heaps[i].size,
+ data->heaps[i].base);
+ }
+}
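
The ioctl handler above defines the userspace contract. A hedged userspace sketch of that flow follows; it assumes the uapi definitions (struct ion_allocation_data, struct ion_fd_data, struct ion_handle_data, the ION_IOC_* numbers) come from a <linux/ion.h> header and that the "ion" miscdevice appears as /dev/ion, neither of which is shown in this hunk.

/* Hypothetical userspace client; header name and device node are assumptions. */
#include <fcntl.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/ion.h>		/* assumed uapi header with the ION_IOC_* definitions */

int ion_alloc_and_map(size_t len)
{
	struct ion_allocation_data alloc = {
		.len = len,
		.align = 4096,
		.heap_mask = ~0u,	/* any heap id */
		.flags = 0,		/* uncached */
	};
	struct ion_fd_data share;
	struct ion_handle_data free_data;
	void *p;
	int ion_fd, ret;

	ion_fd = open("/dev/ion", O_RDWR);	/* assumed node for the "ion" miscdevice */
	if (ion_fd < 0)
		return -1;

	ret = ioctl(ion_fd, ION_IOC_ALLOC, &alloc);	/* kernel fills alloc.handle */
	if (ret < 0)
		goto out;

	share.handle = alloc.handle;
	ret = ioctl(ion_fd, ION_IOC_SHARE, &share);	/* exports a dma-buf fd in share.fd */
	if (ret < 0)
		goto out_free;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, share.fd, 0);
	if (p != MAP_FAILED) {
		/* ... use the buffer ... */
		munmap(p, len);
	}
	close(share.fd);

out_free:
	free_data.handle = alloc.handle;
	ioctl(ion_fd, ION_IOC_FREE, &free_data);
out:
	close(ion_fd);
	return ret;
}
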
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
new file mode 100644
index 000000000000..ce8d311968f6
--- /dev/null
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -0,0 +1,182 @@
+/*
+ * drivers/gpu/ion/ion_carveout_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/spinlock.h>
+
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/ion.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion_priv.h"
+
+#include <asm/mach/map.h>
+
+struct ion_carveout_heap {
+ struct ion_heap heap;
+ struct gen_pool *pool;
+ ion_phys_addr_t base;
+};
+
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
+ unsigned long size,
+ unsigned long align)
+{
+ struct ion_carveout_heap *carveout_heap =
+ container_of(heap, struct ion_carveout_heap, heap);
+ unsigned long offset = gen_pool_alloc(carveout_heap->pool, size);
+
+ if (!offset)
+ return ION_CARVEOUT_ALLOCATE_FAIL;
+
+ return offset;
+}
+
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+ unsigned long size)
+{
+ struct ion_carveout_heap *carveout_heap =
+ container_of(heap, struct ion_carveout_heap, heap);
+
+ if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
+ return;
+ gen_pool_free(carveout_heap->pool, addr, size);
+}
+
+static int ion_carveout_heap_phys(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ *addr = buffer->priv_phys;
+ *len = buffer->size;
+ return 0;
+}
+
+static int ion_carveout_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
+{
+ buffer->priv_phys = ion_carveout_allocate(heap, size, align);
+ return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0;
+}
+
+static void ion_carveout_heap_free(struct ion_buffer *buffer)
+{
+ struct ion_heap *heap = buffer->heap;
+
+ ion_carveout_free(heap, buffer->priv_phys, buffer->size);
+ buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
+}
+
+struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct sg_table *table;
+ int ret;
+
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
+ return ERR_PTR(-ENOMEM);
+ ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ if (ret) {
+ kfree(table);
+ return ERR_PTR(ret);
+ }
+ sg_set_page(table->sgl, phys_to_page(buffer->priv_phys), buffer->size,
+ 0);
+ return table;
+}
+
+void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ sg_free_table(buffer->sg_table);
+}
+
+void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ int mtype = MT_MEMORY_NONCACHED;
+
+ if (buffer->flags & ION_FLAG_CACHED)
+ mtype = MT_MEMORY;
+
+ return __arm_ioremap(buffer->priv_phys, buffer->size,
+ mtype);
+}
+
+void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ __arm_iounmap(buffer->vaddr);
+ buffer->vaddr = NULL;
+ return;
+}
+
+int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+ struct vm_area_struct *vma)
+{
+ return remap_pfn_range(vma, vma->vm_start,
+ __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ pgprot_noncached(vma->vm_page_prot));
+}
+
+static struct ion_heap_ops carveout_heap_ops = {
+ .allocate = ion_carveout_heap_allocate,
+ .free = ion_carveout_heap_free,
+ .phys = ion_carveout_heap_phys,
+ .map_dma = ion_carveout_heap_map_dma,
+ .unmap_dma = ion_carveout_heap_unmap_dma,
+ .map_user = ion_carveout_heap_map_user,
+ .map_kernel = ion_carveout_heap_map_kernel,
+ .unmap_kernel = ion_carveout_heap_unmap_kernel,
+};
+
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
+{
+ struct ion_carveout_heap *carveout_heap;
+
+ carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
+ if (!carveout_heap)
+ return ERR_PTR(-ENOMEM);
+
+ carveout_heap->pool = gen_pool_create(12, -1);
+ if (!carveout_heap->pool) {
+ kfree(carveout_heap);
+ return ERR_PTR(-ENOMEM);
+ }
+ carveout_heap->base = heap_data->base;
+ gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
+ -1);
+ carveout_heap->heap.ops = &carveout_heap_ops;
+ carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
+
+ return &carveout_heap->heap;
+}
+
+void ion_carveout_heap_destroy(struct ion_heap *heap)
+{
+ struct ion_carveout_heap *carveout_heap =
+ container_of(heap, struct ion_carveout_heap, heap);
+
+ gen_pool_destroy(carveout_heap->pool);
+ kfree(carveout_heap);
+ carveout_heap = NULL;
+}
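
The carveout heap above hands out 4 KiB-granular chunks (gen_pool_create(12, -1)) from a single reserved region. The exported ion_carveout_allocate()/ion_carveout_free() helpers can also back an architecture-specific custom heap; a hedged sketch of such a use, with placeholder example_* names, follows.

	/* sketch only: an arch-specific allocate hook reusing the carveout pool */
	static int example_custom_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len, unsigned long align,
					   unsigned long flags)
	{
		ion_phys_addr_t paddr = ion_carveout_allocate(heap, len, align);

		if (paddr == ION_CARVEOUT_ALLOCATE_FAIL)
			return -ENOMEM;
		buffer->priv_phys = paddr;
		return 0;
	}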
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
new file mode 100644
index 000000000000..8ce3c1907bad
--- /dev/null
+++ b/drivers/gpu/ion/ion_heap.c
@@ -0,0 +1,72 @@
+/*
+ * drivers/gpu/ion/ion_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/ion.h>
+#include "ion_priv.h"
+
+struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
+{
+ struct ion_heap *heap = NULL;
+
+ switch (heap_data->type) {
+ case ION_HEAP_TYPE_SYSTEM_CONTIG:
+ heap = ion_system_contig_heap_create(heap_data);
+ break;
+ case ION_HEAP_TYPE_SYSTEM:
+ heap = ion_system_heap_create(heap_data);
+ break;
+ case ION_HEAP_TYPE_CARVEOUT:
+ heap = ion_carveout_heap_create(heap_data);
+ break;
+ default:
+ pr_err("%s: Invalid heap type %d\n", __func__,
+ heap_data->type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (IS_ERR_OR_NULL(heap)) {
+ pr_err("%s: error creating heap %s type %d base %lu size %u\n",
+ __func__, heap_data->name, heap_data->type,
+ heap_data->base, heap_data->size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ heap->name = heap_data->name;
+ heap->id = heap_data->id;
+ return heap;
+}
+
+void ion_heap_destroy(struct ion_heap *heap)
+{
+ if (!heap)
+ return;
+
+ switch (heap->type) {
+ case ION_HEAP_TYPE_SYSTEM_CONTIG:
+ ion_system_contig_heap_destroy(heap);
+ break;
+ case ION_HEAP_TYPE_SYSTEM:
+ ion_system_heap_destroy(heap);
+ break;
+ case ION_HEAP_TYPE_CARVEOUT:
+ ion_carveout_heap_destroy(heap);
+ break;
+ default:
+ pr_err("%s: Invalid heap type %d\n", __func__,
+ heap->type);
+ }
+}
diff --git a/drivers/gpu/ion/ion_page_pool.c b/drivers/gpu/ion/ion_page_pool.c
new file mode 100644
index 000000000000..6ab10e3bdf5b
--- /dev/null
+++ b/drivers/gpu/ion/ion_page_pool.c
@@ -0,0 +1,280 @@
+/*
+ * drivers/gpu/ion/ion_page_pool.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/shrinker.h>
+#include "ion_priv.h"
+
+/* #define DEBUG_PAGE_POOL_SHRINKER */
+
+static struct plist_head pools = PLIST_HEAD_INIT(pools);
+static struct shrinker shrinker;
+
+struct ion_page_pool_item {
+ struct page *page;
+ struct list_head list;
+};
+
+static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
+{
+ struct page *page = alloc_pages(pool->gfp_mask, pool->order);
+
+ if (!page)
+ return NULL;
+
+ dma_sync_single_for_device(NULL, pfn_to_dma(NULL, page_to_pfn(page)),
+ PAGE_SIZE << pool->order,
+ DMA_BIDIRECTIONAL);
+ return page;
+}
+
+static void ion_page_pool_free_pages(struct ion_page_pool *pool,
+ struct page *page)
+{
+ __free_pages(page, pool->order);
+}
+
+static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
+{
+ struct ion_page_pool_item *item;
+
+ item = kmalloc(sizeof(struct ion_page_pool_item), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+
+ mutex_lock(&pool->mutex);
+ item->page = page;
+ if (PageHighMem(page)) {
+ list_add_tail(&item->list, &pool->high_items);
+ pool->high_count++;
+ } else {
+ list_add_tail(&item->list, &pool->low_items);
+ pool->low_count++;
+ }
+ mutex_unlock(&pool->mutex);
+ return 0;
+}
+
+static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
+{
+ struct ion_page_pool_item *item;
+ struct page *page;
+
+ if (high) {
+ BUG_ON(!pool->high_count);
+ item = list_first_entry(&pool->high_items,
+ struct ion_page_pool_item, list);
+ pool->high_count--;
+ } else {
+ BUG_ON(!pool->low_count);
+ item = list_first_entry(&pool->low_items,
+ struct ion_page_pool_item, list);
+ pool->low_count--;
+ }
+
+ list_del(&item->list);
+ page = item->page;
+ kfree(item);
+ return page;
+}
+
+void *ion_page_pool_alloc(struct ion_page_pool *pool)
+{
+ struct page *page = NULL;
+
+ BUG_ON(!pool);
+
+ mutex_lock(&pool->mutex);
+ if (pool->high_count)
+ page = ion_page_pool_remove(pool, true);
+ else if (pool->low_count)
+ page = ion_page_pool_remove(pool, false);
+ mutex_unlock(&pool->mutex);
+
+ if (!page)
+ page = ion_page_pool_alloc_pages(pool);
+
+ return page;
+}
+
+void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
+{
+ int ret;
+
+ ret = ion_page_pool_add(pool, page);
+ if (ret)
+ ion_page_pool_free_pages(pool, page);
+}
+
+#ifdef DEBUG_PAGE_POOL_SHRINKER
+static int debug_drop_pools_set(void *data, u64 val)
+{
+ struct shrink_control sc;
+ int objs;
+
+ sc.gfp_mask = -1;
+ sc.nr_to_scan = 0;
+
+ if (!val)
+ return 0;
+
+ objs = shrinker.shrink(&shrinker, &sc);
+ sc.nr_to_scan = objs;
+
+ shrinker.shrink(&shrinker, &sc);
+ return 0;
+}
+
+static int debug_drop_pools_get(void *data, u64 *val)
+{
+ struct shrink_control sc;
+ int objs;
+
+ sc.gfp_mask = -1;
+ sc.nr_to_scan = 0;
+
+ objs = shrinker.shrink(&shrinker, &sc);
+ *val = objs;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_drop_pools_fops, debug_drop_pools_get,
+ debug_drop_pools_set, "%llu\n");
+
+static int debug_grow_pools_set(void *data, u64 val)
+{
+ struct ion_page_pool *pool;
+ struct page *page;
+
+ plist_for_each_entry(pool, &pools, list) {
+ if (val != pool->list.prio)
+ continue;
+ page = ion_page_pool_alloc_pages(pool);
+ if (page)
+ ion_page_pool_add(pool, page);
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_grow_pools_fops, debug_drop_pools_get,
+ debug_grow_pools_set, "%llu\n");
+#endif
+
+static int ion_page_pool_total(bool high)
+{
+ struct ion_page_pool *pool;
+ int total = 0;
+
+ plist_for_each_entry(pool, &pools, list) {
+ total += high ? (pool->high_count + pool->low_count) *
+ (1 << pool->order) :
+ pool->low_count * (1 << pool->order);
+ }
+ return total;
+}
+
+static int ion_page_pool_shrink(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ struct ion_page_pool *pool;
+ int nr_freed = 0;
+ int i;
+ bool high = false;
+ int nr_to_scan = sc->nr_to_scan;
+
+ if (sc->gfp_mask & __GFP_HIGHMEM)
+ high = true;
+
+ if (nr_to_scan == 0)
+ return ion_page_pool_total(high);
+
+ plist_for_each_entry(pool, &pools, list) {
+ for (i = 0; i < nr_to_scan; i++) {
+ struct page *page;
+
+ mutex_lock(&pool->mutex);
+ if (high && pool->high_count) {
+ page = ion_page_pool_remove(pool, true);
+ } else if (pool->low_count) {
+ page = ion_page_pool_remove(pool, false);
+ } else {
+ mutex_unlock(&pool->mutex);
+ break;
+ }
+ mutex_unlock(&pool->mutex);
+ ion_page_pool_free_pages(pool, page);
+ nr_freed += (1 << pool->order);
+ }
+ nr_to_scan -= i;
+ }
+
+ return ion_page_pool_total(high);
+}
+
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
+{
+ struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
+ GFP_KERNEL);
+ if (!pool)
+ return NULL;
+ pool->high_count = 0;
+ pool->low_count = 0;
+ INIT_LIST_HEAD(&pool->low_items);
+ INIT_LIST_HEAD(&pool->high_items);
+ pool->gfp_mask = gfp_mask;
+ pool->order = order;
+ mutex_init(&pool->mutex);
+ plist_node_init(&pool->list, order);
+ plist_add(&pool->list, &pools);
+
+ return pool;
+}
+
+void ion_page_pool_destroy(struct ion_page_pool *pool)
+{
+ plist_del(&pool->list, &pools);
+ kfree(pool);
+}
+
+static int __init ion_page_pool_init(void)
+{
+ shrinker.shrink = ion_page_pool_shrink;
+ shrinker.seeks = DEFAULT_SEEKS;
+ shrinker.batch = 0;
+ register_shrinker(&shrinker);
+#ifdef DEBUG_PAGE_POOL_SHRINKER
+ debugfs_create_file("ion_pools_shrink", 0644, NULL, NULL,
+ &debug_drop_pools_fops);
+ debugfs_create_file("ion_pools_grow", 0644, NULL, NULL,
+ &debug_grow_pools_fops);
+#endif
+ return 0;
+}
+
+static void __exit ion_page_pool_exit(void)
+{
+ unregister_shrinker(&shrinker);
+}
+
+module_init(ion_page_pool_init);
+module_exit(ion_page_pool_exit);
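
The pool keeps separate highmem/lowmem free lists per order, and the single registered shrinker drains every pool on the global plist under memory pressure. A hedged usage sketch of the create/alloc/free API, roughly as a heap would call it, is shown below.

	static int example_pool_smoke_test(void)
	{
		/* one pool of order-4 chunks (64 KiB with 4 KiB pages) */
		struct ion_page_pool *pool;
		struct page *page;

		pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 4);
		if (!pool)
			return -ENOMEM;

		page = ion_page_pool_alloc(pool);	/* cached page if available, else alloc_pages() */
		if (page)
			ion_page_pool_free(pool, page);	/* back onto the pool's free list */

		ion_page_pool_destroy(pool);
		return 0;
	}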
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
new file mode 100644
index 000000000000..21c196305bff
--- /dev/null
+++ b/drivers/gpu/ion/ion_priv.h
@@ -0,0 +1,257 @@
+/*
+ * drivers/gpu/ion/ion_priv.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ION_PRIV_H
+#define _ION_PRIV_H
+
+#include <linux/ion.h>
+#include <linux/kref.h>
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/shrinker.h>
+#include <linux/types.h>
+
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
+
+/**
+ * struct ion_buffer - metadata for a particular buffer
+ * @ref: reference count
+ * @node: node in the ion_device buffers tree
+ * @dev: back pointer to the ion_device
+ * @heap: back pointer to the heap the buffer came from
+ * @flags: buffer specific flags
+ * @size: size of the buffer
+ * @priv_virt: private data to the buffer representable as
+ * a void *
+ * @priv_phys: private data to the buffer representable as
+ * an ion_phys_addr_t (and someday a phys_addr_t)
+ * @lock: protects the buffers cnt fields
+ * @kmap_cnt: number of times the buffer is mapped to the kernel
+ * @vaddr: the kernel mapping if kmap_cnt is not zero
+ * @dmap_cnt: number of times the buffer is mapped for dma
+ * @sg_table: the sg table for the buffer if dmap_cnt is not zero
+ * @dirty: bitmask representing which pages of this buffer have
+ * been dirtied by the cpu and need cache maintenance
+ * before dma
+ * @vmas: list of vma's mapping this buffer
+ * @handle_count: count of handles referencing this buffer
+ * @task_comm: taskcomm of last client to reference this buffer in a
+ * handle, used for debugging
+ * @pid: pid of last client to reference this buffer in a
+ * handle, used for debugging
+*/
+struct ion_buffer {
+ struct kref ref;
+ struct rb_node node;
+ struct ion_device *dev;
+ struct ion_heap *heap;
+ unsigned long flags;
+ size_t size;
+ union {
+ void *priv_virt;
+ ion_phys_addr_t priv_phys;
+ };
+ struct mutex lock;
+ int kmap_cnt;
+ void *vaddr;
+ int dmap_cnt;
+ struct sg_table *sg_table;
+ unsigned long *dirty;
+ struct list_head vmas;
+ /* used to track orphaned buffers */
+ int handle_count;
+ char task_comm[TASK_COMM_LEN];
+ pid_t pid;
+};
+
+/**
+ * struct ion_heap_ops - ops to operate on a given heap
+ * @allocate: allocate memory
+ * @free: free memory
+ * @phys: get the physical address of a buffer (only defined on
+ * physically contiguous heaps)
+ * @map_dma: map the memory for dma to a scatterlist
+ * @unmap_dma: unmap the memory for dma
+ * @map_kernel: map memory into the kernel
+ * @unmap_kernel: unmap memory from the kernel
+ * @map_user: map memory to userspace
+ */
+struct ion_heap_ops {
+ int (*allocate) (struct ion_heap *heap,
+ struct ion_buffer *buffer, unsigned long len,
+ unsigned long align, unsigned long flags);
+ void (*free) (struct ion_buffer *buffer);
+ int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len);
+ struct sg_table *(*map_dma) (struct ion_heap *heap,
+ struct ion_buffer *buffer);
+ void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
+ void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
+ void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
+ int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
+ struct vm_area_struct *vma);
+};
+
+/**
+ * struct ion_heap - represents a heap in the system
+ * @node: rb node to put the heap on the device's tree of heaps
+ * @dev: back pointer to the ion_device
+ * @type: type of heap
+ * @ops: ops struct as above
+ * @id: id of heap, also indicates priority of this heap when
+ * allocating. These are specified by platform data and
+ * MUST be unique
+ * @name: used for debugging
+ * @debug_show: called when heap debug file is read to add any
+ * heap specific debug info to output
+ *
+ * Represents a pool of memory from which buffers can be made. In some
+ * systems the only heap is regular system memory allocated via vmalloc.
+ * On others, some blocks might require large physically contiguous buffers
+ * that are allocated from a specially reserved heap.
+ */
+struct ion_heap {
+ struct rb_node node;
+ struct ion_device *dev;
+ enum ion_heap_type type;
+ struct ion_heap_ops *ops;
+ int id;
+ const char *name;
+ int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
+};
+
+/**
+ * ion_buffer_cached - this ion buffer is cached
+ * @buffer: buffer
+ *
+ * indicates whether this ion buffer is cached
+ */
+bool ion_buffer_cached(struct ion_buffer *buffer);
+
+/**
+ * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
+ * @buffer: buffer
+ *
+ * indicates whether userspace mappings of this buffer will be faulted
+ * in; this can affect how buffers are allocated from the heap.
+ */
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
+
+/**
+ * ion_device_create - allocates and returns an ion device
+ * @custom_ioctl: arch specific ioctl function if applicable
+ *
+ * returns a valid device or an ERR_PTR-encoded error on failure
+ */
+struct ion_device *ion_device_create(long (*custom_ioctl)
+ (struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg));
+
+/**
+ * ion_device_destroy - free a device and its resources
+ * @dev: the device
+ */
+void ion_device_destroy(struct ion_device *dev);
+
+/**
+ * ion_device_add_heap - adds a heap to the ion device
+ * @dev: the device
+ * @heap: the heap to add
+ */
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
+
+/**
+ * functions for creating and destroying the built-in ion heaps.
+ * architectures can add their own architecture-specific heaps
+ * as appropriate.
+ */
+
+struct ion_heap *ion_heap_create(struct ion_platform_heap *);
+void ion_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
+void ion_system_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
+void ion_system_contig_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
+void ion_carveout_heap_destroy(struct ion_heap *);
+/**
+ * kernel api to allocate/free from the carveout -- used when the carveout
+ * is used to back an architecture-specific custom heap
+ */
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
+ unsigned long align);
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+ unsigned long size);
+/**
+ * The carveout heap returns physical addresses; since 0 may be a valid
+ * physical address, this value is used to indicate that allocation failed
+ */
+#define ION_CARVEOUT_ALLOCATE_FAIL -1
+
+/**
+ * functions for creating and destroying a heap pool -- allows you
+ * to keep a pool of preallocated memory to use from your heap. Keeping
+ * a pool of memory that is ready for dma, i.e. any cached mappings have been
+ * invalidated from the cache, provides a significant performance benefit on
+ * many systems */
+
+/**
+ * struct ion_page_pool - pagepool struct
+ * @high_count: number of highmem items in the pool
+ * @low_count: number of lowmem items in the pool
+ * @high_items: list of highmem items
+ * @low_items: list of lowmem items
+ * @shrinker: a shrinker for the items
+ * @mutex: lock protecting this struct and especially the count
+ * fields and item lists
+ * @alloc: function to be used to allocate pages when the pool
+ * is empty
+ * @free: function to be used to free pages back to the system
+ * when the shrinker fires
+ * @gfp_mask: gfp_mask to use for allocations
+ * @order: order of pages in the pool
+ * @list: plist node for list of pools
+ *
+ * Allows you to keep a pool of preallocated pages to use from your heap.
+ * Keeping a pool of pages that is ready for dma, i.e. any cached mappings have
+ * been invalidated from the cache, provides a significant performance benefit
+ * on many systems
+ */
+struct ion_page_pool {
+ int high_count;
+ int low_count;
+ struct list_head high_items;
+ struct list_head low_items;
+ struct mutex mutex;
+ void *(*alloc)(struct ion_page_pool *pool);
+ void (*free)(struct ion_page_pool *pool, struct page *page);
+ gfp_t gfp_mask;
+ unsigned int order;
+ struct plist_node list;
+};
+
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
+void ion_page_pool_destroy(struct ion_page_pool *);
+void *ion_page_pool_alloc(struct ion_page_pool *);
+void ion_page_pool_free(struct ion_page_pool *, struct page *);
+
+#endif /* _ION_PRIV_H */
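
The header above is the whole contract a new heap has to satisfy: fill in an ion_heap_ops table and return an ion_heap from a create function. A skeleton of that pattern, with every example_* name a placeholder rather than anything defined by this patch, might look like this.

	static struct ion_heap_ops example_heap_ops = {
		.allocate     = example_heap_allocate,
		.free         = example_heap_free,
		.map_dma      = example_heap_map_dma,
		.unmap_dma    = example_heap_unmap_dma,
		.map_kernel   = example_heap_map_kernel,
		.unmap_kernel = example_heap_unmap_kernel,
		.map_user     = example_heap_map_user,
		/* .phys is only provided by physically contiguous heaps */
	};

	struct ion_heap *example_heap_create(struct ion_platform_heap *data)
	{
		struct ion_heap *heap = kzalloc(sizeof(*heap), GFP_KERNEL);

		if (!heap)
			return ERR_PTR(-ENOMEM);
		heap->ops  = &example_heap_ops;
		heap->type = ION_HEAP_TYPE_CUSTOM;	/* assumed custom type id */
		return heap;
	}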
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
new file mode 100644
index 000000000000..c3d75fa11632
--- /dev/null
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -0,0 +1,494 @@
+/*
+ * drivers/gpu/ion/ion_system_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/page.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/ion.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion_priv.h"
+
+static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
+ __GFP_NOWARN | __GFP_NORETRY)
+ & ~__GFP_WAIT;
+static unsigned int low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
+ __GFP_NOWARN);
+static const unsigned int orders[] = {8, 4, 0};
+static const int num_orders = ARRAY_SIZE(orders);
+static int order_to_index(unsigned int order)
+{
+ int i;
+ for (i = 0; i < num_orders; i++)
+ if (order == orders[i])
+ return i;
+ BUG();
+ return -1;
+}
+
+static unsigned int order_to_size(int order)
+{
+ return PAGE_SIZE << order;
+}
+
+struct ion_system_heap {
+ struct ion_heap heap;
+ struct ion_page_pool **pools;
+};
+
+struct page_info {
+ struct page *page;
+ unsigned int order;
+ struct list_head list;
+};
+
+static struct page *alloc_buffer_page(struct ion_system_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long order)
+{
+ bool cached = ion_buffer_cached(buffer);
+ bool split_pages = ion_buffer_fault_user_mappings(buffer);
+ struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+ struct page *page;
+
+ if (!cached) {
+ page = ion_page_pool_alloc(pool);
+ } else {
+ gfp_t gfp_flags = low_order_gfp_flags;
+
+ if (order > 4)
+ gfp_flags = high_order_gfp_flags;
+ page = alloc_pages(gfp_flags, order);
+ if (!page)
+ return NULL;
+ dma_sync_single_for_device(NULL,
+ pfn_to_dma(NULL, page_to_pfn(page)),
+ PAGE_SIZE << order,
+ DMA_BIDIRECTIONAL);
+ }
+ if (!page)
+ return NULL;
+
+ if (split_pages)
+ split_page(page, order);
+ return page;
+}
+
+static void free_buffer_page(struct ion_system_heap *heap,
+ struct ion_buffer *buffer, struct page *page,
+ unsigned int order)
+{
+ bool cached = ion_buffer_cached(buffer);
+ bool split_pages = ion_buffer_fault_user_mappings(buffer);
+ int i;
+
+ if (!cached) {
+ struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+ /* zero the pages before returning them to the pool for
+ security. This uses vmap as we want to set the pgprot so
+ the writes to occur to noncached mappings, as the pool's
+ purpose is to keep the pages out of the cache */
+ for (i = 0; i < (1 << order); i++) {
+ struct page *sub_page = page + i;
+ void *addr = vmap(&sub_page, 1, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+ memset(addr, 0, PAGE_SIZE);
+ vunmap(addr);
+ }
+ ion_page_pool_free(pool, page);
+ } else if (split_pages) {
+ for (i = 0; i < (1 << order); i++)
+ __free_page(page + i);
+ } else {
+ __free_pages(page, order);
+ }
+}
+
+
+static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size,
+ unsigned int max_order)
+{
+ struct page *page;
+ struct page_info *info;
+ int i;
+
+ for (i = 0; i < num_orders; i++) {
+ if (size < order_to_size(orders[i]))
+ continue;
+ if (max_order < orders[i])
+ continue;
+
+ page = alloc_buffer_page(heap, buffer, orders[i]);
+ if (!page)
+ continue;
+
+ info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
+ if (!info) {
+ free_buffer_page(heap, buffer, page, orders[i]);
+ return NULL;
+ }
+ info->page = page;
+ info->order = orders[i];
+ return info;
+ }
+ return NULL;
+}
+
+static int ion_system_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
+{
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int ret;
+ struct list_head pages;
+ struct page_info *info, *tmp_info;
+ int i = 0;
+ long size_remaining = PAGE_ALIGN(size);
+ unsigned int max_order = orders[0];
+ bool split_pages = ion_buffer_fault_user_mappings(buffer);
+
+ INIT_LIST_HEAD(&pages);
+ while (size_remaining > 0) {
+ info = alloc_largest_available(sys_heap, buffer, size_remaining, max_order);
+ if (!info)
+ goto err;
+ list_add_tail(&info->list, &pages);
+ size_remaining -= (1 << info->order) * PAGE_SIZE;
+ max_order = info->order;
+ i++;
+ }
+
+ table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
+ goto err;
+
+ if (split_pages)
+ ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
+ GFP_KERNEL);
+ else
+ ret = sg_alloc_table(table, i, GFP_KERNEL);
+
+ if (ret)
+ goto err1;
+
+ sg = table->sgl;
+ list_for_each_entry_safe(info, tmp_info, &pages, list) {
+ struct page *page = info->page;
+ if (split_pages) {
+ for (i = 0; i < (1 << info->order); i++) {
+ sg_set_page(sg, page + i, PAGE_SIZE, 0);
+ sg = sg_next(sg);
+ }
+ } else {
+ sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
+ 0);
+ sg = sg_next(sg);
+ }
+ list_del(&info->list);
+ kfree(info);
+ }
+
+ buffer->priv_virt = table;
+ return 0;
+err1:
+ kfree(table);
+err:
+ list_for_each_entry(info, &pages, list) {
+ free_buffer_page(sys_heap, buffer, info->page, info->order);
+ kfree(info);
+ }
+ return -ENOMEM;
+}
+
+void ion_system_heap_free(struct ion_buffer *buffer)
+{
+ struct ion_heap *heap = buffer->heap;
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ struct sg_table *table = buffer->priv_virt;
+ struct scatterlist *sg;
+ LIST_HEAD(pages);
+ int i;
+
+ for_each_sg(table->sgl, sg, table->nents, i)
+ free_buffer_page(sys_heap, buffer, sg_page(sg), get_order(sg_dma_len(sg)));
+ sg_free_table(table);
+ kfree(table);
+}
+
+struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+void ion_system_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return;
+}
+
+void *ion_system_heap_map_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct scatterlist *sg;
+ int i, j;
+ void *vaddr;
+ pgprot_t pgprot;
+ struct sg_table *table = buffer->priv_virt;
+ int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+ struct page **pages = vmalloc(sizeof(struct page *) * npages);
+ struct page **tmp = pages;
+
+ if (!pages)
+ return NULL;
+
+ if (buffer->flags & ION_FLAG_CACHED)
+ pgprot = PAGE_KERNEL;
+ else
+ pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
+ struct page *page = sg_page(sg);
+ BUG_ON(i >= npages);
+ for (j = 0; j < npages_this_entry; j++) {
+ *(tmp++) = page++;
+ }
+ }
+ vaddr = vmap(pages, npages, VM_MAP, pgprot);
+ vfree(pages);
+
+ return vaddr;
+}
+
+void ion_system_heap_unmap_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ vunmap(buffer->vaddr);
+}
+
+int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+ struct vm_area_struct *vma)
+{
+ struct sg_table *table = buffer->priv_virt;
+ unsigned long addr = vma->vm_start;
+ unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ struct page *page = sg_page(sg);
+ unsigned long remainder = vma->vm_end - addr;
+ unsigned long len = sg_dma_len(sg);
+
+ if (offset >= sg_dma_len(sg)) {
+ offset -= sg_dma_len(sg);
+ continue;
+ } else if (offset) {
+ page += offset / PAGE_SIZE;
+ len = sg_dma_len(sg) - offset;
+ offset = 0;
+ }
+ len = min(len, remainder);
+ remap_pfn_range(vma, addr, page_to_pfn(page), len,
+ vma->vm_page_prot);
+ addr += len;
+ if (addr >= vma->vm_end)
+ return 0;
+ }
+ return 0;
+}
+
+static struct ion_heap_ops system_heap_ops = {
+ .allocate = ion_system_heap_allocate,
+ .free = ion_system_heap_free,
+ .map_dma = ion_system_heap_map_dma,
+ .unmap_dma = ion_system_heap_unmap_dma,
+ .map_kernel = ion_system_heap_map_kernel,
+ .unmap_kernel = ion_system_heap_unmap_kernel,
+ .map_user = ion_system_heap_map_user,
+};
+
+static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
+ void *unused)
+{
+
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ int i;
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool = sys_heap->pools[i];
+ seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
+ pool->high_count, pool->order,
+ (1 << pool->order) * PAGE_SIZE * pool->high_count);
+ seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
+ pool->low_count, pool->order,
+ (1 << pool->order) * PAGE_SIZE * pool->low_count);
+ }
+ return 0;
+}
+
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
+{
+ struct ion_system_heap *heap;
+ int i;
+
+ heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
+ if (!heap)
+ return ERR_PTR(-ENOMEM);
+ heap->heap.ops = &system_heap_ops;
+ heap->heap.type = ION_HEAP_TYPE_SYSTEM;
+ heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
+ GFP_KERNEL);
+ if (!heap->pools)
+ goto err_alloc_pools;
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool;
+ gfp_t gfp_flags = low_order_gfp_flags;
+
+ if (orders[i] > 4)
+ gfp_flags = high_order_gfp_flags;
+ pool = ion_page_pool_create(gfp_flags, orders[i]);
+ if (!pool)
+ goto err_create_pool;
+ heap->pools[i] = pool;
+ }
+ heap->heap.debug_show = ion_system_heap_debug_show;
+ return &heap->heap;
+err_create_pool:
+ for (i = 0; i < num_orders; i++)
+ if (heap->pools[i])
+ ion_page_pool_destroy(heap->pools[i]);
+ kfree(heap->pools);
+err_alloc_pools:
+ kfree(heap);
+ return ERR_PTR(-ENOMEM);
+}
+
+void ion_system_heap_destroy(struct ion_heap *heap)
+{
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ int i;
+
+ for (i = 0; i < num_orders; i++)
+ ion_page_pool_destroy(sys_heap->pools[i]);
+ kfree(sys_heap->pools);
+ kfree(sys_heap);
+}
+
+static int ion_system_contig_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long len,
+ unsigned long align,
+ unsigned long flags)
+{
+ buffer->priv_virt = kzalloc(len, GFP_KERNEL);
+ if (!buffer->priv_virt)
+ return -ENOMEM;
+ return 0;
+}
+
+void ion_system_contig_heap_free(struct ion_buffer *buffer)
+{
+ kfree(buffer->priv_virt);
+}
+
+static int ion_system_contig_heap_phys(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ *addr = virt_to_phys(buffer->priv_virt);
+ *len = buffer->size;
+ return 0;
+}
+
+struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct sg_table *table;
+ int ret;
+
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
+ return ERR_PTR(-ENOMEM);
+ ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ if (ret) {
+ kfree(table);
+ return ERR_PTR(ret);
+ }
+ sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
+ 0);
+ return table;
+}
+
+void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ sg_free_table(buffer->sg_table);
+ kfree(buffer->sg_table);
+}
+
+int ion_system_contig_heap_map_user(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ struct vm_area_struct *vma)
+{
+ unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));
+ return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+
+}
+
+static struct ion_heap_ops kmalloc_ops = {
+ .allocate = ion_system_contig_heap_allocate,
+ .free = ion_system_contig_heap_free,
+ .phys = ion_system_contig_heap_phys,
+ .map_dma = ion_system_contig_heap_map_dma,
+ .unmap_dma = ion_system_contig_heap_unmap_dma,
+ .map_kernel = ion_system_heap_map_kernel,
+ .unmap_kernel = ion_system_heap_unmap_kernel,
+ .map_user = ion_system_contig_heap_map_user,
+};
+
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
+{
+ struct ion_heap *heap;
+
+ heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+ if (!heap)
+ return ERR_PTR(-ENOMEM);
+ heap->ops = &kmalloc_ops;
+ heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
+ return heap;
+}
+
+void ion_system_contig_heap_destroy(struct ion_heap *heap)
+{
+ kfree(heap);
+}
+
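
With 4 KiB pages the order table {8, 4, 0} corresponds to 1 MiB, 64 KiB and 4 KiB chunks. As a worked example of the fallback loop in ion_system_heap_allocate()/alloc_largest_available(), an uncached 1216 KiB buffer would be assembled roughly as sketched below (illustrative bookkeeping, not output from the code):

	/* size_remaining = 1216 KiB, max_order starts at 8 */
	/* 1st chunk: order 8 (1024 KiB) fits   -> remaining 192 KiB, max_order 8 */
	/* 2nd chunk: order 8 too big, order 4  -> remaining 128 KiB, max_order 4 */
	/* 3rd chunk: order 4 (64 KiB)          -> remaining  64 KiB */
	/* 4th chunk: order 4 (64 KiB)          -> remaining   0     */
	/* result: a 4-entry sg_table, largest chunks first */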
diff --git a/drivers/gpu/ion/ion_system_mapper.c b/drivers/gpu/ion/ion_system_mapper.c
new file mode 100644
index 000000000000..692458e07b5e
--- /dev/null
+++ b/drivers/gpu/ion/ion_system_mapper.c
@@ -0,0 +1,114 @@
+/*
+ * drivers/gpu/ion/ion_system_mapper.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/ion.h>
+#include <linux/memory.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion_priv.h"
+/*
+ * This mapper is valid for any heap that allocates memory that already has
+ * a kernel mapping, this includes vmalloc'd memory, kmalloc'd memory,
+ * pages obtained via io_remap, etc.
+ */
+static void *ion_kernel_mapper_map(struct ion_mapper *mapper,
+ struct ion_buffer *buffer,
+ struct ion_mapping **mapping)
+{
+ if (!((1 << buffer->heap->type) & mapper->heap_mask)) {
+ pr_err("%s: attempting to map an unsupported heap\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+ /* XXX REVISIT ME!!! */
+ *((unsigned long *)mapping) = (unsigned long)buffer->priv;
+ return buffer->priv;
+}
+
+static void ion_kernel_mapper_unmap(struct ion_mapper *mapper,
+ struct ion_buffer *buffer,
+ struct ion_mapping *mapping)
+{
+ if (!((1 << buffer->heap->type) & mapper->heap_mask))
+ pr_err("%s: attempting to unmap an unsupported heap\n",
+ __func__);
+}
+
+static void *ion_kernel_mapper_map_kernel(struct ion_mapper *mapper,
+ struct ion_buffer *buffer,
+ struct ion_mapping *mapping)
+{
+ if (!((1 << buffer->heap->type) & mapper->heap_mask)) {
+ pr_err("%s: attempting to unmap an unsupported heap\n",
+ __func__);
+ return ERR_PTR(-EINVAL);
+ }
+ return buffer->priv;
+}
+
+static int ion_kernel_mapper_map_user(struct ion_mapper *mapper,
+ struct ion_buffer *buffer,
+ struct vm_area_struct *vma,
+ struct ion_mapping *mapping)
+{
+ int ret;
+
+ switch (buffer->heap->type) {
+ case ION_HEAP_KMALLOC:
+ {
+ unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv));
+ ret = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ break;
+ }
+ case ION_HEAP_VMALLOC:
+ ret = remap_vmalloc_range(vma, buffer->priv, vma->vm_pgoff);
+ break;
+ default:
+ pr_err("%s: attempting to map unsupported heap to userspace\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static struct ion_mapper_ops ops = {
+ .map = ion_kernel_mapper_map,
+ .map_kernel = ion_kernel_mapper_map_kernel,
+ .map_user = ion_kernel_mapper_map_user,
+ .unmap = ion_kernel_mapper_unmap,
+};
+
+struct ion_mapper *ion_system_mapper_create(void)
+{
+ struct ion_mapper *mapper;
+ mapper = kzalloc(sizeof(struct ion_mapper), GFP_KERNEL);
+ if (!mapper)
+ return ERR_PTR(-ENOMEM);
+ mapper->type = ION_SYSTEM_MAPPER;
+ mapper->ops = &ops;
+ mapper->heap_mask = (1 << ION_HEAP_VMALLOC) | (1 << ION_HEAP_KMALLOC);
+ return mapper;
+}
+
+void ion_system_mapper_destroy(struct ion_mapper *mapper)
+{
+ kfree(mapper);
+}
+
diff --git a/drivers/gpu/ion/tegra/Makefile b/drivers/gpu/ion/tegra/Makefile
new file mode 100644
index 000000000000..11cd003fb08f
--- /dev/null
+++ b/drivers/gpu/ion/tegra/Makefile
@@ -0,0 +1 @@
+obj-y += tegra_ion.o
diff --git a/drivers/gpu/ion/tegra/tegra_ion.c b/drivers/gpu/ion/tegra/tegra_ion.c
new file mode 100644
index 000000000000..7af6e168ff4c
--- /dev/null
+++ b/drivers/gpu/ion/tegra/tegra_ion.c
@@ -0,0 +1,96 @@
+/*
+ * drivers/gpu/ion/tegra/tegra_ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/ion.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "../ion_priv.h"
+
+struct ion_device *idev;
+struct ion_mapper *tegra_user_mapper;
+int num_heaps;
+struct ion_heap **heaps;
+
+int tegra_ion_probe(struct platform_device *pdev)
+{
+ struct ion_platform_data *pdata = pdev->dev.platform_data;
+ int err;
+ int i;
+
+ num_heaps = pdata->nr;
+
+ heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
+
+ idev = ion_device_create(NULL);
+ if (IS_ERR_OR_NULL(idev)) {
+ kfree(heaps);
+ return PTR_ERR(idev);
+ }
+
+ /* create the heaps as specified in the board file */
+ for (i = 0; i < num_heaps; i++) {
+ struct ion_platform_heap *heap_data = &pdata->heaps[i];
+
+ heaps[i] = ion_heap_create(heap_data);
+ if (IS_ERR_OR_NULL(heaps[i])) {
+ err = PTR_ERR(heaps[i]);
+ goto err;
+ }
+ ion_device_add_heap(idev, heaps[i]);
+ }
+ platform_set_drvdata(pdev, idev);
+ return 0;
+err:
+ for (i = 0; i < num_heaps; i++) {
+ if (heaps[i])
+ ion_heap_destroy(heaps[i]);
+ }
+ kfree(heaps);
+ return err;
+}
+
+int tegra_ion_remove(struct platform_device *pdev)
+{
+ struct ion_device *idev = platform_get_drvdata(pdev);
+ int i;
+
+ ion_device_destroy(idev);
+ for (i = 0; i < num_heaps; i++)
+ ion_heap_destroy(heaps[i]);
+ kfree(heaps);
+ return 0;
+}
+
+static struct platform_driver ion_driver = {
+ .probe = tegra_ion_probe,
+ .remove = tegra_ion_remove,
+ .driver = { .name = "ion-tegra" }
+};
+
+static int __init ion_init(void)
+{
+ return platform_driver_register(&ion_driver);
+}
+
+static void __exit ion_exit(void)
+{
+ platform_driver_unregister(&ion_driver);
+}
+
+module_init(ion_init);
+module_exit(ion_exit);
+
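
tegra_ion_probe() above expects ion_platform_data in dev.platform_data, so the driver only binds if board code registers a matching platform device. A hedged sketch of that registration (example_ion_pdata refers to the illustrative table sketched after ion.c earlier, and example_board_init is a placeholder):

	static struct platform_device tegra_ion_device = {
		.name = "ion-tegra",
		.id   = -1,
		.dev  = { .platform_data = &example_ion_pdata },	/* assumed pdata table */
	};

	static void __init example_board_init(void)
	{
		platform_device_register(&tegra_ion_device);
	}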
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 21b196c394b1..5464974a06e7 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1256,8 +1256,9 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
* UGCI) cram a lot of unrelated inputs into the
* same interface. */
hidinput->report = report;
- if (drv->input_configured)
- drv->input_configured(hid, hidinput);
+ if (drv->input_configured &&
+ drv->input_configured(hid, hidinput))
+ goto out_cleanup;
if (input_register_device(hidinput->input))
goto out_cleanup;
hidinput = NULL;
@@ -1266,8 +1267,9 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
}
if (hidinput) {
- if (drv->input_configured)
- drv->input_configured(hid, hidinput);
+ if (drv->input_configured &&
+ drv->input_configured(hid, hidinput))
+ goto out_cleanup;
if (input_register_device(hidinput->input))
goto out_cleanup;
}
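
With this change ->input_configured can veto registration: any nonzero return makes hidinput_connect() bail out through out_cleanup instead of registering a half-configured device. A hedged sketch of a driver callback under the new contract (the example_* names are placeholders):

	static int example_input_configured(struct hid_device *hdev,
					    struct hid_input *hi)
	{
		struct input_dev *input = hi->input;

		if (!hid_get_drvdata(hdev))
			return -ENODEV;		/* abort: hidinput_connect() cleans up */

		__set_bit(EV_KEY, input->evbit);	/* finish configuring the device */
		return 0;				/* proceed to input_register_device() */
	}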
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 25ddf3e3aec6..106c875ef2bb 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -351,8 +351,9 @@ static int magicmouse_raw_event(struct hid_device *hdev,
return 1;
}
-static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hdev)
+static int magicmouse_setup_input(struct hid_device *hdev, struct hid_input *hi)
{
+ struct input_dev *input = hi->input;
int error;
__set_bit(EV_KEY, input->evbit);
@@ -493,17 +494,6 @@ static int magicmouse_probe(struct hid_device *hdev,
goto err_free;
}
- /* We do this after hid-input is done parsing reports so that
- * hid-input uses the most natural button and axis IDs.
- */
- if (msc->input) {
- ret = magicmouse_setup_input(msc->input, hdev);
- if (ret) {
- hid_err(hdev, "magicmouse setup input failed (%d)\n", ret);
- goto err_stop_hw;
- }
- }
-
if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
report = hid_register_report(hdev, HID_INPUT_REPORT,
MOUSE_REPORT_ID);
@@ -568,6 +558,7 @@ static struct hid_driver magicmouse_driver = {
.remove = magicmouse_remove,
.raw_event = magicmouse_raw_event,
.input_mapping = magicmouse_input_mapping,
+ .input_configured = magicmouse_setup_input,
};
static int __init magicmouse_init(void)
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 61543c02ea0b..7cb83d9086cd 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -363,6 +363,16 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
if (usage->usage_index)
prev_usage = &field->usage[usage->usage_index - 1];
+ /* Only map fields from TouchScreen or TouchPad collections.
+ * We need to ignore fields that belong to other collections
+ * such as Mouse that might have the same GenericDesktop usages. */
+ if (field->application == HID_DG_TOUCHSCREEN)
+ set_bit(INPUT_PROP_DIRECT, hi->input->propbit);
+ else if (field->application == HID_DG_TOUCHPAD)
+ set_bit(INPUT_PROP_POINTER, hi->input->propbit);
+ else
+ return 0;
+
switch (usage->hid & HID_USAGE_PAGE) {
case HID_UP_GENDESK:
@@ -726,7 +736,7 @@ static void mt_post_parse(struct mt_device *td)
}
}
-static void mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
+static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
{
struct mt_device *td = hid_get_drvdata(hdev);
@@ -735,7 +745,7 @@ static void mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
/* Only initialize slots for MT input devices */
if (!test_bit(ABS_MT_POSITION_X, input->absbit))
- return;
+ return 0;
if (!td->maxcontacts)
td->maxcontacts = MT_DEFAULT_MAXCONTACT;
@@ -753,6 +763,8 @@ static void mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
input_mt_init_slots(input, td->maxcontacts, td->mt_flags);
td->mt_flags = 0;
+
+ return 0;
}
static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
diff --git a/drivers/hwmon/vexpress.c b/drivers/hwmon/vexpress.c
index 86d7f6d858b1..d867e6bb2be1 100644
--- a/drivers/hwmon/vexpress.c
+++ b/drivers/hwmon/vexpress.c
@@ -19,6 +19,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/vexpress.h>
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index fe4bcd7c5b12..05e996fafc9d 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -8,6 +8,7 @@ config HID_SENSOR_ACCEL_3D
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
tristate "HID Accelerometers 3D"
help
Say yes here to build support for the HID SENSOR
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index 4a5f639bc684..bbad9b94cd75 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -411,7 +411,11 @@ static int ad7266_probe(struct spi_device *spi)
if (ret)
goto error_put_reg;
- st->vref_uv = regulator_get_voltage(st->reg);
+ ret = regulator_get_voltage(st->reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ st->vref_uv = ret;
} else {
/* Use internal reference */
st->vref_uv = 2500000;
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 04b013561f0f..a526c0e3aaa8 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -80,7 +80,7 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
*timestamp = pf->timestamp;
}
- iio_push_to_buffers(indio_dev, (u8 *)st->buffer);
+ iio_push_to_buffers(idev, (u8 *)st->buffer);
iio_trigger_notify_done(idev->trig);
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index b5669be6f396..03b25b3dc71e 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -1605,19 +1605,20 @@ static int max1363_probe(struct i2c_client *client,
return 0;
error_free_irq:
- free_irq(st->client->irq, indio_dev);
+ if (client->irq)
+ free_irq(st->client->irq, indio_dev);
error_uninit_buffer:
iio_buffer_unregister(indio_dev);
error_cleanup_buffer:
max1363_buffer_cleanup(indio_dev);
error_free_available_scan_masks:
kfree(indio_dev->available_scan_masks);
-error_unregister_map:
- iio_map_array_unregister(indio_dev, client->dev.platform_data);
error_disable_reg:
regulator_disable(st->reg);
error_put_reg:
regulator_put(st->reg);
+error_unregister_map:
+ iio_map_array_unregister(indio_dev, client->dev.platform_data);
error_free_device:
iio_device_free(indio_dev);
error_out:
@@ -1635,10 +1636,8 @@ static int max1363_remove(struct i2c_client *client)
iio_buffer_unregister(indio_dev);
max1363_buffer_cleanup(indio_dev);
kfree(indio_dev->available_scan_masks);
- if (!IS_ERR(st->reg)) {
- regulator_disable(st->reg);
- regulator_put(st->reg);
- }
+ regulator_disable(st->reg);
+ regulator_put(st->reg);
iio_map_array_unregister(indio_dev, client->dev.platform_data);
iio_device_free(indio_dev);
diff --git a/drivers/iio/common/hid-sensors/Kconfig b/drivers/iio/common/hid-sensors/Kconfig
index ae10778da7aa..1178121b55b0 100644
--- a/drivers/iio/common/hid-sensors/Kconfig
+++ b/drivers/iio/common/hid-sensors/Kconfig
@@ -6,7 +6,7 @@ menu "Hid Sensor IIO Common"
config HID_SENSOR_IIO_COMMON
tristate "Common modules for all HID Sensor IIO drivers"
depends on HID_SENSOR_HUB
- select IIO_TRIGGER if IIO_BUFFER
+ select HID_SENSOR_IIO_TRIGGER if IIO_BUFFER
help
Say yes here to build support for HID sensor to use
HID sensor common processing for attributes and IIO triggers.
@@ -14,6 +14,17 @@ config HID_SENSOR_IIO_COMMON
HID sensor drivers, this module contains processing for those
attributes.
+config HID_SENSOR_IIO_TRIGGER
+ tristate "Common module (trigger) for all HID Sensor IIO drivers"
+ depends on HID_SENSOR_HUB && HID_SENSOR_IIO_COMMON
+ select IIO_TRIGGER
+ help
+ Say yes here to build trigger support for HID sensors.
+ Triggers will be sent when all requested attributes have been read.
+
+ If this driver is compiled as a module, it will be named
+ hid-sensor-trigger.
+
config HID_SENSOR_ENUM_BASE_QUIRKS
bool "ENUM base quirks for HID Sensor IIO drivers"
depends on HID_SENSOR_IIO_COMMON
diff --git a/drivers/iio/common/hid-sensors/Makefile b/drivers/iio/common/hid-sensors/Makefile
index 1f463e00c242..22e7c5a82325 100644
--- a/drivers/iio/common/hid-sensors/Makefile
+++ b/drivers/iio/common/hid-sensors/Makefile
@@ -3,4 +3,5 @@
#
obj-$(CONFIG_HID_SENSOR_IIO_COMMON) += hid-sensor-iio-common.o
-hid-sensor-iio-common-y := hid-sensor-attributes.o hid-sensor-trigger.o
+obj-$(CONFIG_HID_SENSOR_IIO_TRIGGER) += hid-sensor-trigger.o
+hid-sensor-iio-common-y := hid-sensor-attributes.o
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index 6c7898c765d9..483fc379a2da 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -406,7 +406,11 @@ static int ad5380_probe(struct device *dev, struct regmap *regmap,
goto error_free_reg;
}
- st->vref = regulator_get_voltage(st->vref_reg);
+ ret = regulator_get_voltage(st->vref_reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ st->vref = ret;
} else {
st->vref = st->chip_info->int_vref;
ctrl |= AD5380_CTRL_INT_VREF_EN;
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index 29f653dab2f7..f5583aedfb59 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -226,7 +226,11 @@ static int ad5446_probe(struct device *dev, const char *name,
if (ret)
goto error_put_reg;
- voltage_uv = regulator_get_voltage(reg);
+ ret = regulator_get_voltage(reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ voltage_uv = ret;
}
indio_dev = iio_device_alloc(sizeof(*st));
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index b2a31a0468ed..0661829f2773 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -296,7 +296,11 @@ static int ad5504_probe(struct spi_device *spi)
if (ret)
goto error_put_reg;
- voltage_uv = regulator_get_voltage(reg);
+ ret = regulator_get_voltage(reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ voltage_uv = ret;
}
spi_set_drvdata(spi, indio_dev);
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
index e9947969f9fe..f6e116627b71 100644
--- a/drivers/iio/dac/ad5624r_spi.c
+++ b/drivers/iio/dac/ad5624r_spi.c
@@ -238,7 +238,11 @@ static int ad5624r_probe(struct spi_device *spi)
if (ret)
goto error_put_reg;
- voltage_uv = regulator_get_voltage(st->reg);
+ ret = regulator_get_voltage(st->reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ voltage_uv = ret;
}
spi_set_drvdata(spi, indio_dev);
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index 36e51382ae52..ca9609d7a15c 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -332,7 +332,11 @@ static int ad5686_probe(struct spi_device *spi)
if (ret)
goto error_put_reg;
- voltage_uv = regulator_get_voltage(st->reg);
+ ret = regulator_get_voltage(st->reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ voltage_uv = ret;
}
st->chip_info =
diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c
index c84180f23139..6407b5407ddd 100644
--- a/drivers/iio/dac/ad5791.c
+++ b/drivers/iio/dac/ad5791.c
@@ -365,7 +365,11 @@ static int ad5791_probe(struct spi_device *spi)
if (ret)
goto error_put_reg_pos;
- pos_voltage_uv = regulator_get_voltage(st->reg_vdd);
+ ret = regulator_get_voltage(st->reg_vdd);
+ if (ret < 0)
+ goto error_disable_reg_pos;
+
+ pos_voltage_uv = ret;
}
st->reg_vss = regulator_get(&spi->dev, "vss");
@@ -374,7 +378,11 @@ static int ad5791_probe(struct spi_device *spi)
if (ret)
goto error_put_reg_neg;
- neg_voltage_uv = regulator_get_voltage(st->reg_vss);
+ ret = regulator_get_voltage(st->reg_vss);
+ if (ret < 0)
+ goto error_disable_reg_neg;
+
+ neg_voltage_uv = ret;
}
st->pwr_down = true;
@@ -428,6 +436,7 @@ error_put_reg_neg:
if (!IS_ERR(st->reg_vss))
regulator_put(st->reg_vss);
+error_disable_reg_pos:
if (!IS_ERR(st->reg_vdd))
regulator_disable(st->reg_vdd);
error_put_reg_pos:
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index e5033b4cfba0..a884252ac66b 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -173,7 +173,7 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)
} while ((st->r1_mod > ADF4350_MAX_MODULUS) && r_cnt);
} while (r_cnt == 0);
- tmp = freq * (u64)st->r1_mod + (st->fpfd > 1);
+ tmp = freq * (u64)st->r1_mod + (st->fpfd >> 1);
do_div(tmp, st->fpfd); /* Div round closest (n + d/2)/d */
st->r0_fract = do_div(tmp, st->r1_mod);
st->r0_int = tmp;
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index 48ed1483ff27..96b68f63a902 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -17,6 +17,7 @@ config HID_SENSOR_GYRO_3D
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
tristate "HID Gyroscope 3D"
help
Say yes here to build support for the HID SENSOR
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 1763c9bcb98a..dbf80abc834f 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -47,6 +47,7 @@ config HID_SENSOR_ALS
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
tristate "HID ALS"
help
Say yes here to build support for the HID SENSOR
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index c1f0cdd57037..ff11d68225cf 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -8,6 +8,7 @@ config HID_SENSOR_MAGNETOMETER_3D
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
tristate "HID Magenetometer 3D"
help
Say yes here to build support for the HID SENSOR
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 55f7e57d4e42..771b64cd5399 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -174,6 +174,15 @@ config INPUT_APMPOWER
To compile this driver as a module, choose M here: the
module will be called apm-power.
+config INPUT_KEYRESET
+ tristate "Reset key"
+ depends on INPUT
+ ---help---
+ Say Y here if you want to reboot when certain keys are pressed.
+
+ To compile this driver as a module, choose M here: the
+ module will be called keyreset.
+
comment "Input Device Drivers"
source "drivers/input/keyboard/Kconfig"
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index 5ca3f631497f..d2650b7cc4a3 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -25,3 +25,5 @@ obj-$(CONFIG_INPUT_TOUCHSCREEN) += touchscreen/
obj-$(CONFIG_INPUT_MISC) += misc/
obj-$(CONFIG_INPUT_APMPOWER) += apm-power.o
+obj-$(CONFIG_INPUT_KEYRESET) += keyreset.o
+
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index f0f8928b3c8a..c9d824a95239 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -24,6 +24,7 @@
#include <linux/major.h>
#include <linux/device.h>
#include <linux/cdev.h>
+#include <linux/wakelock.h>
#include "input-compat.h"
struct evdev {
@@ -44,6 +45,9 @@ struct evdev_client {
unsigned int tail;
unsigned int packet_head; /* [future] position of the first element of next packet */
spinlock_t buffer_lock; /* protects access to buffer, head and tail */
+ struct wake_lock wake_lock;
+ bool use_wake_lock;
+ char name[28];
struct fasync_struct *fasync;
struct evdev *evdev;
struct list_head node;
@@ -71,10 +75,14 @@ static void __pass_event(struct evdev_client *client,
client->buffer[client->tail].value = 0;
client->packet_head = client->tail;
+ if (client->use_wake_lock)
+ wake_unlock(&client->wake_lock);
}
if (event->type == EV_SYN && event->code == SYN_REPORT) {
client->packet_head = client->head;
+ if (client->use_wake_lock)
+ wake_lock(&client->wake_lock);
kill_fasync(&client->fasync, SIGIO, POLL_IN);
}
}
@@ -289,6 +297,8 @@ static int evdev_release(struct inode *inode, struct file *file)
mutex_unlock(&evdev->mutex);
evdev_detach_client(evdev, client);
+ if (client->use_wake_lock)
+ wake_lock_destroy(&client->wake_lock);
kfree(client);
evdev_close_device(evdev);
@@ -318,8 +328,12 @@ static int evdev_open(struct inode *inode, struct file *file)
if (!client)
return -ENOMEM;
+ client->clkid = CLOCK_MONOTONIC;
+
client->bufsize = bufsize;
spin_lock_init(&client->buffer_lock);
+ snprintf(client->name, sizeof(client->name), "%s-%d",
+ dev_name(&evdev->dev), task_tgid_vnr(current));
client->evdev = evdev;
evdev_attach_client(evdev, client);
@@ -386,6 +400,9 @@ static int evdev_fetch_next_event(struct evdev_client *client,
if (have_event) {
*event = client->buffer[client->tail++];
client->tail &= client->bufsize - 1;
+ if (client->use_wake_lock &&
+ client->packet_head == client->tail)
+ wake_unlock(&client->wake_lock);
}
spin_unlock_irq(&client->buffer_lock);
@@ -674,6 +691,35 @@ static int evdev_handle_mt_request(struct input_dev *dev,
return 0;
}
+static int evdev_enable_suspend_block(struct evdev *evdev,
+ struct evdev_client *client)
+{
+ if (client->use_wake_lock)
+ return 0;
+
+ spin_lock_irq(&client->buffer_lock);
+ wake_lock_init(&client->wake_lock, WAKE_LOCK_SUSPEND, client->name);
+ client->use_wake_lock = true;
+ if (client->packet_head != client->tail)
+ wake_lock(&client->wake_lock);
+ spin_unlock_irq(&client->buffer_lock);
+ return 0;
+}
+
+static int evdev_disable_suspend_block(struct evdev *evdev,
+ struct evdev_client *client)
+{
+ if (!client->use_wake_lock)
+ return 0;
+
+ spin_lock_irq(&client->buffer_lock);
+ client->use_wake_lock = false;
+ wake_lock_destroy(&client->wake_lock);
+ spin_unlock_irq(&client->buffer_lock);
+
+ return 0;
+}
+
static long evdev_do_ioctl(struct file *file, unsigned int cmd,
void __user *p, int compat_mode)
{
@@ -755,6 +801,15 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
case EVIOCSKEYCODE_V2:
return evdev_handle_set_keycode_v2(dev, p);
+
+ case EVIOCGSUSPENDBLOCK:
+ return put_user(client->use_wake_lock, ip);
+
+ case EVIOCSSUSPENDBLOCK:
+ if (p)
+ return evdev_enable_suspend_block(evdev, client);
+ else
+ return evdev_disable_suspend_block(evdev, client);
}
size = _IOC_SIZE(cmd);
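For reference, a minimal userspace sketch of the new suspend-block ioctls handled above. It assumes the EVIOCGSUSPENDBLOCK/EVIOCSSUSPENDBLOCK numbers come from the matching <linux/input.h> uapi change, which is not part of this hunk; the device path and error handling are illustrative only.

/* Hypothetical sketch: opt an evdev reader into wake-lock-backed delivery.
 * EVIOCGSUSPENDBLOCK/EVIOCSSUSPENDBLOCK are assumed to be defined by the
 * matching <linux/input.h> uapi update, which is not shown in this hunk. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	int enabled = 0;
	int fd = open("/dev/input/event0", O_RDONLY);

	if (fd < 0)
		return 1;
	/* hold a wake lock while complete, unread packets are queued for us */
	if (ioctl(fd, EVIOCSSUSPENDBLOCK, 1) < 0)
		perror("EVIOCSSUSPENDBLOCK");
	/* read the current setting back */
	if (ioctl(fd, EVIOCGSUSPENDBLOCK, &enabled) == 0)
		printf("suspend block enabled: %d\n", enabled);
	close(fd);
	return 0;
}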
diff --git a/drivers/input/keyreset.c b/drivers/input/keyreset.c
new file mode 100644
index 000000000000..36208fe0baae
--- /dev/null
+++ b/drivers/input/keyreset.c
@@ -0,0 +1,239 @@
+/* drivers/input/keyreset.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/input.h>
+#include <linux/keyreset.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+
+
+struct keyreset_state {
+ struct input_handler input_handler;
+ unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];
+ unsigned long upbit[BITS_TO_LONGS(KEY_CNT)];
+ unsigned long key[BITS_TO_LONGS(KEY_CNT)];
+ spinlock_t lock;
+ int key_down_target;
+ int key_down;
+ int key_up;
+ int restart_disabled;
+ int (*reset_fn)(void);
+};
+
+int restart_requested;
+static void deferred_restart(struct work_struct *dummy)
+{
+ restart_requested = 2;
+ sys_sync();
+ restart_requested = 3;
+ kernel_restart(NULL);
+}
+static DECLARE_WORK(restart_work, deferred_restart);
+
+static void keyreset_event(struct input_handle *handle, unsigned int type,
+ unsigned int code, int value)
+{
+ unsigned long flags;
+ struct keyreset_state *state = handle->private;
+
+ if (type != EV_KEY)
+ return;
+
+ if (code >= KEY_MAX)
+ return;
+
+ if (!test_bit(code, state->keybit))
+ return;
+
+ spin_lock_irqsave(&state->lock, flags);
+ if (!test_bit(code, state->key) == !value)
+ goto done;
+ __change_bit(code, state->key);
+ if (test_bit(code, state->upbit)) {
+ if (value) {
+ state->restart_disabled = 1;
+ state->key_up++;
+ } else
+ state->key_up--;
+ } else {
+ if (value)
+ state->key_down++;
+ else
+ state->key_down--;
+ }
+ if (state->key_down == 0 && state->key_up == 0)
+ state->restart_disabled = 0;
+
+ pr_debug("reset key changed %d %d new state %d-%d-%d\n", code, value,
+ state->key_down, state->key_up, state->restart_disabled);
+
+ if (value && !state->restart_disabled &&
+ state->key_down == state->key_down_target) {
+ state->restart_disabled = 1;
+ if (restart_requested)
+ panic("keyboard reset failed, %d", restart_requested);
+ if (state->reset_fn) {
+ restart_requested = state->reset_fn();
+ } else {
+ pr_info("keyboard reset\n");
+ schedule_work(&restart_work);
+ restart_requested = 1;
+ }
+ }
+done:
+ spin_unlock_irqrestore(&state->lock, flags);
+}
+
+static int keyreset_connect(struct input_handler *handler,
+ struct input_dev *dev,
+ const struct input_device_id *id)
+{
+ int i;
+ int ret;
+ struct input_handle *handle;
+ struct keyreset_state *state =
+ container_of(handler, struct keyreset_state, input_handler);
+
+ for (i = 0; i < KEY_MAX; i++) {
+ if (test_bit(i, state->keybit) && test_bit(i, dev->keybit))
+ break;
+ }
+ if (i == KEY_MAX)
+ return -ENODEV;
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ handle->dev = dev;
+ handle->handler = handler;
+ handle->name = "keyreset";
+ handle->private = state;
+
+ ret = input_register_handle(handle);
+ if (ret)
+ goto err_input_register_handle;
+
+ ret = input_open_device(handle);
+ if (ret)
+ goto err_input_open_device;
+
+ pr_info("using input dev %s for key reset\n", dev->name);
+
+ return 0;
+
+err_input_open_device:
+ input_unregister_handle(handle);
+err_input_register_handle:
+ kfree(handle);
+ return ret;
+}
+
+static void keyreset_disconnect(struct input_handle *handle)
+{
+ input_close_device(handle);
+ input_unregister_handle(handle);
+ kfree(handle);
+}
+
+static const struct input_device_id keyreset_ids[] = {
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+ .evbit = { BIT_MASK(EV_KEY) },
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(input, keyreset_ids);
+
+static int keyreset_probe(struct platform_device *pdev)
+{
+ int ret;
+ int key, *keyp;
+ struct keyreset_state *state;
+ struct keyreset_platform_data *pdata = pdev->dev.platform_data;
+
+ if (!pdata)
+ return -EINVAL;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ spin_lock_init(&state->lock);
+ keyp = pdata->keys_down;
+ while ((key = *keyp++)) {
+ if (key >= KEY_MAX)
+ continue;
+ state->key_down_target++;
+ __set_bit(key, state->keybit);
+ }
+ if (pdata->keys_up) {
+ keyp = pdata->keys_up;
+ while ((key = *keyp++)) {
+ if (key >= KEY_MAX)
+ continue;
+ __set_bit(key, state->keybit);
+ __set_bit(key, state->upbit);
+ }
+ }
+
+ if (pdata->reset_fn)
+ state->reset_fn = pdata->reset_fn;
+
+ state->input_handler.event = keyreset_event;
+ state->input_handler.connect = keyreset_connect;
+ state->input_handler.disconnect = keyreset_disconnect;
+ state->input_handler.name = KEYRESET_NAME;
+ state->input_handler.id_table = keyreset_ids;
+ ret = input_register_handler(&state->input_handler);
+ if (ret) {
+ kfree(state);
+ return ret;
+ }
+ platform_set_drvdata(pdev, state);
+ return 0;
+}
+
+int keyreset_remove(struct platform_device *pdev)
+{
+ struct keyreset_state *state = platform_get_drvdata(pdev);
+ input_unregister_handler(&state->input_handler);
+ kfree(state);
+ return 0;
+}
+
+
+struct platform_driver keyreset_driver = {
+ .driver.name = KEYRESET_NAME,
+ .probe = keyreset_probe,
+ .remove = keyreset_remove,
+};
+
+static int __init keyreset_init(void)
+{
+ return platform_driver_register(&keyreset_driver);
+}
+
+static void __exit keyreset_exit(void)
+{
+ return platform_driver_unregister(&keyreset_driver);
+}
+
+module_init(keyreset_init);
+module_exit(keyreset_exit);
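A hedged board-file sketch of how the keyreset driver above might be instantiated. The struct keyreset_platform_data layout is inferred from what keyreset_probe() dereferences (a 0-terminated keys_down array, an optional keys_up array and an optional reset_fn) and lives in <linux/keyreset.h>, which is not part of this patch; the key codes are illustrative.

/* Hypothetical board wiring for the keyreset driver above.  Field names
 * mirror what keyreset_probe() reads; the authoritative struct definition
 * is in <linux/keyreset.h>, not shown in this patch. */
#include <linux/input.h>
#include <linux/keyreset.h>
#include <linux/platform_device.h>

static int board_keyreset_keys_up[] = {
	KEY_VOLUMEUP,	/* holding this cancels the reset chord */
	0,		/* 0-terminated */
};

static struct keyreset_platform_data board_keyreset_data = {
	.keys_up = board_keyreset_keys_up,
	.keys_down = {	/* reboot when all of these are held together */
		KEY_POWER,
		KEY_VOLUMEDOWN,
		0,	/* 0-terminated */
	},
	/* .reset_fn = board_custom_reset,  optional override of kernel_restart() */
};

static struct platform_device board_keyreset_device = {
	.name = KEYRESET_NAME,
	.dev.platform_data = &board_keyreset_data,
};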
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 259ef31abb18..a9c07014d537 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -299,6 +299,17 @@ config INPUT_ATI_REMOTE2
To compile this driver as a module, choose M here: the module will be
called ati_remote2.
+config INPUT_KEYCHORD
+ tristate "Key chord input driver support"
+ help
+ Say Y here if you want to enable the key chord driver
+ accessible at /dev/keychord. This driver can be used
+ for receiving notifications when client-specified key
+ combinations are pressed.
+
+ To compile this driver as a module, choose M here: the
+ module will be called keychord.
+
config INPUT_KEYSPAN_REMOTE
tristate "Keyspan DMR USB remote control"
depends on USB_ARCH_HAS_HCD
@@ -434,6 +445,11 @@ config INPUT_SGI_BTNS
To compile this driver as a module, choose M here: the
module will be called sgi_btns.
+config INPUT_GPIO
+ tristate "GPIO driver support"
+ help
+ Say Y here if you want to support GPIO-based keys, wheels, etc.
+
config HP_SDC_RTC
tristate "HP SDC Real Time Clock"
depends on (GSC || HP300) && SERIO
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 1f1e1b109d9d..7b45826ab582 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -28,8 +28,10 @@ obj-$(CONFIG_INPUT_DA9055_ONKEY) += da9055_onkey.o
obj-$(CONFIG_INPUT_DM355EVM) += dm355evm_keys.o
obj-$(CONFIG_INPUT_GP2A) += gp2ap002a00f.o
obj-$(CONFIG_INPUT_GPIO_TILT_POLLED) += gpio_tilt_polled.o
+obj-$(CONFIG_INPUT_GPIO) += gpio_event.o gpio_matrix.o gpio_input.o gpio_output.o gpio_axis.o
obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
+obj-$(CONFIG_INPUT_KEYCHORD) += keychord.o
obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o
obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
diff --git a/drivers/input/misc/gpio_axis.c b/drivers/input/misc/gpio_axis.c
new file mode 100644
index 000000000000..0acf4a576f53
--- /dev/null
+++ b/drivers/input/misc/gpio_axis.c
@@ -0,0 +1,192 @@
+/* drivers/input/misc/gpio_axis.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+struct gpio_axis_state {
+ struct gpio_event_input_devs *input_devs;
+ struct gpio_event_axis_info *info;
+ uint32_t pos;
+};
+
+uint16_t gpio_axis_4bit_gray_map_table[] = {
+ [0x0] = 0x0, [0x1] = 0x1, /* 0000 0001 */
+ [0x3] = 0x2, [0x2] = 0x3, /* 0011 0010 */
+ [0x6] = 0x4, [0x7] = 0x5, /* 0110 0111 */
+ [0x5] = 0x6, [0x4] = 0x7, /* 0101 0100 */
+ [0xc] = 0x8, [0xd] = 0x9, /* 1100 1101 */
+ [0xf] = 0xa, [0xe] = 0xb, /* 1111 1110 */
+ [0xa] = 0xc, [0xb] = 0xd, /* 1010 1011 */
+ [0x9] = 0xe, [0x8] = 0xf, /* 1001 1000 */
+};
+uint16_t gpio_axis_4bit_gray_map(struct gpio_event_axis_info *info, uint16_t in)
+{
+ return gpio_axis_4bit_gray_map_table[in];
+}
+
+uint16_t gpio_axis_5bit_singletrack_map_table[] = {
+ [0x10] = 0x00, [0x14] = 0x01, [0x1c] = 0x02, /* 10000 10100 11100 */
+ [0x1e] = 0x03, [0x1a] = 0x04, [0x18] = 0x05, /* 11110 11010 11000 */
+ [0x08] = 0x06, [0x0a] = 0x07, [0x0e] = 0x08, /* 01000 01010 01110 */
+ [0x0f] = 0x09, [0x0d] = 0x0a, [0x0c] = 0x0b, /* 01111 01101 01100 */
+ [0x04] = 0x0c, [0x05] = 0x0d, [0x07] = 0x0e, /* 00100 00101 00111 */
+ [0x17] = 0x0f, [0x16] = 0x10, [0x06] = 0x11, /* 10111 10110 00110 */
+ [0x02] = 0x12, [0x12] = 0x13, [0x13] = 0x14, /* 00010 10010 10011 */
+ [0x1b] = 0x15, [0x0b] = 0x16, [0x03] = 0x17, /* 11011 01011 00011 */
+ [0x01] = 0x18, [0x09] = 0x19, [0x19] = 0x1a, /* 00001 01001 11001 */
+ [0x1d] = 0x1b, [0x15] = 0x1c, [0x11] = 0x1d, /* 11101 10101 10001 */
+};
+uint16_t gpio_axis_5bit_singletrack_map(
+ struct gpio_event_axis_info *info, uint16_t in)
+{
+ return gpio_axis_5bit_singletrack_map_table[in];
+}
+
+static void gpio_event_update_axis(struct gpio_axis_state *as, int report)
+{
+ struct gpio_event_axis_info *ai = as->info;
+ int i;
+ int change;
+ uint16_t state = 0;
+ uint16_t pos;
+ uint16_t old_pos = as->pos;
+ for (i = ai->count - 1; i >= 0; i--)
+ state = (state << 1) | gpio_get_value(ai->gpio[i]);
+ pos = ai->map(ai, state);
+ if (ai->flags & GPIOEAF_PRINT_RAW)
+ pr_info("axis %d-%d raw %x, pos %d -> %d\n",
+ ai->type, ai->code, state, old_pos, pos);
+ if (report && pos != old_pos) {
+ if (ai->type == EV_REL) {
+ change = (ai->decoded_size + pos - old_pos) %
+ ai->decoded_size;
+ if (change > ai->decoded_size / 2)
+ change -= ai->decoded_size;
+ if (change == ai->decoded_size / 2) {
+ if (ai->flags & GPIOEAF_PRINT_EVENT)
+ pr_info("axis %d-%d unknown direction, "
+ "pos %d -> %d\n", ai->type,
+ ai->code, old_pos, pos);
+ change = 0; /* no closest direction */
+ }
+ if (ai->flags & GPIOEAF_PRINT_EVENT)
+ pr_info("axis %d-%d change %d\n",
+ ai->type, ai->code, change);
+ input_report_rel(as->input_devs->dev[ai->dev],
+ ai->code, change);
+ } else {
+ if (ai->flags & GPIOEAF_PRINT_EVENT)
+ pr_info("axis %d-%d now %d\n",
+ ai->type, ai->code, pos);
+ input_event(as->input_devs->dev[ai->dev],
+ ai->type, ai->code, pos);
+ }
+ input_sync(as->input_devs->dev[ai->dev]);
+ }
+ as->pos = pos;
+}
+
+static irqreturn_t gpio_axis_irq_handler(int irq, void *dev_id)
+{
+ struct gpio_axis_state *as = dev_id;
+ gpio_event_update_axis(as, 1);
+ return IRQ_HANDLED;
+}
+
+int gpio_event_axis_func(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info, void **data, int func)
+{
+ int ret;
+ int i;
+ int irq;
+ struct gpio_event_axis_info *ai;
+ struct gpio_axis_state *as;
+
+ ai = container_of(info, struct gpio_event_axis_info, info);
+ if (func == GPIO_EVENT_FUNC_SUSPEND) {
+ for (i = 0; i < ai->count; i++)
+ disable_irq(gpio_to_irq(ai->gpio[i]));
+ return 0;
+ }
+ if (func == GPIO_EVENT_FUNC_RESUME) {
+ for (i = 0; i < ai->count; i++)
+ enable_irq(gpio_to_irq(ai->gpio[i]));
+ return 0;
+ }
+
+ if (func == GPIO_EVENT_FUNC_INIT) {
+ *data = as = kmalloc(sizeof(*as), GFP_KERNEL);
+ if (as == NULL) {
+ ret = -ENOMEM;
+ goto err_alloc_axis_state_failed;
+ }
+ as->input_devs = input_devs;
+ as->info = ai;
+ if (ai->dev >= input_devs->count) {
+ pr_err("gpio_event_axis: bad device index %d >= %d "
+ "for %d:%d\n", ai->dev, input_devs->count,
+ ai->type, ai->code);
+ ret = -EINVAL;
+ goto err_bad_device_index;
+ }
+
+ input_set_capability(input_devs->dev[ai->dev],
+ ai->type, ai->code);
+ if (ai->type == EV_ABS) {
+ input_set_abs_params(input_devs->dev[ai->dev], ai->code,
+ 0, ai->decoded_size - 1, 0, 0);
+ }
+ for (i = 0; i < ai->count; i++) {
+ ret = gpio_request(ai->gpio[i], "gpio_event_axis");
+ if (ret < 0)
+ goto err_request_gpio_failed;
+ ret = gpio_direction_input(ai->gpio[i]);
+ if (ret < 0)
+ goto err_gpio_direction_input_failed;
+ ret = irq = gpio_to_irq(ai->gpio[i]);
+ if (ret < 0)
+ goto err_get_irq_num_failed;
+ ret = request_irq(irq, gpio_axis_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ "gpio_event_axis", as);
+ if (ret < 0)
+ goto err_request_irq_failed;
+ }
+ gpio_event_update_axis(as, 0);
+ return 0;
+ }
+
+ ret = 0;
+ as = *data;
+ for (i = ai->count - 1; i >= 0; i--) {
+ free_irq(gpio_to_irq(ai->gpio[i]), as);
+err_request_irq_failed:
+err_get_irq_num_failed:
+err_gpio_direction_input_failed:
+ gpio_free(ai->gpio[i]);
+err_request_gpio_failed:
+ ;
+ }
+err_bad_device_index:
+ kfree(as);
+ *data = NULL;
+err_alloc_axis_state_failed:
+ return ret;
+}
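A hedged sketch of an axis descriptor the decoder above could consume. Field names mirror what gpio_event_axis_func() reads; the exact struct gpio_event_axis_info definition is in <linux/gpio_event.h> (not part of this patch) and the GPIO numbers are illustrative.

/* Hypothetical 4-bit gray-code scroll wheel fed to gpio_event_axis_func().
 * Exact types live in <linux/gpio_event.h>; GPIOs are illustrative only. */
static uint32_t board_wheel_gpios[] = { 40, 41, 42, 43 };

static struct gpio_event_axis_info board_wheel_info = {
	.info.func	= gpio_event_axis_func,
	.count		= ARRAY_SIZE(board_wheel_gpios),
	.gpio		= board_wheel_gpios,
	.map		= gpio_axis_4bit_gray_map,	/* table defined above */
	.decoded_size	= 16,		/* positions per full gray-code cycle */
	.type		= EV_REL,
	.code		= REL_WHEEL,
	.flags		= GPIOEAF_PRINT_EVENT,		/* bring-up logging */
};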
diff --git a/drivers/input/misc/gpio_event.c b/drivers/input/misc/gpio_event.c
new file mode 100644
index 000000000000..4314cf096aa8
--- /dev/null
+++ b/drivers/input/misc/gpio_event.c
@@ -0,0 +1,239 @@
+/* drivers/input/misc/gpio_event.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct gpio_event {
+ struct gpio_event_input_devs *input_devs;
+ const struct gpio_event_platform_data *info;
+ void *state[0];
+};
+
+static int gpio_input_event(
+ struct input_dev *dev, unsigned int type, unsigned int code, int value)
+{
+ int i;
+ int devnr;
+ int ret = 0;
+ int tmp_ret;
+ struct gpio_event_info **ii;
+ struct gpio_event *ip = input_get_drvdata(dev);
+
+ for (devnr = 0; devnr < ip->input_devs->count; devnr++)
+ if (ip->input_devs->dev[devnr] == dev)
+ break;
+ if (devnr == ip->input_devs->count) {
+ pr_err("gpio_input_event: unknown device %p\n", dev);
+ return -EIO;
+ }
+
+ for (i = 0, ii = ip->info->info; i < ip->info->info_count; i++, ii++) {
+ if ((*ii)->event) {
+ tmp_ret = (*ii)->event(ip->input_devs, *ii,
+ &ip->state[i],
+ devnr, type, code, value);
+ if (tmp_ret)
+ ret = tmp_ret;
+ }
+ }
+ return ret;
+}
+
+static int gpio_event_call_all_func(struct gpio_event *ip, int func)
+{
+ int i;
+ int ret;
+ struct gpio_event_info **ii;
+
+ if (func == GPIO_EVENT_FUNC_INIT || func == GPIO_EVENT_FUNC_RESUME) {
+ ii = ip->info->info;
+ for (i = 0; i < ip->info->info_count; i++, ii++) {
+ if ((*ii)->func == NULL) {
+ ret = -ENODEV;
+ pr_err("gpio_event_probe: Incomplete pdata, "
+ "no function\n");
+ goto err_no_func;
+ }
+ if (func == GPIO_EVENT_FUNC_RESUME && (*ii)->no_suspend)
+ continue;
+ ret = (*ii)->func(ip->input_devs, *ii, &ip->state[i],
+ func);
+ if (ret) {
+ pr_err("gpio_event_probe: function failed\n");
+ goto err_func_failed;
+ }
+ }
+ return 0;
+ }
+
+ ret = 0;
+ i = ip->info->info_count;
+ ii = ip->info->info + i;
+ while (i > 0) {
+ i--;
+ ii--;
+ if ((func & ~1) == GPIO_EVENT_FUNC_SUSPEND && (*ii)->no_suspend)
+ continue;
+ (*ii)->func(ip->input_devs, *ii, &ip->state[i], func & ~1);
+err_func_failed:
+err_no_func:
+ ;
+ }
+ return ret;
+}
+
+static void __maybe_unused gpio_event_suspend(struct gpio_event *ip)
+{
+ gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_SUSPEND);
+ if (ip->info->power)
+ ip->info->power(ip->info, 0);
+}
+
+static void __maybe_unused gpio_event_resume(struct gpio_event *ip)
+{
+ if (ip->info->power)
+ ip->info->power(ip->info, 1);
+ gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_RESUME);
+}
+
+static int gpio_event_probe(struct platform_device *pdev)
+{
+ int err;
+ struct gpio_event *ip;
+ struct gpio_event_platform_data *event_info;
+ int dev_count = 1;
+ int i;
+ int registered = 0;
+
+ event_info = pdev->dev.platform_data;
+ if (event_info == NULL) {
+ pr_err("gpio_event_probe: No pdata\n");
+ return -ENODEV;
+ }
+ if ((!event_info->name && !event_info->names[0]) ||
+ !event_info->info || !event_info->info_count) {
+ pr_err("gpio_event_probe: Incomplete pdata\n");
+ return -ENODEV;
+ }
+ if (!event_info->name)
+ while (event_info->names[dev_count])
+ dev_count++;
+ ip = kzalloc(sizeof(*ip) +
+ sizeof(ip->state[0]) * event_info->info_count +
+ sizeof(*ip->input_devs) +
+ sizeof(ip->input_devs->dev[0]) * dev_count, GFP_KERNEL);
+ if (ip == NULL) {
+ err = -ENOMEM;
+ pr_err("gpio_event_probe: Failed to allocate private data\n");
+ goto err_kp_alloc_failed;
+ }
+ ip->input_devs = (void*)&ip->state[event_info->info_count];
+ platform_set_drvdata(pdev, ip);
+
+ for (i = 0; i < dev_count; i++) {
+ struct input_dev *input_dev = input_allocate_device();
+ if (input_dev == NULL) {
+ err = -ENOMEM;
+ pr_err("gpio_event_probe: "
+ "Failed to allocate input device\n");
+ goto err_input_dev_alloc_failed;
+ }
+ input_set_drvdata(input_dev, ip);
+ input_dev->name = event_info->name ?
+ event_info->name : event_info->names[i];
+ input_dev->event = gpio_input_event;
+ ip->input_devs->dev[i] = input_dev;
+ }
+ ip->input_devs->count = dev_count;
+ ip->info = event_info;
+ if (event_info->power)
+ ip->info->power(ip->info, 1);
+
+ err = gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_INIT);
+ if (err)
+ goto err_call_all_func_failed;
+
+ for (i = 0; i < dev_count; i++) {
+ err = input_register_device(ip->input_devs->dev[i]);
+ if (err) {
+ pr_err("gpio_event_probe: Unable to register %s "
+ "input device\n", ip->input_devs->dev[i]->name);
+ goto err_input_register_device_failed;
+ }
+ registered++;
+ }
+
+ return 0;
+
+err_input_register_device_failed:
+ gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT);
+err_call_all_func_failed:
+ if (event_info->power)
+ ip->info->power(ip->info, 0);
+ for (i = 0; i < registered; i++)
+ input_unregister_device(ip->input_devs->dev[i]);
+ for (i = dev_count - 1; i >= registered; i--) {
+ input_free_device(ip->input_devs->dev[i]);
+err_input_dev_alloc_failed:
+ ;
+ }
+ kfree(ip);
+err_kp_alloc_failed:
+ return err;
+}
+
+static int gpio_event_remove(struct platform_device *pdev)
+{
+ struct gpio_event *ip = platform_get_drvdata(pdev);
+ int i;
+
+ gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT);
+ if (ip->info->power)
+ ip->info->power(ip->info, 0);
+ for (i = 0; i < ip->input_devs->count; i++)
+ input_unregister_device(ip->input_devs->dev[i]);
+ kfree(ip);
+ return 0;
+}
+
+static struct platform_driver gpio_event_driver = {
+ .probe = gpio_event_probe,
+ .remove = gpio_event_remove,
+ .driver = {
+ .name = GPIO_EVENT_DEV_NAME,
+ },
+};
+
+static int gpio_event_init(void)
+{
+ return platform_driver_register(&gpio_event_driver);
+}
+
+static void __exit gpio_event_exit(void)
+{
+ platform_driver_unregister(&gpio_event_driver);
+}
+
+module_init(gpio_event_init);
+module_exit(gpio_event_exit);
+
+MODULE_DESCRIPTION("GPIO Event Driver");
+MODULE_LICENSE("GPL");
+
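A hedged sketch of the platform glue the probe above expects. struct gpio_event_platform_data and GPIO_EVENT_DEV_NAME come from <linux/gpio_event.h>, which is not part of this patch, and board_wheel_info stands in for whichever axis/input/matrix/output descriptors a board provides (those descriptor types are added by the files that follow).

/* Hypothetical board glue for the gpio_event core above.  Field names
 * mirror what gpio_event_probe() reads; the authoritative definitions
 * are in <linux/gpio_event.h>, which is not shown in this patch. */
static struct gpio_event_info *board_gpio_event_info[] = {
	&board_wheel_info.info,		/* e.g. the axis descriptor sketched earlier */
};

static struct gpio_event_platform_data board_gpio_event_data = {
	.name		= "board-gpio-input",	/* single input device */
	.info		= board_gpio_event_info,
	.info_count	= ARRAY_SIZE(board_gpio_event_info),
	/* .power = board_input_power,	optional callback to gate supplies */
};

static struct platform_device board_gpio_event_device = {
	.name = GPIO_EVENT_DEV_NAME,
	.dev.platform_data = &board_gpio_event_data,
};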
diff --git a/drivers/input/misc/gpio_input.c b/drivers/input/misc/gpio_input.c
new file mode 100644
index 000000000000..6a0c31510968
--- /dev/null
+++ b/drivers/input/misc/gpio_input.c
@@ -0,0 +1,376 @@
+/* drivers/input/misc/gpio_input.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/wakelock.h>
+
+enum {
+ DEBOUNCE_UNSTABLE = BIT(0), /* Got irq, while debouncing */
+ DEBOUNCE_PRESSED = BIT(1),
+ DEBOUNCE_NOTPRESSED = BIT(2),
+ DEBOUNCE_WAIT_IRQ = BIT(3), /* Stable irq state */
+ DEBOUNCE_POLL = BIT(4), /* Stable polling state */
+
+ DEBOUNCE_UNKNOWN =
+ DEBOUNCE_PRESSED | DEBOUNCE_NOTPRESSED,
+};
+
+struct gpio_key_state {
+ struct gpio_input_state *ds;
+ uint8_t debounce;
+};
+
+struct gpio_input_state {
+ struct gpio_event_input_devs *input_devs;
+ const struct gpio_event_input_info *info;
+ struct hrtimer timer;
+ int use_irq;
+ int debounce_count;
+ spinlock_t irq_lock;
+ struct wake_lock wake_lock;
+ struct gpio_key_state key_state[0];
+};
+
+static enum hrtimer_restart gpio_event_input_timer_func(struct hrtimer *timer)
+{
+ int i;
+ int pressed;
+ struct gpio_input_state *ds =
+ container_of(timer, struct gpio_input_state, timer);
+ unsigned gpio_flags = ds->info->flags;
+ unsigned npolarity;
+ int nkeys = ds->info->keymap_size;
+ const struct gpio_event_direct_entry *key_entry;
+ struct gpio_key_state *key_state;
+ unsigned long irqflags;
+ uint8_t debounce;
+ bool sync_needed;
+
+#if 0
+ key_entry = kp->keys_info->keymap;
+ key_state = kp->key_state;
+ for (i = 0; i < nkeys; i++, key_entry++, key_state++)
+ pr_info("gpio_read_detect_status %d %d\n", key_entry->gpio,
+ gpio_read_detect_status(key_entry->gpio));
+#endif
+ key_entry = ds->info->keymap;
+ key_state = ds->key_state;
+ sync_needed = false;
+ spin_lock_irqsave(&ds->irq_lock, irqflags);
+ for (i = 0; i < nkeys; i++, key_entry++, key_state++) {
+ debounce = key_state->debounce;
+ if (debounce & DEBOUNCE_WAIT_IRQ)
+ continue;
+ if (key_state->debounce & DEBOUNCE_UNSTABLE) {
+ debounce = key_state->debounce = DEBOUNCE_UNKNOWN;
+ enable_irq(gpio_to_irq(key_entry->gpio));
+ if (gpio_flags & GPIOEDF_PRINT_KEY_UNSTABLE)
+ pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+ "(%d) continue debounce\n",
+ ds->info->type, key_entry->code,
+ i, key_entry->gpio);
+ }
+ npolarity = !(gpio_flags & GPIOEDF_ACTIVE_HIGH);
+ pressed = gpio_get_value(key_entry->gpio) ^ npolarity;
+ if (debounce & DEBOUNCE_POLL) {
+ if (pressed == !(debounce & DEBOUNCE_PRESSED)) {
+ ds->debounce_count++;
+ key_state->debounce = DEBOUNCE_UNKNOWN;
+ if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+ pr_info("gpio_keys_scan_keys: key %x-"
+ "%x, %d (%d) start debounce\n",
+ ds->info->type, key_entry->code,
+ i, key_entry->gpio);
+ }
+ continue;
+ }
+ if (pressed && (debounce & DEBOUNCE_NOTPRESSED)) {
+ if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+ pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+ "(%d) debounce pressed 1\n",
+ ds->info->type, key_entry->code,
+ i, key_entry->gpio);
+ key_state->debounce = DEBOUNCE_PRESSED;
+ continue;
+ }
+ if (!pressed && (debounce & DEBOUNCE_PRESSED)) {
+ if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+ pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+ "(%d) debounce pressed 0\n",
+ ds->info->type, key_entry->code,
+ i, key_entry->gpio);
+ key_state->debounce = DEBOUNCE_NOTPRESSED;
+ continue;
+ }
+ /* key is stable */
+ ds->debounce_count--;
+ if (ds->use_irq)
+ key_state->debounce |= DEBOUNCE_WAIT_IRQ;
+ else
+ key_state->debounce |= DEBOUNCE_POLL;
+ if (gpio_flags & GPIOEDF_PRINT_KEYS)
+ pr_info("gpio_keys_scan_keys: key %x-%x, %d (%d) "
+ "changed to %d\n", ds->info->type,
+ key_entry->code, i, key_entry->gpio, pressed);
+ input_event(ds->input_devs->dev[key_entry->dev], ds->info->type,
+ key_entry->code, pressed);
+ sync_needed = true;
+ }
+ if (sync_needed) {
+ for (i = 0; i < ds->input_devs->count; i++)
+ input_sync(ds->input_devs->dev[i]);
+ }
+
+#if 0
+ key_entry = kp->keys_info->keymap;
+ key_state = kp->key_state;
+ for (i = 0; i < nkeys; i++, key_entry++, key_state++) {
+ pr_info("gpio_read_detect_status %d %d\n", key_entry->gpio,
+ gpio_read_detect_status(key_entry->gpio));
+ }
+#endif
+
+ if (ds->debounce_count)
+ hrtimer_start(timer, ds->info->debounce_time, HRTIMER_MODE_REL);
+ else if (!ds->use_irq)
+ hrtimer_start(timer, ds->info->poll_time, HRTIMER_MODE_REL);
+ else
+ wake_unlock(&ds->wake_lock);
+
+ spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+
+ return HRTIMER_NORESTART;
+}
+
+static irqreturn_t gpio_event_input_irq_handler(int irq, void *dev_id)
+{
+ struct gpio_key_state *ks = dev_id;
+ struct gpio_input_state *ds = ks->ds;
+ int keymap_index = ks - ds->key_state;
+ const struct gpio_event_direct_entry *key_entry;
+ unsigned long irqflags;
+ int pressed;
+
+ if (!ds->use_irq)
+ return IRQ_HANDLED;
+
+ key_entry = &ds->info->keymap[keymap_index];
+
+ if (ds->info->debounce_time.tv64) {
+ spin_lock_irqsave(&ds->irq_lock, irqflags);
+ if (ks->debounce & DEBOUNCE_WAIT_IRQ) {
+ ks->debounce = DEBOUNCE_UNKNOWN;
+ if (ds->debounce_count++ == 0) {
+ wake_lock(&ds->wake_lock);
+ hrtimer_start(
+ &ds->timer, ds->info->debounce_time,
+ HRTIMER_MODE_REL);
+ }
+ if (ds->info->flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+ pr_info("gpio_event_input_irq_handler: "
+ "key %x-%x, %d (%d) start debounce\n",
+ ds->info->type, key_entry->code,
+ keymap_index, key_entry->gpio);
+ } else {
+ disable_irq_nosync(irq);
+ ks->debounce = DEBOUNCE_UNSTABLE;
+ }
+ spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+ } else {
+ pressed = gpio_get_value(key_entry->gpio) ^
+ !(ds->info->flags & GPIOEDF_ACTIVE_HIGH);
+ if (ds->info->flags & GPIOEDF_PRINT_KEYS)
+ pr_info("gpio_event_input_irq_handler: key %x-%x, %d "
+ "(%d) changed to %d\n",
+ ds->info->type, key_entry->code, keymap_index,
+ key_entry->gpio, pressed);
+ input_event(ds->input_devs->dev[key_entry->dev], ds->info->type,
+ key_entry->code, pressed);
+ input_sync(ds->input_devs->dev[key_entry->dev]);
+ }
+ return IRQ_HANDLED;
+}
+
+static int gpio_event_input_request_irqs(struct gpio_input_state *ds)
+{
+ int i;
+ int err;
+ unsigned int irq;
+ unsigned long req_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
+
+ for (i = 0; i < ds->info->keymap_size; i++) {
+ err = irq = gpio_to_irq(ds->info->keymap[i].gpio);
+ if (err < 0)
+ goto err_gpio_get_irq_num_failed;
+ err = request_irq(irq, gpio_event_input_irq_handler,
+ req_flags, "gpio_keys", &ds->key_state[i]);
+ if (err) {
+ pr_err("gpio_event_input_request_irqs: request_irq "
+ "failed for input %d, irq %d\n",
+ ds->info->keymap[i].gpio, irq);
+ goto err_request_irq_failed;
+ }
+ if (ds->info->info.no_suspend) {
+ err = enable_irq_wake(irq);
+ if (err) {
+ pr_err("gpio_event_input_request_irqs: "
+ "enable_irq_wake failed for input %d, "
+ "irq %d\n",
+ ds->info->keymap[i].gpio, irq);
+ goto err_enable_irq_wake_failed;
+ }
+ }
+ }
+ return 0;
+
+ for (i = ds->info->keymap_size - 1; i >= 0; i--) {
+ irq = gpio_to_irq(ds->info->keymap[i].gpio);
+ if (ds->info->info.no_suspend)
+ disable_irq_wake(irq);
+err_enable_irq_wake_failed:
+ free_irq(irq, &ds->key_state[i]);
+err_request_irq_failed:
+err_gpio_get_irq_num_failed:
+ ;
+ }
+ return err;
+}
+
+int gpio_event_input_func(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info, void **data, int func)
+{
+ int ret;
+ int i;
+ unsigned long irqflags;
+ struct gpio_event_input_info *di;
+ struct gpio_input_state *ds = *data;
+
+ di = container_of(info, struct gpio_event_input_info, info);
+
+ if (func == GPIO_EVENT_FUNC_SUSPEND) {
+ if (ds->use_irq)
+ for (i = 0; i < di->keymap_size; i++)
+ disable_irq(gpio_to_irq(di->keymap[i].gpio));
+ hrtimer_cancel(&ds->timer);
+ return 0;
+ }
+ if (func == GPIO_EVENT_FUNC_RESUME) {
+ spin_lock_irqsave(&ds->irq_lock, irqflags);
+ if (ds->use_irq)
+ for (i = 0; i < di->keymap_size; i++)
+ enable_irq(gpio_to_irq(di->keymap[i].gpio));
+ hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+ spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+ return 0;
+ }
+
+ if (func == GPIO_EVENT_FUNC_INIT) {
+ if (ktime_to_ns(di->poll_time) <= 0)
+ di->poll_time = ktime_set(0, 20 * NSEC_PER_MSEC);
+
+ *data = ds = kzalloc(sizeof(*ds) + sizeof(ds->key_state[0]) *
+ di->keymap_size, GFP_KERNEL);
+ if (ds == NULL) {
+ ret = -ENOMEM;
+ pr_err("gpio_event_input_func: "
+ "Failed to allocate private data\n");
+ goto err_ds_alloc_failed;
+ }
+ ds->debounce_count = di->keymap_size;
+ ds->input_devs = input_devs;
+ ds->info = di;
+ wake_lock_init(&ds->wake_lock, WAKE_LOCK_SUSPEND, "gpio_input");
+ spin_lock_init(&ds->irq_lock);
+
+ for (i = 0; i < di->keymap_size; i++) {
+ int dev = di->keymap[i].dev;
+ if (dev >= input_devs->count) {
+ pr_err("gpio_event_input_func: bad device "
+ "index %d >= %d for key code %d\n",
+ dev, input_devs->count,
+ di->keymap[i].code);
+ ret = -EINVAL;
+ goto err_bad_keymap;
+ }
+ input_set_capability(input_devs->dev[dev], di->type,
+ di->keymap[i].code);
+ ds->key_state[i].ds = ds;
+ ds->key_state[i].debounce = DEBOUNCE_UNKNOWN;
+ }
+
+ for (i = 0; i < di->keymap_size; i++) {
+ ret = gpio_request(di->keymap[i].gpio, "gpio_kp_in");
+ if (ret) {
+ pr_err("gpio_event_input_func: gpio_request "
+ "failed for %d\n", di->keymap[i].gpio);
+ goto err_gpio_request_failed;
+ }
+ ret = gpio_direction_input(di->keymap[i].gpio);
+ if (ret) {
+ pr_err("gpio_event_input_func: "
+ "gpio_direction_input failed for %d\n",
+ di->keymap[i].gpio);
+ goto err_gpio_configure_failed;
+ }
+ }
+
+ ret = gpio_event_input_request_irqs(ds);
+
+ spin_lock_irqsave(&ds->irq_lock, irqflags);
+ ds->use_irq = ret == 0;
+
+ pr_info("GPIO Input Driver: Start gpio inputs for %s%s in %s "
+ "mode\n", input_devs->dev[0]->name,
+ (input_devs->count > 1) ? "..." : "",
+ ret == 0 ? "interrupt" : "polling");
+
+ hrtimer_init(&ds->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ds->timer.function = gpio_event_input_timer_func;
+ hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+ spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+ return 0;
+ }
+
+ ret = 0;
+ spin_lock_irqsave(&ds->irq_lock, irqflags);
+ hrtimer_cancel(&ds->timer);
+ if (ds->use_irq) {
+ for (i = di->keymap_size - 1; i >= 0; i--) {
+ int irq = gpio_to_irq(di->keymap[i].gpio);
+ if (ds->info->info.no_suspend)
+ disable_irq_wake(irq);
+ free_irq(irq, &ds->key_state[i]);
+ }
+ }
+ spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+
+ for (i = di->keymap_size - 1; i >= 0; i--) {
+err_gpio_configure_failed:
+ gpio_free(di->keymap[i].gpio);
+err_gpio_request_failed:
+ ;
+ }
+err_bad_keymap:
+ wake_lock_destroy(&ds->wake_lock);
+ kfree(ds);
+err_ds_alloc_failed:
+ return ret;
+}
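A hedged sketch of a direct-key descriptor for the debounce/poll code above. Field names mirror what gpio_event_input_func() reads; the exact layouts are in <linux/gpio_event.h> (not part of this patch) and the GPIO numbers and key codes are illustrative.

/* Hypothetical direct-key map consumed by gpio_event_input_func().
 * Exact struct layouts live in <linux/gpio_event.h>; the GPIO numbers
 * and key codes below are illustrative only. */
static struct gpio_event_direct_entry board_key_map[] = {
	{ .gpio = 94, .code = KEY_VOLUMEUP },
	{ .gpio = 95, .code = KEY_VOLUMEDOWN },
};

static struct gpio_event_input_info board_keys_info = {
	.info.func		= gpio_event_input_func,
	.info.no_suspend	= true,		/* keep key IRQs armed as wakeups */
	.type			= EV_KEY,
	.keymap			= board_key_map,
	.keymap_size		= ARRAY_SIZE(board_key_map),
	.debounce_time.tv64	= 20 * NSEC_PER_MSEC,
	/* .flags = GPIOEDF_PRINT_KEYS,	bring-up logging */
};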
diff --git a/drivers/input/misc/gpio_matrix.c b/drivers/input/misc/gpio_matrix.c
new file mode 100644
index 000000000000..eaa9e89d473a
--- /dev/null
+++ b/drivers/input/misc/gpio_matrix.c
@@ -0,0 +1,441 @@
+/* drivers/input/misc/gpio_matrix.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/wakelock.h>
+
+struct gpio_kp {
+ struct gpio_event_input_devs *input_devs;
+ struct gpio_event_matrix_info *keypad_info;
+ struct hrtimer timer;
+ struct wake_lock wake_lock;
+ int current_output;
+ unsigned int use_irq:1;
+ unsigned int key_state_changed:1;
+ unsigned int last_key_state_changed:1;
+ unsigned int some_keys_pressed:2;
+ unsigned int disabled_irq:1;
+ unsigned long keys_pressed[0];
+};
+
+static void clear_phantom_key(struct gpio_kp *kp, int out, int in)
+{
+ struct gpio_event_matrix_info *mi = kp->keypad_info;
+ int key_index = out * mi->ninputs + in;
+ unsigned short keyentry = mi->keymap[key_index];
+ unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+ unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+
+ if (!test_bit(keycode, kp->input_devs->dev[dev]->key)) {
+ if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS)
+ pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) "
+ "cleared\n", keycode, out, in,
+ mi->output_gpios[out], mi->input_gpios[in]);
+ __clear_bit(key_index, kp->keys_pressed);
+ } else {
+ if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS)
+ pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) "
+ "not cleared\n", keycode, out, in,
+ mi->output_gpios[out], mi->input_gpios[in]);
+ }
+}
+
+static int restore_keys_for_input(struct gpio_kp *kp, int out, int in)
+{
+ int rv = 0;
+ int key_index;
+
+ key_index = out * kp->keypad_info->ninputs + in;
+ while (out < kp->keypad_info->noutputs) {
+ if (test_bit(key_index, kp->keys_pressed)) {
+ rv = 1;
+ clear_phantom_key(kp, out, in);
+ }
+ key_index += kp->keypad_info->ninputs;
+ out++;
+ }
+ return rv;
+}
+
+static void remove_phantom_keys(struct gpio_kp *kp)
+{
+ int out, in, inp;
+ int key_index;
+
+ if (kp->some_keys_pressed < 3)
+ return;
+
+ for (out = 0; out < kp->keypad_info->noutputs; out++) {
+ inp = -1;
+ key_index = out * kp->keypad_info->ninputs;
+ for (in = 0; in < kp->keypad_info->ninputs; in++, key_index++) {
+ if (test_bit(key_index, kp->keys_pressed)) {
+ if (inp == -1) {
+ inp = in;
+ continue;
+ }
+ if (inp >= 0) {
+ if (!restore_keys_for_input(kp, out + 1,
+ inp))
+ break;
+ clear_phantom_key(kp, out, inp);
+ inp = -2;
+ }
+ restore_keys_for_input(kp, out, in);
+ }
+ }
+ }
+}
+
+static void report_key(struct gpio_kp *kp, int key_index, int out, int in)
+{
+ struct gpio_event_matrix_info *mi = kp->keypad_info;
+ int pressed = test_bit(key_index, kp->keys_pressed);
+ unsigned short keyentry = mi->keymap[key_index];
+ unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+ unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+
+ if (pressed != test_bit(keycode, kp->input_devs->dev[dev]->key)) {
+ if (keycode == KEY_RESERVED) {
+ if (mi->flags & GPIOKPF_PRINT_UNMAPPED_KEYS)
+ pr_info("gpiomatrix: unmapped key, %d-%d "
+ "(%d-%d) changed to %d\n",
+ out, in, mi->output_gpios[out],
+ mi->input_gpios[in], pressed);
+ } else {
+ if (mi->flags & GPIOKPF_PRINT_MAPPED_KEYS)
+ pr_info("gpiomatrix: key %x, %d-%d (%d-%d) "
+ "changed to %d\n", keycode,
+ out, in, mi->output_gpios[out],
+ mi->input_gpios[in], pressed);
+ input_report_key(kp->input_devs->dev[dev], keycode, pressed);
+ }
+ }
+}
+
+static void report_sync(struct gpio_kp *kp)
+{
+ int i;
+
+ for (i = 0; i < kp->input_devs->count; i++)
+ input_sync(kp->input_devs->dev[i]);
+}
+
+static enum hrtimer_restart gpio_keypad_timer_func(struct hrtimer *timer)
+{
+ int out, in;
+ int key_index;
+ int gpio;
+ struct gpio_kp *kp = container_of(timer, struct gpio_kp, timer);
+ struct gpio_event_matrix_info *mi = kp->keypad_info;
+ unsigned gpio_keypad_flags = mi->flags;
+ unsigned polarity = !!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH);
+
+ out = kp->current_output;
+ if (out == mi->noutputs) {
+ out = 0;
+ kp->last_key_state_changed = kp->key_state_changed;
+ kp->key_state_changed = 0;
+ kp->some_keys_pressed = 0;
+ } else {
+ key_index = out * mi->ninputs;
+ for (in = 0; in < mi->ninputs; in++, key_index++) {
+ gpio = mi->input_gpios[in];
+ if (gpio_get_value(gpio) ^ !polarity) {
+ if (kp->some_keys_pressed < 3)
+ kp->some_keys_pressed++;
+ kp->key_state_changed |= !__test_and_set_bit(
+ key_index, kp->keys_pressed);
+ } else
+ kp->key_state_changed |= __test_and_clear_bit(
+ key_index, kp->keys_pressed);
+ }
+ gpio = mi->output_gpios[out];
+ if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+ gpio_set_value(gpio, !polarity);
+ else
+ gpio_direction_input(gpio);
+ out++;
+ }
+ kp->current_output = out;
+ if (out < mi->noutputs) {
+ gpio = mi->output_gpios[out];
+ if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+ gpio_set_value(gpio, polarity);
+ else
+ gpio_direction_output(gpio, polarity);
+ hrtimer_start(timer, mi->settle_time, HRTIMER_MODE_REL);
+ return HRTIMER_NORESTART;
+ }
+ if (gpio_keypad_flags & GPIOKPF_DEBOUNCE) {
+ if (kp->key_state_changed) {
+ hrtimer_start(&kp->timer, mi->debounce_delay,
+ HRTIMER_MODE_REL);
+ return HRTIMER_NORESTART;
+ }
+ kp->key_state_changed = kp->last_key_state_changed;
+ }
+ if (kp->key_state_changed) {
+ if (gpio_keypad_flags & GPIOKPF_REMOVE_SOME_PHANTOM_KEYS)
+ remove_phantom_keys(kp);
+ key_index = 0;
+ for (out = 0; out < mi->noutputs; out++)
+ for (in = 0; in < mi->ninputs; in++, key_index++)
+ report_key(kp, key_index, out, in);
+ report_sync(kp);
+ }
+ if (!kp->use_irq || kp->some_keys_pressed) {
+ hrtimer_start(timer, mi->poll_time, HRTIMER_MODE_REL);
+ return HRTIMER_NORESTART;
+ }
+
+ /* No keys are pressed, reenable interrupt */
+ for (out = 0; out < mi->noutputs; out++) {
+ if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+ gpio_set_value(mi->output_gpios[out], polarity);
+ else
+ gpio_direction_output(mi->output_gpios[out], polarity);
+ }
+ for (in = 0; in < mi->ninputs; in++)
+ enable_irq(gpio_to_irq(mi->input_gpios[in]));
+ wake_unlock(&kp->wake_lock);
+ return HRTIMER_NORESTART;
+}
+
+static irqreturn_t gpio_keypad_irq_handler(int irq_in, void *dev_id)
+{
+ int i;
+ struct gpio_kp *kp = dev_id;
+ struct gpio_event_matrix_info *mi = kp->keypad_info;
+ unsigned gpio_keypad_flags = mi->flags;
+
+ if (!kp->use_irq) {
+ /* ignore interrupt while registering the handler */
+ kp->disabled_irq = 1;
+ disable_irq_nosync(irq_in);
+ return IRQ_HANDLED;
+ }
+
+ for (i = 0; i < mi->ninputs; i++)
+ disable_irq_nosync(gpio_to_irq(mi->input_gpios[i]));
+ for (i = 0; i < mi->noutputs; i++) {
+ if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+ gpio_set_value(mi->output_gpios[i],
+ !(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH));
+ else
+ gpio_direction_input(mi->output_gpios[i]);
+ }
+ wake_lock(&kp->wake_lock);
+ hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+ return IRQ_HANDLED;
+}
+
+static int gpio_keypad_request_irqs(struct gpio_kp *kp)
+{
+ int i;
+ int err;
+ unsigned int irq;
+ unsigned long request_flags;
+ struct gpio_event_matrix_info *mi = kp->keypad_info;
+
+ switch (mi->flags & (GPIOKPF_ACTIVE_HIGH|GPIOKPF_LEVEL_TRIGGERED_IRQ)) {
+ default:
+ request_flags = IRQF_TRIGGER_FALLING;
+ break;
+ case GPIOKPF_ACTIVE_HIGH:
+ request_flags = IRQF_TRIGGER_RISING;
+ break;
+ case GPIOKPF_LEVEL_TRIGGERED_IRQ:
+ request_flags = IRQF_TRIGGER_LOW;
+ break;
+ case GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_ACTIVE_HIGH:
+ request_flags = IRQF_TRIGGER_HIGH;
+ break;
+ }
+
+ for (i = 0; i < mi->ninputs; i++) {
+ err = irq = gpio_to_irq(mi->input_gpios[i]);
+ if (err < 0)
+ goto err_gpio_get_irq_num_failed;
+ err = request_irq(irq, gpio_keypad_irq_handler, request_flags,
+ "gpio_kp", kp);
+ if (err) {
+ pr_err("gpiomatrix: request_irq failed for input %d, "
+ "irq %d\n", mi->input_gpios[i], irq);
+ goto err_request_irq_failed;
+ }
+ err = enable_irq_wake(irq);
+ if (err) {
+ pr_err("gpiomatrix: set_irq_wake failed for input %d, "
+ "irq %d\n", mi->input_gpios[i], irq);
+ }
+ disable_irq(irq);
+ if (kp->disabled_irq) {
+ kp->disabled_irq = 0;
+ enable_irq(irq);
+ }
+ }
+ return 0;
+
+ for (i = mi->noutputs - 1; i >= 0; i--) {
+ free_irq(gpio_to_irq(mi->input_gpios[i]), kp);
+err_request_irq_failed:
+err_gpio_get_irq_num_failed:
+ ;
+ }
+ return err;
+}
+
+int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info, void **data, int func)
+{
+ int i;
+ int err;
+ int key_count;
+ struct gpio_kp *kp;
+ struct gpio_event_matrix_info *mi;
+
+ mi = container_of(info, struct gpio_event_matrix_info, info);
+ if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME) {
+ /* TODO: disable scanning */
+ return 0;
+ }
+
+ if (func == GPIO_EVENT_FUNC_INIT) {
+ if (mi->keymap == NULL ||
+ mi->input_gpios == NULL ||
+ mi->output_gpios == NULL) {
+ err = -ENODEV;
+ pr_err("gpiomatrix: Incomplete pdata\n");
+ goto err_invalid_platform_data;
+ }
+ key_count = mi->ninputs * mi->noutputs;
+
+ *data = kp = kzalloc(sizeof(*kp) + sizeof(kp->keys_pressed[0]) *
+ BITS_TO_LONGS(key_count), GFP_KERNEL);
+ if (kp == NULL) {
+ err = -ENOMEM;
+ pr_err("gpiomatrix: Failed to allocate private data\n");
+ goto err_kp_alloc_failed;
+ }
+ kp->input_devs = input_devs;
+ kp->keypad_info = mi;
+ for (i = 0; i < key_count; i++) {
+ unsigned short keyentry = mi->keymap[i];
+ unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+ unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+ if (dev >= input_devs->count) {
+ pr_err("gpiomatrix: bad device index %d >= "
+ "%d for key code %d\n",
+ dev, input_devs->count, keycode);
+ err = -EINVAL;
+ goto err_bad_keymap;
+ }
+ if (keycode && keycode <= KEY_MAX)
+ input_set_capability(input_devs->dev[dev],
+ EV_KEY, keycode);
+ }
+
+ for (i = 0; i < mi->noutputs; i++) {
+ err = gpio_request(mi->output_gpios[i], "gpio_kp_out");
+ if (err) {
+ pr_err("gpiomatrix: gpio_request failed for "
+ "output %d\n", mi->output_gpios[i]);
+ goto err_request_output_gpio_failed;
+ }
+ if (gpio_cansleep(mi->output_gpios[i])) {
+ pr_err("gpiomatrix: unsupported output gpio %d,"
+ " can sleep\n", mi->output_gpios[i]);
+ err = -EINVAL;
+ goto err_output_gpio_configure_failed;
+ }
+ if (mi->flags & GPIOKPF_DRIVE_INACTIVE)
+ err = gpio_direction_output(mi->output_gpios[i],
+ !(mi->flags & GPIOKPF_ACTIVE_HIGH));
+ else
+ err = gpio_direction_input(mi->output_gpios[i]);
+ if (err) {
+ pr_err("gpiomatrix: gpio_configure failed for "
+ "output %d\n", mi->output_gpios[i]);
+ goto err_output_gpio_configure_failed;
+ }
+ }
+ for (i = 0; i < mi->ninputs; i++) {
+ err = gpio_request(mi->input_gpios[i], "gpio_kp_in");
+ if (err) {
+ pr_err("gpiomatrix: gpio_request failed for "
+ "input %d\n", mi->input_gpios[i]);
+ goto err_request_input_gpio_failed;
+ }
+ err = gpio_direction_input(mi->input_gpios[i]);
+ if (err) {
+ pr_err("gpiomatrix: gpio_direction_input failed"
+ " for input %d\n", mi->input_gpios[i]);
+ goto err_gpio_direction_input_failed;
+ }
+ }
+ kp->current_output = mi->noutputs;
+ kp->key_state_changed = 1;
+
+ hrtimer_init(&kp->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ kp->timer.function = gpio_keypad_timer_func;
+ wake_lock_init(&kp->wake_lock, WAKE_LOCK_SUSPEND, "gpio_kp");
+ err = gpio_keypad_request_irqs(kp);
+ kp->use_irq = err == 0;
+
+ pr_info("GPIO Matrix Keypad Driver: Start keypad matrix for "
+ "%s%s in %s mode\n", input_devs->dev[0]->name,
+ (input_devs->count > 1) ? "..." : "",
+ kp->use_irq ? "interrupt" : "polling");
+
+ if (kp->use_irq)
+ wake_lock(&kp->wake_lock);
+ hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+
+ return 0;
+ }
+
+ err = 0;
+ kp = *data;
+
+ if (kp->use_irq)
+ for (i = mi->noutputs - 1; i >= 0; i--)
+ free_irq(gpio_to_irq(mi->input_gpios[i]), kp);
+
+ hrtimer_cancel(&kp->timer);
+ wake_lock_destroy(&kp->wake_lock);
+ for (i = mi->noutputs - 1; i >= 0; i--) {
+err_gpio_direction_input_failed:
+ gpio_free(mi->input_gpios[i]);
+err_request_input_gpio_failed:
+ ;
+ }
+ for (i = mi->noutputs - 1; i >= 0; i--) {
+err_output_gpio_configure_failed:
+ gpio_free(mi->output_gpios[i]);
+err_request_output_gpio_failed:
+ ;
+ }
+err_bad_keymap:
+ kfree(kp);
+err_kp_alloc_failed:
+err_invalid_platform_data:
+ return err;
+}
diff --git a/drivers/input/misc/gpio_output.c b/drivers/input/misc/gpio_output.c
new file mode 100644
index 000000000000..2aac2fad0a17
--- /dev/null
+++ b/drivers/input/misc/gpio_output.c
@@ -0,0 +1,97 @@
+/* drivers/input/misc/gpio_output.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+
+int gpio_event_output_event(
+ struct gpio_event_input_devs *input_devs, struct gpio_event_info *info,
+ void **data, unsigned int dev, unsigned int type,
+ unsigned int code, int value)
+{
+ int i;
+ struct gpio_event_output_info *oi;
+ oi = container_of(info, struct gpio_event_output_info, info);
+ if (type != oi->type)
+ return 0;
+ if (!(oi->flags & GPIOEDF_ACTIVE_HIGH))
+ value = !value;
+ for (i = 0; i < oi->keymap_size; i++)
+ if (dev == oi->keymap[i].dev && code == oi->keymap[i].code)
+ gpio_set_value(oi->keymap[i].gpio, value);
+ return 0;
+}
+
+int gpio_event_output_func(
+ struct gpio_event_input_devs *input_devs, struct gpio_event_info *info,
+ void **data, int func)
+{
+ int ret;
+ int i;
+ struct gpio_event_output_info *oi;
+ oi = container_of(info, struct gpio_event_output_info, info);
+
+ if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME)
+ return 0;
+
+ if (func == GPIO_EVENT_FUNC_INIT) {
+ int output_level = !(oi->flags & GPIOEDF_ACTIVE_HIGH);
+
+ for (i = 0; i < oi->keymap_size; i++) {
+ int dev = oi->keymap[i].dev;
+ if (dev >= input_devs->count) {
+ pr_err("gpio_event_output_func: bad device "
+ "index %d >= %d for key code %d\n",
+ dev, input_devs->count,
+ oi->keymap[i].code);
+ ret = -EINVAL;
+ goto err_bad_keymap;
+ }
+ input_set_capability(input_devs->dev[dev], oi->type,
+ oi->keymap[i].code);
+ }
+
+ for (i = 0; i < oi->keymap_size; i++) {
+ ret = gpio_request(oi->keymap[i].gpio,
+ "gpio_event_output");
+ if (ret) {
+ pr_err("gpio_event_output_func: gpio_request "
+ "failed for %d\n", oi->keymap[i].gpio);
+ goto err_gpio_request_failed;
+ }
+ ret = gpio_direction_output(oi->keymap[i].gpio,
+ output_level);
+ if (ret) {
+ pr_err("gpio_event_output_func: "
+ "gpio_direction_output failed for %d\n",
+ oi->keymap[i].gpio);
+ goto err_gpio_direction_output_failed;
+ }
+ }
+ return 0;
+ }
+
+ ret = 0;
+ for (i = oi->keymap_size - 1; i >= 0; i--) {
+err_gpio_direction_output_failed:
+ gpio_free(oi->keymap[i].gpio);
+err_gpio_request_failed:
+ ;
+ }
+err_bad_keymap:
+ return ret;
+}
+
diff --git a/drivers/input/misc/keychord.c b/drivers/input/misc/keychord.c
new file mode 100644
index 000000000000..3ffab6da411b
--- /dev/null
+++ b/drivers/input/misc/keychord.c
@@ -0,0 +1,387 @@
+/*
+ * drivers/input/misc/keychord.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/keychord.h>
+#include <linux/sched.h>
+
+#define KEYCHORD_NAME "keychord"
+#define BUFFER_SIZE 16
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("Key chord input driver");
+MODULE_SUPPORTED_DEVICE("keychord");
+MODULE_LICENSE("GPL");
+
+#define NEXT_KEYCHORD(kc) ((struct input_keychord *) \
+ ((char *)kc + sizeof(struct input_keychord) + \
+ kc->count * sizeof(kc->keycodes[0])))
+
+struct keychord_device {
+ struct input_handler input_handler;
+ int registered;
+
+ /* list of keychords to monitor */
+ struct input_keychord *keychords;
+ int keychord_count;
+
+ /* bitmask of keys contained in our keychords */
+ unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];
+ /* current state of the keys */
+ unsigned long keystate[BITS_TO_LONGS(KEY_CNT)];
+ /* number of keys that are currently pressed */
+ int key_down;
+
+ /* second input_device_id is needed for null termination */
+ struct input_device_id device_ids[2];
+
+ spinlock_t lock;
+ wait_queue_head_t waitq;
+ unsigned char head;
+ unsigned char tail;
+ __u16 buff[BUFFER_SIZE];
+};
+
+static int check_keychord(struct keychord_device *kdev,
+ struct input_keychord *keychord)
+{
+ int i;
+
+ if (keychord->count != kdev->key_down)
+ return 0;
+
+ for (i = 0; i < keychord->count; i++) {
+ if (!test_bit(keychord->keycodes[i], kdev->keystate))
+ return 0;
+ }
+
+ /* we have a match */
+ return 1;
+}
+
+static void keychord_event(struct input_handle *handle, unsigned int type,
+ unsigned int code, int value)
+{
+ struct keychord_device *kdev = handle->private;
+ struct input_keychord *keychord;
+ unsigned long flags;
+ int i, got_chord = 0;
+
+ if (type != EV_KEY || code >= KEY_MAX)
+ return;
+
+ spin_lock_irqsave(&kdev->lock, flags);
+ /* do nothing if key state did not change */
+ if (!test_bit(code, kdev->keystate) == !value)
+ goto done;
+ __change_bit(code, kdev->keystate);
+ if (value)
+ kdev->key_down++;
+ else
+ kdev->key_down--;
+
+ /* don't notify on key up */
+ if (!value)
+ goto done;
+ /* ignore this event if it is not one of the keys we are monitoring */
+ if (!test_bit(code, kdev->keybit))
+ goto done;
+
+ keychord = kdev->keychords;
+ if (!keychord)
+ goto done;
+
+ /* check to see if the keyboard state matches any keychords */
+ for (i = 0; i < kdev->keychord_count; i++) {
+ if (check_keychord(kdev, keychord)) {
+ kdev->buff[kdev->head] = keychord->id;
+ kdev->head = (kdev->head + 1) % BUFFER_SIZE;
+ got_chord = 1;
+ break;
+ }
+ /* skip to next keychord */
+ keychord = NEXT_KEYCHORD(keychord);
+ }
+
+done:
+ spin_unlock_irqrestore(&kdev->lock, flags);
+
+ if (got_chord)
+ wake_up_interruptible(&kdev->waitq);
+}
+
+static int keychord_connect(struct input_handler *handler,
+ struct input_dev *dev,
+ const struct input_device_id *id)
+{
+ int i, ret;
+ struct input_handle *handle;
+ struct keychord_device *kdev =
+ container_of(handler, struct keychord_device, input_handler);
+
+ /*
+ * ignore this input device if it does not contain any keycodes
+ * that we are monitoring
+ */
+ for (i = 0; i < KEY_MAX; i++) {
+ if (test_bit(i, kdev->keybit) && test_bit(i, dev->keybit))
+ break;
+ }
+ if (i == KEY_MAX)
+ return -ENODEV;
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ handle->dev = dev;
+ handle->handler = handler;
+ handle->name = KEYCHORD_NAME;
+ handle->private = kdev;
+
+ ret = input_register_handle(handle);
+ if (ret)
+ goto err_input_register_handle;
+
+ ret = input_open_device(handle);
+ if (ret)
+ goto err_input_open_device;
+
+ pr_info("keychord: using input dev %s for fevent\n", dev->name);
+
+ return 0;
+
+err_input_open_device:
+ input_unregister_handle(handle);
+err_input_register_handle:
+ kfree(handle);
+ return ret;
+}
+
+static void keychord_disconnect(struct input_handle *handle)
+{
+ input_close_device(handle);
+ input_unregister_handle(handle);
+ kfree(handle);
+}
+
+/*
+ * keychord_read is used to read keychord events from the driver
+ */
+static ssize_t keychord_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct keychord_device *kdev = file->private_data;
+ __u16 id;
+ int retval;
+ unsigned long flags;
+
+ if (count < sizeof(id))
+ return -EINVAL;
+ count = sizeof(id);
+
+ if (kdev->head == kdev->tail && (file->f_flags & O_NONBLOCK))
+ return -EAGAIN;
+
+ retval = wait_event_interruptible(kdev->waitq,
+ kdev->head != kdev->tail);
+ if (retval)
+ return retval;
+
+ spin_lock_irqsave(&kdev->lock, flags);
+ /* pop a keychord ID off the queue */
+ id = kdev->buff[kdev->tail];
+ kdev->tail = (kdev->tail + 1) % BUFFER_SIZE;
+ spin_unlock_irqrestore(&kdev->lock, flags);
+
+ if (copy_to_user(buffer, &id, count))
+ return -EFAULT;
+
+ return count;
+}
+
+/*
+ * keychord_write is used to configure the driver
+ */
+static ssize_t keychord_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct keychord_device *kdev = file->private_data;
+ struct input_keychord *keychords = 0;
+ struct input_keychord *keychord, *next, *end;
+ int ret, i, key;
+ unsigned long flags;
+
+ if (count < sizeof(struct input_keychord))
+ return -EINVAL;
+ keychords = kzalloc(count, GFP_KERNEL);
+ if (!keychords)
+ return -ENOMEM;
+
+ /* read list of keychords from userspace */
+ if (copy_from_user(keychords, buffer, count)) {
+ kfree(keychords);
+ return -EFAULT;
+ }
+
+ /* unregister handler before changing configuration */
+ if (kdev->registered) {
+ input_unregister_handler(&kdev->input_handler);
+ kdev->registered = 0;
+ }
+
+ spin_lock_irqsave(&kdev->lock, flags);
+ /* clear any existing configuration */
+ kfree(kdev->keychords);
+ kdev->keychords = 0;
+ kdev->keychord_count = 0;
+ kdev->key_down = 0;
+ memset(kdev->keybit, 0, sizeof(kdev->keybit));
+ memset(kdev->keystate, 0, sizeof(kdev->keystate));
+ kdev->head = kdev->tail = 0;
+
+ keychord = keychords;
+ end = (struct input_keychord *)((char *)keychord + count);
+
+ while (keychord < end) {
+ next = NEXT_KEYCHORD(keychord);
+ if (keychord->count <= 0 || next > end) {
+ pr_err("keychord: invalid keycode count %d\n",
+ keychord->count);
+ goto err_unlock_return;
+ }
+ if (keychord->version != KEYCHORD_VERSION) {
+ pr_err("keychord: unsupported version %d\n",
+ keychord->version);
+ goto err_unlock_return;
+ }
+
+ /* keep track of the keys we are monitoring in keybit */
+ for (i = 0; i < keychord->count; i++) {
+ key = keychord->keycodes[i];
+ if (key < 0 || key >= KEY_CNT) {
+ pr_err("keychord: keycode %d out of range\n",
+ key);
+ goto err_unlock_return;
+ }
+ __set_bit(key, kdev->keybit);
+ }
+
+ kdev->keychord_count++;
+ keychord = next;
+ }
+
+ kdev->keychords = keychords;
+ spin_unlock_irqrestore(&kdev->lock, flags);
+
+ ret = input_register_handler(&kdev->input_handler);
+ if (ret) {
+ kfree(keychords);
+ kdev->keychords = 0;
+ return ret;
+ }
+ kdev->registered = 1;
+
+ return count;
+
+err_unlock_return:
+ spin_unlock_irqrestore(&kdev->lock, flags);
+ kfree(keychords);
+ return -EINVAL;
+}
+
+static unsigned int keychord_poll(struct file *file, poll_table *wait)
+{
+ struct keychord_device *kdev = file->private_data;
+
+ poll_wait(file, &kdev->waitq, wait);
+
+ if (kdev->head != kdev->tail)
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+static int keychord_open(struct inode *inode, struct file *file)
+{
+ struct keychord_device *kdev;
+
+ kdev = kzalloc(sizeof(struct keychord_device), GFP_KERNEL);
+ if (!kdev)
+ return -ENOMEM;
+
+ spin_lock_init(&kdev->lock);
+ init_waitqueue_head(&kdev->waitq);
+
+ kdev->input_handler.event = keychord_event;
+ kdev->input_handler.connect = keychord_connect;
+ kdev->input_handler.disconnect = keychord_disconnect;
+ kdev->input_handler.name = KEYCHORD_NAME;
+ kdev->input_handler.id_table = kdev->device_ids;
+
+ kdev->device_ids[0].flags = INPUT_DEVICE_ID_MATCH_EVBIT;
+ __set_bit(EV_KEY, kdev->device_ids[0].evbit);
+
+ file->private_data = kdev;
+
+ return 0;
+}
+
+static int keychord_release(struct inode *inode, struct file *file)
+{
+ struct keychord_device *kdev = file->private_data;
+
+ if (kdev->registered)
+ input_unregister_handler(&kdev->input_handler);
+ kfree(kdev);
+
+ return 0;
+}
+
+static const struct file_operations keychord_fops = {
+ .owner = THIS_MODULE,
+ .open = keychord_open,
+ .release = keychord_release,
+ .read = keychord_read,
+ .write = keychord_write,
+ .poll = keychord_poll,
+};
+
+static struct miscdevice keychord_misc = {
+ .fops = &keychord_fops,
+ .name = KEYCHORD_NAME,
+ .minor = MISC_DYNAMIC_MINOR,
+};
+
+static int __init keychord_init(void)
+{
+ return misc_register(&keychord_misc);
+}
+
+static void __exit keychord_exit(void)
+{
+ misc_deregister(&keychord_misc);
+}
+
+module_init(keychord_init);
+module_exit(keychord_exit);
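
For orientation, a minimal user-space sketch of how this interface is meant to be driven: one write() installs the chord table, and read() then blocks until a configured chord is held down. The device node name, the input_keychord layout and the KEYCHORD_VERSION value are assumptions taken from the keychord UAPI header added elsewhere in this series; the id and keycodes below are purely illustrative.

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

/* assumed layout of the UAPI struct; see the keychord header in this series */
struct example_keychord {
        uint16_t version;       /* assumed KEYCHORD_VERSION == 1 */
        uint16_t id;            /* value returned by read() when the chord fires */
        uint16_t count;         /* number of keycodes that follow */
        uint16_t keycodes[2];   /* KEY_* codes, all held at once to trigger */
};

static int example_wait_for_chord(void)
{
        struct example_keychord kc = {
                .version  = 1,
                .id       = 42,
                .count    = 2,
                .keycodes = { 114 /* KEY_VOLUMEDOWN */, 116 /* KEY_POWER */ },
        };
        uint16_t id;
        int ret = -1;
        int fd = open("/dev/keychord", O_RDWR);   /* misc node name assumed */

        if (fd < 0)
                return -1;
        /* each write() replaces the whole chord configuration */
        if (write(fd, &kc, sizeof(kc)) == sizeof(kc) &&
            /* read() blocks until every key of some registered chord is down */
            read(fd, &id, sizeof(id)) == sizeof(id))
                ret = id;
        close(fd);
        return ret;
}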
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 515cfe790543..c8cb4ec349f2 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -493,6 +493,12 @@ config TOUCHSCREEN_TNETV107X
To compile this driver as a module, choose M here: the
module will be called tnetv107x-ts.
+config TOUCHSCREEN_SYNAPTICS_I2C_RMI
+ tristate "Synaptics I2C touchscreen"
+ depends on I2C
+ help
+ This enables support for Synaptics RMI-over-I2C based touchscreens.
+
config TOUCHSCREEN_TOUCHRIGHT
tristate "Touchright serial touchscreen"
select SERIO
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 6bfbeab67c9f..3c83d7e0e780 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_TOUCHSCREEN_ST1232) += st1232.o
obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o
obj-$(CONFIG_TOUCHSCREEN_TI_AM335X_TSC) += ti_am335x_tsc.o
obj-$(CONFIG_TOUCHSCREEN_TNETV107X) += tnetv107x-ts.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI) += synaptics_i2c_rmi.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT) += touchright.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHWIN) += touchwin.o
diff --git a/drivers/input/touchscreen/synaptics_i2c_rmi.c b/drivers/input/touchscreen/synaptics_i2c_rmi.c
new file mode 100644
index 000000000000..e30fa03d84f5
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_i2c_rmi.c
@@ -0,0 +1,675 @@
+/* drivers/input/keyboard/synaptics_i2c_rmi.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/earlysuspend.h>
+#include <linux/hrtimer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/synaptics_i2c_rmi.h>
+
+static struct workqueue_struct *synaptics_wq;
+
+struct synaptics_ts_data {
+ uint16_t addr;
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+ int use_irq;
+ bool has_relative_report;
+ struct hrtimer timer;
+ struct work_struct work;
+ uint16_t max[2];
+ int snap_state[2][2];
+ int snap_down_on[2];
+ int snap_down_off[2];
+ int snap_up_on[2];
+ int snap_up_off[2];
+ int snap_down[2];
+ int snap_up[2];
+ uint32_t flags;
+ int reported_finger_count;
+ int8_t sensitivity_adjust;
+ int (*power)(int on);
+ struct early_suspend early_suspend;
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void synaptics_ts_early_suspend(struct early_suspend *h);
+static void synaptics_ts_late_resume(struct early_suspend *h);
+#endif
+
+static int synaptics_init_panel(struct synaptics_ts_data *ts)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0xff, 0x10); /* page select = 0x10 */
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed for page select\n");
+ goto err_page_select_failed;
+ }
+ ret = i2c_smbus_write_byte_data(ts->client, 0x41, 0x04); /* Set "No Clip Z" */
+ if (ret < 0)
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed for No Clip Z\n");
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0x44,
+ ts->sensitivity_adjust);
+ if (ret < 0)
+ pr_err("synaptics_ts: failed to set Sensitivity Adjust\n");
+
+err_page_select_failed:
+ ret = i2c_smbus_write_byte_data(ts->client, 0xff, 0x04); /* page select = 0x04 */
+ if (ret < 0)
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed for page select\n");
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf0, 0x81); /* normal operation, 80 reports per second */
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_resume: i2c_smbus_write_byte_data failed\n");
+ return ret;
+}
+
+static void synaptics_ts_work_func(struct work_struct *work)
+{
+ int i;
+ int ret;
+ int bad_data = 0;
+ struct i2c_msg msg[2];
+ uint8_t start_reg;
+ uint8_t buf[15];
+ struct synaptics_ts_data *ts = container_of(work, struct synaptics_ts_data, work);
+ int buf_len = ts->has_relative_report ? 15 : 13;
+
+ msg[0].addr = ts->client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = &start_reg;
+ start_reg = 0x00;
+ msg[1].addr = ts->client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = buf_len;
+ msg[1].buf = buf;
+
+ /* printk("synaptics_ts_work_func\n"); */
+ for (i = 0; i < ((ts->use_irq && !bad_data) ? 1 : 10); i++) {
+ ret = i2c_transfer(ts->client->adapter, msg, 2);
+ if (ret < 0) {
+ printk(KERN_ERR "synaptics_ts_work_func: i2c_transfer failed\n");
+ bad_data = 1;
+ } else {
+ /* printk("synaptics_ts_work_func: %x %x %x %x %x %x" */
+ /* " %x %x %x %x %x %x %x %x %x, ret %d\n", */
+ /* buf[0], buf[1], buf[2], buf[3], */
+ /* buf[4], buf[5], buf[6], buf[7], */
+ /* buf[8], buf[9], buf[10], buf[11], */
+ /* buf[12], buf[13], buf[14], ret); */
+ if ((buf[buf_len - 1] & 0xc0) != 0x40) {
+ printk(KERN_WARNING "synaptics_ts_work_func:"
+ " bad read %x %x %x %x %x %x %x %x %x"
+ " %x %x %x %x %x %x, ret %d\n",
+ buf[0], buf[1], buf[2], buf[3],
+ buf[4], buf[5], buf[6], buf[7],
+ buf[8], buf[9], buf[10], buf[11],
+ buf[12], buf[13], buf[14], ret);
+ if (bad_data)
+ synaptics_init_panel(ts);
+ bad_data = 1;
+ continue;
+ }
+ bad_data = 0;
+ if ((buf[buf_len - 1] & 1) == 0) {
+ /* printk("read %d coordinates\n", i); */
+ break;
+ } else {
+ int pos[2][2];
+ int f, a;
+ int base;
+ /* int x = buf[3] | (uint16_t)(buf[2] & 0x1f) << 8; */
+ /* int y = buf[5] | (uint16_t)(buf[4] & 0x1f) << 8; */
+ int z = buf[1];
+ int w = buf[0] >> 4;
+ int finger = buf[0] & 7;
+
+ /* int x2 = buf[3+6] | (uint16_t)(buf[2+6] & 0x1f) << 8; */
+ /* int y2 = buf[5+6] | (uint16_t)(buf[4+6] & 0x1f) << 8; */
+ /* int z2 = buf[1+6]; */
+ /* int w2 = buf[0+6] >> 4; */
+ /* int finger2 = buf[0+6] & 7; */
+
+ /* int dx = (int8_t)buf[12]; */
+ /* int dy = (int8_t)buf[13]; */
+ int finger2_pressed;
+
+ /* printk("x %4d, y %4d, z %3d, w %2d, F %d, 2nd: x %4d, y %4d, z %3d, w %2d, F %d, dx %4d, dy %4d\n", */
+ /* x, y, z, w, finger, */
+ /* x2, y2, z2, w2, finger2, */
+ /* dx, dy); */
+
+ base = 2;
+ for (f = 0; f < 2; f++) {
+ uint32_t flip_flag = SYNAPTICS_FLIP_X;
+ for (a = 0; a < 2; a++) {
+ int p = buf[base + 1];
+ p |= (uint16_t)(buf[base] & 0x1f) << 8;
+ if (ts->flags & flip_flag)
+ p = ts->max[a] - p;
+ if (ts->flags & SYNAPTICS_SNAP_TO_INACTIVE_EDGE) {
+ if (ts->snap_state[f][a]) {
+ if (p <= ts->snap_down_off[a])
+ p = ts->snap_down[a];
+ else if (p >= ts->snap_up_off[a])
+ p = ts->snap_up[a];
+ else
+ ts->snap_state[f][a] = 0;
+ } else {
+ if (p <= ts->snap_down_on[a]) {
+ p = ts->snap_down[a];
+ ts->snap_state[f][a] = 1;
+ } else if (p >= ts->snap_up_on[a]) {
+ p = ts->snap_up[a];
+ ts->snap_state[f][a] = 1;
+ }
+ }
+ }
+ pos[f][a] = p;
+ base += 2;
+ flip_flag <<= 1;
+ }
+ base += 2;
+ if (ts->flags & SYNAPTICS_SWAP_XY)
+ swap(pos[f][0], pos[f][1]);
+ }
+ if (z) {
+ input_report_abs(ts->input_dev, ABS_X, pos[0][0]);
+ input_report_abs(ts->input_dev, ABS_Y, pos[0][1]);
+ }
+ input_report_abs(ts->input_dev, ABS_PRESSURE, z);
+ input_report_abs(ts->input_dev, ABS_TOOL_WIDTH, w);
+ input_report_key(ts->input_dev, BTN_TOUCH, finger);
+ finger2_pressed = finger > 1 && finger != 7;
+ input_report_key(ts->input_dev, BTN_2, finger2_pressed);
+ if (finger2_pressed) {
+ input_report_abs(ts->input_dev, ABS_HAT0X, pos[1][0]);
+ input_report_abs(ts->input_dev, ABS_HAT0Y, pos[1][1]);
+ }
+
+ if (!finger)
+ z = 0;
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, z);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_X, pos[0][0]);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, pos[0][1]);
+ input_mt_sync(ts->input_dev);
+ if (finger2_pressed) {
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, z);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_X, pos[1][0]);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, pos[1][1]);
+ input_mt_sync(ts->input_dev);
+ } else if (ts->reported_finger_count > 1) {
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0);
+ input_mt_sync(ts->input_dev);
+ }
+ ts->reported_finger_count = finger;
+ input_sync(ts->input_dev);
+ }
+ }
+ }
+ if (ts->use_irq)
+ enable_irq(ts->client->irq);
+}
+
+static enum hrtimer_restart synaptics_ts_timer_func(struct hrtimer *timer)
+{
+ struct synaptics_ts_data *ts = container_of(timer, struct synaptics_ts_data, timer);
+ /* printk("synaptics_ts_timer_func\n"); */
+
+ queue_work(synaptics_wq, &ts->work);
+
+ hrtimer_start(&ts->timer, ktime_set(0, 12500000), HRTIMER_MODE_REL);
+ return HRTIMER_NORESTART;
+}
+
+static irqreturn_t synaptics_ts_irq_handler(int irq, void *dev_id)
+{
+ struct synaptics_ts_data *ts = dev_id;
+
+ /* printk("synaptics_ts_irq_handler\n"); */
+ disable_irq_nosync(ts->client->irq);
+ queue_work(synaptics_wq, &ts->work);
+ return IRQ_HANDLED;
+}
+
+static int synaptics_ts_probe(
+ struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct synaptics_ts_data *ts;
+ uint8_t buf0[4];
+ uint8_t buf1[8];
+ struct i2c_msg msg[2];
+ int ret = 0;
+ uint16_t max_x, max_y;
+ int fuzz_x, fuzz_y, fuzz_p, fuzz_w;
+ struct synaptics_i2c_rmi_platform_data *pdata;
+ unsigned long irqflags;
+ int inactive_area_left;
+ int inactive_area_right;
+ int inactive_area_top;
+ int inactive_area_bottom;
+ int snap_left_on;
+ int snap_left_off;
+ int snap_right_on;
+ int snap_right_off;
+ int snap_top_on;
+ int snap_top_off;
+ int snap_bottom_on;
+ int snap_bottom_off;
+ uint32_t panel_version;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ printk(KERN_ERR "synaptics_ts_probe: need I2C_FUNC_I2C\n");
+ ret = -ENODEV;
+ goto err_check_functionality_failed;
+ }
+
+ ts = kzalloc(sizeof(*ts), GFP_KERNEL);
+ if (ts == NULL) {
+ ret = -ENOMEM;
+ goto err_alloc_data_failed;
+ }
+ INIT_WORK(&ts->work, synaptics_ts_work_func);
+ ts->client = client;
+ i2c_set_clientdata(client, ts);
+ pdata = client->dev.platform_data;
+ if (pdata)
+ ts->power = pdata->power;
+ if (ts->power) {
+ ret = ts->power(1);
+ if (ret < 0) {
+ printk(KERN_ERR "synaptics_ts_probe power on failed\n");
+ goto err_power_failed;
+ }
+ }
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf4, 0x01); /* device command = reset */
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed\n");
+ /* fail? */
+ }
+ {
+ int retry = 10;
+ while (retry-- > 0) {
+ ret = i2c_smbus_read_byte_data(ts->client, 0xe4);
+ if (ret >= 0)
+ break;
+ msleep(100);
+ }
+ }
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: Product Major Version %x\n", ret);
+ panel_version = ret << 8;
+ ret = i2c_smbus_read_byte_data(ts->client, 0xe5);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: Product Minor Version %x\n", ret);
+ panel_version |= ret;
+
+ ret = i2c_smbus_read_byte_data(ts->client, 0xe3);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: product property %x\n", ret);
+
+ if (pdata) {
+ while (pdata->version > panel_version)
+ pdata++;
+ ts->flags = pdata->flags;
+ ts->sensitivity_adjust = pdata->sensitivity_adjust;
+ irqflags = pdata->irqflags;
+ inactive_area_left = pdata->inactive_left;
+ inactive_area_right = pdata->inactive_right;
+ inactive_area_top = pdata->inactive_top;
+ inactive_area_bottom = pdata->inactive_bottom;
+ snap_left_on = pdata->snap_left_on;
+ snap_left_off = pdata->snap_left_off;
+ snap_right_on = pdata->snap_right_on;
+ snap_right_off = pdata->snap_right_off;
+ snap_top_on = pdata->snap_top_on;
+ snap_top_off = pdata->snap_top_off;
+ snap_bottom_on = pdata->snap_bottom_on;
+ snap_bottom_off = pdata->snap_bottom_off;
+ fuzz_x = pdata->fuzz_x;
+ fuzz_y = pdata->fuzz_y;
+ fuzz_p = pdata->fuzz_p;
+ fuzz_w = pdata->fuzz_w;
+ } else {
+ irqflags = 0;
+ inactive_area_left = 0;
+ inactive_area_right = 0;
+ inactive_area_top = 0;
+ inactive_area_bottom = 0;
+ snap_left_on = 0;
+ snap_left_off = 0;
+ snap_right_on = 0;
+ snap_right_off = 0;
+ snap_top_on = 0;
+ snap_top_off = 0;
+ snap_bottom_on = 0;
+ snap_bottom_off = 0;
+ fuzz_x = 0;
+ fuzz_y = 0;
+ fuzz_p = 0;
+ fuzz_w = 0;
+ }
+
+ ret = i2c_smbus_read_byte_data(ts->client, 0xf0);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: device control %x\n", ret);
+
+ ret = i2c_smbus_read_byte_data(ts->client, 0xf1);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: interrupt enable %x\n", ret);
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf1, 0); /* disable interrupt */
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed\n");
+ goto err_detect_failed;
+ }
+
+ msg[0].addr = ts->client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = buf0;
+ buf0[0] = 0xe0;
+ msg[1].addr = ts->client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 8;
+ msg[1].buf = buf1;
+ ret = i2c_transfer(ts->client->adapter, msg, 2);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_transfer failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: 0xe0: %x %x %x %x %x %x %x %x\n",
+ buf1[0], buf1[1], buf1[2], buf1[3],
+ buf1[4], buf1[5], buf1[6], buf1[7]);
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0xff, 0x10); /* page select = 0x10 */
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed for page select\n");
+ goto err_detect_failed;
+ }
+ ret = i2c_smbus_read_word_data(ts->client, 0x02);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_word_data failed\n");
+ goto err_detect_failed;
+ }
+ ts->has_relative_report = !(ret & 0x100);
+ printk(KERN_INFO "synaptics_ts_probe: Sensor properties %x\n", ret);
+ ret = i2c_smbus_read_word_data(ts->client, 0x04);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_word_data failed\n");
+ goto err_detect_failed;
+ }
+ ts->max[0] = max_x = (ret >> 8 & 0xff) | ((ret & 0x1f) << 8);
+ ret = i2c_smbus_read_word_data(ts->client, 0x06);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_word_data failed\n");
+ goto err_detect_failed;
+ }
+ ts->max[1] = max_y = (ret >> 8 & 0xff) | ((ret & 0x1f) << 8);
+ if (ts->flags & SYNAPTICS_SWAP_XY)
+ swap(max_x, max_y);
+
+ ret = synaptics_init_panel(ts); /* will also switch back to page 0x04 */
+ if (ret < 0) {
+ printk(KERN_ERR "synaptics_init_panel failed\n");
+ goto err_detect_failed;
+ }
+
+ ts->input_dev = input_allocate_device();
+ if (ts->input_dev == NULL) {
+ ret = -ENOMEM;
+ printk(KERN_ERR "synaptics_ts_probe: Failed to allocate input device\n");
+ goto err_input_dev_alloc_failed;
+ }
+ ts->input_dev->name = "synaptics-rmi-touchscreen";
+ set_bit(EV_SYN, ts->input_dev->evbit);
+ set_bit(EV_KEY, ts->input_dev->evbit);
+ set_bit(BTN_TOUCH, ts->input_dev->keybit);
+ set_bit(BTN_2, ts->input_dev->keybit);
+ set_bit(EV_ABS, ts->input_dev->evbit);
+ inactive_area_left = inactive_area_left * max_x / 0x10000;
+ inactive_area_right = inactive_area_right * max_x / 0x10000;
+ inactive_area_top = inactive_area_top * max_y / 0x10000;
+ inactive_area_bottom = inactive_area_bottom * max_y / 0x10000;
+ snap_left_on = snap_left_on * max_x / 0x10000;
+ snap_left_off = snap_left_off * max_x / 0x10000;
+ snap_right_on = snap_right_on * max_x / 0x10000;
+ snap_right_off = snap_right_off * max_x / 0x10000;
+ snap_top_on = snap_top_on * max_y / 0x10000;
+ snap_top_off = snap_top_off * max_y / 0x10000;
+ snap_bottom_on = snap_bottom_on * max_y / 0x10000;
+ snap_bottom_off = snap_bottom_off * max_y / 0x10000;
+ fuzz_x = fuzz_x * max_x / 0x10000;
+ fuzz_y = fuzz_y * max_y / 0x10000;
+ ts->snap_down[!!(ts->flags & SYNAPTICS_SWAP_XY)] = -inactive_area_left;
+ ts->snap_up[!!(ts->flags & SYNAPTICS_SWAP_XY)] = max_x + inactive_area_right;
+ ts->snap_down[!(ts->flags & SYNAPTICS_SWAP_XY)] = -inactive_area_top;
+ ts->snap_up[!(ts->flags & SYNAPTICS_SWAP_XY)] = max_y + inactive_area_bottom;
+ ts->snap_down_on[!!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_left_on;
+ ts->snap_down_off[!!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_left_off;
+ ts->snap_up_on[!!(ts->flags & SYNAPTICS_SWAP_XY)] = max_x - snap_right_on;
+ ts->snap_up_off[!!(ts->flags & SYNAPTICS_SWAP_XY)] = max_x - snap_right_off;
+ ts->snap_down_on[!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_top_on;
+ ts->snap_down_off[!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_top_off;
+ ts->snap_up_on[!(ts->flags & SYNAPTICS_SWAP_XY)] = max_y - snap_bottom_on;
+ ts->snap_up_off[!(ts->flags & SYNAPTICS_SWAP_XY)] = max_y - snap_bottom_off;
+ printk(KERN_INFO "synaptics_ts_probe: max_x %d, max_y %d\n", max_x, max_y);
+ printk(KERN_INFO "synaptics_ts_probe: inactive_x %d %d, inactive_y %d %d\n",
+ inactive_area_left, inactive_area_right,
+ inactive_area_top, inactive_area_bottom);
+ printk(KERN_INFO "synaptics_ts_probe: snap_x %d-%d %d-%d, snap_y %d-%d %d-%d\n",
+ snap_left_on, snap_left_off, snap_right_on, snap_right_off,
+ snap_top_on, snap_top_off, snap_bottom_on, snap_bottom_off);
+ input_set_abs_params(ts->input_dev, ABS_X, -inactive_area_left, max_x + inactive_area_right, fuzz_x, 0);
+ input_set_abs_params(ts->input_dev, ABS_Y, -inactive_area_top, max_y + inactive_area_bottom, fuzz_y, 0);
+ input_set_abs_params(ts->input_dev, ABS_PRESSURE, 0, 255, fuzz_p, 0);
+ input_set_abs_params(ts->input_dev, ABS_TOOL_WIDTH, 0, 15, fuzz_w, 0);
+ input_set_abs_params(ts->input_dev, ABS_HAT0X, -inactive_area_left, max_x + inactive_area_right, fuzz_x, 0);
+ input_set_abs_params(ts->input_dev, ABS_HAT0Y, -inactive_area_top, max_y + inactive_area_bottom, fuzz_y, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_POSITION_X, -inactive_area_left, max_x + inactive_area_right, fuzz_x, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_POSITION_Y, -inactive_area_top, max_y + inactive_area_bottom, fuzz_y, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, fuzz_p, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0, 15, fuzz_w, 0);
+ /* ts->input_dev->name = ts->keypad_info->name; */
+ ret = input_register_device(ts->input_dev);
+ if (ret) {
+ printk(KERN_ERR "synaptics_ts_probe: Unable to register %s input device\n", ts->input_dev->name);
+ goto err_input_register_device_failed;
+ }
+ if (client->irq) {
+ ret = request_irq(client->irq, synaptics_ts_irq_handler, irqflags, client->name, ts);
+ if (ret == 0) {
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf1, 0x01); /* enable abs int */
+ if (ret)
+ free_irq(client->irq, ts);
+ }
+ if (ret == 0)
+ ts->use_irq = 1;
+ else
+ dev_err(&client->dev, "request_irq failed\n");
+ }
+ if (!ts->use_irq) {
+ hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ts->timer.function = synaptics_ts_timer_func;
+ hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
+ }
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ ts->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+ ts->early_suspend.suspend = synaptics_ts_early_suspend;
+ ts->early_suspend.resume = synaptics_ts_late_resume;
+ register_early_suspend(&ts->early_suspend);
+#endif
+
+ printk(KERN_INFO "synaptics_ts_probe: Start touchscreen %s in %s mode\n", ts->input_dev->name, ts->use_irq ? "interrupt" : "polling");
+
+ return 0;
+
+err_input_register_device_failed:
+ input_free_device(ts->input_dev);
+
+err_input_dev_alloc_failed:
+err_detect_failed:
+err_power_failed:
+ kfree(ts);
+err_alloc_data_failed:
+err_check_functionality_failed:
+ return ret;
+}
+
+static int synaptics_ts_remove(struct i2c_client *client)
+{
+ struct synaptics_ts_data *ts = i2c_get_clientdata(client);
+ unregister_early_suspend(&ts->early_suspend);
+ if (ts->use_irq)
+ free_irq(client->irq, ts);
+ else
+ hrtimer_cancel(&ts->timer);
+ input_unregister_device(ts->input_dev);
+ kfree(ts);
+ return 0;
+}
+
+static int synaptics_ts_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ int ret;
+ struct synaptics_ts_data *ts = i2c_get_clientdata(client);
+
+ if (ts->use_irq)
+ disable_irq(client->irq);
+ else
+ hrtimer_cancel(&ts->timer);
+ ret = cancel_work_sync(&ts->work);
+ if (ret && ts->use_irq) /* if work was pending disable-count is now 2 */
+ enable_irq(client->irq);
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf1, 0); /* disable interrupt */
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_suspend: i2c_smbus_write_byte_data failed\n");
+
+ ret = i2c_smbus_write_byte_data(client, 0xf0, 0x86); /* deep sleep */
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_suspend: i2c_smbus_write_byte_data failed\n");
+ if (ts->power) {
+ ret = ts->power(0);
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_resume power off failed\n");
+ }
+ return 0;
+}
+
+static int synaptics_ts_resume(struct i2c_client *client)
+{
+ int ret;
+ struct synaptics_ts_data *ts = i2c_get_clientdata(client);
+
+ if (ts->power) {
+ ret = ts->power(1);
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_resume power on failed\n");
+ }
+
+ synaptics_init_panel(ts);
+
+ if (ts->use_irq)
+ enable_irq(client->irq);
+
+ if (!ts->use_irq)
+ hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
+ else
+ i2c_smbus_write_byte_data(ts->client, 0xf1, 0x01); /* enable abs int */
+
+ return 0;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void synaptics_ts_early_suspend(struct early_suspend *h)
+{
+ struct synaptics_ts_data *ts;
+ ts = container_of(h, struct synaptics_ts_data, early_suspend);
+ synaptics_ts_suspend(ts->client, PMSG_SUSPEND);
+}
+
+static void synaptics_ts_late_resume(struct early_suspend *h)
+{
+ struct synaptics_ts_data *ts;
+ ts = container_of(h, struct synaptics_ts_data, early_suspend);
+ synaptics_ts_resume(ts->client);
+}
+#endif
+
+static const struct i2c_device_id synaptics_ts_id[] = {
+ { SYNAPTICS_I2C_RMI_NAME, 0 },
+ { }
+};
+
+static struct i2c_driver synaptics_ts_driver = {
+ .probe = synaptics_ts_probe,
+ .remove = synaptics_ts_remove,
+#ifndef CONFIG_HAS_EARLYSUSPEND
+ .suspend = synaptics_ts_suspend,
+ .resume = synaptics_ts_resume,
+#endif
+ .id_table = synaptics_ts_id,
+ .driver = {
+ .name = SYNAPTICS_I2C_RMI_NAME,
+ },
+};
+
+static int __init synaptics_ts_init(void)
+{
+ synaptics_wq = create_singlethread_workqueue("synaptics_wq");
+ if (!synaptics_wq)
+ return -ENOMEM;
+ return i2c_add_driver(&synaptics_ts_driver);
+}
+
+static void __exit synaptics_ts_exit(void)
+{
+ i2c_del_driver(&synaptics_ts_driver);
+ if (synaptics_wq)
+ destroy_workqueue(synaptics_wq);
+}
+
+module_init(synaptics_ts_init);
+module_exit(synaptics_ts_exit);
+
+MODULE_DESCRIPTION("Synaptics Touchscreen Driver");
+MODULE_LICENSE("GPL");
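
For context, a hypothetical board-file sketch of how a platform would wire this driver up. The platform-data field names mirror what synaptics_ts_probe() reads above (version selection, flags, fuzz values), but every concrete value here — bus number, I2C address, IRQ and version numbers — is illustrative, not taken from any real board.

#include <linux/i2c.h>
#include <linux/synaptics_i2c_rmi.h>

/* entries sorted by descending .version; the probe skips entries whose
 * version is above the panel version it reads back, so the final
 * version-0 entry acts as the catch-all default */
static struct synaptics_i2c_rmi_platform_data example_ts_pdata[] = {
        {
                .version = 0x0101,              /* illustrative */
                .flags   = SYNAPTICS_SWAP_XY,
                .fuzz_x  = 5 * 0x10000 / 480,   /* probe rescales by max_x / 0x10000 */
                .fuzz_y  = 5 * 0x10000 / 800,
        },
        {
                .version = 0,                   /* catch-all */
        },
};

static struct i2c_board_info example_ts_i2c_info[] __initdata = {
        {
                I2C_BOARD_INFO(SYNAPTICS_I2C_RMI_NAME, 0x20),   /* address illustrative */
                .platform_data = example_ts_pdata,
                .irq           = 42,            /* board-specific ATTN line */
        },
};

/* from the board init:
 * i2c_register_board_info(0, example_ts_i2c_info, ARRAY_SIZE(example_ts_i2c_info)); */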
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index b58bc8a14b9c..a79915e33925 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -550,6 +550,12 @@ config LEDS_TRIGGER_DEFAULT_ON
This allows LEDs to be initialised in the ON state.
If unsure, say Y.
+config LEDS_TRIGGER_SLEEP
+ tristate "LED Sleep Mode Trigger"
+ depends on LEDS_TRIGGERS && HAS_EARLYSUSPEND
+ help
+ This turns LEDs on when the screen is off but the CPU is still running.
+
comment "iptables trigger is under Netfilter config (LED target)"
depends on LEDS_TRIGGERS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 3fb9641b6194..5105e3a92579 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -65,3 +65,5 @@ obj-$(CONFIG_LEDS_TRIGGER_GPIO) += ledtrig-gpio.o
obj-$(CONFIG_LEDS_TRIGGER_CPU) += ledtrig-cpu.o
obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o
obj-$(CONFIG_LEDS_TRIGGER_TRANSIENT) += ledtrig-transient.o
+obj-$(CONFIG_LEDS_TRIGGER_SLEEP) += ledtrig-sleep.o
+
diff --git a/drivers/leds/ledtrig-cpu.c b/drivers/leds/ledtrig-cpu.c
index 4239b3955ff0..0bb5463173eb 100644
--- a/drivers/leds/ledtrig-cpu.c
+++ b/drivers/leds/ledtrig-cpu.c
@@ -26,6 +26,8 @@
#include <linux/percpu.h>
#include <linux/syscore_ops.h>
#include <linux/rwsem.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
#include "leds.h"
#define MAX_NAME_LEN 8
@@ -92,6 +94,25 @@ static struct syscore_ops ledtrig_cpu_syscore_ops = {
.resume = ledtrig_cpu_syscore_resume,
};
+static int leds_idle_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ switch (val) {
+ case IDLE_START:
+ ledtrig_cpu(CPU_LED_IDLE_START);
+ break;
+ case IDLE_END:
+ ledtrig_cpu(CPU_LED_IDLE_END);
+ break;
+ }
+
+ return 0;
+}
+
+static struct notifier_block leds_idle_nb = {
+ .notifier_call = leds_idle_notifier,
+};
+
static int __init ledtrig_cpu_init(void)
{
int cpu;
@@ -114,6 +135,8 @@ static int __init ledtrig_cpu_init(void)
register_syscore_ops(&ledtrig_cpu_syscore_ops);
+ idle_notifier_register(&leds_idle_nb);
+
pr_info("ledtrig-cpu: registered to indicate activity on CPUs\n");
return 0;
@@ -124,6 +147,8 @@ static void __exit ledtrig_cpu_exit(void)
{
int cpu;
+ idle_notifier_unregister(&leds_idle_nb);
+
for_each_possible_cpu(cpu) {
struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu);
diff --git a/drivers/leds/ledtrig-sleep.c b/drivers/leds/ledtrig-sleep.c
new file mode 100644
index 000000000000..f16404212152
--- /dev/null
+++ b/drivers/leds/ledtrig-sleep.c
@@ -0,0 +1,80 @@
+/* drivers/leds/ledtrig-sleep.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/earlysuspend.h>
+#include <linux/leds.h>
+#include <linux/suspend.h>
+
+static int ledtrig_sleep_pm_callback(struct notifier_block *nfb,
+ unsigned long action,
+ void *ignored);
+
+DEFINE_LED_TRIGGER(ledtrig_sleep)
+static struct notifier_block ledtrig_sleep_pm_notifier = {
+ .notifier_call = ledtrig_sleep_pm_callback,
+ .priority = 0,
+};
+
+static void ledtrig_sleep_early_suspend(struct early_suspend *h)
+{
+ led_trigger_event(ledtrig_sleep, LED_FULL);
+}
+
+static void ledtrig_sleep_early_resume(struct early_suspend *h)
+{
+ led_trigger_event(ledtrig_sleep, LED_OFF);
+}
+
+static struct early_suspend ledtrig_sleep_early_suspend_handler = {
+ .suspend = ledtrig_sleep_early_suspend,
+ .resume = ledtrig_sleep_early_resume,
+};
+
+static int ledtrig_sleep_pm_callback(struct notifier_block *nfb,
+ unsigned long action,
+ void *ignored)
+{
+ switch (action) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ led_trigger_event(ledtrig_sleep, LED_OFF);
+ return NOTIFY_OK;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ led_trigger_event(ledtrig_sleep, LED_FULL);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int __init ledtrig_sleep_init(void)
+{
+ led_trigger_register_simple("sleep", &ledtrig_sleep);
+ register_pm_notifier(&ledtrig_sleep_pm_notifier);
+ register_early_suspend(&ledtrig_sleep_early_suspend_handler);
+ return 0;
+}
+
+static void __exit ledtrig_sleep_exit(void)
+{
+ unregister_early_suspend(&ledtrig_sleep_early_suspend_handler);
+ unregister_pm_notifier(&ledtrig_sleep_pm_notifier);
+ led_trigger_unregister_simple(ledtrig_sleep);
+}
+
+module_init(ledtrig_sleep_init);
+module_exit(ledtrig_sleep_exit);
+
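
As a usage sketch, any LED can follow this trigger by default; for example a GPIO LED in a hypothetical board file (the GPIO number and LED name are illustrative):

#include <linux/leds.h>

static struct gpio_led example_sleep_leds[] = {
        {
                .name            = "example:green:sleep",
                .gpio            = 10,          /* illustrative */
                .default_trigger = "sleep",     /* name registered by ledtrig_sleep_init() */
        },
};

At run time the same selection can be made with: echo sleep > /sys/class/leds/<led>/trigger.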
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 1c0abd4dfc43..47ad4e270877 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -292,6 +292,7 @@ config TWL4030_CORE
bool "Texas Instruments TWL4030/TWL5030/TWL6030/TPS659x0 Support"
depends on I2C=y && GENERIC_HARDIRQS
select IRQ_DOMAIN
+ select REGMAP_I2C
help
Say yes here if you have TWL4030 / TWL6030 family chip on your board.
This core driver provides register access and IRQ handling
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index b151b7c1bd59..0a38c1ac34ab 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -373,6 +373,14 @@ config HMC6352
This driver provides support for the Honeywell HMC6352 compass,
providing configuration and heading data via sysfs.
+config SENSORS_AK8975
+ tristate "AK8975 compass support"
+ default n
+ depends on I2C
+ help
+ If you say yes here you get support for Asahi Kasei's
+ orientation sensor AK8975.
+
config EP93XX_PWM
tristate "EP93xx PWM support"
depends on ARCH_EP93XX
@@ -416,6 +424,10 @@ config TI_DAC7512
This driver can also be built as a module. If so, the module
will be called ti_dac7512.
+config UID_STAT
+ bool "UID based statistics tracking exported to /proc/uid_stat"
+ default n
+
config VMWARE_BALLOON
tristate "VMware Balloon Driver"
depends on X86
@@ -499,6 +511,14 @@ config USB_SWITCH_FSA9480
stereo and mono audio, video, microphone and UART data to use
a common connector port.
+config WL127X_RFKILL
+ tristate "Bluetooth power control driver for TI wl127x"
+ depends on RFKILL
+ default n
+ ---help---
+ Creates an rfkill entry in sysfs for power control of Bluetooth
+ TI wl127x chips.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 2129377c0de6..92b24bcbeb9f 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o
obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o
obj-$(CONFIG_DS1682) += ds1682.o
obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o
+obj-$(CONFIG_UID_STAT) += uid_stat.o
obj-$(CONFIG_C2PORT) += c2port/
obj-$(CONFIG_HMC6352) += hmc6352.o
obj-y += eeprom/
@@ -49,3 +50,6 @@ obj-y += carma/
obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/
obj-$(CONFIG_INTEL_MEI) += mei/
+obj-$(CONFIG_WL127X_RFKILL) += wl127x-rfkill.o
+obj-$(CONFIG_SENSORS_AK8975) += akm8975.o
+
diff --git a/drivers/misc/akm8975.c b/drivers/misc/akm8975.c
new file mode 100644
index 000000000000..31eadf8fcf40
--- /dev/null
+++ b/drivers/misc/akm8975.c
@@ -0,0 +1,732 @@
+/* drivers/misc/akm8975.c - akm8975 compass driver
+ *
+ * Copyright (C) 2007-2008 HTC Corporation.
+ * Author: Hou-Kun Chen <houkun.chen@gmail.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * Revised by AKM 2009/04/02
+ * Revised by Motorola 2010/05/27
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/miscdevice.h>
+#include <linux/gpio.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/workqueue.h>
+#include <linux/freezer.h>
+#include <linux/akm8975.h>
+#include <linux/earlysuspend.h>
+
+#define AK8975DRV_CALL_DBG 0
+#if AK8975DRV_CALL_DBG
+#define FUNCDBG(msg) pr_err("%s:%s\n", __func__, msg);
+#else
+#define FUNCDBG(msg)
+#endif
+
+#define AK8975DRV_DATA_DBG 0
+#define MAX_FAILURE_COUNT 10
+
+struct akm8975_data {
+ struct i2c_client *this_client;
+ struct akm8975_platform_data *pdata;
+ struct input_dev *input_dev;
+ struct work_struct work;
+ struct mutex flags_lock;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend early_suspend;
+#endif
+};
+
+/*
+ * Because misc devices cannot carry a pointer from driver register to
+ * open, we keep this global. This limits the driver to a single instance.
+ */
+struct akm8975_data *akmd_data;
+
+static DECLARE_WAIT_QUEUE_HEAD(open_wq);
+
+static atomic_t open_flag;
+
+static short m_flag;
+static short a_flag;
+static short t_flag;
+static short mv_flag;
+
+static short akmd_delay;
+
+static ssize_t akm8975_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ return sprintf(buf, "%u\n", i2c_smbus_read_byte_data(client,
+ AK8975_REG_CNTL));
+}
+static ssize_t akm8975_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+ if (val > 0xff)
+ return -EINVAL;
+ i2c_smbus_write_byte_data(client, AK8975_REG_CNTL, val);
+ return count;
+}
+static DEVICE_ATTR(akm_ms1, S_IWUSR | S_IRUGO, akm8975_show, akm8975_store);
+
+static int akm8975_i2c_rxdata(struct akm8975_data *akm, char *buf, int length)
+{
+ struct i2c_msg msgs[] = {
+ {
+ .addr = akm->this_client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = buf,
+ },
+ {
+ .addr = akm->this_client->addr,
+ .flags = I2C_M_RD,
+ .len = length,
+ .buf = buf,
+ },
+ };
+
+ FUNCDBG("called");
+
+ if (i2c_transfer(akm->this_client->adapter, msgs, 2) < 0) {
+ pr_err("akm8975_i2c_rxdata: transfer error\n");
+ return -EIO;
+ } else
+ return 0;
+}
+
+static int akm8975_i2c_txdata(struct akm8975_data *akm, char *buf, int length)
+{
+ struct i2c_msg msgs[] = {
+ {
+ .addr = akm->this_client->addr,
+ .flags = 0,
+ .len = length,
+ .buf = buf,
+ },
+ };
+
+ FUNCDBG("called");
+
+ if (i2c_transfer(akm->this_client->adapter, msgs, 1) < 0) {
+ pr_err("akm8975_i2c_txdata: transfer error\n");
+ return -EIO;
+ } else
+ return 0;
+}
+
+static void akm8975_ecs_report_value(struct akm8975_data *akm, short *rbuf)
+{
+ struct akm8975_data *data = i2c_get_clientdata(akm->this_client);
+
+ FUNCDBG("called");
+
+#if AK8975DRV_DATA_DBG
+ pr_info("akm8975_ecs_report_value: yaw = %d, pitch = %d, roll = %d\n",
+ rbuf[0], rbuf[1], rbuf[2]);
+ pr_info("tmp = %d, m_stat= %d, g_stat=%d\n", rbuf[3], rbuf[4], rbuf[5]);
+ pr_info("Acceleration: x = %d LSB, y = %d LSB, z = %d LSB\n",
+ rbuf[6], rbuf[7], rbuf[8]);
+ pr_info("Magnetic: x = %d LSB, y = %d LSB, z = %d LSB\n\n",
+ rbuf[9], rbuf[10], rbuf[11]);
+#endif
+ mutex_lock(&akm->flags_lock);
+ /* Report magnetic sensor information */
+ if (m_flag) {
+ input_report_abs(data->input_dev, ABS_RX, rbuf[0]);
+ input_report_abs(data->input_dev, ABS_RY, rbuf[1]);
+ input_report_abs(data->input_dev, ABS_RZ, rbuf[2]);
+ input_report_abs(data->input_dev, ABS_RUDDER, rbuf[4]);
+ }
+
+ /* Report acceleration sensor information */
+ if (a_flag) {
+ input_report_abs(data->input_dev, ABS_X, rbuf[6]);
+ input_report_abs(data->input_dev, ABS_Y, rbuf[7]);
+ input_report_abs(data->input_dev, ABS_Z, rbuf[8]);
+ input_report_abs(data->input_dev, ABS_WHEEL, rbuf[5]);
+ }
+
+ /* Report temperature information */
+ if (t_flag)
+ input_report_abs(data->input_dev, ABS_THROTTLE, rbuf[3]);
+
+ if (mv_flag) {
+ input_report_abs(data->input_dev, ABS_HAT0X, rbuf[9]);
+ input_report_abs(data->input_dev, ABS_HAT0Y, rbuf[10]);
+ input_report_abs(data->input_dev, ABS_BRAKE, rbuf[11]);
+ }
+ mutex_unlock(&akm->flags_lock);
+
+ input_sync(data->input_dev);
+}
+
+static void akm8975_ecs_close_done(struct akm8975_data *akm)
+{
+ FUNCDBG("called");
+ mutex_lock(&akm->flags_lock);
+ m_flag = 1;
+ a_flag = 1;
+ t_flag = 1;
+ mv_flag = 1;
+ mutex_unlock(&akm->flags_lock);
+}
+
+static int akm_aot_open(struct inode *inode, struct file *file)
+{
+ int ret = -1;
+
+ FUNCDBG("called");
+ if (atomic_cmpxchg(&open_flag, 0, 1) == 0) {
+ wake_up(&open_wq);
+ ret = 0;
+ }
+
+ ret = nonseekable_open(inode, file);
+ if (ret)
+ return ret;
+
+ file->private_data = akmd_data;
+
+ return ret;
+}
+
+static int akm_aot_release(struct inode *inode, struct file *file)
+{
+ FUNCDBG("called");
+ atomic_set(&open_flag, 0);
+ wake_up(&open_wq);
+ return 0;
+}
+
+static int akm_aot_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *) arg;
+ short flag;
+ struct akm8975_data *akm = file->private_data;
+
+ FUNCDBG("called");
+
+ switch (cmd) {
+ case ECS_IOCTL_APP_SET_MFLAG:
+ case ECS_IOCTL_APP_SET_AFLAG:
+ case ECS_IOCTL_APP_SET_MVFLAG:
+ if (copy_from_user(&flag, argp, sizeof(flag)))
+ return -EFAULT;
+ if (flag < 0 || flag > 1)
+ return -EINVAL;
+ break;
+ case ECS_IOCTL_APP_SET_DELAY:
+ if (copy_from_user(&flag, argp, sizeof(flag)))
+ return -EFAULT;
+ break;
+ default:
+ break;
+ }
+
+ mutex_lock(&akm->flags_lock);
+ switch (cmd) {
+ case ECS_IOCTL_APP_SET_MFLAG:
+ m_flag = flag;
+ break;
+ case ECS_IOCTL_APP_GET_MFLAG:
+ flag = m_flag;
+ break;
+ case ECS_IOCTL_APP_SET_AFLAG:
+ a_flag = flag;
+ break;
+ case ECS_IOCTL_APP_GET_AFLAG:
+ flag = a_flag;
+ break;
+ case ECS_IOCTL_APP_SET_MVFLAG:
+ mv_flag = flag;
+ break;
+ case ECS_IOCTL_APP_GET_MVFLAG:
+ flag = mv_flag;
+ break;
+ case ECS_IOCTL_APP_SET_DELAY:
+ akmd_delay = flag;
+ break;
+ case ECS_IOCTL_APP_GET_DELAY:
+ flag = akmd_delay;
+ break;
+ default:
+ return -ENOTTY;
+ }
+ mutex_unlock(&akm->flags_lock);
+
+ switch (cmd) {
+ case ECS_IOCTL_APP_GET_MFLAG:
+ case ECS_IOCTL_APP_GET_AFLAG:
+ case ECS_IOCTL_APP_GET_MVFLAG:
+ case ECS_IOCTL_APP_GET_DELAY:
+ if (copy_to_user(argp, &flag, sizeof(flag)))
+ return -EFAULT;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int akmd_open(struct inode *inode, struct file *file)
+{
+ int err = 0;
+
+ FUNCDBG("called");
+ err = nonseekable_open(inode, file);
+ if (err)
+ return err;
+
+ file->private_data = akmd_data;
+ return 0;
+}
+
+static int akmd_release(struct inode *inode, struct file *file)
+{
+ struct akm8975_data *akm = file->private_data;
+
+ FUNCDBG("called");
+ akm8975_ecs_close_done(akm);
+ return 0;
+}
+
+static int akmd_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *) arg;
+
+ char rwbuf[16];
+ int ret = -1;
+ int status;
+ short value[12];
+ short delay;
+ struct akm8975_data *akm = file->private_data;
+
+ FUNCDBG("called");
+
+ switch (cmd) {
+ case ECS_IOCTL_READ:
+ case ECS_IOCTL_WRITE:
+ if (copy_from_user(&rwbuf, argp, sizeof(rwbuf)))
+ return -EFAULT;
+ break;
+
+ case ECS_IOCTL_SET_YPR:
+ if (copy_from_user(&value, argp, sizeof(value)))
+ return -EFAULT;
+ break;
+
+ default:
+ break;
+ }
+
+ switch (cmd) {
+ case ECS_IOCTL_READ:
+ if (rwbuf[0] < 1 || rwbuf[0] > sizeof(rwbuf) - 1)
+ return -EINVAL;
+
+ ret = akm8975_i2c_rxdata(akm, &rwbuf[1], rwbuf[0]);
+ if (ret < 0)
+ return ret;
+ break;
+
+ case ECS_IOCTL_WRITE:
+ if (rwbuf[0] < 2 || rwbuf[0] > sizeof(rwbuf) - 1)
+ return -EINVAL;
+
+ ret = akm8975_i2c_txdata(akm, &rwbuf[1], rwbuf[0]);
+ if (ret < 0)
+ return ret;
+ break;
+ case ECS_IOCTL_SET_YPR:
+ akm8975_ecs_report_value(akm, value);
+ break;
+
+ case ECS_IOCTL_GET_OPEN_STATUS:
+ wait_event_interruptible(open_wq,
+ (atomic_read(&open_flag) != 0));
+ status = atomic_read(&open_flag);
+ break;
+ case ECS_IOCTL_GET_CLOSE_STATUS:
+ wait_event_interruptible(open_wq,
+ (atomic_read(&open_flag) == 0));
+ status = atomic_read(&open_flag);
+ break;
+
+ case ECS_IOCTL_GET_DELAY:
+ delay = akmd_delay;
+ break;
+
+ default:
+ FUNCDBG("Unknown cmd\n");
+ return -ENOTTY;
+ }
+
+ switch (cmd) {
+ case ECS_IOCTL_READ:
+ if (copy_to_user(argp, &rwbuf, sizeof(rwbuf)))
+ return -EFAULT;
+ break;
+ case ECS_IOCTL_GET_OPEN_STATUS:
+ case ECS_IOCTL_GET_CLOSE_STATUS:
+ if (copy_to_user(argp, &status, sizeof(status)))
+ return -EFAULT;
+ break;
+ case ECS_IOCTL_GET_DELAY:
+ if (copy_to_user(argp, &delay, sizeof(delay)))
+ return -EFAULT;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* needed to clear the int. pin */
+static void akm_work_func(struct work_struct *work)
+{
+ struct akm8975_data *akm =
+ container_of(work, struct akm8975_data, work);
+
+ FUNCDBG("called");
+ enable_irq(akm->this_client->irq);
+}
+
+static irqreturn_t akm8975_interrupt(int irq, void *dev_id)
+{
+ struct akm8975_data *akm = dev_id;
+ FUNCDBG("called");
+
+ disable_irq_nosync(akm->this_client->irq);
+ schedule_work(&akm->work);
+ return IRQ_HANDLED;
+}
+
+static int akm8975_power_off(struct akm8975_data *akm)
+{
+#if AK8975DRV_CALL_DBG
+ pr_info("%s\n", __func__);
+#endif
+ if (akm->pdata->power_off)
+ akm->pdata->power_off();
+
+ return 0;
+}
+
+static int akm8975_power_on(struct akm8975_data *akm)
+{
+ int err;
+
+#if AK8975DRV_CALL_DBG
+ pr_info("%s\n", __func__);
+#endif
+ if (akm->pdata->power_on) {
+ err = akm->pdata->power_on();
+ if (err < 0)
+ return err;
+ }
+ return 0;
+}
+
+static int akm8975_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ struct akm8975_data *akm = i2c_get_clientdata(client);
+
+#if AK8975DRV_CALL_DBG
+ pr_info("%s\n", __func__);
+#endif
+ /* TO DO: might need more work after power mgmt
+ is enabled */
+ return akm8975_power_off(akm);
+}
+
+static int akm8975_resume(struct i2c_client *client)
+{
+ struct akm8975_data *akm = i2c_get_clientdata(client);
+
+#if AK8975DRV_CALL_DBG
+ pr_info("%s\n", __func__);
+#endif
+ /* TO DO: might need more work after power mgmt
+ is enabled */
+ return akm8975_power_on(akm);
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void akm8975_early_suspend(struct early_suspend *handler)
+{
+ struct akm8975_data *akm;
+ akm = container_of(handler, struct akm8975_data, early_suspend);
+
+#if AK8975DRV_CALL_DBG
+ pr_info("%s\n", __func__);
+#endif
+ akm8975_suspend(akm->this_client, PMSG_SUSPEND);
+}
+
+static void akm8975_early_resume(struct early_suspend *handler)
+{
+ struct akm8975_data *akm;
+ akm = container_of(handler, struct akm8975_data, early_suspend);
+
+#if AK8975DRV_CALL_DBG
+ pr_info("%s\n", __func__);
+#endif
+ akm8975_resume(akm->this_client);
+}
+#endif
+
+
+static int akm8975_init_client(struct i2c_client *client)
+{
+ struct akm8975_data *data;
+ int ret;
+
+ data = i2c_get_clientdata(client);
+
+ ret = request_irq(client->irq, akm8975_interrupt, IRQF_TRIGGER_RISING,
+ "akm8975", data);
+
+ if (ret < 0) {
+ pr_err("akm8975_init_client: request irq failed\n");
+ goto err;
+ }
+
+ init_waitqueue_head(&open_wq);
+
+ mutex_lock(&data->flags_lock);
+ m_flag = 1;
+ a_flag = 1;
+ t_flag = 1;
+ mv_flag = 1;
+ mutex_unlock(&data->flags_lock);
+
+ return 0;
+err:
+ return ret;
+}
+
+static const struct file_operations akmd_fops = {
+ .owner = THIS_MODULE,
+ .open = akmd_open,
+ .release = akmd_release,
+ .ioctl = akmd_ioctl,
+};
+
+static const struct file_operations akm_aot_fops = {
+ .owner = THIS_MODULE,
+ .open = akm_aot_open,
+ .release = akm_aot_release,
+ .ioctl = akm_aot_ioctl,
+};
+
+static struct miscdevice akm_aot_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "akm8975_aot",
+ .fops = &akm_aot_fops,
+};
+
+static struct miscdevice akmd_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "akm8975_dev",
+ .fops = &akmd_fops,
+};
+
+int akm8975_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct akm8975_data *akm;
+ int err;
+ FUNCDBG("called");
+
+ if (client->dev.platform_data == NULL) {
+ dev_err(&client->dev, "platform data is NULL. exiting.\n");
+ err = -ENODEV;
+ goto exit_platform_data_null;
+ }
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "i2c functionality check failed. exiting.\n");
+ err = -ENODEV;
+ goto exit_check_functionality_failed;
+ }
+
+ akm = kzalloc(sizeof(struct akm8975_data), GFP_KERNEL);
+ if (!akm) {
+ dev_err(&client->dev,
+ "failed to allocate memory for module data\n");
+ err = -ENOMEM;
+ goto exit_alloc_data_failed;
+ }
+
+ akm->pdata = client->dev.platform_data;
+
+ mutex_init(&akm->flags_lock);
+ INIT_WORK(&akm->work, akm_work_func);
+ i2c_set_clientdata(client, akm);
+
+ err = akm8975_power_on(akm);
+ if (err < 0)
+ goto exit_power_on_failed;
+
+ akm8975_init_client(client);
+ akm->this_client = client;
+ akmd_data = akm;
+
+ akm->input_dev = input_allocate_device();
+ if (!akm->input_dev) {
+ err = -ENOMEM;
+ dev_err(&akm->this_client->dev,
+ "input device allocate failed\n");
+ goto exit_input_dev_alloc_failed;
+ }
+
+ set_bit(EV_ABS, akm->input_dev->evbit);
+
+ /* yaw */
+ input_set_abs_params(akm->input_dev, ABS_RX, 0, 23040, 0, 0);
+ /* pitch */
+ input_set_abs_params(akm->input_dev, ABS_RY, -11520, 11520, 0, 0);
+ /* roll */
+ input_set_abs_params(akm->input_dev, ABS_RZ, -5760, 5760, 0, 0);
+ /* x-axis acceleration */
+ input_set_abs_params(akm->input_dev, ABS_X, -5760, 5760, 0, 0);
+ /* y-axis acceleration */
+ input_set_abs_params(akm->input_dev, ABS_Y, -5760, 5760, 0, 0);
+ /* z-axis acceleration */
+ input_set_abs_params(akm->input_dev, ABS_Z, -5760, 5760, 0, 0);
+ /* temperature */
+ input_set_abs_params(akm->input_dev, ABS_THROTTLE, -30, 85, 0, 0);
+ /* status of magnetic sensor */
+ input_set_abs_params(akm->input_dev, ABS_RUDDER, 0, 3, 0, 0);
+ /* status of acceleration sensor */
+ input_set_abs_params(akm->input_dev, ABS_WHEEL, 0, 3, 0, 0);
+ /* x-axis of raw magnetic vector */
+ input_set_abs_params(akm->input_dev, ABS_HAT0X, -20480, 20479, 0, 0);
+ /* y-axis of raw magnetic vector */
+ input_set_abs_params(akm->input_dev, ABS_HAT0Y, -20480, 20479, 0, 0);
+ /* z-axis of raw magnetic vector */
+ input_set_abs_params(akm->input_dev, ABS_BRAKE, -20480, 20479, 0, 0);
+
+ akm->input_dev->name = "compass";
+
+ err = input_register_device(akm->input_dev);
+ if (err) {
+ pr_err("akm8975_probe: Unable to register input device: %s\n",
+ akm->input_dev->name);
+ goto exit_input_register_device_failed;
+ }
+
+ err = misc_register(&akmd_device);
+ if (err) {
+ pr_err("akm8975_probe: akmd_device register failed\n");
+ goto exit_misc_device_register_failed;
+ }
+
+ err = misc_register(&akm_aot_device);
+ if (err) {
+ pr_err("akm8975_probe: akm_aot_device register failed\n");
+ goto exit_misc_device_register_failed;
+ }
+
+ err = device_create_file(&client->dev, &dev_attr_akm_ms1);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ akm->early_suspend.suspend = akm8975_early_suspend;
+ akm->early_suspend.resume = akm8975_early_resume;
+ register_early_suspend(&akm->early_suspend);
+#endif
+ return 0;
+
+exit_misc_device_register_failed:
+exit_input_register_device_failed:
+ input_free_device(akm->input_dev);
+exit_input_dev_alloc_failed:
+ akm8975_power_off(akm);
+exit_power_on_failed:
+ kfree(akm);
+exit_alloc_data_failed:
+exit_check_functionality_failed:
+exit_platform_data_null:
+ return err;
+}
+
+static int akm8975_remove(struct i2c_client *client)
+{
+ struct akm8975_data *akm = i2c_get_clientdata(client);
+ FUNCDBG("called");
+ free_irq(client->irq, akm);
+ input_unregister_device(akm->input_dev);
+ misc_deregister(&akmd_device);
+ misc_deregister(&akm_aot_device);
+ akm8975_power_off(akm);
+ kfree(akm);
+ return 0;
+}
+
+static const struct i2c_device_id akm8975_id[] = {
+ { "akm8975", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, akm8975_id);
+
+static struct i2c_driver akm8975_driver = {
+ .probe = akm8975_probe,
+ .remove = akm8975_remove,
+#ifndef CONFIG_HAS_EARLYSUSPEND
+ .resume = akm8975_resume,
+ .suspend = akm8975_suspend,
+#endif
+ .id_table = akm8975_id,
+ .driver = {
+ .name = "akm8975",
+ },
+};
+
+static int __init akm8975_init(void)
+{
+ pr_info("AK8975 compass driver: init\n");
+ FUNCDBG("AK8975 compass driver: init\n");
+ return i2c_add_driver(&akm8975_driver);
+}
+
+static void __exit akm8975_exit(void)
+{
+ FUNCDBG("AK8975 compass driver: exit\n");
+ i2c_del_driver(&akm8975_driver);
+}
+
+module_init(akm8975_init);
+module_exit(akm8975_exit);
+
+MODULE_AUTHOR("Hou-Kun Chen <hk_chen@htc.com>");
+MODULE_DESCRIPTION("AK8975 compass driver");
+MODULE_LICENSE("GPL");
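
For context, a hypothetical board-file sketch of registering the compass. Only what the probe above consumes is assumed (platform data with optional power_on/power_off hooks, and an IRQ that fires on the rising data-ready edge); the bus number, address and IRQ number are illustrative.

#include <linux/i2c.h>
#include <linux/akm8975.h>

static struct akm8975_platform_data example_akm_pdata = {
        .power_on  = NULL,      /* optional board supply hooks; probe tolerates NULL */
        .power_off = NULL,
};

static struct i2c_board_info example_akm_i2c_info[] __initdata = {
        {
                I2C_BOARD_INFO("akm8975", 0x0c),        /* matches akm8975_id[] above */
                .platform_data = &example_akm_pdata,
                .irq           = 43,                    /* DRDY line, rising edge */
        },
};

/* registered from the board init via i2c_register_board_info() */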
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index 158da5a81a66..3c09cbb70b1d 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
/* Serialize access to ssc_list and user count */
static DEFINE_SPINLOCK(user_lock);
@@ -131,6 +132,13 @@ static int ssc_probe(struct platform_device *pdev)
struct resource *regs;
struct ssc_device *ssc;
const struct atmel_ssc_platform_data *plat_dat;
+ struct pinctrl *pinctrl;
+
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl)) {
+ dev_err(&pdev->dev, "Failed to request pinctrl\n");
+ return PTR_ERR(pinctrl);
+ }
ssc = devm_kzalloc(&pdev->dev, sizeof(struct ssc_device), GFP_KERNEL);
if (!ssc) {
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index 18794aea6062..e40ffd9502d1 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -187,13 +187,13 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
wait_ret = wait_event_interruptible(dev->iamthif_cl.wait,
(cb = mei_amthif_find_read_list_entry(dev, file)));
+ /* Locking again the Mutex */
+ mutex_lock(&dev->device_lock);
+
if (wait_ret)
return -ERESTARTSYS;
dev_dbg(&dev->pdev->dev, "woke up from sleep\n");
-
- /* Locking again the Mutex */
- mutex_lock(&dev->device_lock);
}
diff --git a/drivers/misc/uid_stat.c b/drivers/misc/uid_stat.c
new file mode 100644
index 000000000000..2141124a6c12
--- /dev/null
+++ b/drivers/misc/uid_stat.c
@@ -0,0 +1,156 @@
+/* drivers/misc/uid_stat.c
+ *
+ * Copyright (C) 2008 - 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/atomic.h>
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/uid_stat.h>
+#include <net/activity_stats.h>
+
+static DEFINE_SPINLOCK(uid_lock);
+static LIST_HEAD(uid_list);
+static struct proc_dir_entry *parent;
+
+struct uid_stat {
+ struct list_head link;
+ uid_t uid;
+ atomic_t tcp_rcv;
+ atomic_t tcp_snd;
+};
+
+static struct uid_stat *find_uid_stat(uid_t uid) {
+ unsigned long flags;
+ struct uid_stat *entry;
+
+ spin_lock_irqsave(&uid_lock, flags);
+ list_for_each_entry(entry, &uid_list, link) {
+ if (entry->uid == uid) {
+ spin_unlock_irqrestore(&uid_lock, flags);
+ return entry;
+ }
+ }
+ spin_unlock_irqrestore(&uid_lock, flags);
+ return NULL;
+}
+
+static int tcp_snd_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len;
+ unsigned int bytes;
+ char *p = page;
+ struct uid_stat *uid_entry = (struct uid_stat *) data;
+ if (!data)
+ return 0;
+
+ bytes = (unsigned int) (atomic_read(&uid_entry->tcp_snd) + INT_MIN);
+ p += sprintf(p, "%u\n", bytes);
+ len = (p - page) - off;
+ *eof = (len <= count) ? 1 : 0;
+ *start = page + off;
+ return len;
+}
+
+static int tcp_rcv_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len;
+ unsigned int bytes;
+ char *p = page;
+ struct uid_stat *uid_entry = (struct uid_stat *) data;
+ if (!data)
+ return 0;
+
+ bytes = (unsigned int) (atomic_read(&uid_entry->tcp_rcv) + INT_MIN);
+ p += sprintf(p, "%u\n", bytes);
+ len = (p - page) - off;
+ *eof = (len <= count) ? 1 : 0;
+ *start = page + off;
+ return len;
+}
+
+/* Create a new entry for tracking the specified uid. */
+static struct uid_stat *create_stat(uid_t uid) {
+ unsigned long flags;
+ char uid_s[32];
+ struct uid_stat *new_uid;
+ struct proc_dir_entry *entry;
+
+ /* Create the uid stat struct and append it to the list. */
+ if ((new_uid = kmalloc(sizeof(struct uid_stat), GFP_KERNEL)) == NULL)
+ return NULL;
+
+ new_uid->uid = uid;
+ /* Counters start at INT_MIN, so we can track 4GB of network traffic. */
+ atomic_set(&new_uid->tcp_rcv, INT_MIN);
+ atomic_set(&new_uid->tcp_snd, INT_MIN);
+
+ spin_lock_irqsave(&uid_lock, flags);
+ list_add_tail(&new_uid->link, &uid_list);
+ spin_unlock_irqrestore(&uid_lock, flags);
+
+ sprintf(uid_s, "%d", uid);
+ entry = proc_mkdir(uid_s, parent);
+
+ /* Keep reference to uid_stat so we know what uid to read stats from. */
+ create_proc_read_entry("tcp_snd", S_IRUGO, entry , tcp_snd_read_proc,
+ (void *) new_uid);
+
+ create_proc_read_entry("tcp_rcv", S_IRUGO, entry, tcp_rcv_read_proc,
+ (void *) new_uid);
+
+ return new_uid;
+}
+
+int uid_stat_tcp_snd(uid_t uid, int size) {
+ struct uid_stat *entry;
+ activity_stats_update();
+ if ((entry = find_uid_stat(uid)) == NULL &&
+ ((entry = create_stat(uid)) == NULL)) {
+ return -1;
+ }
+ atomic_add(size, &entry->tcp_snd);
+ return 0;
+}
+
+int uid_stat_tcp_rcv(uid_t uid, int size) {
+ struct uid_stat *entry;
+ activity_stats_update();
+ if ((entry = find_uid_stat(uid)) == NULL &&
+ ((entry = create_stat(uid)) == NULL)) {
+ return -1;
+ }
+ atomic_add(size, &entry->tcp_rcv);
+ return 0;
+}
+
+static int __init uid_stat_init(void)
+{
+ parent = proc_mkdir("uid_stat", NULL);
+ if (!parent) {
+ pr_err("uid_stat: failed to create proc entry\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+__initcall(uid_stat_init);
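
As a usage sketch (the real hook points live in the networking patches elsewhere in this series, so the surrounding code here is hypothetical), a sender-side path accounts transferred bytes per UID like this, and the running totals then show up in /proc/uid_stat/<uid>/tcp_snd and tcp_rcv:

#include <linux/types.h>
#include <linux/uid_stat.h>

/* hypothetical helper called from a send path with the socket owner's uid */
static inline void example_account_tcp_tx(uid_t uid, int copied)
{
        if (copied > 0)
                uid_stat_tcp_snd(uid, copied);
}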
diff --git a/drivers/misc/wl127x-rfkill.c b/drivers/misc/wl127x-rfkill.c
new file mode 100644
index 000000000000..f5b95152948b
--- /dev/null
+++ b/drivers/misc/wl127x-rfkill.c
@@ -0,0 +1,121 @@
+/*
+ * Bluetooth TI wl127x rfkill power control via GPIO
+ *
+ * Copyright (C) 2009 Motorola, Inc.
+ * Copyright (C) 2008 Texas Instruments
+ * Initial code: Pavan Savoy <pavan.savoy@gmail.com> (wl127x_power.c)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/rfkill.h>
+#include <linux/platform_device.h>
+#include <linux/wl127x-rfkill.h>
+ Creates an rfkill entry in sysfs for power control of TI wl127x
+ Bluetooth chips.
+{
+ int nshutdown_gpio = (int) data;
+
+ switch (state) {
+ case RFKILL_STATE_UNBLOCKED:
+ gpio_set_value(nshutdown_gpio, 1);
+ break;
+ case RFKILL_STATE_SOFT_BLOCKED:
+ gpio_set_value(nshutdown_gpio, 0);
+ break;
+ default:
+ printk(KERN_ERR "invalid bluetooth rfkill state %d\n", state);
+ }
+ return 0;
+}
+
+static int wl127x_rfkill_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ struct wl127x_rfkill_platform_data *pdata = pdev->dev.platform_data;
+ enum rfkill_state default_state = RFKILL_STATE_SOFT_BLOCKED; /* off */
+
+ rc = gpio_request(pdata->nshutdown_gpio, "wl127x_nshutdown_gpio");
+ if (unlikely(rc))
+ return rc;
+
+ rc = gpio_direction_output(pdata->nshutdown_gpio, 0);
+ if (unlikely(rc))
+ return rc;
+
+ rfkill_set_default(RFKILL_TYPE_BLUETOOTH, default_state);
+ wl127x_rfkill_set_power(NULL, default_state);
+
+ pdata->rfkill = rfkill_allocate(&pdev->dev, RFKILL_TYPE_BLUETOOTH);
+ if (unlikely(!pdata->rfkill))
+ return -ENOMEM;
+
+ pdata->rfkill->name = "wl127x";
+ pdata->rfkill->state = default_state;
+ /* userspace cannot take exclusive control */
+ pdata->rfkill->user_claim_unsupported = 1;
+ pdata->rfkill->user_claim = 0;
+ pdata->rfkill->data = (void *) pdata->nshutdown_gpio;
+ pdata->rfkill->toggle_radio = wl127x_rfkill_set_power;
+
+ rc = rfkill_register(pdata->rfkill);
+
+ if (unlikely(rc))
+ rfkill_free(pdata->rfkill);
+
+ return rc;
+}
+
+static int wl127x_rfkill_remove(struct platform_device *pdev)
+{
+ struct wl127x_rfkill_platform_data *pdata = pdev->dev.platform_data;
+
+ rfkill_unregister(pdata->rfkill);
+ rfkill_free(pdata->rfkill);
+ gpio_free(pdata->nshutdown_gpio);
+
+ return 0;
+}
+
+static struct platform_driver wl127x_rfkill_platform_driver = {
+ .probe = wl127x_rfkill_probe,
+ .remove = wl127x_rfkill_remove,
+ .driver = {
+ .name = "wl127x-rfkill",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init wl127x_rfkill_init(void)
+{
+ return platform_driver_register(&wl127x_rfkill_platform_driver);
+}
+
+static void __exit wl127x_rfkill_exit(void)
+{
+ platform_driver_unregister(&wl127x_rfkill_platform_driver);
+}
+
+module_init(wl127x_rfkill_init);
+module_exit(wl127x_rfkill_exit);
+
+MODULE_ALIAS("platform:wl127x");
+MODULE_DESCRIPTION("wl127x-rfkill");
+MODULE_AUTHOR("Motorola");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 3b1f783bf924..ebb4afe6c702 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -50,6 +50,15 @@ config MMC_BLOCK_BOUNCE
If unsure, say Y here.
+config MMC_BLOCK_DEFERRED_RESUME
+ bool "Deferr MMC layer resume until I/O is requested"
+ depends on MMC_BLOCK
+ default n
+ help
+ Say Y here to enable deferred MMC resume until I/O
+ is requested. This will reduce overall resume latency and
+ save power when there's an SD card inserted but not being used.
+
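As a rough sketch of what this option changes (using helpers added later in this patch; not a literal excerpt): system resume only marks the bus, and the first block request performs the real resume.

#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
/* Called from the block request path; see mmc_blk_issue_rq() below. */
static void example_deferred_resume(struct mmc_card *card)
{
	/* mmc_resume_host() only set MMC_BUSRESUME_NEEDS_RESUME; pay the
	 * power-up and re-initialisation cost here, on first I/O. */
	if (mmc_bus_needs_resume(card->host))
		mmc_resume_bus(card->host);
}
#endif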
config SDIO_UART
tristate "SDIO UART/GPS class support"
help
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 21056b9ef0a0..fc562ebf5a23 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -148,11 +148,7 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
static inline int mmc_get_devidx(struct gendisk *disk)
{
- int devmaj = MAJOR(disk_devt(disk));
- int devidx = MINOR(disk_devt(disk)) / perdev_minors;
-
- if (!devmaj)
- devidx = disk->first_minor / perdev_minors;
+ int devidx = disk->first_minor / perdev_minors;
return devidx;
}
@@ -714,18 +710,22 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
req->rq_disk->disk_name, "timed out", name, status);
/* If the status cmd initially failed, retry the r/w cmd */
- if (!status_valid)
+ if (!status_valid) {
+ pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name);
return ERR_RETRY;
-
+ }
/*
* If it was a r/w cmd crc error, or illegal command
* (eg, issued in wrong state) then retry - we should
* have corrected the state problem above.
*/
- if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
+ if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
+ pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name);
return ERR_RETRY;
+ }
/* Otherwise abort the command */
+ pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name);
return ERR_ABORT;
default:
@@ -1467,12 +1467,22 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
return 0;
}
+static int
+mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card);
+
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
int ret;
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ if (mmc_bus_needs_resume(card->host)) {
+ mmc_resume_bus(card->host);
+// mmc_blk_set_blksize(md, card);
+ }
+#endif
+
if (req && !mq->mqrq_prev->req)
/* claim host only for the first request */
mmc_claim_host(card->host);
@@ -1887,6 +1897,9 @@ static int mmc_blk_probe(struct mmc_card *card)
mmc_set_drvdata(card, md);
mmc_fixup_device(card, blk_fixups);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ mmc_set_bus_resume_policy(card->host, 1);
+#endif
if (mmc_add_disk(md))
goto out;
@@ -1912,6 +1925,9 @@ static void mmc_blk_remove(struct mmc_card *card)
mmc_release_host(card->host);
mmc_blk_remove_req(md);
mmc_set_drvdata(card, NULL);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ mmc_set_bus_resume_policy(card->host, 0);
+#endif
}
#ifdef CONFIG_PM
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index ef103871517f..85c2e1acd156 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -27,3 +27,20 @@ config MMC_CLKGATE
support handling this in order for it to be of any use.
If unsure, say N.
+
+config MMC_EMBEDDED_SDIO
+ boolean "MMC embedded SDIO device support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ help
+ If you say Y here, support will be added for embedded SDIO
+ devices which do not contain the necessary enumeration
+ support in hardware to be properly detected.
+
+config MMC_PARANOID_SD_INIT
+ bool "Enable paranoid SD card initialization (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ help
+ If you say Y here, the MMC layer will be extra paranoid
+ about re-trying SD init requests. This can be a useful
+ work-around for buggy controllers and hardware. Enable
+ if you are experiencing issues with SD detection.
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index aaed7687cf09..039503d53454 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -27,6 +27,7 @@
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/slab.h>
+#include <linux/wakelock.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -1411,6 +1412,36 @@ static inline void mmc_bus_put(struct mmc_host *host)
spin_unlock_irqrestore(&host->lock, flags);
}
+int mmc_resume_bus(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ if (!mmc_bus_needs_resume(host))
+ return -EINVAL;
+
+ printk("%s: Starting deferred resume\n", mmc_hostname(host));
+ spin_lock_irqsave(&host->lock, flags);
+ host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
+ host->rescan_disable = 0;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ mmc_bus_get(host);
+ if (host->bus_ops && !host->bus_dead) {
+ mmc_power_up(host);
+ BUG_ON(!host->bus_ops->resume);
+ host->bus_ops->resume(host);
+ }
+
+ if (host->bus_ops && host->bus_ops->detect && !host->bus_dead)
+ host->bus_ops->detect(host);
+
+ mmc_bus_put(host);
+ printk("%s: Deferred resume completed\n", mmc_hostname(host));
+ return 0;
+}
+
+EXPORT_SYMBOL(mmc_resume_bus);
+
/*
* Assign a mmc bus handler to a host. Only one bus handler may control a
* host at any given time.
@@ -1476,6 +1507,8 @@ void mmc_detect_change(struct mmc_host *host, unsigned long delay)
spin_unlock_irqrestore(&host->lock, flags);
#endif
host->detect_change = 1;
+
+ wake_lock(&host->detect_wake_lock);
mmc_schedule_delayed_work(&host->detect, delay);
}
@@ -2158,6 +2191,7 @@ void mmc_rescan(struct work_struct *work)
struct mmc_host *host =
container_of(work, struct mmc_host, detect.work);
int i;
+ bool extend_wakelock = false;
if (host->rescan_disable)
return;
@@ -2179,6 +2213,12 @@ void mmc_rescan(struct work_struct *work)
host->detect_change = 0;
+ /* If the card was removed, the bus will be marked
+ * as dead - extend the wakelock so userspace
+ * can respond. */
+ if (host->bus_dead)
+ extend_wakelock = true;
+
/*
* Let mmc_bus_put() free the bus/bus_ops if we've found that
* the card is no longer present.
@@ -2207,16 +2247,24 @@ void mmc_rescan(struct work_struct *work)
mmc_claim_host(host);
for (i = 0; i < ARRAY_SIZE(freqs); i++) {
- if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
+ if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) {
+ extend_wakelock = true;
break;
+ }
if (freqs[i] <= host->f_min)
break;
}
mmc_release_host(host);
out:
- if (host->caps & MMC_CAP_NEEDS_POLL)
+ if (extend_wakelock)
+ wake_lock_timeout(&host->detect_wake_lock, HZ / 2);
+ else
+ wake_unlock(&host->detect_wake_lock);
+ if (host->caps & MMC_CAP_NEEDS_POLL) {
+ wake_lock(&host->detect_wake_lock);
mmc_schedule_delayed_work(&host->detect, HZ);
+ }
}
void mmc_start_host(struct mmc_host *host)
@@ -2237,7 +2285,8 @@ void mmc_stop_host(struct mmc_host *host)
#endif
host->rescan_disable = 1;
- cancel_delayed_work_sync(&host->detect);
+ if (cancel_delayed_work_sync(&host->detect))
+ wake_unlock(&host->detect_wake_lock);
mmc_flush_scheduled_work();
/* clear pm flags now and let card drivers set them as needed */
@@ -2433,7 +2482,11 @@ int mmc_suspend_host(struct mmc_host *host)
{
int err = 0;
- cancel_delayed_work(&host->detect);
+ if (mmc_bus_needs_resume(host))
+ return 0;
+
+ if (cancel_delayed_work(&host->detect))
+ wake_unlock(&host->detect_wake_lock);
mmc_flush_scheduled_work();
err = mmc_cache_ctrl(host, 0);
@@ -2488,6 +2541,12 @@ int mmc_resume_host(struct mmc_host *host)
int err = 0;
mmc_bus_get(host);
+ if (mmc_bus_manual_resume(host)) {
+ host->bus_resume_flags |= MMC_BUSRESUME_NEEDS_RESUME;
+ mmc_bus_put(host);
+ return 0;
+ }
+
if (host->bus_ops && !host->bus_dead) {
if (!mmc_card_keep_power(host)) {
mmc_power_up(host);
@@ -2548,9 +2607,14 @@ int mmc_pm_notify(struct notifier_block *notify_block,
}
spin_lock_irqsave(&host->lock, flags);
+ if (mmc_bus_needs_resume(host)) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ break;
+ }
host->rescan_disable = 1;
spin_unlock_irqrestore(&host->lock, flags);
- cancel_delayed_work_sync(&host->detect);
+ if (cancel_delayed_work_sync(&host->detect))
+ wake_unlock(&host->detect_wake_lock);
if (!host->bus_ops || host->bus_ops->suspend)
break;
@@ -2571,6 +2635,10 @@ int mmc_pm_notify(struct notifier_block *notify_block,
case PM_POST_RESTORE:
spin_lock_irqsave(&host->lock, flags);
+ if (mmc_bus_manual_resume(host)) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ break;
+ }
host->rescan_disable = 0;
spin_unlock_irqrestore(&host->lock, flags);
mmc_detect_change(host, 0);
@@ -2581,6 +2649,22 @@ int mmc_pm_notify(struct notifier_block *notify_block,
}
#endif
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+void mmc_set_embedded_sdio_data(struct mmc_host *host,
+ struct sdio_cis *cis,
+ struct sdio_cccr *cccr,
+ struct sdio_embedded_func *funcs,
+ int num_funcs)
+{
+ host->embedded_sdio_data.cis = cis;
+ host->embedded_sdio_data.cccr = cccr;
+ host->embedded_sdio_data.funcs = funcs;
+ host->embedded_sdio_data.num_funcs = num_funcs;
+}
+
+EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
+#endif
+
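A hedged platform-code sketch of describing a non-enumerable SDIO device to the core. The IDs and block size are placeholders, and struct sdio_embedded_func is assumed to carry only the f_class/f_maxblksize fields used above.

static struct sdio_cis example_cis = {
	.vendor	= 0x0097,	/* placeholder manufacturer ID */
	.device	= 0x4076,	/* placeholder card ID */
};

static struct sdio_embedded_func example_funcs[] = {
	{
		.f_class	= SDIO_CLASS_WLAN,
		.f_maxblksize	= 512,
	},
};

/* Called by the platform once the mmc_host for the embedded slot exists;
 * a NULL cccr makes the core read the CCCR from the card as usual. */
static void example_describe_embedded_sdio(struct mmc_host *host)
{
	mmc_set_embedded_sdio_data(host, &example_cis, NULL,
				   example_funcs, ARRAY_SIZE(example_funcs));
}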
static int __init mmc_init(void)
{
int ret;
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index ee2e16b17017..aec77e5ef838 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -335,6 +335,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
spin_lock_init(&host->lock);
init_waitqueue_head(&host->wq);
+ wake_lock_init(&host->detect_wake_lock, WAKE_LOCK_SUSPEND,
+ kasprintf(GFP_KERNEL, "%s_detect", mmc_hostname(host)));
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
#ifdef CONFIG_PM
host->pm_notify.notifier_call = mmc_pm_notify;
@@ -387,7 +389,8 @@ int mmc_add_host(struct mmc_host *host)
mmc_host_clk_sysfs_init(host);
mmc_start_host(host);
- register_pm_notifier(&host->pm_notify);
+ if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
+ register_pm_notifier(&host->pm_notify);
return 0;
}
@@ -404,7 +407,9 @@ EXPORT_SYMBOL(mmc_add_host);
*/
void mmc_remove_host(struct mmc_host *host)
{
- unregister_pm_notifier(&host->pm_notify);
+ if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
+ unregister_pm_notifier(&host->pm_notify);
+
mmc_stop_host(host);
#ifdef CONFIG_DEBUG_FS
@@ -431,6 +436,7 @@ void mmc_free_host(struct mmc_host *host)
spin_lock(&mmc_host_lock);
idr_remove(&mmc_host_idr, host->index);
spin_unlock(&mmc_host_lock);
+ wake_lock_destroy(&host->detect_wake_lock);
put_device(&host->class_dev);
}
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 74972c241dff..1833f730c2f8 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -796,6 +796,9 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
bool reinit)
{
int err;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ int retries;
+#endif
if (!reinit) {
/*
@@ -822,7 +825,26 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
/*
* Fetch switch information from card.
*/
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ for (retries = 1; retries <= 3; retries++) {
+ err = mmc_read_switch(card);
+ if (!err) {
+ if (retries > 1) {
+ printk(KERN_WARNING
+ "%s: recovered\n",
+ mmc_hostname(host));
+ }
+ break;
+ } else {
+ printk(KERN_WARNING
+ "%s: read switch failed (attempt %d)\n",
+ mmc_hostname(host), retries);
+ }
+ }
+#else
err = mmc_read_switch(card);
+#endif
+
if (err)
return err;
}
@@ -1033,18 +1055,36 @@ static int mmc_sd_alive(struct mmc_host *host)
*/
static void mmc_sd_detect(struct mmc_host *host)
{
- int err;
+ int err = 0;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ int retries = 5;
+#endif
BUG_ON(!host);
BUG_ON(!host->card);
-
+
mmc_claim_host(host);
/*
* Just check if our card has been removed.
*/
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ while (retries) {
+ err = mmc_send_status(host->card, NULL);
+ if (err) {
+ retries--;
+ udelay(5);
+ continue;
+ }
+ break;
+ }
+ if (!retries) {
+ printk(KERN_ERR "%s(%s): Unable to re-detect card (%d)\n",
+ __func__, mmc_hostname(host), err);
+ }
+#else
err = _mmc_detect_card_removed(host);
-
+#endif
mmc_release_host(host);
if (err) {
@@ -1085,12 +1125,31 @@ static int mmc_sd_suspend(struct mmc_host *host)
static int mmc_sd_resume(struct mmc_host *host)
{
int err;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ int retries;
+#endif
BUG_ON(!host);
BUG_ON(!host->card);
mmc_claim_host(host);
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ retries = 5;
+ while (retries) {
+ err = mmc_sd_init_card(host, host->ocr, host->card);
+
+ if (err) {
+ printk(KERN_ERR "%s: Re-init card rc = %d (retries = %d)\n",
+ mmc_hostname(host), err, retries);
+ mdelay(5);
+ retries--;
+ continue;
+ }
+ break;
+ }
+#else
err = mmc_sd_init_card(host, host->ocr, host->card);
+#endif
mmc_release_host(host);
return err;
@@ -1144,6 +1203,9 @@ int mmc_attach_sd(struct mmc_host *host)
{
int err;
u32 ocr;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ int retries;
+#endif
BUG_ON(!host);
WARN_ON(!host->claimed);
@@ -1206,9 +1268,27 @@ int mmc_attach_sd(struct mmc_host *host)
/*
* Detect and init the card.
*/
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ retries = 5;
+ while (retries) {
+ err = mmc_sd_init_card(host, host->ocr, NULL);
+ if (err) {
+ retries--;
+ continue;
+ }
+ break;
+ }
+
+ if (!retries) {
+ printk(KERN_ERR "%s: mmc_sd_init_card() failure (err = %d)\n",
+ mmc_hostname(host), err);
+ goto err;
+ }
+#else
err = mmc_sd_init_card(host, host->ocr, NULL);
if (err)
goto err;
+#endif
mmc_release_host(host);
err = mmc_add_card(host->card);
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 2273ce6b6c1a..de7f1338f79f 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -10,6 +10,7 @@
*/
#include <linux/err.h>
+#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/mmc/host.h>
@@ -28,6 +29,10 @@
#include "sdio_ops.h"
#include "sdio_cis.h"
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+#include <linux/mmc/sdio_ids.h>
+#endif
+
static int sdio_read_fbr(struct sdio_func *func)
{
int ret;
@@ -716,19 +721,35 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
goto finish;
}
- /*
- * Read the common registers.
- */
- err = sdio_read_cccr(card, ocr);
- if (err)
- goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (host->embedded_sdio_data.cccr)
+ memcpy(&card->cccr, host->embedded_sdio_data.cccr, sizeof(struct sdio_cccr));
+ else {
+#endif
+ /*
+ * Read the common registers.
+ */
+ err = sdio_read_cccr(card, ocr);
+ if (err)
+ goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ }
+#endif
- /*
- * Read the common CIS tuples.
- */
- err = sdio_read_common_cis(card);
- if (err)
- goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (host->embedded_sdio_data.cis)
+ memcpy(&card->cis, host->embedded_sdio_data.cis, sizeof(struct sdio_cis));
+ else {
+#endif
+ /*
+ * Read the common CIS tuples.
+ */
+ err = sdio_read_common_cis(card);
+ if (err)
+ goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ }
+#endif
if (oldcard) {
int same = (card->cis.vendor == oldcard->cis.vendor &&
@@ -1123,14 +1144,36 @@ int mmc_attach_sdio(struct mmc_host *host)
funcs = (ocr & 0x70000000) >> 28;
card->sdio_funcs = 0;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (host->embedded_sdio_data.funcs)
+ card->sdio_funcs = funcs = host->embedded_sdio_data.num_funcs;
+#endif
+
/*
* Initialize (but don't add) all present functions.
*/
for (i = 0; i < funcs; i++, card->sdio_funcs++) {
- err = sdio_init_func(host->card, i + 1);
- if (err)
- goto remove;
-
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (host->embedded_sdio_data.funcs) {
+ struct sdio_func *tmp;
+
+ tmp = sdio_alloc_func(host->card);
+ if (IS_ERR(tmp))
+ goto remove;
+ tmp->num = (i + 1);
+ card->sdio_func[i] = tmp;
+ tmp->class = host->embedded_sdio_data.funcs[i].f_class;
+ tmp->max_blksize = host->embedded_sdio_data.funcs[i].f_maxblksize;
+ tmp->vendor = card->cis.vendor;
+ tmp->device = card->cis.device;
+ } else {
+#endif
+ err = sdio_init_func(host->card, i + 1);
+ if (err)
+ goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ }
+#endif
/*
* Enable Runtime PM for this func (if supported)
*/
@@ -1178,3 +1221,77 @@ err:
return err;
}
+int sdio_reset_comm(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ u32 ocr;
+ int err;
+
+ printk("%s():\n", __func__);
+ mmc_claim_host(host);
+
+ mmc_go_idle(host);
+
+ mmc_set_clock(host, host->f_min);
+
+ err = mmc_send_io_op_cond(host, 0, &ocr);
+ if (err)
+ goto err;
+
+ host->ocr = mmc_select_voltage(host, ocr);
+ if (!host->ocr) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ err = mmc_send_io_op_cond(host, host->ocr, &ocr);
+ if (err)
+ goto err;
+
+ if (mmc_host_is_spi(host)) {
+ err = mmc_spi_set_crc(host, use_spi_crc);
+ if (err)
+ goto err;
+ }
+
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_send_relative_addr(host, &card->rca);
+ if (err)
+ goto err;
+ mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
+ }
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_select_card(card);
+ if (err)
+ goto err;
+ }
+
+ /*
+ * Switch to high-speed (if supported).
+ */
+ err = sdio_enable_hs(card);
+ if (err > 0)
+ mmc_sd_go_highspeed(card);
+ else if (err)
+ goto err;
+
+ /*
+ * Change to the card's maximum speed.
+ */
+ mmc_set_clock(host, mmc_sdio_get_max_clock(card));
+
+ err = sdio_enable_4bit_bus(card);
+ if (err > 0)
+ mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
+ else if (err)
+ goto err;
+
+ mmc_release_host(host);
+ return 0;
+err:
+ printk("%s: Error resetting SDIO communications (%d)\n",
+ mmc_hostname(host), err);
+ mmc_release_host(host);
+ return err;
+}
+EXPORT_SYMBOL(sdio_reset_comm);
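
A hedged sketch of the intended caller: an SDIO function driver that has power-cycled its chip out of band and wants to re-establish the link without a full rescan.

static int example_recover_link(struct sdio_func *func)
{
	int err;

	err = sdio_reset_comm(func->card);
	if (err)
		dev_err(&func->dev, "sdio_reset_comm failed: %d\n", err);
	return err;
}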
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 5e57048e2c1d..19edd595604a 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -24,6 +24,10 @@
#include "sdio_cis.h"
#include "sdio_bus.h"
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+#include <linux/mmc/host.h>
+#endif
+
/* show configuration fields */
#define sdio_config_attr(field, format_string) \
static ssize_t \
@@ -270,7 +274,14 @@ static void sdio_release_func(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
- sdio_free_func_cis(func);
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ /*
+ * If this device is embedded then we never allocated
+ * cis tables for this func
+ */
+ if (!func->card->host->embedded_sdio_data.funcs)
+#endif
+ sdio_free_func_cis(func);
kfree(func->info);
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index 78cb4d5d9d58..8fdeb07723a6 100644..100755
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -384,6 +384,39 @@ u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret)
EXPORT_SYMBOL_GPL(sdio_readb);
/**
+ * sdio_readb_ext - read a single byte from a SDIO function
+ * @func: SDIO function to access
+ * @addr: address to read
+ * @err_ret: optional status value from transfer
+ * @in: value OR'd into the CMD52 argument (pass 0 for a plain read)
+ *
+ * Reads a single byte from the address space of a given SDIO
+ * function. If there is a problem reading the address, 0xff
+ * is returned and @err_ret will contain the error code.
+ */
+unsigned char sdio_readb_ext(struct sdio_func *func, unsigned int addr,
+ int *err_ret, unsigned in)
+{
+ int ret;
+ unsigned char val;
+
+ BUG_ON(!func);
+
+ if (err_ret)
+ *err_ret = 0;
+
+ ret = mmc_io_rw_direct(func->card, 0, func->num, addr, (u8)in, &val);
+ if (ret) {
+ if (err_ret)
+ *err_ret = ret;
+ return 0xFF;
+ }
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(sdio_readb_ext);
+
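A hedged usage sketch; the register offset is a placeholder, and passing 0 for the extra argument makes the call behave like a plain sdio_readb().

static u8 example_read_status(struct sdio_func *func)
{
	int err;
	u8 val;

	sdio_claim_host(func);
	val = sdio_readb_ext(func, 0x08 /* placeholder register */, &err, 0);
	sdio_release_host(func);

	if (err)
		pr_warn("%s: status read failed (%d)\n", sdio_func_id(func), err);
	return val;
}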
+/**
* sdio_writeb - write a single byte to a SDIO function
* @func: SDIO function to access
* @b: byte to write
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 6f0bfc0c8c9c..b559fd9e1706 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -694,11 +694,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
break;
}
- if (count >= 0xF) {
- DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
- mmc_hostname(host->mmc), count, cmd->opcode);
+ if (count >= 0xF)
count = 0xE;
- }
return count;
}
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 5819eb575210..c50fd99a1253 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -1,3 +1,10 @@
+config MTD_NAND_IDS
+ tristate "Include chip ids for known NAND devices."
+ depends on MTD
+ help
+ Useful for NAND drivers that do not use the NAND subsystem but
+ would still like to take advantage of the known chip information.
+
config MTD_NAND_ECC
tristate
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
index e49c0eff040b..a9481606bbcd 100644
--- a/drivers/net/ethernet/adi/Kconfig
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -61,6 +61,7 @@ config BFIN_RX_DESC_NUM
config BFIN_MAC_USE_HWSTAMP
bool "Use IEEE 1588 hwstamp"
+ depends on BFIN_MAC && BF518
select PTP_1588_CLOCK
default y
---help---
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 01588b66a38c..f771ddfba646 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -80,12 +80,37 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
}
- memcpy(&bp->bnx2x_txq[old_txdata_index],
- &bp->bnx2x_txq[new_txdata_index],
+ memcpy(&bp->bnx2x_txq[new_txdata_index],
+ &bp->bnx2x_txq[old_txdata_index],
sizeof(struct bnx2x_fp_txdata));
to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}
+/**
+ * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
+ *
+ * @bp: driver handle
+ * @delta: number of eth queues which were not allocated
+ */
+static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
+{
+ int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
+
+ /* Queue pointers cannot be re-set on a per-fp basis, as moving a pointer
+ * backward along the array could cause memory to be overridden
+ */
+ for (cos = 1; cos < bp->max_cos; cos++) {
+ for (i = 0; i < old_eth_num - delta; i++) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ int new_idx = cos * (old_eth_num - delta) + i;
+
+ memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
+ sizeof(struct bnx2x_fp_txdata));
+ fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
+ }
+ }
+}
+
int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
/* free skb in the packet ring at pos idx
@@ -3863,6 +3888,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
WARN_ON(delta < 0);
+ bnx2x_shrink_eth_fp(bp, delta);
if (CNIC_SUPPORT(bp))
/* move non eth FPs next to last eth FP
* must be done in that order
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 277f17e3c8f8..a427b49a886c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -2777,10 +2777,10 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
} else if ((info->flow_type == UDP_V6_FLOW) &&
(bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
- return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled");
+ return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
} else {
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 940ef859dc60..5523da3afcdc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -127,6 +127,17 @@ MODULE_PARM_DESC(debug, " Default debug msglevel");
struct workqueue_struct *bnx2x_wq;
+struct bnx2x_mac_vals {
+ u32 xmac_addr;
+ u32 xmac_val;
+ u32 emac_addr;
+ u32 emac_val;
+ u32 umac_addr;
+ u32 umac_val;
+ u32 bmac_addr;
+ u32 bmac_val[2];
+};
+
enum bnx2x_board_type {
BCM57710 = 0,
BCM57711,
@@ -9420,12 +9431,19 @@ static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
bnx2x_undi_int_disable_e1h(bp);
}
-static void bnx2x_prev_unload_close_mac(struct bnx2x *bp)
+static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
+ struct bnx2x_mac_vals *vals)
{
u32 val, base_addr, offset, mask, reset_reg;
bool mac_stopped = false;
u8 port = BP_PORT(bp);
+ /* reset addresses as they also mark which values were changed */
+ vals->bmac_addr = 0;
+ vals->umac_addr = 0;
+ vals->xmac_addr = 0;
+ vals->emac_addr = 0;
+
reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
if (!CHIP_IS_E3(bp)) {
@@ -9447,14 +9465,18 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp)
*/
wb_data[0] = REG_RD(bp, base_addr + offset);
wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
+ vals->bmac_addr = base_addr + offset;
+ vals->bmac_val[0] = wb_data[0];
+ vals->bmac_val[1] = wb_data[1];
wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
- REG_WR(bp, base_addr + offset, wb_data[0]);
- REG_WR(bp, base_addr + offset + 0x4, wb_data[1]);
+ REG_WR(bp, vals->bmac_addr, wb_data[0]);
+ REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
}
BNX2X_DEV_INFO("Disable emac Rx\n");
- REG_WR(bp, NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4, 0);
-
+ vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
+ vals->emac_val = REG_RD(bp, vals->emac_addr);
+ REG_WR(bp, vals->emac_addr, 0);
mac_stopped = true;
} else {
if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
@@ -9465,14 +9487,18 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp)
val & ~(1 << 1));
REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
val | (1 << 1));
- REG_WR(bp, base_addr + XMAC_REG_CTRL, 0);
+ vals->xmac_addr = base_addr + XMAC_REG_CTRL;
+ vals->xmac_val = REG_RD(bp, vals->xmac_addr);
+ REG_WR(bp, vals->xmac_addr, 0);
mac_stopped = true;
}
mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
if (mask & reset_reg) {
BNX2X_DEV_INFO("Disable umac Rx\n");
base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
- REG_WR(bp, base_addr + UMAC_REG_COMMAND_CONFIG, 0);
+ vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
+ vals->umac_val = REG_RD(bp, vals->umac_addr);
+ REG_WR(bp, vals->umac_addr, 0);
mac_stopped = true;
}
}
@@ -9664,12 +9690,16 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
{
u32 reset_reg, tmp_reg = 0, rc;
bool prev_undi = false;
+ struct bnx2x_mac_vals mac_vals;
+
/* It is possible a previous function received 'common' answer,
* but hasn't loaded yet, therefore creating a scenario of
* multiple functions receiving 'common' on the same path.
*/
BNX2X_DEV_INFO("Common unload Flow\n");
+ memset(&mac_vals, 0, sizeof(mac_vals));
+
if (bnx2x_prev_is_path_marked(bp))
return bnx2x_prev_mcp_done(bp);
@@ -9680,7 +9710,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
u32 timer_count = 1000;
/* Close the MAC Rx to prevent BRB from filling up */
- bnx2x_prev_unload_close_mac(bp);
+ bnx2x_prev_unload_close_mac(bp, &mac_vals);
+
+ /* close LLH filters towards the BRB */
+ bnx2x_set_rx_filter(&bp->link_params, 0);
/* Check if the UNDI driver was previously loaded
* UNDI driver initializes CID offset for normal bell to 0x7
@@ -9727,6 +9760,17 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
/* No packets are in the pipeline, path is ready for reset */
bnx2x_reset_common(bp);
+ if (mac_vals.xmac_addr)
+ REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
+ if (mac_vals.umac_addr)
+ REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val);
+ if (mac_vals.emac_addr)
+ REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
+ if (mac_vals.bmac_addr) {
+ REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
+ REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
+ }
+
rc = bnx2x_prev_mark_path(bp, prev_undi);
if (rc) {
bnx2x_prev_mcp_done(bp);
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 3bc1912afba9..4eba17b83ba8 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -190,6 +190,7 @@ struct be_eq_obj {
u8 idx; /* array index */
u16 tx_budget;
+ u16 spurious_intr;
struct napi_struct napi;
struct be_adapter *adapter;
} ____cacheline_aligned_in_smp;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 9dca22be8125..5c995700e534 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2026,19 +2026,30 @@ static irqreturn_t be_intx(int irq, void *dev)
struct be_adapter *adapter = eqo->adapter;
int num_evts = 0;
- /* On Lancer, clear-intr bit of the EQ DB does not work.
- * INTx is de-asserted only on notifying num evts.
+ /* IRQ is not expected when NAPI is scheduled as the EQ
+ * will not be armed.
+ * But, this can happen on Lancer INTx where it takes
+ * a while to de-assert INTx or in BE2 where occasionally
+ * an interrupt may be raised even when EQ is unarmed.
+ * If NAPI is already scheduled, then counting & notifying
+ * events will orphan them.
*/
- if (lancer_chip(adapter))
+ if (napi_schedule_prep(&eqo->napi)) {
num_evts = events_get(eqo);
+ __napi_schedule(&eqo->napi);
+ if (num_evts)
+ eqo->spurious_intr = 0;
+ }
+ be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
- /* The EQ-notify may not de-assert INTx rightaway, causing
- * the ISR to be invoked again. So, return HANDLED even when
- * num_evts is zero.
+ /* Return IRQ_HANDLED only for the first spurious intr
+ * after a valid intr to stop the kernel from branding
+ * this irq as a bad one!
*/
- be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
- napi_schedule(&eqo->napi);
- return IRQ_HANDLED;
+ if (num_evts || eqo->spurious_intr++ == 0)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
}
static irqreturn_t be_msix(int irq, void *dev)
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index f80cd975daed..3e73742024b0 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4678,7 +4678,7 @@ static int qlge_probe(struct pci_dev *pdev,
qdev = netdev_priv(ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
+ NETIF_F_TSO | NETIF_F_TSO_ECN |
NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
ndev->features = ndev->hw_features |
NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 5778a4ae1164..122d60c0481b 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -27,7 +27,7 @@ config XILINX_EMACLITE
config XILINX_AXI_EMAC
tristate "Xilinx 10/100/1000 AXI Ethernet support"
- depends on (PPC32 || MICROBLAZE)
+ depends on MICROBLAZE
select PHYLIB
---help---
This driver supports the 10/100/1000 Ethernet from Xilinx for the
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index d9f69b82cc4f..6f47100e58d7 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1590,7 +1590,7 @@ static int axienet_of_probe(struct platform_device *op)
lp->rx_irq = irq_of_parse_and_map(np, 1);
lp->tx_irq = irq_of_parse_and_map(np, 0);
of_node_put(np);
- if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
+ if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
dev_err(&op->dev, "could not determine irqs\n");
ret = -ENOMEM;
goto err_iounmap_2;
diff --git a/drivers/net/ppp/Kconfig b/drivers/net/ppp/Kconfig
index 872df3ef07a6..7936ae4cbf7a 100644
--- a/drivers/net/ppp/Kconfig
+++ b/drivers/net/ppp/Kconfig
@@ -148,6 +148,23 @@ config PPPOL2TP
used by ISPs and enterprises to tunnel PPP traffic over UDP
tunnels. L2TP is replacing PPTP for VPN uses.
+config PPPOLAC
+ tristate "PPP on L2TP Access Concentrator"
+ depends on PPP && INET
+ help
+ L2TP (RFC 2661) is a tunneling protocol widely used in virtual private
+ networks. This driver handles L2TP data packets between a UDP socket
+ and a PPP channel, but only permits one session per socket. Thus it is
+ fairly simple and suited for clients.
+
+config PPPOPNS
+ tristate "PPP on PPTP Network Server"
+ depends on PPP && INET
+ help
+ PPTP (RFC 2637) is a tunneling protocol widely used in virtual private
+ networks. This driver handles PPTP data packets between a RAW socket
+ and a PPP channel. It is fairly simple and easy to use.
+
config PPP_ASYNC
tristate "PPP support for async serial ports"
depends on PPP
diff --git a/drivers/net/ppp/Makefile b/drivers/net/ppp/Makefile
index a6b6297b0066..d283d03c4683 100644
--- a/drivers/net/ppp/Makefile
+++ b/drivers/net/ppp/Makefile
@@ -11,3 +11,5 @@ obj-$(CONFIG_PPP_SYNC_TTY) += ppp_synctty.o
obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
obj-$(CONFIG_PPPOL2TP) += pppox.o
obj-$(CONFIG_PPTP) += pppox.o pptp.o
+obj-$(CONFIG_PPPOLAC) += pppox.o pppolac.o
+obj-$(CONFIG_PPPOPNS) += pppox.o pppopns.o
diff --git a/drivers/net/ppp/pppolac.c b/drivers/net/ppp/pppolac.c
new file mode 100644
index 000000000000..c94b8507d92b
--- /dev/null
+++ b/drivers/net/ppp/pppolac.c
@@ -0,0 +1,449 @@
+/* drivers/net/pppolac.c
+ *
+ * Driver for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* This driver handles L2TP data packets between a UDP socket and a PPP channel.
+ * The socket must stay connected, and only one session per socket is permitted.
+ * Sequencing of outgoing packets is controlled by the LNS. Incoming packets with
+ * sequences are reordered within a sliding window of one second. Currently
+ * reordering only happens when a packet is received. It is done for simplicity
+ * since no additional locks or threads are required. This driver only works on
+ * IPv4 due to the lack of UDP encapsulation support in IPv6. */
+
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/file.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/udp.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_ppp.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_channel.h>
+#include <net/tcp_states.h>
+#include <asm/uaccess.h>
+
+#define L2TP_CONTROL_BIT 0x80
+#define L2TP_LENGTH_BIT 0x40
+#define L2TP_SEQUENCE_BIT 0x08
+#define L2TP_OFFSET_BIT 0x02
+#define L2TP_VERSION 0x02
+#define L2TP_VERSION_MASK 0x0F
+
+#define PPP_ADDR 0xFF
+#define PPP_CTRL 0x03
+
+union unaligned {
+ __u32 u32;
+} __attribute__((packed));
+
+static inline union unaligned *unaligned(void *ptr)
+{
+ return (union unaligned *)ptr;
+}
+
+struct meta {
+ __u32 sequence;
+ __u32 timestamp;
+};
+
+static inline struct meta *skb_meta(struct sk_buff *skb)
+{
+ return (struct meta *)skb->cb;
+}
+
+/******************************************************************************/
+
+static int pppolac_recv_core(struct sock *sk_udp, struct sk_buff *skb)
+{
+ struct sock *sk = (struct sock *)sk_udp->sk_user_data;
+ struct pppolac_opt *opt = &pppox_sk(sk)->proto.lac;
+ struct meta *meta = skb_meta(skb);
+ __u32 now = jiffies;
+ __u8 bits;
+ __u8 *ptr;
+
+ /* Drop the packet if L2TP header is missing. */
+ if (skb->len < sizeof(struct udphdr) + 6)
+ goto drop;
+
+ /* Put it back if it is a control packet. */
+ if (skb->data[sizeof(struct udphdr)] & L2TP_CONTROL_BIT)
+ return opt->backlog_rcv(sk_udp, skb);
+
+ /* Skip UDP header. */
+ skb_pull(skb, sizeof(struct udphdr));
+
+ /* Check the version. */
+ if ((skb->data[1] & L2TP_VERSION_MASK) != L2TP_VERSION)
+ goto drop;
+ bits = skb->data[0];
+ ptr = &skb->data[2];
+
+ /* Check the length if it is present. */
+ if (bits & L2TP_LENGTH_BIT) {
+ if ((ptr[0] << 8 | ptr[1]) != skb->len)
+ goto drop;
+ ptr += 2;
+ }
+
+ /* Skip all fields including optional ones. */
+ if (!skb_pull(skb, 6 + (bits & L2TP_SEQUENCE_BIT ? 4 : 0) +
+ (bits & L2TP_LENGTH_BIT ? 2 : 0) +
+ (bits & L2TP_OFFSET_BIT ? 2 : 0)))
+ goto drop;
+
+ /* Skip the offset padding if it is present. */
+ if (bits & L2TP_OFFSET_BIT &&
+ !skb_pull(skb, skb->data[-2] << 8 | skb->data[-1]))
+ goto drop;
+
+ /* Check the tunnel and the session. */
+ if (unaligned(ptr)->u32 != opt->local)
+ goto drop;
+
+ /* Check the sequence if it is present. */
+ if (bits & L2TP_SEQUENCE_BIT) {
+ meta->sequence = ptr[4] << 8 | ptr[5];
+ if ((__s16)(meta->sequence - opt->recv_sequence) < 0)
+ goto drop;
+ }
+
+ /* Skip PPP address and control if they are present. */
+ if (skb->len >= 2 && skb->data[0] == PPP_ADDR &&
+ skb->data[1] == PPP_CTRL)
+ skb_pull(skb, 2);
+
+ /* Fix PPP protocol if it is compressed. */
+ if (skb->len >= 1 && skb->data[0] & 1)
+ skb_push(skb, 1)[0] = 0;
+
+ /* Drop the packet if PPP protocol is missing. */
+ if (skb->len < 2)
+ goto drop;
+
+ /* Perform reordering if sequencing is enabled. */
+ atomic_set(&opt->sequencing, bits & L2TP_SEQUENCE_BIT);
+ if (bits & L2TP_SEQUENCE_BIT) {
+ struct sk_buff *skb1;
+
+ /* Insert the packet into receive queue in order. */
+ skb_set_owner_r(skb, sk);
+ skb_queue_walk(&sk->sk_receive_queue, skb1) {
+ struct meta *meta1 = skb_meta(skb1);
+ __s16 order = meta->sequence - meta1->sequence;
+ if (order == 0)
+ goto drop;
+ if (order < 0) {
+ meta->timestamp = meta1->timestamp;
+ skb_insert(skb1, skb, &sk->sk_receive_queue);
+ skb = NULL;
+ break;
+ }
+ }
+ if (skb) {
+ meta->timestamp = now;
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ }
+
+ /* Remove packets from receive queue as long as
+ * 1. the receive buffer is full,
+ * 2. they are queued longer than one second, or
+ * 3. there are no missing packets before them. */
+ skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) {
+ meta = skb_meta(skb);
+ if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
+ now - meta->timestamp < HZ &&
+ meta->sequence != opt->recv_sequence)
+ break;
+ skb_unlink(skb, &sk->sk_receive_queue);
+ opt->recv_sequence = (__u16)(meta->sequence + 1);
+ skb_orphan(skb);
+ ppp_input(&pppox_sk(sk)->chan, skb);
+ }
+ return NET_RX_SUCCESS;
+ }
+
+ /* Flush receive queue if sequencing is disabled. */
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_orphan(skb);
+ ppp_input(&pppox_sk(sk)->chan, skb);
+ return NET_RX_SUCCESS;
+drop:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+static int pppolac_recv(struct sock *sk_udp, struct sk_buff *skb)
+{
+ sock_hold(sk_udp);
+ sk_receive_skb(sk_udp, skb, 0);
+ return 0;
+}
+
+static struct sk_buff_head delivery_queue;
+
+static void pppolac_xmit_core(struct work_struct *delivery_work)
+{
+ mm_segment_t old_fs = get_fs();
+ struct sk_buff *skb;
+
+ set_fs(KERNEL_DS);
+ while ((skb = skb_dequeue(&delivery_queue))) {
+ struct sock *sk_udp = skb->sk;
+ struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
+ struct msghdr msg = {
+ .msg_iov = (struct iovec *)&iov,
+ .msg_iovlen = 1,
+ .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT,
+ };
+ sk_udp->sk_prot->sendmsg(NULL, sk_udp, &msg, skb->len);
+ kfree_skb(skb);
+ }
+ set_fs(old_fs);
+}
+
+static DECLARE_WORK(delivery_work, pppolac_xmit_core);
+
+static int pppolac_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+ struct sock *sk_udp = (struct sock *)chan->private;
+ struct pppolac_opt *opt = &pppox_sk(sk_udp->sk_user_data)->proto.lac;
+
+ /* Install PPP address and control. */
+ skb_push(skb, 2);
+ skb->data[0] = PPP_ADDR;
+ skb->data[1] = PPP_CTRL;
+
+ /* Install L2TP header. */
+ if (atomic_read(&opt->sequencing)) {
+ skb_push(skb, 10);
+ skb->data[0] = L2TP_SEQUENCE_BIT;
+ skb->data[6] = opt->xmit_sequence >> 8;
+ skb->data[7] = opt->xmit_sequence;
+ skb->data[8] = 0;
+ skb->data[9] = 0;
+ opt->xmit_sequence++;
+ } else {
+ skb_push(skb, 6);
+ skb->data[0] = 0;
+ }
+ skb->data[1] = L2TP_VERSION;
+ unaligned(&skb->data[2])->u32 = opt->remote;
+
+ /* Now send the packet via the delivery queue. */
+ skb_set_owner_w(skb, sk_udp);
+ skb_queue_tail(&delivery_queue, skb);
+ schedule_work(&delivery_work);
+ return 1;
+}
+
+/******************************************************************************/
+
+static struct ppp_channel_ops pppolac_channel_ops = {
+ .start_xmit = pppolac_xmit,
+};
+
+static int pppolac_connect(struct socket *sock, struct sockaddr *useraddr,
+ int addrlen, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct pppox_sock *po = pppox_sk(sk);
+ struct sockaddr_pppolac *addr = (struct sockaddr_pppolac *)useraddr;
+ struct socket *sock_udp = NULL;
+ struct sock *sk_udp;
+ int error;
+
+ if (addrlen != sizeof(struct sockaddr_pppolac) ||
+ !addr->local.tunnel || !addr->local.session ||
+ !addr->remote.tunnel || !addr->remote.session) {
+ return -EINVAL;
+ }
+
+ lock_sock(sk);
+ error = -EALREADY;
+ if (sk->sk_state != PPPOX_NONE)
+ goto out;
+
+ sock_udp = sockfd_lookup(addr->udp_socket, &error);
+ if (!sock_udp)
+ goto out;
+ sk_udp = sock_udp->sk;
+ lock_sock(sk_udp);
+
+ /* Remove this check when IPv6 supports UDP encapsulation. */
+ error = -EAFNOSUPPORT;
+ if (sk_udp->sk_family != AF_INET)
+ goto out;
+ error = -EPROTONOSUPPORT;
+ if (sk_udp->sk_protocol != IPPROTO_UDP)
+ goto out;
+ error = -EDESTADDRREQ;
+ if (sk_udp->sk_state != TCP_ESTABLISHED)
+ goto out;
+ error = -EBUSY;
+ if (udp_sk(sk_udp)->encap_type || sk_udp->sk_user_data)
+ goto out;
+ if (!sk_udp->sk_bound_dev_if) {
+ struct dst_entry *dst = sk_dst_get(sk_udp);
+ error = -ENODEV;
+ if (!dst)
+ goto out;
+ sk_udp->sk_bound_dev_if = dst->dev->ifindex;
+ dst_release(dst);
+ }
+
+ po->chan.hdrlen = 12;
+ po->chan.private = sk_udp;
+ po->chan.ops = &pppolac_channel_ops;
+ po->chan.mtu = PPP_MTU - 80;
+ po->proto.lac.local = unaligned(&addr->local)->u32;
+ po->proto.lac.remote = unaligned(&addr->remote)->u32;
+ atomic_set(&po->proto.lac.sequencing, 1);
+ po->proto.lac.backlog_rcv = sk_udp->sk_backlog_rcv;
+
+ error = ppp_register_channel(&po->chan);
+ if (error)
+ goto out;
+
+ sk->sk_state = PPPOX_CONNECTED;
+ udp_sk(sk_udp)->encap_type = UDP_ENCAP_L2TPINUDP;
+ udp_sk(sk_udp)->encap_rcv = pppolac_recv;
+ sk_udp->sk_backlog_rcv = pppolac_recv_core;
+ sk_udp->sk_user_data = sk;
+out:
+ if (sock_udp) {
+ release_sock(sk_udp);
+ if (error)
+ sockfd_put(sock_udp);
+ }
+ release_sock(sk);
+ return error;
+}
+
+static int pppolac_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+
+ if (!sk)
+ return 0;
+
+ lock_sock(sk);
+ if (sock_flag(sk, SOCK_DEAD)) {
+ release_sock(sk);
+ return -EBADF;
+ }
+
+ if (sk->sk_state != PPPOX_NONE) {
+ struct sock *sk_udp = (struct sock *)pppox_sk(sk)->chan.private;
+ lock_sock(sk_udp);
+ skb_queue_purge(&sk->sk_receive_queue);
+ pppox_unbind_sock(sk);
+ udp_sk(sk_udp)->encap_type = 0;
+ udp_sk(sk_udp)->encap_rcv = NULL;
+ sk_udp->sk_backlog_rcv = pppox_sk(sk)->proto.lac.backlog_rcv;
+ sk_udp->sk_user_data = NULL;
+ release_sock(sk_udp);
+ sockfd_put(sk_udp->sk_socket);
+ }
+
+ sock_orphan(sk);
+ sock->sk = NULL;
+ release_sock(sk);
+ sock_put(sk);
+ return 0;
+}
+
+/******************************************************************************/
+
+static struct proto pppolac_proto = {
+ .name = "PPPOLAC",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct pppox_sock),
+};
+
+static struct proto_ops pppolac_proto_ops = {
+ .family = PF_PPPOX,
+ .owner = THIS_MODULE,
+ .release = pppolac_release,
+ .bind = sock_no_bind,
+ .connect = pppolac_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = sock_no_getname,
+ .poll = sock_no_poll,
+ .ioctl = pppox_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = sock_no_setsockopt,
+ .getsockopt = sock_no_getsockopt,
+ .sendmsg = sock_no_sendmsg,
+ .recvmsg = sock_no_recvmsg,
+ .mmap = sock_no_mmap,
+};
+
+static int pppolac_create(struct net *net, struct socket *sock)
+{
+ struct sock *sk;
+
+ sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppolac_proto);
+ if (!sk)
+ return -ENOMEM;
+
+ sock_init_data(sock, sk);
+ sock->state = SS_UNCONNECTED;
+ sock->ops = &pppolac_proto_ops;
+ sk->sk_protocol = PX_PROTO_OLAC;
+ sk->sk_state = PPPOX_NONE;
+ return 0;
+}
+
+/******************************************************************************/
+
+static struct pppox_proto pppolac_pppox_proto = {
+ .create = pppolac_create,
+ .owner = THIS_MODULE,
+};
+
+static int __init pppolac_init(void)
+{
+ int error;
+
+ error = proto_register(&pppolac_proto, 0);
+ if (error)
+ return error;
+
+ error = register_pppox_proto(PX_PROTO_OLAC, &pppolac_pppox_proto);
+ if (error)
+ proto_unregister(&pppolac_proto);
+ else
+ skb_queue_head_init(&delivery_queue);
+ return error;
+}
+
+static void __exit pppolac_exit(void)
+{
+ unregister_pppox_proto(PX_PROTO_OLAC);
+ proto_unregister(&pppolac_proto);
+}
+
+module_init(pppolac_init);
+module_exit(pppolac_exit);
+
+MODULE_DESCRIPTION("PPP on L2TP Access Concentrator (PPPoLAC)");
+MODULE_AUTHOR("Chia-chi Yeh <chiachi@android.com>");
+MODULE_LICENSE("GPL");
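
A hedged userspace sketch of attaching a PPPoLAC channel to an established L2TP/UDP tunnel. The sockaddr_pppolac layout is inferred from the connect() handler above, and the tunnel/session IDs are expected in network byte order as they appear on the wire.

#include <stdint.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_pppox.h>

int pppolac_attach(int udp_fd, uint16_t local_tunnel, uint16_t local_session,
		   uint16_t remote_tunnel, uint16_t remote_session)
{
	struct sockaddr_pppolac addr = {
		.sa_family = AF_PPPOX,
		.sa_protocol = PX_PROTO_OLAC,
		.udp_socket = udp_fd,	/* already-connected UDP socket */
		.local  = { .tunnel = local_tunnel,  .session = local_session },
		.remote = { .tunnel = remote_tunnel, .session = remote_session },
	};
	int fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OLAC);

	if (fd < 0)
		return -1;
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* hand this fd to pppd as its channel */
}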
diff --git a/drivers/net/ppp/pppopns.c b/drivers/net/ppp/pppopns.c
new file mode 100644
index 000000000000..fb8198447938
--- /dev/null
+++ b/drivers/net/ppp/pppopns.c
@@ -0,0 +1,428 @@
+/* drivers/net/pppopns.c
+ *
+ * Driver for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* This driver handles PPTP data packets between a RAW socket and a PPP channel.
+ * The socket is created in kernel space and connected to the same address
+ * as the control socket. Outgoing packets are always sent with sequences but
+ * without acknowledgements. Incoming packets with sequences are reordered
+ * within a sliding window of one second. Currently reordering only happens when
+ * a packet is received. It is done for simplicity since no additional locks or
+ * threads are required. This driver should work on both IPv4 and IPv6. */
+
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/file.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/ppp_defs.h>
+#include <linux/if.h>
+#include <linux/if_ppp.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_channel.h>
+#include <asm/uaccess.h>
+
+#define GRE_HEADER_SIZE 8
+
+#define PPTP_GRE_BITS htons(0x2001)
+#define PPTP_GRE_BITS_MASK htons(0xEF7F)
+#define PPTP_GRE_SEQ_BIT htons(0x1000)
+#define PPTP_GRE_ACK_BIT htons(0x0080)
+#define PPTP_GRE_TYPE htons(0x880B)
+
+#define PPP_ADDR 0xFF
+#define PPP_CTRL 0x03
+
+struct header {
+ __u16 bits;
+ __u16 type;
+ __u16 length;
+ __u16 call;
+ __u32 sequence;
+} __attribute__((packed));
+
+struct meta {
+ __u32 sequence;
+ __u32 timestamp;
+};
+
+static inline struct meta *skb_meta(struct sk_buff *skb)
+{
+ return (struct meta *)skb->cb;
+}
+
+/******************************************************************************/
+
+static int pppopns_recv_core(struct sock *sk_raw, struct sk_buff *skb)
+{
+ struct sock *sk = (struct sock *)sk_raw->sk_user_data;
+ struct pppopns_opt *opt = &pppox_sk(sk)->proto.pns;
+ struct meta *meta = skb_meta(skb);
+ __u32 now = jiffies;
+ struct header *hdr;
+
+ /* Skip transport header */
+ skb_pull(skb, skb_transport_header(skb) - skb->data);
+
+ /* Drop the packet if GRE header is missing. */
+ if (skb->len < GRE_HEADER_SIZE)
+ goto drop;
+ hdr = (struct header *)skb->data;
+
+ /* Check the header. */
+ if (hdr->type != PPTP_GRE_TYPE || hdr->call != opt->local ||
+ (hdr->bits & PPTP_GRE_BITS_MASK) != PPTP_GRE_BITS)
+ goto drop;
+
+ /* Skip all fields including optional ones. */
+ if (!skb_pull(skb, GRE_HEADER_SIZE +
+ (hdr->bits & PPTP_GRE_SEQ_BIT ? 4 : 0) +
+ (hdr->bits & PPTP_GRE_ACK_BIT ? 4 : 0)))
+ goto drop;
+
+ /* Check the length. */
+ if (skb->len != ntohs(hdr->length))
+ goto drop;
+
+ /* Check the sequence if it is present. */
+ if (hdr->bits & PPTP_GRE_SEQ_BIT) {
+ meta->sequence = ntohl(hdr->sequence);
+ if ((__s32)(meta->sequence - opt->recv_sequence) < 0)
+ goto drop;
+ }
+
+ /* Skip PPP address and control if they are present. */
+ if (skb->len >= 2 && skb->data[0] == PPP_ADDR &&
+ skb->data[1] == PPP_CTRL)
+ skb_pull(skb, 2);
+
+ /* Fix PPP protocol if it is compressed. */
+ if (skb->len >= 1 && skb->data[0] & 1)
+ skb_push(skb, 1)[0] = 0;
+
+ /* Drop the packet if PPP protocol is missing. */
+ if (skb->len < 2)
+ goto drop;
+
+ /* Perform reordering if sequencing is enabled. */
+ if (hdr->bits & PPTP_GRE_SEQ_BIT) {
+ struct sk_buff *skb1;
+
+ /* Insert the packet into receive queue in order. */
+ skb_set_owner_r(skb, sk);
+ skb_queue_walk(&sk->sk_receive_queue, skb1) {
+ struct meta *meta1 = skb_meta(skb1);
+ __s32 order = meta->sequence - meta1->sequence;
+ if (order == 0)
+ goto drop;
+ if (order < 0) {
+ meta->timestamp = meta1->timestamp;
+ skb_insert(skb1, skb, &sk->sk_receive_queue);
+ skb = NULL;
+ break;
+ }
+ }
+ if (skb) {
+ meta->timestamp = now;
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ }
+
+ /* Remove packets from receive queue as long as
+ * 1. the receive buffer is full,
+ * 2. they are queued longer than one second, or
+ * 3. there are no missing packets before them. */
+ skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) {
+ meta = skb_meta(skb);
+ if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
+ now - meta->timestamp < HZ &&
+ meta->sequence != opt->recv_sequence)
+ break;
+ skb_unlink(skb, &sk->sk_receive_queue);
+ opt->recv_sequence = meta->sequence + 1;
+ skb_orphan(skb);
+ ppp_input(&pppox_sk(sk)->chan, skb);
+ }
+ return NET_RX_SUCCESS;
+ }
+
+ /* Flush receive queue if sequencing is disabled. */
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_orphan(skb);
+ ppp_input(&pppox_sk(sk)->chan, skb);
+ return NET_RX_SUCCESS;
+drop:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+static void pppopns_recv(struct sock *sk_raw, int length)
+{
+ struct sk_buff *skb;
+ while ((skb = skb_dequeue(&sk_raw->sk_receive_queue))) {
+ sock_hold(sk_raw);
+ sk_receive_skb(sk_raw, skb, 0);
+ }
+}
+
+static struct sk_buff_head delivery_queue;
+
+static void pppopns_xmit_core(struct work_struct *delivery_work)
+{
+ mm_segment_t old_fs = get_fs();
+ struct sk_buff *skb;
+
+ set_fs(KERNEL_DS);
+ while ((skb = skb_dequeue(&delivery_queue))) {
+ struct sock *sk_raw = skb->sk;
+ struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
+ struct msghdr msg = {
+ .msg_iov = (struct iovec *)&iov,
+ .msg_iovlen = 1,
+ .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT,
+ };
+ sk_raw->sk_prot->sendmsg(NULL, sk_raw, &msg, skb->len);
+ kfree_skb(skb);
+ }
+ set_fs(old_fs);
+}
+
+static DECLARE_WORK(delivery_work, pppopns_xmit_core);
+
+static int pppopns_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+ struct sock *sk_raw = (struct sock *)chan->private;
+ struct pppopns_opt *opt = &pppox_sk(sk_raw->sk_user_data)->proto.pns;
+ struct header *hdr;
+ __u16 length;
+
+ /* Install PPP address and control. */
+ skb_push(skb, 2);
+ skb->data[0] = PPP_ADDR;
+ skb->data[1] = PPP_CTRL;
+ length = skb->len;
+
+ /* Install PPTP GRE header. */
+ hdr = (struct header *)skb_push(skb, 12);
+ hdr->bits = PPTP_GRE_BITS | PPTP_GRE_SEQ_BIT;
+ hdr->type = PPTP_GRE_TYPE;
+ hdr->length = htons(length);
+ hdr->call = opt->remote;
+ hdr->sequence = htonl(opt->xmit_sequence);
+ opt->xmit_sequence++;
+
+ /* Now send the packet via the delivery queue. */
+ skb_set_owner_w(skb, sk_raw);
+ skb_queue_tail(&delivery_queue, skb);
+ schedule_work(&delivery_work);
+ return 1;
+}
+
+/******************************************************************************/
+
+static struct ppp_channel_ops pppopns_channel_ops = {
+ .start_xmit = pppopns_xmit,
+};
+
+static int pppopns_connect(struct socket *sock, struct sockaddr *useraddr,
+ int addrlen, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct pppox_sock *po = pppox_sk(sk);
+ struct sockaddr_pppopns *addr = (struct sockaddr_pppopns *)useraddr;
+ struct sockaddr_storage ss;
+ struct socket *sock_tcp = NULL;
+ struct socket *sock_raw = NULL;
+ struct sock *sk_tcp;
+ struct sock *sk_raw;
+ int error;
+
+ if (addrlen != sizeof(struct sockaddr_pppopns))
+ return -EINVAL;
+
+ lock_sock(sk);
+ error = -EALREADY;
+ if (sk->sk_state != PPPOX_NONE)
+ goto out;
+
+ sock_tcp = sockfd_lookup(addr->tcp_socket, &error);
+ if (!sock_tcp)
+ goto out;
+ sk_tcp = sock_tcp->sk;
+ error = -EPROTONOSUPPORT;
+ if (sk_tcp->sk_protocol != IPPROTO_TCP)
+ goto out;
+ addrlen = sizeof(struct sockaddr_storage);
+ error = kernel_getpeername(sock_tcp, (struct sockaddr *)&ss, &addrlen);
+ if (error)
+ goto out;
+ if (!sk_tcp->sk_bound_dev_if) {
+ struct dst_entry *dst = sk_dst_get(sk_tcp);
+ error = -ENODEV;
+ if (!dst)
+ goto out;
+ sk_tcp->sk_bound_dev_if = dst->dev->ifindex;
+ dst_release(dst);
+ }
+
+ error = sock_create(ss.ss_family, SOCK_RAW, IPPROTO_GRE, &sock_raw);
+ if (error)
+ goto out;
+ sk_raw = sock_raw->sk;
+ sk_raw->sk_bound_dev_if = sk_tcp->sk_bound_dev_if;
+ error = kernel_connect(sock_raw, (struct sockaddr *)&ss, addrlen, 0);
+ if (error)
+ goto out;
+
+ po->chan.hdrlen = 14;
+ po->chan.private = sk_raw;
+ po->chan.ops = &pppopns_channel_ops;
+ po->chan.mtu = PPP_MTU - 80;
+ po->proto.pns.local = addr->local;
+ po->proto.pns.remote = addr->remote;
+ po->proto.pns.data_ready = sk_raw->sk_data_ready;
+ po->proto.pns.backlog_rcv = sk_raw->sk_backlog_rcv;
+
+ error = ppp_register_channel(&po->chan);
+ if (error)
+ goto out;
+
+ sk->sk_state = PPPOX_CONNECTED;
+ lock_sock(sk_raw);
+ sk_raw->sk_data_ready = pppopns_recv;
+ sk_raw->sk_backlog_rcv = pppopns_recv_core;
+ sk_raw->sk_user_data = sk;
+ release_sock(sk_raw);
+out:
+ if (sock_tcp)
+ sockfd_put(sock_tcp);
+ if (error && sock_raw)
+ sock_release(sock_raw);
+ release_sock(sk);
+ return error;
+}
+
+static int pppopns_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+
+ if (!sk)
+ return 0;
+
+ lock_sock(sk);
+ if (sock_flag(sk, SOCK_DEAD)) {
+ release_sock(sk);
+ return -EBADF;
+ }
+
+ if (sk->sk_state != PPPOX_NONE) {
+ struct sock *sk_raw = (struct sock *)pppox_sk(sk)->chan.private;
+ lock_sock(sk_raw);
+ skb_queue_purge(&sk->sk_receive_queue);
+ pppox_unbind_sock(sk);
+ sk_raw->sk_data_ready = pppox_sk(sk)->proto.pns.data_ready;
+ sk_raw->sk_backlog_rcv = pppox_sk(sk)->proto.pns.backlog_rcv;
+ sk_raw->sk_user_data = NULL;
+ release_sock(sk_raw);
+ sock_release(sk_raw->sk_socket);
+ }
+
+ sock_orphan(sk);
+ sock->sk = NULL;
+ release_sock(sk);
+ sock_put(sk);
+ return 0;
+}
+
+/******************************************************************************/
+
+static struct proto pppopns_proto = {
+ .name = "PPPOPNS",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct pppox_sock),
+};
+
+static struct proto_ops pppopns_proto_ops = {
+ .family = PF_PPPOX,
+ .owner = THIS_MODULE,
+ .release = pppopns_release,
+ .bind = sock_no_bind,
+ .connect = pppopns_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = sock_no_getname,
+ .poll = sock_no_poll,
+ .ioctl = pppox_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = sock_no_setsockopt,
+ .getsockopt = sock_no_getsockopt,
+ .sendmsg = sock_no_sendmsg,
+ .recvmsg = sock_no_recvmsg,
+ .mmap = sock_no_mmap,
+};
+
+static int pppopns_create(struct net *net, struct socket *sock)
+{
+ struct sock *sk;
+
+ sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppopns_proto);
+ if (!sk)
+ return -ENOMEM;
+
+ sock_init_data(sock, sk);
+ sock->state = SS_UNCONNECTED;
+ sock->ops = &pppopns_proto_ops;
+ sk->sk_protocol = PX_PROTO_OPNS;
+ sk->sk_state = PPPOX_NONE;
+ return 0;
+}
+
+/******************************************************************************/
+
+static struct pppox_proto pppopns_pppox_proto = {
+ .create = pppopns_create,
+ .owner = THIS_MODULE,
+};
+
+static int __init pppopns_init(void)
+{
+ int error;
+
+ error = proto_register(&pppopns_proto, 0);
+ if (error)
+ return error;
+
+ error = register_pppox_proto(PX_PROTO_OPNS, &pppopns_pppox_proto);
+ if (error)
+ proto_unregister(&pppopns_proto);
+ else
+ skb_queue_head_init(&delivery_queue);
+ return error;
+}
+
+static void __exit pppopns_exit(void)
+{
+ unregister_pppox_proto(PX_PROTO_OPNS);
+ proto_unregister(&pppopns_proto);
+}
+
+module_init(pppopns_init);
+module_exit(pppopns_exit);
+
+MODULE_DESCRIPTION("PPP on PPTP Network Server (PPPoPNS)");
+MODULE_AUTHOR("Chia-chi Yeh <chiachi@android.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index fbd106edbe59..163638c3fc8e 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -404,8 +404,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
struct tun_struct *tun;
struct net_device *dev;
- tun = rcu_dereference_protected(tfile->tun,
- lockdep_rtnl_is_held());
+ tun = rtnl_dereference(tfile->tun);
+
if (tun) {
u16 index = tfile->queue_index;
BUG_ON(index >= tun->numqueues);
@@ -414,8 +414,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
rcu_assign_pointer(tun->tfiles[index],
tun->tfiles[tun->numqueues - 1]);
rcu_assign_pointer(tfile->tun, NULL);
- ntfile = rcu_dereference_protected(tun->tfiles[index],
- lockdep_rtnl_is_held());
+ ntfile = rtnl_dereference(tun->tfiles[index]);
ntfile->queue_index = index;
--tun->numqueues;
@@ -429,8 +428,10 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
/* Drop read queue */
skb_queue_purge(&tfile->sk.sk_receive_queue);
tun_set_real_num_queues(tun);
- } else if (tfile->detached && clean)
+ } else if (tfile->detached && clean) {
tun = tun_enable_queue(tfile);
+ sock_put(&tfile->sk);
+ }
if (clean) {
if (tun && tun->numqueues == 0 && tun->numdisabled == 0 &&
@@ -458,8 +459,7 @@ static void tun_detach_all(struct net_device *dev)
int i, n = tun->numqueues;
for (i = 0; i < n; i++) {
- tfile = rcu_dereference_protected(tun->tfiles[i],
- lockdep_rtnl_is_held());
+ tfile = rtnl_dereference(tun->tfiles[i]);
BUG_ON(!tfile);
wake_up_all(&tfile->wq.wait);
rcu_assign_pointer(tfile->tun, NULL);
@@ -469,8 +469,7 @@ static void tun_detach_all(struct net_device *dev)
synchronize_net();
for (i = 0; i < n; i++) {
- tfile = rcu_dereference_protected(tun->tfiles[i],
- lockdep_rtnl_is_held());
+ tfile = rtnl_dereference(tun->tfiles[i]);
/* Drop read queue */
skb_queue_purge(&tfile->sk.sk_receive_queue);
sock_put(&tfile->sk);
@@ -481,6 +480,9 @@ static void tun_detach_all(struct net_device *dev)
sock_put(&tfile->sk);
}
BUG_ON(tun->numdisabled != 0);
+
+ if (tun->flags & TUN_PERSIST)
+ module_put(THIS_MODULE);
}
static int tun_attach(struct tun_struct *tun, struct file *file)
@@ -489,7 +491,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
int err;
err = -EINVAL;
- if (rcu_dereference_protected(tfile->tun, lockdep_rtnl_is_held()))
+ if (rtnl_dereference(tfile->tun))
goto out;
err = -EBUSY;
@@ -1544,6 +1546,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
struct net_device *dev;
int err;
+ if (tfile->detached)
+ return -EINVAL;
+
dev = __dev_get_by_name(net, ifr->ifr_name);
if (dev) {
if (ifr->ifr_flags & IFF_TUN_EXCL)
@@ -1738,8 +1743,7 @@ static void tun_detach_filter(struct tun_struct *tun, int n)
struct tun_file *tfile;
for (i = 0; i < n; i++) {
- tfile = rcu_dereference_protected(tun->tfiles[i],
- lockdep_rtnl_is_held());
+ tfile = rtnl_dereference(tun->tfiles[i]);
sk_detach_filter(tfile->socket.sk);
}
@@ -1752,8 +1756,7 @@ static int tun_attach_filter(struct tun_struct *tun)
struct tun_file *tfile;
for (i = 0; i < tun->numqueues; i++) {
- tfile = rcu_dereference_protected(tun->tfiles[i],
- lockdep_rtnl_is_held());
+ tfile = rtnl_dereference(tun->tfiles[i]);
ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
if (ret) {
tun_detach_filter(tun, i);
@@ -1771,8 +1774,7 @@ static void tun_set_sndbuf(struct tun_struct *tun)
int i;
for (i = 0; i < tun->numqueues; i++) {
- tfile = rcu_dereference_protected(tun->tfiles[i],
- lockdep_rtnl_is_held());
+ tfile = rtnl_dereference(tun->tfiles[i]);
tfile->socket.sk->sk_sndbuf = tun->sndbuf;
}
}
@@ -1789,13 +1791,10 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
tun = tfile->detached;
if (!tun)
ret = -EINVAL;
- else if (tun_not_capable(tun))
- ret = -EPERM;
else
ret = tun_attach(tun, file);
} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
- tun = rcu_dereference_protected(tfile->tun,
- lockdep_rtnl_is_held());
+ tun = rtnl_dereference(tfile->tun);
if (!tun || !(tun->flags & TUN_TAP_MQ))
ret = -EINVAL;
else
@@ -1820,6 +1819,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
int vnet_hdr_sz;
int ret;
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+ if (cmd != TUNGETIFF && !capable(CAP_NET_ADMIN)) {
+ return -EPERM;
+ }
+#endif
+
if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
if (copy_from_user(&ifr, argp, ifreq_len))
return -EFAULT;
@@ -1880,10 +1885,11 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
/* Disable/Enable persist mode. Keep an extra reference to the
* module to prevent the module being unprobed.
*/
- if (arg) {
+ if (arg && !(tun->flags & TUN_PERSIST)) {
tun->flags |= TUN_PERSIST;
__module_get(THIS_MODULE);
- } else {
+ }
+ if (!arg && (tun->flags & TUN_PERSIST)) {
tun->flags &= ~TUN_PERSIST;
module_put(THIS_MODULE);
}
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 28aa05f60c26..dc1c03c19016 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -264,9 +264,15 @@ config MWL8K
To compile this driver as a module, choose M here: the module
will be called mwl8k. If unsure, say N.
+config WIFI_CONTROL_FUNC
+ bool "Enable WiFi control function abstraction"
+ help
+ Enables Power/Reset/Carddetect function abstraction
+
source "drivers/net/wireless/ath/Kconfig"
source "drivers/net/wireless/b43/Kconfig"
source "drivers/net/wireless/b43legacy/Kconfig"
+source "drivers/net/wireless/bcmdhd/Kconfig"
source "drivers/net/wireless/brcm80211/Kconfig"
source "drivers/net/wireless/hostap/Kconfig"
source "drivers/net/wireless/ipw2x00/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 67156efe14c4..1d5decb22c2f 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -55,5 +55,7 @@ obj-$(CONFIG_WL_TI) += ti/
obj-$(CONFIG_MWIFIEX) += mwifiex/
+obj-$(CONFIG_BCMDHD) += bcmdhd/
+
obj-$(CONFIG_BRCMFMAC) += brcm80211/
obj-$(CONFIG_BRCMSMAC) += brcm80211/
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 1a67a4f829fe..2c02b4e84094 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -30,5 +30,6 @@ source "drivers/net/wireless/ath/ath9k/Kconfig"
source "drivers/net/wireless/ath/carl9170/Kconfig"
source "drivers/net/wireless/ath/ath6kl/Kconfig"
source "drivers/net/wireless/ath/ar5523/Kconfig"
+source "drivers/net/wireless/ath/wil6210/Kconfig"
endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index 1e18621326dc..97b964ded2be 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_ATH9K_HW) += ath9k/
obj-$(CONFIG_CARL9170) += carl9170/
obj-$(CONFIG_ATH6KL) += ath6kl/
obj-$(CONFIG_AR5523) += ar5523/
+obj-$(CONFIG_WIL6210) += wil6210/
obj-$(CONFIG_ATH_COMMON) += ath.o
diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
new file mode 100644
index 000000000000..bac3d98a0cfb
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/Kconfig
@@ -0,0 +1,29 @@
+config WIL6210
+ tristate "Wilocity 60g WiFi card wil6210 support"
+ depends on CFG80211
+ depends on PCI
+ default n
+ ---help---
+ This module adds support for wireless adapters based on the
+ wil6210 chip by Wilocity. It supports operation on the
+ 60 GHz band, covered by the IEEE 802.11ad standard.
+
+ http://wireless.kernel.org/en/users/Drivers/wil6210
+
+ If you choose to build it as a module, it will be called
+ wil6210.
+
+config WIL6210_ISR_COR
+ bool "Use Clear-On-Read mode for ISR registers for wil6210"
+ depends on WIL6210
+ default y
+ ---help---
+ ISR registers on the wil6210 chip may operate in either
+ COR (Clear-On-Read) or W1C (Write-1-to-Clear) mode.
+ For production code, use COR (say y); it is the default since
+ it saves an extra target transaction.
+ For ISR debugging, use W1C (say n); it allows monitoring the ISR
+ registers with debugfs. If COR were used, the ISR would
+ self-clear when accessed for debug purposes, making such
+ monitoring impossible.
+ Say y unless you are debugging interrupts.
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
new file mode 100644
index 000000000000..9396dc9fe3c5
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -0,0 +1,13 @@
+obj-$(CONFIG_WIL6210) += wil6210.o
+
+wil6210-objs := main.o
+wil6210-objs += netdev.o
+wil6210-objs += cfg80211.o
+wil6210-objs += pcie_bus.o
+wil6210-objs += debugfs.o
+wil6210-objs += wmi.o
+wil6210-objs += interrupt.o
+wil6210-objs += txrx.o
+
+subdir-ccflags-y += -Werror
+subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
new file mode 100644
index 000000000000..116f4e807ae1
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -0,0 +1,573 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <net/cfg80211.h>
+
+#include "wil6210.h"
+#include "wmi.h"
+
+#define CHAN60G(_channel, _flags) { \
+ .band = IEEE80211_BAND_60GHZ, \
+ .center_freq = 56160 + (2160 * (_channel)), \
+ .hw_value = (_channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 40, \
+}
+
+static struct ieee80211_channel wil_60ghz_channels[] = {
+ CHAN60G(1, 0),
+ CHAN60G(2, 0),
+ CHAN60G(3, 0),
+/* channel 4 not supported yet */
+};
+
+static struct ieee80211_supported_band wil_band_60ghz = {
+ .channels = wil_60ghz_channels,
+ .n_channels = ARRAY_SIZE(wil_60ghz_channels),
+ .ht_cap = {
+ .ht_supported = true,
+ .cap = 0, /* TODO */
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, /* TODO */
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, /* TODO */
+ .mcs = {
+ /* MCS 1..12 - SC PHY */
+ .rx_mask = {0xfe, 0x1f}, /* 1..12 */
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED, /* TODO */
+ },
+ },
+};
+
+static const struct ieee80211_txrx_stypes
+wil_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+ [NL80211_IFTYPE_STATION] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_AP] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_CLIENT] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_GO] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+};
+
+static const u32 wil_cipher_suites[] = {
+ WLAN_CIPHER_SUITE_GCMP,
+};
+
+int wil_iftype_nl2wmi(enum nl80211_iftype type)
+{
+ static const struct {
+ enum nl80211_iftype nl;
+ enum wmi_network_type wmi;
+ } __nl2wmi[] = {
+ {NL80211_IFTYPE_ADHOC, WMI_NETTYPE_ADHOC},
+ {NL80211_IFTYPE_STATION, WMI_NETTYPE_INFRA},
+ {NL80211_IFTYPE_AP, WMI_NETTYPE_AP},
+ {NL80211_IFTYPE_P2P_CLIENT, WMI_NETTYPE_P2P},
+ {NL80211_IFTYPE_P2P_GO, WMI_NETTYPE_P2P},
+ {NL80211_IFTYPE_MONITOR, WMI_NETTYPE_ADHOC}, /* FIXME */
+ };
+ uint i;
+
+ for (i = 0; i < ARRAY_SIZE(__nl2wmi); i++) {
+ if (__nl2wmi[i].nl == type)
+ return __nl2wmi[i].wmi;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int wil_cfg80211_get_station(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8 *mac, struct station_info *sinfo)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+ struct wmi_notify_req_cmd cmd = {
+ .cid = 0,
+ .interval_usec = 0,
+ };
+
+ if (memcmp(mac, wil->dst_addr[0], ETH_ALEN))
+ return -ENOENT;
+
+ /* WMI_NOTIFY_REQ_DONE_EVENTID handler fills wil->stats.bf_mcs */
+ rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, &cmd, sizeof(cmd),
+ WMI_NOTIFY_REQ_DONE_EVENTID, NULL, 0, 20);
+ if (rc)
+ return rc;
+
+ sinfo->generation = wil->sinfo_gen;
+
+ sinfo->filled |= STATION_INFO_TX_BITRATE;
+ sinfo->txrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
+ sinfo->txrate.mcs = wil->stats.bf_mcs;
+ sinfo->filled |= STATION_INFO_RX_BITRATE;
+ sinfo->rxrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
+ sinfo->rxrate.mcs = wil->stats.last_mcs_rx;
+
+ if (test_bit(wil_status_fwconnected, &wil->status)) {
+ sinfo->filled |= STATION_INFO_SIGNAL;
+ sinfo->signal = 12; /* TODO: provide real value */
+ }
+
+ return 0;
+}
+
+static int wil_cfg80211_change_iface(struct wiphy *wiphy,
+ struct net_device *ndev,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = wil->wdev;
+
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ if (flags)
+ wil->monitor_flags = *flags;
+ else
+ wil->monitor_flags = 0;
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ wdev->iftype = type;
+
+ return 0;
+}
+
+static int wil_cfg80211_scan(struct wiphy *wiphy,
+ struct cfg80211_scan_request *request)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = wil->wdev;
+ struct {
+ struct wmi_start_scan_cmd cmd;
+ u16 chnl[4];
+ } __packed cmd;
+ uint i, n;
+
+ if (wil->scan_request) {
+ wil_err(wil, "Already scanning\n");
+ return -EAGAIN;
+ }
+
+ /* check we are client side */
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ break;
+ default:
+ return -EOPNOTSUPP;
+
+ }
+
+ /* FW doesn't support scanning after a connection attempt */
+ if (test_bit(wil_status_dontscan, &wil->status)) {
+ wil_err(wil, "Scan after connect attempt not supported\n");
+ return -EBUSY;
+ }
+
+ wil->scan_request = request;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd.num_channels = 0;
+ n = min(request->n_channels, 4U);
+ for (i = 0; i < n; i++) {
+ int ch = request->channels[i]->hw_value;
+ if (ch == 0) {
+ wil_err(wil,
+ "Scan requested for unknown frequency %dMhz\n",
+ request->channels[i]->center_freq);
+ continue;
+ }
+ /* 0-based channel indexes */
+ cmd.cmd.channel_list[cmd.cmd.num_channels++].channel = ch - 1;
+ wil_dbg(wil, "Scan for ch %d : %d MHz\n", ch,
+ request->channels[i]->center_freq);
+ }
+
+ return wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
+ cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
+}
+
+static int wil_cfg80211_connect(struct wiphy *wiphy,
+ struct net_device *ndev,
+ struct cfg80211_connect_params *sme)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct cfg80211_bss *bss;
+ struct wmi_connect_cmd conn;
+ const u8 *ssid_eid;
+ const u8 *rsn_eid;
+ int ch;
+ int rc = 0;
+
+ bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
+ sme->ssid, sme->ssid_len,
+ WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
+ if (!bss) {
+ wil_err(wil, "Unable to find BSS\n");
+ return -ENOENT;
+ }
+
+ ssid_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
+ if (!ssid_eid) {
+ wil_err(wil, "No SSID\n");
+ rc = -ENOENT;
+ goto out;
+ }
+
+ rsn_eid = sme->ie ?
+ cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) :
+ NULL;
+ if (rsn_eid) {
+ if (sme->ie_len > WMI_MAX_IE_LEN) {
+ rc = -ERANGE;
+ wil_err(wil, "IE too large (%td bytes)\n",
+ sme->ie_len);
+ goto out;
+ }
+ /*
+ * For secure assoc, send:
+ * (1) WMI_DELETE_CIPHER_KEY_CMD
+ * (2) WMI_SET_APPIE_CMD
+ */
+ rc = wmi_del_cipher_key(wil, 0, bss->bssid);
+ if (rc) {
+ wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD failed\n");
+ goto out;
+ }
+ /* WMI_SET_APPIE_CMD */
+ rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie);
+ if (rc) {
+ wil_err(wil, "WMI_SET_APPIE_CMD failed\n");
+ goto out;
+ }
+ }
+
+ /* WMI_CONNECT_CMD */
+ memset(&conn, 0, sizeof(conn));
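+ /* In 802.11ad (DMG), the two low-order capability bits carry the BSS type */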
+ switch (bss->capability & 0x03) {
+ case WLAN_CAPABILITY_DMG_TYPE_AP:
+ conn.network_type = WMI_NETTYPE_INFRA;
+ break;
+ case WLAN_CAPABILITY_DMG_TYPE_PBSS:
+ conn.network_type = WMI_NETTYPE_P2P;
+ break;
+ default:
+ wil_err(wil, "Unsupported BSS type, capability= 0x%04x\n",
+ bss->capability);
+ goto out;
+ }
+ if (rsn_eid) {
+ conn.dot11_auth_mode = WMI_AUTH11_SHARED;
+ conn.auth_mode = WMI_AUTH_WPA2_PSK;
+ conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP;
+ conn.pairwise_crypto_len = 16;
+ } else {
+ conn.dot11_auth_mode = WMI_AUTH11_OPEN;
+ conn.auth_mode = WMI_AUTH_NONE;
+ }
+
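+ /* ssid_eid[1] is the IE length byte; clamp to the 32-byte SSID maximum */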
+ conn.ssid_len = min_t(u8, ssid_eid[1], 32);
+ memcpy(conn.ssid, ssid_eid+2, conn.ssid_len);
+
+ ch = bss->channel->hw_value;
+ if (ch == 0) {
+ wil_err(wil, "BSS at unknown frequency %dMhz\n",
+ bss->channel->center_freq);
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+ conn.channel = ch - 1;
+
+ memcpy(conn.bssid, bss->bssid, 6);
+ memcpy(conn.dst_mac, bss->bssid, 6);
+ /*
+ * FW doesn't support scanning after a connection attempt
+ */
+ set_bit(wil_status_dontscan, &wil->status);
+
+ rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn));
+ if (rc == 0) {
+ /* Connect can take lots of time */
+ mod_timer(&wil->connect_timer,
+ jiffies + msecs_to_jiffies(2000));
+ }
+
+ out:
+ cfg80211_put_bss(bss);
+
+ return rc;
+}
+
+static int wil_cfg80211_disconnect(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u16 reason_code)
+{
+ int rc;
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ rc = wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0);
+
+ return rc;
+}
+
+static int wil_cfg80211_set_channel(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = wil->wdev;
+
+ wdev->preset_chandef = *chandef;
+
+ return 0;
+}
+
+static int wil_cfg80211_add_key(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr,
+ struct key_params *params)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ /* group key is not used */
+ if (!pairwise)
+ return 0;
+
+ return wmi_add_cipher_key(wil, key_index, mac_addr,
+ params->key_len, params->key);
+}
+
+static int wil_cfg80211_del_key(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ /* group key is not used */
+ if (!pairwise)
+ return 0;
+
+ return wmi_del_cipher_key(wil, key_index, mac_addr);
+}
+
+/* Needs to be present or wiphy_new() will WARN */
+static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8 key_index, bool unicast,
+ bool multicast)
+{
+ return 0;
+}
+
+static int wil_cfg80211_start_ap(struct wiphy *wiphy,
+ struct net_device *ndev,
+ struct cfg80211_ap_settings *info)
+{
+ int rc = 0;
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ struct ieee80211_channel *channel = info->chandef.chan;
+ struct cfg80211_beacon_data *bcon = &info->beacon;
+ u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+
+ if (!channel) {
+ wil_err(wil, "AP: No channel???\n");
+ return -EINVAL;
+ }
+
+ wil_dbg(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value,
+ channel->center_freq, info->privacy ? "secure" : "open");
+ print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
+ info->ssid, info->ssid_len);
+
+ rc = wil_reset(wil);
+ if (rc)
+ return rc;
+
+ rc = wmi_set_ssid(wil, info->ssid_len, info->ssid);
+ if (rc)
+ return rc;
+
+ rc = wmi_set_channel(wil, channel->hw_value);
+ if (rc)
+ return rc;
+
+ /* MAC address - pre-requisite for other commands */
+ wmi_set_mac_address(wil, ndev->dev_addr);
+
+ /* IE's */
+ /* bcon 'head' IEs are not relevant for the 60g band */
+ wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
+ bcon->beacon_ies);
+ wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len,
+ bcon->proberesp_ies);
+ wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
+ bcon->assocresp_ies);
+
+ wil->secure_pcp = info->privacy;
+
+ rc = wmi_set_bcon(wil, info->beacon_interval, wmi_nettype);
+ if (rc)
+ return rc;
+
+ /* Rx VRING. After MAC and beacon */
+ rc = wil_rx_init(wil);
+
+ netif_carrier_on(ndev);
+
+ return rc;
+}
+
+static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
+ struct net_device *ndev)
+{
+ int rc = 0;
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+
+ /* To stop beaconing, set BI to 0 */
+ rc = wmi_set_bcon(wil, 0, wmi_nettype);
+
+ return rc;
+}
+
+static struct cfg80211_ops wil_cfg80211_ops = {
+ .scan = wil_cfg80211_scan,
+ .connect = wil_cfg80211_connect,
+ .disconnect = wil_cfg80211_disconnect,
+ .change_virtual_intf = wil_cfg80211_change_iface,
+ .get_station = wil_cfg80211_get_station,
+ .set_monitor_channel = wil_cfg80211_set_channel,
+ .add_key = wil_cfg80211_add_key,
+ .del_key = wil_cfg80211_del_key,
+ .set_default_key = wil_cfg80211_set_default_key,
+ /* AP mode */
+ .start_ap = wil_cfg80211_start_ap,
+ .stop_ap = wil_cfg80211_stop_ap,
+};
+
+static void wil_wiphy_init(struct wiphy *wiphy)
+{
+ /* TODO: set real value */
+ wiphy->max_scan_ssids = 10;
+ wiphy->max_num_pmkids = 0 /* TODO: */;
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MONITOR);
+ /* TODO: enable P2P when integrated with supplicant:
+ * BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO)
+ */
+ wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
+ WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+ dev_warn(wiphy_dev(wiphy), "%s : flags = 0x%08x\n",
+ __func__, wiphy->flags);
+ wiphy->probe_resp_offload =
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
+
+ wiphy->bands[IEEE80211_BAND_60GHZ] = &wil_band_60ghz;
+
+ /* TODO: figure this out */
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+
+ wiphy->cipher_suites = wil_cipher_suites;
+ wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites);
+ wiphy->mgmt_stypes = wil_mgmt_stypes;
+}
+
+struct wireless_dev *wil_cfg80211_init(struct device *dev)
+{
+ int rc = 0;
+ struct wireless_dev *wdev;
+
+ wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
+ if (!wdev)
+ return ERR_PTR(-ENOMEM);
+
+ wdev->wiphy = wiphy_new(&wil_cfg80211_ops,
+ sizeof(struct wil6210_priv));
+ if (!wdev->wiphy) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ set_wiphy_dev(wdev->wiphy, dev);
+ wil_wiphy_init(wdev->wiphy);
+
+ rc = wiphy_register(wdev->wiphy);
+ if (rc < 0)
+ goto out_failed_reg;
+
+ return wdev;
+
+out_failed_reg:
+ wiphy_free(wdev->wiphy);
+out:
+ kfree(wdev);
+
+ return ERR_PTR(rc);
+}
+
+void wil_wdev_free(struct wil6210_priv *wil)
+{
+ struct wireless_dev *wdev = wil_to_wdev(wil);
+
+ if (!wdev)
+ return;
+
+ wiphy_unregister(wdev->wiphy);
+ wiphy_free(wdev->wiphy);
+ kfree(wdev);
+}
diff --git a/drivers/net/wireless/ath/wil6210/dbg_hexdump.h b/drivers/net/wireless/ath/wil6210/dbg_hexdump.h
new file mode 100644
index 000000000000..6a315ba5aa7d
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/dbg_hexdump.h
@@ -0,0 +1,30 @@
+#ifndef WIL_DBG_HEXDUMP_H_
+#define WIL_DBG_HEXDUMP_H_
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define wil_dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, \
+ __builtin_constant_p(prefix_str) ? prefix_str : "hexdump");\
+ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
+ print_hex_dump(KERN_DEBUG, prefix_str, \
+ prefix_type, rowsize, groupsize, \
+ buf, len, ascii); \
+} while (0)
+
+#define wil_print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ wil_dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
+
+#define print_hex_dump_bytes(prefix_str, prefix_type, buf, len) \
+ wil_dynamic_hex_dump(prefix_str, prefix_type, 16, 1, buf, len, true)
+#else /* defined(CONFIG_DYNAMIC_DEBUG) */
+#define wil_print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
+#endif /* defined(CONFIG_DYNAMIC_DEBUG) */
+
+#endif /* WIL_DBG_HEXDUMP_H_ */
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
new file mode 100644
index 000000000000..65fc9683bfd8
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -0,0 +1,603 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+
+#include "wil6210.h"
+#include "txrx.h"
+
+/* Nasty hack. Better to have per-device instances */
+static u32 mem_addr;
+static u32 dbg_txdesc_index;
+
+static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
+ const char *name, struct vring *vring)
+{
+ void __iomem *x = wmi_addr(wil, vring->hwtail);
+
+ seq_printf(s, "VRING %s = {\n", name);
+ seq_printf(s, " pa = 0x%016llx\n", (unsigned long long)vring->pa);
+ seq_printf(s, " va = 0x%p\n", vring->va);
+ seq_printf(s, " size = %d\n", vring->size);
+ seq_printf(s, " swtail = %d\n", vring->swtail);
+ seq_printf(s, " swhead = %d\n", vring->swhead);
+ seq_printf(s, " hwtail = [0x%08x] -> ", vring->hwtail);
+ if (x)
+ seq_printf(s, "0x%08x\n", ioread32(x));
+ else
+ seq_printf(s, "???\n");
+
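+ /* Dump per-descriptor state only for rings of up to 1024 entries,
+ * presumably to keep the debugfs output bounded
+ */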
+ if (vring->va && (vring->size < 1025)) {
+ uint i;
+ for (i = 0; i < vring->size; i++) {
+ volatile struct vring_tx_desc *d = &vring->va[i].tx;
+ if ((i % 64) == 0 && (i != 0))
+ seq_printf(s, "\n");
+ seq_printf(s, "%s", (d->dma.status & BIT(0)) ?
+ "S" : (vring->ctx[i] ? "H" : "h"));
+ }
+ seq_printf(s, "\n");
+ }
+ seq_printf(s, "}\n");
+}
+
+static int wil_vring_debugfs_show(struct seq_file *s, void *data)
+{
+ uint i;
+ struct wil6210_priv *wil = s->private;
+
+ wil_print_vring(s, wil, "rx", &wil->vring_rx);
+
+ for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
+ struct vring *vring = &(wil->vring_tx[i]);
+ if (vring->va) {
+ char name[10];
+ snprintf(name, sizeof(name), "tx_%2d", i);
+ wil_print_vring(s, wil, name, vring);
+ }
+ }
+
+ return 0;
+}
+
+static int wil_vring_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_vring_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_vring = {
+ .open = wil_vring_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static void wil_print_ring(struct seq_file *s, const char *prefix,
+ void __iomem *off)
+{
+ struct wil6210_priv *wil = s->private;
+ struct wil6210_mbox_ring r;
+ int rsize;
+ uint i;
+
+ wil_memcpy_fromio_32(&r, off, sizeof(r));
+ wil_mbox_ring_le2cpus(&r);
+ /*
+ * We just read a memory block from the NIC. This memory may be
+ * garbage. Check its validity before using it.
+ */
+ rsize = r.size / sizeof(struct wil6210_mbox_ring_desc);
+
+ seq_printf(s, "ring %s = {\n", prefix);
+ seq_printf(s, " base = 0x%08x\n", r.base);
+ seq_printf(s, " size = 0x%04x bytes -> %d entries\n", r.size, rsize);
+ seq_printf(s, " tail = 0x%08x\n", r.tail);
+ seq_printf(s, " head = 0x%08x\n", r.head);
+ seq_printf(s, " entry size = %d\n", r.entry_size);
+
+ if (r.size % sizeof(struct wil6210_mbox_ring_desc)) {
+ seq_printf(s, " ??? size is not multiple of %zd, garbage?\n",
+ sizeof(struct wil6210_mbox_ring_desc));
+ goto out;
+ }
+
+ if (!wmi_addr(wil, r.base) ||
+ !wmi_addr(wil, r.tail) ||
+ !wmi_addr(wil, r.head)) {
+ seq_printf(s, " ??? pointers are garbage?\n");
+ goto out;
+ }
+
+ for (i = 0; i < rsize; i++) {
+ struct wil6210_mbox_ring_desc d;
+ struct wil6210_mbox_hdr hdr;
+ size_t delta = i * sizeof(d);
+ void __iomem *x = wil->csr + HOSTADDR(r.base) + delta;
+
+ wil_memcpy_fromio_32(&d, x, sizeof(d));
+
+ seq_printf(s, " [%2x] %s %s%s 0x%08x", i,
+ d.sync ? "F" : "E",
+ (r.tail - r.base == delta) ? "t" : " ",
+ (r.head - r.base == delta) ? "h" : " ",
+ le32_to_cpu(d.addr));
+ if (0 == wmi_read_hdr(wil, d.addr, &hdr)) {
+ u16 len = le16_to_cpu(hdr.len);
+ seq_printf(s, " -> %04x %04x %04x %02x\n",
+ le16_to_cpu(hdr.seq), len,
+ le16_to_cpu(hdr.type), hdr.flags);
+ if (len <= MAX_MBOXITEM_SIZE) {
+ int n = 0;
+ unsigned char printbuf[16 * 3 + 2];
+ unsigned char databuf[MAX_MBOXITEM_SIZE];
+ void __iomem *src = wmi_buffer(wil, d.addr) +
+ sizeof(struct wil6210_mbox_hdr);
+ /*
+ * No need to check @src for validity -
+ * we already validated @d.addr while
+ * reading header
+ */
+ wil_memcpy_fromio_32(databuf, src, len);
+ while (n < len) {
+ int l = min(len - n, 16);
+ hex_dump_to_buffer(databuf + n, l,
+ 16, 1, printbuf,
+ sizeof(printbuf),
+ false);
+ seq_printf(s, " : %s\n", printbuf);
+ n += l;
+ }
+ }
+ } else {
+ seq_printf(s, "\n");
+ }
+ }
+ out:
+ seq_printf(s, "}\n");
+}
+
+static int wil_mbox_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+
+ wil_print_ring(s, "tx", wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, tx));
+ wil_print_ring(s, "rx", wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, rx));
+
+ return 0;
+}
+
+static int wil_mbox_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_mbox_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_mbox = {
+ .open = wil_mbox_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static int wil_debugfs_iomem_x32_set(void *data, u64 val)
+{
+ iowrite32(val, (void __iomem *)data);
+ wmb(); /* make sure write propagated to HW */
+
+ return 0;
+}
+
+static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
+{
+ *val = ioread32((void __iomem *)data);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get,
+ wil_debugfs_iomem_x32_set, "0x%08llx\n");
+
+static struct dentry *wil_debugfs_create_iomem_x32(const char *name,
+ mode_t mode,
+ struct dentry *parent,
+ void __iomem *value)
+{
+ return debugfs_create_file(name, mode, parent, (void * __force)value,
+ &fops_iomem_x32);
+}
+
+static int wil6210_debugfs_create_ISR(struct wil6210_priv *wil,
+ const char *name,
+ struct dentry *parent, u32 off)
+{
+ struct dentry *d = debugfs_create_dir(name, parent);
+
+ if (IS_ERR_OR_NULL(d))
+ return -ENODEV;
+
+ wil_debugfs_create_iomem_x32("ICC", S_IRUGO | S_IWUSR, d,
+ wil->csr + off);
+ wil_debugfs_create_iomem_x32("ICR", S_IRUGO | S_IWUSR, d,
+ wil->csr + off + 4);
+ wil_debugfs_create_iomem_x32("ICM", S_IRUGO | S_IWUSR, d,
+ wil->csr + off + 8);
+ wil_debugfs_create_iomem_x32("ICS", S_IWUSR, d,
+ wil->csr + off + 12);
+ wil_debugfs_create_iomem_x32("IMV", S_IRUGO | S_IWUSR, d,
+ wil->csr + off + 16);
+ wil_debugfs_create_iomem_x32("IMS", S_IWUSR, d,
+ wil->csr + off + 20);
+ wil_debugfs_create_iomem_x32("IMC", S_IWUSR, d,
+ wil->csr + off + 24);
+
+ return 0;
+}
+
+static int wil6210_debugfs_create_pseudo_ISR(struct wil6210_priv *wil,
+ struct dentry *parent)
+{
+ struct dentry *d = debugfs_create_dir("PSEUDO_ISR", parent);
+
+ if (IS_ERR_OR_NULL(d))
+ return -ENODEV;
+
+ wil_debugfs_create_iomem_x32("CAUSE", S_IRUGO, d, wil->csr +
+ HOSTADDR(RGF_DMA_PSEUDO_CAUSE));
+ wil_debugfs_create_iomem_x32("MASK_SW", S_IRUGO, d, wil->csr +
+ HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+ wil_debugfs_create_iomem_x32("MASK_FW", S_IRUGO, d, wil->csr +
+ HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_FW));
+
+ return 0;
+}
+
+static int wil6210_debugfs_create_ITR_CNT(struct wil6210_priv *wil,
+ struct dentry *parent)
+{
+ struct dentry *d = debugfs_create_dir("ITR_CNT", parent);
+
+ if (IS_ERR_OR_NULL(d))
+ return -ENODEV;
+
+ wil_debugfs_create_iomem_x32("TRSH", S_IRUGO, d, wil->csr +
+ HOSTADDR(RGF_DMA_ITR_CNT_TRSH));
+ wil_debugfs_create_iomem_x32("DATA", S_IRUGO, d, wil->csr +
+ HOSTADDR(RGF_DMA_ITR_CNT_DATA));
+ wil_debugfs_create_iomem_x32("CTL", S_IRUGO, d, wil->csr +
+ HOSTADDR(RGF_DMA_ITR_CNT_CRL));
+
+ return 0;
+}
+
+static int wil_memread_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ void __iomem *a = wmi_buffer(wil, cpu_to_le32(mem_addr));
+
+ if (a)
+ seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, ioread32(a));
+ else
+ seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);
+
+ return 0;
+}
+
+static int wil_memread_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_memread_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_memread = {
+ .open = wil_memread_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static int wil_default_open(struct inode *inode, struct file *file)
+{
+ if (inode->i_private)
+ file->private_data = inode->i_private;
+
+ return 0;
+}
+
+static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ enum { max_count = 4096 };
+ struct debugfs_blob_wrapper *blob = file->private_data;
+ loff_t pos = *ppos;
+ size_t available = blob->size;
+ void *buf;
+ size_t ret;
+
+ if (pos < 0)
+ return -EINVAL;
+
+ if (pos >= available || !count)
+ return 0;
+
+ if (count > available - pos)
+ count = available - pos;
+ if (count > max_count)
+ count = max_count;
+
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ wil_memcpy_fromio_32(buf, (const volatile void __iomem *)blob->data +
+ pos, count);
+
+ ret = copy_to_user(user_buf, buf, count);
+ kfree(buf);
+ if (ret == count)
+ return -EFAULT;
+
+ count -= ret;
+ *ppos = pos + count;
+
+ return count;
+}
+
+static const struct file_operations fops_ioblob = {
+ .read = wil_read_file_ioblob,
+ .open = wil_default_open,
+ .llseek = default_llseek,
+};
+
+static
+struct dentry *wil_debugfs_create_ioblob(const char *name,
+ mode_t mode,
+ struct dentry *parent,
+ struct debugfs_blob_wrapper *blob)
+{
+ return debugfs_create_file(name, mode, parent, blob, &fops_ioblob);
+}
+/*---reset---*/
+static ssize_t wil_write_file_reset(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ /**
+ * BUG:
+ * this code does NOT sync device state with the rest of the system;
+ * use with care, debug only!
+ */
+ rtnl_lock();
+ dev_close(ndev);
+ ndev->flags &= ~IFF_UP;
+ rtnl_unlock();
+ wil_reset(wil);
+
+ return len;
+}
+
+static const struct file_operations fops_reset = {
+ .write = wil_write_file_reset,
+ .open = wil_default_open,
+};
+/*---------Tx descriptor------------*/
+
+static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ struct vring *vring = &(wil->vring_tx[0]);
+
+ if (!vring->va) {
+ seq_printf(s, "No Tx VRING\n");
+ return 0;
+ }
+
+ if (dbg_txdesc_index < vring->size) {
+ volatile struct vring_tx_desc *d =
+ &(vring->va[dbg_txdesc_index].tx);
+ volatile u32 *u = (volatile u32 *)d;
+ struct sk_buff *skb = vring->ctx[dbg_txdesc_index];
+
+ seq_printf(s, "Tx[%3d] = {\n", dbg_txdesc_index);
+ seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ u[0], u[1], u[2], u[3]);
+ seq_printf(s, " DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ u[4], u[5], u[6], u[7]);
+ seq_printf(s, " SKB = %p\n", skb);
+
+ if (skb) {
+ unsigned char printbuf[16 * 3 + 2];
+ int i = 0;
+ int len = skb_headlen(skb);
+ void *p = skb->data;
+
+ seq_printf(s, " len = %d\n", len);
+
+ while (i < len) {
+ int l = min(len - i, 16);
+ hex_dump_to_buffer(p + i, l, 16, 1, printbuf,
+ sizeof(printbuf), false);
+ seq_printf(s, " : %s\n", printbuf);
+ i += l;
+ }
+ }
+ seq_printf(s, "}\n");
+ } else {
+ seq_printf(s, "TxDesc index (%d) >= size (%d)\n",
+ dbg_txdesc_index, vring->size);
+ }
+
+ return 0;
+}
+
+static int wil_txdesc_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_txdesc_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_txdesc = {
+ .open = wil_txdesc_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+/*---------beamforming------------*/
+static int wil_bf_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ seq_printf(s,
+ "TSF : 0x%016llx\n"
+ "TxMCS : %d\n"
+ "Sectors(rx:tx) my %2d:%2d peer %2d:%2d\n",
+ wil->stats.tsf, wil->stats.bf_mcs,
+ wil->stats.my_rx_sector, wil->stats.my_tx_sector,
+ wil->stats.peer_rx_sector, wil->stats.peer_tx_sector);
+ return 0;
+}
+
+static int wil_bf_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_bf_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_bf = {
+ .open = wil_bf_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+/*---------SSID------------*/
+static ssize_t wil_read_file_ssid(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ struct wireless_dev *wdev = wil_to_wdev(wil);
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ wdev->ssid, wdev->ssid_len);
+}
+
+static ssize_t wil_write_file_ssid(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ struct wireless_dev *wdev = wil_to_wdev(wil);
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ if (*ppos != 0) {
+ wil_err(wil, "Unable to set SSID substring from [%d]\n",
+ (int)*ppos);
+ return -EINVAL;
+ }
+
+ if (count > sizeof(wdev->ssid)) {
+ wil_err(wil, "SSID too long, len = %d\n", (int)count);
+ return -EINVAL;
+ }
+ if (netif_running(ndev)) {
+ wil_err(wil, "Unable to change SSID on running interface\n");
+ return -EINVAL;
+ }
+
+ wdev->ssid_len = count;
+ return simple_write_to_buffer(wdev->ssid, wdev->ssid_len, ppos,
+ buf, count);
+}
+
+static const struct file_operations fops_ssid = {
+ .read = wil_read_file_ssid,
+ .write = wil_write_file_ssid,
+ .open = wil_default_open,
+};
+
+/*----------------*/
+int wil6210_debugfs_init(struct wil6210_priv *wil)
+{
+ struct dentry *dbg = wil->debug = debugfs_create_dir(WIL_NAME,
+ wil_to_wiphy(wil)->debugfsdir);
+
+ if (IS_ERR_OR_NULL(dbg))
+ return -ENODEV;
+
+ debugfs_create_file("mbox", S_IRUGO, dbg, wil, &fops_mbox);
+ debugfs_create_file("vrings", S_IRUGO, dbg, wil, &fops_vring);
+ debugfs_create_file("txdesc", S_IRUGO, dbg, wil, &fops_txdesc);
+ debugfs_create_u32("txdesc_index", S_IRUGO | S_IWUSR, dbg,
+ &dbg_txdesc_index);
+ debugfs_create_file("bf", S_IRUGO, dbg, wil, &fops_bf);
+ debugfs_create_file("ssid", S_IRUGO | S_IWUSR, dbg, wil, &fops_ssid);
+ debugfs_create_u32("secure_pcp", S_IRUGO | S_IWUSR, dbg,
+ &wil->secure_pcp);
+
+ wil6210_debugfs_create_ISR(wil, "USER_ICR", dbg,
+ HOSTADDR(RGF_USER_USER_ICR));
+ wil6210_debugfs_create_ISR(wil, "DMA_EP_TX_ICR", dbg,
+ HOSTADDR(RGF_DMA_EP_TX_ICR));
+ wil6210_debugfs_create_ISR(wil, "DMA_EP_RX_ICR", dbg,
+ HOSTADDR(RGF_DMA_EP_RX_ICR));
+ wil6210_debugfs_create_ISR(wil, "DMA_EP_MISC_ICR", dbg,
+ HOSTADDR(RGF_DMA_EP_MISC_ICR));
+ wil6210_debugfs_create_pseudo_ISR(wil, dbg);
+ wil6210_debugfs_create_ITR_CNT(wil, dbg);
+
+ debugfs_create_u32("mem_addr", S_IRUGO | S_IWUSR, dbg, &mem_addr);
+ debugfs_create_file("mem_val", S_IRUGO, dbg, wil, &fops_memread);
+
+ debugfs_create_file("reset", S_IWUSR, dbg, wil, &fops_reset);
+
+ wil->rgf_blob.data = (void * __force)wil->csr + 0;
+ wil->rgf_blob.size = 0xa000;
+ wil_debugfs_create_ioblob("blob_rgf", S_IRUGO, dbg, &wil->rgf_blob);
+
+ wil->fw_code_blob.data = (void * __force)wil->csr + 0x40000;
+ wil->fw_code_blob.size = 0x40000;
+ wil_debugfs_create_ioblob("blob_fw_code", S_IRUGO, dbg,
+ &wil->fw_code_blob);
+
+ wil->fw_data_blob.data = (void * __force)wil->csr + 0x80000;
+ wil->fw_data_blob.size = 0x8000;
+ wil_debugfs_create_ioblob("blob_fw_data", S_IRUGO, dbg,
+ &wil->fw_data_blob);
+
+ wil->fw_peri_blob.data = (void * __force)wil->csr + 0x88000;
+ wil->fw_peri_blob.size = 0x18000;
+ wil_debugfs_create_ioblob("blob_fw_peri", S_IRUGO, dbg,
+ &wil->fw_peri_blob);
+
+ wil->uc_code_blob.data = (void * __force)wil->csr + 0xa0000;
+ wil->uc_code_blob.size = 0x10000;
+ wil_debugfs_create_ioblob("blob_uc_code", S_IRUGO, dbg,
+ &wil->uc_code_blob);
+
+ wil->uc_data_blob.data = (void * __force)wil->csr + 0xb0000;
+ wil->uc_data_blob.size = 0x4000;
+ wil_debugfs_create_ioblob("blob_uc_data", S_IRUGO, dbg,
+ &wil->uc_data_blob);
+
+ return 0;
+}
+
+void wil6210_debugfs_remove(struct wil6210_priv *wil)
+{
+ debugfs_remove_recursive(wil->debug);
+ wil->debug = NULL;
+}
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
new file mode 100644
index 000000000000..38049da71049
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -0,0 +1,471 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/interrupt.h>
+
+#include "wil6210.h"
+
+/**
+ * Theory of operation:
+ *
+ * There is an ISR pseudo-cause register,
+ * dma_rgf->DMA_RGF.PSEUDO_CAUSE.PSEUDO_CAUSE.
+ * Its bits represent the OR'ed bits of the 3 real ISR registers:
+ * TX, RX, and MISC.
+ *
+ * Registers may be configured for either "write 1 to clear" or
+ * "clear on read" mode.
+ *
+ * When handling an interrupt, one has to mask/unmask interrupts for the
+ * real ISR registers, or the hardware may malfunction.
+ *
+ */
+
+#define WIL6210_IRQ_DISABLE (0xFFFFFFFFUL)
+#define WIL6210_IMC_RX BIT_DMA_EP_RX_ICR_RX_DONE
+#define WIL6210_IMC_TX (BIT_DMA_EP_TX_ICR_TX_DONE | \
+ BIT_DMA_EP_TX_ICR_TX_DONE_N(0))
+#define WIL6210_IMC_MISC (ISR_MISC_FW_READY | ISR_MISC_MBOX_EVT)
+
+#define WIL6210_IRQ_PSEUDO_MASK (u32)(~(BIT_DMA_PSEUDO_CAUSE_RX | \
+ BIT_DMA_PSEUDO_CAUSE_TX | \
+ BIT_DMA_PSEUDO_CAUSE_MISC))
+
+#if defined(CONFIG_WIL6210_ISR_COR)
+/* configure to Clear-On-Read mode */
+#define WIL_ICR_ICC_VALUE (0xFFFFFFFFUL)
+
+static inline void wil_icr_clear(u32 x, void __iomem *addr)
+{
+
+}
+#else /* defined(CONFIG_WIL6210_ISR_COR) */
+/* configure to Write-1-to-Clear mode */
+#define WIL_ICR_ICC_VALUE (0UL)
+
+static inline void wil_icr_clear(u32 x, void __iomem *addr)
+{
+ iowrite32(x, addr);
+}
+#endif /* defined(CONFIG_WIL6210_ISR_COR) */
+
+static inline u32 wil_ioread32_and_clear(void __iomem *addr)
+{
+ u32 x = ioread32(addr);
+
+ wil_icr_clear(x, addr);
+
+ return x;
+}
+
+static void wil6210_mask_irq_tx(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, IMS));
+}
+
+static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, IMS));
+}
+
+static void wil6210_mask_irq_misc(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, IMS));
+}
+
+static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
+{
+ wil_dbg_IRQ(wil, "%s()\n", __func__);
+
+ iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+ HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+
+ clear_bit(wil_status_irqen, &wil->status);
+}
+
+static void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IMC_TX, wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, IMC));
+}
+
+static void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IMC_RX, wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, IMC));
+}
+
+static void wil6210_unmask_irq_misc(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IMC_MISC, wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, IMC));
+}
+
+static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
+{
+ wil_dbg_IRQ(wil, "%s()\n", __func__);
+
+ set_bit(wil_status_irqen, &wil->status);
+
+ iowrite32(WIL6210_IRQ_PSEUDO_MASK, wil->csr +
+ HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+}
+
+void wil6210_disable_irq(struct wil6210_priv *wil)
+{
+ wil_dbg_IRQ(wil, "%s()\n", __func__);
+
+ wil6210_mask_irq_tx(wil);
+ wil6210_mask_irq_rx(wil);
+ wil6210_mask_irq_misc(wil);
+ wil6210_mask_irq_pseudo(wil);
+}
+
+void wil6210_enable_irq(struct wil6210_priv *wil)
+{
+ wil_dbg_IRQ(wil, "%s()\n", __func__);
+
+ iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICC));
+ iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICC));
+ iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICC));
+
+ wil6210_unmask_irq_pseudo(wil);
+ wil6210_unmask_irq_tx(wil);
+ wil6210_unmask_irq_rx(wil);
+ wil6210_unmask_irq_misc(wil);
+}
+
+static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+
+ wil_dbg_IRQ(wil, "ISR RX 0x%08x\n", isr);
+
+ if (!isr) {
+ wil_err(wil, "spurious IRQ: RX\n");
+ return IRQ_NONE;
+ }
+
+ wil6210_mask_irq_rx(wil);
+
+ if (isr & BIT_DMA_EP_RX_ICR_RX_DONE) {
+ wil_dbg_IRQ(wil, "RX done\n");
+ isr &= ~BIT_DMA_EP_RX_ICR_RX_DONE;
+ wil_rx_handle(wil);
+ }
+
+ if (isr)
+ wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr);
+
+ wil6210_unmask_irq_rx(wil);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+
+ wil_dbg_IRQ(wil, "ISR TX 0x%08x\n", isr);
+
+ if (!isr) {
+ wil_err(wil, "spurious IRQ: TX\n");
+ return IRQ_NONE;
+ }
+
+ wil6210_mask_irq_tx(wil);
+
+ if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) {
+ uint i;
+ wil_dbg_IRQ(wil, "TX done\n");
+ isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
+ for (i = 0; i < 24; i++) {
+ u32 mask = BIT_DMA_EP_TX_ICR_TX_DONE_N(i);
+ if (isr & mask) {
+ isr &= ~mask;
+ wil_dbg_IRQ(wil, "TX done(%i)\n", i);
+ wil_tx_complete(wil, i);
+ }
+ }
+ }
+
+ if (isr)
+ wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr);
+
+ wil6210_unmask_irq_tx(wil);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+
+ wil_dbg_IRQ(wil, "ISR MISC 0x%08x\n", isr);
+
+ if (!isr) {
+ wil_err(wil, "spurious IRQ: MISC\n");
+ return IRQ_NONE;
+ }
+
+ wil6210_mask_irq_misc(wil);
+
+ if (isr & ISR_MISC_FW_READY) {
+ wil_dbg_IRQ(wil, "IRQ: FW ready\n");
+ /**
+ * Actual FW readiness is indicated by the
+ * WMI_FW_READY_EVENTID event
+ */
+ isr &= ~ISR_MISC_FW_READY;
+ }
+
+ wil->isr_misc = isr;
+
+ if (isr) {
+ return IRQ_WAKE_THREAD;
+ } else {
+ wil6210_unmask_irq_misc(wil);
+ return IRQ_HANDLED;
+ }
+}
+
+static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr = wil->isr_misc;
+
+ wil_dbg_IRQ(wil, "Thread ISR MISC 0x%08x\n", isr);
+
+ if (isr & ISR_MISC_MBOX_EVT) {
+ wil_dbg_IRQ(wil, "MBOX event\n");
+ wmi_recv_cmd(wil);
+ isr &= ~ISR_MISC_MBOX_EVT;
+ }
+
+ if (isr)
+ wil_err(wil, "un-handled MISC ISR bits 0x%08x\n", isr);
+
+ wil->isr_misc = 0;
+
+ wil6210_unmask_irq_misc(wil);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * thread IRQ handler
+ */
+static irqreturn_t wil6210_thread_irq(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+
+ wil_dbg_IRQ(wil, "Thread IRQ\n");
+ /* Discover real IRQ cause */
+ if (wil->isr_misc)
+ wil6210_irq_misc_thread(irq, cookie);
+
+ wil6210_unmask_irq_pseudo(wil);
+
+ return IRQ_HANDLED;
+}
+
+/* DEBUG
+ * There is a subtle bug in hardware that causes an IRQ to be raised when it
+ * should be masked. It is quite rare and hard to debug.
+ *
+ * Catch the IRQ issue if it happens and print everything we can.
+ */
+static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
+{
+ if (!test_bit(wil_status_irqen, &wil->status)) {
+ u32 icm_rx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ u32 icr_rx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ u32 imv_rx = ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, IMV));
+ u32 icm_tx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ u32 icr_tx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ u32 imv_tx = ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, IMV));
+ u32 icm_misc = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ u32 icr_misc = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ u32 imv_misc = ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, IMV));
+ wil_err(wil, "IRQ when it should be masked: pseudo 0x%08x\n"
+ "Rx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
+ "Tx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
+ "Misc icm:icr:imv 0x%08x 0x%08x 0x%08x\n",
+ pseudo_cause,
+ icm_rx, icr_rx, imv_rx,
+ icm_tx, icr_tx, imv_tx,
+ icm_misc, icr_misc, imv_misc);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static irqreturn_t wil6210_hardirq(int irq, void *cookie)
+{
+ irqreturn_t rc = IRQ_HANDLED;
+ struct wil6210_priv *wil = cookie;
+ u32 pseudo_cause = ioread32(wil->csr + HOSTADDR(RGF_DMA_PSEUDO_CAUSE));
+
+ /**
+ * pseudo_cause is Clear-On-Read, no need to ACK
+ */
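+ /* All-ones in the low byte most likely means the PCI read failed
+ * (device removed or held in reset); treat it, like zero, as
+ * "not our interrupt".
+ */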
+ if ((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff))
+ return IRQ_NONE;
+
+ /* FIXME: IRQ mask debug */
+ if (wil6210_debug_irq_mask(wil, pseudo_cause))
+ return IRQ_NONE;
+
+ wil6210_mask_irq_pseudo(wil);
+
+ /* Discover real IRQ cause
+ * There are 2 possible phases for every IRQ:
+ * - hard IRQ handler called right here
+ * - threaded handler called later
+ *
+ * Hard IRQ handler reads and clears ISR.
+ *
+ * If threaded handler requested, hard IRQ handler
+ * returns IRQ_WAKE_THREAD and saves ISR register value
+ * for the threaded handler use.
+ *
+ * voting for wake thread - need at least 1 vote
+ */
+ if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_RX) &&
+ (wil6210_irq_rx(irq, cookie) == IRQ_WAKE_THREAD))
+ rc = IRQ_WAKE_THREAD;
+
+ if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_TX) &&
+ (wil6210_irq_tx(irq, cookie) == IRQ_WAKE_THREAD))
+ rc = IRQ_WAKE_THREAD;
+
+ if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_MISC) &&
+ (wil6210_irq_misc(irq, cookie) == IRQ_WAKE_THREAD))
+ rc = IRQ_WAKE_THREAD;
+
+ /* if thread is requested, it will unmask IRQ */
+ if (rc != IRQ_WAKE_THREAD)
+ wil6210_unmask_irq_pseudo(wil);
+
+ wil_dbg_IRQ(wil, "Hard IRQ 0x%08x\n", pseudo_cause);
+
+ return rc;
+}
+
+static int wil6210_request_3msi(struct wil6210_priv *wil, int irq)
+{
+ int rc;
+ /*
+ * IRQs are in the following order:
+ * - Tx
+ * - Rx
+ * - Misc
+ */
+
+ rc = request_irq(irq, wil6210_irq_tx, IRQF_SHARED,
+ WIL_NAME"_tx", wil);
+ if (rc)
+ return rc;
+
+ rc = request_irq(irq + 1, wil6210_irq_rx, IRQF_SHARED,
+ WIL_NAME"_rx", wil);
+ if (rc)
+ goto free0;
+
+ rc = request_threaded_irq(irq + 2, wil6210_irq_misc,
+ wil6210_irq_misc_thread,
+ IRQF_SHARED, WIL_NAME"_misc", wil);
+ if (rc)
+ goto free1;
+
+ return 0;
+ /* error branch */
+free1:
+ free_irq(irq + 1, wil);
+free0:
+ free_irq(irq, wil);
+
+ return rc;
+}
+
+int wil6210_init_irq(struct wil6210_priv *wil, int irq)
+{
+ int rc;
+ if (wil->n_msi == 3)
+ rc = wil6210_request_3msi(wil, irq);
+ else
+ rc = request_threaded_irq(irq, wil6210_hardirq,
+ wil6210_thread_irq,
+ wil->n_msi ? 0 : IRQF_SHARED,
+ WIL_NAME, wil);
+ if (rc)
+ return rc;
+
+ wil6210_enable_irq(wil);
+
+ return 0;
+}
+
+void wil6210_fini_irq(struct wil6210_priv *wil, int irq)
+{
+ wil6210_disable_irq(wil);
+ free_irq(irq, wil);
+ if (wil->n_msi == 3) {
+ free_irq(irq + 1, wil);
+ free_irq(irq + 2, wil);
+ }
+}
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
new file mode 100644
index 000000000000..95fcd361322b
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -0,0 +1,407 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/ieee80211.h>
+#include <linux/wireless.h>
+#include <linux/slab.h>
+#include <linux/moduleparam.h>
+#include <linux/if_arp.h>
+
+#include "wil6210.h"
+
+/*
+ * Due to a hardware issue,
+ * one has to read/write to/from the NIC in 32-bit chunks;
+ * regular memcpy_fromio and siblings will
+ * not work on 64-bit platforms - they use 64-bit transactions.
+ *
+ * Force 32-bit transactions to enable the NIC on 64-bit platforms.
+ *
+ * To avoid byte swapping on big endian hosts, __raw_{read|write}l
+ * should be used - {read|write}l would swap bytes to present the
+ * little-endian PCI value in host endianness.
+ */
+void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
+ size_t count)
+{
+ u32 *d = dst;
+ const volatile u32 __iomem *s = src;
+
+ /* size_t is unsigned, if (count%4 != 0) it will wrap */
+ for (count += 4; count > 4; count -= 4)
+ *d++ = __raw_readl(s++);
+}
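
The loop bounds above (count += 4; count > 4) exist because size_t is unsigned: a plain count-down by 4 would wrap past zero whenever count is not a multiple of 4 and never terminate. Below is a minimal stand-alone sketch of the same guarded loop using plain memory instead of MMIO; copy32_rounded is a hypothetical helper, not a driver function.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static void copy32_rounded(uint32_t *dst, const uint32_t *src, size_t count)
{
	/* count is in bytes; bounds mirror wil_memcpy_fromio_32() so a
	 * count that is not a multiple of 4 cannot wrap the unsigned type */
	for (count += 4; count > 4; count -= 4)
		*dst++ = *src++;
}

int main(void)
{
	uint32_t src[4] = { 1, 2, 3, 4 };
	uint32_t dst[4] = { 0, 0, 0, 0 };

	copy32_rounded(dst, src, 6);		/* 6 bytes: copies 2 whole words */
	printf("%u %u\n", dst[0], dst[1]);	/* prints "1 2" */
	return 0;
}
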
+
+void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
+ size_t count)
+{
+ volatile u32 __iomem *d = dst;
+ const u32 *s = src;
+
+ for (count += 4; count > 4; count -= 4)
+ __raw_writel(*s++, d++);
+}
+
+static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
+{
+ uint i;
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *wdev = wil->wdev;
+
+ wil_dbg(wil, "%s()\n", __func__);
+
+ wil_link_off(wil);
+ clear_bit(wil_status_fwconnected, &wil->status);
+
+ switch (wdev->sme_state) {
+ case CFG80211_SME_CONNECTED:
+ cfg80211_disconnected(ndev, WLAN_STATUS_UNSPECIFIED_FAILURE,
+ NULL, 0, GFP_KERNEL);
+ break;
+ case CFG80211_SME_CONNECTING:
+ cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ break;
+ default:
+ ;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++)
+ wil_vring_fini_tx(wil, i);
+}
+
+static void wil_disconnect_worker(struct work_struct *work)
+{
+ struct wil6210_priv *wil = container_of(work,
+ struct wil6210_priv, disconnect_worker);
+
+ _wil6210_disconnect(wil, NULL);
+}
+
+static void wil_connect_timer_fn(ulong x)
+{
+ struct wil6210_priv *wil = (void *)x;
+
+ wil_dbg(wil, "Connect timeout\n");
+
+ /* reschedule to thread context - disconnect won't
+ * run from atomic context
+ */
+ schedule_work(&wil->disconnect_worker);
+}
+
+int wil_priv_init(struct wil6210_priv *wil)
+{
+ wil_dbg(wil, "%s()\n", __func__);
+
+ mutex_init(&wil->mutex);
+ mutex_init(&wil->wmi_mutex);
+
+ init_completion(&wil->wmi_ready);
+
+ wil->pending_connect_cid = -1;
+ setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil);
+
+ INIT_WORK(&wil->wmi_connect_worker, wmi_connect_worker);
+ INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
+ INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
+
+ INIT_LIST_HEAD(&wil->pending_wmi_ev);
+ spin_lock_init(&wil->wmi_ev_lock);
+
+ wil->wmi_wq = create_singlethread_workqueue(WIL_NAME"_wmi");
+ if (!wil->wmi_wq)
+ return -EAGAIN;
+
+ wil->wmi_wq_conn = create_singlethread_workqueue(WIL_NAME"_connect");
+ if (!wil->wmi_wq_conn) {
+ destroy_workqueue(wil->wmi_wq);
+ return -EAGAIN;
+ }
+
+ /* make shadow copy of registers that should not change on run time */
+ wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX,
+ sizeof(struct wil6210_mbox_ctl));
+ wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx);
+ wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
+
+ return 0;
+}
+
+void wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
+{
+ del_timer_sync(&wil->connect_timer);
+ _wil6210_disconnect(wil, bssid);
+}
+
+void wil_priv_deinit(struct wil6210_priv *wil)
+{
+ cancel_work_sync(&wil->disconnect_worker);
+ wil6210_disconnect(wil, NULL);
+ wmi_event_flush(wil);
+ destroy_workqueue(wil->wmi_wq_conn);
+ destroy_workqueue(wil->wmi_wq);
+}
+
+static void wil_target_reset(struct wil6210_priv *wil)
+{
+ wil_dbg(wil, "Resetting...\n");
+
+ /* register write */
+#define W(a, v) iowrite32(v, wil->csr + HOSTADDR(a))
+ /* register set = read, OR, write */
+#define S(a, v) iowrite32(ioread32(wil->csr + HOSTADDR(a)) | v, \
+ wil->csr + HOSTADDR(a))
+
+ /* hpal_perst_from_pad_src_n_mask */
+ S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(6));
+ /* car_perst_rst_src_n_mask */
+ S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(7));
+
+ W(RGF_USER_MAC_CPU_0, BIT(1)); /* mac_cpu_man_rst */
+ W(RGF_USER_USER_CPU_0, BIT(1)); /* user_cpu_man_rst */
+
+ msleep(100);
+
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000170);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FC00);
+
+ msleep(100);
+
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000001);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00000080);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+
+ msleep(2000);
+
+ W(RGF_USER_USER_CPU_0, BIT(0)); /* user_cpu_man_de_rst */
+
+ msleep(2000);
+
+ wil_dbg(wil, "Reset completed\n");
+
+#undef W
+#undef S
+}
+
+void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
+{
+ le32_to_cpus(&r->base);
+ le16_to_cpus(&r->entry_size);
+ le16_to_cpus(&r->size);
+ le32_to_cpus(&r->tail);
+ le32_to_cpus(&r->head);
+}
+
+static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
+{
+ ulong to = msecs_to_jiffies(1000);
+ ulong left = wait_for_completion_timeout(&wil->wmi_ready, to);
+ if (0 == left) {
+ wil_err(wil, "Firmware not ready\n");
+ return -ETIME;
+ } else {
+ wil_dbg(wil, "FW ready after %d ms\n",
+ jiffies_to_msecs(to-left));
+ }
+ return 0;
+}
+
+/*
+ * We reset all the structures, and we reset the UMAC.
+ * After calling this routine, you're expected to reload
+ * the firmware.
+ */
+int wil_reset(struct wil6210_priv *wil)
+{
+ int rc;
+
+ cancel_work_sync(&wil->disconnect_worker);
+ wil6210_disconnect(wil, NULL);
+
+ wmi_event_flush(wil);
+
+ flush_workqueue(wil->wmi_wq);
+ flush_workqueue(wil->wmi_wq_conn);
+
+ wil6210_disable_irq(wil);
+ wil->status = 0;
+
+ /* TODO: put MAC in reset */
+ wil_target_reset(wil);
+
+ /* init after reset */
+ wil->pending_connect_cid = -1;
+ INIT_COMPLETION(wil->wmi_ready);
+
+ /* make shadow copy of registers that should not change on run time */
+ wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX,
+ sizeof(struct wil6210_mbox_ctl));
+ wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx);
+ wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
+
+ /* TODO: release MAC reset */
+ wil6210_enable_irq(wil);
+
+ /* we just started MAC, wait for FW ready */
+ rc = wil_wait_for_fw_ready(wil);
+
+ return rc;
+}
+
+
+void wil_link_on(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ wil_dbg(wil, "%s()\n", __func__);
+
+ netif_carrier_on(ndev);
+ netif_tx_wake_all_queues(ndev);
+}
+
+void wil_link_off(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ wil_dbg(wil, "%s()\n", __func__);
+
+ netif_tx_stop_all_queues(ndev);
+ netif_carrier_off(ndev);
+}
+
+static int __wil_up(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *wdev = wil->wdev;
+ struct ieee80211_channel *channel = wdev->preset_chandef.chan;
+ int rc;
+ int bi;
+ u16 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+
+ rc = wil_reset(wil);
+ if (rc)
+ return rc;
+
+ /* FIXME Firmware works now in PBSS mode(ToDS=0, FromDS=0) */
+ wmi_nettype = wil_iftype_nl2wmi(NL80211_IFTYPE_ADHOC);
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ wil_dbg(wil, "type: STATION\n");
+ bi = 0;
+ ndev->type = ARPHRD_ETHER;
+ break;
+ case NL80211_IFTYPE_AP:
+ wil_dbg(wil, "type: AP\n");
+ bi = 100;
+ ndev->type = ARPHRD_ETHER;
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ wil_dbg(wil, "type: P2P_CLIENT\n");
+ bi = 0;
+ ndev->type = ARPHRD_ETHER;
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ wil_dbg(wil, "type: P2P_GO\n");
+ bi = 100;
+ ndev->type = ARPHRD_ETHER;
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ wil_dbg(wil, "type: Monitor\n");
+ bi = 0;
+ ndev->type = ARPHRD_IEEE80211_RADIOTAP;
+ /* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_RADIOTAP ? */
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* Apply profile in the following order: */
+ /* SSID and channel for the AP */
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ if (wdev->ssid_len == 0) {
+ wil_err(wil, "SSID not set\n");
+ return -EINVAL;
+ }
+ wmi_set_ssid(wil, wdev->ssid_len, wdev->ssid);
+ if (channel)
+ wmi_set_channel(wil, channel->hw_value);
+ break;
+ default:
+ ;
+ }
+
+ /* MAC address - pre-requisite for other commands */
+ wmi_set_mac_address(wil, ndev->dev_addr);
+
+ /* Set up beaconing if required. */
+ rc = wmi_set_bcon(wil, bi, wmi_nettype);
+ if (rc)
+ return rc;
+
+ /* Rx VRING. After MAC and beacon */
+ wil_rx_init(wil);
+
+ return 0;
+}
+
+int wil_up(struct wil6210_priv *wil)
+{
+ int rc;
+
+ mutex_lock(&wil->mutex);
+ rc = __wil_up(wil);
+ mutex_unlock(&wil->mutex);
+
+ return rc;
+}
+
+static int __wil_down(struct wil6210_priv *wil)
+{
+ if (wil->scan_request) {
+ cfg80211_scan_done(wil->scan_request, true);
+ wil->scan_request = NULL;
+ }
+
+ wil6210_disconnect(wil, NULL);
+ wil_rx_fini(wil);
+
+ return 0;
+}
+
+int wil_down(struct wil6210_priv *wil)
+{
+ int rc;
+
+ mutex_lock(&wil->mutex);
+ rc = __wil_down(wil);
+ mutex_unlock(&wil->mutex);
+
+ return rc;
+}
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
new file mode 100644
index 000000000000..3068b5cb53a7
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+
+#include "wil6210.h"
+
+static int wil_open(struct net_device *ndev)
+{
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+
+ return wil_up(wil);
+}
+
+static int wil_stop(struct net_device *ndev)
+{
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+
+ return wil_down(wil);
+}
+
+/*
+ * AC to queue mapping
+ *
+ * AC_VO -> queue 3
+ * AC_VI -> queue 2
+ * AC_BE -> queue 1
+ * AC_BK -> queue 0
+ */
+static u16 wil_select_queue(struct net_device *ndev, struct sk_buff *skb)
+{
+ static const u16 wil_1d_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+ u16 rc;
+
+ skb->priority = cfg80211_classify8021d(skb);
+
+ rc = wil_1d_to_queue[skb->priority];
+
+ wil_dbg_TXRX(wil, "%s() %d -> %d\n", __func__, (int)skb->priority,
+ (int)rc);
+
+ return rc;
+}
+
+static const struct net_device_ops wil_netdev_ops = {
+ .ndo_open = wil_open,
+ .ndo_stop = wil_stop,
+ .ndo_start_xmit = wil_start_xmit,
+ .ndo_select_queue = wil_select_queue,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+void *wil_if_alloc(struct device *dev, void __iomem *csr)
+{
+ struct net_device *ndev;
+ struct wireless_dev *wdev;
+ struct wil6210_priv *wil;
+ struct ieee80211_channel *ch;
+ int rc = 0;
+
+ wdev = wil_cfg80211_init(dev);
+ if (IS_ERR(wdev)) {
+ dev_err(dev, "wil_cfg80211_init failed\n");
+ return wdev;
+ }
+
+ wil = wdev_to_wil(wdev);
+ wil->csr = csr;
+ wil->wdev = wdev;
+
+ rc = wil_priv_init(wil);
+ if (rc) {
+ dev_err(dev, "wil_priv_init failed\n");
+ goto out_wdev;
+ }
+
+ wdev->iftype = NL80211_IFTYPE_STATION; /* TODO */
+ /* default monitor channel */
+ ch = wdev->wiphy->bands[IEEE80211_BAND_60GHZ]->channels;
+ cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT);
+
+ ndev = alloc_netdev_mqs(0, "wlan%d", ether_setup, WIL6210_TX_QUEUES, 1);
+ if (!ndev) {
+ dev_err(dev, "alloc_netdev_mqs failed\n");
+ rc = -ENOMEM;
+ goto out_priv;
+ }
+
+ ndev->netdev_ops = &wil_netdev_ops;
+ ndev->ieee80211_ptr = wdev;
+ SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
+ wdev->netdev = ndev;
+
+ wil_link_off(wil);
+
+ return wil;
+
+ out_priv:
+ wil_priv_deinit(wil);
+
+ out_wdev:
+ wil_wdev_free(wil);
+
+ return ERR_PTR(rc);
+}
+
+void wil_if_free(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ if (!ndev)
+ return;
+
+ free_netdev(ndev);
+ wil_priv_deinit(wil);
+ wil_wdev_free(wil);
+}
+
+int wil_if_add(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ int rc;
+
+ rc = register_netdev(ndev);
+ if (rc < 0) {
+ dev_err(&ndev->dev, "Failed to register netdev: %d\n", rc);
+ return rc;
+ }
+
+ wil_link_off(wil);
+
+ return 0;
+}
+
+void wil_if_remove(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ unregister_netdev(ndev);
+}
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
new file mode 100644
index 000000000000..0fc83edd6bad
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/debugfs.h>
+#include <linux/pci.h>
+#include <linux/moduleparam.h>
+
+#include "wil6210.h"
+
+static int use_msi = 1;
+module_param(use_msi, int, S_IRUGO);
+MODULE_PARM_DESC(use_msi,
+ " Use MSI interrupt: "
+ "0 - don't, 1 - (default) - single, or 3");
+
+/* Bus ops */
+static int wil_if_pcie_enable(struct wil6210_priv *wil)
+{
+ struct pci_dev *pdev = wil->pdev;
+ int rc;
+
+ pci_set_master(pdev);
+
+ /*
+ * how many MSI interrupts to request?
+ */
+ switch (use_msi) {
+ case 3:
+ case 1:
+ case 0:
+ break;
+ default:
+ wil_err(wil, "Invalid use_msi=%d, default to 1\n",
+ use_msi);
+ use_msi = 1;
+ }
+ wil->n_msi = use_msi;
+ if (wil->n_msi) {
+ wil_dbg(wil, "Setup %d MSI interrupts\n", use_msi);
+ rc = pci_enable_msi_block(pdev, wil->n_msi);
+ if (rc && (wil->n_msi == 3)) {
+ wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
+ wil->n_msi = 1;
+ rc = pci_enable_msi_block(pdev, wil->n_msi);
+ }
+ if (rc) {
+ wil_err(wil, "pci_enable_msi failed, use INTx\n");
+ wil->n_msi = 0;
+ }
+ } else {
+ wil_dbg(wil, "MSI interrupts disabled, use INTx\n");
+ }
+
+ rc = wil6210_init_irq(wil, pdev->irq);
+ if (rc)
+ goto stop_master;
+
+ /* need reset here to obtain MAC */
+ rc = wil_reset(wil);
+ if (rc)
+ goto release_irq;
+
+ return 0;
+
+ release_irq:
+ wil6210_fini_irq(wil, pdev->irq);
+ /* safe to call if no MSI */
+ pci_disable_msi(pdev);
+ stop_master:
+ pci_clear_master(pdev);
+ return rc;
+}
+
+static int wil_if_pcie_disable(struct wil6210_priv *wil)
+{
+ struct pci_dev *pdev = wil->pdev;
+
+ pci_clear_master(pdev);
+ /* disable and release IRQ */
+ wil6210_fini_irq(wil, pdev->irq);
+ /* safe to call if no MSI */
+ pci_disable_msi(pdev);
+ /* TODO: disable HW */
+
+ return 0;
+}
+
+static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct wil6210_priv *wil;
+ struct device *dev = &pdev->dev;
+ void __iomem *csr;
+ int rc;
+
+ /* check HW */
+ dev_info(&pdev->dev, WIL_NAME " device found [%04x:%04x] (rev %x)\n",
+ (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
+
+ if (pci_resource_len(pdev, 0) != WIL6210_MEM_SIZE) {
+ dev_err(&pdev->dev, "Not " WIL_NAME "? "
+ "BAR0 size is %lu while expecting %lu\n",
+ (ulong)pci_resource_len(pdev, 0), WIL6210_MEM_SIZE);
+ return -ENODEV;
+ }
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "pci_enable_device failed\n");
+ return -ENODEV;
+ }
+ /* rollback to err_disable_pdev */
+
+ rc = pci_request_region(pdev, 0, WIL_NAME);
+ if (rc) {
+ dev_err(&pdev->dev, "pci_request_region failed\n");
+ goto err_disable_pdev;
+ }
+ /* rollback to err_release_reg */
+
+ csr = pci_ioremap_bar(pdev, 0);
+ if (!csr) {
+ dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
+ rc = -ENODEV;
+ goto err_release_reg;
+ }
+ /* rollback to err_iounmap */
+ dev_info(&pdev->dev, "CSR at %pR -> %p\n", &pdev->resource[0], csr);
+
+ wil = wil_if_alloc(dev, csr);
+ if (IS_ERR(wil)) {
+ rc = (int)PTR_ERR(wil);
+ dev_err(dev, "wil_if_alloc failed: %d\n", rc);
+ goto err_iounmap;
+ }
+ /* rollback to if_free */
+
+ pci_set_drvdata(pdev, wil);
+ wil->pdev = pdev;
+
+ /* FW should raise IRQ when ready */
+ rc = wil_if_pcie_enable(wil);
+ if (rc) {
+ wil_err(wil, "Enable device failed\n");
+ goto if_free;
+ }
+ /* rollback to bus_disable */
+
+ rc = wil_if_add(wil);
+ if (rc) {
+ wil_err(wil, "wil_if_add failed: %d\n", rc);
+ goto bus_disable;
+ }
+
+ wil6210_debugfs_init(wil);
+
+ /* check FW is alive */
+ wmi_echo(wil);
+
+ return 0;
+
+ bus_disable:
+ wil_if_pcie_disable(wil);
+ if_free:
+ wil_if_free(wil);
+ err_iounmap:
+ pci_iounmap(pdev, csr);
+ err_release_reg:
+ pci_release_region(pdev, 0);
+ err_disable_pdev:
+ pci_disable_device(pdev);
+
+ return rc;
+}
+
+static void wil_pcie_remove(struct pci_dev *pdev)
+{
+ struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+ wil6210_debugfs_remove(wil);
+ wil_if_pcie_disable(wil);
+ wil_if_remove(wil);
+ wil_if_free(wil);
+ pci_iounmap(pdev, wil->csr);
+ pci_release_region(pdev, 0);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(wil6210_pcie_ids) = {
+ { PCI_DEVICE(0x1ae9, 0x0301) },
+ { /* end: all zeroes */ },
+};
+MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
+
+static struct pci_driver wil6210_driver = {
+ .probe = wil_pcie_probe,
+ .remove = wil_pcie_remove,
+ .id_table = wil6210_pcie_ids,
+ .name = WIL_NAME,
+};
+
+module_pci_driver(wil6210_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Qualcomm Atheros <wil6210@qca.qualcomm.com>");
+MODULE_DESCRIPTION("Driver for 60g WiFi WIL6210 card");
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
new file mode 100644
index 000000000000..f29c294413cf
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -0,0 +1,871 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/hardirq.h>
+#include <net/ieee80211_radiotap.h>
+#include <linux/if_arp.h>
+#include <linux/moduleparam.h>
+
+#include "wil6210.h"
+#include "wmi.h"
+#include "txrx.h"
+
+static bool rtap_include_phy_info;
+module_param(rtap_include_phy_info, bool, S_IRUGO);
+MODULE_PARM_DESC(rtap_include_phy_info,
+ " Include PHY info in the radiotap header, default - no");
+
+static inline int wil_vring_is_empty(struct vring *vring)
+{
+ return vring->swhead == vring->swtail;
+}
+
+static inline u32 wil_vring_next_tail(struct vring *vring)
+{
+ return (vring->swtail + 1) % vring->size;
+}
+
+static inline void wil_vring_advance_head(struct vring *vring, int n)
+{
+ vring->swhead = (vring->swhead + n) % vring->size;
+}
+
+static inline int wil_vring_is_full(struct vring *vring)
+{
+ return wil_vring_next_tail(vring) == vring->swhead;
+}
+/*
+ * Available space in Tx Vring
+ */
+static inline int wil_vring_avail_tx(struct vring *vring)
+{
+ u32 swhead = vring->swhead;
+ u32 swtail = vring->swtail;
+ int used = (vring->size + swhead - swtail) % vring->size;
+
+ return vring->size - used - 1;
+}
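
wil_vring_avail_tx() above uses the classic circular-buffer occupancy formula, used = (size + head - tail) % size, with one slot deliberately kept unused so that head == tail can only mean an empty ring. A small stand-alone sketch of the same arithmetic follows; avail_tx is a hypothetical helper, not part of the driver.

#include <stdio.h>

static int avail_tx(unsigned int size, unsigned int head, unsigned int tail)
{
	/* one slot always stays empty so head == tail means "ring empty" */
	unsigned int used = (size + head - tail) % size;

	return size - used - 1;
}

int main(void)
{
	printf("%d\n", avail_tx(128, 0, 0));	/* empty ring: 127 free */
	printf("%d\n", avail_tx(128, 5, 120));	/* wrapped around: used 13, 114 free */
	return 0;
}
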
+
+static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz = vring->size * sizeof(vring->va[0]);
+ uint i;
+
+ BUILD_BUG_ON(sizeof(vring->va[0]) != 32);
+
+ vring->swhead = 0;
+ vring->swtail = 0;
+ vring->ctx = kzalloc(vring->size * sizeof(vring->ctx[0]), GFP_KERNEL);
+ if (!vring->ctx) {
+ wil_err(wil, "vring_alloc [%d] failed to alloc ctx mem\n",
+ vring->size);
+ vring->va = NULL;
+ return -ENOMEM;
+ }
+ /*
+ * vring->va should be aligned on its size rounded up to power of 2
+ * This is granted by the dma_alloc_coherent
+ */
+ vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
+ if (!vring->va) {
+ wil_err(wil, "vring_alloc [%d] failed to alloc DMA mem\n",
+ vring->size);
+ kfree(vring->ctx);
+ vring->ctx = NULL;
+ return -ENOMEM;
+ }
+ /* initially, all descriptors are SW owned
+ * For Tx and Rx, ownership bit is at the same location, thus
+ * we can use any
+ */
+ for (i = 0; i < vring->size; i++) {
+ volatile struct vring_tx_desc *d = &(vring->va[i].tx);
+ d->dma.status = TX_DMA_STATUS_DU;
+ }
+
+ wil_dbg(wil, "vring[%d] 0x%p:0x%016llx 0x%p\n", vring->size,
+ vring->va, (unsigned long long)vring->pa, vring->ctx);
+
+ return 0;
+}
+
+static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
+ int tx)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz = vring->size * sizeof(vring->va[0]);
+
+ while (!wil_vring_is_empty(vring)) {
+ if (tx) {
+ volatile struct vring_tx_desc *d =
+ &vring->va[vring->swtail].tx;
+ dma_addr_t pa = d->dma.addr_low |
+ ((u64)d->dma.addr_high << 32);
+ struct sk_buff *skb = vring->ctx[vring->swtail];
+ if (skb) {
+ dma_unmap_single(dev, pa, d->dma.length,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ vring->ctx[vring->swtail] = NULL;
+ } else {
+ dma_unmap_page(dev, pa, d->dma.length,
+ DMA_TO_DEVICE);
+ }
+ vring->swtail = wil_vring_next_tail(vring);
+ } else { /* rx */
+ volatile struct vring_rx_desc *d =
+ &vring->va[vring->swtail].rx;
+ dma_addr_t pa = d->dma.addr_low |
+ ((u64)d->dma.addr_high << 32);
+ struct sk_buff *skb = vring->ctx[vring->swhead];
+ dma_unmap_single(dev, pa, d->dma.length,
+ DMA_FROM_DEVICE);
+ kfree_skb(skb);
+ wil_vring_advance_head(vring, 1);
+ }
+ }
+ dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
+ kfree(vring->ctx);
+ vring->pa = 0;
+ vring->va = NULL;
+ vring->ctx = NULL;
+}
+
+/**
+ * Allocate one skb for Rx VRING
+ *
+ * Safe to call from IRQ
+ */
+static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
+ u32 i, int headroom)
+{
+ struct device *dev = wil_to_dev(wil);
+ unsigned int sz = RX_BUF_LEN;
+ volatile struct vring_rx_desc *d = &(vring->va[i].rx);
+ dma_addr_t pa;
+
+ /* TODO align */
+ struct sk_buff *skb = dev_alloc_skb(sz + headroom);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ skb_reserve(skb, headroom);
+ skb_put(skb, sz);
+
+ pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, pa))) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
+ d->dma.addr_low = lower_32_bits(pa);
+ d->dma.addr_high = (u16)upper_32_bits(pa);
+ /* ip_length don't care */
+ /* b11 don't care */
+ /* error don't care */
+ d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
+ d->dma.length = sz;
+ vring->ctx[i] = skb;
+
+ return 0;
+}
+
+/**
+ * Adds radiotap header
+ *
+ * Any error indicated as "Bad FCS"
+ *
+ * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
+ * - Rx descriptor: 32 bytes
+ * - Phy info
+ */
+static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
+ struct sk_buff *skb,
+ volatile struct vring_rx_desc *d)
+{
+ struct wireless_dev *wdev = wil->wdev;
+ struct wil6210_rtap {
+ struct ieee80211_radiotap_header rthdr;
+ /* fields should be in the order of bits in rthdr.it_present */
+ /* flags */
+ u8 flags;
+ /* channel */
+ __le16 chnl_freq __aligned(2);
+ __le16 chnl_flags;
+ /* MCS */
+ u8 mcs_present;
+ u8 mcs_flags;
+ u8 mcs_index;
+ } __packed;
+ struct wil6210_rtap_vendor {
+ struct wil6210_rtap rtap;
+ /* vendor */
+ u8 vendor_oui[3] __aligned(2);
+ u8 vendor_ns;
+ __le16 vendor_skip;
+ u8 vendor_data[0];
+ } __packed;
+ struct wil6210_rtap_vendor *rtap_vendor;
+ int rtap_len = sizeof(struct wil6210_rtap);
+ int phy_length = 0; /* phy info header size, bytes */
+ static char phy_data[128];
+ struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+
+ if (rtap_include_phy_info) {
+ rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
+ /* calculate additional length */
+ if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
+ /**
+ * PHY info starts from 8-byte boundary
+ * there are 8-byte lines, last line may be partially
+ * written (HW bug), thus FW configures for last line
+ * to be excessive. Driver skips this last line.
+ */
+ int len = min_t(int, 8 + sizeof(phy_data),
+ wil_rxdesc_phy_length(d));
+ if (len > 8) {
+ void *p = skb_tail_pointer(skb);
+ void *pa = PTR_ALIGN(p, 8);
+ if (skb_tailroom(skb) >= len + (pa - p)) {
+ phy_length = len - 8;
+ memcpy(phy_data, pa, phy_length);
+ }
+ }
+ }
+ rtap_len += phy_length;
+ }
+
+ if (skb_headroom(skb) < rtap_len &&
+ pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
+ wil_err(wil, "Unable to expand headrom to %d\n", rtap_len);
+ return;
+ }
+
+ rtap_vendor = (void *)skb_push(skb, rtap_len);
+ memset(rtap_vendor, 0, rtap_len);
+
+ rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
+ rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
+ rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
+ (1 << IEEE80211_RADIOTAP_FLAGS) |
+ (1 << IEEE80211_RADIOTAP_CHANNEL) |
+ (1 << IEEE80211_RADIOTAP_MCS));
+ if (d->dma.status & RX_DMA_STATUS_ERROR)
+ rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;
+
+ rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
+ rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);
+
+ rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
+ rtap_vendor->rtap.mcs_flags = 0;
+ rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);
+
+ if (rtap_include_phy_info) {
+ rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
+ IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
+ /* OUI for Wilocity 04:ce:14 */
+ rtap_vendor->vendor_oui[0] = 0x04;
+ rtap_vendor->vendor_oui[1] = 0xce;
+ rtap_vendor->vendor_oui[2] = 0x14;
+ rtap_vendor->vendor_ns = 1;
+ /* Rx descriptor + PHY data */
+ rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
+ phy_length);
+ memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
+ memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
+ phy_length);
+ }
+}
+
+/*
+ * Fast swap in place between 2 registers
+ */
+static void wil_swap_u16(u16 *a, u16 *b)
+{
+ *a ^= *b;
+ *b ^= *a;
+ *a ^= *b;
+}
+
+static void wil_swap_ethaddr(void *data)
+{
+ struct ethhdr *eth = data;
+ u16 *s = (u16 *)eth->h_source;
+ u16 *d = (u16 *)eth->h_dest;
+
+ wil_swap_u16(s++, d++);
+ wil_swap_u16(s++, d++);
+ wil_swap_u16(s, d);
+}
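
wil_swap_u16() is the classic three-XOR in-place swap; it only works because the two operands are distinct locations (h_source and h_dest never alias), since XOR-swapping a value with itself zeroes it. The stand-alone sketch below swaps two hypothetical MAC addresses in three 16-bit chunks, as wil_swap_ethaddr() does.

#include <stdio.h>
#include <stdint.h>

static void swap_u16(uint16_t *a, uint16_t *b)
{
	/* three-XOR swap, no temporary; requires a and b to be distinct */
	*a ^= *b;
	*b ^= *a;
	*a ^= *b;
}

int main(void)
{
	/* hypothetical source/destination MAC addresses, 3 x 16 bits each */
	uint16_t src[3] = { 0x0011, 0x2233, 0x4455 };
	uint16_t dst[3] = { 0x6677, 0x8899, 0xaabb };
	int i;

	for (i = 0; i < 3; i++)
		swap_u16(&src[i], &dst[i]);

	printf("src %04x %04x %04x\n", src[0], src[1], src[2]);	/* 6677 8899 aabb */
	printf("dst %04x %04x %04x\n", dst[0], dst[1], dst[2]);	/* 0011 2233 4455 */
	return 0;
}
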
+
+/**
+ * reap 1 frame from @swhead
+ *
+ * Safe to call from IRQ
+ */
+static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
+ struct vring *vring)
+{
+ struct device *dev = wil_to_dev(wil);
+ struct net_device *ndev = wil_to_ndev(wil);
+ volatile struct vring_rx_desc *d;
+ struct sk_buff *skb;
+ dma_addr_t pa;
+ unsigned int sz = RX_BUF_LEN;
+ u8 ftype;
+ u8 ds_bits;
+
+ if (wil_vring_is_empty(vring))
+ return NULL;
+
+ d = &(vring->va[vring->swhead].rx);
+ if (!(d->dma.status & RX_DMA_STATUS_DU)) {
+ /* it is not error, we just reached end of Rx done area */
+ return NULL;
+ }
+
+ pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
+ skb = vring->ctx[vring->swhead];
+ dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
+ skb_trim(skb, d->dma.length);
+
+ wil->stats.last_mcs_rx = wil_rxdesc_mcs(d);
+
+ /* use radiotap header only if required */
+ if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
+ wil_rx_add_radiotap_header(wil, skb, d);
+
+ wil_dbg_TXRX(wil, "Rx[%3d] : %d bytes\n", vring->swhead, d->dma.length);
+ wil_hex_dump_TXRX("Rx ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+
+ wil_vring_advance_head(vring, 1);
+
+ /* no extra checks if in sniffer mode */
+ if (ndev->type != ARPHRD_ETHER)
+ return skb;
+ /*
+ * Non-data frames may be delivered through Rx DMA channel (ex: BAR)
+ * Driver should recognize it by frame type, that is found
+ * in Rx descriptor. If type is not data, it is 802.11 frame as is
+ */
+ ftype = wil_rxdesc_ftype(d) << 2;
+ if (ftype != IEEE80211_FTYPE_DATA) {
+ wil_dbg_TXRX(wil, "Non-data frame ftype 0x%08x\n", ftype);
+ /* TODO: process it */
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ if (skb->len < ETH_HLEN) {
+ wil_err(wil, "Short frame, len = %d\n", skb->len);
+ /* TODO: process it (i.e. BAR) */
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ ds_bits = wil_rxdesc_ds_bits(d);
+ if (ds_bits == 1) {
+ /*
+ * HW bug - in ToDS mode, i.e. Rx on AP side,
+ * addresses get swapped
+ */
+ wil_swap_ethaddr(skb->data);
+ }
+
+ return skb;
+}
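
In wil_vring_reap_rx() the descriptor holds only the raw two-bit frame type, while the IEEE80211_FTYPE_* constants are already positioned at frame-control bits 2..3; hence the << 2 before the comparison. A tiny stand-alone check of that shift is sketched below; FTYPE_DATA is redefined locally to keep it self-contained (in the kernel it is IEEE80211_FTYPE_DATA, value 0x0008).

#include <stdio.h>

#define FTYPE_DATA 0x0008	/* stands in for IEEE80211_FTYPE_DATA */

int main(void)
{
	unsigned int raw_type = 2;	/* two-bit frame type as held in the Rx descriptor */

	printf("%s\n", ((raw_type << 2) == FTYPE_DATA) ? "data" : "other");	/* prints "data" */
	return 0;
}
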
+
+/**
+ * allocate and fill up to @count buffers in rx ring
+ * buffers posted at @swtail
+ */
+static int wil_rx_refill(struct wil6210_priv *wil, int count)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct vring *v = &wil->vring_rx;
+ u32 next_tail;
+ int rc = 0;
+ int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
+ WIL6210_RTAP_SIZE : 0;
+
+ for (; next_tail = wil_vring_next_tail(v),
+ (next_tail != v->swhead) && (count-- > 0);
+ v->swtail = next_tail) {
+ rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
+ if (rc) {
+ wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
+ rc, v->swtail);
+ break;
+ }
+ }
+ iowrite32(v->swtail, wil->csr + HOSTADDR(v->hwtail));
+
+ return rc;
+}
+
+/*
+ * Pass Rx packet to the netif. Update statistics.
+ */
+static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
+{
+ int rc;
+ unsigned int len = skb->len;
+
+ if (in_interrupt())
+ rc = netif_rx(skb);
+ else
+ rc = netif_rx_ni(skb);
+
+ if (likely(rc == NET_RX_SUCCESS)) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+
+ } else {
+ ndev->stats.rx_dropped++;
+ }
+}
+
+/**
+ * Process all completed skb's from Rx VRING
+ *
+ * Safe to call from IRQ
+ */
+void wil_rx_handle(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct vring *v = &wil->vring_rx;
+ struct sk_buff *skb;
+
+ if (!v->va) {
+ wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
+ return;
+ }
+ wil_dbg_TXRX(wil, "%s()\n", __func__);
+ while (NULL != (skb = wil_vring_reap_rx(wil, v))) {
+ wil_hex_dump_TXRX("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb_headlen(skb), false);
+
+ skb_orphan(skb);
+
+ if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
+ skb->dev = ndev;
+ skb_reset_mac_header(skb);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->pkt_type = PACKET_OTHERHOST;
+ skb->protocol = htons(ETH_P_802_2);
+
+ } else {
+ skb->protocol = eth_type_trans(skb, ndev);
+ }
+
+ wil_netif_rx_any(skb, ndev);
+ }
+ wil_rx_refill(wil, v->size);
+}
+
+int wil_rx_init(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *wdev = wil->wdev;
+ struct vring *vring = &wil->vring_rx;
+ int rc;
+ struct wmi_cfg_rx_chain_cmd cmd = {
+ .action = WMI_RX_CHAIN_ADD,
+ .rx_sw_ring = {
+ .max_mpdu_size = cpu_to_le16(RX_BUF_LEN),
+ },
+ .mid = 0, /* TODO - what is it? */
+ .decap_trans_type = WMI_DECAP_TYPE_802_3,
+ };
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cfg_rx_chain_done_event evt;
+ } __packed evt;
+
+ vring->size = WIL6210_RX_RING_SIZE;
+ rc = wil_vring_alloc(wil, vring);
+ if (rc)
+ return rc;
+
+ cmd.rx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
+ cmd.rx_sw_ring.ring_size = cpu_to_le16(vring->size);
+ if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
+ struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+
+ cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON);
+ if (ch)
+ cmd.sniffer_cfg.channel = ch->hw_value - 1;
+ cmd.sniffer_cfg.phy_info_mode =
+ cpu_to_le32(ndev->type == ARPHRD_IEEE80211_RADIOTAP);
+ cmd.sniffer_cfg.phy_support =
+ cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL)
+ ? WMI_SNIFFER_CP : WMI_SNIFFER_DP);
+ }
+ /* typical time for secure PCP is 840ms */
+ rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
+ WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000);
+ if (rc)
+ goto err_free;
+
+ vring->hwtail = le32_to_cpu(evt.evt.rx_ring_tail_ptr);
+
+ wil_dbg(wil, "Rx init: status %d tail 0x%08x\n",
+ le32_to_cpu(evt.evt.status), vring->hwtail);
+
+ rc = wil_rx_refill(wil, vring->size);
+ if (rc)
+ goto err_free;
+
+ return 0;
+ err_free:
+ wil_vring_free(wil, vring, 0);
+
+ return rc;
+}
+
+void wil_rx_fini(struct wil6210_priv *wil)
+{
+ struct vring *vring = &wil->vring_rx;
+
+ if (vring->va) {
+ int rc;
+ struct wmi_cfg_rx_chain_cmd cmd = {
+ .action = cpu_to_le32(WMI_RX_CHAIN_DEL),
+ .rx_sw_ring = {
+ .max_mpdu_size = cpu_to_le16(RX_BUF_LEN),
+ },
+ };
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cfg_rx_chain_done_event cfg;
+ } __packed wmi_rx_cfg_reply;
+
+ rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
+ WMI_CFG_RX_CHAIN_DONE_EVENTID,
+ &wmi_rx_cfg_reply, sizeof(wmi_rx_cfg_reply),
+ 100);
+ wil_vring_free(wil, vring, 0);
+ }
+}
+
+int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
+ int cid, int tid)
+{
+ int rc;
+ struct wmi_vring_cfg_cmd cmd = {
+ .action = cpu_to_le32(WMI_VRING_CMD_ADD),
+ .vring_cfg = {
+ .tx_sw_ring = {
+ .max_mpdu_size = cpu_to_le16(TX_BUF_LEN),
+ },
+ .ringid = id,
+ .cidxtid = (cid & 0xf) | ((tid & 0xf) << 4),
+ .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
+ .mac_ctrl = 0,
+ .to_resolution = 0,
+ .agg_max_wsize = 16,
+ .schd_params = {
+ .priority = cpu_to_le16(0),
+ .timeslot_us = cpu_to_le16(0xfff),
+ },
+ },
+ };
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_vring_cfg_done_event cmd;
+ } __packed reply;
+ struct vring *vring = &wil->vring_tx[id];
+
+ if (vring->va) {
+ wil_err(wil, "Tx ring [%d] already allocated\n", id);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ vring->size = size;
+ rc = wil_vring_alloc(wil, vring);
+ if (rc)
+ goto out;
+
+ cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
+ cmd.vring_cfg.tx_sw_ring.ring_size = cpu_to_le16(vring->size);
+
+ rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
+ WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
+ if (rc)
+ goto out_free;
+
+ if (reply.cmd.status != WMI_VRING_CFG_SUCCESS) {
+ wil_err(wil, "Tx config failed, status 0x%02x\n",
+ reply.cmd.status);
+ goto out_free;
+ }
+ vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
+
+ return 0;
+ out_free:
+ wil_vring_free(wil, vring, 1);
+ out:
+
+ return rc;
+}
+
+void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
+{
+ struct vring *vring = &wil->vring_tx[id];
+
+ if (!vring->va)
+ return;
+
+ wil_vring_free(wil, vring, 1);
+}
+
+static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
+ struct sk_buff *skb)
+{
+ struct vring *v = &wil->vring_tx[0];
+
+ if (v->va)
+ return v;
+
+ return NULL;
+}
+
+static int wil_tx_desc_map(volatile struct vring_tx_desc *d,
+ dma_addr_t pa, u32 len)
+{
+ d->dma.addr_low = lower_32_bits(pa);
+ d->dma.addr_high = (u16)upper_32_bits(pa);
+ d->dma.ip_length = 0;
+ /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
+ d->dma.b11 = 0/*14 | BIT(7)*/;
+ d->dma.error = 0;
+ d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
+ d->dma.length = len;
+ d->dma.d0 = 0;
+ d->mac.d[0] = 0;
+ d->mac.d[1] = 0;
+ d->mac.d[2] = 0;
+ d->mac.ucode_cmd = 0;
+ /* use dst index 0 */
+ d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_DST_INDEX_EN_POS) |
+ (0 << MAC_CFG_DESC_TX_1_DST_INDEX_POS);
+ /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
+ d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
+ (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
+
+ return 0;
+}
+
+static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
+ struct sk_buff *skb)
+{
+ struct device *dev = wil_to_dev(wil);
+ volatile struct vring_tx_desc *d;
+ u32 swhead = vring->swhead;
+ int avail = wil_vring_avail_tx(vring);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ uint f;
+ int vring_index = vring - wil->vring_tx;
+ uint i = swhead;
+ dma_addr_t pa;
+
+ wil_dbg_TXRX(wil, "%s()\n", __func__);
+
+ if (avail < vring->size/8)
+ netif_tx_stop_all_queues(wil_to_ndev(wil));
+ if (avail < 1 + nr_frags) {
+ wil_err(wil, "Tx ring full. No space for %d fragments\n",
+ 1 + nr_frags);
+ return -ENOMEM;
+ }
+ d = &(vring->va[i].tx);
+
+ /* FIXME FW can accept only unicast frames for the peer */
+ memcpy(skb->data, wil->dst_addr[vring_index], ETH_ALEN);
+
+ pa = dma_map_single(dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+
+ wil_dbg_TXRX(wil, "Tx skb %d bytes %p -> %#08llx\n", skb_headlen(skb),
+ skb->data, (unsigned long long)pa);
+ wil_hex_dump_TXRX("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb_headlen(skb), false);
+
+ if (unlikely(dma_mapping_error(dev, pa)))
+ return -EINVAL;
+ /* 1-st segment */
+ wil_tx_desc_map(d, pa, skb_headlen(skb));
+ d->mac.d[2] |= ((nr_frags + 1) <<
+ MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
+ /* middle segments */
+ for (f = 0; f < nr_frags; f++) {
+ const struct skb_frag_struct *frag =
+ &skb_shinfo(skb)->frags[f];
+ int len = skb_frag_size(frag);
+ i = (swhead + f + 1) % vring->size;
+ d = &(vring->va[i].tx);
+ pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, pa)))
+ goto dma_error;
+ wil_tx_desc_map(d, pa, len);
+ vring->ctx[i] = NULL;
+ }
+ /* for the last seg only */
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
+ d->dma.d0 |= BIT(9); /* BUG: undocumented bit */
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
+ d->dma.d0 |= (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
+
+ wil_hex_dump_TXRX("Tx ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+
+ /* advance swhead */
+ wil_vring_advance_head(vring, nr_frags + 1);
+ wil_dbg_TXRX(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead);
+ iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
+ /* hold reference to skb
+ * to prevent skb release before accounting
+ * in case of immediate "tx done"
+ */
+ vring->ctx[i] = skb_get(skb);
+
+ return 0;
+ dma_error:
+ /* unmap what we have mapped */
+ /* Note: increment @f to operate with positive index */
+ for (f++; f > 0; f--) {
+ i = (swhead + f) % vring->size;
+ d = &(vring->va[i].tx);
+ d->dma.status = TX_DMA_STATUS_DU;
+ pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
+ if (vring->ctx[i])
+ dma_unmap_single(dev, pa, d->dma.length, DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, pa, d->dma.length, DMA_TO_DEVICE);
+ }
+
+ return -EINVAL;
+}
+
+
+netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+ struct vring *vring;
+ int rc;
+
+ wil_dbg_TXRX(wil, "%s()\n", __func__);
+ if (!test_bit(wil_status_fwready, &wil->status)) {
+ wil_err(wil, "FW not ready\n");
+ goto drop;
+ }
+ if (!test_bit(wil_status_fwconnected, &wil->status)) {
+ wil_err(wil, "FW not connected\n");
+ goto drop;
+ }
+ if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
+ wil_err(wil, "Xmit in monitor mode not supported\n");
+ goto drop;
+ }
+ if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
+ rc = wmi_tx_eapol(wil, skb);
+ } else {
+ /* find vring */
+ vring = wil_find_tx_vring(wil, skb);
+ if (!vring) {
+ wil_err(wil, "No Tx VRING available\n");
+ goto drop;
+ }
+ /* set up vring entry */
+ rc = wil_tx_vring(wil, vring, skb);
+ }
+ switch (rc) {
+ case 0:
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ case -ENOMEM:
+ return NETDEV_TX_BUSY;
+ default:
+ ; /* goto drop; */
+ break;
+ }
+ drop:
+ netif_tx_stop_all_queues(ndev);
+ ndev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+
+ return NET_XMIT_DROP;
+}
+
+/**
+ * Clean up transmitted skb's from the Tx VRING
+ *
+ * Safe to call from IRQ
+ */
+void wil_tx_complete(struct wil6210_priv *wil, int ringid)
+{
+ struct device *dev = wil_to_dev(wil);
+ struct vring *vring = &wil->vring_tx[ringid];
+
+ if (!vring->va) {
+ wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
+ return;
+ }
+
+ wil_dbg_TXRX(wil, "%s(%d)\n", __func__, ringid);
+
+ while (!wil_vring_is_empty(vring)) {
+ volatile struct vring_tx_desc *d = &vring->va[vring->swtail].tx;
+ dma_addr_t pa;
+ struct sk_buff *skb;
+ if (!(d->dma.status & TX_DMA_STATUS_DU))
+ break;
+
+ wil_dbg_TXRX(wil,
+ "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
+ vring->swtail, d->dma.length, d->dma.status,
+ d->dma.error);
+ wil_hex_dump_TXRX("TxC ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+
+ pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
+ skb = vring->ctx[vring->swtail];
+ if (skb) {
+ dma_unmap_single(dev, pa, d->dma.length, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ vring->ctx[vring->swtail] = NULL;
+ } else {
+ dma_unmap_page(dev, pa, d->dma.length, DMA_TO_DEVICE);
+ }
+ d->dma.addr_low = 0;
+ d->dma.addr_high = 0;
+ d->dma.length = 0;
+ d->dma.status = TX_DMA_STATUS_DU;
+ vring->swtail = wil_vring_next_tail(vring);
+ }
+ if (wil_vring_avail_tx(vring) > vring->size/4)
+ netif_tx_wake_all_queues(wil_to_ndev(wil));
+}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
new file mode 100644
index 000000000000..45a61f597c5c
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef WIL6210_TXRX_H
+#define WIL6210_TXRX_H
+
+#define BUF_SW_OWNED (1)
+#define BUF_HW_OWNED (0)
+
+/* size of max. Rx packet */
+#define RX_BUF_LEN (2048)
+#define TX_BUF_LEN (2048)
+/* how many bytes to reserve for rtap header? */
+#define WIL6210_RTAP_SIZE (128)
+
+/* Tx/Rx path */
+/*
+ * Tx descriptor - MAC part
+ * [dword 0]
+ * bit 0.. 9 : lifetime_expiry_value:10
+ * bit 10 : interrup_en:1
+ * bit 11 : status_en:1
+ * bit 12..13 : txss_override:2
+ * bit 14 : timestamp_insertion:1
+ * bit 15 : duration_preserve:1
+ * bit 16..21 : reserved0:6
+ * bit 22..26 : mcs_index:5
+ * bit 27 : mcs_en:1
+ * bit 28..29 : reserved1:2
+ * bit 30 : reserved2:1
+ * bit 31 : sn_preserved:1
+ * [dword 1]
+ * bit 0.. 3 : pkt_mode:4
+ * bit 4 : pkt_mode_en:1
+ * bit 5.. 7 : reserved0:3
+ * bit 8..13 : reserved1:6
+ * bit 14 : reserved2:1
+ * bit 15 : ack_policy_en:1
+ * bit 16..19 : dst_index:4
+ * bit 20 : dst_index_en:1
+ * bit 21..22 : ack_policy:2
+ * bit 23 : lifetime_en:1
+ * bit 24..30 : max_retry:7
+ * bit 31 : max_retry_en:1
+ * [dword 2]
+ * bit 0.. 7 : num_of_descriptors:8
+ * bit 8..17 : reserved:10
+ * bit 18..19 : l2_translation_type:2
+ * bit 20 : snap_hdr_insertion_en:1
+ * bit 21 : vlan_removal_en:1
+ * bit 22..31 : reserved0:10
+ * [dword 3]
+ * bit 0.. 31: ucode_cmd:32
+ */
+struct vring_tx_mac {
+ u32 d[3];
+ u32 ucode_cmd;
+} __packed;
+
+/* TX MAC Dword 0 */
+#define MAC_CFG_DESC_TX_0_LIFETIME_EXPIRY_VALUE_POS 0
+#define MAC_CFG_DESC_TX_0_LIFETIME_EXPIRY_VALUE_LEN 10
+#define MAC_CFG_DESC_TX_0_LIFETIME_EXPIRY_VALUE_MSK 0x3FF
+
+#define MAC_CFG_DESC_TX_0_INTERRUP_EN_POS 10
+#define MAC_CFG_DESC_TX_0_INTERRUP_EN_LEN 1
+#define MAC_CFG_DESC_TX_0_INTERRUP_EN_MSK 0x400
+
+#define MAC_CFG_DESC_TX_0_STATUS_EN_POS 11
+#define MAC_CFG_DESC_TX_0_STATUS_EN_LEN 1
+#define MAC_CFG_DESC_TX_0_STATUS_EN_MSK 0x800
+
+#define MAC_CFG_DESC_TX_0_TXSS_OVERRIDE_POS 12
+#define MAC_CFG_DESC_TX_0_TXSS_OVERRIDE_LEN 2
+#define MAC_CFG_DESC_TX_0_TXSS_OVERRIDE_MSK 0x3000
+
+#define MAC_CFG_DESC_TX_0_TIMESTAMP_INSERTION_POS 14
+#define MAC_CFG_DESC_TX_0_TIMESTAMP_INSERTION_LEN 1
+#define MAC_CFG_DESC_TX_0_TIMESTAMP_INSERTION_MSK 0x4000
+
+#define MAC_CFG_DESC_TX_0_DURATION_PRESERVE_POS 15
+#define MAC_CFG_DESC_TX_0_DURATION_PRESERVE_LEN 1
+#define MAC_CFG_DESC_TX_0_DURATION_PRESERVE_MSK 0x8000
+
+#define MAC_CFG_DESC_TX_0_MCS_INDEX_POS 22
+#define MAC_CFG_DESC_TX_0_MCS_INDEX_LEN 5
+#define MAC_CFG_DESC_TX_0_MCS_INDEX_MSK 0x7C00000
+
+#define MAC_CFG_DESC_TX_0_MCS_EN_POS 27
+#define MAC_CFG_DESC_TX_0_MCS_EN_LEN 1
+#define MAC_CFG_DESC_TX_0_MCS_EN_MSK 0x8000000
+
+#define MAC_CFG_DESC_TX_0_SN_PRESERVED_POS 31
+#define MAC_CFG_DESC_TX_0_SN_PRESERVED_LEN 1
+#define MAC_CFG_DESC_TX_0_SN_PRESERVED_MSK 0x80000000
+
+/* TX MAC Dword 1 */
+#define MAC_CFG_DESC_TX_1_PKT_MODE_POS 0
+#define MAC_CFG_DESC_TX_1_PKT_MODE_LEN 4
+#define MAC_CFG_DESC_TX_1_PKT_MODE_MSK 0xF
+
+#define MAC_CFG_DESC_TX_1_PKT_MODE_EN_POS 4
+#define MAC_CFG_DESC_TX_1_PKT_MODE_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_PKT_MODE_EN_MSK 0x10
+
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_EN_POS 15
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_EN_MSK 0x8000
+
+#define MAC_CFG_DESC_TX_1_DST_INDEX_POS 16
+#define MAC_CFG_DESC_TX_1_DST_INDEX_LEN 4
+#define MAC_CFG_DESC_TX_1_DST_INDEX_MSK 0xF0000
+
+#define MAC_CFG_DESC_TX_1_DST_INDEX_EN_POS 20
+#define MAC_CFG_DESC_TX_1_DST_INDEX_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_DST_INDEX_EN_MSK 0x100000
+
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_POS 21
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_LEN 2
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_MSK 0x600000
+
+#define MAC_CFG_DESC_TX_1_LIFETIME_EN_POS 23
+#define MAC_CFG_DESC_TX_1_LIFETIME_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_LIFETIME_EN_MSK 0x800000
+
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_POS 24
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_LEN 7
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_MSK 0x7F000000
+
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_EN_POS 31
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_EN_MSK 0x80000000
+
+/* TX MAC Dword 2 */
+#define MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS 0
+#define MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_LEN 8
+#define MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_MSK 0xFF
+
+#define MAC_CFG_DESC_TX_2_RESERVED_POS 8
+#define MAC_CFG_DESC_TX_2_RESERVED_LEN 10
+#define MAC_CFG_DESC_TX_2_RESERVED_MSK 0x3FF00
+
+#define MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS 18
+#define MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_LEN 2
+#define MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_MSK 0xC0000
+
+#define MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS 20
+#define MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_LEN 1
+#define MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_MSK 0x100000
+
+#define MAC_CFG_DESC_TX_2_VLAN_REMOVAL_EN_POS 21
+#define MAC_CFG_DESC_TX_2_VLAN_REMOVAL_EN_LEN 1
+#define MAC_CFG_DESC_TX_2_VLAN_REMOVAL_EN_MSK 0x200000
+
+/* TX MAC Dword 3 */
+#define MAC_CFG_DESC_TX_3_UCODE_CMD_POS 0
+#define MAC_CFG_DESC_TX_3_UCODE_CMD_LEN 32
+#define MAC_CFG_DESC_TX_3_UCODE_CMD_MSK 0xFFFFFFFF
+
+/* TX DMA Dword 0 */
+#define DMA_CFG_DESC_TX_0_L4_LENGTH_POS 0
+#define DMA_CFG_DESC_TX_0_L4_LENGTH_LEN 8
+#define DMA_CFG_DESC_TX_0_L4_LENGTH_MSK 0xFF
+
+#define DMA_CFG_DESC_TX_0_CMD_EOP_POS 8
+#define DMA_CFG_DESC_TX_0_CMD_EOP_LEN 1
+#define DMA_CFG_DESC_TX_0_CMD_EOP_MSK 0x100
+
+#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS 10
+#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_LEN 1
+#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_MSK 0x400
+
+#define DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS 11
+#define DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_LEN 2
+#define DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_MSK 0x1800
+
+#define DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS 13
+#define DMA_CFG_DESC_TX_0_TCP_SEG_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_TCP_SEG_EN_MSK 0x2000
+
+#define DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS 14
+#define DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_MSK 0x4000
+
+#define DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS 15
+#define DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_MSK 0x8000
+
+#define DMA_CFG_DESC_TX_0_QID_POS 16
+#define DMA_CFG_DESC_TX_0_QID_LEN 5
+#define DMA_CFG_DESC_TX_0_QID_MSK 0x1F0000
+
+#define DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS 21
+#define DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_MSK 0x200000
+
+#define DMA_CFG_DESC_TX_0_L4_TYPE_POS 30
+#define DMA_CFG_DESC_TX_0_L4_TYPE_LEN 2
+#define DMA_CFG_DESC_TX_0_L4_TYPE_MSK 0xC0000000
+
+
+#define TX_DMA_STATUS_DU BIT(0)
+
+struct vring_tx_dma {
+ u32 d0;
+ u32 addr_low;
+ u16 addr_high;
+ u8 ip_length;
+ u8 b11; /* 0..6: mac_length; 7:ip_version */
+ u8 error; /* 0..2: err; 3..7: reserved; */
+ u8 status; /* 0: used; 1..7; reserved */
+ u16 length;
+} __packed;
+
+/*
+ * Rx descriptor - MAC part
+ * [dword 0]
+ * bit 0.. 3 : tid:4 The QoS (b3-0) TID Field
+ * bit 4.. 6 : connection_id:3 :The Source index that was found during
+ * Parsing the TA. This field is used to define the source of the packet
+ * bit 7 : reserved:1
+ * bit 8.. 9 : mac_id:2 : The MAC virtual Ring number (always zero)
+ * bit 10..11 : frame_type:2 : The FC Control (b3-2) - MPDU Type
+ * (management, data, control and extension)
+ * bit 12..15 : frame_subtype:4 : The FC Control (b7-4) - Frame Subtype
+ * bit 16..27 : seq_number:12 The received Sequence number field
+ * bit 28..31 : extended:4 extended subtype
+ * [dword 1]
+ * bit 0.. 3 : reserved
+ * bit 4.. 5 : key_id:2
+ * bit 6 : decrypt_bypass:1
+ * bit 7 : security:1
+ * bit 8.. 9 : ds_bits:2
+ * bit 10 : a_msdu_present:1 from qos header
+ * bit 11 : a_msdu_type:1 from qos header
+ * bit 12 : a_mpdu:1 part of AMPDU aggregation
+ * bit 13 : broadcast:1
+ * bit 14 : multicast:1
+ * bit 15 : reserved:1
+ * bit 16..20 : rx_mac_qid:5 The Queue Identifier that the packet
+ * is received from
+ * bit 21..24 : mcs:4
+ * bit 25..28 : mic_icr:4
+ * bit 29..31 : reserved:3
+ * [dword 2]
+ * bit 0.. 2 : time_slot:3 The timeslot that the MPDU is received
+ * bit 3 : fc_protocol_ver:1 The FC Control (b0) - Protocol Version
+ * bit 4 : fc_order:1 The FC Control (b15) -Order
+ * bit 5.. 7 : qos_ack_policy:3 The QoS (b6-5) ack policy Field
+ * bit 8 : esop:1 The QoS (b4) ESOP field
+ * bit 9 : qos_rdg_more_ppdu:1 The QoS (b9) RDG field
+ * bit 10..14 : qos_reserved:5 The QoS (b14-10) Reserved field
+ * bit 15 : qos_ac_constraint:1
+ * bit 16..31 : pn_15_0:16 low 2 bytes of PN
+ * [dword 3]
+ * bit 0..31 : pn_47_16:32 high 4 bytes of PN
+ */
+struct vring_rx_mac {
+ u32 d0;
+ u32 d1;
+ u16 w4;
+ u16 pn_15_0;
+ u32 pn_47_16;
+} __packed;
+
+/*
+ * Rx descriptor - DMA part
+ * [dword 0]
+ * bit 0.. 7 : l4_length:8 layer 4 length
+ * bit 8.. 9 : reserved:2
+ * bit 10 : cmd_dma_it:1
+ * bit 11..15 : reserved:5
+ * bit 16..29 : phy_info_length:14
+ * bit 30..31 : l4_type:2 valid if the L4I bit is set in the status field
+ * [dword 1]
+ * bit 0..31 : addr_low:32 The payload buffer low address
+ * [dword 2]
+ * bit 0..15 : addr_high:16 The payload buffer high address
+ * bit 16..23 : ip_length:8
+ * bit 24..30 : mac_length:7
+ * bit 31 : ip_version:1
+ * [dword 3]
+ * [byte 12] error
+ * [byte 13] status
+ * bit 0 : du:1
+ * bit 1 : eop:1
+ * bit 2 : error:1
+ * bit 3 : mi:1
+ * bit 4 : l3_identified:1
+ * bit 5 : l4_identified:1
+ * bit 6 : phy_info_included:1
+ * bit 7 : reserved:1
+ * [word 7] length
+ *
+ */
+
+#define RX_DMA_D0_CMD_DMA_IT BIT(10)
+
+#define RX_DMA_STATUS_DU BIT(0)
+#define RX_DMA_STATUS_ERROR BIT(2)
+#define RX_DMA_STATUS_PHY_INFO BIT(6)
+
+struct vring_rx_dma {
+ u32 d0;
+ u32 addr_low;
+ u16 addr_high;
+ u8 ip_length;
+ u8 b11;
+ u8 error;
+ u8 status;
+ u16 length;
+} __packed;
+
+struct vring_tx_desc {
+ struct vring_tx_mac mac;
+ struct vring_tx_dma dma;
+} __packed;
+
+struct vring_rx_desc {
+ struct vring_rx_mac mac;
+ struct vring_rx_dma dma;
+} __packed;
+
+union vring_desc {
+ struct vring_tx_desc tx;
+ struct vring_rx_desc rx;
+} __packed;
+
+static inline int wil_rxdesc_phy_length(volatile struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->dma.d0, 16, 29);
+}
+
+static inline int wil_rxdesc_mcs(volatile struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d1, 21, 24);
+}
+
+static inline int wil_rxdesc_ds_bits(volatile struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d1, 8, 9);
+}
+
+static inline int wil_rxdesc_ftype(volatile struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d0, 10, 11);
+}
+
+#endif /* WIL6210_TXRX_H */
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
new file mode 100644
index 000000000000..9bcfffa4006c
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -0,0 +1,363 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __WIL6210_H__
+#define __WIL6210_H__
+
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/cfg80211.h>
+
+#include "dbg_hexdump.h"
+
+#define WIL_NAME "wil6210"
+
+/**
+ * extract bits [@b0:@b1] (inclusive) from the value @x
+ * it should be @b0 <= @b1, or result is incorrect
+ */
+static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
+{
+ return (x >> b0) & ((1 << (b1 - b0 + 1)) - 1);
+}
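
WIL_GET_BITS() shifts the field down to bit 0 and masks off (b1 - b0 + 1) bits; for instance, extracting bits 21..24 of 0x01e00000 yields 0xF. A quick stand-alone check of the same arithmetic follows; get_bits is a hypothetical copy of the helper.

#include <stdio.h>
#include <stdint.h>

static uint32_t get_bits(uint32_t x, int b0, int b1)
{
	/* same arithmetic as WIL_GET_BITS(): bits b0..b1 inclusive */
	return (x >> b0) & ((1u << (b1 - b0 + 1)) - 1);
}

int main(void)
{
	uint32_t d1 = 0x01e00000;	/* bits 21..24 set, e.g. the mcs field of Rx MAC dword 1 */

	printf("mcs = %u\n", get_bits(d1, 21, 24));	/* prints "mcs = 15" */
	return 0;
}
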
+
+#define WIL6210_MEM_SIZE (2*1024*1024UL)
+
+#define WIL6210_TX_QUEUES (4)
+
+#define WIL6210_RX_RING_SIZE (128)
+#define WIL6210_TX_RING_SIZE (128)
+#define WIL6210_MAX_TX_RINGS (24)
+
+/* Hardware definitions begin */
+
+/*
+ * Mapping
+ * RGF File | Host addr | FW addr
+ * | |
+ * user_rgf | 0x000000 | 0x880000
+ * dma_rgf | 0x001000 | 0x881000
+ * pcie_rgf | 0x002000 | 0x882000
+ * | |
+ */
+
+/* Where various structures placed in host address space */
+#define WIL6210_FW_HOST_OFF (0x880000UL)
+
+#define HOSTADDR(fwaddr) (fwaddr - WIL6210_FW_HOST_OFF)
+
+/*
+ * Interrupt control registers block
+ *
+ * each interrupt controlled by the same bit in all registers
+ */
+struct RGF_ICR {
+ u32 ICC; /* Cause Control, RW: 0 - W1C, 1 - COR */
+ u32 ICR; /* Cause, W1C/COR depending on ICC */
+ u32 ICM; /* Cause masked (ICR & ~IMV), W1C/COR depending on ICC */
+ u32 ICS; /* Cause Set, WO */
+ u32 IMV; /* Mask, RW+S/C */
+ u32 IMS; /* Mask Set, write 1 to set */
+ u32 IMC; /* Mask Clear, write 1 to clear */
+} __packed;
+
+/* registers - FW addresses */
+#define RGF_USER_USER_SCRATCH_PAD (0x8802bc)
+#define RGF_USER_USER_ICR (0x880b4c) /* struct RGF_ICR */
+ #define BIT_USER_USER_ICR_SW_INT_2 BIT(18)
+#define RGF_USER_CLKS_CTL_SW_RST_MASK_0 (0x880b14)
+#define RGF_USER_MAC_CPU_0 (0x8801fc)
+#define RGF_USER_USER_CPU_0 (0x8801e0)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_0 (0x880b04)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_1 (0x880b08)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_2 (0x880b0c)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_3 (0x880b10)
+
+#define RGF_DMA_PSEUDO_CAUSE (0x881c68)
+#define RGF_DMA_PSEUDO_CAUSE_MASK_SW (0x881c6c)
+#define RGF_DMA_PSEUDO_CAUSE_MASK_FW (0x881c70)
+ #define BIT_DMA_PSEUDO_CAUSE_RX BIT(0)
+ #define BIT_DMA_PSEUDO_CAUSE_TX BIT(1)
+ #define BIT_DMA_PSEUDO_CAUSE_MISC BIT(2)
+
+#define RGF_DMA_EP_TX_ICR (0x881bb4) /* struct RGF_ICR */
+ #define BIT_DMA_EP_TX_ICR_TX_DONE BIT(0)
+ #define BIT_DMA_EP_TX_ICR_TX_DONE_N(n) BIT(n+1) /* n = [0..23] */
+#define RGF_DMA_EP_RX_ICR (0x881bd0) /* struct RGF_ICR */
+ #define BIT_DMA_EP_RX_ICR_RX_DONE BIT(0)
+#define RGF_DMA_EP_MISC_ICR (0x881bec) /* struct RGF_ICR */
+ #define BIT_DMA_EP_MISC_ICR_RX_HTRSH BIT(0)
+ #define BIT_DMA_EP_MISC_ICR_TX_NO_ACT BIT(1)
+ #define BIT_DMA_EP_MISC_ICR_FW_INT0 BIT(28)
+ #define BIT_DMA_EP_MISC_ICR_FW_INT1 BIT(29)
+
+/* Interrupt moderation control */
+#define RGF_DMA_ITR_CNT_TRSH (0x881c5c)
+#define RGF_DMA_ITR_CNT_DATA (0x881c60)
+#define RGF_DMA_ITR_CNT_CRL (0x881C64)
+ #define BIT_DMA_ITR_CNT_CRL_EN BIT(0)
+ #define BIT_DMA_ITR_CNT_CRL_EXT_TICK BIT(1)
+ #define BIT_DMA_ITR_CNT_CRL_FOREVER BIT(2)
+ #define BIT_DMA_ITR_CNT_CRL_CLR BIT(3)
+ #define BIT_DMA_ITR_CNT_CRL_REACH_TRSH BIT(4)
+
+/* popular locations */
+#define HOST_MBOX HOSTADDR(RGF_USER_USER_SCRATCH_PAD)
+#define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \
+ offsetof(struct RGF_ICR, ICS))
+#define SW_INT_MBOX BIT_USER_USER_ICR_SW_INT_2
+
+/* ISR register bits */
+#define ISR_MISC_FW_READY BIT_DMA_EP_MISC_ICR_FW_INT0
+#define ISR_MISC_MBOX_EVT BIT_DMA_EP_MISC_ICR_FW_INT1
+
+/* Hardware definitions end */
+
+struct wil6210_mbox_ring {
+ u32 base;
+ u16 entry_size; /* max. size of mbox entry, incl. all headers */
+ u16 size;
+ u32 tail;
+ u32 head;
+} __packed;
+
+struct wil6210_mbox_ring_desc {
+ __le32 sync;
+ __le32 addr;
+} __packed;
+
+/* at HOST_OFF_WIL6210_MBOX_CTL */
+struct wil6210_mbox_ctl {
+ struct wil6210_mbox_ring tx;
+ struct wil6210_mbox_ring rx;
+} __packed;
+
+struct wil6210_mbox_hdr {
+ __le16 seq;
+ __le16 len; /* payload, bytes after this header */
+ __le16 type;
+ u8 flags;
+ u8 reserved;
+} __packed;
+
+#define WIL_MBOX_HDR_TYPE_WMI (0)
+
+/* max. value for wil6210_mbox_hdr.len */
+#define MAX_MBOXITEM_SIZE (240)
+
+struct wil6210_mbox_hdr_wmi {
+ u8 reserved0[2];
+ __le16 id;
+ __le16 info1; /* bits [0..3] - device_id, rest - unused */
+ u8 reserved1[2];
+} __packed;
+
+struct pending_wmi_event {
+ struct list_head list;
+ struct {
+ struct wil6210_mbox_hdr hdr;
+ struct wil6210_mbox_hdr_wmi wmi;
+ u8 data[0];
+ } __packed event;
+};
+
+union vring_desc;
+
+struct vring {
+ dma_addr_t pa;
+ volatile union vring_desc *va; /* vring_desc[size], WriteBack by DMA */
+ u16 size; /* number of vring_desc elements */
+ u32 swtail;
+ u32 swhead;
+ u32 hwtail; /* write here to inform hw */
+ void **ctx; /* void *ctx[size] - software context */
+};
+
+enum { /* for wil6210_priv.status */
+ wil_status_fwready = 0,
+ wil_status_fwconnected,
+ wil_status_dontscan,
+ wil_status_irqen, /* FIXME: interrupts enabled - for debug */
+};
+
+struct pci_dev;
+
+struct wil6210_stats {
+ u64 tsf;
+ u32 snr;
+ u16 last_mcs_rx;
+ u16 bf_mcs; /* last BF, used for Tx */
+ u16 my_rx_sector;
+ u16 my_tx_sector;
+ u16 peer_rx_sector;
+ u16 peer_tx_sector;
+};
+
+struct wil6210_priv {
+ struct pci_dev *pdev;
+ int n_msi;
+ struct wireless_dev *wdev;
+ void __iomem *csr;
+ ulong status;
+ /* profile */
+ u32 monitor_flags;
+ u32 secure_pcp; /* create secure PCP? */
+ int sinfo_gen;
+ /* cached ISR registers */
+ u32 isr_misc;
+ /* mailbox related */
+ struct mutex wmi_mutex;
+ struct wil6210_mbox_ctl mbox_ctl;
+ struct completion wmi_ready;
+ u16 wmi_seq;
+ u16 reply_id; /**< wait for this WMI event */
+ void *reply_buf;
+ u16 reply_size;
+ struct workqueue_struct *wmi_wq; /* for deferred calls */
+ struct work_struct wmi_event_worker;
+ struct workqueue_struct *wmi_wq_conn; /* for connect worker */
+ struct work_struct wmi_connect_worker;
+ struct work_struct disconnect_worker;
+ struct timer_list connect_timer;
+ int pending_connect_cid;
+ struct list_head pending_wmi_ev;
+ /*
+ * protect pending_wmi_ev
+	 * - filled in IRQ context by wil6210_irq_misc,
+	 * - consumed in thread context by wmi_event_worker
+ */
+ spinlock_t wmi_ev_lock;
+ /* DMA related */
+ struct vring vring_rx;
+ struct vring vring_tx[WIL6210_MAX_TX_RINGS];
+ u8 dst_addr[WIL6210_MAX_TX_RINGS][ETH_ALEN];
+ /* scan */
+ struct cfg80211_scan_request *scan_request;
+
+ struct mutex mutex; /* for wil6210_priv access in wil_{up|down} */
+ /* statistics */
+ struct wil6210_stats stats;
+ /* debugfs */
+ struct dentry *debug;
+ struct debugfs_blob_wrapper fw_code_blob;
+ struct debugfs_blob_wrapper fw_data_blob;
+ struct debugfs_blob_wrapper fw_peri_blob;
+ struct debugfs_blob_wrapper uc_code_blob;
+ struct debugfs_blob_wrapper uc_data_blob;
+ struct debugfs_blob_wrapper rgf_blob;
+};
+
+#define wil_to_wiphy(i) (i->wdev->wiphy)
+#define wil_to_dev(i) (wiphy_dev(wil_to_wiphy(i)))
+#define wiphy_to_wil(w) (struct wil6210_priv *)(wiphy_priv(w))
+#define wil_to_wdev(i) (i->wdev)
+#define wdev_to_wil(w) (struct wil6210_priv *)(wdev_priv(w))
+#define wil_to_ndev(i) (wil_to_wdev(i)->netdev)
+#define ndev_to_wil(n) (wdev_to_wil(n->ieee80211_ptr))
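+
+/*
+ * Example of the conversion helpers above: given a struct wil6210_priv *wil,
+ * wil_to_ndev(wil) walks wil->wdev->netdev, and ndev_to_wil(ndev) goes back
+ * through ndev->ieee80211_ptr.
+ */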
+
+#define wil_dbg(wil, fmt, arg...) netdev_dbg(wil_to_ndev(wil), fmt, ##arg)
+#define wil_info(wil, fmt, arg...) netdev_info(wil_to_ndev(wil), fmt, ##arg)
+#define wil_err(wil, fmt, arg...) netdev_err(wil_to_ndev(wil), fmt, ##arg)
+
+#define wil_dbg_IRQ(wil, fmt, arg...) wil_dbg(wil, "DBG[ IRQ]" fmt, ##arg)
+#define wil_dbg_TXRX(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg)
+#define wil_dbg_WMI(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg)
+
+#define wil_hex_dump_TXRX(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ wil_print_hex_dump_debug("DBG[TXRX]" prefix_str,\
+ prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
+
+#define wil_hex_dump_WMI(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ wil_print_hex_dump_debug("DBG[ WMI]" prefix_str,\
+ prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
+
+void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
+ size_t count);
+void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
+ size_t count);
+
+void *wil_if_alloc(struct device *dev, void __iomem *csr);
+void wil_if_free(struct wil6210_priv *wil);
+int wil_if_add(struct wil6210_priv *wil);
+void wil_if_remove(struct wil6210_priv *wil);
+int wil_priv_init(struct wil6210_priv *wil);
+void wil_priv_deinit(struct wil6210_priv *wil);
+int wil_reset(struct wil6210_priv *wil);
+void wil_link_on(struct wil6210_priv *wil);
+void wil_link_off(struct wil6210_priv *wil);
+int wil_up(struct wil6210_priv *wil);
+int wil_down(struct wil6210_priv *wil);
+void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r);
+
+void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr);
+void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr);
+int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
+ struct wil6210_mbox_hdr *hdr);
+int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len);
+void wmi_recv_cmd(struct wil6210_priv *wil);
+int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
+ u16 reply_id, void *reply, u8 reply_size, int to_msec);
+void wmi_connect_worker(struct work_struct *work);
+void wmi_event_worker(struct work_struct *work);
+void wmi_event_flush(struct wil6210_priv *wil);
+int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid);
+int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid);
+int wmi_set_channel(struct wil6210_priv *wil, int channel);
+int wmi_get_channel(struct wil6210_priv *wil, int *channel);
+int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb);
+int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
+ const void *mac_addr);
+int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
+ const void *mac_addr, int key_len, const void *key);
+int wmi_echo(struct wil6210_priv *wil);
+int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
+
+int wil6210_init_irq(struct wil6210_priv *wil, int irq);
+void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
+void wil6210_disable_irq(struct wil6210_priv *wil);
+void wil6210_enable_irq(struct wil6210_priv *wil);
+
+int wil6210_debugfs_init(struct wil6210_priv *wil);
+void wil6210_debugfs_remove(struct wil6210_priv *wil);
+
+struct wireless_dev *wil_cfg80211_init(struct device *dev);
+void wil_wdev_free(struct wil6210_priv *wil);
+
+int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
+int wmi_set_bcon(struct wil6210_priv *wil, int bi, u8 wmi_nettype);
+void wil6210_disconnect(struct wil6210_priv *wil, void *bssid);
+
+int wil_rx_init(struct wil6210_priv *wil);
+void wil_rx_fini(struct wil6210_priv *wil);
+
+/* TX API */
+int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
+ int cid, int tid);
+void wil_vring_fini_tx(struct wil6210_priv *wil, int id);
+
+netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+void wil_tx_complete(struct wil6210_priv *wil, int ringid);
+
+/* RX API */
+void wil_rx_handle(struct wil6210_priv *wil);
+
+int wil_iftype_nl2wmi(enum nl80211_iftype type);
+
+#endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
new file mode 100644
index 000000000000..12915f6e7617
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -0,0 +1,975 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/etherdevice.h>
+
+#include "wil6210.h"
+#include "wmi.h"
+
+/**
+ * WMI event receiving - theory of operations
+ *
+ * When the firmware is about to report a WMI event, it fills a memory area
+ * in the mailbox and raises the misc. IRQ. The threaded interrupt handler
+ * for the misc IRQ then calls @wmi_recv_cmd.
+ *
+ * @wmi_recv_cmd reads the event, allocates a memory chunk and attaches it to
+ * the event list @wil->pending_wmi_ev. Then the work queue @wil->wmi_wq wakes
+ * up and handles events within @wmi_event_worker. Every event gets detached
+ * from the list, processed and deleted.
+ *
+ * The purpose of this mechanism is to release the IRQ thread; otherwise,
+ * if WMI event handling involved another WMI command flow, that second flow
+ * could not complete because the IRQ thread would be blocked.
+ */
+
+/**
+ * Addressing - theory of operations
+ *
+ * There are several buses present on the WIL6210 card.
+ * The same memory areas are visible at different addresses on
+ * the different buses. There are 3 main bus masters:
+ *  - MAC CPU (ucode)
+ *  - User CPU (firmware)
+ *  - AHB (host)
+ *
+ * On the PCI bus, there is one BAR (BAR0) of 2Mb size, exposing
+ * AHB addresses starting from 0x880000.
+ *
+ * Internally, the firmware uses addresses that allow faster access but
+ * are invisible to the host. To read from these addresses, the alternative
+ * AHB address must be used.
+ *
+ * Memory mapping
+ * Linker address PCI/Host address
+ * 0x880000 .. 0xa80000 2Mb BAR0
+ * 0x800000 .. 0x807000 0x900000 .. 0x907000 28k DCCM
+ * 0x840000 .. 0x857000 0x908000 .. 0x91f000 92k PERIPH
+ */
+
+/**
+ * @fw_mapping provides the memory remapping table
+ */
+static const struct {
+ u32 from; /* linker address - from, inclusive */
+ u32 to; /* linker address - to, exclusive */
+ u32 host; /* PCI/Host address - BAR0 + 0x880000 */
+} fw_mapping[] = {
+ {0x000000, 0x040000, 0x8c0000}, /* FW code RAM 256k */
+ {0x800000, 0x808000, 0x900000}, /* FW data RAM 32k */
+ {0x840000, 0x860000, 0x908000}, /* peripheral data RAM 128k/96k used */
+ {0x880000, 0x88a000, 0x880000}, /* various RGF */
+ {0x8c0000, 0x932000, 0x8c0000}, /* trivial mapping for upper area */
+ /*
+ * 920000..930000 ucode code RAM
+ * 930000..932000 ucode data RAM
+ */
+};
+
+/**
+ * return AHB address for given firmware/ucode internal (linker) address
+ * @x - internal address
+ * If the address has no valid AHB mapping, return 0
+ */
+static u32 wmi_addr_remap(u32 x)
+{
+ uint i;
+
+ for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
+ if ((x >= fw_mapping[i].from) && (x < fw_mapping[i].to))
+ return x + fw_mapping[i].host - fw_mapping[i].from;
+ }
+
+ return 0;
+}
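+
+/*
+ * Worked example: linker address 0x800100 falls into the FW data RAM
+ * entry {0x800000, 0x808000, 0x900000} above, so wmi_addr_remap(0x800100)
+ * returns 0x800100 + 0x900000 - 0x800000 == 0x900100; HOSTADDR() then
+ * turns that into BAR0 offset 0x80100.
+ */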
+
+/**
+ * Check address validity for WMI buffer; remap if needed
+ * @ptr - internal (linker) fw/ucode address
+ *
+ * A valid buffer must be DWORD aligned
+ *
+ * return the address for accessing the buffer from the host;
+ * if the buffer is not valid, return NULL.
+ */
+void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
+{
+ u32 off;
+ u32 ptr = le32_to_cpu(ptr_);
+
+ if (ptr % 4)
+ return NULL;
+
+ ptr = wmi_addr_remap(ptr);
+ if (ptr < WIL6210_FW_HOST_OFF)
+ return NULL;
+
+ off = HOSTADDR(ptr);
+ if (off > WIL6210_MEM_SIZE - 4)
+ return NULL;
+
+ return wil->csr + off;
+}
+
+/**
+ * Check address validity
+ */
+void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr)
+{
+ u32 off;
+
+ if (ptr % 4)
+ return NULL;
+
+ if (ptr < WIL6210_FW_HOST_OFF)
+ return NULL;
+
+ off = HOSTADDR(ptr);
+ if (off > WIL6210_MEM_SIZE - 4)
+ return NULL;
+
+ return wil->csr + off;
+}
+
+int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
+ struct wil6210_mbox_hdr *hdr)
+{
+ void __iomem *src = wmi_buffer(wil, ptr);
+ if (!src)
+ return -EINVAL;
+
+ wil_memcpy_fromio_32(hdr, src, sizeof(*hdr));
+
+ return 0;
+}
+
+static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
+{
+ struct {
+ struct wil6210_mbox_hdr hdr;
+ struct wil6210_mbox_hdr_wmi wmi;
+ } __packed cmd = {
+ .hdr = {
+ .type = WIL_MBOX_HDR_TYPE_WMI,
+ .flags = 0,
+ .len = cpu_to_le16(sizeof(cmd.wmi) + len),
+ },
+ .wmi = {
+ .id = cpu_to_le16(cmdid),
+ .info1 = 0,
+ },
+ };
+ struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx;
+ struct wil6210_mbox_ring_desc d_head;
+ u32 next_head;
+ void __iomem *dst;
+ void __iomem *head = wmi_addr(wil, r->head);
+ uint retry;
+
+ if (sizeof(cmd) + len > r->entry_size) {
+ wil_err(wil, "WMI size too large: %d bytes, max is %d\n",
+ (int)(sizeof(cmd) + len), r->entry_size);
+ return -ERANGE;
+ }
+
+ might_sleep();
+
+ if (!test_bit(wil_status_fwready, &wil->status)) {
+ wil_err(wil, "FW not ready\n");
+ return -EAGAIN;
+ }
+
+ if (!head) {
+ wil_err(wil, "WMI head is garbage: 0x%08x\n", r->head);
+ return -EINVAL;
+ }
+ /* read Tx head until it is not busy */
+ for (retry = 5; retry > 0; retry--) {
+ wil_memcpy_fromio_32(&d_head, head, sizeof(d_head));
+ if (d_head.sync == 0)
+ break;
+ msleep(20);
+ }
+ if (d_head.sync != 0) {
+ wil_err(wil, "WMI head busy\n");
+ return -EBUSY;
+ }
+ /* next head */
+ next_head = r->base + ((r->head - r->base + sizeof(d_head)) % r->size);
+ wil_dbg_WMI(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head);
+ /* wait until the FW finishes with the previous command */
+ for (retry = 5; retry > 0; retry--) {
+ r->tail = ioread32(wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, tx.tail));
+ if (next_head != r->tail)
+ break;
+ msleep(20);
+ }
+ if (next_head == r->tail) {
+ wil_err(wil, "WMI ring full\n");
+ return -EBUSY;
+ }
+ dst = wmi_buffer(wil, d_head.addr);
+ if (!dst) {
+ wil_err(wil, "invalid WMI buffer: 0x%08x\n",
+ le32_to_cpu(d_head.addr));
+ return -EINVAL;
+ }
+ cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq);
+ /* set command */
+ wil_dbg_WMI(wil, "WMI command 0x%04x [%d]\n", cmdid, len);
+ wil_hex_dump_WMI("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd,
+ sizeof(cmd), true);
+ wil_hex_dump_WMI("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf,
+ len, true);
+ wil_memcpy_toio_32(dst, &cmd, sizeof(cmd));
+ wil_memcpy_toio_32(dst + sizeof(cmd), buf, len);
+ /* mark entry as full */
+ iowrite32(1, wil->csr + HOSTADDR(r->head) +
+ offsetof(struct wil6210_mbox_ring_desc, sync));
+ /* advance next ptr */
+ iowrite32(r->head = next_head, wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, tx.head));
+
+ /* interrupt to FW */
+ iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT);
+
+ return 0;
+}
+
+int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
+{
+ int rc;
+
+ mutex_lock(&wil->wmi_mutex);
+ rc = __wmi_send(wil, cmdid, buf, len);
+ mutex_unlock(&wil->wmi_mutex);
+
+ return rc;
+}
+
+/*=== Event handlers ===*/
+static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *wdev = wil->wdev;
+ struct wmi_ready_event *evt = d;
+ u32 ver = le32_to_cpu(evt->sw_version);
+
+ wil_dbg_WMI(wil, "FW ver. %d; MAC %pM\n", ver, evt->mac);
+
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ memcpy(ndev->dev_addr, evt->mac, ETH_ALEN);
+ memcpy(ndev->perm_addr, evt->mac, ETH_ALEN);
+ }
+ snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version),
+ "%d", ver);
+}
+
+static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d,
+ int len)
+{
+ wil_dbg_WMI(wil, "WMI: FW ready\n");
+
+ set_bit(wil_status_fwready, &wil->status);
+ /* reuse wmi_ready for the firmware ready indication */
+ complete(&wil->wmi_ready);
+}
+
+static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct wmi_rx_mgmt_packet_event *data = d;
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+ struct ieee80211_mgmt *rx_mgmt_frame =
+ (struct ieee80211_mgmt *)data->payload;
+ int ch_no = data->info.channel+1;
+ u32 freq = ieee80211_channel_to_frequency(ch_no,
+ IEEE80211_BAND_60GHZ);
+ struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq);
+ /* TODO convert LE to CPU */
+ s32 signal = 0; /* TODO */
+ __le16 fc = rx_mgmt_frame->frame_control;
+ u32 d_len = le32_to_cpu(data->info.len);
+ u16 d_status = le16_to_cpu(data->info.status);
+
+ wil_dbg_WMI(wil, "MGMT: channel %d MCS %d SNR %d\n",
+ data->info.channel, data->info.mcs, data->info.snr);
+ wil_dbg_WMI(wil, "status 0x%04x len %d stype %04x\n", d_status, d_len,
+ le16_to_cpu(data->info.stype));
+ wil_dbg_WMI(wil, "qid %d mid %d cid %d\n",
+ data->info.qid, data->info.mid, data->info.cid);
+
+ if (!channel) {
+ wil_err(wil, "Frame on unsupported channel\n");
+ return;
+ }
+
+ if (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)) {
+ struct cfg80211_bss *bss;
+ u64 tsf = le64_to_cpu(rx_mgmt_frame->u.beacon.timestamp);
+ u16 cap = le16_to_cpu(rx_mgmt_frame->u.beacon.capab_info);
+ u16 bi = le16_to_cpu(rx_mgmt_frame->u.beacon.beacon_int);
+ const u8 *ie_buf = rx_mgmt_frame->u.beacon.variable;
+ size_t ie_len = d_len - offsetof(struct ieee80211_mgmt,
+ u.beacon.variable);
+ wil_dbg_WMI(wil, "Capability info : 0x%04x\n", cap);
+
+ bss = cfg80211_inform_bss(wiphy, channel, rx_mgmt_frame->bssid,
+ tsf, cap, bi, ie_buf, ie_len,
+ signal, GFP_KERNEL);
+ if (bss) {
+ wil_dbg_WMI(wil, "Added BSS %pM\n",
+ rx_mgmt_frame->bssid);
+ cfg80211_put_bss(bss);
+ } else {
+ wil_err(wil, "cfg80211_inform_bss() failed\n");
+ }
+ }
+}
+
+static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
+ void *d, int len)
+{
+ if (wil->scan_request) {
+ struct wmi_scan_complete_event *data = d;
+ bool aborted = (data->status != 0);
+
+ wil_dbg_WMI(wil, "SCAN_COMPLETE(0x%08x)\n", data->status);
+ cfg80211_scan_done(wil->scan_request, aborted);
+ wil->scan_request = NULL;
+ } else {
+ wil_err(wil, "SCAN_COMPLETE while not scanning\n");
+ }
+}
+
+static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *wdev = wil->wdev;
+ struct wmi_connect_event *evt = d;
+ int ch; /* channel number */
+ struct station_info sinfo;
+ u8 *assoc_req_ie, *assoc_resp_ie;
+ size_t assoc_req_ielen, assoc_resp_ielen;
+ /* capinfo(u16) + listen_interval(u16) + IEs */
+ const size_t assoc_req_ie_offset = sizeof(u16) * 2;
+ /* capinfo(u16) + status_code(u16) + associd(u16) + IEs */
+ const size_t assoc_resp_ie_offset = sizeof(u16) * 3;
+
+ if (len < sizeof(*evt)) {
+ wil_err(wil, "Connect event too short : %d bytes\n", len);
+ return;
+ }
+ if (len != sizeof(*evt) + evt->beacon_ie_len + evt->assoc_req_len +
+ evt->assoc_resp_len) {
+ wil_err(wil,
+ "Connect event corrupted : %d != %d + %d + %d + %d\n",
+ len, (int)sizeof(*evt), evt->beacon_ie_len,
+ evt->assoc_req_len, evt->assoc_resp_len);
+ return;
+ }
+ ch = evt->channel + 1;
+ wil_dbg_WMI(wil, "Connect %pM channel [%d] cid %d\n",
+ evt->bssid, ch, evt->cid);
+ wil_hex_dump_WMI("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1,
+ evt->assoc_info, len - sizeof(*evt), true);
+
+ /* figure out IE's */
+ assoc_req_ie = &evt->assoc_info[evt->beacon_ie_len +
+ assoc_req_ie_offset];
+ assoc_req_ielen = evt->assoc_req_len - assoc_req_ie_offset;
+ if (evt->assoc_req_len <= assoc_req_ie_offset) {
+ assoc_req_ie = NULL;
+ assoc_req_ielen = 0;
+ }
+
+ assoc_resp_ie = &evt->assoc_info[evt->beacon_ie_len +
+ evt->assoc_req_len +
+ assoc_resp_ie_offset];
+ assoc_resp_ielen = evt->assoc_resp_len - assoc_resp_ie_offset;
+ if (evt->assoc_resp_len <= assoc_resp_ie_offset) {
+ assoc_resp_ie = NULL;
+ assoc_resp_ielen = 0;
+ }
+
+ if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
+ (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
+ if (wdev->sme_state != CFG80211_SME_CONNECTING) {
+ wil_err(wil, "Not in connecting state\n");
+ return;
+ }
+ del_timer_sync(&wil->connect_timer);
+ cfg80211_connect_result(ndev, evt->bssid,
+ assoc_req_ie, assoc_req_ielen,
+ assoc_resp_ie, assoc_resp_ielen,
+ WLAN_STATUS_SUCCESS, GFP_KERNEL);
+
+ } else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
+ (wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
+ memset(&sinfo, 0, sizeof(sinfo));
+
+ sinfo.generation = wil->sinfo_gen++;
+
+ if (assoc_req_ie) {
+ sinfo.assoc_req_ies = assoc_req_ie;
+ sinfo.assoc_req_ies_len = assoc_req_ielen;
+ sinfo.filled |= STATION_INFO_ASSOC_REQ_IES;
+ }
+
+ cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL);
+ }
+ set_bit(wil_status_fwconnected, &wil->status);
+
+ /* FIXME FW can transmit only ucast frames to peer */
+ /* FIXME real ring_id instead of hard coded 0 */
+ memcpy(wil->dst_addr[0], evt->bssid, ETH_ALEN);
+
+ wil->pending_connect_cid = evt->cid;
+ queue_work(wil->wmi_wq_conn, &wil->wmi_connect_worker);
+}
+
+static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
+ void *d, int len)
+{
+ struct wmi_disconnect_event *evt = d;
+
+ wil_dbg_WMI(wil, "Disconnect %pM reason %d proto %d wmi\n",
+ evt->bssid,
+ evt->protocol_reason_status, evt->disconnect_reason);
+
+ wil->sinfo_gen++;
+
+ wil6210_disconnect(wil, evt->bssid);
+ clear_bit(wil_status_dontscan, &wil->status);
+}
+
+static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct wmi_notify_req_done_event *evt = d;
+
+ if (len < sizeof(*evt)) {
+ wil_err(wil, "Short NOTIFY event\n");
+ return;
+ }
+
+ wil->stats.tsf = le64_to_cpu(evt->tsf);
+ wil->stats.snr = le32_to_cpu(evt->snr_val);
+ wil->stats.bf_mcs = le16_to_cpu(evt->bf_mcs);
+ wil->stats.my_rx_sector = le16_to_cpu(evt->my_rx_sector);
+ wil->stats.my_tx_sector = le16_to_cpu(evt->my_tx_sector);
+ wil->stats.peer_rx_sector = le16_to_cpu(evt->other_rx_sector);
+ wil->stats.peer_tx_sector = le16_to_cpu(evt->other_tx_sector);
+ wil_dbg_WMI(wil, "Link status, MCS %d TSF 0x%016llx\n"
+ "BF status 0x%08x SNR 0x%08x\n"
+ "Tx Tpt %d goodput %d Rx goodput %d\n"
+ "Sectors(rx:tx) my %d:%d peer %d:%d\n",
+ wil->stats.bf_mcs, wil->stats.tsf, evt->status,
+ wil->stats.snr, le32_to_cpu(evt->tx_tpt),
+ le32_to_cpu(evt->tx_goodput), le32_to_cpu(evt->rx_goodput),
+ wil->stats.my_rx_sector, wil->stats.my_tx_sector,
+ wil->stats.peer_rx_sector, wil->stats.peer_tx_sector);
+}
+
+/*
+ * The firmware reports an EAPOL frame using a WMI event.
+ * Reconstruct the Ethernet frame and deliver it via the normal Rx path
+ */
+static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
+ void *d, int len)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wmi_eapol_rx_event *evt = d;
+ u16 eapol_len = le16_to_cpu(evt->eapol_len);
+ int sz = eapol_len + ETH_HLEN;
+ struct sk_buff *skb;
+ struct ethhdr *eth;
+
+ wil_dbg_WMI(wil, "EAPOL len %d from %pM\n", eapol_len,
+ evt->src_mac);
+
+ if (eapol_len > 196) { /* TODO: revisit size limit */
+ wil_err(wil, "EAPOL too large\n");
+ return;
+ }
+
+ skb = alloc_skb(sz, GFP_KERNEL);
+ if (!skb) {
+ wil_err(wil, "Failed to allocate skb\n");
+ return;
+ }
+ eth = (struct ethhdr *)skb_put(skb, ETH_HLEN);
+ memcpy(eth->h_dest, ndev->dev_addr, ETH_ALEN);
+ memcpy(eth->h_source, evt->src_mac, ETH_ALEN);
+ eth->h_proto = cpu_to_be16(ETH_P_PAE);
+ memcpy(skb_put(skb, eapol_len), evt->eapol, eapol_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += skb->len;
+ } else {
+ ndev->stats.rx_dropped++;
+ }
+}
+
+static const struct {
+ int eventid;
+ void (*handler)(struct wil6210_priv *wil, int eventid,
+ void *data, int data_len);
+} wmi_evt_handlers[] = {
+ {WMI_READY_EVENTID, wmi_evt_ready},
+ {WMI_FW_READY_EVENTID, wmi_evt_fw_ready},
+ {WMI_RX_MGMT_PACKET_EVENTID, wmi_evt_rx_mgmt},
+ {WMI_SCAN_COMPLETE_EVENTID, wmi_evt_scan_complete},
+ {WMI_CONNECT_EVENTID, wmi_evt_connect},
+ {WMI_DISCONNECT_EVENTID, wmi_evt_disconnect},
+ {WMI_NOTIFY_REQ_DONE_EVENTID, wmi_evt_notify},
+ {WMI_EAPOL_RX_EVENTID, wmi_evt_eapol_rx},
+};
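+
+/*
+ * Events are dispatched through this table by wmi_evt_call_handler() below;
+ * supporting a new event means adding a {WMI_..._EVENTID, handler} pair
+ * here, e.g. a hypothetical {WMI_BA_STATUS_EVENTID, wmi_evt_ba_status}
+ * entry (illustrative only, no such handler exists yet).
+ */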
+
+/*
+ * Runs in IRQ context.
+ * Extract the WMI command from the mailbox and queue it to the
+ * @wil->pending_wmi_ev list; it will eventually be handled by
+ * @wmi_event_worker in the context of the "wil6210_wmi" thread
+ */
+void wmi_recv_cmd(struct wil6210_priv *wil)
+{
+ struct wil6210_mbox_ring_desc d_tail;
+ struct wil6210_mbox_hdr hdr;
+ struct wil6210_mbox_ring *r = &wil->mbox_ctl.rx;
+ struct pending_wmi_event *evt;
+ u8 *cmd;
+ void __iomem *src;
+ ulong flags;
+
+ for (;;) {
+ u16 len;
+
+ r->head = ioread32(wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, rx.head));
+ if (r->tail == r->head)
+ return;
+
+ /* read cmd from tail */
+ wil_memcpy_fromio_32(&d_tail, wil->csr + HOSTADDR(r->tail),
+ sizeof(struct wil6210_mbox_ring_desc));
+ if (d_tail.sync == 0) {
+ wil_err(wil, "Mbox evt not owned by FW?\n");
+ return;
+ }
+
+ if (0 != wmi_read_hdr(wil, d_tail.addr, &hdr)) {
+ wil_err(wil, "Mbox evt at 0x%08x?\n",
+ le32_to_cpu(d_tail.addr));
+ return;
+ }
+
+ len = le16_to_cpu(hdr.len);
+ src = wmi_buffer(wil, d_tail.addr) +
+ sizeof(struct wil6210_mbox_hdr);
+ evt = kmalloc(ALIGN(offsetof(struct pending_wmi_event,
+ event.wmi) + len, 4),
+ GFP_KERNEL);
+ if (!evt) {
+ wil_err(wil, "kmalloc for WMI event (%d) failed\n",
+ len);
+ return;
+ }
+ evt->event.hdr = hdr;
+ cmd = (void *)&evt->event.wmi;
+ wil_memcpy_fromio_32(cmd, src, len);
+ /* mark entry as empty */
+ iowrite32(0, wil->csr + HOSTADDR(r->tail) +
+ offsetof(struct wil6210_mbox_ring_desc, sync));
+ /* indicate */
+ wil_dbg_WMI(wil, "Mbox evt %04x %04x %04x %02x\n",
+ le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type),
+ hdr.flags);
+ if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
+ (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
+ wil_dbg_WMI(wil, "WMI event 0x%04x\n",
+ evt->event.wmi.id);
+ }
+ wil_hex_dump_WMI("evt ", DUMP_PREFIX_OFFSET, 16, 1,
+ &evt->event.hdr, sizeof(hdr) + len, true);
+
+ /* advance tail */
+ r->tail = r->base + ((r->tail - r->base +
+ sizeof(struct wil6210_mbox_ring_desc)) % r->size);
+ iowrite32(r->tail, wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, rx.tail));
+
+ /* add to the pending list */
+ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+ list_add_tail(&evt->list, &wil->pending_wmi_ev);
+ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
+ {
+ int q = queue_work(wil->wmi_wq,
+ &wil->wmi_event_worker);
+ wil_dbg_WMI(wil, "queue_work -> %d\n", q);
+ }
+ }
+}
+
+int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
+ u16 reply_id, void *reply, u8 reply_size, int to_msec)
+{
+ int rc;
+ int remain;
+
+ mutex_lock(&wil->wmi_mutex);
+
+ rc = __wmi_send(wil, cmdid, buf, len);
+ if (rc)
+ goto out;
+
+ wil->reply_id = reply_id;
+ wil->reply_buf = reply;
+ wil->reply_size = reply_size;
+ remain = wait_for_completion_timeout(&wil->wmi_ready,
+ msecs_to_jiffies(to_msec));
+ if (0 == remain) {
+ wil_err(wil, "wmi_call(0x%04x->0x%04x) timeout %d msec\n",
+ cmdid, reply_id, to_msec);
+ rc = -ETIME;
+ } else {
+ wil_dbg_WMI(wil,
+ "wmi_call(0x%04x->0x%04x) completed in %d msec\n",
+ cmdid, reply_id,
+ to_msec - jiffies_to_msecs(remain));
+ }
+ wil->reply_id = 0;
+ wil->reply_buf = NULL;
+ wil->reply_size = 0;
+ out:
+ mutex_unlock(&wil->wmi_mutex);
+
+ return rc;
+}
+
+int wmi_echo(struct wil6210_priv *wil)
+{
+ struct wmi_echo_cmd cmd = {
+ .value = cpu_to_le32(0x12345678),
+ };
+
+ return wmi_call(wil, WMI_ECHO_CMDID, &cmd, sizeof(cmd),
+ WMI_ECHO_RSP_EVENTID, NULL, 0, 20);
+}
+
+int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
+{
+ struct wmi_set_mac_address_cmd cmd;
+
+ memcpy(cmd.mac, addr, ETH_ALEN);
+
+ wil_dbg_WMI(wil, "Set MAC %pM\n", addr);
+
+ return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_set_bcon(struct wil6210_priv *wil, int bi, u8 wmi_nettype)
+{
+ struct wmi_bcon_ctrl_cmd cmd = {
+ .bcon_interval = cpu_to_le16(bi),
+ .network_type = wmi_nettype,
+ .disable_sec_offload = 1,
+ };
+
+ if (!wil->secure_pcp)
+ cmd.disable_sec = 1;
+
+ return wmi_send(wil, WMI_BCON_CTRL_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid)
+{
+ struct wmi_set_ssid_cmd cmd = {
+ .ssid_len = cpu_to_le32(ssid_len),
+ };
+
+ if (ssid_len > sizeof(cmd.ssid))
+ return -EINVAL;
+
+ memcpy(cmd.ssid, ssid, ssid_len);
+
+ return wmi_send(wil, WMI_SET_SSID_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid)
+{
+ int rc;
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_set_ssid_cmd cmd;
+ } __packed reply;
+ int len; /* reply.cmd.ssid_len in CPU order */
+
+ rc = wmi_call(wil, WMI_GET_SSID_CMDID, NULL, 0, WMI_GET_SSID_EVENTID,
+ &reply, sizeof(reply), 20);
+ if (rc)
+ return rc;
+
+ len = le32_to_cpu(reply.cmd.ssid_len);
+ if (len > sizeof(reply.cmd.ssid))
+ return -EINVAL;
+
+ *ssid_len = len;
+ memcpy(ssid, reply.cmd.ssid, len);
+
+ return 0;
+}
+
+int wmi_set_channel(struct wil6210_priv *wil, int channel)
+{
+ struct wmi_set_pcp_channel_cmd cmd = {
+ .channel = channel - 1,
+ };
+
+ return wmi_send(wil, WMI_SET_PCP_CHANNEL_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_get_channel(struct wil6210_priv *wil, int *channel)
+{
+ int rc;
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_set_pcp_channel_cmd cmd;
+ } __packed reply;
+
+ rc = wmi_call(wil, WMI_GET_PCP_CHANNEL_CMDID, NULL, 0,
+ WMI_GET_PCP_CHANNEL_EVENTID, &reply, sizeof(reply), 20);
+ if (rc)
+ return rc;
+
+ if (reply.cmd.channel > 3)
+ return -EINVAL;
+
+ *channel = reply.cmd.channel + 1;
+
+ return 0;
+}
+
+int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb)
+{
+ struct wmi_eapol_tx_cmd *cmd;
+ struct ethhdr *eth;
+ u16 eapol_len = skb->len - ETH_HLEN;
+ void *eapol = skb->data + ETH_HLEN;
+ uint i;
+ int rc;
+
+ skb_set_mac_header(skb, 0);
+ eth = eth_hdr(skb);
+ wil_dbg_WMI(wil, "EAPOL %d bytes to %pM\n", eapol_len, eth->h_dest);
+ for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
+ if (memcmp(wil->dst_addr[i], eth->h_dest, ETH_ALEN) == 0)
+ goto found_dest;
+ }
+
+ return -EINVAL;
+
+ found_dest:
+ /* find out eapol data & len */
+ cmd = kzalloc(sizeof(*cmd) + eapol_len, GFP_KERNEL);
+ if (!cmd)
+ return -EINVAL;
+
+ memcpy(cmd->dst_mac, eth->h_dest, ETH_ALEN);
+ cmd->eapol_len = cpu_to_le16(eapol_len);
+ memcpy(cmd->eapol, eapol, eapol_len);
+ rc = wmi_send(wil, WMI_EAPOL_TX_CMDID, cmd, sizeof(*cmd) + eapol_len);
+ kfree(cmd);
+
+ return rc;
+}
+
+int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
+ const void *mac_addr)
+{
+ struct wmi_delete_cipher_key_cmd cmd = {
+ .key_index = key_index,
+ };
+
+ if (mac_addr)
+ memcpy(cmd.mac, mac_addr, WMI_MAC_LEN);
+
+ return wmi_send(wil, WMI_DELETE_CIPHER_KEY_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
+ const void *mac_addr, int key_len, const void *key)
+{
+ struct wmi_add_cipher_key_cmd cmd = {
+ .key_index = key_index,
+ .key_usage = WMI_KEY_USE_PAIRWISE,
+ .key_len = key_len,
+ };
+
+ if (!key || (key_len > sizeof(cmd.key)))
+ return -EINVAL;
+
+ memcpy(cmd.key, key, key_len);
+ if (mac_addr)
+ memcpy(cmd.mac, mac_addr, WMI_MAC_LEN);
+
+ return wmi_send(wil, WMI_ADD_CIPHER_KEY_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
+{
+ int rc;
+ u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
+ struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
+ if (!cmd) {
+ wil_err(wil, "kmalloc(%d) failed\n", len);
+ return -ENOMEM;
+ }
+
+ cmd->mgmt_frm_type = type;
+ /* BUG: FW API defines ieLen as u8. Will fix FW */
+ cmd->ie_len = cpu_to_le16(ie_len);
+ memcpy(cmd->ie_info, ie, ie_len);
+ rc = wmi_send(wil, WMI_SET_APPIE_CMDID, cmd, len);
+ kfree(cmd);
+
+ return rc;
+}
+
+void wmi_event_flush(struct wil6210_priv *wil)
+{
+ struct pending_wmi_event *evt, *t;
+
+ wil_dbg_WMI(wil, "%s()\n", __func__);
+
+ list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) {
+ list_del(&evt->list);
+ kfree(evt);
+ }
+}
+
+static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id,
+ void *d, int len)
+{
+ uint i;
+
+ for (i = 0; i < ARRAY_SIZE(wmi_evt_handlers); i++) {
+ if (wmi_evt_handlers[i].eventid == id) {
+ wmi_evt_handlers[i].handler(wil, id, d, len);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void wmi_event_handle(struct wil6210_priv *wil,
+ struct wil6210_mbox_hdr *hdr)
+{
+ u16 len = le16_to_cpu(hdr->len);
+
+ if ((hdr->type == WIL_MBOX_HDR_TYPE_WMI) &&
+ (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
+ struct wil6210_mbox_hdr_wmi *wmi = (void *)(&hdr[1]);
+ void *evt_data = (void *)(&wmi[1]);
+ u16 id = le16_to_cpu(wmi->id);
+ /* check if someone waits for this event */
+ if (wil->reply_id && wil->reply_id == id) {
+ if (wil->reply_buf) {
+ memcpy(wil->reply_buf, wmi,
+ min(len, wil->reply_size));
+ } else {
+ wmi_evt_call_handler(wil, id, evt_data,
+ len - sizeof(*wmi));
+ }
+ wil_dbg_WMI(wil, "Complete WMI 0x%04x\n", id);
+ complete(&wil->wmi_ready);
+ return;
+ }
+ /* unsolicited event */
+ /* search for handler */
+ if (!wmi_evt_call_handler(wil, id, evt_data,
+ len - sizeof(*wmi))) {
+ wil_err(wil, "Unhandled event 0x%04x\n", id);
+ }
+ } else {
+ wil_err(wil, "Unknown event type\n");
+ print_hex_dump(KERN_ERR, "evt?? ", DUMP_PREFIX_OFFSET, 16, 1,
+ hdr, sizeof(*hdr) + len, true);
+ }
+}
+
+/*
+ * Retrieve next WMI event from the pending list
+ */
+static struct list_head *next_wmi_ev(struct wil6210_priv *wil)
+{
+ ulong flags;
+ struct list_head *ret = NULL;
+
+ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+
+ if (!list_empty(&wil->pending_wmi_ev)) {
+ ret = wil->pending_wmi_ev.next;
+ list_del(ret);
+ }
+
+ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
+
+ return ret;
+}
+
+/*
+ * Handler for the WMI events
+ */
+void wmi_event_worker(struct work_struct *work)
+{
+ struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+ wmi_event_worker);
+ struct pending_wmi_event *evt;
+ struct list_head *lh;
+
+ while ((lh = next_wmi_ev(wil)) != NULL) {
+ evt = list_entry(lh, struct pending_wmi_event, list);
+ wmi_event_handle(wil, &evt->event.hdr);
+ kfree(evt);
+ }
+}
+
+void wmi_connect_worker(struct work_struct *work)
+{
+ int rc;
+ struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+ wmi_connect_worker);
+
+ if (wil->pending_connect_cid < 0) {
+ wil_err(wil, "No connection pending\n");
+ return;
+ }
+
+ wil_dbg_WMI(wil, "Configure for connection CID %d\n",
+ wil->pending_connect_cid);
+
+ rc = wil_vring_init_tx(wil, 0, WIL6210_TX_RING_SIZE,
+ wil->pending_connect_cid, 0);
+ wil->pending_connect_cid = -1;
+ if (rc == 0)
+ wil_link_on(wil);
+}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
new file mode 100644
index 000000000000..3bbf87572b07
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -0,0 +1,1116 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ * Copyright (c) 2006-2012 Wilocity.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file contains the definitions of the WMI protocol specified in the
+ * Wireless Module Interface (WMI) for the Wilocity
+ * MARLON 60 Gigabit wireless solution.
+ * It includes definitions of all the commands and events.
+ * Commands are messages from the host to the WM.
+ * Events are messages from the WM to the host.
+ */
+
+#ifndef __WILOCITY_WMI_H__
+#define __WILOCITY_WMI_H__
+
+/* General */
+
+#define WMI_MAC_LEN (6)
+#define WMI_PROX_RANGE_NUM (3)
+
+/* List of Commands */
+enum wmi_command_id {
+ WMI_CONNECT_CMDID = 0x0001,
+ WMI_DISCONNECT_CMDID = 0x0003,
+ WMI_START_SCAN_CMDID = 0x0007,
+ WMI_SET_BSS_FILTER_CMDID = 0x0009,
+ WMI_SET_PROBED_SSID_CMDID = 0x000a,
+ WMI_SET_LISTEN_INT_CMDID = 0x000b,
+ WMI_BCON_CTRL_CMDID = 0x000f,
+ WMI_ADD_CIPHER_KEY_CMDID = 0x0016,
+ WMI_DELETE_CIPHER_KEY_CMDID = 0x0017,
+ WMI_SET_APPIE_CMDID = 0x003f,
+ WMI_GET_APPIE_CMDID = 0x0040,
+ WMI_SET_WSC_STATUS_CMDID = 0x0041,
+ WMI_PXMT_RANGE_CFG_CMDID = 0x0042,
+ WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x0043,
+ WMI_FAST_MEM_ACC_MODE_CMDID = 0x0300,
+ WMI_MEM_READ_CMDID = 0x0800,
+ WMI_MEM_WR_CMDID = 0x0801,
+ WMI_ECHO_CMDID = 0x0803,
+ WMI_DEEP_ECHO_CMDID = 0x0804,
+ WMI_CONFIG_MAC_CMDID = 0x0805,
+ WMI_CONFIG_PHY_DEBUG_CMDID = 0x0806,
+ WMI_ADD_STATION_CMDID = 0x0807,
+ WMI_ADD_DEBUG_TX_PCKT_CMDID = 0x0808,
+ WMI_PHY_GET_STATISTICS_CMDID = 0x0809,
+ WMI_FS_TUNE_CMDID = 0x080a,
+ WMI_CORR_MEASURE_CMDID = 0x080b,
+ WMI_TEMP_SENSE_CMDID = 0x080e,
+ WMI_DC_CALIB_CMDID = 0x080f,
+ WMI_SEND_TONE_CMDID = 0x0810,
+ WMI_IQ_TX_CALIB_CMDID = 0x0811,
+ WMI_IQ_RX_CALIB_CMDID = 0x0812,
+ WMI_SET_UCODE_IDLE_CMDID = 0x0813,
+ WMI_SET_WORK_MODE_CMDID = 0x0815,
+ WMI_LO_LEAKAGE_CALIB_CMDID = 0x0816,
+ WMI_MARLON_R_ACTIVATE_CMDID = 0x0817,
+ WMI_MARLON_R_READ_CMDID = 0x0818,
+ WMI_MARLON_R_WRITE_CMDID = 0x0819,
+ WMI_MARLON_R_TXRX_SEL_CMDID = 0x081a,
+ MAC_IO_STATIC_PARAMS_CMDID = 0x081b,
+ MAC_IO_DYNAMIC_PARAMS_CMDID = 0x081c,
+ WMI_SILENT_RSSI_CALIB_CMDID = 0x081d,
+ WMI_CFG_RX_CHAIN_CMDID = 0x0820,
+ WMI_VRING_CFG_CMDID = 0x0821,
+ WMI_RX_ON_CMDID = 0x0822,
+ WMI_VRING_BA_EN_CMDID = 0x0823,
+ WMI_VRING_BA_DIS_CMDID = 0x0824,
+ WMI_RCP_ADDBA_RESP_CMDID = 0x0825,
+ WMI_RCP_DELBA_CMDID = 0x0826,
+ WMI_SET_SSID_CMDID = 0x0827,
+ WMI_GET_SSID_CMDID = 0x0828,
+ WMI_SET_PCP_CHANNEL_CMDID = 0x0829,
+ WMI_GET_PCP_CHANNEL_CMDID = 0x082a,
+ WMI_SW_TX_REQ_CMDID = 0x082b,
+ WMI_RX_OFF_CMDID = 0x082c,
+ WMI_READ_MAC_RXQ_CMDID = 0x0830,
+ WMI_READ_MAC_TXQ_CMDID = 0x0831,
+ WMI_WRITE_MAC_RXQ_CMDID = 0x0832,
+ WMI_WRITE_MAC_TXQ_CMDID = 0x0833,
+ WMI_WRITE_MAC_XQ_FIELD_CMDID = 0x0834,
+ WMI_MLME_PUSH_CMDID = 0x0835,
+ WMI_BEAMFORMING_MGMT_CMDID = 0x0836,
+ WMI_BF_TXSS_MGMT_CMDID = 0x0837,
+ WMI_BF_SM_MGMT_CMDID = 0x0838,
+ WMI_BF_RXSS_MGMT_CMDID = 0x0839,
+ WMI_SET_SECTORS_CMDID = 0x0849,
+ WMI_MAINTAIN_PAUSE_CMDID = 0x0850,
+ WMI_MAINTAIN_RESUME_CMDID = 0x0851,
+ WMI_RS_MGMT_CMDID = 0x0852,
+ WMI_RF_MGMT_CMDID = 0x0853,
+ /* Performance monitoring commands */
+ WMI_BF_CTRL_CMDID = 0x0862,
+ WMI_NOTIFY_REQ_CMDID = 0x0863,
+ WMI_GET_STATUS_CMDID = 0x0864,
+ WMI_UNIT_TEST_CMDID = 0x0900,
+ WMI_HICCUP_CMDID = 0x0901,
+ WMI_FLASH_READ_CMDID = 0x0902,
+ WMI_FLASH_WRITE_CMDID = 0x0903,
+ WMI_SECURITY_UNIT_TEST_CMDID = 0x0904,
+
+ WMI_SET_MAC_ADDRESS_CMDID = 0xf003,
+ WMI_ABORT_SCAN_CMDID = 0xf007,
+ WMI_SET_PMK_CMDID = 0xf028,
+
+ WMI_SET_PROMISCUOUS_MODE_CMDID = 0xf041,
+ WMI_GET_PMK_CMDID = 0xf048,
+ WMI_SET_PASSPHRASE_CMDID = 0xf049,
+ WMI_SEND_ASSOC_RES_CMDID = 0xf04a,
+ WMI_SET_ASSOC_REQ_RELAY_CMDID = 0xf04b,
+ WMI_EAPOL_TX_CMDID = 0xf04c,
+ WMI_MAC_ADDR_REQ_CMDID = 0xf04d,
+ WMI_FW_VER_CMDID = 0xf04e,
+};
+
+/*
+ * Commands data structures
+ */
+
+/*
+ * Frame Types
+ */
+enum wmi_mgmt_frame_type {
+ WMI_FRAME_BEACON = 0,
+ WMI_FRAME_PROBE_REQ = 1,
+ WMI_FRAME_PROBE_RESP = 2,
+ WMI_FRAME_ASSOC_REQ = 3,
+ WMI_FRAME_ASSOC_RESP = 4,
+ WMI_NUM_MGMT_FRAME,
+};
+
+/*
+ * WMI_CONNECT_CMDID
+ */
+enum wmi_network_type {
+ WMI_NETTYPE_INFRA = 0x01,
+ WMI_NETTYPE_ADHOC = 0x02,
+ WMI_NETTYPE_ADHOC_CREATOR = 0x04,
+ WMI_NETTYPE_AP = 0x10,
+ WMI_NETTYPE_P2P = 0x20,
+ WMI_NETTYPE_WBE = 0x40, /* PCIE over 60g */
+};
+
+enum wmi_dot11_auth_mode {
+ WMI_AUTH11_OPEN = 0x01,
+ WMI_AUTH11_SHARED = 0x02,
+ WMI_AUTH11_LEAP = 0x04,
+ WMI_AUTH11_WSC = 0x08,
+};
+
+enum wmi_auth_mode {
+ WMI_AUTH_NONE = 0x01,
+ WMI_AUTH_WPA = 0x02,
+ WMI_AUTH_WPA2 = 0x04,
+ WMI_AUTH_WPA_PSK = 0x08,
+ WMI_AUTH_WPA2_PSK = 0x10,
+ WMI_AUTH_WPA_CCKM = 0x20,
+ WMI_AUTH_WPA2_CCKM = 0x40,
+};
+
+enum wmi_crypto_type {
+ WMI_CRYPT_NONE = 0x01,
+ WMI_CRYPT_WEP = 0x02,
+ WMI_CRYPT_TKIP = 0x04,
+ WMI_CRYPT_AES = 0x08,
+ WMI_CRYPT_AES_GCMP = 0x20,
+};
+
+
+enum wmi_connect_ctrl_flag_bits {
+ WMI_CONNECT_ASSOC_POLICY_USER = 0x0001,
+ WMI_CONNECT_SEND_REASSOC = 0x0002,
+ WMI_CONNECT_IGNORE_WPAx_GROUP_CIPHER = 0x0004,
+ WMI_CONNECT_PROFILE_MATCH_DONE = 0x0008,
+ WMI_CONNECT_IGNORE_AAC_BEACON = 0x0010,
+ WMI_CONNECT_CSA_FOLLOW_BSS = 0x0020,
+ WMI_CONNECT_DO_WPA_OFFLOAD = 0x0040,
+ WMI_CONNECT_DO_NOT_DEAUTH = 0x0080,
+};
+
+#define WMI_MAX_SSID_LEN (32)
+
+struct wmi_connect_cmd {
+ u8 network_type;
+ u8 dot11_auth_mode;
+ u8 auth_mode;
+ u8 pairwise_crypto_type;
+ u8 pairwise_crypto_len;
+ u8 group_crypto_type;
+ u8 group_crypto_len;
+ u8 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+ u8 channel;
+ u8 reserved0;
+ u8 bssid[WMI_MAC_LEN];
+ __le32 ctrl_flags;
+ u8 dst_mac[WMI_MAC_LEN];
+ u8 reserved1[2];
+} __packed;
+
+
+/*
+ * WMI_RECONNECT_CMDID
+ */
+struct wmi_reconnect_cmd {
+ u8 channel; /* hint */
+ u8 reserved;
+ u8 bssid[WMI_MAC_LEN]; /* mandatory if set */
+} __packed;
+
+
+/*
+ * WMI_SET_PMK_CMDID
+ */
+
+#define WMI_MIN_KEY_INDEX (0)
+#define WMI_MAX_KEY_INDEX (3)
+#define WMI_MAX_KEY_LEN (32)
+#define WMI_PASSPHRASE_LEN (64)
+#define WMI_PMK_LEN (32)
+
+struct wmi_set_pmk_cmd {
+ u8 pmk[WMI_PMK_LEN];
+} __packed;
+
+
+/*
+ * WMI_SET_PASSPHRASE_CMDID
+ */
+struct wmi_set_passphrase_cmd {
+ u8 ssid[WMI_MAX_SSID_LEN];
+ u8 passphrase[WMI_PASSPHRASE_LEN];
+ u8 ssid_len;
+ u8 passphrase_len;
+} __packed;
+
+/*
+ * WMI_ADD_CIPHER_KEY_CMDID
+ */
+enum wmi_key_usage {
+ WMI_KEY_USE_PAIRWISE = 0,
+ WMI_KEY_USE_GROUP = 1,
+ WMI_KEY_USE_TX = 2, /* default Tx Key - Static WEP only */
+};
+
+struct wmi_add_cipher_key_cmd {
+ u8 key_index;
+ u8 key_type;
+ u8 key_usage; /* enum wmi_key_usage */
+ u8 key_len;
+ u8 key_rsc[8]; /* key replay sequence counter */
+ u8 key[WMI_MAX_KEY_LEN];
+ u8 key_op_ctrl; /* Additional Key Control information */
+ u8 mac[WMI_MAC_LEN];
+} __packed;
+
+/*
+ * WMI_DELETE_CIPHER_KEY_CMDID
+ */
+struct wmi_delete_cipher_key_cmd {
+ u8 key_index;
+ u8 mac[WMI_MAC_LEN];
+} __packed;
+
+
+/*
+ * WMI_START_SCAN_CMDID
+ *
+ * Start L1 scan operation
+ *
+ * Returned events:
+ * - WMI_RX_MGMT_PACKET_EVENTID - for every probe resp.
+ * - WMI_SCAN_COMPLETE_EVENTID
+ */
+enum wmi_scan_type {
+ WMI_LONG_SCAN = 0,
+ WMI_SHORT_SCAN = 1,
+};
+
+struct wmi_start_scan_cmd {
+ u8 reserved[8];
+ __le32 home_dwell_time; /* Max duration in the home channel(ms) */
+ __le32 force_scan_interval; /* Time interval between scans (ms)*/
+ u8 scan_type; /* wmi_scan_type */
+ u8 num_channels; /* how many channels follow */
+ struct {
+ u8 channel;
+ u8 reserved;
+ } channel_list[0]; /* channel IDs */
+ /* 0 - 58320 MHz */
+ /* 1 - 60480 MHz */
+ /* 2 - 62640 MHz */
+} __packed;
+
+/*
+ * WMI_SET_PROBED_SSID_CMDID
+ */
+#define MAX_PROBED_SSID_INDEX (15)
+
+enum wmi_ssid_flag {
+ WMI_SSID_FLAG_DISABLE = 0, /* disables entry */
+ WMI_SSID_FLAG_SPECIFIC = 1, /* probes specified ssid */
+ WMI_SSID_FLAG_ANY = 2, /* probes for any ssid */
+};
+
+struct wmi_probed_ssid_cmd {
+ u8 entry_index; /* 0 to MAX_PROBED_SSID_INDEX */
+ u8 flag; /* enum wmi_ssid_flag */
+ u8 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+} __packed;
+
+/*
+ * WMI_SET_APPIE_CMDID
+ * Add Application specified IE to a management frame
+ */
+struct wmi_set_appie_cmd {
+ u8 mgmt_frm_type; /* enum wmi_mgmt_frame_type */
+ u8 reserved;
+ __le16 ie_len; /* Length of the IE to be added to MGMT frame */
+ u8 ie_info[0];
+} __packed;
+
+#define WMI_MAX_IE_LEN (1024)
+
+struct wmi_pxmt_range_cfg_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 range;
+} __packed;
+
+struct wmi_pxmt_snr2_range_cfg_cmd {
+ s8 snr2range_arr[WMI_PROX_RANGE_NUM-1];
+} __packed;
+
+/*
+ * WMI_RF_MGMT_CMDID
+ */
+enum wmi_rf_mgmt_type {
+ WMI_RF_MGMT_W_DISABLE = 0,
+ WMI_RF_MGMT_W_ENABLE = 1,
+ WMI_RF_MGMT_GET_STATUS = 2,
+};
+
+struct wmi_rf_mgmt_cmd {
+ __le32 rf_mgmt_type;
+} __packed;
+
+/*
+ * WMI_SET_SSID_CMDID
+ */
+struct wmi_set_ssid_cmd {
+ __le32 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+} __packed;
+
+/*
+ * WMI_SET_PCP_CHANNEL_CMDID
+ */
+struct wmi_set_pcp_channel_cmd {
+ u8 channel;
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_BCON_CTRL_CMDID
+ */
+struct wmi_bcon_ctrl_cmd {
+ __le16 bcon_interval;
+ __le16 frag_num;
+ __le64 ss_mask;
+ u8 network_type;
+ u8 reserved;
+ u8 disable_sec_offload;
+ u8 disable_sec;
+} __packed;
+
+/*
+ * WMI_SW_TX_REQ_CMDID
+ */
+struct wmi_sw_tx_req_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 len;
+ u8 payload[0];
+} __packed;
+
+/*
+ * WMI_VRING_CFG_CMDID
+ */
+
+struct wmi_sw_ring_cfg {
+ __le64 ring_mem_base;
+ __le16 ring_size;
+ __le16 max_mpdu_size;
+} __packed;
+
+struct wmi_vring_cfg_schd {
+ __le16 priority;
+ __le16 timeslot_us;
+} __packed;
+
+enum wmi_vring_cfg_encap_trans_type {
+ WMI_VRING_ENC_TYPE_802_3 = 0,
+ WMI_VRING_ENC_TYPE_NATIVE_WIFI = 1,
+};
+
+enum wmi_vring_cfg_ds_cfg {
+ WMI_VRING_DS_PBSS = 0,
+ WMI_VRING_DS_STATION = 1,
+ WMI_VRING_DS_AP = 2,
+ WMI_VRING_DS_ADDR4 = 3,
+};
+
+enum wmi_vring_cfg_nwifi_ds_trans_type {
+ WMI_NWIFI_TX_TRANS_MODE_NO = 0,
+ WMI_NWIFI_TX_TRANS_MODE_AP2PBSS = 1,
+ WMI_NWIFI_TX_TRANS_MODE_STA2PBSS = 2,
+};
+
+enum wmi_vring_cfg_schd_params_priority {
+ WMI_SCH_PRIO_REGULAR = 0,
+ WMI_SCH_PRIO_HIGH = 1,
+};
+
+struct wmi_vring_cfg {
+ struct wmi_sw_ring_cfg tx_sw_ring;
+ u8 ringid; /* 0-23 vrings */
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 encap_trans_type;
+ u8 ds_cfg; /* 802.3 DS cfg */
+ u8 nwifi_ds_trans_type;
+
+ #define VRING_CFG_MAC_CTRL_LIFETIME_EN_POS (0)
+ #define VRING_CFG_MAC_CTRL_LIFETIME_EN_LEN (1)
+ #define VRING_CFG_MAC_CTRL_LIFETIME_EN_MSK (0x1)
+ #define VRING_CFG_MAC_CTRL_AGGR_EN_POS (1)
+ #define VRING_CFG_MAC_CTRL_AGGR_EN_LEN (1)
+ #define VRING_CFG_MAC_CTRL_AGGR_EN_MSK (0x2)
+ u8 mac_ctrl;
+
+ #define VRING_CFG_TO_RESOLUTION_VALUE_POS (0)
+ #define VRING_CFG_TO_RESOLUTION_VALUE_LEN (6)
+ #define VRING_CFG_TO_RESOLUTION_VALUE_MSK (0x3F)
+ u8 to_resolution;
+ u8 agg_max_wsize;
+ struct wmi_vring_cfg_schd schd_params;
+} __packed;
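+
+/*
+ * Illustrative packing of the cidxtid field using the definitions above
+ * (not a driver helper): for cid = 3, tid = 1,
+ *
+ *	u8 cidxtid = ((3 << CIDXTID_CID_POS) & CIDXTID_CID_MSK) |
+ *		     ((1 << CIDXTID_TID_POS) & CIDXTID_TID_MSK);
+ *
+ * yields 0x13: CID in the low nibble, TID in the high nibble.
+ */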
+
+enum wmi_vring_cfg_cmd_action {
+ WMI_VRING_CMD_ADD = 0,
+ WMI_VRING_CMD_MODIFY = 1,
+ WMI_VRING_CMD_DELETE = 2,
+};
+
+struct wmi_vring_cfg_cmd {
+ __le32 action;
+ struct wmi_vring_cfg vring_cfg;
+} __packed;
+
+/*
+ * WMI_VRING_BA_EN_CMDID
+ */
+struct wmi_vring_ba_en_cmd {
+ u8 ringid;
+ u8 agg_max_wsize;
+ __le16 ba_timeout;
+} __packed;
+
+/*
+ * WMI_VRING_BA_DIS_CMDID
+ */
+struct wmi_vring_ba_dis_cmd {
+ u8 ringid;
+ u8 reserved;
+ __le16 reason;
+} __packed;
+
+/*
+ * WMI_NOTIFY_REQ_CMDID
+ */
+struct wmi_notify_req_cmd {
+ u8 cid;
+ u8 reserved[3];
+ __le32 interval_usec;
+} __packed;
+
+/*
+ * WMI_CFG_RX_CHAIN_CMDID
+ */
+enum wmi_sniffer_cfg_mode {
+ WMI_SNIFFER_OFF = 0,
+ WMI_SNIFFER_ON = 1,
+};
+
+enum wmi_sniffer_cfg_phy_info_mode {
+ WMI_SNIFFER_PHY_INFO_DISABLED = 0,
+ WMI_SNIFFER_PHY_INFO_ENABLED = 1,
+};
+
+enum wmi_sniffer_cfg_phy_support {
+ WMI_SNIFFER_CP = 0,
+ WMI_SNIFFER_DP = 1,
+ WMI_SNIFFER_BOTH_PHYS = 2,
+};
+
+struct wmi_sniffer_cfg {
+ __le32 mode; /* enum wmi_sniffer_cfg_mode */
+ __le32 phy_info_mode; /* enum wmi_sniffer_cfg_phy_info_mode */
+ __le32 phy_support; /* enum wmi_sniffer_cfg_phy_support */
+ u8 channel;
+ u8 reserved[3];
+} __packed;
+
+enum wmi_cfg_rx_chain_cmd_action {
+ WMI_RX_CHAIN_ADD = 0,
+ WMI_RX_CHAIN_DEL = 1,
+};
+
+enum wmi_cfg_rx_chain_cmd_decap_trans_type {
+ WMI_DECAP_TYPE_802_3 = 0,
+ WMI_DECAP_TYPE_NATIVE_WIFI = 1,
+};
+
+enum wmi_cfg_rx_chain_cmd_nwifi_ds_trans_type {
+ WMI_NWIFI_RX_TRANS_MODE_NO = 0,
+ WMI_NWIFI_RX_TRANS_MODE_PBSS2AP = 1,
+ WMI_NWIFI_RX_TRANS_MODE_PBSS2STA = 2,
+};
+
+struct wmi_cfg_rx_chain_cmd {
+ __le32 action;
+ struct wmi_sw_ring_cfg rx_sw_ring;
+ u8 mid;
+ u8 decap_trans_type;
+
+ #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS (0)
+ #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN (1)
+ #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK (0x1)
+ u8 l2_802_3_offload_ctrl;
+
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS (0)
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_LEN (1)
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_MSK (0x1)
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_POS (1)
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_LEN (1)
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_MSK (0x2)
+ u8 l2_nwifi_offload_ctrl;
+
+ u8 vlan_id;
+ u8 nwifi_ds_trans_type;
+
+ #define L3_L4_CTRL_IPV4_CHECKSUM_EN_POS (0)
+ #define L3_L4_CTRL_IPV4_CHECKSUM_EN_LEN (1)
+ #define L3_L4_CTRL_IPV4_CHECKSUM_EN_MSK (0x1)
+ #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS (1)
+ #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_LEN (1)
+ #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_MSK (0x2)
+ u8 l3_l4_ctrl;
+
+ #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_POS (0)
+ #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_LEN (1)
+ #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_MSK (0x1)
+ #define RING_CTRL_OVERRIDE_WB_THRSH_POS (1)
+ #define RING_CTRL_OVERRIDE_WB_THRSH_LEN (1)
+ #define RING_CTRL_OVERRIDE_WB_THRSH_MSK (0x2)
+ #define RING_CTRL_OVERRIDE_ITR_THRSH_POS (2)
+ #define RING_CTRL_OVERRIDE_ITR_THRSH_LEN (1)
+ #define RING_CTRL_OVERRIDE_ITR_THRSH_MSK (0x4)
+ #define RING_CTRL_OVERRIDE_HOST_THRSH_POS (3)
+ #define RING_CTRL_OVERRIDE_HOST_THRSH_LEN (1)
+ #define RING_CTRL_OVERRIDE_HOST_THRSH_MSK (0x8)
+ u8 ring_ctrl;
+
+ __le16 prefetch_thrsh;
+ __le16 wb_thrsh;
+ __le32 itr_value;
+ __le16 host_thrsh;
+ u8 reserved[2];
+ struct wmi_sniffer_cfg sniffer_cfg;
+} __packed;
+
+/*
+ * WMI_RCP_ADDBA_RESP_CMDID
+ */
+struct wmi_rcp_addba_resp_cmd {
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 dialog_token;
+ __le16 status_code;
+ __le16 ba_param_set; /* ieee80211_ba_parameterset field to send */
+ __le16 ba_timeout;
+} __packed;
+
+/*
+ * WMI_RCP_DELBA_CMDID
+ */
+struct wmi_rcp_delba_cmd {
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 reserved;
+ __le16 reason;
+} __packed;
+
+/*
+ * WMI_RCP_ADDBA_REQ_CMDID
+ */
+struct wmi_rcp_addba_req_cmd {
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 dialog_token;
+ /* ieee80211_ba_parameterset field as it was received */
+ __le16 ba_param_set;
+ __le16 ba_timeout;
+ /* ieee80211_ba_seqstrl field as it was received */
+ __le16 ba_seq_ctrl;
+} __packed;
+
+/*
+ * WMI_SET_MAC_ADDRESS_CMDID
+ */
+struct wmi_set_mac_address_cmd {
+ u8 mac[WMI_MAC_LEN];
+ u8 reserved[2];
+} __packed;
+
+
+/*
+* WMI_EAPOL_TX_CMDID
+*/
+struct wmi_eapol_tx_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 eapol_len;
+ u8 eapol[0];
+} __packed;
+
+/*
+ * WMI_ECHO_CMDID
+ *
+ * Check FW is alive
+ *
+ * WMI_DEEP_ECHO_CMDID
+ *
+ * Check FW and ucode are alive
+ *
+ * Returned event: WMI_ECHO_RSP_EVENTID
+ * same event for both commands
+ */
+struct wmi_echo_cmd {
+ __le32 value;
+} __packed;
+
+/*
+ * WMI Events
+ */
+
+/*
+ * List of Events (target to host)
+ */
+enum wmi_event_id {
+ WMI_IMM_RSP_EVENTID = 0x0000,
+ WMI_READY_EVENTID = 0x1001,
+ WMI_CONNECT_EVENTID = 0x1002,
+ WMI_DISCONNECT_EVENTID = 0x1003,
+ WMI_SCAN_COMPLETE_EVENTID = 0x100a,
+ WMI_REPORT_STATISTICS_EVENTID = 0x100b,
+ WMI_RD_MEM_RSP_EVENTID = 0x1800,
+ WMI_FW_READY_EVENTID = 0x1801,
+ WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x0200,
+ WMI_ECHO_RSP_EVENTID = 0x1803,
+ WMI_CONFIG_MAC_DONE_EVENTID = 0x1805,
+ WMI_CONFIG_PHY_DEBUG_DONE_EVENTID = 0x1806,
+ WMI_ADD_STATION_DONE_EVENTID = 0x1807,
+ WMI_ADD_DEBUG_TX_PCKT_DONE_EVENTID = 0x1808,
+ WMI_PHY_GET_STATISTICS_EVENTID = 0x1809,
+ WMI_FS_TUNE_DONE_EVENTID = 0x180a,
+ WMI_CORR_MEASURE_DONE_EVENTID = 0x180b,
+ WMI_TEMP_SENSE_DONE_EVENTID = 0x180e,
+ WMI_DC_CALIB_DONE_EVENTID = 0x180f,
+ WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811,
+ WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812,
+ WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815,
+ WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 0x1816,
+ WMI_MARLON_R_ACTIVATE_DONE_EVENTID = 0x1817,
+ WMI_MARLON_R_READ_DONE_EVENTID = 0x1818,
+ WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819,
+ WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181a,
+ WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181d,
+
+ WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820,
+ WMI_VRING_CFG_DONE_EVENTID = 0x1821,
+ WMI_RX_ON_DONE_EVENTID = 0x1822,
+ WMI_BA_STATUS_EVENTID = 0x1823,
+ WMI_RCP_ADDBA_REQ_EVENTID = 0x1824,
+ WMI_ADDBA_RESP_SENT_EVENTID = 0x1825,
+ WMI_DELBA_EVENTID = 0x1826,
+ WMI_GET_SSID_EVENTID = 0x1828,
+ WMI_GET_PCP_CHANNEL_EVENTID = 0x182a,
+ WMI_SW_TX_COMPLETE_EVENTID = 0x182b,
+ WMI_RX_OFF_DONE_EVENTID = 0x182c,
+
+ WMI_READ_MAC_RXQ_EVENTID = 0x1830,
+ WMI_READ_MAC_TXQ_EVENTID = 0x1831,
+ WMI_WRITE_MAC_RXQ_EVENTID = 0x1832,
+ WMI_WRITE_MAC_TXQ_EVENTID = 0x1833,
+ WMI_WRITE_MAC_XQ_FIELD_EVENTID = 0x1834,
+
+ WMI_BEAFORMING_MGMT_DONE_EVENTID = 0x1836,
+ WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837,
+ WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
+ WMI_RS_MGMT_DONE_EVENTID = 0x1852,
+ WMI_RF_MGMT_STATUS_EVENTID = 0x1853,
+ WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
+ WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
+
+ /* Performance monitoring events */
+ WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
+ WMI_WBE_LINKDOWN_EVENTID = 0x1861,
+
+ WMI_BF_CTRL_DONE_EVENTID = 0x1862,
+ WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
+ WMI_GET_STATUS_DONE_EVENTID = 0x1864,
+
+ WMI_UNIT_TEST_EVENTID = 0x1900,
+ WMI_FLASH_READ_DONE_EVENTID = 0x1902,
+ WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
+
+ WMI_SET_CHANNEL_EVENTID = 0x9000,
+ WMI_ASSOC_REQ_EVENTID = 0x9001,
+ WMI_EAPOL_RX_EVENTID = 0x9002,
+ WMI_MAC_ADDR_RESP_EVENTID = 0x9003,
+ WMI_FW_VER_EVENTID = 0x9004,
+};
+
+/*
+ * Events data structures
+ */
+
+/*
+ * WMI_RF_MGMT_STATUS_EVENTID
+ */
+enum wmi_rf_status {
+ WMI_RF_ENABLED = 0,
+ WMI_RF_DISABLED_HW = 1,
+ WMI_RF_DISABLED_SW = 2,
+ WMI_RF_DISABLED_HW_SW = 3,
+};
+
+struct wmi_rf_mgmt_status_event {
+ __le32 rf_status;
+} __packed;
+
+/*
+ * WMI_GET_STATUS_DONE_EVENTID
+ */
+struct wmi_get_status_done_event {
+ __le32 is_associated;
+ u8 cid;
+ u8 reserved0[3];
+ u8 bssid[WMI_MAC_LEN];
+ u8 channel;
+ u8 reserved1;
+ u8 network_type;
+ u8 reserved2[3];
+ __le32 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+ __le32 rf_status;
+ __le32 is_secured;
+} __packed;
+
+/*
+ * WMI_FW_VER_EVENTID
+ */
+struct wmi_fw_ver_event {
+ u8 major;
+ u8 minor;
+ __le16 subminor;
+ __le16 build;
+} __packed;
+
+/*
+* WMI_MAC_ADDR_RESP_EVENTID
+*/
+struct wmi_mac_addr_resp_event {
+ u8 mac[WMI_MAC_LEN];
+ u8 auth_mode;
+ u8 crypt_mode;
+ __le32 offload_mode;
+} __packed;
+
+/*
+* WMI_EAPOL_RX_EVENTID
+*/
+struct wmi_eapol_rx_event {
+ u8 src_mac[WMI_MAC_LEN];
+ __le16 eapol_len;
+ u8 eapol[0];
+} __packed;
+
+/*
+* WMI_READY_EVENTID
+*/
+enum wmi_phy_capability {
+ WMI_11A_CAPABILITY = 1,
+ WMI_11G_CAPABILITY = 2,
+ WMI_11AG_CAPABILITY = 3,
+ WMI_11NA_CAPABILITY = 4,
+ WMI_11NG_CAPABILITY = 5,
+ WMI_11NAG_CAPABILITY = 6,
+ WMI_11AD_CAPABILITY = 7,
+ WMI_11N_CAPABILITY_OFFSET = WMI_11NA_CAPABILITY - WMI_11A_CAPABILITY,
+};
+
+struct wmi_ready_event {
+ __le32 sw_version;
+ __le32 abi_version;
+ u8 mac[WMI_MAC_LEN];
+ u8 phy_capability; /* enum wmi_phy_capability */
+ u8 reserved;
+} __packed;
+
+/*
+ * WMI_NOTIFY_REQ_DONE_EVENTID
+ */
+struct wmi_notify_req_done_event {
+ __le32 status;
+ __le64 tsf;
+ __le32 snr_val;
+ __le32 tx_tpt;
+ __le32 tx_goodput;
+ __le32 rx_goodput;
+ __le16 bf_mcs;
+ __le16 my_rx_sector;
+ __le16 my_tx_sector;
+ __le16 other_rx_sector;
+ __le16 other_tx_sector;
+ __le16 range;
+} __packed;
+
+/*
+ * WMI_CONNECT_EVENTID
+ */
+struct wmi_connect_event {
+ u8 channel;
+ u8 reserved0;
+ u8 bssid[WMI_MAC_LEN];
+ __le16 listen_interval;
+ __le16 beacon_interval;
+ u8 network_type;
+ u8 reserved1[3];
+ u8 beacon_ie_len;
+ u8 assoc_req_len;
+ u8 assoc_resp_len;
+ u8 cid;
+ u8 reserved2[3];
+ u8 assoc_info[0];
+} __packed;
+
+/*
+ * WMI_DISCONNECT_EVENTID
+ */
+enum wmi_disconnect_reason {
+ WMI_DIS_REASON_NO_NETWORK_AVAIL = 1,
+ WMI_DIS_REASON_LOST_LINK = 2, /* bmiss */
+ WMI_DIS_REASON_DISCONNECT_CMD = 3,
+ WMI_DIS_REASON_BSS_DISCONNECTED = 4,
+ WMI_DIS_REASON_AUTH_FAILED = 5,
+ WMI_DIS_REASON_ASSOC_FAILED = 6,
+ WMI_DIS_REASON_NO_RESOURCES_AVAIL = 7,
+ WMI_DIS_REASON_CSERV_DISCONNECT = 8,
+ WMI_DIS_REASON_INVALID_PROFILE = 10,
+ WMI_DIS_REASON_DOT11H_CHANNEL_SWITCH = 11,
+ WMI_DIS_REASON_PROFILE_MISMATCH = 12,
+ WMI_DIS_REASON_CONNECTION_EVICTED = 13,
+ WMI_DIS_REASON_IBSS_MERGE = 14,
+};
+
+struct wmi_disconnect_event {
+ __le16 protocol_reason_status; /* reason code, see 802.11 spec. */
+ u8 bssid[WMI_MAC_LEN]; /* set if known */
+ u8 disconnect_reason; /* see enum wmi_disconnect_reason */
+ u8 assoc_resp_len;
+ u8 assoc_info[0];
+} __packed;
+
+/*
+ * WMI_SCAN_COMPLETE_EVENTID
+ */
+struct wmi_scan_complete_event {
+ __le32 status;
+} __packed;
+
+/*
+ * WMI_BA_STATUS_EVENTID
+ */
+enum wmi_vring_ba_status {
+ WMI_BA_AGREED = 0,
+ WMI_BA_NON_AGREED = 1,
+};
+
+struct wmi_vring_ba_status_event {
+ __le16 status;
+ u8 reserved[2];
+ u8 ringid;
+ u8 agg_wsize;
+ __le16 ba_timeout;
+} __packed;
+
+/*
+ * WMI_DELBA_EVENTID
+ */
+struct wmi_delba_event {
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 from_initiator;
+ __le16 reason;
+} __packed;
+
+/*
+ * WMI_VRING_CFG_DONE_EVENTID
+ */
+enum wmi_vring_cfg_done_event_status {
+ WMI_VRING_CFG_SUCCESS = 0,
+ WMI_VRING_CFG_FAILURE = 1,
+};
+
+struct wmi_vring_cfg_done_event {
+ u8 ringid;
+ u8 status;
+ u8 reserved[2];
+ __le32 tx_vring_tail_ptr;
+} __packed;
+
+/*
+ * WMI_ADDBA_RESP_SENT_EVENTID
+ */
+enum wmi_rcp_addba_resp_sent_event_status {
+ WMI_ADDBA_SUCCESS = 0,
+ WMI_ADDBA_FAIL = 1,
+};
+
+struct wmi_rcp_addba_resp_sent_event {
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 reserved;
+ __le16 status;
+} __packed;
+
+/*
+ * WMI_RCP_ADDBA_REQ_EVENTID
+ */
+struct wmi_rcp_addba_req_event {
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 dialog_token;
+ __le16 ba_param_set; /* ieee80211_ba_parameterset as received */
+ __le16 ba_timeout;
+ __le16 ba_seq_ctrl; /* ieee80211_ba_seqstrl field as received */
+} __packed;
+
+/*
+ * WMI_CFG_RX_CHAIN_DONE_EVENTID
+ */
+enum wmi_cfg_rx_chain_done_event_status {
+ WMI_CFG_RX_CHAIN_SUCCESS = 1,
+};
+
+struct wmi_cfg_rx_chain_done_event {
+ __le32 rx_ring_tail_ptr; /* Rx V-Ring Tail pointer */
+ __le32 status;
+} __packed;
+
+/*
+ * WMI_WBE_LINKDOWN_EVENTID
+ */
+enum wmi_wbe_link_down_event_reason {
+ WMI_WBE_REASON_USER_REQUEST = 0,
+ WMI_WBE_REASON_RX_DISASSOC = 1,
+ WMI_WBE_REASON_BAD_PHY_LINK = 2,
+};
+
+struct wmi_wbe_link_down_event {
+ u8 cid;
+ u8 reserved[3];
+ __le32 reason;
+} __packed;
+
+/*
+ * WMI_DATA_PORT_OPEN_EVENTID
+ */
+struct wmi_data_port_open_event {
+ u8 cid;
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_GET_PCP_CHANNEL_EVENTID
+ */
+struct wmi_get_pcp_channel_event {
+ u8 channel;
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_SW_TX_COMPLETE_EVENTID
+ */
+enum wmi_sw_tx_status {
+ WMI_TX_SW_STATUS_SUCCESS = 0,
+ WMI_TX_SW_STATUS_FAILED_NO_RESOURCES = 1,
+ WMI_TX_SW_STATUS_FAILED_TX = 2,
+};
+
+struct wmi_sw_tx_complete_event {
+ u8 status; /* enum wmi_sw_tx_status */
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_GET_SSID_EVENTID
+ */
+struct wmi_get_ssid_event {
+ __le32 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+} __packed;
+
+/*
+ * WMI_RX_MGMT_PACKET_EVENTID
+ */
+struct wmi_rx_mgmt_info {
+ u8 mcs;
+ s8 snr;
+ __le16 range;
+ __le16 stype;
+ __le16 status;
+ __le32 len;
+ u8 qid;
+ u8 mid;
+ u8 cid;
+ u8 channel; /* From Radio MNGR */
+} __packed;
+
+struct wmi_rx_mgmt_packet_event {
+ struct wmi_rx_mgmt_info info;
+ u8 payload[0];
+} __packed;
+
+/*
+ * WMI_ECHO_RSP_EVENTID
+ */
+struct wmi_echo_event {
+ __le32 echoed_value;
+} __packed;
+
+#endif /* __WILOCITY_WMI_H__ */
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index b298e5d68be2..10e288d470e7 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -7,6 +7,7 @@
#include <linux/hw_random.h>
#include <linux/bcma/bcma.h>
#include <linux/ssb/ssb.h>
+#include <linux/completion.h>
#include <net/mac80211.h>
#include "debugfs.h"
@@ -722,6 +723,10 @@ enum b43_firmware_file_type {
struct b43_request_fw_context {
/* The device we are requesting the fw for. */
struct b43_wldev *dev;
+ /* a completion event structure needed if this call is asynchronous */
+ struct completion fw_load_complete;
+ /* a pointer to the firmware object */
+ const struct firmware *blob;
/* The type of firmware to request. */
enum b43_firmware_file_type req_type;
/* Error messages for each firmware type. */
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 16ab280359bd..806e34c19281 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2088,11 +2088,18 @@ static void b43_print_fw_helptext(struct b43_wl *wl, bool error)
b43warn(wl, text);
}
+static void b43_fw_cb(const struct firmware *firmware, void *context)
+{
+ struct b43_request_fw_context *ctx = context;
+
+ ctx->blob = firmware;
+ complete(&ctx->fw_load_complete);
+}
+
int b43_do_request_fw(struct b43_request_fw_context *ctx,
const char *name,
- struct b43_firmware_file *fw)
+ struct b43_firmware_file *fw, bool async)
{
- const struct firmware *blob;
struct b43_fw_header *hdr;
u32 size;
int err;
@@ -2131,11 +2138,31 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
B43_WARN_ON(1);
return -ENOSYS;
}
- err = request_firmware(&blob, ctx->fwname, ctx->dev->dev->dev);
+ if (async) {
+ /* do this part asynchronously */
+ init_completion(&ctx->fw_load_complete);
+ err = request_firmware_nowait(THIS_MODULE, 1, ctx->fwname,
+ ctx->dev->dev->dev, GFP_KERNEL,
+ ctx, b43_fw_cb);
+ if (err < 0) {
+ pr_err("Unable to load firmware\n");
+ return err;
+ }
+ /* stall here until fw ready */
+ wait_for_completion(&ctx->fw_load_complete);
+ if (ctx->blob)
+ goto fw_ready;
+ /* On some ARM systems the async request fails, but the next sync
+ * request works. For this reason, we fall through here.
+ */
+ }
+ err = request_firmware(&ctx->blob, ctx->fwname,
+ ctx->dev->dev->dev);
if (err == -ENOENT) {
snprintf(ctx->errors[ctx->req_type],
sizeof(ctx->errors[ctx->req_type]),
- "Firmware file \"%s\" not found\n", ctx->fwname);
+ "Firmware file \"%s\" not found\n",
+ ctx->fwname);
return err;
} else if (err) {
snprintf(ctx->errors[ctx->req_type],
@@ -2144,14 +2171,15 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
ctx->fwname, err);
return err;
}
- if (blob->size < sizeof(struct b43_fw_header))
+fw_ready:
+ if (ctx->blob->size < sizeof(struct b43_fw_header))
goto err_format;
- hdr = (struct b43_fw_header *)(blob->data);
+ hdr = (struct b43_fw_header *)(ctx->blob->data);
switch (hdr->type) {
case B43_FW_TYPE_UCODE:
case B43_FW_TYPE_PCM:
size = be32_to_cpu(hdr->size);
- if (size != blob->size - sizeof(struct b43_fw_header))
+ if (size != ctx->blob->size - sizeof(struct b43_fw_header))
goto err_format;
/* fallthrough */
case B43_FW_TYPE_IV:
@@ -2162,7 +2190,7 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
goto err_format;
}
- fw->data = blob;
+ fw->data = ctx->blob;
fw->filename = name;
fw->type = ctx->req_type;
@@ -2172,7 +2200,7 @@ err_format:
snprintf(ctx->errors[ctx->req_type],
sizeof(ctx->errors[ctx->req_type]),
"Firmware file \"%s\" format error.\n", ctx->fwname);
- release_firmware(blob);
+ release_firmware(ctx->blob);
return -EPROTO;
}
@@ -2223,7 +2251,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
goto err_no_ucode;
}
}
- err = b43_do_request_fw(ctx, filename, &fw->ucode);
+ err = b43_do_request_fw(ctx, filename, &fw->ucode, true);
if (err)
goto err_load;
@@ -2235,7 +2263,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
else
goto err_no_pcm;
fw->pcm_request_failed = false;
- err = b43_do_request_fw(ctx, filename, &fw->pcm);
+ err = b43_do_request_fw(ctx, filename, &fw->pcm, false);
if (err == -ENOENT) {
/* We did not find a PCM file? Not fatal, but
* core rev <= 10 must do without hwcrypto then. */
@@ -2296,7 +2324,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
default:
goto err_no_initvals;
}
- err = b43_do_request_fw(ctx, filename, &fw->initvals);
+ err = b43_do_request_fw(ctx, filename, &fw->initvals, false);
if (err)
goto err_load;
@@ -2355,7 +2383,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
default:
goto err_no_initvals;
}
- err = b43_do_request_fw(ctx, filename, &fw->initvals_band);
+ err = b43_do_request_fw(ctx, filename, &fw->initvals_band, false);
if (err)
goto err_load;
diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/b43/main.h
index 8c684cd33529..abac25ee958d 100644
--- a/drivers/net/wireless/b43/main.h
+++ b/drivers/net/wireless/b43/main.h
@@ -137,9 +137,8 @@ void b43_mac_phy_clock_set(struct b43_wldev *dev, bool on);
struct b43_request_fw_context;
-int b43_do_request_fw(struct b43_request_fw_context *ctx,
- const char *name,
- struct b43_firmware_file *fw);
+int b43_do_request_fw(struct b43_request_fw_context *ctx, const char *name,
+ struct b43_firmware_file *fw, bool async);
void b43_do_release_fw(struct b43_firmware_file *fw);
#endif /* B43_MAIN_H_ */
diff --git a/drivers/net/wireless/bcmdhd/Kconfig b/drivers/net/wireless/bcmdhd/Kconfig
new file mode 100644
index 000000000000..231ae187f404
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/Kconfig
@@ -0,0 +1,47 @@
+config BCMDHD
+ tristate "Broadcom 4329/30 wireless cards support"
+ depends on MMC
+ ---help---
+ This module adds support for wireless adapters based on the
+ Broadcom 4329/30 chipsets.
+
+ This driver uses the kernel's wireless extensions subsystem.
+
+ If you choose to build it as a module, it will be called bcmdhd.
+ Say M if unsure.
+
+config BCMDHD_FW_PATH
+ depends on BCMDHD
+ string "Firmware path"
+ default "/system/etc/firmware/fw_bcmdhd.bin"
+ ---help---
+ Path to the firmware file.
+
+config BCMDHD_NVRAM_PATH
+ depends on BCMDHD
+ string "NVRAM path"
+ default "/system/etc/wifi/bcmdhd.cal"
+ ---help---
+ Path to the calibration file.
+
+config BCMDHD_WEXT
+ bool "Enable WEXT support"
+ depends on BCMDHD && CFG80211 = n
+ select WIRELESS_EXT
+ select WEXT_PRIV
+ help
+ Enables WEXT support
+
+config DHD_USE_STATIC_BUF
+ bool "Enable memory preallocation"
+ depends on BCMDHD
+ default n
+ ---help---
+ Use memory preallocated by the platform
+
+config DHD_USE_SCHED_SCAN
+ bool "Use CFG80211 sched scan"
+ depends on BCMDHD && CFG80211
+ default n
+ ---help---
+ Use CFG80211 sched scan
diff --git a/drivers/net/wireless/bcmdhd/Makefile b/drivers/net/wireless/bcmdhd/Makefile
new file mode 100644
index 000000000000..7925661ce62b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/Makefile
@@ -0,0 +1,39 @@
+# bcmdhd
+# -DDHDTHREAD -DDHD_GPL -DDHD_SCHED -DDHD_DEBUG -DSDTEST -DBDC -DTOE \
+# -DDHD_BCMEVENTS -DSHOW_EVENTS -DDONGLEOVERLAYS -DBCMDBG \
+# -DCUSTOMER_HW2 -DOOB_INTR_ONLY -DHW_OOB \
+# -DMMC_SDIO_ABORT -DBCMSDIO -DBCMLXSDMMC -DBCMPLATFORM_BUS -DWLP2P \
+# -DNEW_COMPAT_WIRELESS -DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT \
+# -DKEEP_ALIVE -DCSCAN -DGET_CUSTOM_MAC_ENABLE -DPKT_FILTER_SUPPORT \
+# -DEMBEDDED_PLATFORM -DENABLE_INSMOD_NO_FW_LOAD -DPNO_SUPPORT \
+
+DHDCFLAGS = -Wall -Wstrict-prototypes -Dlinux -DBCMDRIVER \
+ -DBCMDONGLEHOST -DUNRELEASEDCHIP -DBCMDMA32 -DBCMFILEIMAGE \
+ -DDHDTHREAD -DDHD_DEBUG -DSDTEST -DBDC -DTOE \
+ -DDHD_BCMEVENTS -DSHOW_EVENTS -DPROP_TXSTATUS -DBCMDBG \
+ -DCUSTOMER_HW2 -DOOB_INTR_ONLY -DHW_OOB \
+ -DMMC_SDIO_ABORT -DBCMSDIO -DBCMLXSDMMC -DBCMPLATFORM_BUS -DWLP2P \
+ -DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT \
+ -DKEEP_ALIVE -DGET_CUSTOM_MAC_ENABLE -DPKT_FILTER_SUPPORT \
+ -DEMBEDDED_PLATFORM -DENABLE_INSMOD_NO_FW_LOAD -DPNO_SUPPORT \
+ -Idrivers/net/wireless/bcmdhd -Idrivers/net/wireless/bcmdhd/include
+
+DHDOFILES = aiutils.o bcmsdh_sdmmc_linux.o dhd_linux.o siutils.o bcmutils.o \
+ dhd_linux_sched.o dhd_sdio.o bcmwifi_channels.o bcmevent.o hndpmu.o \
+ bcmsdh.o dhd_cdc.o bcmsdh_linux.o dhd_common.o linux_osl.o \
+ bcmsdh_sdmmc.o dhd_custom_gpio.o sbutils.o wldev_common.o wl_android.o
+
+obj-$(CONFIG_BCMDHD) += bcmdhd.o
+bcmdhd-objs += $(DHDOFILES)
+ifneq ($(CONFIG_WIRELESS_EXT),)
+bcmdhd-objs += wl_iw.o
+DHDCFLAGS += -DSOFTAP -DUSE_IW
+endif
+ifneq ($(CONFIG_CFG80211),)
+bcmdhd-objs += wl_cfg80211.o wl_cfgp2p.o wl_linux_mon.o dhd_cfg80211.o
+DHDCFLAGS += -DWL_CFG80211 -DWL_CFG80211_STA_EVENT -DWL_ENABLE_P2P_IF
+endif
+EXTRA_CFLAGS = $(DHDCFLAGS)
+ifeq ($(CONFIG_BCMDHD),m)
+EXTRA_LDFLAGS += --strip-debug
+endif
diff --git a/drivers/net/wireless/bcmdhd/aiutils.c b/drivers/net/wireless/bcmdhd/aiutils.c
new file mode 100644
index 000000000000..f1db5a2b9351
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/aiutils.c
@@ -0,0 +1,836 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: aiutils.c 321247 2012-03-14 21:14:33Z $
+ */
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+
+#include "siutils_priv.h"
+
+#define BCM47162_DMP() (0)
+#define BCM5357_DMP() (0)
+#define remap_coreid(sih, coreid) (coreid)
+#define remap_corerev(sih, corerev) (corerev)
+
+
+
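+/* Walk the enumeration ROM: return the next entry for which (ent & mask) == match,
+ * skipping invalid entries and stopping at the END marker (or returning the next
+ * raw word when mask is 0).
+ */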
+static uint32
+get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match)
+{
+ uint32 ent;
+ uint inv = 0, nom = 0;
+
+ while (TRUE) {
+ ent = R_REG(si_osh(sih), *eromptr);
+ (*eromptr)++;
+
+ if (mask == 0)
+ break;
+
+ if ((ent & ER_VALID) == 0) {
+ inv++;
+ continue;
+ }
+
+ if (ent == (ER_END | ER_VALID))
+ break;
+
+ if ((ent & mask) == match)
+ break;
+
+ nom++;
+ }
+
+ SI_VMSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent));
+ if (inv + nom) {
+ SI_VMSG((" after %d invalid and %d non-matching entries\n", inv, nom));
+ }
+ return ent;
+}
+
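+/* Parse the next address space descriptor for slave port 'sp', address space 'ad'
+ * and type 'st': fill in the 32/64-bit base address and size, or rewind the ERom
+ * pointer and return 0 if the next entry is not the expected descriptor.
+ */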
+static uint32
+get_asd(si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh,
+ uint32 *sizel, uint32 *sizeh)
+{
+ uint32 asd, sz, szd;
+
+ asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
+ if (((asd & ER_TAG1) != ER_ADD) ||
+ (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
+ ((asd & AD_ST_MASK) != st)) {
+
+ (*eromptr)--;
+ return 0;
+ }
+ *addrl = asd & AD_ADDR_MASK;
+ if (asd & AD_AG32)
+ *addrh = get_erom_ent(sih, eromptr, 0, 0);
+ else
+ *addrh = 0;
+ *sizeh = 0;
+ sz = asd & AD_SZ_MASK;
+ if (sz == AD_SZ_SZD) {
+ szd = get_erom_ent(sih, eromptr, 0, 0);
+ *sizel = szd & SD_SZ_MASK;
+ if (szd & SD_SG32)
+ *sizeh = get_erom_ent(sih, eromptr, 0, 0);
+ } else
+ *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
+
+ SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
+ sp, ad, st, *sizeh, *sizel, *addrh, *addrl));
+
+ return asd;
+}
+
+static void
+ai_hwfixup(si_info_t *sii)
+{
+}
+
+
+
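+/* Scan the enumeration ROM and record, for each discovered core, its identification
+ * words, base and secondary address spaces and wrapper address in the si_info state.
+ */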
+void
+ai_scan(si_t *sih, void *regs, uint devid)
+{
+ si_info_t *sii = SI_INFO(sih);
+ chipcregs_t *cc = (chipcregs_t *)regs;
+ uint32 erombase, *eromptr, *eromlim;
+
+ erombase = R_REG(sii->osh, &cc->eromptr);
+
+ switch (BUSTYPE(sih->bustype)) {
+ case SI_BUS:
+ eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
+ break;
+
+ case PCI_BUS:
+
+ sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);
+
+
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
+ eromptr = regs;
+ break;
+
+ case SPI_BUS:
+ case SDIO_BUS:
+ eromptr = (uint32 *)(uintptr)erombase;
+ break;
+
+ case PCMCIA_BUS:
+ default:
+ SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n", sih->bustype));
+ ASSERT(0);
+ return;
+ }
+ eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
+
+ SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n",
+ regs, erombase, eromptr, eromlim));
+ while (eromptr < eromlim) {
+ uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
+ uint32 mpd, asd, addrl, addrh, sizel, sizeh;
+ uint i, j, idx;
+ bool br;
+
+ br = FALSE;
+
+
+ cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
+ if (cia == (ER_END | ER_VALID)) {
+ SI_VMSG(("Found END of erom after %d cores\n", sii->numcores));
+ ai_hwfixup(sii);
+ return;
+ }
+
+ cib = get_erom_ent(sih, &eromptr, 0, 0);
+
+ if ((cib & ER_TAG) != ER_CI) {
+ SI_ERROR(("CIA not followed by CIB\n"));
+ goto error;
+ }
+
+ cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
+ mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
+ crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
+ nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
+ nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
+ nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
+ nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
+
+#ifdef BCMDBG_SI
+ SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, "
+ "nsw = %d, nmp = %d & nsp = %d\n",
+ mfg, cid, crev, eromptr - 1, nmw, nsw, nmp, nsp));
+#else
+ BCM_REFERENCE(crev);
+#endif
+
+ if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
+ continue;
+ if ((nmw + nsw == 0)) {
+
+ if (cid == OOB_ROUTER_CORE_ID) {
+ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
+ &addrl, &addrh, &sizel, &sizeh);
+ if (asd != 0) {
+ sii->oob_router = addrl;
+ }
+ }
+ if (cid != GMAC_COMMON_4706_CORE_ID)
+ continue;
+ }
+
+ idx = sii->numcores;
+
+ sii->cia[idx] = cia;
+ sii->cib[idx] = cib;
+ sii->coreid[idx] = remap_coreid(sih, cid);
+
+ for (i = 0; i < nmp; i++) {
+ mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
+ if ((mpd & ER_TAG) != ER_MP) {
+ SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
+ goto error;
+ }
+ SI_VMSG((" Master port %d, mp: %d id: %d\n", i,
+ (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
+ (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
+ }
+
+
+ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
+ if (asd == 0) {
+
+ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
+ &sizel, &sizeh);
+ if (asd != 0)
+ br = TRUE;
+ else
+ if ((addrh != 0) || (sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+ SI_ERROR(("First Slave ASD for core 0x%04x malformed "
+ "(0x%08x)\n", cid, asd));
+ goto error;
+ }
+ }
+ sii->coresba[idx] = addrl;
+ sii->coresba_size[idx] = sizel;
+
+ j = 1;
+ do {
+ asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
+ &sizel, &sizeh);
+ if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
+ sii->coresba2[idx] = addrl;
+ sii->coresba2_size[idx] = sizel;
+ }
+ j++;
+ } while (asd != 0);
+
+
+ for (i = 1; i < nsp; i++) {
+ j = 0;
+ do {
+ asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
+ &sizel, &sizeh);
+
+ if (asd == 0)
+ break;
+ j++;
+ } while (1);
+ if (j == 0) {
+ SI_ERROR((" SP %d has no address descriptors\n", i));
+ goto error;
+ }
+ }
+
+
+ for (i = 0; i < nmw; i++) {
+ asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
+ &sizel, &sizeh);
+ if (asd == 0) {
+ SI_ERROR(("Missing descriptor for MW %d\n", i));
+ goto error;
+ }
+ if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+ SI_ERROR(("Master wrapper %d is not 4KB\n", i));
+ goto error;
+ }
+ if (i == 0)
+ sii->wrapba[idx] = addrl;
+ }
+
+
+ for (i = 0; i < nsw; i++) {
+ uint fwp = (nsp == 1) ? 0 : 1;
+ asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
+ &sizel, &sizeh);
+ if (asd == 0) {
+ SI_ERROR(("Missing descriptor for SW %d\n", i));
+ goto error;
+ }
+ if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+ SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
+ goto error;
+ }
+ if ((nmw == 0) && (i == 0))
+ sii->wrapba[idx] = addrl;
+ }
+
+
+
+ if (br)
+ continue;
+
+
+ sii->numcores++;
+ }
+
+ SI_ERROR(("Reached end of erom without finding END"));
+
+error:
+ sii->numcores = 0;
+ return;
+}
+
+
+void *
+ai_setcoreidx(si_t *sih, uint coreidx)
+{
+ si_info_t *sii = SI_INFO(sih);
+ uint32 addr, wrap;
+ void *regs;
+
+ if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
+ return (NULL);
+
+ addr = sii->coresba[coreidx];
+ wrap = sii->wrapba[coreidx];
+
+
+ ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
+
+ switch (BUSTYPE(sih->bustype)) {
+ case SI_BUS:
+
+ if (!sii->regs[coreidx]) {
+ sii->regs[coreidx] = REG_MAP(addr, SI_CORE_SIZE);
+ ASSERT(GOODREGS(sii->regs[coreidx]));
+ }
+ sii->curmap = regs = sii->regs[coreidx];
+ if (!sii->wrappers[coreidx]) {
+ sii->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
+ ASSERT(GOODREGS(sii->wrappers[coreidx]));
+ }
+ sii->curwrap = sii->wrappers[coreidx];
+ break;
+
+
+ case SPI_BUS:
+ case SDIO_BUS:
+ sii->curmap = regs = (void *)((uintptr)addr);
+ sii->curwrap = (void *)((uintptr)wrap);
+ break;
+
+ case PCMCIA_BUS:
+ default:
+ ASSERT(0);
+ regs = NULL;
+ break;
+ }
+
+ sii->curmap = regs;
+ sii->curidx = coreidx;
+
+ return regs;
+}
+
+void
+ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
+{
+ si_info_t *sii = SI_INFO(sih);
+ chipcregs_t *cc = NULL;
+ uint32 erombase, *eromptr, *eromlim;
+ uint i, j, cidx;
+ uint32 cia, cib, nmp, nsp;
+ uint32 asd, addrl, addrh, sizel, sizeh;
+
+ for (i = 0; i < sii->numcores; i++) {
+ if (sii->coreid[i] == CC_CORE_ID) {
+ cc = (chipcregs_t *)sii->regs[i];
+ break;
+ }
+ }
+ if (cc == NULL)
+ goto error;
+
+ erombase = R_REG(sii->osh, &cc->eromptr);
+ eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
+ eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
+
+ cidx = sii->curidx;
+ cia = sii->cia[cidx];
+ cib = sii->cib[cidx];
+
+ nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
+ nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
+
+
+ while (eromptr < eromlim) {
+ if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) &&
+ (get_erom_ent(sih, &eromptr, 0, 0) == cib)) {
+ break;
+ }
+ }
+
+
+ for (i = 0; i < nmp; i++)
+ get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
+
+
+ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
+ if (asd == 0) {
+
+ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
+ &sizel, &sizeh);
+ }
+
+ j = 1;
+ do {
+ asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
+ &sizel, &sizeh);
+ j++;
+ } while (asd != 0);
+
+
+ for (i = 1; i < nsp; i++) {
+ j = 0;
+ do {
+ asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
+ &sizel, &sizeh);
+ if (asd == 0)
+ break;
+
+ if (!asidx--) {
+ *addr = addrl;
+ *size = sizel;
+ return;
+ }
+ j++;
+ } while (1);
+
+ if (j == 0) {
+ SI_ERROR((" SP %d has no address descriptors\n", i));
+ break;
+ }
+ }
+
+error:
+ *size = 0;
+ return;
+}
+
+
+int
+ai_numaddrspaces(si_t *sih)
+{
+ return 2;
+}
+
+
+uint32
+ai_addrspace(si_t *sih, uint asidx)
+{
+ si_info_t *sii;
+ uint cidx;
+
+ sii = SI_INFO(sih);
+ cidx = sii->curidx;
+
+ if (asidx == 0)
+ return sii->coresba[cidx];
+ else if (asidx == 1)
+ return sii->coresba2[cidx];
+ else {
+ SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
+ __FUNCTION__, asidx));
+ return 0;
+ }
+}
+
+
+uint32
+ai_addrspacesize(si_t *sih, uint asidx)
+{
+ si_info_t *sii;
+ uint cidx;
+
+ sii = SI_INFO(sih);
+ cidx = sii->curidx;
+
+ if (asidx == 0)
+ return sii->coresba_size[cidx];
+ else if (asidx == 1)
+ return sii->coresba2_size[cidx];
+ else {
+ SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
+ __FUNCTION__, asidx));
+ return 0;
+ }
+}
+
+uint
+ai_flag(si_t *sih)
+{
+ si_info_t *sii;
+ aidmp_t *ai;
+
+ sii = SI_INFO(sih);
+ if (BCM47162_DMP()) {
+ SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __FUNCTION__));
+ return sii->curidx;
+ }
+ if (BCM5357_DMP()) {
+ SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__));
+ return sii->curidx;
+ }
+ ai = sii->curwrap;
+
+ return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
+}
+
+void
+ai_setint(si_t *sih, int siflag)
+{
+}
+
+uint
+ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
+{
+ si_info_t *sii = SI_INFO(sih);
+ uint32 *map = (uint32 *) sii->curwrap;
+
+ if (mask || val) {
+ uint32 w = R_REG(sii->osh, map+(offset/4));
+ w &= ~mask;
+ w |= val;
+ W_REG(sii->osh, map+(offset/4), val);
+ }
+
+ return (R_REG(sii->osh, map+(offset/4)));
+}
+
+uint
+ai_corevendor(si_t *sih)
+{
+ si_info_t *sii;
+ uint32 cia;
+
+ sii = SI_INFO(sih);
+ cia = sii->cia[sii->curidx];
+ return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
+}
+
+uint
+ai_corerev(si_t *sih)
+{
+ si_info_t *sii;
+ uint32 cib;
+
+ sii = SI_INFO(sih);
+ cib = sii->cib[sii->curidx];
+ return remap_corerev(sih, (cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
+}
+
+bool
+ai_iscoreup(si_t *sih)
+{
+ si_info_t *sii;
+ aidmp_t *ai;
+
+ sii = SI_INFO(sih);
+ ai = sii->curwrap;
+
+ return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) &&
+ ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
+}
+
+
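+/* Read-modify-write a register of an arbitrary core. When the target registers are
+ * directly addressable (SI bus, or the chipcommon/bus core over PCI) a fixed mapping
+ * is used; otherwise interrupts are blocked and the current core is switched
+ * temporarily.
+ */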
+uint
+ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+ uint origidx = 0;
+ uint32 *r = NULL;
+ uint w;
+ uint intr_val = 0;
+ bool fast = FALSE;
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ ASSERT(GOODIDX(coreidx));
+ ASSERT(regoff < SI_CORE_SIZE);
+ ASSERT((val & ~mask) == 0);
+
+ if (coreidx >= SI_MAXCORES)
+ return 0;
+
+ if (BUSTYPE(sih->bustype) == SI_BUS) {
+
+ fast = TRUE;
+
+ if (!sii->regs[coreidx]) {
+ sii->regs[coreidx] = REG_MAP(sii->coresba[coreidx],
+ SI_CORE_SIZE);
+ ASSERT(GOODREGS(sii->regs[coreidx]));
+ }
+ r = (uint32 *)((uchar *)sii->regs[coreidx] + regoff);
+ } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+
+
+ if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+
+
+ fast = TRUE;
+ r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
+ } else if (sii->pub.buscoreidx == coreidx) {
+
+ fast = TRUE;
+ if (SI_FAST(sii))
+ r = (uint32 *)((char *)sii->curmap +
+ PCI_16KB0_PCIREGS_OFFSET + regoff);
+ else
+ r = (uint32 *)((char *)sii->curmap +
+ ((regoff >= SBCONFIGOFF) ?
+ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+ regoff);
+ }
+ }
+
+ if (!fast) {
+ INTR_OFF(sii, intr_val);
+
+
+ origidx = si_coreidx(&sii->pub);
+
+
+ r = (uint32*) ((uchar*) ai_setcoreidx(&sii->pub, coreidx) + regoff);
+ }
+ ASSERT(r != NULL);
+
+
+ if (mask || val) {
+ w = (R_REG(sii->osh, r) & ~mask) | val;
+ W_REG(sii->osh, r, w);
+ }
+
+
+ w = R_REG(sii->osh, r);
+
+ if (!fast) {
+
+ if (origidx != coreidx)
+ ai_setcoreidx(&sii->pub, origidx);
+
+ INTR_RESTORE(sii, intr_val);
+ }
+
+ return (w);
+}
+
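+/* Put the current core into reset: wait for outstanding backplane transactions to
+ * drain, latch the requested ioctrl bits and then assert resetctrl.
+ */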
+void
+ai_core_disable(si_t *sih, uint32 bits)
+{
+ si_info_t *sii;
+ volatile uint32 dummy;
+ uint32 status;
+ aidmp_t *ai;
+
+ sii = SI_INFO(sih);
+
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+
+ if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET)
+ return;
+
+
+ SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+
+ if (status != 0) {
+
+
+ SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 10000);
+
+
+ }
+
+ W_REG(sii->osh, &ai->ioctrl, bits);
+ dummy = R_REG(sii->osh, &ai->ioctrl);
+ BCM_REFERENCE(dummy);
+ OSL_DELAY(10);
+
+ W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
+ dummy = R_REG(sii->osh, &ai->resetctrl);
+ BCM_REFERENCE(dummy);
+ OSL_DELAY(1);
+}
+
+
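+/* Reset the current core: disable it, bring it back up with the clock forced on
+ * (SICF_FGC | SICF_CLOCK_EN), then drop back to SICF_CLOCK_EN only.
+ */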
+void
+ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+ si_info_t *sii;
+ aidmp_t *ai;
+ volatile uint32 dummy;
+
+ sii = SI_INFO(sih);
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+
+ ai_core_disable(sih, (bits | resetbits));
+
+
+ W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
+ dummy = R_REG(sii->osh, &ai->ioctrl);
+ BCM_REFERENCE(dummy);
+
+ W_REG(sii->osh, &ai->resetctrl, 0);
+ dummy = R_REG(sii->osh, &ai->resetctrl);
+ BCM_REFERENCE(dummy);
+ OSL_DELAY(1);
+
+ W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
+ dummy = R_REG(sii->osh, &ai->ioctrl);
+ BCM_REFERENCE(dummy);
+ OSL_DELAY(1);
+}
+
+void
+ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ aidmp_t *ai;
+ uint32 w;
+
+ sii = SI_INFO(sih);
+
+ if (BCM47162_DMP()) {
+ SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
+ __FUNCTION__));
+ return;
+ }
+ if (BCM5357_DMP()) {
+ SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
+ __FUNCTION__));
+ return;
+ }
+
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+ ASSERT((val & ~mask) == 0);
+
+ if (mask || val) {
+ w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
+ W_REG(sii->osh, &ai->ioctrl, w);
+ }
+}
+
+uint32
+ai_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ aidmp_t *ai;
+ uint32 w;
+
+ sii = SI_INFO(sih);
+ if (BCM47162_DMP()) {
+ SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
+ __FUNCTION__));
+ return 0;
+ }
+ if (BCM5357_DMP()) {
+ SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
+ __FUNCTION__));
+ return 0;
+ }
+
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+ ASSERT((val & ~mask) == 0);
+
+ if (mask || val) {
+ w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
+ W_REG(sii->osh, &ai->ioctrl, w);
+ }
+
+ return R_REG(sii->osh, &ai->ioctrl);
+}
+
+uint32
+ai_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ aidmp_t *ai;
+ uint32 w;
+
+ sii = SI_INFO(sih);
+ if (BCM47162_DMP()) {
+ SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) on 47162a0",
+ __FUNCTION__));
+ return 0;
+ }
+ if (BCM5357_DMP()) {
+ SI_ERROR(("%s: Accessing USB20H DMP register (iostatus) on 5357\n",
+ __FUNCTION__));
+ return 0;
+ }
+
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+ ASSERT((val & ~mask) == 0);
+ ASSERT((mask & ~SISF_CORE_BITS) == 0);
+
+ if (mask || val) {
+ w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
+ W_REG(sii->osh, &ai->iostatus, w);
+ }
+
+ return R_REG(sii->osh, &ai->iostatus);
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmevent.c b/drivers/net/wireless/bcmdhd/bcmevent.c
new file mode 100644
index 000000000000..cecea6049424
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmevent.c
@@ -0,0 +1,148 @@
+/*
+ * bcmevent read-only data shared by kernel or app layers
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmevent.c 327460 2012-04-13 18:38:41Z $
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <proto/ethernet.h>
+#include <proto/bcmeth.h>
+#include <proto/bcmevent.h>
+
+#if WLC_E_LAST != 94
+#error "You need to add an entry to bcmevent_names[] for the new event"
+#endif
+
+const bcmevent_name_t bcmevent_names[] = {
+ { WLC_E_SET_SSID, "SET_SSID" },
+ { WLC_E_JOIN, "JOIN" },
+ { WLC_E_START, "START" },
+ { WLC_E_AUTH, "AUTH" },
+ { WLC_E_AUTH_IND, "AUTH_IND" },
+ { WLC_E_DEAUTH, "DEAUTH" },
+ { WLC_E_DEAUTH_IND, "DEAUTH_IND" },
+ { WLC_E_ASSOC, "ASSOC" },
+ { WLC_E_ASSOC_IND, "ASSOC_IND" },
+ { WLC_E_REASSOC, "REASSOC" },
+ { WLC_E_REASSOC_IND, "REASSOC_IND" },
+ { WLC_E_DISASSOC, "DISASSOC" },
+ { WLC_E_DISASSOC_IND, "DISASSOC_IND" },
+ { WLC_E_QUIET_START, "START_QUIET" },
+ { WLC_E_QUIET_END, "END_QUIET" },
+ { WLC_E_BEACON_RX, "BEACON_RX" },
+ { WLC_E_LINK, "LINK" },
+ { WLC_E_MIC_ERROR, "MIC_ERROR" },
+ { WLC_E_NDIS_LINK, "NDIS_LINK" },
+ { WLC_E_ROAM, "ROAM" },
+ { WLC_E_TXFAIL, "TXFAIL" },
+ { WLC_E_PMKID_CACHE, "PMKID_CACHE" },
+ { WLC_E_RETROGRADE_TSF, "RETROGRADE_TSF" },
+ { WLC_E_PRUNE, "PRUNE" },
+ { WLC_E_AUTOAUTH, "AUTOAUTH" },
+ { WLC_E_EAPOL_MSG, "EAPOL_MSG" },
+ { WLC_E_SCAN_COMPLETE, "SCAN_COMPLETE" },
+ { WLC_E_ADDTS_IND, "ADDTS_IND" },
+ { WLC_E_DELTS_IND, "DELTS_IND" },
+ { WLC_E_BCNSENT_IND, "BCNSENT_IND" },
+ { WLC_E_BCNRX_MSG, "BCNRX_MSG" },
+ { WLC_E_BCNLOST_MSG, "BCNLOST_IND" },
+ { WLC_E_ROAM_PREP, "ROAM_PREP" },
+ { WLC_E_PFN_NET_FOUND, "PFNFOUND_IND" },
+ { WLC_E_PFN_NET_LOST, "PFNLOST_IND" },
+#if defined(IBSS_PEER_DISCOVERY_EVENT)
+ { WLC_E_IBSS_ASSOC, "IBSS_ASSOC" },
+#endif /* defined(IBSS_PEER_DISCOVERY_EVENT) */
+ { WLC_E_RADIO, "RADIO" },
+ { WLC_E_PSM_WATCHDOG, "PSM_WATCHDOG" },
+ { WLC_E_PROBREQ_MSG, "PROBE_REQ_MSG" },
+ { WLC_E_SCAN_CONFIRM_IND, "SCAN_CONFIRM_IND" },
+ { WLC_E_PSK_SUP, "PSK_SUP" },
+ { WLC_E_COUNTRY_CODE_CHANGED, "CNTRYCODE_IND" },
+ { WLC_E_EXCEEDED_MEDIUM_TIME, "EXCEEDED_MEDIUM_TIME" },
+ { WLC_E_ICV_ERROR, "ICV_ERROR" },
+ { WLC_E_UNICAST_DECODE_ERROR, "UNICAST_DECODE_ERROR" },
+ { WLC_E_MULTICAST_DECODE_ERROR, "MULTICAST_DECODE_ERROR" },
+ { WLC_E_TRACE, "TRACE" },
+#ifdef WLBTAMP
+ { WLC_E_BTA_HCI_EVENT, "BTA_HCI_EVENT" },
+#endif
+ { WLC_E_IF, "IF" },
+#ifdef WLP2P
+ { WLC_E_P2P_DISC_LISTEN_COMPLETE, "WLC_E_P2P_DISC_LISTEN_COMPLETE" },
+#endif
+ { WLC_E_RSSI, "RSSI" },
+ { WLC_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE" },
+ { WLC_E_EXTLOG_MSG, "EXTERNAL LOG MESSAGE" },
+#ifdef WIFI_ACT_FRAME
+ { WLC_E_ACTION_FRAME, "ACTION_FRAME" },
+ { WLC_E_ACTION_FRAME_RX, "ACTION_FRAME_RX" },
+ { WLC_E_ACTION_FRAME_COMPLETE, "ACTION_FRAME_COMPLETE" },
+#endif
+#if 0 && (NDISVER >= 0x0620)
+ { WLC_E_PRE_ASSOC_IND, "ASSOC_RECV" },
+ { WLC_E_PRE_REASSOC_IND, "REASSOC_RECV" },
+ { WLC_E_CHANNEL_ADOPTED, "CHANNEL_ADOPTED" },
+ { WLC_E_AP_STARTED, "AP_STARTED" },
+ { WLC_E_DFS_AP_STOP, "DFS_AP_STOP" },
+ { WLC_E_DFS_AP_RESUME, "DFS_AP_RESUME" },
+ { WLC_E_ASSOC_IND_NDIS, "ASSOC_IND_NDIS"},
+ { WLC_E_REASSOC_IND_NDIS, "REASSOC_IND_NDIS"},
+ { WLC_E_ACTION_FRAME_RX_NDIS, "WLC_E_ACTION_FRAME_RX_NDIS" },
+ { WLC_E_AUTH_REQ, "WLC_E_AUTH_REQ" },
+#endif
+#ifdef BCMWAPI_WAI
+ { WLC_E_WAI_STA_EVENT, "WAI_STA_EVENT" },
+ { WLC_E_WAI_MSG, "WAI_MSG" },
+#endif /* BCMWAPI_WAI */
+ { WLC_E_ESCAN_RESULT, "WLC_E_ESCAN_RESULT" },
+ { WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE, "WLC_E_AF_OFF_CHAN_COMPLETE" },
+#ifdef WLP2P
+ { WLC_E_PROBRESP_MSG, "PROBE_RESP_MSG" },
+ { WLC_E_P2P_PROBREQ_MSG, "P2P PROBE_REQ_MSG" },
+#endif
+#ifdef PROP_TXSTATUS
+ { WLC_E_FIFO_CREDIT_MAP, "FIFO_CREDIT_MAP" },
+#endif
+ { WLC_E_WAKE_EVENT, "WAKE_EVENT" },
+ { WLC_E_DCS_REQUEST, "DCS_REQUEST" },
+ { WLC_E_RM_COMPLETE, "RM_COMPLETE" },
+#ifdef WLMEDIA_HTSF
+ { WLC_E_HTSFSYNC, "HTSF_SYNC_EVENT" },
+#endif
+ { WLC_E_OVERLAY_REQ, "OVERLAY_REQ_EVENT" },
+ { WLC_E_CSA_COMPLETE_IND, "WLC_E_CSA_COMPLETE_IND"},
+ { WLC_E_EXCESS_PM_WAKE_EVENT, "EXCESS_PM_WAKE_EVENT" },
+ { WLC_E_PFN_SCAN_NONE, "PFN_SCAN_NONE" },
+ { WLC_E_PFN_SCAN_ALLGONE, "PFN_SCAN_ALLGONE" },
+#ifdef SOFTAP
+ { WLC_E_GTK_PLUMBED, "GTK_PLUMBED" },
+#endif
+ { WLC_E_ASSOC_REQ_IE, "ASSOC_REQ_IE" },
+ { WLC_E_ASSOC_RESP_IE, "ASSOC_RESP_IE" },
+ { WLC_E_ACTION_FRAME_RX_NDIS, "WLC_E_ACTION_FRAME_RX_NDIS" },
+#ifdef WLTDLS
+ { WLC_E_TDLS_PEER_EVENT, "TDLS_PEER_EVENT" },
+#endif /* WLTDLS */
+};
+
+const int bcmevent_names_size = ARRAYSIZE(bcmevent_names);
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh.c b/drivers/net/wireless/bcmdhd/bcmsdh.c
new file mode 100644
index 000000000000..6e1a6b0e2144
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh.c
@@ -0,0 +1,726 @@
+/*
+ * BCMSDH interface glue
+ * implement bcmsdh API for SDIOH driver
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh.c 300445 2011-12-03 05:37:20Z $
+ */
+
+/**
+ * @file bcmsdh.c
+ */
+
+/* ****************** BCMSDH Interface Functions *************************** */
+
+#include <typedefs.h>
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <hndsoc.h>
+#include <siutils.h>
+#include <osl.h>
+
+#include <bcmsdh.h> /* BRCM API for SDIO clients (such as wl, dhd) */
+#include <bcmsdbus.h> /* common SDIO/controller interface */
+#include <sbsdio.h> /* SDIO device core hardware definitions. */
+
+#include <sdio.h> /* SDIO Device and Protocol Specs */
+
+#define SDIOH_API_ACCESS_RETRY_LIMIT 2
+const uint bcmsdh_msglevel = BCMSDH_ERROR_VAL;
+
+/**
+ * BCMSDH API context
+ */
+struct bcmsdh_info
+{
+ bool init_success; /* underlying driver successfully attached */
+ void *sdioh; /* handler for sdioh */
+ uint32 vendevid; /* Target Vendor and Device ID on SD bus */
+ osl_t *osh;
+ bool regfail; /* Save status of last reg_read/reg_write call */
+ uint32 sbwad; /* Save backplane window address */
+};
+/* local copy of bcm sd handler */
+bcmsdh_info_t * l_bcmsdh = NULL;
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+extern int
+sdioh_enable_hw_oob_intr(void *sdioh, bool enable);
+
+void
+bcmsdh_enable_hw_oob_intr(bcmsdh_info_t *sdh, bool enable)
+{
+ sdioh_enable_hw_oob_intr(sdh->sdioh, enable);
+}
+#endif
+
+/**
+ * Attach BCMSDH layer to SDIO Host Controller Driver
+ *
+ * @param osh OSL Handle.
+ * @param cfghdl Configuration Handle.
+ * @param regsva Virtual address of controller registers.
+ * @param irq Interrupt number of SDIO controller.
+ *
+ * @return bcmsdh_info_t Handle to BCMSDH context.
+ */
+bcmsdh_info_t *
+bcmsdh_attach(osl_t *osh, void *cfghdl, void **regsva, uint irq)
+{
+ bcmsdh_info_t *bcmsdh;
+
+ if ((bcmsdh = (bcmsdh_info_t *)MALLOC(osh, sizeof(bcmsdh_info_t))) == NULL) {
+ BCMSDH_ERROR(("bcmsdh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+ return NULL;
+ }
+ bzero((char *)bcmsdh, sizeof(bcmsdh_info_t));
+
+ /* save the handler locally */
+ l_bcmsdh = bcmsdh;
+
+ if (!(bcmsdh->sdioh = sdioh_attach(osh, cfghdl, irq))) {
+ bcmsdh_detach(osh, bcmsdh);
+ return NULL;
+ }
+
+ bcmsdh->osh = osh;
+ bcmsdh->init_success = TRUE;
+
+ *regsva = (uint32 *)SI_ENUM_BASE;
+
+ /* Report the BAR, to fix if needed */
+ bcmsdh->sbwad = SI_ENUM_BASE;
+ return bcmsdh;
+}
+
+int
+bcmsdh_detach(osl_t *osh, void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ if (bcmsdh != NULL) {
+ if (bcmsdh->sdioh) {
+ sdioh_detach(osh, bcmsdh->sdioh);
+ bcmsdh->sdioh = NULL;
+ }
+ MFREE(osh, bcmsdh, sizeof(bcmsdh_info_t));
+ }
+
+ l_bcmsdh = NULL;
+ return 0;
+}
+
+int
+bcmsdh_iovar_op(void *sdh, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ return sdioh_iovar_op(bcmsdh->sdioh, name, params, plen, arg, len, set);
+}
+
+bool
+bcmsdh_intr_query(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ bool on;
+
+ ASSERT(bcmsdh);
+ status = sdioh_interrupt_query(bcmsdh->sdioh, &on);
+ if (SDIOH_API_SUCCESS(status))
+ return FALSE;
+ else
+ return on;
+}
+
+int
+bcmsdh_intr_enable(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ ASSERT(bcmsdh);
+
+ status = sdioh_interrupt_set(bcmsdh->sdioh, TRUE);
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_disable(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ ASSERT(bcmsdh);
+
+ status = sdioh_interrupt_set(bcmsdh->sdioh, FALSE);
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ ASSERT(bcmsdh);
+
+ status = sdioh_interrupt_register(bcmsdh->sdioh, fn, argh);
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_dereg(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ ASSERT(bcmsdh);
+
+ status = sdioh_interrupt_deregister(bcmsdh->sdioh);
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+#if defined(DHD_DEBUG)
+bool
+bcmsdh_intr_pending(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ ASSERT(sdh);
+ return sdioh_interrupt_pending(bcmsdh->sdioh);
+}
+#endif
+
+
+int
+bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+{
+ ASSERT(sdh);
+
+ /* not supported yet */
+ return BCME_UNSUPPORTED;
+}
+
+/**
+ * Read from SDIO Configuration Space
+ * @param sdh SDIO Host context.
+ * @param fnc_num Function number to read from.
+ * @param addr Address to read from.
+ * @param err Error return.
+ * @return value read from SDIO configuration space.
+ */
+uint8
+bcmsdh_cfg_read(void *sdh, uint fnc_num, uint32 addr, int *err)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+ int32 retry = 0;
+#endif
+ uint8 data = 0;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+ do {
+ if (retry) /* wait 1 ms for the bus to settle down */
+ OSL_DELAY(1000);
+#endif
+ status = sdioh_cfg_read(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+ } while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+#endif
+ if (err)
+ *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+ BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
+ fnc_num, addr, data));
+
+ return data;
+}
+
+void
+bcmsdh_cfg_write(void *sdh, uint fnc_num, uint32 addr, uint8 data, int *err)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+ int32 retry = 0;
+#endif
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+ do {
+ if (retry) /* wait 1 ms for the bus to settle down */
+ OSL_DELAY(1000);
+#endif
+ status = sdioh_cfg_write(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+ } while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+#endif
+ if (err)
+ *err = SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR;
+
+ BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
+ fnc_num, addr, data));
+}
+
+uint32
+bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ uint32 data = 0;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+
+ status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_READ, fnc_num,
+ addr, &data, 4);
+
+ if (err)
+ *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+ BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__,
+ fnc_num, addr, data));
+
+ return data;
+}
+
+void
+bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+
+ status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, fnc_num,
+ addr, &data, 4);
+
+ if (err)
+ *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+ BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__, fnc_num,
+ addr, data));
+}
+
+
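+/* Read 'length' bytes of the CIS for the given function; if any bits above the low
+ * nibble of 'func' are set, the raw bytes are additionally reformatted in place as
+ * an ASCII hex dump.
+ */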
+int
+bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+
+ uint8 *tmp_buf, *tmp_ptr;
+ uint8 *ptr;
+ bool ascii = func & ~0xf;
+ func &= 0x7;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+ ASSERT(cis);
+ ASSERT(length <= SBSDIO_CIS_SIZE_LIMIT);
+
+ status = sdioh_cis_read(bcmsdh->sdioh, func, cis, length);
+
+ if (ascii) {
+ /* Move binary bits to tmp and format them into the provided buffer. */
+ if ((tmp_buf = (uint8 *)MALLOC(bcmsdh->osh, length)) == NULL) {
+ BCMSDH_ERROR(("%s: out of memory\n", __FUNCTION__));
+ return BCME_NOMEM;
+ }
+ bcopy(cis, tmp_buf, length);
+ for (tmp_ptr = tmp_buf, ptr = cis; ptr < (cis + length - 4); tmp_ptr++) {
+ ptr += sprintf((char*)ptr, "%.2x ", *tmp_ptr & 0xff);
+ if ((((tmp_ptr - tmp_buf) + 1) & 0xf) == 0)
+ ptr += sprintf((char *)ptr, "\n");
+ }
+ MFREE(bcmsdh->osh, tmp_buf, length);
+ }
+
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+
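+/* Program the SBSDIO_FUNC1_SBADDR{LOW,MID,HIGH} registers so that the backplane
+ * window covers 'address'; the window registers and the cached window (sbwad) are
+ * only rewritten when the target window changes or force_set is TRUE.
+ */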
+int
+bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set)
+{
+ int err = 0;
+ uint bar0 = address & ~SBSDIO_SB_OFT_ADDR_MASK;
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ if (bar0 != bcmsdh->sbwad || force_set) {
+ bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+ (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
+ if (!err)
+ bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
+ (address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
+ if (!err)
+ bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
+ (address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);
+
+ if (!err)
+ bcmsdh->sbwad = bar0;
+ else
+ /* invalidate cached window var */
+ bcmsdh->sbwad = 0;
+
+ }
+
+ return err;
+}
+
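+/* Read a 1/2/4 byte backplane register through SDIO function 1, moving the
+ * backplane window first; returns 0xFFFFFFFF on failure.
+ */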
+uint32
+bcmsdh_reg_read(void *sdh, uint32 addr, uint size)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ uint32 word = 0;
+
+ BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, ", __FUNCTION__, addr));
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+
+ if (bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE))
+ return 0xFFFFFFFF;
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ if (size == 4)
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL,
+ SDIOH_READ, SDIO_FUNC_1, addr, &word, size);
+
+ bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));
+
+ BCMSDH_INFO(("uint32data = 0x%x\n", word));
+
+ /* if ok, return appropriately masked word */
+ if (SDIOH_API_SUCCESS(status)) {
+ switch (size) {
+ case sizeof(uint8):
+ return (word & 0xff);
+ case sizeof(uint16):
+ return (word & 0xffff);
+ case sizeof(uint32):
+ return word;
+ default:
+ bcmsdh->regfail = TRUE;
+
+ }
+ }
+
+ /* otherwise, bad sdio access or invalid size */
+ BCMSDH_ERROR(("%s: error reading addr 0x%04x size %d\n", __FUNCTION__, addr, size));
+ return 0xFFFFFFFF;
+}
+
+uint32
+bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ int err = 0;
+
+ BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, uint%ddata = 0x%x\n",
+ __FUNCTION__, addr, size*8, data));
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+
+ if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
+ return err;
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ if (size == 4)
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+ status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, SDIO_FUNC_1,
+ addr, &data, size);
+ bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));
+
+ if (SDIOH_API_SUCCESS(status))
+ return 0;
+
+ BCMSDH_ERROR(("%s: error writing 0x%08x to addr 0x%04x size %d\n",
+ __FUNCTION__, data, addr, size));
+ return 0xFFFFFFFF;
+}
+
+bool
+bcmsdh_regfail(void *sdh)
+{
+ return ((bcmsdh_info_t *)sdh)->regfail;
+}
+
+int
+bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes, void *pkt,
+ bcmsdh_cmplt_fn_t complete_fn, void *handle)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ uint incr_fix;
+ uint width;
+ int err = 0;
+
+ ASSERT(bcmsdh);
+ ASSERT(bcmsdh->init_success);
+
+ BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
+ __FUNCTION__, fn, addr, nbytes));
+
+ /* Async not implemented yet */
+ ASSERT(!(flags & SDIO_REQ_ASYNC));
+ if (flags & SDIO_REQ_ASYNC)
+ return BCME_UNSUPPORTED;
+
+ if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
+ return err;
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+ incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+ width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+ if (width == 4)
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
+ SDIOH_READ, fn, addr, width, nbytes, buf, pkt);
+
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+}
+
+int
+bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes, void *pkt,
+ bcmsdh_cmplt_fn_t complete_fn, void *handle)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ uint incr_fix;
+ uint width;
+ int err = 0;
+
+ ASSERT(bcmsdh);
+ ASSERT(bcmsdh->init_success);
+
+ BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
+ __FUNCTION__, fn, addr, nbytes));
+
+ /* Async not implemented yet */
+ ASSERT(!(flags & SDIO_REQ_ASYNC));
+ if (flags & SDIO_REQ_ASYNC)
+ return BCME_UNSUPPORTED;
+
+ if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
+ return err;
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+ incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+ width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+ if (width == 4)
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
+ SDIOH_WRITE, fn, addr, width, nbytes, buf, pkt);
+
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+
+ ASSERT(bcmsdh);
+ ASSERT(bcmsdh->init_success);
+ ASSERT((addr & SBSDIO_SBWINDOW_MASK) == 0);
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, SDIOH_DATA_INC,
+ (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1,
+ addr, 4, nbytes, buf, NULL);
+
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_abort(void *sdh, uint fn)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ return sdioh_abort(bcmsdh->sdioh, fn);
+}
+
+int
+bcmsdh_start(void *sdh, int stage)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ return sdioh_start(bcmsdh->sdioh, stage);
+}
+
+int
+bcmsdh_stop(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ return sdioh_stop(bcmsdh->sdioh);
+}
+
+int
+bcmsdh_waitlockfree(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ return sdioh_waitlockfree(bcmsdh->sdioh);
+}
+
+
+int
+bcmsdh_query_device(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ bcmsdh->vendevid = (VENDOR_BROADCOM << 16) | 0;
+ return (bcmsdh->vendevid);
+}
+
+uint
+bcmsdh_query_iofnum(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ return (sdioh_query_iofnum(bcmsdh->sdioh));
+}
+
+int
+bcmsdh_reset(bcmsdh_info_t *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ return sdioh_sdio_reset(bcmsdh->sdioh);
+}
+
+void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh)
+{
+ ASSERT(sdh);
+ return sdh->sdioh;
+}
+
+/* Function to pass device-status bits to DHD. */
+uint32
+bcmsdh_get_dstatus(void *sdh)
+{
+ return 0;
+}
+uint32
+bcmsdh_cur_sbwad(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ return (bcmsdh->sbwad);
+}
+
+void
+bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev)
+{
+ return;
+}
+
+
+int
+bcmsdh_sleep(void *sdh, bool enab)
+{
+#ifdef SDIOH_SLEEP_ENABLED
+ bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+ sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+ return sdioh_sleep(sd, enab);
+#else
+ return BCME_UNSUPPORTED;
+#endif
+}
+
+int
+bcmsdh_gpio_init(void *sdh)
+{
+ bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+ sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+ return sdioh_gpio_init(sd);
+}
+
+bool
+bcmsdh_gpioin(void *sdh, uint32 gpio)
+{
+ bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+ sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+ return sdioh_gpioin(sd, gpio);
+}
+
+int
+bcmsdh_gpioouten(void *sdh, uint32 gpio)
+{
+ bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+ sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+ return sdioh_gpioouten(sd, gpio);
+}
+
+int
+bcmsdh_gpioout(void *sdh, uint32 gpio, bool enab)
+{
+ bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+ sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+ return sdioh_gpioout(sd, gpio, enab);
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_linux.c b/drivers/net/wireless/bcmdhd/bcmsdh_linux.c
new file mode 100644
index 000000000000..30d0d14d12a8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh_linux.c
@@ -0,0 +1,742 @@
+/*
+ * SDIO access interface for drivers - linux specific (pci only)
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_linux.c 312788 2012-02-03 23:06:32Z $
+ */
+
+/**
+ * @file bcmsdh_linux.c
+ */
+
+#define __UNDEF_NO_VERSION__
+
+#include <typedefs.h>
+#include <linuxver.h>
+
+#include <linux/pci.h>
+#include <linux/completion.h>
+
+#include <osl.h>
+#include <pcicfg.h>
+#include <bcmdefs.h>
+#include <bcmdevs.h>
+
+#if defined(OOB_INTR_ONLY)
+#include <linux/irq.h>
+extern void dhdsdio_isr(void * args);
+#include <bcmutils.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#endif /* defined(OOB_INTR_ONLY) */
+
+/**
+ * SDIO Host Controller info
+ */
+typedef struct bcmsdh_hc bcmsdh_hc_t;
+
+struct bcmsdh_hc {
+ bcmsdh_hc_t *next;
+#ifdef BCMPLATFORM_BUS
+ struct device *dev; /* platform device handle */
+#else
+ struct pci_dev *dev; /* pci device handle */
+#endif /* BCMPLATFORM_BUS */
+ osl_t *osh;
+ void *regs; /* SDIO Host Controller address */
+ bcmsdh_info_t *sdh; /* SDIO Host Controller handle */
+ void *ch;
+ unsigned int oob_irq;
+ unsigned long oob_flags; /* OOB host IRQ flags (edge/level trigger, etc.) */
+ bool oob_irq_registered;
+ bool oob_irq_enable_flag;
+#if defined(OOB_INTR_ONLY)
+ spinlock_t irq_lock;
+#endif
+};
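+/* Head of the singly linked list of attached SDIO Host Controllers
+ * (in practice usually a single entry).
+ */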
+static bcmsdh_hc_t *sdhcinfo = NULL;
+
+/* driver info, initialized when bcmsdh_register is called */
+static bcmsdh_driver_t drvinfo = {NULL, NULL};
+
+/* debugging macros */
+#define SDLX_MSG(x)
+
+/**
+ * Checks to see if vendor and device IDs match a supported SDIO Host Controller.
+ */
+bool
+bcmsdh_chipmatch(uint16 vendor, uint16 device)
+{
+ /* Add other vendors and devices as required */
+
+#ifdef BCMSDIOH_STD
+ /* Check for Arasan host controller */
+ if (vendor == VENDOR_SI_IMAGE) {
+ return (TRUE);
+ }
+ /* Check for BRCM 27XX Standard host controller */
+ if (device == BCM27XX_SDIOH_ID && vendor == VENDOR_BROADCOM) {
+ return (TRUE);
+ }
+ /* Check for BRCM Standard host controller */
+ if (device == SDIOH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+ return (TRUE);
+ }
+ /* Check for TI PCIxx21 Standard host controller */
+ if (device == PCIXX21_SDIOH_ID && vendor == VENDOR_TI) {
+ return (TRUE);
+ }
+ if (device == PCIXX21_SDIOH0_ID && vendor == VENDOR_TI) {
+ return (TRUE);
+ }
+ /* Ricoh R5C822 Standard SDIO Host */
+ if (device == R5C822_SDIOH_ID && vendor == VENDOR_RICOH) {
+ return (TRUE);
+ }
+ /* JMicron Standard SDIO Host */
+ if (device == JMICRON_SDIOH_ID && vendor == VENDOR_JMICRON) {
+ return (TRUE);
+ }
+
+#endif /* BCMSDIOH_STD */
+#ifdef BCMSDIOH_SPI
+ /* This is the PciSpiHost. */
+ if (device == SPIH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+ printf("Found PCI SPI Host Controller\n");
+ return (TRUE);
+ }
+
+#endif /* BCMSDIOH_SPI */
+
+ return (FALSE);
+}
+
+#if defined(BCMPLATFORM_BUS)
+#if defined(BCMLXSDMMC)
+/* forward declarations */
+int bcmsdh_probe(struct device *dev);
+int bcmsdh_remove(struct device *dev);
+
+EXPORT_SYMBOL(bcmsdh_probe);
+EXPORT_SYMBOL(bcmsdh_remove);
+
+#else
+/* forward declarations */
+static int bcmsdh_probe(struct device *dev);
+static int bcmsdh_remove(struct device *dev);
+#endif /* BCMLXSDMMC */
+
+#ifndef BCMLXSDMMC
+static
+#endif /* BCMLXSDMMC */
+int bcmsdh_probe(struct device *dev)
+{
+ osl_t *osh = NULL;
+ bcmsdh_hc_t *sdhc = NULL;
+ ulong regs = 0;
+ bcmsdh_info_t *sdh = NULL;
+#if !defined(BCMLXSDMMC) && defined(BCMPLATFORM_BUS)
+ struct platform_device *pdev;
+ struct resource *r;
+#endif /* BCMLXSDMMC */
+ int irq = 0;
+ uint32 vendevid;
+ unsigned long irq_flags = 0;
+
+#if !defined(BCMLXSDMMC) && defined(BCMPLATFORM_BUS)
+ pdev = to_platform_device(dev);
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!r || irq == NO_IRQ)
+ return -ENXIO;
+#endif /* BCMLXSDMMC */
+
+#if defined(OOB_INTR_ONLY)
+#ifdef HW_OOB
+ irq_flags =
+ IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE;
+#else
+ irq_flags = IRQF_TRIGGER_FALLING;
+#endif /* HW_OOB */
+
+ /* Get customer-specific OOB IRQ parameters: IRQ number and IRQ type */
+ irq = dhd_customer_oob_irq_map(&irq_flags);
+ if (irq < 0) {
+ SDLX_MSG(("%s: Host irq is not defined\n", __FUNCTION__));
+ return 1;
+ }
+#endif /* defined(OOB_INTR_ONLY) */
+ /* allocate SDIO Host Controller state info */
+ if (!(osh = osl_attach(dev, PCI_BUS, FALSE))) {
+ SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+ if (!(sdhc = MALLOC(osh, sizeof(bcmsdh_hc_t)))) {
+ SDLX_MSG(("%s: out of memory, allocated %d bytes\n",
+ __FUNCTION__,
+ MALLOCED(osh)));
+ goto err;
+ }
+ bzero(sdhc, sizeof(bcmsdh_hc_t));
+ sdhc->osh = osh;
+
+ sdhc->dev = (void *)dev;
+
+#ifdef BCMLXSDMMC
+ if (!(sdh = bcmsdh_attach(osh, (void *)0,
+ (void **)&regs, irq))) {
+ SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+#else
+ if (!(sdh = bcmsdh_attach(osh, (void *)r->start,
+ (void **)&regs, irq))) {
+ SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+#endif /* BCMLXSDMMC */
+ sdhc->sdh = sdh;
+ sdhc->oob_irq = irq;
+ sdhc->oob_flags = irq_flags;
+ sdhc->oob_irq_registered = FALSE; /* to make sure.. */
+ sdhc->oob_irq_enable_flag = FALSE;
+#if defined(OOB_INTR_ONLY)
+ spin_lock_init(&sdhc->irq_lock);
+#endif
+
+ /* chain SDIO Host Controller info together */
+ sdhc->next = sdhcinfo;
+ sdhcinfo = sdhc;
+
+ /* Read the vendor/device ID from the CIS */
+ vendevid = bcmsdh_query_device(sdh);
+ /* try to attach to the target device */
+ if (!(sdhc->ch = drvinfo.attach((vendevid >> 16),
+ (vendevid & 0xFFFF), 0, 0, 0, 0,
+ (void *)regs, NULL, sdh))) {
+ SDLX_MSG(("%s: device attach failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ return 0;
+
+ /* error handling */
+err:
+ if (sdhc) {
+ if (sdhc->sdh)
+ bcmsdh_detach(sdhc->osh, sdhc->sdh);
+ MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
+ }
+ if (osh)
+ osl_detach(osh);
+ return -ENODEV;
+}
+
+#ifndef BCMLXSDMMC
+static
+#endif /* BCMLXSDMMC */
+int bcmsdh_remove(struct device *dev)
+{
+ bcmsdh_hc_t *sdhc, *prev;
+ osl_t *osh;
+
+ sdhc = sdhcinfo;
+ drvinfo.detach(sdhc->ch);
+ bcmsdh_detach(sdhc->osh, sdhc->sdh);
+
+ /* find the SDIO Host Controller state for this dev and remove it from the list */
+ for (sdhc = sdhcinfo, prev = NULL; sdhc; sdhc = sdhc->next) {
+ if (sdhc->dev == (void *)dev) {
+ if (prev)
+ prev->next = sdhc->next;
+ else
+ sdhcinfo = NULL;
+ break;
+ }
+ prev = sdhc;
+ }
+ if (!sdhc) {
+ SDLX_MSG(("%s: failed\n", __FUNCTION__));
+ return 0;
+ }
+
+ /* release SDIO Host Controller info */
+ osh = sdhc->osh;
+ MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
+ osl_detach(osh);
+
+#if !defined(BCMLXSDMMC) || defined(OOB_INTR_ONLY)
+ dev_set_drvdata(dev, NULL);
+#endif /* !defined(BCMLXSDMMC) || defined(OOB_INTR_ONLY) */
+
+ return 0;
+}
+
+#else /* BCMPLATFORM_BUS */
+
+#if !defined(BCMLXSDMMC)
+/* forward declarations for PCI probe and remove functions. */
+static int bcmsdh_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void bcmsdh_pci_remove(struct pci_dev *pdev);
+
+/**
+ * pci id table
+ */
+static struct pci_device_id bcmsdh_pci_devid[] __devinitdata = {
+ { vendor: PCI_ANY_ID,
+ device: PCI_ANY_ID,
+ subvendor: PCI_ANY_ID,
+ subdevice: PCI_ANY_ID,
+ class: 0,
+ class_mask: 0,
+ driver_data: 0,
+ },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, bcmsdh_pci_devid);
+
+/**
+ * SDIO Host Controller pci driver info
+ */
+static struct pci_driver bcmsdh_pci_driver = {
+ node: {},
+ name: "bcmsdh",
+ id_table: bcmsdh_pci_devid,
+ probe: bcmsdh_pci_probe,
+ remove: bcmsdh_pci_remove,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+ save_state: NULL,
+#endif
+ suspend: NULL,
+ resume: NULL,
+ };
+
+
+/*
+ * Force detection to a particular PCI slot only. Allows for having multiple
+ * WL devices at once in a PC; only one instance of dhd will be usable at a
+ * time. Upper word is the bus number, lower word is the slot number.
+ * The default value of 0xffffffff turns this off.
+ */
+extern uint sd_pci_slot;
+module_param(sd_pci_slot, uint, 0);
+
+
+/**
+ * Detect supported SDIO Host Controller and attach if found.
+ *
+ * Determine if the device described by pdev is a supported SDIO Host
+ * Controller. If so, attach to it and attach to the target device.
+ */
+static int
+bcmsdh_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ osl_t *osh = NULL;
+ bcmsdh_hc_t *sdhc = NULL;
+ ulong regs;
+ bcmsdh_info_t *sdh = NULL;
+ int rc;
+
+ if (sd_pci_slot != 0xFFFFffff) {
+ if (pdev->bus->number != (sd_pci_slot>>16) ||
+ PCI_SLOT(pdev->devfn) != (sd_pci_slot&0xffff)) {
+ SDLX_MSG(("%s: %s: bus %X, slot %X, vend %X, dev %X\n",
+ __FUNCTION__,
+ bcmsdh_chipmatch(pdev->vendor, pdev->device)
+ ?"Found compatible SDIOHC"
+ :"Probing unknown device",
+ pdev->bus->number, PCI_SLOT(pdev->devfn), pdev->vendor,
+ pdev->device));
+ return -ENODEV;
+ }
+ SDLX_MSG(("%s: %s: bus %X, slot %X, vendor %X, device %X (good PCI location)\n",
+ __FUNCTION__,
+ bcmsdh_chipmatch(pdev->vendor, pdev->device)
+ ?"Using compatible SDIOHC"
+ :"WARNING, forced use of unkown device",
+ pdev->bus->number, PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device));
+ }
+
+ if ((pdev->vendor == VENDOR_TI) && ((pdev->device == PCIXX21_FLASHMEDIA_ID) ||
+ (pdev->device == PCIXX21_FLASHMEDIA0_ID))) {
+ uint32 config_reg;
+
+ SDLX_MSG(("%s: Disabling TI FlashMedia Controller.\n", __FUNCTION__));
+ if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
+ SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ config_reg = OSL_PCI_READ_CONFIG(osh, 0x4c, 4);
+
+ /*
+ * Set MMC_SD_DIS bit in FlashMedia Controller.
+ * Disabling the SD/MMC Controller in the FlashMedia Controller
+ * allows the Standard SD Host Controller to take over control
+ * of the SD Slot.
+ */
+ config_reg |= 0x02;
+ OSL_PCI_WRITE_CONFIG(osh, 0x4c, 4, config_reg);
+ osl_detach(osh);
+ }
+ /* match this pci device with what we support */
+ /* we can't solely rely on this to believe it is our SDIO Host Controller! */
+ if (!bcmsdh_chipmatch(pdev->vendor, pdev->device)) {
+ return -ENODEV;
+ }
+
+ /* this is a pci device we might support */
+ SDLX_MSG(("%s: Found possible SDIO Host Controller: bus %d slot %d func %d irq %d\n",
+ __FUNCTION__,
+ pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn), pdev->irq));
+
+ /* use bcmsdh_query_device() to get the vendor ID of the target device so
+ * it will eventually appear in the Broadcom string on the console
+ */
+
+ /* allocate SDIO Host Controller state info */
+ if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
+ SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+ if (!(sdhc = MALLOC(osh, sizeof(bcmsdh_hc_t)))) {
+ SDLX_MSG(("%s: out of memory, allocated %d bytes\n",
+ __FUNCTION__,
+ MALLOCED(osh)));
+ goto err;
+ }
+ bzero(sdhc, sizeof(bcmsdh_hc_t));
+ sdhc->osh = osh;
+
+ sdhc->dev = pdev;
+
+ /* map to address where host can access */
+ pci_set_master(pdev);
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ SDLX_MSG(("%s: Cannot enable PCI device\n", __FUNCTION__));
+ goto err;
+ }
+ if (!(sdh = bcmsdh_attach(osh, (void *)(uintptr)pci_resource_start(pdev, 0),
+ (void **)&regs, pdev->irq))) {
+ SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ sdhc->sdh = sdh;
+
+ /* try to attach to the target device */
+ if (!(sdhc->ch = drvinfo.attach(VENDOR_BROADCOM, /* pdev->vendor, */
+ bcmsdh_query_device(sdh) & 0xFFFF, 0, 0, 0, 0,
+ (void *)regs, NULL, sdh))) {
+ SDLX_MSG(("%s: device attach failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ /* chain SDIO Host Controller info together */
+ sdhc->next = sdhcinfo;
+ sdhcinfo = sdhc;
+
+ return 0;
+
+ /* error handling */
+err:
+ if (sdhc) {
+ if (sdhc->sdh)
+ bcmsdh_detach(sdhc->osh, sdhc->sdh);
+ MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
+ }
+ if (osh)
+ osl_detach(osh);
+ return -ENODEV;
+}
+
+
+/**
+ * Detach from target devices and SDIO Host Controller
+ */
+static void
+bcmsdh_pci_remove(struct pci_dev *pdev)
+{
+ bcmsdh_hc_t *sdhc, *prev;
+ osl_t *osh;
+
+ /* find the SDIO Host Controller state for this pdev and take it out from the list */
+ for (sdhc = sdhcinfo, prev = NULL; sdhc; sdhc = sdhc->next) {
+ if (sdhc->dev == pdev) {
+ if (prev)
+ prev->next = sdhc->next;
+ else
+ sdhcinfo = NULL;
+ break;
+ }
+ prev = sdhc;
+ }
+ if (!sdhc)
+ return;
+
+ drvinfo.detach(sdhc->ch);
+
+ bcmsdh_detach(sdhc->osh, sdhc->sdh);
+
+ /* release SDIO Host Controller info */
+ osh = sdhc->osh;
+ MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
+ osl_detach(osh);
+}
+#endif /* BCMLXSDMMC */
+#endif /* BCMPLATFORM_BUS */
+
+extern int sdio_function_init(void);
+
+extern int sdio_func_reg_notify(void* semaphore);
+extern void sdio_func_unreg_notify(void);
+
+#if defined(BCMLXSDMMC)
+int bcmsdh_reg_sdio_notify(void* semaphore)
+{
+ return sdio_func_reg_notify(semaphore);
+}
+
+void bcmsdh_unreg_sdio_notify(void)
+{
+ sdio_func_unreg_notify();
+}
+#endif /* defined(BCMLXSDMMC) */
+
+int
+bcmsdh_register(bcmsdh_driver_t *driver)
+{
+ int error = 0;
+
+ drvinfo = *driver;
+
+#if defined(BCMPLATFORM_BUS)
+ SDLX_MSG(("Linux Kernel SDIO/MMC Driver\n"));
+ error = sdio_function_init();
+ return error;
+#endif /* defined(BCMPLATFORM_BUS) */
+
+#if !defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+ if (!(error = pci_module_init(&bcmsdh_pci_driver)))
+ return 0;
+#else
+ if (!(error = pci_register_driver(&bcmsdh_pci_driver)))
+ return 0;
+#endif
+
+ SDLX_MSG(("%s: pci_module_init failed 0x%x\n", __FUNCTION__, error));
+#endif /* BCMPLATFORM_BUS */
+
+ return error;
+}
+
+extern void sdio_function_cleanup(void);
+
+void
+bcmsdh_unregister(void)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+ if (bcmsdh_pci_driver.node.next)
+#endif
+
+#if defined(BCMLXSDMMC)
+ sdio_function_cleanup();
+#endif /* BCMLXSDMMC */
+
+#if !defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC)
+ pci_unregister_driver(&bcmsdh_pci_driver);
+#endif /* BCMPLATFORM_BUS */
+}
+
+#if defined(OOB_INTR_ONLY)
+void bcmsdh_oob_intr_set(bool enable)
+{
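+	/* The OOB IRQ is left enabled by request_irq(), so "enabled" is the
+	 * initial state tracked here.
+	 */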
+ static bool curstate = 1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdhcinfo->irq_lock, flags);
+ if (curstate != enable) {
+ if (enable)
+ enable_irq(sdhcinfo->oob_irq);
+ else
+ disable_irq_nosync(sdhcinfo->oob_irq);
+ curstate = enable;
+ }
+ spin_unlock_irqrestore(&sdhcinfo->irq_lock, flags);
+}
+
+static irqreturn_t wlan_oob_irq(int irq, void *dev_id)
+{
+ dhd_pub_t *dhdp;
+
+ dhdp = (dhd_pub_t *)dev_get_drvdata(sdhcinfo->dev);
+
+ bcmsdh_oob_intr_set(0);
+
+ if (dhdp == NULL) {
+ SDLX_MSG(("Out of band GPIO interrupt fired way too early\n"));
+ return IRQ_HANDLED;
+ }
+
+ dhdsdio_isr((void *)dhdp->bus);
+
+ return IRQ_HANDLED;
+}
+
+int bcmsdh_register_oob_intr(void * dhdp)
+{
+ int error = 0;
+
+ SDLX_MSG(("%s Enter \n", __FUNCTION__));
+
+ /* IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE; */
+
+ dev_set_drvdata(sdhcinfo->dev, dhdp);
+
+ if (!sdhcinfo->oob_irq_registered) {
+ SDLX_MSG(("%s IRQ=%d Type=%X \n", __FUNCTION__,
+ (int)sdhcinfo->oob_irq, (int)sdhcinfo->oob_flags));
+ /* Refer to customer Host IRQ docs about proper irqflags definition */
+ error = request_irq(sdhcinfo->oob_irq, wlan_oob_irq, sdhcinfo->oob_flags,
+ "bcmsdh_sdmmc", NULL);
+ if (error)
+ return -ENODEV;
+
+ enable_irq_wake(sdhcinfo->oob_irq);
+ sdhcinfo->oob_irq_registered = TRUE;
+ sdhcinfo->oob_irq_enable_flag = TRUE;
+ }
+
+ return 0;
+}
+
+void bcmsdh_set_irq(int flag)
+{
+ if (sdhcinfo->oob_irq_registered && sdhcinfo->oob_irq_enable_flag != flag) {
+ SDLX_MSG(("%s Flag = %d", __FUNCTION__, flag));
+ sdhcinfo->oob_irq_enable_flag = flag;
+ if (flag) {
+ enable_irq(sdhcinfo->oob_irq);
+ enable_irq_wake(sdhcinfo->oob_irq);
+ } else {
+ disable_irq_wake(sdhcinfo->oob_irq);
+ disable_irq(sdhcinfo->oob_irq);
+ }
+ }
+}
+
+void bcmsdh_unregister_oob_intr(void)
+{
+ SDLX_MSG(("%s: Enter\n", __FUNCTION__));
+
+ if (sdhcinfo->oob_irq_registered == TRUE) {
+ bcmsdh_set_irq(FALSE);
+ free_irq(sdhcinfo->oob_irq, NULL);
+ sdhcinfo->oob_irq_registered = FALSE;
+ }
+}
+#endif /* defined(OOB_INTR_ONLY) */
+
+#if defined(BCMLXSDMMC)
+void *bcmsdh_get_drvdata(void)
+{
+ if (!sdhcinfo)
+ return NULL;
+ return dev_get_drvdata(sdhcinfo->dev);
+}
+#endif
+
+/* Module parameters specific to each host-controller driver */
+
+extern uint sd_msglevel; /* Debug message level */
+module_param(sd_msglevel, uint, 0);
+
+extern uint sd_power; /* 0 = SD Power OFF, 1 = SD Power ON. */
+module_param(sd_power, uint, 0);
+
+extern uint sd_clock; /* SD Clock Control, 0 = SD Clock OFF, 1 = SD Clock ON */
+module_param(sd_clock, uint, 0);
+
+extern uint sd_divisor; /* Divisor (-1 means external clock) */
+module_param(sd_divisor, uint, 0);
+
+extern uint sd_sdmode; /* Default is SD4, 0=SPI, 1=SD1, 2=SD4 */
+module_param(sd_sdmode, uint, 0);
+
+extern uint sd_hiok; /* Ok to use hi-speed mode */
+module_param(sd_hiok, uint, 0);
+
+extern uint sd_f2_blocksize;
+module_param(sd_f2_blocksize, int, 0);
+
+#ifdef BCMSDIOH_STD
+extern int sd_uhsimode;
+module_param(sd_uhsimode, int, 0);
+#endif
+
+#ifdef BCMSDH_MODULE
+EXPORT_SYMBOL(bcmsdh_attach);
+EXPORT_SYMBOL(bcmsdh_detach);
+EXPORT_SYMBOL(bcmsdh_intr_query);
+EXPORT_SYMBOL(bcmsdh_intr_enable);
+EXPORT_SYMBOL(bcmsdh_intr_disable);
+EXPORT_SYMBOL(bcmsdh_intr_reg);
+EXPORT_SYMBOL(bcmsdh_intr_dereg);
+
+#if defined(DHD_DEBUG)
+EXPORT_SYMBOL(bcmsdh_intr_pending);
+#endif
+
+EXPORT_SYMBOL(bcmsdh_devremove_reg);
+EXPORT_SYMBOL(bcmsdh_cfg_read);
+EXPORT_SYMBOL(bcmsdh_cfg_write);
+EXPORT_SYMBOL(bcmsdh_cis_read);
+EXPORT_SYMBOL(bcmsdh_reg_read);
+EXPORT_SYMBOL(bcmsdh_reg_write);
+EXPORT_SYMBOL(bcmsdh_regfail);
+EXPORT_SYMBOL(bcmsdh_send_buf);
+EXPORT_SYMBOL(bcmsdh_recv_buf);
+
+EXPORT_SYMBOL(bcmsdh_rwdata);
+EXPORT_SYMBOL(bcmsdh_abort);
+EXPORT_SYMBOL(bcmsdh_query_device);
+EXPORT_SYMBOL(bcmsdh_query_iofnum);
+EXPORT_SYMBOL(bcmsdh_iovar_op);
+EXPORT_SYMBOL(bcmsdh_register);
+EXPORT_SYMBOL(bcmsdh_unregister);
+EXPORT_SYMBOL(bcmsdh_chipmatch);
+EXPORT_SYMBOL(bcmsdh_reset);
+EXPORT_SYMBOL(bcmsdh_waitlockfree);
+
+EXPORT_SYMBOL(bcmsdh_get_dstatus);
+EXPORT_SYMBOL(bcmsdh_cfg_read_word);
+EXPORT_SYMBOL(bcmsdh_cfg_write_word);
+EXPORT_SYMBOL(bcmsdh_cur_sbwad);
+EXPORT_SYMBOL(bcmsdh_chipinfo);
+
+#endif /* BCMSDH_MODULE */
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c
new file mode 100644
index 000000000000..afe4019a160f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c
@@ -0,0 +1,1432 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_sdmmc.c 321372 2012-03-15 01:10:32Z $
+ */
+#include <typedefs.h>
+
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <osl.h>
+#include <sdio.h> /* SDIO Device and Protocol Specs */
+#include <sdioh.h> /* Standard SDIO Host Controller Specification */
+#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h> /* ioctl/iovars */
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+#include <linux/suspend.h>
+extern volatile bool dhd_mmc_suspend;
+#endif
+#include "bcmsdh_sdmmc.h"
+
+#ifndef BCMSDH_MODULE
+extern int sdio_function_init(void);
+extern void sdio_function_cleanup(void);
+#endif /* BCMSDH_MODULE */
+
+#if !defined(OOB_INTR_ONLY)
+static void IRQHandler(struct sdio_func *func);
+static void IRQHandlerF2(struct sdio_func *func);
+#endif /* !defined(OOB_INTR_ONLY) */
+static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr);
+extern int sdio_reset_comm(struct mmc_card *card);
+
+extern PBCMSDH_SDMMC_INSTANCE gInstance;
+
+uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */
+uint sd_f2_blocksize = 512; /* Default blocksize */
+
+uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz */
+
+uint sd_power = 1; /* Default to SD Slot powered ON */
+uint sd_clock = 1; /* Default to SD Clock turned ON */
+uint sd_hiok = FALSE; /* Don't use hi-speed mode by default */
+uint sd_msglevel = 0x01;
+uint sd_use_dma = TRUE;
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait);
+
+#define DMA_ALIGN_MASK 0x03
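+/* Packet data must be 4-byte aligned before being handed to the host
+ * controller for DMA; this mask is used below to test PKTDATA() alignment.
+ */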
+
+int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data);
+
+static int
+sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
+{
+ int err_ret;
+ uint32 fbraddr;
+ uint8 func;
+
+ sd_trace(("%s\n", __FUNCTION__));
+
+ /* Get the Card's common CIS address */
+ sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
+ sd->func_cis_ptr[0] = sd->com_cis_ptr;
+ sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+ /* Get the Card's function CIS (for each function) */
+ for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
+ func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
+ sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
+ sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
+ __FUNCTION__, func, sd->func_cis_ptr[func]));
+ }
+
+ sd->func_cis_ptr[0] = sd->com_cis_ptr;
+ sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+ /* Enable Function 1 */
+ sdio_claim_host(gInstance->func[1]);
+ err_ret = sdio_enable_func(gInstance->func[1]);
+ sdio_release_host(gInstance->func[1]);
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x", err_ret));
+ }
+
+ return FALSE;
+}
+
+/*
+ * Public entry points & extern's
+ */
+extern sdioh_info_t *
+sdioh_attach(osl_t *osh, void *bar0, uint irq)
+{
+ sdioh_info_t *sd;
+ int err_ret;
+
+ sd_trace(("%s\n", __FUNCTION__));
+
+ if (gInstance == NULL) {
+ sd_err(("%s: SDIO Device not present\n", __FUNCTION__));
+ return NULL;
+ }
+
+ if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
+ sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+ return NULL;
+ }
+ bzero((char *)sd, sizeof(sdioh_info_t));
+ sd->osh = osh;
+ if (sdioh_sdmmc_osinit(sd) != 0) {
+ sd_err(("%s:sdioh_sdmmc_osinit() failed\n", __FUNCTION__));
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ return NULL;
+ }
+
+ sd->num_funcs = 2;
+ sd->sd_blockmode = TRUE;
+ sd->use_client_ints = TRUE;
+ sd->client_block_size[0] = 64;
+ sd->use_rxchain = FALSE;
+
+ gInstance->sd = sd;
+
+ /* Claim host controller */
+ sdio_claim_host(gInstance->func[1]);
+
+ sd->client_block_size[1] = 64;
+ err_ret = sdio_set_block_size(gInstance->func[1], 64);
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n"));
+ }
+
+ /* Release host controller F1 */
+ sdio_release_host(gInstance->func[1]);
+
+ if (gInstance->func[2]) {
+ /* Claim host controller F2 */
+ sdio_claim_host(gInstance->func[2]);
+
+ sd->client_block_size[2] = sd_f2_blocksize;
+ err_ret = sdio_set_block_size(gInstance->func[2], sd_f2_blocksize);
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d\n",
+ sd_f2_blocksize));
+ }
+
+ /* Release host controller F2 */
+ sdio_release_host(gInstance->func[2]);
+ }
+
+ sdioh_sdmmc_card_enablefuncs(sd);
+
+ sd_trace(("%s: Done\n", __FUNCTION__));
+ return sd;
+}
+
+
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+ sd_trace(("%s\n", __FUNCTION__));
+
+ if (sd) {
+
+ /* Disable Function 2 */
+ sdio_claim_host(gInstance->func[2]);
+ sdio_disable_func(gInstance->func[2]);
+ sdio_release_host(gInstance->func[2]);
+
+ /* Disable Function 1 */
+ if (gInstance->func[1]) {
+ sdio_claim_host(gInstance->func[1]);
+ sdio_disable_func(gInstance->func[1]);
+ sdio_release_host(gInstance->func[1]);
+ }
+
+ gInstance->func[1] = NULL;
+ gInstance->func[2] = NULL;
+
+ /* deregister irq */
+ sdioh_sdmmc_osfree(sd);
+
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ }
+ return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+
+extern SDIOH_API_RC
+sdioh_enable_func_intr(void)
+{
+ uint8 reg;
+ int err;
+
+ if (gInstance->func[0]) {
+ sdio_claim_host(gInstance->func[0]);
+
+ reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err);
+ if (err) {
+ sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+ sdio_release_host(gInstance->func[0]);
+ return SDIOH_API_RC_FAIL;
+ }
+
+ /* Enable F1 and F2 interrupts, set master enable */
+ reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN | INTR_CTL_MASTER_EN);
+
+ sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err);
+ sdio_release_host(gInstance->func[0]);
+
+ if (err) {
+ sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+ return SDIOH_API_RC_FAIL;
+ }
+ }
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_disable_func_intr(void)
+{
+ uint8 reg;
+ int err;
+
+ if (gInstance->func[0]) {
+ sdio_claim_host(gInstance->func[0]);
+ reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err);
+ if (err) {
+ sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+ sdio_release_host(gInstance->func[0]);
+ return SDIOH_API_RC_FAIL;
+ }
+
+ reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
+ /* Disable master interrupt with the last function interrupt */
+ if (!(reg & 0xFE))
+ reg = 0;
+ sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err);
+
+ sdio_release_host(gInstance->func[0]);
+ if (err) {
+ sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+ return SDIOH_API_RC_FAIL;
+ }
+ }
+ return SDIOH_API_RC_SUCCESS;
+}
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+
+/* Configure callback to client when we receive a client interrupt */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+ if (fn == NULL) {
+ sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+#if !defined(OOB_INTR_ONLY)
+ sd->intr_handler = fn;
+ sd->intr_handler_arg = argh;
+ sd->intr_handler_valid = TRUE;
+
+ /* register and unmask irq */
+ if (gInstance->func[2]) {
+ sdio_claim_host(gInstance->func[2]);
+ sdio_claim_irq(gInstance->func[2], IRQHandlerF2);
+ sdio_release_host(gInstance->func[2]);
+ }
+
+ if (gInstance->func[1]) {
+ sdio_claim_host(gInstance->func[1]);
+ sdio_claim_irq(gInstance->func[1], IRQHandler);
+ sdio_release_host(gInstance->func[1]);
+ }
+#elif defined(HW_OOB)
+ sdioh_enable_func_intr();
+#endif /* !defined(OOB_INTR_ONLY) */
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+
+#if !defined(OOB_INTR_ONLY)
+ if (gInstance->func[1]) {
+ /* release irq */
+ sdio_claim_host(gInstance->func[1]);
+ sdio_release_irq(gInstance->func[1]);
+ sdio_release_host(gInstance->func[1]);
+ }
+
+ if (gInstance->func[2]) {
+ /* Claim host controller F2 */
+ sdio_claim_host(gInstance->func[2]);
+ sdio_release_irq(gInstance->func[2]);
+ /* Release host controller F2 */
+ sdio_release_host(gInstance->func[2]);
+ }
+
+ sd->intr_handler_valid = FALSE;
+ sd->intr_handler = NULL;
+ sd->intr_handler_arg = NULL;
+#elif defined(HW_OOB)
+ sdioh_disable_func_intr();
+#endif /* !defined(OOB_INTR_ONLY) */
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+ *onoff = sd->client_intr_enabled;
+ return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(DHD_DEBUG)
+extern bool
+sdioh_interrupt_pending(sdioh_info_t *sd)
+{
+ return (0);
+}
+#endif
+
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
+ return sd->num_funcs;
+}
+
+/* IOVar table */
+enum {
+ IOV_MSGLEVEL = 1,
+ IOV_BLOCKMODE,
+ IOV_BLOCKSIZE,
+ IOV_DMA,
+ IOV_USEINTS,
+ IOV_NUMINTS,
+ IOV_NUMLOCALINTS,
+ IOV_HOSTREG,
+ IOV_DEVREG,
+ IOV_DIVISOR,
+ IOV_SDMODE,
+ IOV_HISPEED,
+ IOV_HCIREGS,
+ IOV_POWER,
+ IOV_CLOCK,
+ IOV_RXCHAIN
+};
+
+const bcm_iovar_t sdioh_iovars[] = {
+ {"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 },
+ {"sd_blockmode", IOV_BLOCKMODE, 0, IOVT_BOOL, 0 },
+ {"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */
+ {"sd_dma", IOV_DMA, 0, IOVT_BOOL, 0 },
+ {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0 },
+ {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0 },
+ {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 },
+ {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sd_divisor", IOV_DIVISOR, 0, IOVT_UINT32, 0 },
+ {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0 },
+ {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0 },
+ {"sd_mode", IOV_SDMODE, 0, IOVT_UINT32, 100},
+ {"sd_highspeed", IOV_HISPEED, 0, IOVT_UINT32, 0 },
+ {"sd_rxchain", IOV_RXCHAIN, 0, IOVT_BOOL, 0 },
+ {NULL, 0, 0, 0, 0 }
+};
+
+int
+sdioh_iovar_op(sdioh_info_t *si, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
+{
+ const bcm_iovar_t *vi = NULL;
+ int bcmerror = 0;
+ int val_size;
+ int32 int_val = 0;
+ bool bool_val;
+ uint32 actionid;
+
+ ASSERT(name);
+ ASSERT(len >= 0);
+
+ /* Get must have return space; Set does not take qualifiers */
+ ASSERT(set || (arg && len));
+ ASSERT(!set || (!params && !plen));
+
+ sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
+
+ if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
+ bcmerror = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
+ goto exit;
+
+ /* Set up params so get and set can share the convenience variables */
+ if (params == NULL) {
+ params = arg;
+ plen = len;
+ }
+
+ if (vi->type == IOVT_VOID)
+ val_size = 0;
+ else if (vi->type == IOVT_BUFFER)
+ val_size = len;
+ else
+ val_size = sizeof(int);
+
+ if (plen >= (int)sizeof(int_val))
+ bcopy(params, &int_val, sizeof(int_val));
+
+ bool_val = (int_val != 0) ? TRUE : FALSE;
+ BCM_REFERENCE(bool_val);
+
+ actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+ switch (actionid) {
+ case IOV_GVAL(IOV_MSGLEVEL):
+ int_val = (int32)sd_msglevel;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_MSGLEVEL):
+ sd_msglevel = int_val;
+ break;
+
+ case IOV_GVAL(IOV_BLOCKMODE):
+ int_val = (int32)si->sd_blockmode;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_BLOCKMODE):
+ si->sd_blockmode = (bool)int_val;
+ /* Haven't figured out how to make non-block mode with DMA */
+ break;
+
+ case IOV_GVAL(IOV_BLOCKSIZE):
+ if ((uint32)int_val > si->num_funcs) {
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ int_val = (int32)si->client_block_size[int_val];
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_BLOCKSIZE):
+ {
+ uint func = ((uint32)int_val >> 16);
+ uint blksize = (uint16)int_val;
+ uint maxsize;
+
+ if (func > si->num_funcs) {
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ switch (func) {
+ case 0: maxsize = 32; break;
+ case 1: maxsize = BLOCK_SIZE_4318; break;
+ case 2: maxsize = BLOCK_SIZE_4328; break;
+ default: maxsize = 0;
+ }
+ if (blksize > maxsize) {
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ if (!blksize) {
+ blksize = maxsize;
+ }
+
+ /* Now set it */
+ si->client_block_size[func] = blksize;
+
+ break;
+ }
+
+ case IOV_GVAL(IOV_RXCHAIN):
+ int_val = (int32)si->use_rxchain;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_DMA):
+ int_val = (int32)si->sd_use_dma;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DMA):
+ si->sd_use_dma = (bool)int_val;
+ break;
+
+ case IOV_GVAL(IOV_USEINTS):
+ int_val = (int32)si->use_client_ints;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_USEINTS):
+ si->use_client_ints = (bool)int_val;
+ if (si->use_client_ints)
+ si->intmask |= CLIENT_INTR;
+ else
+ si->intmask &= ~CLIENT_INTR;
+
+ break;
+
+ case IOV_GVAL(IOV_DIVISOR):
+ int_val = (uint32)sd_divisor;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DIVISOR):
+ sd_divisor = int_val;
+ break;
+
+ case IOV_GVAL(IOV_POWER):
+ int_val = (uint32)sd_power;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_POWER):
+ sd_power = int_val;
+ break;
+
+ case IOV_GVAL(IOV_CLOCK):
+ int_val = (uint32)sd_clock;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_CLOCK):
+ sd_clock = int_val;
+ break;
+
+ case IOV_GVAL(IOV_SDMODE):
+ int_val = (uint32)sd_sdmode;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SDMODE):
+ sd_sdmode = int_val;
+ break;
+
+ case IOV_GVAL(IOV_HISPEED):
+ int_val = (uint32)sd_hiok;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_HISPEED):
+ sd_hiok = int_val;
+ break;
+
+ case IOV_GVAL(IOV_NUMINTS):
+ int_val = (int32)si->intrcount;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_NUMLOCALINTS):
+ int_val = (int32)0;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_HOSTREG):
+ {
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+
+ if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
+ sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__,
+ (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+ sd_ptr->offset));
+ if (sd_ptr->offset & 1)
+ int_val = 8; /* sdioh_sdmmc_rreg8(si, sd_ptr->offset); */
+ else if (sd_ptr->offset & 2)
+ int_val = 16; /* sdioh_sdmmc_rreg16(si, sd_ptr->offset); */
+ else
+ int_val = 32; /* sdioh_sdmmc_rreg(si, sd_ptr->offset); */
+
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+
+ case IOV_SVAL(IOV_HOSTREG):
+ {
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+
+ if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
+ sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value,
+ (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+ sd_ptr->offset));
+ break;
+ }
+
+ case IOV_GVAL(IOV_DEVREG):
+ {
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+ uint8 data = 0;
+
+ if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+
+ int_val = (int)data;
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+
+ case IOV_SVAL(IOV_DEVREG):
+ {
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+ uint8 data = (uint8)sd_ptr->value;
+
+ if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+ break;
+ }
+
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+exit:
+
+ return bcmerror;
+}
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+
+SDIOH_API_RC
+sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable)
+{
+ SDIOH_API_RC status;
+ uint8 data;
+
+ if (enable)
+ data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE | SDIO_SEPINT_ACT_HI;
+ else
+ data = SDIO_SEPINT_ACT_HI; /* disable hw oob interrupt */
+
+ status = sdioh_request_byte(sd, SDIOH_WRITE, 0, SDIOD_CCCR_BRCM_SEPINT, &data);
+ return status;
+}
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+
+extern SDIOH_API_RC
+sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+ SDIOH_API_RC status;
+ /* No lock needed since sdioh_request_byte does locking */
+ status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+ return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+ /* No lock needed since sdioh_request_byte does locking */
+ SDIOH_API_RC status;
+ status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+ return status;
+}
+
+static int
+sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
+{
+ /* read 24 bits and return valid 17 bit addr */
+ int i;
+ uint32 scratch, regdata;
+ uint8 *ptr = (uint8 *)&scratch;
+ for (i = 0; i < 3; i++) {
+ if ((sdioh_sdmmc_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
+ sd_err(("%s: Can't read!\n", __FUNCTION__));
+
+ *ptr++ = (uint8) regdata;
+ regaddr++;
+ }
+
+ /* Only the lower 17-bits are valid */
+ scratch = ltoh32(scratch);
+ scratch &= 0x0001FFFF;
+ return (scratch);
+}
+
+extern SDIOH_API_RC
+sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
+{
+ uint32 count;
+ int offset;
+ uint32 foo;
+ uint8 *cis = cisd;
+
+ sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
+
+ if (!sd->func_cis_ptr[func]) {
+ bzero(cis, length);
+ sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));
+
+ for (count = 0; count < length; count++) {
+ offset = sd->func_cis_ptr[func] + count;
+ if (sdioh_sdmmc_card_regread (sd, 0, offset, 1, &foo) < 0) {
+ sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ *cis = (uint8)(foo & 0xff);
+ cis++;
+ }
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
+{
+ int err_ret;
+
+ sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr));
+
+ DHD_PM_RESUME_WAIT(sdioh_request_byte_wait);
+ DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+ if(rw) { /* CMD52 Write */
+ if (func == 0) {
+ /* Can only directly write to some F0 registers. Handle F2 enable
+ * as a special case.
+ */
+ if (regaddr == SDIOD_CCCR_IOEN) {
+ if (gInstance->func[2]) {
+ sdio_claim_host(gInstance->func[2]);
+ if (*byte & SDIO_FUNC_ENABLE_2) {
+ /* Enable Function 2 */
+ err_ret = sdio_enable_func(gInstance->func[2]);
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: enable F2 failed:%d",
+ err_ret));
+ }
+ } else {
+ /* Disable Function 2 */
+ err_ret = sdio_disable_func(gInstance->func[2]);
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d",
+ err_ret));
+ }
+ }
+ sdio_release_host(gInstance->func[2]);
+ }
+ }
+#if defined(MMC_SDIO_ABORT)
+ /* to allow abort command through F1 */
+ else if (regaddr == SDIOD_CCCR_IOABORT) {
+ sdio_claim_host(gInstance->func[func]);
+ /*
+ * this sdio_f0_writeb() can be replaced with another api
+ * depending upon MMC driver change.
+ * As of this time, this is a temporary one.
+ */
+ sdio_writeb(gInstance->func[func], *byte, regaddr, &err_ret);
+ sdio_release_host(gInstance->func[func]);
+ }
+#endif /* MMC_SDIO_ABORT */
+ else if (regaddr < 0xF0) {
+ sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr));
+ } else {
+ /* Claim host controller, perform F0 write, and release */
+ sdio_claim_host(gInstance->func[func]);
+ sdio_f0_writeb(gInstance->func[func], *byte, regaddr, &err_ret);
+ sdio_release_host(gInstance->func[func]);
+ }
+ } else {
+ /* Claim host controller, perform Fn write, and release */
+ sdio_claim_host(gInstance->func[func]);
+ sdio_writeb(gInstance->func[func], *byte, regaddr, &err_ret);
+ sdio_release_host(gInstance->func[func]);
+ }
+ } else { /* CMD52 Read */
+ /* Claim host controller, perform Fn read, and release */
+ sdio_claim_host(gInstance->func[func]);
+
+ if (func == 0) {
+ *byte = sdio_f0_readb(gInstance->func[func], regaddr, &err_ret);
+ } else {
+ *byte = sdio_readb(gInstance->func[func], regaddr, &err_ret);
+ }
+
+ sdio_release_host(gInstance->func[func]);
+ }
+
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
+ rw ? "Write" : "Read", func, regaddr, *byte, err_ret));
+ }
+
+ return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+extern SDIOH_API_RC
+sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
+ uint32 *word, uint nbytes)
+{
+ int err_ret = SDIOH_API_RC_FAIL;
+
+ if (func == 0) {
+ sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
+ __FUNCTION__, cmd_type, rw, func, addr, nbytes));
+
+ DHD_PM_RESUME_WAIT(sdioh_request_word_wait);
+ DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+ /* Claim host controller */
+ sdio_claim_host(gInstance->func[func]);
+
+ if (rw) { /* CMD53 Write */
+ if (nbytes == 4) {
+ sdio_writel(gInstance->func[func], *word, addr, &err_ret);
+ } else if (nbytes == 2) {
+ sdio_writew(gInstance->func[func], (*word & 0xFFFF), addr, &err_ret);
+ } else {
+ sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+ }
+ } else { /* CMD53 Read */
+ if (nbytes == 4) {
+ *word = sdio_readl(gInstance->func[func], addr, &err_ret);
+ } else if (nbytes == 2) {
+ *word = sdio_readw(gInstance->func[func], addr, &err_ret) & 0xFFFF;
+ } else {
+ sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+ }
+ }
+
+ /* Release host controller */
+ sdio_release_host(gInstance->func[func]);
+
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: Failed to %s word, Err: 0x%08x",
+ rw ? "Write" : "Read", err_ret));
+ }
+
+ return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+static SDIOH_API_RC
+sdioh_request_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
+ uint addr, void *pkt)
+{
+ bool fifo = (fix_inc == SDIOH_DATA_FIX);
+ uint32 SGCount = 0;
+ int err_ret = 0;
+ void *pnext, *pprev;
+ uint ttl_len, dma_len, lft_len, xfred_len, pkt_len;
+ uint blk_num;
+ struct mmc_request mmc_req;
+ struct mmc_command mmc_cmd;
+ struct mmc_data mmc_dat;
+
+ sd_trace(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(pkt);
+ DHD_PM_RESUME_WAIT(sdioh_request_packet_wait);
+ DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+
+ ttl_len = xfred_len = 0;
+ /* at least 4 bytes alignment of skb buff is guaranteed */
+ for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext))
+ ttl_len += PKTLEN(sd->osh, pnext);
+
+ if (!sd->use_rxchain || ttl_len <= sd->client_block_size[func]) {
+ blk_num = 0;
+ dma_len = 0;
+ } else {
+ blk_num = ttl_len / sd->client_block_size[func];
+ dma_len = blk_num * sd->client_block_size[func];
+ }
+ lft_len = ttl_len - dma_len;
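+	/* Whole blocks (dma_len bytes) are issued as a single CMD53 block-mode
+	 * request built from the scatter/gather list below; any remainder
+	 * (lft_len bytes) is moved by the PIO path further down using the
+	 * kernel's sdio_memcpy_toio()/sdio_memcpy_fromio()/sdio_readsb() helpers.
+	 */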
+
+ sd_trace(("%s: %s %dB to func%d:%08x, %d blks with DMA, %dB leftover\n",
+ __FUNCTION__, write ? "W" : "R",
+ ttl_len, func, addr, blk_num, lft_len));
+
+ if (0 != dma_len) {
+ memset(&mmc_req, 0, sizeof(struct mmc_request));
+ memset(&mmc_cmd, 0, sizeof(struct mmc_command));
+ memset(&mmc_dat, 0, sizeof(struct mmc_data));
+
+ /* Set up DMA descriptors */
+ pprev = pkt;
+ for (pnext = pkt;
+ pnext && dma_len;
+ pnext = PKTNEXT(sd->osh, pnext)) {
+ pkt_len = PKTLEN(sd->osh, pnext);
+
+ if (dma_len > pkt_len)
+ dma_len -= pkt_len;
+ else {
+ pkt_len = xfred_len = dma_len;
+ dma_len = 0;
+ pkt = pnext;
+ }
+
+ sg_set_buf(&sd->sg_list[SGCount++],
+ (uint8*)PKTDATA(sd->osh, pnext),
+ pkt_len);
+
+ if (SGCount >= SDIOH_SDMMC_MAX_SG_ENTRIES) {
+ sd_err(("%s: sg list entries exceed limit\n",
+ __FUNCTION__));
+ return (SDIOH_API_RC_FAIL);
+ }
+ }
+
+ mmc_dat.sg = sd->sg_list;
+ mmc_dat.sg_len = SGCount;
+ mmc_dat.blksz = sd->client_block_size[func];
+ mmc_dat.blocks = blk_num;
+ mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
+
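+		/* SD_IO_RW_EXTENDED (CMD53) argument layout:
+		 *   bit 31      R/W flag
+		 *   bits 30:28  function number
+		 *   bit 27      block mode
+		 *   bit 26      OP code (incrementing address when set)
+		 *   bits 25:9   register address
+		 *   bits 8:0    block/byte count
+		 */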
+ mmc_cmd.opcode = 53; /* SD_IO_RW_EXTENDED */
+ mmc_cmd.arg = write ? 1<<31 : 0;
+ mmc_cmd.arg |= (func & 0x7) << 28;
+ mmc_cmd.arg |= 1<<27;
+ mmc_cmd.arg |= fifo ? 0 : 1<<26;
+ mmc_cmd.arg |= (addr & 0x1FFFF) << 9;
+ mmc_cmd.arg |= blk_num & 0x1FF;
+ mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+
+ mmc_req.cmd = &mmc_cmd;
+ mmc_req.data = &mmc_dat;
+
+ sdio_claim_host(gInstance->func[func]);
+ mmc_set_data_timeout(&mmc_dat, gInstance->func[func]->card);
+ mmc_wait_for_req(gInstance->func[func]->card->host, &mmc_req);
+ sdio_release_host(gInstance->func[func]);
+
+ err_ret = mmc_cmd.error? mmc_cmd.error : mmc_dat.error;
+ if (0 != err_ret) {
+ sd_err(("%s:CMD53 %s failed with code %d\n",
+ __FUNCTION__,
+ write ? "write" : "read",
+ err_ret));
+ sd_err(("%s:Disabling rxchain and retrying with PIO\n",
+ __FUNCTION__));
+ sd->use_rxchain = FALSE;
+ pkt = pprev;
+ lft_len = ttl_len;
+ } else if (!fifo) {
+ addr = addr + ttl_len - lft_len - dma_len;
+ }
+ }
+
+ /* PIO mode */
+ if (0 != lft_len) {
+ /* Claim host controller */
+ sdio_claim_host(gInstance->func[func]);
+ for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
+ uint8 *buf = (uint8*)PKTDATA(sd->osh, pnext) +
+ xfred_len;
+ pkt_len = PKTLEN(sd->osh, pnext);
+ if (0 != xfred_len) {
+ pkt_len -= xfred_len;
+ xfred_len = 0;
+ }
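+			/* round the transfer length up to a 4-byte multiple */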
+ pkt_len = (pkt_len + 3) & 0xFFFFFFFC;
+#ifdef CONFIG_MMC_MSM7X00A
+ if ((pkt_len % 64) == 32) {
+ sd_trace(("%s: Rounding up TX packet +=32\n", __FUNCTION__));
+ pkt_len += 32;
+ }
+#endif /* CONFIG_MMC_MSM7X00A */
+
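+			/* Note: both the fixed-address (FIFO) and incrementing-address
+			 * write cases currently use sdio_memcpy_toio().
+			 */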
+ if ((write) && (!fifo))
+ err_ret = sdio_memcpy_toio(
+ gInstance->func[func],
+ addr, buf, pkt_len);
+ else if (write)
+ err_ret = sdio_memcpy_toio(
+ gInstance->func[func],
+ addr, buf, pkt_len);
+ else if (fifo)
+ err_ret = sdio_readsb(
+ gInstance->func[func],
+ buf, addr, pkt_len);
+ else
+ err_ret = sdio_memcpy_fromio(
+ gInstance->func[func],
+ buf, addr, pkt_len);
+
+ if (err_ret)
+ sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=%d\n",
+ __FUNCTION__,
+ (write) ? "TX" : "RX",
+ pnext, SGCount, addr, pkt_len, err_ret));
+ else
+ sd_trace(("%s: %s xfr'd %p[%d], addr=0x%05x, len=%d\n",
+ __FUNCTION__,
+ (write) ? "TX" : "RX",
+ pnext, SGCount, addr, pkt_len));
+
+ if (!fifo)
+ addr += pkt_len;
+ SGCount ++;
+ }
+ sdio_release_host(gInstance->func[func]);
+ }
+
+ sd_trace(("%s: Exit\n", __FUNCTION__));
+ return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+
+/*
+ * This function takes a buffer or packet, and fixes everything up so that in the
+ * end, a DMA-able packet is created.
+ *
+ * A buffer does not have an associated packet pointer, and may or may not be aligned.
+ * A packet may consist of a single packet, or a packet chain. If it is a packet chain,
+ * then all the packets in the chain must be properly aligned. If the packet data is not
+ * aligned, then there may only be one packet, and in this case, it is copied to a new
+ * aligned packet.
+ *
+ */
+extern SDIOH_API_RC
+sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func,
+ uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
+{
+ SDIOH_API_RC Status;
+ void *mypkt = NULL;
+
+ sd_trace(("%s: Enter\n", __FUNCTION__));
+
+ DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
+ DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+ /* Case 1: we don't have a packet. */
+ if (pkt == NULL) {
+ sd_data(("%s: Creating new %s Packet, len=%d\n",
+ __FUNCTION__, write ? "TX" : "RX", buflen_u));
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+ if (!(mypkt = PKTGET_STATIC(sd->osh, buflen_u, write ? TRUE : FALSE))) {
+#else
+ if (!(mypkt = PKTGET(sd->osh, buflen_u, write ? TRUE : FALSE))) {
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+ sd_err(("%s: PKTGET failed: len %d\n",
+ __FUNCTION__, buflen_u));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ /* For a write, copy the buffer data into the packet. */
+ if (write) {
+ bcopy(buffer, PKTDATA(sd->osh, mypkt), buflen_u);
+ }
+
+ Status = sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt);
+
+ /* For a read, copy the packet data back to the buffer. */
+ if (!write) {
+ bcopy(PKTDATA(sd->osh, mypkt), buffer, buflen_u);
+ }
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+ PKTFREE_STATIC(sd->osh, mypkt, write ? TRUE : FALSE);
+#else
+ PKTFREE(sd->osh, mypkt, write ? TRUE : FALSE);
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+ } else if (((uint32)(PKTDATA(sd->osh, pkt)) & DMA_ALIGN_MASK) != 0) {
+ /* Case 2: We have a packet, but it is unaligned. */
+
+ /* In this case, we cannot have a chain. */
+ ASSERT(PKTNEXT(sd->osh, pkt) == NULL);
+
+ sd_data(("%s: Creating aligned %s Packet, len=%d\n",
+ __FUNCTION__, write ? "TX" : "RX", PKTLEN(sd->osh, pkt)));
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+ if (!(mypkt = PKTGET_STATIC(sd->osh, PKTLEN(sd->osh, pkt), write ? TRUE : FALSE))) {
+#else
+ if (!(mypkt = PKTGET(sd->osh, PKTLEN(sd->osh, pkt), write ? TRUE : FALSE))) {
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+ sd_err(("%s: PKTGET failed: len %d\n",
+ __FUNCTION__, PKTLEN(sd->osh, pkt)));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ /* For a write, copy the buffer data into the packet. */
+ if (write) {
+ bcopy(PKTDATA(sd->osh, pkt),
+ PKTDATA(sd->osh, mypkt),
+ PKTLEN(sd->osh, pkt));
+ }
+
+ Status = sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt);
+
+ /* For a read, copy the packet data back to the buffer. */
+ if (!write) {
+ bcopy(PKTDATA(sd->osh, mypkt),
+ PKTDATA(sd->osh, pkt),
+ PKTLEN(sd->osh, mypkt));
+ }
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+ PKTFREE_STATIC(sd->osh, mypkt, write ? TRUE : FALSE);
+#else
+ PKTFREE(sd->osh, mypkt, write ? TRUE : FALSE);
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+ } else { /* case 3: We have a packet and it is aligned. */
+ sd_data(("%s: Aligned %s Packet, direct DMA\n",
+ __FUNCTION__, write ? "Tx" : "Rx"));
+ Status = sdioh_request_packet(sd, fix_inc, write, func, addr, pkt);
+ }
+
+ return (Status);
+}
+
+/* this function performs "abort" for both of host & device */
+extern int
+sdioh_abort(sdioh_info_t *sd, uint func)
+{
+#if defined(MMC_SDIO_ABORT)
+ char t_func = (char) func;
+#endif /* defined(MMC_SDIO_ABORT) */
+ sd_trace(("%s: Enter\n", __FUNCTION__));
+
+#if defined(MMC_SDIO_ABORT)
+ /* issue abort cmd52 command through F1 */
+ sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func);
+#endif /* defined(MMC_SDIO_ABORT) */
+
+ sd_trace(("%s: Exit\n", __FUNCTION__));
+ return SDIOH_API_RC_SUCCESS;
+}
+
+/* Reset and re-initialize the device */
+int sdioh_sdio_reset(sdioh_info_t *si)
+{
+ sd_trace(("%s: Enter\n", __FUNCTION__));
+ sd_trace(("%s: Exit\n", __FUNCTION__));
+ return SDIOH_API_RC_SUCCESS;
+}
+
+/* Disable device interrupt */
+void
+sdioh_sdmmc_devintr_off(sdioh_info_t *sd)
+{
+ sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+ sd->intmask &= ~CLIENT_INTR;
+}
+
+/* Enable device interrupt */
+void
+sdioh_sdmmc_devintr_on(sdioh_info_t *sd)
+{
+ sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+ sd->intmask |= CLIENT_INTR;
+}
+
+/* Read client card reg */
+int
+sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+
+ if ((func == 0) || (regsize == 1)) {
+ uint8 temp = 0;
+
+ sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
+ *data = temp;
+ *data &= 0xff;
+ sd_data(("%s: byte read data=0x%02x\n",
+ __FUNCTION__, *data));
+ } else {
+ sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize);
+ if (regsize == 2)
+ *data &= 0xffff;
+
+ sd_data(("%s: word read data=0x%08x\n",
+ __FUNCTION__, *data));
+ }
+
+ return SUCCESS;
+}
+
+#if !defined(OOB_INTR_ONLY)
+/* bcmsdh_sdmmc interrupt handler */
+static void IRQHandler(struct sdio_func *func)
+{
+ sdioh_info_t *sd;
+
+ sd_trace(("bcmsdh_sdmmc: ***IRQHandler\n"));
+ sd = gInstance->sd;
+
+ ASSERT(sd != NULL);
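+	/* The MMC core invokes SDIO interrupt handlers with the host already
+	 * claimed; release it here so the client's handler can do its own
+	 * claim/release, then re-claim before returning to the core.
+	 */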
+ sdio_release_host(gInstance->func[0]);
+
+ if (sd->use_client_ints) {
+ sd->intrcount++;
+ ASSERT(sd->intr_handler);
+ ASSERT(sd->intr_handler_arg);
+ (sd->intr_handler)(sd->intr_handler_arg);
+ } else {
+ sd_err(("bcmsdh_sdmmc: ***IRQHandler\n"));
+
+ sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
+ __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
+ }
+
+ sdio_claim_host(gInstance->func[0]);
+}
+
+/* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */
+static void IRQHandlerF2(struct sdio_func *func)
+{
+ sdioh_info_t *sd;
+
+ sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n"));
+
+ sd = gInstance->sd;
+
+ ASSERT(sd != NULL);
+ BCM_REFERENCE(sd);
+}
+#endif /* !defined(OOB_INTR_ONLY) */
+
+#ifdef NOTUSED
+/* Write client card reg */
+static int
+sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
+{
+
+ if ((func == 0) || (regsize == 1)) {
+ uint8 temp;
+
+ temp = data & 0xff;
+ sdioh_request_byte(sd, SDIOH_WRITE, func, regaddr, &temp);
+ sd_data(("%s: byte write data=0x%02x\n",
+ __FUNCTION__, data));
+ } else {
+ if (regsize == 2)
+ data &= 0xffff;
+
+ sdioh_request_word(sd, 0, SDIOH_WRITE, func, regaddr, &data, regsize);
+
+ sd_data(("%s: word write data=0x%08x\n",
+ __FUNCTION__, data));
+ }
+
+ return SUCCESS;
+}
+#endif /* NOTUSED */
+
+int
+sdioh_start(sdioh_info_t *si, int stage)
+{
+ int ret;
+ sdioh_info_t *sd = gInstance->sd;
+
+	/* This has to be done in stages: the interrupt cannot be enabled
+	   until firmware download is complete, otherwise polled SDIO
+	   access would get in the way.
+	*/
+ if (gInstance->func[0]) {
+ if (stage == 0) {
+			/* Since power to the chip was cut, the device has to be
+			   re-enumerated. Set the block size and enable function 1
+			   in preparation for downloading the firmware.
+			*/
+ /* sdio_reset_comm() - has been fixed in latest kernel/msm.git for Linux
+ 2.6.27. The implementation prior to that is buggy, and needs broadcom's
+ patch for it
+ */
+ if ((ret = sdio_reset_comm(gInstance->func[0]->card))) {
+ sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret));
+ return ret;
+ }
+ else {
+ sd->num_funcs = 2;
+ sd->sd_blockmode = TRUE;
+ sd->use_client_ints = TRUE;
+ sd->client_block_size[0] = 64;
+
+ /* Claim host controller */
+ sdio_claim_host(gInstance->func[1]);
+
+ sd->client_block_size[1] = 64;
+ if (sdio_set_block_size(gInstance->func[1], 64)) {
+ sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n"));
+ }
+
+ /* Release host controller F1 */
+ sdio_release_host(gInstance->func[1]);
+
+ if (gInstance->func[2]) {
+ /* Claim host controller F2 */
+ sdio_claim_host(gInstance->func[2]);
+
+ sd->client_block_size[2] = sd_f2_blocksize;
+ if (sdio_set_block_size(gInstance->func[2],
+ sd_f2_blocksize)) {
+ sd_err(("bcmsdh_sdmmc: Failed to set F2 "
+ "blocksize to %d\n", sd_f2_blocksize));
+ }
+
+ /* Release host controller F2 */
+ sdio_release_host(gInstance->func[2]);
+ }
+
+ sdioh_sdmmc_card_enablefuncs(sd);
+ }
+ } else {
+#if !defined(OOB_INTR_ONLY)
+ sdio_claim_host(gInstance->func[0]);
+ sdio_claim_irq(gInstance->func[2], IRQHandlerF2);
+ sdio_claim_irq(gInstance->func[1], IRQHandler);
+ sdio_release_host(gInstance->func[0]);
+#else /* defined(OOB_INTR_ONLY) */
+#if defined(HW_OOB)
+ sdioh_enable_func_intr();
+#endif
+ bcmsdh_oob_intr_set(TRUE);
+#endif /* !defined(OOB_INTR_ONLY) */
+ }
+ }
+ else
+ sd_err(("%s Failed\n", __FUNCTION__));
+
+ return (0);
+}
+
+int
+sdioh_stop(sdioh_info_t *si)
+{
+	/* The MSM7201A Android SDIO stack has a bug with interrupts:
+	   internally the stack falls back to polling, which causes
+	   problems when the device is turned off. Unregister the
+	   interrupt with the SDIO stack to stop the polling.
+	*/
+ if (gInstance->func[0]) {
+#if !defined(OOB_INTR_ONLY)
+ sdio_claim_host(gInstance->func[0]);
+ sdio_release_irq(gInstance->func[1]);
+ sdio_release_irq(gInstance->func[2]);
+ sdio_release_host(gInstance->func[0]);
+#else /* defined(OOB_INTR_ONLY) */
+#if defined(HW_OOB)
+ sdioh_disable_func_intr();
+#endif
+ bcmsdh_oob_intr_set(FALSE);
+#endif /* !defined(OOB_INTR_ONLY) */
+ }
+ else
+ sd_err(("%s Failed\n", __FUNCTION__));
+ return (0);
+}
+
+int
+sdioh_waitlockfree(sdioh_info_t *sd)
+{
+ return (1);
+}
+
+
+SDIOH_API_RC
+sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
+{
+ return SDIOH_API_RC_FAIL;
+}
+
+SDIOH_API_RC
+sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab)
+{
+ return SDIOH_API_RC_FAIL;
+}
+
+bool
+sdioh_gpioin(sdioh_info_t *sd, uint32 gpio)
+{
+ return FALSE;
+}
+
+SDIOH_API_RC
+sdioh_gpio_init(sdioh_info_t *sd)
+{
+ return SDIOH_API_RC_FAIL;
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c
new file mode 100644
index 000000000000..9d2b1d3067b0
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c
@@ -0,0 +1,402 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_sdmmc_linux.c 312783 2012-02-03 22:53:56Z $
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <sdio.h> /* SDIO Device and Protocol Specs */
+#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h> /* to get msglevel bit values */
+
+#include <linux/sched.h> /* request_irq() */
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+
+#if !defined(SDIO_VENDOR_ID_BROADCOM)
+#define SDIO_VENDOR_ID_BROADCOM 0x02d0
+#endif /* !defined(SDIO_VENDOR_ID_BROADCOM) */
+
+#define SDIO_DEVICE_ID_BROADCOM_DEFAULT 0x0000
+
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB)
+#define SDIO_DEVICE_ID_BROADCOM_4325_SDGWB 0x0492 /* BCM94325SDGWB */
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4325)
+#define SDIO_DEVICE_ID_BROADCOM_4325 0x0493
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4329)
+#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4329) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4319)
+#define SDIO_DEVICE_ID_BROADCOM_4319 0x4319
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4319) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4330)
+#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4330) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4334)
+#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4334) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4324)
+#define SDIO_DEVICE_ID_BROADCOM_4324 0x4324
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4324) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_43239)
+#define SDIO_DEVICE_ID_BROADCOM_43239 43239
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_43239) */
+
+
+#include <bcmsdh_sdmmc.h>
+
+#include <dhd_dbg.h>
+
+#ifdef WL_CFG80211
+extern void wl_cfg80211_set_parent_dev(void *dev);
+#endif
+
+extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
+extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
+extern int dhd_os_check_wakelock(void *dhdp);
+extern int dhd_os_check_if_up(void *dhdp);
+extern void *bcmsdh_get_drvdata(void);
+
+int sdio_function_init(void);
+void sdio_function_cleanup(void);
+
+#define DESCRIPTION "bcmsdh_sdmmc Driver"
+#define AUTHOR "Broadcom Corporation"
+
+/* module param defaults */
+static int clockoverride = 0;
+
+module_param(clockoverride, int, 0644);
+MODULE_PARM_DESC(clockoverride, "SDIO card clock override");
+
+PBCMSDH_SDMMC_INSTANCE gInstance;
+
+/* Maximum number of bcmsdh_sdmmc devices supported by driver */
+#define BCMSDH_SDMMC_MAX_DEVICES 1
+
+extern int bcmsdh_probe(struct device *dev);
+extern int bcmsdh_remove(struct device *dev);
+extern volatile bool dhd_mmc_suspend;
+
+static int bcmsdh_sdmmc_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ int ret = 0;
+ static struct sdio_func sdio_func_0;
+ sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+ sd_trace(("sdio_bcmsdh: func->class=%x\n", func->class));
+ sd_trace(("sdio_vendor: 0x%04x\n", func->vendor));
+ sd_trace(("sdio_device: 0x%04x\n", func->device));
+ sd_trace(("Function#: 0x%04x\n", func->num));
+
+ if (func->num == 1) {
+ sdio_func_0.num = 0;
+ sdio_func_0.card = func->card;
+ gInstance->func[0] = &sdio_func_0;
+ if(func->device == 0x4) { /* 4318 */
+ gInstance->func[2] = NULL;
+ sd_trace(("NIC found, calling bcmsdh_probe...\n"));
+ ret = bcmsdh_probe(&func->dev);
+ }
+ }
+
+ gInstance->func[func->num] = func;
+
+ if (func->num == 2) {
+#ifdef WL_CFG80211
+ wl_cfg80211_set_parent_dev(&func->dev);
+#endif
+ sd_trace(("F2 found, calling bcmsdh_probe...\n"));
+ ret = bcmsdh_probe(&func->dev);
+ }
+
+ return ret;
+}
+
+static void bcmsdh_sdmmc_remove(struct sdio_func *func)
+{
+ sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+ sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
+ sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
+ sd_info(("sdio_device: 0x%04x\n", func->device));
+ sd_info(("Function#: 0x%04x\n", func->num));
+
+ if (func->num == 2) {
+ sd_trace(("F2 found, calling bcmsdh_remove...\n"));
+ bcmsdh_remove(&func->dev);
+ } else if (func->num == 1) {
+ sdio_claim_host(func);
+ sdio_disable_func(func);
+ sdio_release_host(func);
+ gInstance->func[1] = NULL;
+ }
+}
+
+/* devices we support, null terminated */
+static const struct sdio_device_id bcmsdh_sdmmc_ids[] = {
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_DEFAULT) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4319) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4324) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43239) },
+ { SDIO_DEVICE_CLASS(SDIO_CLASS_NONE) },
+ { /* end: all zeroes */ },
+};
+
+MODULE_DEVICE_TABLE(sdio, bcmsdh_sdmmc_ids);
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM)
+static int bcmsdh_sdmmc_suspend(struct device *pdev)
+{
+ struct sdio_func *func = dev_to_sdio_func(pdev);
+ mmc_pm_flag_t sdio_flags;
+ int ret;
+
+ if (func->num != 2)
+ return 0;
+
+ sd_trace(("%s Enter\n", __FUNCTION__));
+
+ if (dhd_os_check_wakelock(bcmsdh_get_drvdata()))
+ return -EBUSY;
+ sdio_flags = sdio_get_host_pm_caps(func);
+
+ if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
+ sd_err(("%s: can't keep power while host is suspended\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ /* keep power while host suspended */
+ ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+ if (ret) {
+ sd_err(("%s: error while trying to keep power\n", __FUNCTION__));
+ return ret;
+ }
+#if defined(OOB_INTR_ONLY)
+ bcmsdh_oob_intr_set(0);
+#endif /* defined(OOB_INTR_ONLY) */
+ dhd_mmc_suspend = TRUE;
+ smp_mb();
+
+ return 0;
+}
+
+static int bcmsdh_sdmmc_resume(struct device *pdev)
+{
+#if defined(OOB_INTR_ONLY)
+ struct sdio_func *func = dev_to_sdio_func(pdev);
+#endif
+ sd_trace(("%s Enter\n", __FUNCTION__));
+ dhd_mmc_suspend = FALSE;
+#if defined(OOB_INTR_ONLY)
+ if ((func->num == 2) && dhd_os_check_if_up(bcmsdh_get_drvdata()))
+ bcmsdh_oob_intr_set(1);
+#endif /* (OOB_INTR_ONLY) */
+
+ smp_mb();
+ return 0;
+}
+
+static const struct dev_pm_ops bcmsdh_sdmmc_pm_ops = {
+ .suspend = bcmsdh_sdmmc_suspend,
+ .resume = bcmsdh_sdmmc_resume,
+};
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */
+
+#if defined(BCMLXSDMMC)
+static struct semaphore *notify_semaphore = NULL;
+
+static int dummy_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ if (notify_semaphore)
+ up(notify_semaphore);
+ return 0;
+}
+
+static void dummy_remove(struct sdio_func *func)
+{
+}
+
+static struct sdio_driver dummy_sdmmc_driver = {
+ .probe = dummy_probe,
+ .remove = dummy_remove,
+ .name = "dummy_sdmmc",
+ .id_table = bcmsdh_sdmmc_ids,
+ };
+
+int sdio_func_reg_notify(void* semaphore)
+{
+ notify_semaphore = semaphore;
+ return sdio_register_driver(&dummy_sdmmc_driver);
+}
+
+void sdio_func_unreg_notify(void)
+{
+ sdio_unregister_driver(&dummy_sdmmc_driver);
+}
+
+#endif /* defined(BCMLXSDMMC) */
+
+static struct sdio_driver bcmsdh_sdmmc_driver = {
+ .probe = bcmsdh_sdmmc_probe,
+ .remove = bcmsdh_sdmmc_remove,
+ .name = "bcmsdh_sdmmc",
+ .id_table = bcmsdh_sdmmc_ids,
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM)
+ .drv = {
+ .pm = &bcmsdh_sdmmc_pm_ops,
+ },
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */
+ };
+
+struct sdos_info {
+ sdioh_info_t *sd;
+ spinlock_t lock;
+};
+
+
+int
+sdioh_sdmmc_osinit(sdioh_info_t *sd)
+{
+ struct sdos_info *sdos;
+
+ sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info));
+ sd->sdos_info = (void*)sdos;
+ if (sdos == NULL)
+ return BCME_NOMEM;
+
+ sdos->sd = sd;
+ spin_lock_init(&sdos->lock);
+ return BCME_OK;
+}
+
+void
+sdioh_sdmmc_osfree(sdioh_info_t *sd)
+{
+ struct sdos_info *sdos;
+ ASSERT(sd && sd->sdos_info);
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+ MFREE(sd->osh, sdos, sizeof(struct sdos_info));
+}
+
+/* Interrupt enable/disable */
+SDIOH_API_RC
+sdioh_interrupt_set(sdioh_info_t *sd, bool enable)
+{
+ ulong flags;
+ struct sdos_info *sdos;
+
+ sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling"));
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+ ASSERT(sdos);
+
+#if !defined(OOB_INTR_ONLY)
+ if (enable && !(sd->intr_handler && sd->intr_handler_arg)) {
+ sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+#endif /* !defined(OOB_INTR_ONLY) */
+
+ /* Ensure atomicity for enable/disable calls */
+ spin_lock_irqsave(&sdos->lock, flags);
+
+ sd->client_intr_enabled = enable;
+ if (enable) {
+ sdioh_sdmmc_devintr_on(sd);
+ } else {
+ sdioh_sdmmc_devintr_off(sd);
+ }
+
+ spin_unlock_irqrestore(&sdos->lock, flags);
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+
+#ifdef BCMSDH_MODULE
+static int __init
+bcmsdh_module_init(void)
+{
+ int error = 0;
+ sdio_function_init();
+ return error;
+}
+
+static void __exit
+bcmsdh_module_cleanup(void)
+{
+ sdio_function_cleanup();
+}
+
+module_init(bcmsdh_module_init);
+module_exit(bcmsdh_module_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(DESCRIPTION);
+MODULE_AUTHOR(AUTHOR);
+
+#endif /* BCMSDH_MODULE */
+/*
+ * module init
+*/
+int sdio_function_init(void)
+{
+ int error = 0;
+ sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+
+ gInstance = kzalloc(sizeof(BCMSDH_SDMMC_INSTANCE), GFP_KERNEL);
+ if (!gInstance)
+ return -ENOMEM;
+
+ error = sdio_register_driver(&bcmsdh_sdmmc_driver);
+
+ return error;
+}
+
+/*
+ * module cleanup
+*/
+extern int bcmsdh_remove(struct device *dev);
+void sdio_function_cleanup(void)
+{
+ sd_trace(("%s Enter\n", __FUNCTION__));
+
+
+ sdio_unregister_driver(&bcmsdh_sdmmc_driver);
+
+ if (gInstance)
+ kfree(gInstance);
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmutils.c b/drivers/net/wireless/bcmdhd/bcmutils.c
new file mode 100644
index 000000000000..0d92efc6a62a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmutils.c
@@ -0,0 +1,2093 @@
+/*
+ * Driver O/S-independent utility routines
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmutils.c 312855 2012-02-04 02:01:18Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <stdarg.h>
+#ifdef BCMDRIVER
+
+#include <osl.h>
+#include <bcmutils.h>
+
+#else /* !BCMDRIVER */
+
+#include <stdio.h>
+#include <string.h>
+#include <bcmutils.h>
+
+#if defined(BCMEXTSUP)
+#include <bcm_osl.h>
+#endif
+
+
+#endif /* !BCMDRIVER */
+
+#include <bcmendian.h>
+#include <bcmdevs.h>
+#include <proto/ethernet.h>
+#include <proto/vlan.h>
+#include <proto/bcmip.h>
+#include <proto/802.1d.h>
+#include <proto/802.11.h>
+void *_bcmutils_dummy_fn = NULL;
+
+
+#ifdef BCMDRIVER
+
+
+
+/* copy a pkt buffer chain into a buffer */
+uint
+pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf)
+{
+ uint n, ret = 0;
+
+ if (len < 0)
+ len = 4096; /* "infinite" */
+
+ /* skip 'offset' bytes */
+ for (; p && offset; p = PKTNEXT(osh, p)) {
+ if (offset < (uint)PKTLEN(osh, p))
+ break;
+ offset -= PKTLEN(osh, p);
+ }
+
+ if (!p)
+ return 0;
+
+ /* copy the data */
+ for (; p && len; p = PKTNEXT(osh, p)) {
+ n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len);
+ bcopy(PKTDATA(osh, p) + offset, buf, n);
+ buf += n;
+ len -= n;
+ ret += n;
+ offset = 0;
+ }
+
+ return ret;
+}
+
+/* copy a buffer into a pkt buffer chain */
+uint
+pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf)
+{
+ uint n, ret = 0;
+
+ /* skip 'offset' bytes */
+ for (; p && offset; p = PKTNEXT(osh, p)) {
+ if (offset < (uint)PKTLEN(osh, p))
+ break;
+ offset -= PKTLEN(osh, p);
+ }
+
+ if (!p)
+ return 0;
+
+ /* copy the data */
+ for (; p && len; p = PKTNEXT(osh, p)) {
+ n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len);
+ bcopy(buf, PKTDATA(osh, p) + offset, n);
+ buf += n;
+ len -= n;
+ ret += n;
+ offset = 0;
+ }
+
+ return ret;
+}
+
+
+
+/* return total length of buffer chain */
+uint BCMFASTPATH
+pkttotlen(osl_t *osh, void *p)
+{
+ uint total;
+ int len;
+
+ total = 0;
+ for (; p; p = PKTNEXT(osh, p)) {
+ len = PKTLEN(osh, p);
+ total += len;
+ }
+
+ return (total);
+}
+
+/* return the last buffer of chained pkt */
+void *
+pktlast(osl_t *osh, void *p)
+{
+ for (; PKTNEXT(osh, p); p = PKTNEXT(osh, p))
+ ;
+
+ return (p);
+}
+
+/* count segments of a chained packet */
+uint BCMFASTPATH
+pktsegcnt(osl_t *osh, void *p)
+{
+ uint cnt;
+
+ for (cnt = 0; p; p = PKTNEXT(osh, p))
+ cnt++;
+
+ return cnt;
+}
+
+
+/* count segments of a chained packet */
+uint BCMFASTPATH
+pktsegcnt_war(osl_t *osh, void *p)
+{
+ uint cnt;
+ uint8 *pktdata;
+ uint len, remain, align64;
+
+ for (cnt = 0; p; p = PKTNEXT(osh, p)) {
+ cnt++;
+ len = PKTLEN(osh, p);
+ if (len > 128) {
+ pktdata = (uint8 *)PKTDATA(osh, p); /* starting address of data */
+ /* Check for page boundary straddle (2048B) */
+ if (((uintptr)pktdata & ~0x7ff) != ((uintptr)(pktdata+len) & ~0x7ff))
+ cnt++;
+
+ align64 = (uint)((uintptr)pktdata & 0x3f); /* aligned to 64B */
+ align64 = (64 - align64) & 0x3f;
+ len -= align64; /* bytes from aligned 64B to end */
+ /* if aligned to 128B, check for MOD 128 between 1 to 4B */
+ remain = len % 128;
+ if (remain > 0 && remain <= 4)
+ cnt++; /* add extra seg */
+ }
+ }
+
+ return cnt;
+}
+
+uint8 * BCMFASTPATH
+pktoffset(osl_t *osh, void *p, uint offset)
+{
+ uint total = pkttotlen(osh, p);
+ uint pkt_off = 0, len = 0;
+ uint8 *pdata = (uint8 *) PKTDATA(osh, p);
+
+ if (offset > total)
+ return NULL;
+
+ for (; p; p = PKTNEXT(osh, p)) {
+ pdata = (uint8 *) PKTDATA(osh, p);
+ pkt_off = offset - len;
+ len += PKTLEN(osh, p);
+ if (len > offset)
+ break;
+ }
+ return (uint8*) (pdata+pkt_off);
+}
+
+/*
+ * osl multiple-precedence packet queue
+ * hi_prec is always >= the number of the highest non-empty precedence
+ */
+void * BCMFASTPATH
+pktq_penq(struct pktq *pq, int prec, void *p)
+{
+ struct pktq_prec *q;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+ ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
+
+ ASSERT(!pktq_full(pq));
+ ASSERT(!pktq_pfull(pq, prec));
+
+ q = &pq->q[prec];
+
+ if (q->head)
+ PKTSETLINK(q->tail, p);
+ else
+ q->head = p;
+
+ q->tail = p;
+ q->len++;
+
+ pq->len++;
+
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (uint8)prec;
+
+ return p;
+}
+
+void * BCMFASTPATH
+pktq_penq_head(struct pktq *pq, int prec, void *p)
+{
+ struct pktq_prec *q;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+ ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
+
+ ASSERT(!pktq_full(pq));
+ ASSERT(!pktq_pfull(pq, prec));
+
+ q = &pq->q[prec];
+
+ if (q->head == NULL)
+ q->tail = p;
+
+ PKTSETLINK(p, q->head);
+ q->head = p;
+ q->len++;
+
+ pq->len++;
+
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (uint8)prec;
+
+ return p;
+}
+
+void * BCMFASTPATH
+pktq_pdeq(struct pktq *pq, int prec)
+{
+ struct pktq_prec *q;
+ void *p;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ if ((q->head = PKTLINK(p)) == NULL)
+ q->tail = NULL;
+
+ q->len--;
+
+ pq->len--;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+void * BCMFASTPATH
+pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p)
+{
+ struct pktq_prec *q;
+ void *p;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ q = &pq->q[prec];
+
+ if (prev_p == NULL)
+ return NULL;
+
+ if ((p = PKTLINK(prev_p)) == NULL)
+ return NULL;
+
+ q->len--;
+
+ pq->len--;
+
+ PKTSETLINK(prev_p, PKTLINK(p));
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+void * BCMFASTPATH
+pktq_pdeq_tail(struct pktq *pq, int prec)
+{
+ struct pktq_prec *q;
+ void *p, *prev;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ for (prev = NULL; p != q->tail; p = PKTLINK(p))
+ prev = p;
+
+ if (prev)
+ PKTSETLINK(prev, NULL);
+ else
+ q->head = NULL;
+
+ q->tail = prev;
+ q->len--;
+
+ pq->len--;
+
+ return p;
+}
+
+void
+pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir, ifpkt_cb_t fn, int arg)
+{
+ struct pktq_prec *q;
+ void *p, *prev = NULL;
+
+ q = &pq->q[prec];
+ p = q->head;
+ while (p) {
+ if (fn == NULL || (*fn)(p, arg)) {
+ bool head = (p == q->head);
+ if (head)
+ q->head = PKTLINK(p);
+ else
+ PKTSETLINK(prev, PKTLINK(p));
+ PKTSETLINK(p, NULL);
+ PKTFREE(osh, p, dir);
+ q->len--;
+ pq->len--;
+ p = (head ? q->head : PKTLINK(prev));
+ } else {
+ prev = p;
+ p = PKTLINK(p);
+ }
+ }
+
+ if (q->head == NULL) {
+ ASSERT(q->len == 0);
+ q->tail = NULL;
+ }
+}
+
+bool BCMFASTPATH
+pktq_pdel(struct pktq *pq, void *pktbuf, int prec)
+{
+ struct pktq_prec *q;
+ void *p;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ if (!pktbuf)
+ return FALSE;
+
+ q = &pq->q[prec];
+
+ if (q->head == pktbuf) {
+ if ((q->head = PKTLINK(pktbuf)) == NULL)
+ q->tail = NULL;
+ } else {
+ for (p = q->head; p && PKTLINK(p) != pktbuf; p = PKTLINK(p))
+ ;
+ if (p == NULL)
+ return FALSE;
+
+ PKTSETLINK(p, PKTLINK(pktbuf));
+ if (q->tail == pktbuf)
+ q->tail = p;
+ }
+
+ q->len--;
+ pq->len--;
+ PKTSETLINK(pktbuf, NULL);
+ return TRUE;
+}
+
+void
+pktq_init(struct pktq *pq, int num_prec, int max_len)
+{
+ int prec;
+
+ ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC);
+
+ /* pq is variable size; only zero out what's requested */
+ bzero(pq, OFFSETOF(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec));
+
+ pq->num_prec = (uint16)num_prec;
+
+ pq->max = (uint16)max_len;
+
+ for (prec = 0; prec < num_prec; prec++)
+ pq->q[prec].max = pq->max;
+}
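+
+/* Illustrative usage sketch of the multi-precedence queue above; the
+ * names txq/pkt/prec are hypothetical caller values. The queue never
+ * allocates packets itself, and callers are expected to check
+ * pktq_full()/pktq_pfull() before enqueueing (the enqueue routines
+ * only ASSERT on overflow):
+ *
+ *	struct pktq txq;
+ *	int prec_out;
+ *	pktq_init(&txq, PKTQ_MAX_PREC, 128);	// overall and per-precedence cap of 128
+ *	pktq_penq(&txq, prec, pkt);		// tail-enqueue at precedence 'prec'
+ *	pkt = pktq_deq(&txq, &prec_out);	// dequeue from highest non-empty precedence
+ */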
+
+void
+pktq_set_max_plen(struct pktq *pq, int prec, int max_len)
+{
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ if (prec < pq->num_prec)
+ pq->q[prec].max = (uint16)max_len;
+}
+
+void * BCMFASTPATH
+pktq_deq(struct pktq *pq, int *prec_out)
+{
+ struct pktq_prec *q;
+ void *p;
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ if ((q->head = PKTLINK(p)) == NULL)
+ q->tail = NULL;
+
+ q->len--;
+
+ pq->len--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+void * BCMFASTPATH
+pktq_deq_tail(struct pktq *pq, int *prec_out)
+{
+ struct pktq_prec *q;
+ void *p, *prev;
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ for (prec = 0; prec < pq->hi_prec; prec++)
+ if (pq->q[prec].head)
+ break;
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ for (prev = NULL; p != q->tail; p = PKTLINK(p))
+ prev = p;
+
+ if (prev)
+ PKTSETLINK(prev, NULL);
+ else
+ q->head = NULL;
+
+ q->tail = prev;
+ q->len--;
+
+ pq->len--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+void *
+pktq_peek(struct pktq *pq, int *prec_out)
+{
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ return (pq->q[prec].head);
+}
+
+void *
+pktq_peek_tail(struct pktq *pq, int *prec_out)
+{
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ for (prec = 0; prec < pq->hi_prec; prec++)
+ if (pq->q[prec].head)
+ break;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ return (pq->q[prec].tail);
+}
+
+void
+pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg)
+{
+ int prec;
+
+ /* Optimize flush, if pktq len = 0, just return.
+ * pktq len of 0 means pktq's prec q's are all empty.
+ */
+ if (pq->len == 0) {
+ return;
+ }
+
+ for (prec = 0; prec < pq->num_prec; prec++)
+ pktq_pflush(osh, pq, prec, dir, fn, arg);
+ if (fn == NULL)
+ ASSERT(pq->len == 0);
+}
+
+/* Return sum of lengths of a specific set of precedences */
+int
+pktq_mlen(struct pktq *pq, uint prec_bmp)
+{
+ int prec, len;
+
+ len = 0;
+
+ for (prec = 0; prec <= pq->hi_prec; prec++)
+ if (prec_bmp & (1 << prec))
+ len += pq->q[prec].len;
+
+ return len;
+}
+
+/* Priority peek from a specific set of precedences */
+void * BCMFASTPATH
+pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out)
+{
+ struct pktq_prec *q;
+ void *p;
+ int prec;
+
+ if (pq->len == 0)
+ {
+ return NULL;
+ }
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL)
+ if (prec-- == 0)
+ return NULL;
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ return p;
+}
+/* Priority dequeue from a specific set of precedences */
+void * BCMFASTPATH
+pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out)
+{
+ struct pktq_prec *q;
+ void *p;
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ while ((pq->q[prec].head == NULL) || ((prec_bmp & (1 << prec)) == 0))
+ if (prec-- == 0)
+ return NULL;
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ if ((q->head = PKTLINK(p)) == NULL)
+ q->tail = NULL;
+
+ q->len--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ pq->len--;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+#endif /* BCMDRIVER */
+
+const unsigned char bcm_ctype[] = {
+
+ _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 0-7 */
+ _BCM_C, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C,
+ _BCM_C, /* 8-15 */
+ _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 16-23 */
+ _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 24-31 */
+ _BCM_S|_BCM_SP,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 32-39 */
+ _BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 40-47 */
+ _BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D, /* 48-55 */
+ _BCM_D,_BCM_D,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 56-63 */
+ _BCM_P, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X,
+ _BCM_U|_BCM_X, _BCM_U, /* 64-71 */
+ _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 72-79 */
+ _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 80-87 */
+ _BCM_U,_BCM_U,_BCM_U,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 88-95 */
+ _BCM_P, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X,
+ _BCM_L|_BCM_X, _BCM_L, /* 96-103 */
+ _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 104-111 */
+ _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 112-119 */
+ _BCM_L,_BCM_L,_BCM_L,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_C, /* 120-127 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 128-143 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 144-159 */
+ _BCM_S|_BCM_SP, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
+ _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 160-175 */
+ _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
+ _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 176-191 */
+ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U,
+ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, /* 192-207 */
+ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_P, _BCM_U, _BCM_U, _BCM_U,
+ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_L, /* 208-223 */
+ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L,
+ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, /* 224-239 */
+ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_P, _BCM_L, _BCM_L, _BCM_L,
+ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L /* 240-255 */
+};
+
+ulong
+bcm_strtoul(const char *cp, char **endp, uint base)
+{
+ ulong result, last_result = 0, value;
+ bool minus;
+
+ minus = FALSE;
+
+ while (bcm_isspace(*cp))
+ cp++;
+
+ if (cp[0] == '+')
+ cp++;
+ else if (cp[0] == '-') {
+ minus = TRUE;
+ cp++;
+ }
+
+ if (base == 0) {
+ if (cp[0] == '0') {
+ if ((cp[1] == 'x') || (cp[1] == 'X')) {
+ base = 16;
+ cp = &cp[2];
+ } else {
+ base = 8;
+ cp = &cp[1];
+ }
+ } else
+ base = 10;
+ } else if (base == 16 && (cp[0] == '0') && ((cp[1] == 'x') || (cp[1] == 'X'))) {
+ cp = &cp[2];
+ }
+
+ result = 0;
+
+ while (bcm_isxdigit(*cp) &&
+ (value = bcm_isdigit(*cp) ? *cp-'0' : bcm_toupper(*cp)-'A'+10) < base) {
+ result = result*base + value;
+ /* Detected overflow */
+ if (result < last_result && !minus)
+ return (ulong)-1;
+ last_result = result;
+ cp++;
+ }
+
+ if (minus)
+ result = (ulong)(-(long)result);
+
+ if (endp)
+ *endp = DISCARD_QUAL(cp, char);
+
+ return (result);
+}
+
+int
+bcm_atoi(const char *s)
+{
+ return (int)bcm_strtoul(s, NULL, 10);
+}
+
+/* return pointer to location of substring 'needle' in 'haystack' */
+char *
+bcmstrstr(const char *haystack, const char *needle)
+{
+ int len, nlen;
+ int i;
+
+ if ((haystack == NULL) || (needle == NULL))
+ return DISCARD_QUAL(haystack, char);
+
+ nlen = strlen(needle);
+ len = strlen(haystack) - nlen + 1;
+
+ for (i = 0; i < len; i++)
+ if (memcmp(needle, &haystack[i], nlen) == 0)
+ return DISCARD_QUAL(&haystack[i], char);
+ return (NULL);
+}
+
+char *
+bcmstrcat(char *dest, const char *src)
+{
+ char *p;
+
+ p = dest + strlen(dest);
+
+ while ((*p++ = *src++) != '\0')
+ ;
+
+ return (dest);
+}
+
+char *
+bcmstrncat(char *dest, const char *src, uint size)
+{
+ char *endp;
+ char *p;
+
+ p = dest + strlen(dest);
+ endp = p + size;
+
+ while (p != endp && (*p++ = *src++) != '\0')
+ ;
+
+ return (dest);
+}
+
+
+/****************************************************************************
+* Function: bcmstrtok
+*
+* Purpose:
+* Tokenizes a string. This function is conceptually similar to ANSI C strtok(),
+* but allows strToken() to be used by different strings or callers at the same
+* time. Each call modifies '*string' by substituting a NULL character for the
+* first delimiter that is encountered, and updates 'string' to point to the char
+* after the delimiter. Leading delimiters are skipped.
+*
+* Parameters:
+* string (mod) Ptr to string ptr, updated by token.
+* delimiters (in) Set of delimiter characters.
+* tokdelim (out) Character that delimits the returned token. (May
+* be set to NULL if token delimiter is not required).
+*
+* Returns: Pointer to the next token found. NULL when no more tokens are found.
+*****************************************************************************
+*/
+char *
+bcmstrtok(char **string, const char *delimiters, char *tokdelim)
+{
+ unsigned char *str;
+ unsigned long map[8];
+ int count;
+ char *nextoken;
+
+ if (tokdelim != NULL) {
+ /* Prime the token delimiter */
+ *tokdelim = '\0';
+ }
+
+ /* Clear control map */
+ for (count = 0; count < 8; count++) {
+ map[count] = 0;
+ }
+
+ /* Set bits in delimiter table */
+ do {
+ map[*delimiters >> 5] |= (1 << (*delimiters & 31));
+ }
+ while (*delimiters++);
+
+ str = (unsigned char*)*string;
+
+ /* Find beginning of token (skip over leading delimiters). Note that
+ * there is no token iff this loop sets str to point to the terminal
+ * null (*str == '\0')
+ */
+ while (((map[*str >> 5] & (1 << (*str & 31))) && *str) || (*str == ' ')) {
+ str++;
+ }
+
+ nextoken = (char*)str;
+
+ /* Find the end of the token. If it is not the end of the string,
+ * put a null there.
+ */
+ for (; *str; str++) {
+ if (map[*str >> 5] & (1 << (*str & 31))) {
+ if (tokdelim != NULL) {
+ *tokdelim = *str;
+ }
+
+ *str++ = '\0';
+ break;
+ }
+ }
+
+ *string = (char*)str;
+
+ /* Determine if a token has been found. */
+ if (nextoken == (char *) str) {
+ return NULL;
+ }
+ else {
+ return nextoken;
+ }
+}
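+
+/* Illustrative usage of the contract documented above; buf and its
+ * contents are hypothetical. Successive calls consume the caller's
+ * string in place (delimiters are overwritten with NULs), so the
+ * buffer must be writable:
+ *
+ *	char buf[] = "ssid=guest;chan=11";
+ *	char *cursor = buf, *tok;
+ *	while ((tok = bcmstrtok(&cursor, "=;", NULL)) != NULL)
+ *		printf("%s\n", tok);	// prints "ssid", "guest", "chan", "11"
+ */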
+
+
+#define xToLower(C) \
+ ((C >= 'A' && C <= 'Z') ? (char)((int)C - (int)'A' + (int)'a') : C)
+
+
+/****************************************************************************
+* Function: bcmstricmp
+*
+* Purpose: Compare two strings case-insensitively.
+*
+* Parameters: s1 (in) First string to compare.
+* s2 (in) Second string to compare.
+*
+* Returns: 0 if the two strings are equal, -1 if s1 < s2, and 1 if
+* s1 > s2, ignoring case.
+*****************************************************************************
+*/
+int
+bcmstricmp(const char *s1, const char *s2)
+{
+ char dc, sc;
+
+ while (*s2 && *s1) {
+ dc = xToLower(*s1);
+ sc = xToLower(*s2);
+ if (dc < sc) return -1;
+ if (dc > sc) return 1;
+ s1++;
+ s2++;
+ }
+
+ if (*s1 && !*s2) return 1;
+ if (!*s1 && *s2) return -1;
+ return 0;
+}
+
+
+/****************************************************************************
+* Function: bcmstrnicmp
+*
+* Purpose: Compare two strings case-insensitively, up to a max of 'cnt'
+* characters.
+*
+* Parameters: s1 (in) First string to compare.
+* s2 (in) Second string to compare.
+* cnt (in) Max characters to compare.
+*
+* Returns: 0 if the two strings are equal, -1 if s1 < s2, and 1 if
+* s1 > s2, ignoring case.
+*****************************************************************************
+*/
+int
+bcmstrnicmp(const char* s1, const char* s2, int cnt)
+{
+ char dc, sc;
+
+ while (*s2 && *s1 && cnt) {
+ dc = xToLower(*s1);
+ sc = xToLower(*s2);
+ if (dc < sc) return -1;
+ if (dc > sc) return 1;
+ s1++;
+ s2++;
+ cnt--;
+ }
+
+ if (!cnt) return 0;
+ if (*s1 && !*s2) return 1;
+ if (!*s1 && *s2) return -1;
+ return 0;
+}
+
+/* parse a xx:xx:xx:xx:xx:xx format ethernet address */
+int
+bcm_ether_atoe(const char *p, struct ether_addr *ea)
+{
+ int i = 0;
+ char *ep;
+
+ for (;;) {
+ ea->octet[i++] = (char) bcm_strtoul(p, &ep, 16);
+ p = ep;
+ if (!*p++ || i == 6)
+ break;
+ }
+
+ return (i == 6);
+}
+
+
+#if defined(CONFIG_USBRNDIS_RETAIL) || defined(NDIS_MINIPORT_DRIVER)
+/* registry routine buffer preparation utility functions:
+ * parameter order is like strncpy, but returns count
+ * of bytes copied. Minimum bytes copied is null char(1)/wchar(2)
+ */
+ulong
+wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen)
+{
+ ulong copyct = 1;
+ ushort i;
+
+ if (abuflen == 0)
+ return 0;
+
+ /* wbuflen is in bytes */
+ wbuflen /= sizeof(ushort);
+
+ for (i = 0; i < wbuflen; ++i) {
+ if (--abuflen == 0)
+ break;
+ *abuf++ = (char) *wbuf++;
+ ++copyct;
+ }
+ *abuf = '\0';
+
+ return copyct;
+}
+#endif /* CONFIG_USBRNDIS_RETAIL || NDIS_MINIPORT_DRIVER */
+
+char *
+bcm_ether_ntoa(const struct ether_addr *ea, char *buf)
+{
+ static const char hex[] =
+ {
+ '0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'
+ };
+ const uint8 *octet = ea->octet;
+ char *p = buf;
+ int i;
+
+ for (i = 0; i < 6; i++, octet++) {
+ *p++ = hex[(*octet >> 4) & 0xf];
+ *p++ = hex[*octet & 0xf];
+ *p++ = ':';
+ }
+
+ *(p-1) = '\0';
+
+ return (buf);
+}
+
+char *
+bcm_ip_ntoa(struct ipv4_addr *ia, char *buf)
+{
+ snprintf(buf, 16, "%d.%d.%d.%d",
+ ia->addr[0], ia->addr[1], ia->addr[2], ia->addr[3]);
+ return (buf);
+}
+
+#ifdef BCMDRIVER
+
+void
+bcm_mdelay(uint ms)
+{
+ uint i;
+
+ for (i = 0; i < ms; i++) {
+ OSL_DELAY(1000);
+ }
+}
+
+
+
+
+
+#if defined(DHD_DEBUG)
+/* pretty hex print a pkt buffer chain */
+void
+prpkt(const char *msg, osl_t *osh, void *p0)
+{
+ void *p;
+
+ if (msg && (msg[0] != '\0'))
+ printf("%s:\n", msg);
+
+ for (p = p0; p; p = PKTNEXT(osh, p))
+ prhex(NULL, PKTDATA(osh, p), PKTLEN(osh, p));
+}
+#endif
+
+/* Takes an Ethernet frame and sets out-of-band PKTPRIO.
+ * Also updates the inplace vlan tag if requested.
+ * For debugging, it returns an indication of what it did.
+ */
+uint BCMFASTPATH
+pktsetprio(void *pkt, bool update_vtag)
+{
+ struct ether_header *eh;
+ struct ethervlan_header *evh;
+ uint8 *pktdata;
+ int priority = 0;
+ int rc = 0;
+
+ pktdata = (uint8 *)PKTDATA(NULL, pkt);
+ ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16)));
+
+ eh = (struct ether_header *) pktdata;
+
+ if (ntoh16(eh->ether_type) == ETHER_TYPE_8021Q) {
+ uint16 vlan_tag;
+ int vlan_prio, dscp_prio = 0;
+
+ evh = (struct ethervlan_header *)eh;
+
+ vlan_tag = ntoh16(evh->vlan_tag);
+ vlan_prio = (int) (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
+
+ if (ntoh16(evh->ether_type) == ETHER_TYPE_IP) {
+ uint8 *ip_body = pktdata + sizeof(struct ethervlan_header);
+ uint8 tos_tc = IP_TOS46(ip_body);
+ dscp_prio = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+ }
+
+ /* DSCP priority gets precedence over 802.1P (vlan tag) */
+ if (dscp_prio != 0) {
+ priority = dscp_prio;
+ rc |= PKTPRIO_VDSCP;
+ } else {
+ priority = vlan_prio;
+ rc |= PKTPRIO_VLAN;
+ }
+ /*
+ * If the DSCP priority is not the same as the VLAN priority,
+ * then overwrite the priority field in the vlan tag, with the
+ * DSCP priority value. This is required for Linux APs because
+ * the VLAN driver on Linux overwrites the skb->priority field
+ * with the priority value in the vlan tag
+ */
+ if (update_vtag && (priority != vlan_prio)) {
+ vlan_tag &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT);
+ vlan_tag |= (uint16)priority << VLAN_PRI_SHIFT;
+ evh->vlan_tag = hton16(vlan_tag);
+ rc |= PKTPRIO_UPD;
+ }
+ } else if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
+ uint8 *ip_body = pktdata + sizeof(struct ether_header);
+ uint8 tos_tc = IP_TOS46(ip_body);
+ priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+ rc |= PKTPRIO_DSCP;
+ }
+
+ ASSERT(priority >= 0 && priority <= MAXPRIO);
+ PKTSETPRIO(pkt, priority);
+ return (rc | priority);
+}
+
+
+static char bcm_undeferrstr[32];
+static const char *bcmerrorstrtable[] = BCMERRSTRINGTABLE;
+
+/* Convert the error codes into related error strings */
+const char *
+bcmerrorstr(int bcmerror)
+{
+ /* check if someone added a bcmerror code but forgot to add errorstring */
+ ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1));
+
+ if (bcmerror > 0 || bcmerror < BCME_LAST) {
+ snprintf(bcm_undeferrstr, sizeof(bcm_undeferrstr), "Undefined error %d", bcmerror);
+ return bcm_undeferrstr;
+ }
+
+ ASSERT(strlen(bcmerrorstrtable[-bcmerror]) < BCME_STRLEN);
+
+ return bcmerrorstrtable[-bcmerror];
+}
+
+
+
+/* iovar table lookup */
+const bcm_iovar_t*
+bcm_iovar_lookup(const bcm_iovar_t *table, const char *name)
+{
+ const bcm_iovar_t *vi;
+ const char *lookup_name;
+
+ /* skip any ':' delimited option prefixes */
+ lookup_name = strrchr(name, ':');
+ if (lookup_name != NULL)
+ lookup_name++;
+ else
+ lookup_name = name;
+
+ ASSERT(table != NULL);
+
+ for (vi = table; vi->name; vi++) {
+ if (!strcmp(vi->name, lookup_name))
+ return vi;
+ }
+ /* ran to end of table */
+
+ return NULL; /* var name not found */
+}
+
+int
+bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, int len, bool set)
+{
+ int bcmerror = 0;
+
+ /* length check on io buf */
+ switch (vi->type) {
+ case IOVT_BOOL:
+ case IOVT_INT8:
+ case IOVT_INT16:
+ case IOVT_INT32:
+ case IOVT_UINT8:
+ case IOVT_UINT16:
+ case IOVT_UINT32:
+ /* all integers are int32 sized args at the ioctl interface */
+ if (len < (int)sizeof(int)) {
+ bcmerror = BCME_BUFTOOSHORT;
+ }
+ break;
+
+ case IOVT_BUFFER:
+ /* buffer must meet minimum length requirement */
+ if (len < vi->minlen) {
+ bcmerror = BCME_BUFTOOSHORT;
+ }
+ break;
+
+ case IOVT_VOID:
+ if (!set) {
+ /* Cannot return nil... */
+ bcmerror = BCME_UNSUPPORTED;
+ } else if (len) {
+ /* Set is an action w/o parameters */
+ bcmerror = BCME_BUFTOOLONG;
+ }
+ break;
+
+ default:
+ /* unknown type for length check in iovar info */
+ ASSERT(0);
+ bcmerror = BCME_UNSUPPORTED;
+ }
+
+ return bcmerror;
+}
+
+#endif /* BCMDRIVER */
+
+
+/*******************************************************************************
+ * crc8
+ *
+ * Computes a crc8 over the input data using the polynomial:
+ *
+ * x^8 + x^7 +x^6 + x^4 + x^2 + 1
+ *
+ * The caller provides the initial value (either CRC8_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data. When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream. When checking, a final
+ * return value of CRC8_GOOD_VALUE indicates a valid CRC.
+ *
+ * Reference: Dallas Semiconductor Application Note 27
+ * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
+ * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
+ * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
+ *
+ * ****************************************************************************
+ */
+
+static const uint8 crc8_table[256] = {
+ 0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B,
+ 0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21,
+ 0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF,
+ 0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5,
+ 0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14,
+ 0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E,
+ 0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80,
+ 0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA,
+ 0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95,
+ 0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF,
+ 0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01,
+ 0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B,
+ 0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA,
+ 0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0,
+ 0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E,
+ 0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34,
+ 0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0,
+ 0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A,
+ 0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54,
+ 0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E,
+ 0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF,
+ 0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5,
+ 0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B,
+ 0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61,
+ 0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E,
+ 0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74,
+ 0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA,
+ 0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0,
+ 0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41,
+ 0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B,
+ 0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5,
+ 0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F
+};
+
+#define CRC_INNER_LOOP(n, c, x) \
+ (c) = ((c) >> 8) ^ crc##n##_table[((c) ^ (x)) & 0xff]
+
+uint8
+hndcrc8(
+ uint8 *pdata, /* pointer to array of data to process */
+ uint nbytes, /* number of input data bytes to process */
+ uint8 crc /* either CRC8_INIT_VALUE or previous return value */
+)
+{
+ /* hard code the crc loop instead of using CRC_INNER_LOOP macro
+ * to avoid the undefined and unnecessary (uint8 >> 8) operation.
+ */
+ while (nbytes-- > 0)
+ crc = crc8_table[(crc ^ *pdata++) & 0xff];
+
+ return crc;
+}
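+
+/* Illustrative use of the accumulation interface described above;
+ * CRC8_INIT_VALUE and CRC8_GOOD_VALUE come from the corresponding
+ * header, and hdr/body are hypothetical fragments of one byte stream:
+ *
+ *	uint8 crc = CRC8_INIT_VALUE;
+ *	crc = hndcrc8(hdr, hdrlen, crc);	// first fragment
+ *	crc = hndcrc8(body, bodylen, crc);	// accumulate over the next fragment
+ *	crc = ~crc;				// complement before appending to the stream
+ *
+ * On the checking side, running hndcrc8() from CRC8_INIT_VALUE over the
+ * payload plus the appended CRC byte yields CRC8_GOOD_VALUE when intact.
+ */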
+
+/*******************************************************************************
+ * crc16
+ *
+ * Computes a crc16 over the input data using the polynomial:
+ *
+ * x^16 + x^12 +x^5 + 1
+ *
+ * The caller provides the initial value (either CRC16_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data. When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream. When checking, a final
+ * return value of CRC16_GOOD_VALUE indicates a valid CRC.
+ *
+ * Reference: Dallas Semiconductor Application Note 27
+ * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
+ * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
+ * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
+ *
+ * ****************************************************************************
+ */
+
+static const uint16 crc16_table[256] = {
+ 0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF,
+ 0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7,
+ 0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E,
+ 0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876,
+ 0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD,
+ 0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5,
+ 0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C,
+ 0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974,
+ 0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB,
+ 0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3,
+ 0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A,
+ 0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72,
+ 0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9,
+ 0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1,
+ 0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738,
+ 0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70,
+ 0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7,
+ 0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF,
+ 0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036,
+ 0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E,
+ 0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5,
+ 0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD,
+ 0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 0xC134,
+ 0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 0x6E6E, 0x5CF5, 0x4D7C,
+ 0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3,
+ 0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB,
+ 0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232,
+ 0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A,
+ 0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1,
+ 0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9,
+ 0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330,
+ 0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78
+};
+
+uint16
+hndcrc16(
+ uint8 *pdata, /* pointer to array of data to process */
+ uint nbytes, /* number of input data bytes to process */
+ uint16 crc /* either CRC16_INIT_VALUE or previous return value */
+)
+{
+ while (nbytes-- > 0)
+ CRC_INNER_LOOP(16, crc, *pdata++);
+ return crc;
+}
+
+static const uint32 crc32_table[256] = {
+ 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA,
+ 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3,
+ 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988,
+ 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91,
+ 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE,
+ 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
+ 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC,
+ 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5,
+ 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172,
+ 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B,
+ 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940,
+ 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
+ 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116,
+ 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
+ 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
+ 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D,
+ 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A,
+ 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
+ 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818,
+ 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01,
+ 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
+ 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457,
+ 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C,
+ 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
+ 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2,
+ 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB,
+ 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0,
+ 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
+ 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086,
+ 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
+ 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4,
+ 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD,
+ 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A,
+ 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683,
+ 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8,
+ 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
+ 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE,
+ 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7,
+ 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC,
+ 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5,
+ 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252,
+ 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
+ 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60,
+ 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79,
+ 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
+ 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F,
+ 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04,
+ 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
+ 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A,
+ 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713,
+ 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38,
+ 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21,
+ 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E,
+ 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
+ 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C,
+ 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
+ 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2,
+ 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB,
+ 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0,
+ 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
+ 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6,
+ 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF,
+ 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
+ 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
+};
+
+/*
+ * crc input is CRC32_INIT_VALUE for a fresh start, or previous return value if
+ * accumulating over multiple pieces.
+ */
+uint32
+hndcrc32(uint8 *pdata, uint nbytes, uint32 crc)
+{
+ uint8 *pend;
+ pend = pdata + nbytes;
+ while (pdata < pend)
+ CRC_INNER_LOOP(32, crc, *pdata++);
+
+ return crc;
+}
+
+#ifdef notdef
+#define CLEN 1499 /* CRC Length */
+#define CBUFSIZ (CLEN+4)
+#define CNBUFS 5 /* # of bufs */
+
+void
+testcrc32(void)
+{
+ uint j, k, l;
+ uint8 *buf;
+ uint len[CNBUFS];
+ uint32 crcr;
+ uint32 crc32tv[CNBUFS] =
+ {0xd2cb1faa, 0xd385c8fa, 0xf5b4f3f3, 0x55789e20, 0x00343110};
+
+ ASSERT((buf = MALLOC(CBUFSIZ*CNBUFS)) != NULL);
+
+ /* step through all possible alignments */
+ for (l = 0; l <= 4; l++) {
+ for (j = 0; j < CNBUFS; j++) {
+ len[j] = CLEN;
+ for (k = 0; k < len[j]; k++)
+ *(buf + j*CBUFSIZ + (k+l)) = (j+k) & 0xff;
+ }
+
+ for (j = 0; j < CNBUFS; j++) {
+ crcr = crc32(buf + j*CBUFSIZ + l, len[j], CRC32_INIT_VALUE);
+ ASSERT(crcr == crc32tv[j]);
+ }
+ }
+
+ MFREE(buf, CBUFSIZ*CNBUFS);
+ return;
+}
+#endif /* notdef */
+
+/*
+ * Advance from the current 1-byte tag/1-byte length/variable-length value
+ * triple, to the next, returning a pointer to the next.
+ * If the current or next TLV is invalid (does not fit in given buffer length),
+ * NULL is returned.
+ * *buflen is not modified if the TLV elt parameter is invalid, or is decremented
+ * by the TLV parameter's length if it is valid.
+ */
+bcm_tlv_t *
+bcm_next_tlv(bcm_tlv_t *elt, int *buflen)
+{
+ int len;
+
+ /* validate current elt */
+ if (!bcm_valid_tlv(elt, *buflen))
+ return NULL;
+
+ /* advance to next elt */
+ len = elt->len;
+ elt = (bcm_tlv_t*)(elt->data + len);
+ *buflen -= (TLV_HDR_LEN + len);
+
+ /* validate next elt */
+ if (!bcm_valid_tlv(elt, *buflen))
+ return NULL;
+
+ return elt;
+}
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ */
+bcm_tlv_t *
+bcm_parse_tlvs(void *buf, int buflen, uint key)
+{
+ bcm_tlv_t *elt;
+ int totlen;
+
+ elt = (bcm_tlv_t*)buf;
+ totlen = buflen;
+
+ /* find tagged parameter */
+ while (totlen >= TLV_HDR_LEN) {
+ int len = elt->len;
+
+ /* validate remaining totlen */
+ if ((elt->id == key) &&
+ (totlen >= (len + TLV_HDR_LEN)))
+ return (elt);
+
+ elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN));
+ totlen -= (len + TLV_HDR_LEN);
+ }
+
+ return NULL;
+}
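+
+/* Illustrative walk over a buffer of 1-byte tag/1-byte length/value
+ * triples using the helpers above; buf, buflen and key are hypothetical
+ * caller values, and the first element should be checked with
+ * bcm_valid_tlv() before dereferencing it:
+ *
+ *	bcm_tlv_t *elt = bcm_parse_tlvs(buf, buflen, key);	// first match, or NULL
+ *
+ *	int remaining = buflen;
+ *	for (elt = (bcm_tlv_t *)buf; elt != NULL; elt = bcm_next_tlv(elt, &remaining))
+ *		;	// elt->id, elt->len and elt->data describe each element
+ */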
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag. Stop parsing when we see an element whose ID is greater
+ * than the target key.
+ */
+bcm_tlv_t *
+bcm_parse_ordered_tlvs(void *buf, int buflen, uint key)
+{
+ bcm_tlv_t *elt;
+ int totlen;
+
+ elt = (bcm_tlv_t*)buf;
+ totlen = buflen;
+
+ /* find tagged parameter */
+ while (totlen >= TLV_HDR_LEN) {
+ uint id = elt->id;
+ int len = elt->len;
+
+ /* Punt if we start seeing IDs > than target key */
+ if (id > key)
+ return (NULL);
+
+ /* validate remaining totlen */
+ if ((id == key) &&
+ (totlen >= (len + TLV_HDR_LEN)))
+ return (elt);
+
+ elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN));
+ totlen -= (len + TLV_HDR_LEN);
+ }
+ return NULL;
+}
+
+#if defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || defined(WLMSG_ASSOC) || \
+ defined(DHD_DEBUG)
+int
+bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len)
+{
+ int i;
+ char* p = buf;
+ char hexstr[16];
+ int slen = 0, nlen = 0;
+ uint32 bit;
+ const char* name;
+
+ if (len < 2 || !buf)
+ return 0;
+
+ buf[0] = '\0';
+
+ for (i = 0; flags != 0; i++) {
+ bit = bd[i].bit;
+ name = bd[i].name;
+ if (bit == 0 && flags != 0) {
+ /* print any unnamed bits */
+ snprintf(hexstr, 16, "0x%X", flags);
+ name = hexstr;
+ flags = 0; /* exit loop */
+ } else if ((flags & bit) == 0)
+ continue;
+ flags &= ~bit;
+ nlen = strlen(name);
+ slen += nlen;
+ /* count btwn flag space */
+ if (flags != 0)
+ slen += 1;
+ /* need NULL char as well */
+ if (len <= slen)
+ break;
+ /* copy NULL char but don't count it */
+ strncpy(p, name, nlen + 1);
+ p += nlen;
+ /* copy btwn flag space and NULL char */
+ if (flags != 0)
+ p += snprintf(p, 2, " ");
+ }
+
+ /* indicate the str was too short */
+ if (flags != 0) {
+ if (len < 2)
+ p -= 2 - len; /* overwrite last char */
+ p += snprintf(p, 2, ">");
+ }
+
+ return (int)(p - buf);
+}
+
+/* print bytes formatted as hex to a string. return the resulting string length */
+int
+bcm_format_hex(char *str, const void *bytes, int len)
+{
+ int i;
+ char *p = str;
+ const uint8 *src = (const uint8*)bytes;
+
+ for (i = 0; i < len; i++) {
+ p += snprintf(p, 3, "%02X", *src);
+ src++;
+ }
+ return (int)(p - str);
+}
+#endif
+
+/* pretty hex print a contiguous buffer */
+void
+prhex(const char *msg, uchar *buf, uint nbytes)
+{
+ char line[128], *p;
+ int len = sizeof(line);
+ int nchar;
+ uint i;
+
+ if (msg && (msg[0] != '\0'))
+ printf("%s:\n", msg);
+
+ p = line;
+ for (i = 0; i < nbytes; i++) {
+ if (i % 16 == 0) {
+ nchar = snprintf(p, len, " %04d: ", i); /* line prefix */
+ p += nchar;
+ len -= nchar;
+ }
+ if (len > 0) {
+ nchar = snprintf(p, len, "%02x ", buf[i]);
+ p += nchar;
+ len -= nchar;
+ }
+
+ if (i % 16 == 15) {
+ printf("%s\n", line); /* flush line */
+ p = line;
+ len = sizeof(line);
+ }
+ }
+
+ /* flush last partial line */
+ if (p != line)
+ printf("%s\n", line);
+}
+
+static const char *crypto_algo_names[] = {
+ "NONE",
+ "WEP1",
+ "TKIP",
+ "WEP128",
+ "AES_CCM",
+ "AES_OCB_MSDU",
+ "AES_OCB_MPDU",
+ "NALG"
+ "UNDEF",
+ "UNDEF",
+ "UNDEF",
+#ifdef BCMWAPI_WPI
+ "WAPI",
+#endif /* BCMWAPI_WPI */
+ "UNDEF"
+};
+
+const char *
+bcm_crypto_algo_name(uint algo)
+{
+ return (algo < ARRAYSIZE(crypto_algo_names)) ? crypto_algo_names[algo] : "ERR";
+}
+
+
+char *
+bcm_chipname(uint chipid, char *buf, uint len)
+{
+ const char *fmt;
+
+ fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+ snprintf(buf, len, fmt, chipid);
+ return buf;
+}
+
+/* Produce a human-readable string for boardrev */
+char *
+bcm_brev_str(uint32 brev, char *buf)
+{
+ if (brev < 0x100)
+ snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf);
+ else
+ snprintf(buf, 8, "%c%03x", ((brev & 0xf000) == 0x1000) ? 'P' : 'A', brev & 0xfff);
+
+ return (buf);
+}
+
+#define BUFSIZE_TODUMP_ATONCE 512 /* Buffer size */
+
+/* dump large strings to console */
+void
+printbig(char *buf)
+{
+ uint len, max_len;
+ char c;
+
+ len = strlen(buf);
+
+ max_len = BUFSIZE_TODUMP_ATONCE;
+
+ while (len > max_len) {
+ c = buf[max_len];
+ buf[max_len] = '\0';
+ printf("%s", buf);
+ buf[max_len] = c;
+
+ buf += max_len;
+ len -= max_len;
+ }
+ /* print the remaining string */
+ printf("%s\n", buf);
+ return;
+}
+
+/* routine to dump fields in a fielddesc structure */
+uint
+bcmdumpfields(bcmutl_rdreg_rtn read_rtn, void *arg0, uint arg1, struct fielddesc *fielddesc_array,
+ char *buf, uint32 bufsize)
+{
+ uint filled_len;
+ int len;
+ struct fielddesc *cur_ptr;
+
+ filled_len = 0;
+ cur_ptr = fielddesc_array;
+
+ while (bufsize > 1) {
+ if (cur_ptr->nameandfmt == NULL)
+ break;
+ len = snprintf(buf, bufsize, cur_ptr->nameandfmt,
+ read_rtn(arg0, arg1, cur_ptr->offset));
+ /* check for snprintf overflow or error */
+ if (len < 0 || (uint32)len >= bufsize)
+ len = bufsize - 1;
+ buf += len;
+ bufsize -= len;
+ filled_len += len;
+ cur_ptr++;
+ }
+ return filled_len;
+}
+
+uint
+bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
+{
+ uint len;
+
+ len = strlen(name) + 1;
+
+ if ((len + datalen) > buflen)
+ return 0;
+
+ strncpy(buf, name, buflen);
+
+ /* append data onto the end of the name string */
+ memcpy(&buf[len], data, datalen);
+ len += datalen;
+
+ return len;
+}
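+
+/* Illustrative use of bcm_mkiovar(): pack an iovar name and a 32-bit
+ * value into one buffer before handing it to a driver iovar/ioctl call.
+ * The "mpc" name is only an example value here:
+ *
+ *	char iovbuf[32];
+ *	uint32 val = 1;
+ *	uint len = bcm_mkiovar("mpc", (char *)&val, sizeof(val),
+ *	                       iovbuf, sizeof(iovbuf));
+ *	// len == strlen("mpc") + 1 + sizeof(val) on success; 0 means it did not fit
+ */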
+
+/* Quarter dBm units to mW
+ * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
+ * Table is offset so the last entry is largest mW value that fits in
+ * a uint16.
+ */
+
+#define QDBM_OFFSET 153 /* Offset for first entry */
+#define QDBM_TABLE_LEN 40 /* Table size */
+
+/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET.
+ * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2
+ */
+#define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */
+
+/* Largest mW value that will round down to the last table entry,
+ * QDBM_OFFSET + QDBM_TABLE_LEN-1.
+ * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2.
+ */
+#define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */
+
+static const uint16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = {
+/* qdBm: +0 +1 +2 +3 +4 +5 +6 +7 */
+/* 153: */ 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000,
+/* 161: */ 10593, 11220, 11885, 12589, 13335, 14125, 14962, 15849,
+/* 169: */ 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119,
+/* 177: */ 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811,
+/* 185: */ 42170, 44668, 47315, 50119, 53088, 56234, 59566, 63096
+};
+
+uint16
+bcm_qdbm_to_mw(uint8 qdbm)
+{
+ uint factor = 1;
+ int idx = qdbm - QDBM_OFFSET;
+
+ if (idx >= QDBM_TABLE_LEN) {
+ /* clamp to max uint16 mW value */
+ return 0xFFFF;
+ }
+
+ /* scale the qdBm index up to the range of the table 0-40
+ * where an offset of 40 qdBm equals a factor of 10 in mW.
+ */
+ while (idx < 0) {
+ idx += 40;
+ factor *= 10;
+ }
+
+ /* return the mW value scaled down to the correct factor of 10,
+ * adding in factor/2 to get proper rounding.
+ */
+ return ((nqdBm_to_mW_map[idx] + factor/2) / factor);
+}
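+
+/* Worked example of the conversion above: qdBm are quarter-dBm, so
+ * 17 dBm = 68 qdBm. With QDBM_OFFSET 153, idx starts at 68 - 153 = -85
+ * and is shifted up by 40 (one decade, i.e. a factor of 10 in mW) three
+ * times, giving idx = 35 and factor = 1000. nqdBm_to_mW_map[35] = 50119,
+ * so bcm_qdbm_to_mw(68) returns (50119 + 500) / 1000 = 50 mW (~10^1.7).
+ */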
+
+uint8
+bcm_mw_to_qdbm(uint16 mw)
+{
+ uint8 qdbm;
+ int offset;
+ uint mw_uint = mw;
+ uint boundary;
+
+ /* handle boundary case */
+ if (mw_uint <= 1)
+ return 0;
+
+ offset = QDBM_OFFSET;
+
+ /* move mw into the range of the table */
+ while (mw_uint < QDBM_TABLE_LOW_BOUND) {
+ mw_uint *= 10;
+ offset -= 40;
+ }
+
+ for (qdbm = 0; qdbm < QDBM_TABLE_LEN-1; qdbm++) {
+ boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm+1] -
+ nqdBm_to_mW_map[qdbm])/2;
+ if (mw_uint < boundary) break;
+ }
+
+ qdbm += (uint8)offset;
+
+ return (qdbm);
+}
+
+
+uint
+bcm_bitcount(uint8 *bitmap, uint length)
+{
+ uint bitcount = 0, i;
+ uint8 tmp;
+ for (i = 0; i < length; i++) {
+ tmp = bitmap[i];
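+		/* tmp &= (tmp - 1) clears the lowest set bit on each pass
+		 * (Kernighan's method), so the inner loop runs once per set bit
+		 */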
+ while (tmp) {
+ bitcount++;
+ tmp &= (tmp - 1);
+ }
+ }
+ return bitcount;
+}
+
+#ifdef BCMDRIVER
+
+/* Initialization of bcmstrbuf structure */
+void
+bcm_binit(struct bcmstrbuf *b, char *buf, uint size)
+{
+ b->origsize = b->size = size;
+ b->origbuf = b->buf = buf;
+}
+
+/* Buffer sprintf wrapper to guard against buffer overflow */
+int
+bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...)
+{
+ va_list ap;
+ int r;
+
+ va_start(ap, fmt);
+
+ r = vsnprintf(b->buf, b->size, fmt, ap);
+
+	/* A non-ANSI-C99-compliant vsnprintf returns -1 on overflow,
+	 * an ANSI-compliant one returns r >= b->size,
+	 * and bcmstdlib returns 0; handle all three cases.
+	 */
+	/* Note that r == 0 also occurs when strlen(fmt) is zero,
+	 * typically when "" is passed as the format argument.
+	 */
+ if ((r == -1) || (r >= (int)b->size)) {
+ b->size = 0;
+ } else {
+ b->size -= r;
+ b->buf += r;
+ }
+
+ va_end(ap);
+
+ return r;
+}
+
+void
+bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, uint8 *buf, int len)
+{
+ int i;
+
+ if (msg != NULL && msg[0] != '\0')
+ bcm_bprintf(b, "%s", msg);
+ for (i = 0; i < len; i ++)
+ bcm_bprintf(b, "%02X", buf[i]);
+ if (newline)
+ bcm_bprintf(b, "\n");
+}
+
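+/* Editor note: bcm_inc_bytes() adds 'amount' to a little-endian multi-byte
+ * counter in place; the loop stops as soon as a byte does not wrap around
+ * (no carry), otherwise a carry of 1 propagates to the next higher byte.
+ */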
+void
+bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount)
+{
+ int i;
+
+ for (i = 0; i < num_bytes; i++) {
+ num[i] += amount;
+ if (num[i] >= amount)
+ break;
+ amount = 1;
+ }
+}
+
+int
+bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes)
+{
+ int i;
+
+ for (i = nbytes - 1; i >= 0; i--) {
+ if (arg1[i] != arg2[i])
+ return (arg1[i] - arg2[i]);
+ }
+ return 0;
+}
+
+void
+bcm_print_bytes(const char *name, const uchar *data, int len)
+{
+ int i;
+ int per_line = 0;
+
+ printf("%s: %d \n", name ? name : "", len);
+ for (i = 0; i < len; i++) {
+ printf("%02x ", *data++);
+ per_line++;
+ if (per_line == 16) {
+ per_line = 0;
+ printf("\n");
+ }
+ }
+ printf("\n");
+}
+#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \
+ defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
+#define SSID_FMT_BUF_LEN ((4 * DOT11_MAX_SSID_LEN) + 1)
+
+int
+bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len)
+{
+ uint i, c;
+ char *p = buf;
+ char *endp = buf + SSID_FMT_BUF_LEN;
+
+ if (ssid_len > DOT11_MAX_SSID_LEN) ssid_len = DOT11_MAX_SSID_LEN;
+
+ for (i = 0; i < ssid_len; i++) {
+ c = (uint)ssid[i];
+ if (c == '\\') {
+ *p++ = '\\';
+ *p++ = '\\';
+ } else if (bcm_isprint((uchar)c)) {
+ *p++ = (char)c;
+ } else {
+ p += snprintf(p, (endp - p), "\\x%02X", c);
+ }
+ }
+ *p = '\0';
+ ASSERT(p < endp);
+
+ return (int)(p - buf);
+}
+#endif
+
+#endif /* BCMDRIVER */
+
+/*
+ * process_nvram_vars: takes a buffer of "<var>=<value>\n" lines read from a file and ending in a NUL;
+ * also accepts nvram files which are already in the format <var1>=<value1>\0<var2>=<value2>\0.
+ * Removes carriage returns, empty lines, and comment lines, and converts newlines to NULs.
+ * Shortens the buffer as needed and pads with NULs.  The end of the buffer is marked by two NULs.
+*/
+
+unsigned int
+process_nvram_vars(char *varbuf, unsigned int len)
+{
+ char *dp;
+ bool findNewline;
+ int column;
+ unsigned int buf_len, n;
+ unsigned int pad = 0;
+
+ dp = varbuf;
+
+ findNewline = FALSE;
+ column = 0;
+
+ for (n = 0; n < len; n++) {
+ if (varbuf[n] == '\r')
+ continue;
+ if (findNewline && varbuf[n] != '\n')
+ continue;
+ findNewline = FALSE;
+ if (varbuf[n] == '#') {
+ findNewline = TRUE;
+ continue;
+ }
+ if (varbuf[n] == '\n') {
+ if (column == 0)
+ continue;
+ *dp++ = 0;
+ column = 0;
+ continue;
+ }
+ *dp++ = varbuf[n];
+ column++;
+ }
+ buf_len = (unsigned int)(dp - varbuf);
+ if (buf_len % 4) {
+ pad = 4 - buf_len % 4;
+ if (pad && (buf_len + pad <= len)) {
+ buf_len += pad;
+ }
+ }
+
+ while (dp < varbuf + n)
+ *dp++ = 0;
+
+ return buf_len;
+}
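+
+/* Editor-added illustrative example with a hypothetical input buffer (not from
+ * the original source): given "foo=1\r\n# comment\n\nbar=2\n", the loop above
+ * yields "foo=1\0bar=2\0" (buf_len = 12, already a multiple of 4 so no extra
+ * padding is added), and the rest of the consumed region is zero-filled so the
+ * variable list is terminated by two NULs.
+ */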
diff --git a/drivers/net/wireless/bcmdhd/bcmwifi_channels.c b/drivers/net/wireless/bcmdhd/bcmwifi_channels.c
new file mode 100644
index 000000000000..0a570f66af22
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmwifi_channels.c
@@ -0,0 +1,936 @@
+/*
+ * Misc utility routines used by kernel or app-level software.
+ * Contents are wifi-specific, usable by any kernel or app-level
+ * software that needs wifi functionality.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmwifi_channels.c 309193 2012-01-19 00:03:57Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+
+#ifdef BCMDRIVER
+#include <osl.h>
+#include <bcmutils.h>
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#define tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#else
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+#endif
+
+#ifdef _bcmwifi_c_
+
+#include <bcmwifi.h>
+#else
+#include <bcmwifi_channels.h>
+#endif
+
+#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL))
+#include <bcmstdlib.h>
+#endif
+
+#ifndef D11AC_IOTYPES
+
+
+
+
+
+
+
+char *
+wf_chspec_ntoa(chanspec_t chspec, char *buf)
+{
+ const char *band, *bw, *sb;
+ uint channel;
+
+ band = "";
+ bw = "";
+ sb = "";
+ channel = CHSPEC_CHANNEL(chspec);
+
+ if ((CHSPEC_IS2G(chspec) && channel > CH_MAX_2G_CHANNEL) ||
+ (CHSPEC_IS5G(chspec) && channel <= CH_MAX_2G_CHANNEL))
+ band = (CHSPEC_IS2G(chspec)) ? "b" : "a";
+ if (CHSPEC_IS40(chspec)) {
+ if (CHSPEC_SB_UPPER(chspec)) {
+ sb = "u";
+ channel += CH_10MHZ_APART;
+ } else {
+ sb = "l";
+ channel -= CH_10MHZ_APART;
+ }
+ } else if (CHSPEC_IS10(chspec)) {
+ bw = "n";
+ }
+
+
+ snprintf(buf, 6, "%d%s%s%s", channel, band, bw, sb);
+ return (buf);
+}
+
+
+chanspec_t
+wf_chspec_aton(const char *a)
+{
+ char *endp = NULL;
+ uint channel, band, bw, ctl_sb;
+ char c;
+
+ channel = strtoul(a, &endp, 10);
+
+
+ if (endp == a)
+ return 0;
+
+ if (channel > MAXCHANNEL)
+ return 0;
+
+ band = ((channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G);
+ bw = WL_CHANSPEC_BW_20;
+ ctl_sb = WL_CHANSPEC_CTL_SB_NONE;
+
+ a = endp;
+
+ c = tolower(a[0]);
+ if (c == '\0')
+ goto done;
+
+
+ if (c == 'a' || c == 'b') {
+ band = (c == 'a') ? WL_CHANSPEC_BAND_5G : WL_CHANSPEC_BAND_2G;
+ a++;
+ c = tolower(a[0]);
+ if (c == '\0')
+ goto done;
+ }
+
+
+ if (c == 'n') {
+ bw = WL_CHANSPEC_BW_10;
+ } else if (c == 'l') {
+ bw = WL_CHANSPEC_BW_40;
+ ctl_sb = WL_CHANSPEC_CTL_SB_LOWER;
+
+ if (channel <= (MAXCHANNEL - CH_20MHZ_APART))
+ channel += CH_10MHZ_APART;
+ else
+ return 0;
+ } else if (c == 'u') {
+ bw = WL_CHANSPEC_BW_40;
+ ctl_sb = WL_CHANSPEC_CTL_SB_UPPER;
+
+ if (channel > CH_20MHZ_APART)
+ channel -= CH_10MHZ_APART;
+ else
+ return 0;
+ } else {
+ return 0;
+ }
+
+done:
+ return (channel | band | bw | ctl_sb);
+}
+
+
+bool
+wf_chspec_malformed(chanspec_t chanspec)
+{
+
+ if (!CHSPEC_IS5G(chanspec) && !CHSPEC_IS2G(chanspec))
+ return TRUE;
+
+ if (!CHSPEC_IS40(chanspec) && !CHSPEC_IS20(chanspec))
+ return TRUE;
+
+
+ if (CHSPEC_IS20(chanspec)) {
+ if (!CHSPEC_SB_NONE(chanspec))
+ return TRUE;
+ } else {
+ if (!CHSPEC_SB_UPPER(chanspec) && !CHSPEC_SB_LOWER(chanspec))
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+
+uint8
+wf_chspec_ctlchan(chanspec_t chspec)
+{
+ uint8 ctl_chan;
+
+
+ if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_NONE) {
+ return CHSPEC_CHANNEL(chspec);
+ } else {
+
+ ASSERT(CHSPEC_BW(chspec) == WL_CHANSPEC_BW_40);
+
+ if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_UPPER) {
+
+ ctl_chan = UPPER_20_SB(CHSPEC_CHANNEL(chspec));
+ } else {
+ ASSERT(CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_LOWER);
+
+ ctl_chan = LOWER_20_SB(CHSPEC_CHANNEL(chspec));
+ }
+ }
+
+ return ctl_chan;
+}
+
+chanspec_t
+wf_chspec_ctlchspec(chanspec_t chspec)
+{
+ chanspec_t ctl_chspec = 0;
+ uint8 channel;
+
+ ASSERT(!wf_chspec_malformed(chspec));
+
+
+ if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_NONE) {
+ return chspec;
+ } else {
+ if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_UPPER) {
+ channel = UPPER_20_SB(CHSPEC_CHANNEL(chspec));
+ } else {
+ channel = LOWER_20_SB(CHSPEC_CHANNEL(chspec));
+ }
+ ctl_chspec = channel | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE;
+ ctl_chspec |= CHSPEC_BAND(chspec);
+ }
+ return ctl_chspec;
+}
+
+#else
+
+
+
+
+
+
+static const char *wf_chspec_bw_str[] =
+{
+ "5",
+ "10",
+ "20",
+ "40",
+ "80",
+ "160",
+ "80+80",
+ "na"
+};
+
+static const uint8 wf_chspec_bw_mhz[] =
+{5, 10, 20, 40, 80, 160, 160};
+
+#define WF_NUM_BW \
+ (sizeof(wf_chspec_bw_mhz)/sizeof(uint8))
+
+
+static const uint8 wf_5g_40m_chans[] =
+{38, 46, 54, 62, 102, 110, 118, 126, 134, 142, 151, 159};
+#define WF_NUM_5G_40M_CHANS \
+ (sizeof(wf_5g_40m_chans)/sizeof(uint8))
+
+
+static const uint8 wf_5g_80m_chans[] =
+{42, 58, 106, 122, 138, 155};
+#define WF_NUM_5G_80M_CHANS \
+ (sizeof(wf_5g_80m_chans)/sizeof(uint8))
+
+
+static const uint8 wf_5g_160m_chans[] =
+{50, 114};
+#define WF_NUM_5G_160M_CHANS \
+ (sizeof(wf_5g_160m_chans)/sizeof(uint8))
+
+
+
+static uint
+bw_chspec_to_mhz(chanspec_t chspec)
+{
+ uint bw;
+
+ bw = (chspec & WL_CHANSPEC_BW_MASK) >> WL_CHANSPEC_BW_SHIFT;
+ return (bw >= WF_NUM_BW ? 0 : wf_chspec_bw_mhz[bw]);
+}
+
+
+static uint8
+center_chan_to_edge(uint bw)
+{
+
+ return (uint8)(((bw - 20) / 2) / 5);
+}
+
+
+static uint8
+channel_low_edge(uint center_ch, uint bw)
+{
+ return (uint8)(center_ch - center_chan_to_edge(bw));
+}
+
+
+static int
+channel_to_sb(uint center_ch, uint ctl_ch, uint bw)
+{
+ uint lowest = channel_low_edge(center_ch, bw);
+ uint sb;
+
+ if ((ctl_ch - lowest) % 4) {
+
+ return -1;
+ }
+
+ sb = ((ctl_ch - lowest) / 4);
+
+
+ if (sb >= (bw / 20)) {
+
+ return -1;
+ }
+
+ return sb;
+}
+
+
+static uint8
+channel_to_ctl_chan(uint center_ch, uint bw, uint sb)
+{
+ return (uint8)(channel_low_edge(center_ch, bw) + sb * 4);
+}
+
+
+static int
+channel_80mhz_to_id(uint ch)
+{
+ uint i;
+ for (i = 0; i < WF_NUM_5G_80M_CHANS; i ++) {
+ if (ch == wf_5g_80m_chans[i])
+ return i;
+ }
+
+ return -1;
+}
+
+
+char *
+wf_chspec_ntoa(chanspec_t chspec, char *buf)
+{
+ const char *band;
+ uint ctl_chan;
+
+ if (wf_chspec_malformed(chspec))
+ return NULL;
+
+ band = "";
+
+
+ if ((CHSPEC_IS2G(chspec) && CHSPEC_CHANNEL(chspec) > CH_MAX_2G_CHANNEL) ||
+ (CHSPEC_IS5G(chspec) && CHSPEC_CHANNEL(chspec) <= CH_MAX_2G_CHANNEL))
+ band = (CHSPEC_IS2G(chspec)) ? "2g" : "5g";
+
+
+ ctl_chan = wf_chspec_ctlchan(chspec);
+
+
+ if (CHSPEC_IS20(chspec)) {
+ snprintf(buf, CHANSPEC_STR_LEN, "%s%d", band, ctl_chan);
+ } else if (!CHSPEC_IS8080(chspec)) {
+ const char *bw;
+ const char *sb = "";
+
+ bw = wf_chspec_bw_str[(chspec & WL_CHANSPEC_BW_MASK) >> WL_CHANSPEC_BW_SHIFT];
+
+#ifdef CHANSPEC_NEW_40MHZ_FORMAT
+
+ if (CHSPEC_IS40(chspec) && CHSPEC_IS2G(chspec)) {
+ sb = CHSPEC_SB_UPPER(chspec) ? "u" : "l";
+ }
+
+ snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s%s", band, ctl_chan, bw, sb);
+#else
+
+ if (CHSPEC_IS40(chspec)) {
+ sb = CHSPEC_SB_UPPER(chspec) ? "u" : "l";
+ snprintf(buf, CHANSPEC_STR_LEN, "%s%d%s", band, ctl_chan, sb);
+ } else {
+ snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s", band, ctl_chan, bw);
+ }
+#endif
+
+ } else {
+
+ uint chan1 = (chspec & WL_CHANSPEC_CHAN1_MASK) >> WL_CHANSPEC_CHAN1_SHIFT;
+ uint chan2 = (chspec & WL_CHANSPEC_CHAN2_MASK) >> WL_CHANSPEC_CHAN2_SHIFT;
+
+
+ chan1 = (chan1 < WF_NUM_5G_80M_CHANS) ? wf_5g_80m_chans[chan1] : 0;
+ chan2 = (chan2 < WF_NUM_5G_80M_CHANS) ? wf_5g_80m_chans[chan2] : 0;
+
+
+ snprintf(buf, CHANSPEC_STR_LEN, "%d/80+80/%d-%d", ctl_chan, chan1, chan2);
+ }
+
+ return (buf);
+}
+
+static int
+read_uint(const char **p, unsigned int *num)
+{
+ unsigned long val;
+ char *endp = NULL;
+
+ val = strtoul(*p, &endp, 10);
+
+ if (endp == *p)
+ return 0;
+
+
+ *p = endp;
+
+ *num = (unsigned int)val;
+
+ return 1;
+}
+
+
+chanspec_t
+wf_chspec_aton(const char *a)
+{
+ chanspec_t chspec;
+ uint chspec_ch, chspec_band, bw, chspec_bw, chspec_sb;
+ uint num, ctl_ch;
+ uint ch1, ch2;
+ char c, sb_ul = '\0';
+ int i;
+
+ bw = 20;
+ chspec_sb = 0;
+ chspec_ch = ch1 = ch2 = 0;
+
+
+ if (!read_uint(&a, &num))
+ return 0;
+
+
+ c = tolower(a[0]);
+ if (c == 'g') {
+ a ++;
+
+
+ if (num == 2)
+ chspec_band = WL_CHANSPEC_BAND_2G;
+ else if (num == 5)
+ chspec_band = WL_CHANSPEC_BAND_5G;
+ else
+ return 0;
+
+
+ if (!read_uint(&a, &ctl_ch))
+ return 0;
+
+ c = tolower(a[0]);
+ }
+ else {
+
+ ctl_ch = num;
+ chspec_band = ((ctl_ch <= CH_MAX_2G_CHANNEL) ?
+ WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G);
+ }
+
+ if (c == '\0') {
+
+ chspec_bw = WL_CHANSPEC_BW_20;
+ goto done_read;
+ }
+
+ a ++;
+
+
+ if (c == 'u' || c == 'l') {
+ sb_ul = c;
+ chspec_bw = WL_CHANSPEC_BW_40;
+ goto done_read;
+ }
+
+
+ if (c != '/')
+ return 0;
+
+
+ if (!read_uint(&a, &bw))
+ return 0;
+
+
+ if (bw == 20) {
+ chspec_bw = WL_CHANSPEC_BW_20;
+ } else if (bw == 40) {
+ chspec_bw = WL_CHANSPEC_BW_40;
+ } else if (bw == 80) {
+ chspec_bw = WL_CHANSPEC_BW_80;
+ } else if (bw == 160) {
+ chspec_bw = WL_CHANSPEC_BW_160;
+ } else {
+ return 0;
+ }
+
+
+
+ c = tolower(a[0]);
+
+
+ if (chspec_band == WL_CHANSPEC_BAND_2G && bw == 40) {
+ if (c == 'u' || c == 'l') {
+ a ++;
+ sb_ul = c;
+ goto done_read;
+ }
+ }
+
+
+ if (c == '+') {
+
+ static const char *plus80 = "80/";
+
+
+ chspec_bw = WL_CHANSPEC_BW_8080;
+
+ a ++;
+
+
+ for (i = 0; i < 3; i++) {
+ if (*a++ != *plus80++) {
+ return 0;
+ }
+ }
+
+
+ if (!read_uint(&a, &ch1))
+ return 0;
+
+
+ if (a[0] != '-')
+ return 0;
+ a ++;
+
+
+ if (!read_uint(&a, &ch2))
+ return 0;
+ }
+
+done_read:
+
+ while (a[0] == ' ') {
+ a ++;
+ }
+
+
+ if (a[0] != '\0')
+ return 0;
+
+
+
+
+ if (sb_ul != '\0') {
+ if (sb_ul == 'l') {
+ chspec_ch = UPPER_20_SB(ctl_ch);
+ chspec_sb = WL_CHANSPEC_CTL_SB_LLL;
+ } else if (sb_ul == 'u') {
+ chspec_ch = LOWER_20_SB(ctl_ch);
+ chspec_sb = WL_CHANSPEC_CTL_SB_LLU;
+ }
+ }
+
+ else if (chspec_bw == WL_CHANSPEC_BW_20) {
+ chspec_ch = ctl_ch;
+ chspec_sb = 0;
+ }
+
+ else if (chspec_bw != WL_CHANSPEC_BW_8080) {
+
+ const uint8 *center_ch = NULL;
+ int num_ch = 0;
+ int sb = -1;
+
+ if (chspec_bw == WL_CHANSPEC_BW_40) {
+ center_ch = wf_5g_40m_chans;
+ num_ch = WF_NUM_5G_40M_CHANS;
+ } else if (chspec_bw == WL_CHANSPEC_BW_80) {
+ center_ch = wf_5g_80m_chans;
+ num_ch = WF_NUM_5G_80M_CHANS;
+ } else if (chspec_bw == WL_CHANSPEC_BW_160) {
+ center_ch = wf_5g_160m_chans;
+ num_ch = WF_NUM_5G_160M_CHANS;
+ } else {
+ return 0;
+ }
+
+ for (i = 0; i < num_ch; i ++) {
+ sb = channel_to_sb(center_ch[i], ctl_ch, bw);
+ if (sb >= 0) {
+ chspec_ch = center_ch[i];
+ chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT;
+ break;
+ }
+ }
+
+
+ if (sb < 0) {
+ return 0;
+ }
+ }
+
+ else {
+ int ch1_id = 0, ch2_id = 0;
+ int sb;
+
+ ch1_id = channel_80mhz_to_id(ch1);
+ ch2_id = channel_80mhz_to_id(ch2);
+
+
+ if (ch1 >= ch2 || ch1_id < 0 || ch2_id < 0)
+ return 0;
+
+
+ chspec_ch = (((uint16)ch1_id << WL_CHANSPEC_CHAN1_SHIFT) |
+ ((uint16)ch2_id << WL_CHANSPEC_CHAN2_SHIFT));
+
+
+
+
+ sb = channel_to_sb(ch1, ctl_ch, bw);
+ if (sb < 0) {
+
+ sb = channel_to_sb(ch2, ctl_ch, bw);
+ if (sb < 0) {
+
+ return 0;
+ }
+
+ sb += 4;
+ }
+
+ chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT;
+ }
+
+ chspec = (chspec_ch | chspec_band | chspec_bw | chspec_sb);
+
+ if (wf_chspec_malformed(chspec))
+ return 0;
+
+ return chspec;
+}
+
+
+bool
+wf_chspec_malformed(chanspec_t chanspec)
+{
+ uint chspec_bw = CHSPEC_BW(chanspec);
+ uint chspec_ch = CHSPEC_CHANNEL(chanspec);
+
+
+ if (CHSPEC_IS2G(chanspec)) {
+
+ if (chspec_bw != WL_CHANSPEC_BW_20 &&
+ chspec_bw != WL_CHANSPEC_BW_40) {
+ return TRUE;
+ }
+ } else if (CHSPEC_IS5G(chanspec)) {
+ if (chspec_bw == WL_CHANSPEC_BW_8080) {
+ uint ch1_id, ch2_id;
+
+
+ ch1_id = CHSPEC_CHAN1(chanspec);
+ ch2_id = CHSPEC_CHAN2(chanspec);
+ if (ch1_id >= WF_NUM_5G_80M_CHANS || ch2_id >= WF_NUM_5G_80M_CHANS)
+ return TRUE;
+
+
+ if (ch2_id <= ch1_id)
+ return TRUE;
+ } else if (chspec_bw == WL_CHANSPEC_BW_20 || chspec_bw == WL_CHANSPEC_BW_40 ||
+ chspec_bw == WL_CHANSPEC_BW_80 || chspec_bw == WL_CHANSPEC_BW_160) {
+
+ if (chspec_ch > MAXCHANNEL) {
+ return TRUE;
+ }
+ } else {
+
+ return TRUE;
+ }
+ } else {
+
+ return TRUE;
+ }
+
+
+ if (chspec_bw == WL_CHANSPEC_BW_20) {
+ if (CHSPEC_CTL_SB(chanspec) != WL_CHANSPEC_CTL_SB_LLL)
+ return TRUE;
+ } else if (chspec_bw == WL_CHANSPEC_BW_40) {
+ if (CHSPEC_CTL_SB(chanspec) > WL_CHANSPEC_CTL_SB_LLU)
+ return TRUE;
+ } else if (chspec_bw == WL_CHANSPEC_BW_80) {
+ if (CHSPEC_CTL_SB(chanspec) > WL_CHANSPEC_CTL_SB_LUU)
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+
+bool
+wf_chspec_valid(chanspec_t chanspec)
+{
+ uint chspec_bw = CHSPEC_BW(chanspec);
+ uint chspec_ch = CHSPEC_CHANNEL(chanspec);
+
+ if (wf_chspec_malformed(chanspec))
+ return FALSE;
+
+ if (CHSPEC_IS2G(chanspec)) {
+
+ if (chspec_bw == WL_CHANSPEC_BW_20) {
+ if (chspec_ch >= 1 && chspec_ch <= 14)
+ return TRUE;
+ } else if (chspec_bw == WL_CHANSPEC_BW_40) {
+ if (chspec_ch >= 3 && chspec_ch <= 11)
+ return TRUE;
+ }
+ } else if (CHSPEC_IS5G(chanspec)) {
+ if (chspec_bw == WL_CHANSPEC_BW_8080) {
+ uint16 ch1, ch2;
+
+ ch1 = wf_5g_80m_chans[CHSPEC_CHAN1(chanspec)];
+ ch2 = wf_5g_80m_chans[CHSPEC_CHAN2(chanspec)];
+
+
+ if (ch2 > ch1 + CH_80MHZ_APART)
+ return TRUE;
+ } else {
+ const uint8 *center_ch;
+ uint num_ch, i;
+
+ if (chspec_bw == WL_CHANSPEC_BW_20 || chspec_bw == WL_CHANSPEC_BW_40) {
+ center_ch = wf_5g_40m_chans;
+ num_ch = WF_NUM_5G_40M_CHANS;
+ } else if (chspec_bw == WL_CHANSPEC_BW_80) {
+ center_ch = wf_5g_80m_chans;
+ num_ch = WF_NUM_5G_80M_CHANS;
+ } else if (chspec_bw == WL_CHANSPEC_BW_160) {
+ center_ch = wf_5g_160m_chans;
+ num_ch = WF_NUM_5G_160M_CHANS;
+ } else {
+
+ return FALSE;
+ }
+
+
+ if (chspec_bw == WL_CHANSPEC_BW_20) {
+
+ for (i = 0; i < num_ch; i ++) {
+ if (chspec_ch == (uint)LOWER_20_SB(center_ch[i]) ||
+ chspec_ch == (uint)UPPER_20_SB(center_ch[i]))
+ break;
+ }
+
+ if (i == num_ch) {
+
+ if (chspec_ch == 34 || chspec_ch == 38 ||
+ chspec_ch == 42 || chspec_ch == 46)
+ i = 0;
+ }
+ } else {
+
+ for (i = 0; i < num_ch; i ++) {
+ if (chspec_ch == center_ch[i])
+ break;
+ }
+ }
+
+ if (i < num_ch) {
+
+ return TRUE;
+ }
+ }
+ }
+
+ return FALSE;
+}
+
+
+uint8
+wf_chspec_ctlchan(chanspec_t chspec)
+{
+ uint center_chan;
+ uint bw_mhz;
+ uint sb;
+
+ ASSERT(!wf_chspec_malformed(chspec));
+
+
+ if (CHSPEC_IS20(chspec)) {
+ return CHSPEC_CHANNEL(chspec);
+ } else {
+ sb = CHSPEC_CTL_SB(chspec) >> WL_CHANSPEC_CTL_SB_SHIFT;
+
+ if (CHSPEC_IS8080(chspec)) {
+ bw_mhz = 80;
+
+ if (sb < 4) {
+ center_chan = CHSPEC_CHAN1(chspec);
+ }
+ else {
+ center_chan = CHSPEC_CHAN2(chspec);
+ sb -= 4;
+ }
+
+
+ center_chan = wf_5g_80m_chans[center_chan];
+ }
+ else {
+ bw_mhz = bw_chspec_to_mhz(chspec);
+ center_chan = CHSPEC_CHANNEL(chspec) >> WL_CHANSPEC_CHAN_SHIFT;
+ }
+
+ return (channel_to_ctl_chan(center_chan, bw_mhz, sb));
+ }
+}
+
+
+chanspec_t
+wf_chspec_ctlchspec(chanspec_t chspec)
+{
+ chanspec_t ctl_chspec = chspec;
+ uint8 ctl_chan;
+
+ ASSERT(!wf_chspec_malformed(chspec));
+
+
+ if (!CHSPEC_IS20(chspec)) {
+ ctl_chan = wf_chspec_ctlchan(chspec);
+ ctl_chspec = ctl_chan | WL_CHANSPEC_BW_20;
+ ctl_chspec |= CHSPEC_BAND(chspec);
+ }
+ return ctl_chspec;
+}
+
+#endif
+
+
+extern chanspec_t wf_chspec_primary40_chspec(chanspec_t chspec)
+{
+ chanspec_t chspec40 = chspec;
+ uint center_chan;
+ uint sb;
+
+ ASSERT(!wf_chspec_malformed(chspec));
+
+ if (CHSPEC_IS80(chspec)) {
+ center_chan = CHSPEC_CHANNEL(chspec);
+ sb = CHSPEC_CTL_SB(chspec);
+
+ if (sb == WL_CHANSPEC_CTL_SB_UL) {
+
+ sb = WL_CHANSPEC_CTL_SB_L;
+ center_chan += CH_20MHZ_APART;
+ } else if (sb == WL_CHANSPEC_CTL_SB_UU) {
+
+ sb = WL_CHANSPEC_CTL_SB_U;
+ center_chan += CH_20MHZ_APART;
+ } else {
+
+
+ center_chan -= CH_20MHZ_APART;
+ }
+
+
+ chspec40 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_40 |
+ sb | center_chan);
+ }
+
+ return chspec40;
+}
+
+
+int
+wf_mhz2channel(uint freq, uint start_factor)
+{
+ int ch = -1;
+ uint base;
+ int offset;
+
+
+ if (start_factor == 0) {
+ if (freq >= 2400 && freq <= 2500)
+ start_factor = WF_CHAN_FACTOR_2_4_G;
+ else if (freq >= 5000 && freq <= 6000)
+ start_factor = WF_CHAN_FACTOR_5_G;
+ }
+
+ if (freq == 2484 && start_factor == WF_CHAN_FACTOR_2_4_G)
+ return 14;
+
+ base = start_factor / 2;
+
+
+ if ((freq < base) || (freq > base + 1000))
+ return -1;
+
+ offset = freq - base;
+ ch = offset / 5;
+
+
+ if (offset != (ch * 5))
+ return -1;
+
+
+ if (start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 13))
+ return -1;
+
+ return ch;
+}
+
+
+int
+wf_channel2mhz(uint ch, uint start_factor)
+{
+ int freq;
+
+ if ((start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 14)) ||
+ (ch > 200))
+ freq = -1;
+ else if ((start_factor == WF_CHAN_FACTOR_2_4_G) && (ch == 14))
+ freq = 2484;
+ else
+ freq = ch * 5 + start_factor / 2;
+
+ return freq;
+}
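+
+/* Editor note, worked examples (not part of the original source): with
+ * start_factor WF_CHAN_FACTOR_2_4_G (4814) the base is 2407 MHz, so channel 1
+ * maps to 2412 MHz and channel 13 to 2472 MHz, while channel 14 is
+ * special-cased to 2484 MHz; with WF_CHAN_FACTOR_5_G (10000) the base is
+ * 5000 MHz, so channel 36 maps to 5180 MHz.  wf_mhz2channel() is the inverse
+ * and rejects frequencies that are not a whole multiple of 5 MHz above the
+ * base.
+ */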
diff --git a/drivers/net/wireless/bcmdhd/bcmwifi_channels.h b/drivers/net/wireless/bcmdhd/bcmwifi_channels.h
new file mode 100644
index 000000000000..c7970474128f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmwifi_channels.h
@@ -0,0 +1,345 @@
+/*
+ * Misc utility routines for WL and Apps.
+ * This header file houses the defines and function prototypes used by
+ * both the wl driver, tools & Apps.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmwifi_channels.h 309193 2012-01-19 00:03:57Z $
+ */
+
+#ifndef _bcmwifi_channels_h_
+#define _bcmwifi_channels_h_
+
+
+
+typedef uint16 chanspec_t;
+
+
+#define CH_UPPER_SB 0x01
+#define CH_LOWER_SB 0x02
+#define CH_EWA_VALID 0x04
+#define CH_80MHZ_APART 16
+#define CH_40MHZ_APART 8
+#define CH_20MHZ_APART 4
+#define CH_10MHZ_APART 2
+#define CH_5MHZ_APART 1
+#define CH_MAX_2G_CHANNEL 14
+#define MAXCHANNEL 224
+#define CHSPEC_CTLOVLP(sp1, sp2, sep) ABS(wf_chspec_ctlchan(sp1) - wf_chspec_ctlchan(sp2)) < (sep)
+
+
+#undef D11AC_IOTYPES
+#define D11AC_IOTYPES
+
+#ifndef D11AC_IOTYPES
+
+#define WL_CHANSPEC_CHAN_MASK 0x00ff
+#define WL_CHANSPEC_CHAN_SHIFT 0
+
+#define WL_CHANSPEC_CTL_SB_MASK 0x0300
+#define WL_CHANSPEC_CTL_SB_SHIFT 8
+#define WL_CHANSPEC_CTL_SB_LOWER 0x0100
+#define WL_CHANSPEC_CTL_SB_UPPER 0x0200
+#define WL_CHANSPEC_CTL_SB_NONE 0x0300
+
+#define WL_CHANSPEC_BW_MASK 0x0C00
+#define WL_CHANSPEC_BW_SHIFT 10
+#define WL_CHANSPEC_BW_10 0x0400
+#define WL_CHANSPEC_BW_20 0x0800
+#define WL_CHANSPEC_BW_40 0x0C00
+
+#define WL_CHANSPEC_BAND_MASK 0xf000
+#define WL_CHANSPEC_BAND_SHIFT 12
+#define WL_CHANSPEC_BAND_5G 0x1000
+#define WL_CHANSPEC_BAND_2G 0x2000
+#define INVCHANSPEC 255
+
+
+#define LOWER_20_SB(channel) (((channel) > CH_10MHZ_APART) ? ((channel) - CH_10MHZ_APART) : 0)
+#define UPPER_20_SB(channel) (((channel) < (MAXCHANNEL - CH_10MHZ_APART)) ? \
+ ((channel) + CH_10MHZ_APART) : 0)
+#define CHSPEC_WLCBANDUNIT(chspec) (CHSPEC_IS5G(chspec) ? BAND_5G_INDEX : BAND_2G_INDEX)
+#define CH20MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_20 | \
+ WL_CHANSPEC_CTL_SB_NONE | (((channel) <= CH_MAX_2G_CHANNEL) ? \
+ WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
+#define NEXT_20MHZ_CHAN(channel) (((channel) < (MAXCHANNEL - CH_20MHZ_APART)) ? \
+ ((channel) + CH_20MHZ_APART) : 0)
+#define CH40MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \
+ ((channel) | (ctlsb) | WL_CHANSPEC_BW_40 | \
+ ((channel) <= CH_MAX_2G_CHANNEL ? WL_CHANSPEC_BAND_2G : \
+ WL_CHANSPEC_BAND_5G))
+#define CHSPEC_CHANNEL(chspec) ((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK))
+#define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK)
+
+
+#define CHSPEC_CTL_SB(chspec) ((chspec) & WL_CHANSPEC_CTL_SB_MASK)
+#define CHSPEC_BW(chspec) ((chspec) & WL_CHANSPEC_BW_MASK)
+
+#ifdef WL11N_20MHZONLY
+
+#define CHSPEC_IS10(chspec) 0
+#define CHSPEC_IS20(chspec) 1
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec) 0
+#endif
+
+#else
+
+#define CHSPEC_IS10(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10)
+#define CHSPEC_IS20(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20)
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40)
+#endif
+
+#endif
+
+#define CHSPEC_IS5G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G)
+#define CHSPEC_IS2G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G)
+#define CHSPEC_SB_NONE(chspec) (((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_NONE)
+#define CHSPEC_SB_UPPER(chspec) (((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_UPPER)
+#define CHSPEC_SB_LOWER(chspec) (((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_LOWER)
+#define CHSPEC_CTL_CHAN(chspec) ((CHSPEC_SB_LOWER(chspec)) ? \
+ (LOWER_20_SB(((chspec) & WL_CHANSPEC_CHAN_MASK))) : \
+ (UPPER_20_SB(((chspec) & WL_CHANSPEC_CHAN_MASK))))
+#define CHSPEC2WLC_BAND(chspec) (CHSPEC_IS5G(chspec) ? WLC_BAND_5G : WLC_BAND_2G)
+
+#define CHANSPEC_STR_LEN 8
+
+#else
+
+#define WL_CHANSPEC_CHAN_MASK 0x00ff
+#define WL_CHANSPEC_CHAN_SHIFT 0
+#define WL_CHANSPEC_CHAN1_MASK 0x000f
+#define WL_CHANSPEC_CHAN1_SHIFT 0
+#define WL_CHANSPEC_CHAN2_MASK 0x00f0
+#define WL_CHANSPEC_CHAN2_SHIFT 4
+
+#define WL_CHANSPEC_CTL_SB_MASK 0x0700
+#define WL_CHANSPEC_CTL_SB_SHIFT 8
+#define WL_CHANSPEC_CTL_SB_LLL 0x0000
+#define WL_CHANSPEC_CTL_SB_LLU 0x0100
+#define WL_CHANSPEC_CTL_SB_LUL 0x0200
+#define WL_CHANSPEC_CTL_SB_LUU 0x0300
+#define WL_CHANSPEC_CTL_SB_ULL 0x0400
+#define WL_CHANSPEC_CTL_SB_ULU 0x0500
+#define WL_CHANSPEC_CTL_SB_UUL 0x0600
+#define WL_CHANSPEC_CTL_SB_UUU 0x0700
+#define WL_CHANSPEC_CTL_SB_LL WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_LU WL_CHANSPEC_CTL_SB_LLU
+#define WL_CHANSPEC_CTL_SB_UL WL_CHANSPEC_CTL_SB_LUL
+#define WL_CHANSPEC_CTL_SB_UU WL_CHANSPEC_CTL_SB_LUU
+#define WL_CHANSPEC_CTL_SB_L WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_U WL_CHANSPEC_CTL_SB_LLU
+#define WL_CHANSPEC_CTL_SB_LOWER WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_UPPER WL_CHANSPEC_CTL_SB_LLU
+
+#define WL_CHANSPEC_BW_MASK 0x3800
+#define WL_CHANSPEC_BW_SHIFT 11
+#define WL_CHANSPEC_BW_5 0x0000
+#define WL_CHANSPEC_BW_10 0x0800
+#define WL_CHANSPEC_BW_20 0x1000
+#define WL_CHANSPEC_BW_40 0x1800
+#define WL_CHANSPEC_BW_80 0x2000
+#define WL_CHANSPEC_BW_160 0x2800
+#define WL_CHANSPEC_BW_8080 0x3000
+
+#define WL_CHANSPEC_BAND_MASK 0xc000
+#define WL_CHANSPEC_BAND_SHIFT 14
+#define WL_CHANSPEC_BAND_2G 0x0000
+#define WL_CHANSPEC_BAND_3G 0x4000
+#define WL_CHANSPEC_BAND_4G 0x8000
+#define WL_CHANSPEC_BAND_5G 0xc000
+#define INVCHANSPEC 255
+
+
+#define LOWER_20_SB(channel) (((channel) > CH_10MHZ_APART) ? \
+ ((channel) - CH_10MHZ_APART) : 0)
+#define UPPER_20_SB(channel) (((channel) < (MAXCHANNEL - CH_10MHZ_APART)) ? \
+ ((channel) + CH_10MHZ_APART) : 0)
+#define LOWER_40_SB(channel) ((channel) - CH_20MHZ_APART)
+#define UPPER_40_SB(channel) ((channel) + CH_20MHZ_APART)
+#define CHSPEC_WLCBANDUNIT(chspec) (CHSPEC_IS5G(chspec) ? BAND_5G_INDEX : BAND_2G_INDEX)
+#define CH20MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_20 | \
+ (((channel) <= CH_MAX_2G_CHANNEL) ? \
+ WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
+#define NEXT_20MHZ_CHAN(channel) (((channel) < (MAXCHANNEL - CH_20MHZ_APART)) ? \
+ ((channel) + CH_20MHZ_APART) : 0)
+#define CH40MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \
+ ((channel) | (ctlsb) | WL_CHANSPEC_BW_40 | \
+ ((channel) <= CH_MAX_2G_CHANNEL ? WL_CHANSPEC_BAND_2G : \
+ WL_CHANSPEC_BAND_5G))
+#define CH80MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \
+ ((channel) | (ctlsb) | \
+ WL_CHANSPEC_BW_80 | WL_CHANSPEC_BAND_5G)
+#define CH160MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \
+ ((channel) | (ctlsb) | \
+ WL_CHANSPEC_BW_160 | WL_CHANSPEC_BAND_5G)
+
+
+#define CHSPEC_CHANNEL(chspec) ((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK))
+#define CHSPEC_CHAN1(chspec) ((chspec) & WL_CHANSPEC_CHAN1_MASK)
+#define CHSPEC_CHAN2(chspec) ((chspec) & WL_CHANSPEC_CHAN2_MASK)
+#define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK)
+#define CHSPEC_CTL_SB(chspec) ((chspec) & WL_CHANSPEC_CTL_SB_MASK)
+#define CHSPEC_BW(chspec) ((chspec) & WL_CHANSPEC_BW_MASK)
+
+#ifdef WL11N_20MHZONLY
+
+#define CHSPEC_IS10(chspec) 0
+#define CHSPEC_IS20(chspec) 1
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec) 0
+#endif
+#ifndef CHSPEC_IS80
+#define CHSPEC_IS80(chspec) 0
+#endif
+#ifndef CHSPEC_IS160
+#define CHSPEC_IS160(chspec) 0
+#endif
+#ifndef CHSPEC_IS8080
+#define CHSPEC_IS8080(chspec) 0
+#endif
+
+#else
+
+#define CHSPEC_IS10(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10)
+#define CHSPEC_IS20(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20)
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40)
+#endif
+#ifndef CHSPEC_IS80
+#define CHSPEC_IS80(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_80)
+#endif
+#ifndef CHSPEC_IS160
+#define CHSPEC_IS160(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_160)
+#endif
+#ifndef CHSPEC_IS8080
+#define CHSPEC_IS8080(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_8080)
+#endif
+
+#endif
+
+#define CHSPEC_IS5G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G)
+#define CHSPEC_IS2G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G)
+#define CHSPEC_SB_UPPER(chspec) \
+ ((((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_UPPER) && \
+ (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40))
+#define CHSPEC_SB_LOWER(chspec) \
+ ((((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_LOWER) && \
+ (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40))
+#define CHSPEC2WLC_BAND(chspec) (CHSPEC_IS5G(chspec) ? WLC_BAND_5G : WLC_BAND_2G)
+
+
+#define CHANSPEC_STR_LEN 20
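+
+/* Editor note, illustrative example (not part of the original source): with
+ * the D11AC layout above, an 80 MHz, 5 GHz chanspec whose center channel is 42
+ * and whose control (primary 20 MHz) channel is 36 is encoded as
+ *	42 | WL_CHANSPEC_CTL_SB_LLL | WL_CHANSPEC_BW_80 | WL_CHANSPEC_BAND_5G
+ *	= 0x002a | 0x0000 | 0x2000 | 0xc000 = 0xe02a,
+ * since channel 36 is the lowest of the four 20 MHz sub-bands (36/40/44/48)
+ * covered by the 80 MHz channel centered on 42.
+ */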
+
+
+
+#define WL_LCHANSPEC_CHAN_MASK 0x00ff
+#define WL_LCHANSPEC_CHAN_SHIFT 0
+
+#define WL_LCHANSPEC_CTL_SB_MASK 0x0300
+#define WL_LCHANSPEC_CTL_SB_SHIFT 8
+#define WL_LCHANSPEC_CTL_SB_LOWER 0x0100
+#define WL_LCHANSPEC_CTL_SB_UPPER 0x0200
+#define WL_LCHANSPEC_CTL_SB_NONE 0x0300
+
+#define WL_LCHANSPEC_BW_MASK 0x0C00
+#define WL_LCHANSPEC_BW_SHIFT 10
+#define WL_LCHANSPEC_BW_10 0x0400
+#define WL_LCHANSPEC_BW_20 0x0800
+#define WL_LCHANSPEC_BW_40 0x0C00
+
+#define WL_LCHANSPEC_BAND_MASK 0xf000
+#define WL_LCHANSPEC_BAND_SHIFT 12
+#define WL_LCHANSPEC_BAND_5G 0x1000
+#define WL_LCHANSPEC_BAND_2G 0x2000
+
+#define LCHSPEC_CHANNEL(chspec) ((uint8)((chspec) & WL_LCHANSPEC_CHAN_MASK))
+#define LCHSPEC_BAND(chspec) ((chspec) & WL_LCHANSPEC_BAND_MASK)
+#define LCHSPEC_CTL_SB(chspec) ((chspec) & WL_LCHANSPEC_CTL_SB_MASK)
+#define LCHSPEC_BW(chspec) ((chspec) & WL_LCHANSPEC_BW_MASK)
+#define LCHSPEC_IS10(chspec) (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_10)
+#define LCHSPEC_IS20(chspec) (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_20)
+#define LCHSPEC_IS40(chspec) (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40)
+#define LCHSPEC_IS5G(chspec) (((chspec) & WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_5G)
+#define LCHSPEC_IS2G(chspec) (((chspec) & WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_2G)
+
+#define LCHSPEC_CREATE(chan, band, bw, sb) ((uint16)((chan) | (sb) | (bw) | (band)))
+
+#endif
+
+
+
+
+#define WF_CHAN_FACTOR_2_4_G 4814
+
+
+#define WF_CHAN_FACTOR_5_G 10000
+
+
+#define WF_CHAN_FACTOR_4_G 8000
+
+
+#define WLC_MAXRATE 108
+#define WLC_RATE_1M 2
+#define WLC_RATE_2M 4
+#define WLC_RATE_5M5 11
+#define WLC_RATE_11M 22
+#define WLC_RATE_6M 12
+#define WLC_RATE_9M 18
+#define WLC_RATE_12M 24
+#define WLC_RATE_18M 36
+#define WLC_RATE_24M 48
+#define WLC_RATE_36M 72
+#define WLC_RATE_48M 96
+#define WLC_RATE_54M 108
+
+#define WLC_2G_25MHZ_OFFSET 5
+
+
+extern char * wf_chspec_ntoa(chanspec_t chspec, char *buf);
+
+
+extern chanspec_t wf_chspec_aton(const char *a);
+
+
+extern bool wf_chspec_malformed(chanspec_t chanspec);
+
+
+extern bool wf_chspec_valid(chanspec_t chanspec);
+
+
+extern uint8 wf_chspec_ctlchan(chanspec_t chspec);
+
+
+extern chanspec_t wf_chspec_ctlchspec(chanspec_t chspec);
+
+
+extern chanspec_t wf_chspec_primary40_chspec(chanspec_t chspec);
+
+
+extern int wf_mhz2channel(uint freq, uint start_factor);
+
+
+extern int wf_channel2mhz(uint channel, uint start_factor);
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/bcmwifi_rates.h b/drivers/net/wireless/bcmdhd/bcmwifi_rates.h
new file mode 100644
index 000000000000..9896b2360a6a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmwifi_rates.h
@@ -0,0 +1,306 @@
+/*
+ * Indices for 802.11 a/b/g/n/ac 1-3 chain symmetric transmit rates
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmwifi_rates.h 252708 2011-04-12 06:45:56Z $
+ */
+
+#ifndef _bcmwifi_rates_h_
+#define _bcmwifi_rates_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define WL_RATESET_SZ_DSSS 4
+#define WL_RATESET_SZ_OFDM 8
+#define WL_RATESET_SZ_HT_MCS 8
+#define WL_RATESET_SZ_VHT_MCS 10
+
+#define WL_TX_CHAINS_MAX 3
+
+#define WL_RATE_DISABLED (-128)
+
+
+typedef enum wl_tx_bw {
+ WL_TX_BW_20,
+ WL_TX_BW_40,
+ WL_TX_BW_80,
+ WL_TX_BW_20IN40,
+ WL_TX_BW_20IN80,
+ WL_TX_BW_40IN80,
+ WL_TX_BW_ALL
+} wl_tx_bw_t;
+
+
+
+typedef enum wl_tx_mode {
+ WL_TX_MODE_NONE,
+ WL_TX_MODE_STBC,
+ WL_TX_MODE_CDD,
+ WL_TX_MODE_SDM
+} wl_tx_mode_t;
+
+
+
+typedef enum wl_tx_chains {
+ WL_TX_CHAINS_1 = 1,
+ WL_TX_CHAINS_2,
+ WL_TX_CHAINS_3
+} wl_tx_chains_t;
+
+
+
+typedef enum wl_tx_nss {
+ WL_TX_NSS_1 = 1,
+ WL_TX_NSS_2,
+ WL_TX_NSS_3
+} wl_tx_nss_t;
+
+
+typedef enum clm_rates {
+
+
+
+ WL_RATE_1X1_DSSS_1 = 0,
+ WL_RATE_1X1_DSSS_2 = 1,
+ WL_RATE_1X1_DSSS_5_5 = 2,
+ WL_RATE_1X1_DSSS_11 = 3,
+
+ WL_RATE_1X1_OFDM_6 = 4,
+ WL_RATE_1X1_OFDM_9 = 5,
+ WL_RATE_1X1_OFDM_12 = 6,
+ WL_RATE_1X1_OFDM_18 = 7,
+ WL_RATE_1X1_OFDM_24 = 8,
+ WL_RATE_1X1_OFDM_36 = 9,
+ WL_RATE_1X1_OFDM_48 = 10,
+ WL_RATE_1X1_OFDM_54 = 11,
+
+ WL_RATE_1X1_MCS0 = 12,
+ WL_RATE_1X1_MCS1 = 13,
+ WL_RATE_1X1_MCS2 = 14,
+ WL_RATE_1X1_MCS3 = 15,
+ WL_RATE_1X1_MCS4 = 16,
+ WL_RATE_1X1_MCS5 = 17,
+ WL_RATE_1X1_MCS6 = 18,
+ WL_RATE_1X1_MCS7 = 19,
+
+ WL_RATE_1X1_VHT0SS1 = 12,
+ WL_RATE_1X1_VHT1SS1 = 13,
+ WL_RATE_1X1_VHT2SS1 = 14,
+ WL_RATE_1X1_VHT3SS1 = 15,
+ WL_RATE_1X1_VHT4SS1 = 16,
+ WL_RATE_1X1_VHT5SS1 = 17,
+ WL_RATE_1X1_VHT6SS1 = 18,
+ WL_RATE_1X1_VHT7SS1 = 19,
+ WL_RATE_1X1_VHT8SS1 = 20,
+ WL_RATE_1X1_VHT9SS1 = 21,
+
+
+
+
+
+ WL_RATE_1X2_DSSS_1 = 22,
+ WL_RATE_1X2_DSSS_2 = 23,
+ WL_RATE_1X2_DSSS_5_5 = 24,
+ WL_RATE_1X2_DSSS_11 = 25,
+
+ WL_RATE_1X2_CDD_OFDM_6 = 26,
+ WL_RATE_1X2_CDD_OFDM_9 = 27,
+ WL_RATE_1X2_CDD_OFDM_12 = 28,
+ WL_RATE_1X2_CDD_OFDM_18 = 29,
+ WL_RATE_1X2_CDD_OFDM_24 = 30,
+ WL_RATE_1X2_CDD_OFDM_36 = 31,
+ WL_RATE_1X2_CDD_OFDM_48 = 32,
+ WL_RATE_1X2_CDD_OFDM_54 = 33,
+
+ WL_RATE_1X2_CDD_MCS0 = 34,
+ WL_RATE_1X2_CDD_MCS1 = 35,
+ WL_RATE_1X2_CDD_MCS2 = 36,
+ WL_RATE_1X2_CDD_MCS3 = 37,
+ WL_RATE_1X2_CDD_MCS4 = 38,
+ WL_RATE_1X2_CDD_MCS5 = 39,
+ WL_RATE_1X2_CDD_MCS6 = 40,
+ WL_RATE_1X2_CDD_MCS7 = 41,
+
+ WL_RATE_1X2_VHT0SS1 = 34,
+ WL_RATE_1X2_VHT1SS1 = 35,
+ WL_RATE_1X2_VHT2SS1 = 36,
+ WL_RATE_1X2_VHT3SS1 = 37,
+ WL_RATE_1X2_VHT4SS1 = 38,
+ WL_RATE_1X2_VHT5SS1 = 39,
+ WL_RATE_1X2_VHT6SS1 = 40,
+ WL_RATE_1X2_VHT7SS1 = 41,
+ WL_RATE_1X2_VHT8SS1 = 42,
+ WL_RATE_1X2_VHT9SS1 = 43,
+
+
+ WL_RATE_2X2_STBC_MCS0 = 44,
+ WL_RATE_2X2_STBC_MCS1 = 45,
+ WL_RATE_2X2_STBC_MCS2 = 46,
+ WL_RATE_2X2_STBC_MCS3 = 47,
+ WL_RATE_2X2_STBC_MCS4 = 48,
+ WL_RATE_2X2_STBC_MCS5 = 49,
+ WL_RATE_2X2_STBC_MCS6 = 50,
+ WL_RATE_2X2_STBC_MCS7 = 51,
+
+ WL_RATE_2X2_STBC_VHT0SS1 = 44,
+ WL_RATE_2X2_STBC_VHT1SS1 = 45,
+ WL_RATE_2X2_STBC_VHT2SS1 = 46,
+ WL_RATE_2X2_STBC_VHT3SS1 = 47,
+ WL_RATE_2X2_STBC_VHT4SS1 = 48,
+ WL_RATE_2X2_STBC_VHT5SS1 = 49,
+ WL_RATE_2X2_STBC_VHT6SS1 = 50,
+ WL_RATE_2X2_STBC_VHT7SS1 = 51,
+ WL_RATE_2X2_STBC_VHT8SS1 = 52,
+ WL_RATE_2X2_STBC_VHT9SS1 = 53,
+
+ WL_RATE_2X2_SDM_MCS8 = 54,
+ WL_RATE_2X2_SDM_MCS9 = 55,
+ WL_RATE_2X2_SDM_MCS10 = 56,
+ WL_RATE_2X2_SDM_MCS11 = 57,
+ WL_RATE_2X2_SDM_MCS12 = 58,
+ WL_RATE_2X2_SDM_MCS13 = 59,
+ WL_RATE_2X2_SDM_MCS14 = 60,
+ WL_RATE_2X2_SDM_MCS15 = 61,
+
+ WL_RATE_2X2_VHT0SS2 = 54,
+ WL_RATE_2X2_VHT1SS2 = 55,
+ WL_RATE_2X2_VHT2SS2 = 56,
+ WL_RATE_2X2_VHT3SS2 = 57,
+ WL_RATE_2X2_VHT4SS2 = 58,
+ WL_RATE_2X2_VHT5SS2 = 59,
+ WL_RATE_2X2_VHT6SS2 = 60,
+ WL_RATE_2X2_VHT7SS2 = 61,
+ WL_RATE_2X2_VHT8SS2 = 62,
+ WL_RATE_2X2_VHT9SS2 = 63,
+
+
+
+
+
+ WL_RATE_1X3_DSSS_1 = 64,
+ WL_RATE_1X3_DSSS_2 = 65,
+ WL_RATE_1X3_DSSS_5_5 = 66,
+ WL_RATE_1X3_DSSS_11 = 67,
+
+ WL_RATE_1X3_CDD_OFDM_6 = 68,
+ WL_RATE_1X3_CDD_OFDM_9 = 69,
+ WL_RATE_1X3_CDD_OFDM_12 = 70,
+ WL_RATE_1X3_CDD_OFDM_18 = 71,
+ WL_RATE_1X3_CDD_OFDM_24 = 72,
+ WL_RATE_1X3_CDD_OFDM_36 = 73,
+ WL_RATE_1X3_CDD_OFDM_48 = 74,
+ WL_RATE_1X3_CDD_OFDM_54 = 75,
+
+ WL_RATE_1X3_CDD_MCS0 = 76,
+ WL_RATE_1X3_CDD_MCS1 = 77,
+ WL_RATE_1X3_CDD_MCS2 = 78,
+ WL_RATE_1X3_CDD_MCS3 = 79,
+ WL_RATE_1X3_CDD_MCS4 = 80,
+ WL_RATE_1X3_CDD_MCS5 = 81,
+ WL_RATE_1X3_CDD_MCS6 = 82,
+ WL_RATE_1X3_CDD_MCS7 = 83,
+
+ WL_RATE_1X3_VHT0SS1 = 76,
+ WL_RATE_1X3_VHT1SS1 = 77,
+ WL_RATE_1X3_VHT2SS1 = 78,
+ WL_RATE_1X3_VHT3SS1 = 79,
+ WL_RATE_1X3_VHT4SS1 = 80,
+ WL_RATE_1X3_VHT5SS1 = 81,
+ WL_RATE_1X3_VHT6SS1 = 82,
+ WL_RATE_1X3_VHT7SS1 = 83,
+ WL_RATE_1X3_VHT8SS1 = 84,
+ WL_RATE_1X3_VHT9SS1 = 85,
+
+
+ WL_RATE_2X3_STBC_MCS0 = 86,
+ WL_RATE_2X3_STBC_MCS1 = 87,
+ WL_RATE_2X3_STBC_MCS2 = 88,
+ WL_RATE_2X3_STBC_MCS3 = 89,
+ WL_RATE_2X3_STBC_MCS4 = 90,
+ WL_RATE_2X3_STBC_MCS5 = 91,
+ WL_RATE_2X3_STBC_MCS6 = 92,
+ WL_RATE_2X3_STBC_MCS7 = 93,
+
+ WL_RATE_2X3_STBC_VHT0SS1 = 86,
+ WL_RATE_2X3_STBC_VHT1SS1 = 87,
+ WL_RATE_2X3_STBC_VHT2SS1 = 88,
+ WL_RATE_2X3_STBC_VHT3SS1 = 89,
+ WL_RATE_2X3_STBC_VHT4SS1 = 90,
+ WL_RATE_2X3_STBC_VHT5SS1 = 91,
+ WL_RATE_2X3_STBC_VHT6SS1 = 92,
+ WL_RATE_2X3_STBC_VHT7SS1 = 93,
+ WL_RATE_2X3_STBC_VHT8SS1 = 94,
+ WL_RATE_2X3_STBC_VHT9SS1 = 95,
+
+ WL_RATE_2X3_SDM_MCS8 = 96,
+ WL_RATE_2X3_SDM_MCS9 = 97,
+ WL_RATE_2X3_SDM_MCS10 = 98,
+ WL_RATE_2X3_SDM_MCS11 = 99,
+ WL_RATE_2X3_SDM_MCS12 = 100,
+ WL_RATE_2X3_SDM_MCS13 = 101,
+ WL_RATE_2X3_SDM_MCS14 = 102,
+ WL_RATE_2X3_SDM_MCS15 = 103,
+
+ WL_RATE_2X3_VHT0SS2 = 96,
+ WL_RATE_2X3_VHT1SS2 = 97,
+ WL_RATE_2X3_VHT2SS2 = 98,
+ WL_RATE_2X3_VHT3SS2 = 99,
+ WL_RATE_2X3_VHT4SS2 = 100,
+ WL_RATE_2X3_VHT5SS2 = 101,
+ WL_RATE_2X3_VHT6SS2 = 102,
+ WL_RATE_2X3_VHT7SS2 = 103,
+ WL_RATE_2X3_VHT8SS2 = 104,
+ WL_RATE_2X3_VHT9SS2 = 105,
+
+
+ WL_RATE_3X3_SDM_MCS16 = 106,
+ WL_RATE_3X3_SDM_MCS17 = 107,
+ WL_RATE_3X3_SDM_MCS18 = 108,
+ WL_RATE_3X3_SDM_MCS19 = 109,
+ WL_RATE_3X3_SDM_MCS20 = 110,
+ WL_RATE_3X3_SDM_MCS21 = 111,
+ WL_RATE_3X3_SDM_MCS22 = 112,
+ WL_RATE_3X3_SDM_MCS23 = 113,
+
+ WL_RATE_3X3_VHT0SS3 = 106,
+ WL_RATE_3X3_VHT1SS3 = 107,
+ WL_RATE_3X3_VHT2SS3 = 108,
+ WL_RATE_3X3_VHT3SS3 = 109,
+ WL_RATE_3X3_VHT4SS3 = 110,
+ WL_RATE_3X3_VHT5SS3 = 111,
+ WL_RATE_3X3_VHT6SS3 = 112,
+ WL_RATE_3X3_VHT7SS3 = 113,
+ WL_RATE_3X3_VHT8SS3 = 114,
+ WL_RATE_3X3_VHT9SS3 = 115,
+
+
+ WL_NUMRATES = 116
+} clm_rates_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/dhd.h b/drivers/net/wireless/bcmdhd/dhd.h
new file mode 100644
index 000000000000..c3e05c9796c8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd.h
@@ -0,0 +1,789 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd.h 329678 2012-04-26 08:51:32Z $
+ */
+
+/****************
+ * Common types *
+ */
+
+#ifndef _dhd_h_
+#define _dhd_h_
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_HAS_WAKELOCK)
+#include <linux/wakelock.h>
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined (CONFIG_HAS_WAKELOCK) */
+/* The kernel threading is sdio-specific */
+struct task_struct;
+struct sched_param;
+int setScheduler(struct task_struct *p, int policy, struct sched_param *param);
+
+#define ALL_INTERFACES 0xff
+
+#include <wlioctl.h>
+#include <wlfc_proto.h>
+
+
+/* Forward decls */
+struct dhd_bus;
+struct dhd_prot;
+struct dhd_info;
+
+/* The level of bus communication with the dongle */
+enum dhd_bus_state {
+ DHD_BUS_DOWN, /* Not ready for frame transfers */
+ DHD_BUS_LOAD, /* Download access only (CPU reset) */
+ DHD_BUS_DATA /* Ready for frame transfers */
+};
+
+
+/* Firmware requested operation mode */
+#define STA_MASK 0x0001
+#define HOSTAPD_MASK 0x0002
+#define WFD_MASK 0x0004
+#define SOFTAP_FW_MASK 0x0008
+#define P2P_GO_ENABLED 0x0010
+#define P2P_GC_ENABLED 0x0020
+#define CONCURENT_MASK 0x00F0
+
+/* max sequential rxcntl timeouts to set HANG event */
+#define MAX_CNTL_TIMEOUT 2
+
+#define DHD_SCAN_ACTIVE_TIME 40 /* ms : Embedded default Active setting from DHD Driver */
+#define DHD_SCAN_PASSIVE_TIME 130 /* ms: Embedded default Passive setting from DHD Driver */
+
+#ifndef POWERUP_MAX_RETRY
+#define POWERUP_MAX_RETRY (10) /* how many times we retry to power up the chip */
+#endif
+#ifndef POWERUP_WAIT_MS
+#define POWERUP_WAIT_MS		(2000) /* ms: timeout waiting for wifi to come up */
+#endif
+
+enum dhd_bus_wake_state {
+ WAKE_LOCK_OFF,
+ WAKE_LOCK_PRIV,
+ WAKE_LOCK_DPC,
+ WAKE_LOCK_IOCTL,
+ WAKE_LOCK_DOWNLOAD,
+ WAKE_LOCK_TMOUT,
+ WAKE_LOCK_WATCHDOG,
+ WAKE_LOCK_LINK_DOWN_TMOUT,
+ WAKE_LOCK_PNO_FIND_TMOUT,
+ WAKE_LOCK_SOFTAP_SET,
+ WAKE_LOCK_SOFTAP_STOP,
+ WAKE_LOCK_SOFTAP_START,
+ WAKE_LOCK_SOFTAP_THREAD,
+ WAKE_LOCK_MAX
+};
+
+enum dhd_prealloc_index {
+ DHD_PREALLOC_PROT = 0,
+ DHD_PREALLOC_RXBUF,
+ DHD_PREALLOC_DATABUF,
+ DHD_PREALLOC_OSL_BUF
+};
+
+typedef enum {
+ DHD_IF_NONE = 0,
+ DHD_IF_ADD,
+ DHD_IF_DEL,
+ DHD_IF_CHANGE,
+ DHD_IF_DELETING
+} dhd_if_state_t;
+
+
+#if defined(CONFIG_DHD_USE_STATIC_BUF)
+
+uint8* dhd_os_prealloc(void *osh, int section, uint size);
+void dhd_os_prefree(void *osh, void *addr, uint size);
+#define DHD_OS_PREALLOC(osh, section, size) dhd_os_prealloc(osh, section, size)
+#define DHD_OS_PREFREE(osh, addr, size) dhd_os_prefree(osh, addr, size)
+
+#else
+
+#define DHD_OS_PREALLOC(osh, section, size) MALLOC(osh, size)
+#define DHD_OS_PREFREE(osh, addr, size) MFREE(osh, addr, size)
+
+#endif /* defined(CONFIG_DHD_USE_STATIC_BUF) */
+
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN 32
+#endif
+
+/* host packet-reordering logic */
+/* the following structure holds the reorder buffers (void **p) */
+typedef struct reorder_info {
+ void **p;
+ uint8 flow_id;
+ uint8 cur_idx;
+ uint8 exp_idx;
+ uint8 max_idx;
+ uint8 pend_pkts;
+} reorder_info_t;
+
+/* Common structure for module and instance linkage */
+typedef struct dhd_pub {
+	/* Linkage pointers */
+ osl_t *osh; /* OSL handle */
+ struct dhd_bus *bus; /* Bus module handle */
+ struct dhd_prot *prot; /* Protocol module handle */
+ struct dhd_info *info; /* Info module handle */
+
+ /* Internal dhd items */
+ bool up; /* Driver up/down (to OS) */
+ bool txoff; /* Transmit flow-controlled */
+ bool dongle_reset; /* TRUE = DEVRESET put dongle into reset */
+ enum dhd_bus_state busstate;
+ uint hdrlen; /* Total DHD header length (proto + bus) */
+ uint maxctl; /* Max size rxctl request from proto to bus */
+ uint rxsz; /* Rx buffer size bus module should use */
+ uint8 wme_dp; /* wme discard priority */
+
+ /* Dongle media info */
+ bool iswl; /* Dongle-resident driver is wl */
+ ulong drv_version; /* Version of dongle-resident driver */
+ struct ether_addr mac; /* MAC address obtained from dongle */
+ dngl_stats_t dstats; /* Stats for dongle-based data */
+
+ /* Additional stats for the bus level */
+ ulong tx_packets; /* Data packets sent to dongle */
+ ulong tx_multicast; /* Multicast data packets sent to dongle */
+ ulong tx_errors; /* Errors in sending data to dongle */
+ ulong tx_ctlpkts; /* Control packets sent to dongle */
+ ulong tx_ctlerrs; /* Errors sending control frames to dongle */
+ ulong rx_packets; /* Packets sent up the network interface */
+ ulong rx_multicast; /* Multicast packets sent up the network interface */
+ ulong rx_errors; /* Errors processing rx data packets */
+ ulong rx_ctlpkts; /* Control frames processed from dongle */
+ ulong rx_ctlerrs; /* Errors in processing rx control frames */
+ ulong rx_dropped; /* Packets dropped locally (no memory) */
+ ulong rx_flushed; /* Packets flushed due to unscheduled sendup thread */
+ ulong wd_dpc_sched; /* Number of times dhd dpc scheduled by watchdog timer */
+
+ ulong rx_readahead_cnt; /* Number of packets where header read-ahead was used. */
+ ulong tx_realloc; /* Number of tx packets we had to realloc for headroom */
+ ulong fc_packets; /* Number of flow control pkts recvd */
+
+ /* Last error return */
+ int bcmerror;
+ uint tickcnt;
+
+ /* Last error from dongle */
+ int dongle_error;
+
+ uint8 country_code[WLC_CNTRY_BUF_SZ];
+
+ /* Suspend disable flag and "in suspend" flag */
+ int suspend_disable_flag; /* "1" to disable all extra powersaving during suspend */
+ int in_suspend; /* flag set to 1 when early suspend called */
+#ifdef PNO_SUPPORT
+	int pno_enable;			/* pno status: 1 means pno is enabled */
+	int pno_suspend;		/* pno suspend status: 1 means pno is suspended */
+#endif /* PNO_SUPPORT */
+	int dtim_skip;         /* dtim skip, default 0 means wake on each dtim */
+
+#ifdef PKT_FILTER_SUPPORT
+ int early_suspended; /* Early suspend status */
+ int dhcp_in_progress; /* DHCP period */
+#endif
+
+	/* Pkt filter definition */
+ char * pktfilter[100];
+ int pktfilter_count;
+
+ wl_country_t dhd_cspec; /* Current Locale info */
+ char eventmask[WL_EVENTING_MASK_LEN];
+ int op_mode; /* STA, HostAPD, WFD, SoftAP */
+
+/* Set this to 1 to use a separate interface (p2p0) for p2p operations.
+ * For ICS MR1 releases it should be disabled to be compatible with the ICS MR1 Framework;
+ * see target dhd-cdc-sdmmc-panda-cfg80211-icsmr1-gpl-debug in Makefile
+ */
+/* #define WL_ENABLE_P2P_IF 1 */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_HAS_WAKELOCK)
+ struct wake_lock wakelock[WAKE_LOCK_MAX];
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined (CONFIG_HAS_WAKELOCK) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+ struct mutex wl_start_stop_lock; /* lock/unlock for Android start/stop */
+ struct mutex wl_softap_lock; /* lock/unlock for any SoftAP/STA settings */
+#endif
+
+#ifdef WLBTAMP
+ uint16 maxdatablks;
+#endif /* WLBTAMP */
+#ifdef PROP_TXSTATUS
+ int wlfc_enabled;
+ void* wlfc_state;
+#endif
+ bool dongle_isolation;
+ int hang_was_sent;
+ int rxcnt_timeout; /* counter rxcnt timeout to send HANG */
+ int txcnt_timeout; /* counter txcnt timeout to send HANG */
+#ifdef WLMEDIA_HTSF
+ uint8 htsfdlystat_sz; /* Size of delay stats, max 255B */
+#endif
+ struct reorder_info *reorder_bufs[WLHOST_REORDERDATA_MAXFLOWS];
+} dhd_pub_t;
+
+
+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+
+ #define DHD_PM_RESUME_WAIT_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
+ #define _DHD_PM_RESUME_WAIT(a, b) do {\
+ int retry = 0; \
+ SMP_RD_BARRIER_DEPENDS(); \
+ while (dhd_mmc_suspend && retry++ != b) { \
+ SMP_RD_BARRIER_DEPENDS(); \
+ wait_event_interruptible_timeout(a, !dhd_mmc_suspend, HZ/100); \
+ } \
+ } while (0)
+ #define DHD_PM_RESUME_WAIT(a) _DHD_PM_RESUME_WAIT(a, 200)
+ #define DHD_PM_RESUME_WAIT_FOREVER(a) _DHD_PM_RESUME_WAIT(a, ~0)
+ #define DHD_PM_RESUME_RETURN_ERROR(a) do { if (dhd_mmc_suspend) return a; } while (0)
+ #define DHD_PM_RESUME_RETURN do { if (dhd_mmc_suspend) return; } while (0)
+
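+ /* Editor note: SPINWAIT_SLEEP() polls 'exp' in roughly 10 ms sleeps
+  * (HZ/100 jiffies) for approximately 'us' microseconds before giving up;
+  * the countdown is kept in microsecond units, hence the 10000 step.
+  */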
+ #define DHD_SPINWAIT_SLEEP_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
+ #define SPINWAIT_SLEEP(a, exp, us) do { \
+ uint countdown = (us) + 9999; \
+ while ((exp) && (countdown >= 10000)) { \
+ wait_event_interruptible_timeout(a, FALSE, HZ/100); \
+ countdown -= 10000; \
+ } \
+ } while (0)
+
+ #else
+
+ #define DHD_PM_RESUME_WAIT_INIT(a)
+ #define DHD_PM_RESUME_WAIT(a)
+ #define DHD_PM_RESUME_WAIT_FOREVER(a)
+ #define DHD_PM_RESUME_RETURN_ERROR(a)
+ #define DHD_PM_RESUME_RETURN
+
+ #define DHD_SPINWAIT_SLEEP_INIT(a)
+ #define SPINWAIT_SLEEP(a, exp, us) do { \
+ uint countdown = (us) + 9; \
+ while ((exp) && (countdown >= 10)) { \
+ OSL_DELAY(10); \
+ countdown -= 10; \
+ } \
+ } while (0)
+
+ #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+#ifndef DHDTHREAD
+#undef SPINWAIT_SLEEP
+#define SPINWAIT_SLEEP(a, exp, us) SPINWAIT(exp, us)
+#endif /* DHDTHREAD */
+#define DHD_IF_VIF 0x01 /* Virtual IF (Hidden from user) */
+
+unsigned long dhd_os_spin_lock(dhd_pub_t *pub);
+void dhd_os_spin_unlock(dhd_pub_t *pub, unsigned long flags);
+
+/* Wakelock Functions */
+extern int dhd_os_wake_lock(dhd_pub_t *pub);
+extern int dhd_os_wake_unlock(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_timeout(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_timeout_enable(dhd_pub_t *pub, int val);
+
+inline static void MUTEX_LOCK_SOFTAP_SET_INIT(dhd_pub_t * dhdp)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+ mutex_init(&dhdp->wl_softap_lock);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+}
+
+inline static void MUTEX_LOCK_SOFTAP_SET(dhd_pub_t * dhdp)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+ mutex_lock(&dhdp->wl_softap_lock);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+}
+
+inline static void MUTEX_UNLOCK_SOFTAP_SET(dhd_pub_t * dhdp)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+ mutex_unlock(&dhdp->wl_softap_lock);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+}
+
+#define DHD_OS_WAKE_LOCK(pub) dhd_os_wake_lock(pub)
+#define DHD_OS_WAKE_UNLOCK(pub) dhd_os_wake_unlock(pub)
+#define DHD_OS_WAKE_LOCK_TIMEOUT(pub) dhd_os_wake_lock_timeout(pub)
+#define DHD_OS_WAKE_LOCK_TIMEOUT_ENABLE(pub, val) dhd_os_wake_lock_timeout_enable(pub, val)
+#define DHD_PACKET_TIMEOUT_MS 1000
+#define DHD_EVENT_TIMEOUT_MS 1500
+
+/* interface operations (register, remove) should be atomic; use this lock to prevent race
+ * conditions between wifi on/off and the interface operation functions
+ */
+void dhd_net_if_lock(struct net_device *dev);
+void dhd_net_if_unlock(struct net_device *dev);
+
+typedef struct dhd_if_event {
+ uint8 ifidx;
+ uint8 action;
+ uint8 flags;
+ uint8 bssidx;
+ uint8 is_AP;
+} dhd_if_event_t;
+
+typedef enum dhd_attach_states
+{
+ DHD_ATTACH_STATE_INIT = 0x0,
+ DHD_ATTACH_STATE_NET_ALLOC = 0x1,
+ DHD_ATTACH_STATE_DHD_ALLOC = 0x2,
+ DHD_ATTACH_STATE_ADD_IF = 0x4,
+ DHD_ATTACH_STATE_PROT_ATTACH = 0x8,
+ DHD_ATTACH_STATE_WL_ATTACH = 0x10,
+ DHD_ATTACH_STATE_THREADS_CREATED = 0x20,
+ DHD_ATTACH_STATE_WAKELOCKS_INIT = 0x40,
+ DHD_ATTACH_STATE_CFG80211 = 0x80,
+ DHD_ATTACH_STATE_EARLYSUSPEND_DONE = 0x100,
+ DHD_ATTACH_STATE_DONE = 0x200
+} dhd_attach_states_t;
+
+/* Value -1 means we are unsuccessful in creating the kthread. */
+#define DHD_PID_KT_INVALID -1
+/* Value -2 means we are unsuccessful in both creating the kthread and tasklet */
+#define DHD_PID_KT_TL_INVALID -2
+
+/*
+ * Exported from dhd OS modules (dhd_linux/dhd_ndis)
+ */
+
+/* To allow osl_attach/detach calls from os-independent modules */
+osl_t *dhd_osl_attach(void *pdev, uint bustype);
+void dhd_osl_detach(osl_t *osh);
+
+/* Indication from bus module regarding presence/insertion of dongle.
+ * Return dhd_pub_t pointer, used as handle to OS module in later calls.
+ * Returned structure should have bus and prot pointers filled in.
+ * bus_hdrlen specifies required headroom for bus module header.
+ */
+extern dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen);
+#if defined(WLP2P) && defined(WL_CFG80211)
+/* To allow attach/detach calls corresponding to p2p0 interface */
+extern int dhd_attach_p2p(dhd_pub_t *);
+extern int dhd_detach_p2p(dhd_pub_t *);
+#endif /* WLP2P && WL_CFG80211 */
+extern int dhd_net_attach(dhd_pub_t *dhdp, int idx);
+
+/* Indication from bus module regarding removal/absence of dongle */
+extern void dhd_detach(dhd_pub_t *dhdp);
+extern void dhd_free(dhd_pub_t *dhdp);
+
+/* Indication from bus module to change flow-control state */
+extern void dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool on);
+
+extern bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec);
+
+/* Receive frame for delivery to OS. Callee disposes of rxp. */
+extern void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *rxp, int numpkt, uint8 chan);
+
+/* Return pointer to interface name */
+extern char *dhd_ifname(dhd_pub_t *dhdp, int idx);
+
+/* Request scheduling of the bus dpc */
+extern void dhd_sched_dpc(dhd_pub_t *dhdp);
+
+/* Notify tx completion */
+extern void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success);
+
+/* OS independent layer functions */
+extern int dhd_os_proto_block(dhd_pub_t * pub);
+extern int dhd_os_proto_unblock(dhd_pub_t * pub);
+extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition, bool * pending);
+extern int dhd_os_ioctl_resp_wake(dhd_pub_t * pub);
+extern unsigned int dhd_os_get_ioctl_resp_timeout(void);
+extern void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec);
+extern void * dhd_os_open_image(char * filename);
+extern int dhd_os_get_image_block(char * buf, int len, void * image);
+extern void dhd_os_close_image(void * image);
+extern void dhd_os_wd_timer(void *bus, uint wdtick);
+extern void dhd_os_sdlock(dhd_pub_t * pub);
+extern void dhd_os_sdunlock(dhd_pub_t * pub);
+extern void dhd_os_sdlock_txq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_txq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_sndup_rxq(dhd_pub_t * pub);
+extern void dhd_customer_gpio_wlan_ctrl(int onoff);
+extern int dhd_custom_get_mac_address(unsigned char *buf);
+extern void dhd_os_sdunlock_sndup_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_eventq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_eventq(dhd_pub_t * pub);
+extern bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret);
+
+#ifdef PNO_SUPPORT
+extern int dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled);
+extern int dhd_pno_clean(dhd_pub_t *dhd);
+extern int dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid,
+ ushort scan_fr, int pno_repeat, int pno_freq_expo_max);
+extern int dhd_pno_get_status(dhd_pub_t *dhd);
+extern int dhd_dev_pno_reset(struct net_device *dev);
+extern int dhd_dev_pno_set(struct net_device *dev, wlc_ssid_t* ssids_local,
+ int nssid, ushort scan_fr, int pno_repeat, int pno_freq_expo_max);
+extern int dhd_dev_pno_enable(struct net_device *dev, int pfn_enabled);
+extern int dhd_dev_get_pno_status(struct net_device *dev);
+#endif /* PNO_SUPPORT */
+
+#define DHD_UNICAST_FILTER_NUM 0
+#define DHD_BROADCAST_FILTER_NUM 1
+#define DHD_MULTICAST4_FILTER_NUM 2
+#define DHD_MULTICAST6_FILTER_NUM 3
+extern int net_os_set_packet_filter(struct net_device *dev, int val);
+extern int net_os_rxfilter_add_remove(struct net_device *dev, int val, int num);
+
+extern int dhd_get_dtim_skip(dhd_pub_t *dhd);
+extern bool dhd_check_ap_wfd_mode_set(dhd_pub_t *dhd);
+
+#ifdef DHD_DEBUG
+extern int write_to_file(dhd_pub_t *dhd, uint8 *buf, int size);
+#endif /* DHD_DEBUG */
+#if defined(OOB_INTR_ONLY)
+extern int dhd_customer_oob_irq_map(unsigned long *irq_flags_ptr);
+#endif /* defined(OOB_INTR_ONLY) */
+extern void dhd_os_sdtxlock(dhd_pub_t * pub);
+extern void dhd_os_sdtxunlock(dhd_pub_t * pub);
+
+typedef struct {
+ uint32 limit; /* Expiration time (usec) */
+ uint32 increment; /* Current expiration increment (usec) */
+ uint32 elapsed; /* Current elapsed time (usec) */
+ uint32 tick; /* O/S tick time (usec) */
+} dhd_timeout_t;
+
+extern void dhd_timeout_start(dhd_timeout_t *tmo, uint usec);
+extern int dhd_timeout_expired(dhd_timeout_t *tmo);
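+
+/* Illustrative sketch (not part of the original driver): the helpers above
+ * support a simple poll loop; device_ready() is a hypothetical condition.
+ *
+ *	dhd_timeout_t tmo;
+ *	dhd_timeout_start(&tmo, 2000000);
+ *	while (!device_ready() && !dhd_timeout_expired(&tmo))
+ *		;
+ *	if (!device_ready())
+ *		return BCME_ERROR;
+ */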
+
+extern int dhd_ifname2idx(struct dhd_info *dhd, char *name);
+extern int dhd_net2idx(struct dhd_info *dhd, struct net_device *net);
+extern struct net_device * dhd_idx2net(void *pub, int ifidx);
+extern int wl_host_event(dhd_pub_t *dhd_pub, int *idx, void *pktdata,
+ wl_event_msg_t *, void **data_ptr);
+extern void wl_event_to_host_order(wl_event_msg_t * evt);
+
+extern int dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len);
+extern int dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set,
+ int ifindex);
+
+extern void dhd_common_init(osl_t *osh);
+
+extern int dhd_do_driver_init(struct net_device *net);
+extern int dhd_add_if(struct dhd_info *dhd, int ifidx, void *handle,
+ char *name, uint8 *mac_addr, uint32 flags, uint8 bssidx);
+extern void dhd_del_if(struct dhd_info *dhd, int ifidx);
+
+extern void dhd_vif_add(struct dhd_info *dhd, int ifidx, char * name);
+extern void dhd_vif_del(struct dhd_info *dhd, int ifidx);
+
+extern void dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx);
+extern void dhd_vif_sendup(struct dhd_info *dhd, int ifidx, uchar *cp, int len);
+
+
+/* Send packet to dongle via data channel */
+extern int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pkt);
+
+/* send up locally generated event */
+extern void dhd_sendup_event_common(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data);
+/* Send event to host */
+extern void dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data);
+extern int dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag);
+extern uint dhd_bus_status(dhd_pub_t *dhdp);
+extern int dhd_bus_start(dhd_pub_t *dhdp);
+extern int dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size);
+extern void dhd_print_buf(void *pbuf, int len, int bytes_per_line);
+extern bool dhd_is_associated(dhd_pub_t *dhd, void *bss_buf);
+extern uint dhd_bus_chip_id(dhd_pub_t *dhdp);
+
+#if defined(KEEP_ALIVE)
+extern int dhd_keep_alive_onoff(dhd_pub_t *dhd);
+#endif /* KEEP_ALIVE */
+
+#ifdef ARP_OFFLOAD_SUPPORT
+extern void dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode);
+extern void dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable);
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+
+typedef enum cust_gpio_modes {
+ WLAN_RESET_ON,
+ WLAN_RESET_OFF,
+ WLAN_POWER_ON,
+ WLAN_POWER_OFF
+} cust_gpio_modes_t;
+
+extern int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag);
+extern int wl_iw_send_priv_event(struct net_device *dev, char *flag);
+/*
+ * Insmod parameters for debug/test
+ */
+
+/* Watchdog timer interval */
+extern uint dhd_watchdog_ms;
+
+#if defined(DHD_DEBUG)
+/* Console output poll interval */
+extern uint dhd_console_ms;
+extern uint wl_msg_level;
+#endif /* defined(DHD_DEBUG) */
+
+extern uint dhd_slpauto;
+
+/* Use interrupts */
+extern uint dhd_intr;
+
+/* Use polling */
+extern uint dhd_poll;
+
+/* ARP offload agent mode */
+extern uint dhd_arp_mode;
+
+/* ARP offload enable */
+extern uint dhd_arp_enable;
+
+/* Pkt filter enable control */
+extern uint dhd_pkt_filter_enable;
+
+/* Pkt filter init setup */
+extern uint dhd_pkt_filter_init;
+
+/* Pkt filter mode control */
+extern uint dhd_master_mode;
+
+/* Roaming mode control */
+extern uint dhd_roam_disable;
+
+/* Radio up/down control */
+extern uint dhd_radio_up;
+
+/* Initial idletime ticks (may be -1 for immediate idle, 0 for no idle) */
+extern int dhd_idletime;
+#define DHD_IDLETIME_TICKS 1
+
+/* SDIO Drive Strength */
+extern uint dhd_sdiod_drive_strength;
+
+/* Override to force tx queueing all the time */
+extern uint dhd_force_tx_queueing;
+/* Default KEEP_ALIVE Period is 55 sec to prevent AP from sending Keep Alive probe frame */
+#define KEEP_ALIVE_PERIOD 55000
+#define NULL_PKT_STR "null_pkt"
+
+#ifdef SDTEST
+/* Echo packet generator (SDIO), pkts/s */
+extern uint dhd_pktgen;
+
+/* Echo packet len (0 => sawtooth, max 1800) */
+extern uint dhd_pktgen_len;
+#define MAX_PKTGEN_LEN 1800
+#endif
+
+
+/* optionally set by a module_param_string() */
+#define MOD_PARAM_PATHLEN 2048
+extern char fw_path[MOD_PARAM_PATHLEN];
+extern char nv_path[MOD_PARAM_PATHLEN];
+
+#ifdef SOFTAP
+extern char fw_path2[MOD_PARAM_PATHLEN];
+#endif
+
+/* Flag to indicate if we should download firmware on driver load */
+extern uint dhd_download_fw_on_driverload;
+
+/* For supporting multiple interfaces */
+#define DHD_MAX_IFS 16
+#define DHD_DEL_IF -0xe
+#define DHD_BAD_IF -0xf
+
+#ifdef PROP_TXSTATUS
+/* Please be mindful that total pkttag space is 32 octets only */
+typedef struct dhd_pkttag {
+ /*
+ b[11 ] - 1 = this packet was sent in response to one time packet request,
+ do not increment credit on status for this one. [WLFC_CTL_TYPE_MAC_REQUEST_PACKET].
+ b[10 ] - 1 = signal-only-packet to firmware [i.e. nothing to piggyback on]
+ b[9 ] - 1 = packet is host->firmware (transmit direction)
+ - 0 = packet received from firmware (firmware->host)
+ b[8 ] - 1 = packet was sent due to credit_request (pspoll),
+ packet does not count against FIFO credit.
+ - 0 = normal transaction, packet counts against FIFO credit
+ b[7 ] - 1 = AP, 0 = STA
+ b[6:4] - AC FIFO number
+ b[3:0] - interface index
+ */
+ uint16 if_flags;
+ /* destination MAC address for this packet so that not every
+ module needs to open the packet to find this
+ */
+ uint8 dstn_ether[ETHER_ADDR_LEN];
+ /*
+ This 32-bit value goes from host to device with every packet.
+ */
+ uint32 htod_tag;
+ /* bus specific stuff */
+ union {
+ struct {
+ void* stuff;
+ uint32 thing1;
+ uint32 thing2;
+ } sd;
+ struct {
+ void* bus;
+ void* urb;
+ } usb;
+ } bus_specific;
+} dhd_pkttag_t;
+
+#define DHD_PKTTAG_SET_H2DTAG(tag, h2dvalue) ((dhd_pkttag_t*)(tag))->htod_tag = (h2dvalue)
+#define DHD_PKTTAG_H2DTAG(tag) (((dhd_pkttag_t*)(tag))->htod_tag)
+
+#define DHD_PKTTAG_IFMASK 0xf
+#define DHD_PKTTAG_IFTYPE_MASK 0x1
+#define DHD_PKTTAG_IFTYPE_SHIFT 7
+#define DHD_PKTTAG_FIFO_MASK 0x7
+#define DHD_PKTTAG_FIFO_SHIFT 4
+
+#define DHD_PKTTAG_SIGNALONLY_MASK 0x1
+#define DHD_PKTTAG_SIGNALONLY_SHIFT 10
+
+#define DHD_PKTTAG_ONETIMEPKTRQST_MASK 0x1
+#define DHD_PKTTAG_ONETIMEPKTRQST_SHIFT 11
+
+#define DHD_PKTTAG_PKTDIR_MASK 0x1
+#define DHD_PKTTAG_PKTDIR_SHIFT 9
+
+#define DHD_PKTTAG_CREDITCHECK_MASK 0x1
+#define DHD_PKTTAG_CREDITCHECK_SHIFT 8
+
+#define DHD_PKTTAG_INVALID_FIFOID 0x7
+
+#define DHD_PKTTAG_SETFIFO(tag, fifo) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & ~(DHD_PKTTAG_FIFO_MASK << DHD_PKTTAG_FIFO_SHIFT)) | \
+ (((fifo) & DHD_PKTTAG_FIFO_MASK) << DHD_PKTTAG_FIFO_SHIFT)
+#define DHD_PKTTAG_FIFO(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_FIFO_SHIFT) & DHD_PKTTAG_FIFO_MASK)
+
+#define DHD_PKTTAG_SETIF(tag, if) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & ~DHD_PKTTAG_IFMASK) | ((if) & DHD_PKTTAG_IFMASK)
+#define DHD_PKTTAG_IF(tag) (((dhd_pkttag_t*)(tag))->if_flags & DHD_PKTTAG_IFMASK)
+
+#define DHD_PKTTAG_SETIFTYPE(tag, isAP) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_IFTYPE_MASK << DHD_PKTTAG_IFTYPE_SHIFT)) | \
+ (((isAP) & DHD_PKTTAG_IFTYPE_MASK) << DHD_PKTTAG_IFTYPE_SHIFT)
+#define DHD_PKTTAG_IFTYPE(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_IFTYPE_SHIFT) & DHD_PKTTAG_IFTYPE_MASK)
+
+#define DHD_PKTTAG_SETCREDITCHECK(tag, check) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_CREDITCHECK_MASK << DHD_PKTTAG_CREDITCHECK_SHIFT)) | \
+ (((check) & DHD_PKTTAG_CREDITCHECK_MASK) << DHD_PKTTAG_CREDITCHECK_SHIFT)
+#define DHD_PKTTAG_CREDITCHECK(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_CREDITCHECK_SHIFT) & DHD_PKTTAG_CREDITCHECK_MASK)
+
+#define DHD_PKTTAG_SETPKTDIR(tag, dir) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_PKTDIR_MASK << DHD_PKTTAG_PKTDIR_SHIFT)) | \
+ (((dir) & DHD_PKTTAG_PKTDIR_MASK) << DHD_PKTTAG_PKTDIR_SHIFT)
+#define DHD_PKTTAG_PKTDIR(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_PKTDIR_SHIFT) & DHD_PKTTAG_PKTDIR_MASK)
+
+#define DHD_PKTTAG_SETSIGNALONLY(tag, signalonly) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_SIGNALONLY_MASK << DHD_PKTTAG_SIGNALONLY_SHIFT)) | \
+ (((signalonly) & DHD_PKTTAG_SIGNALONLY_MASK) << DHD_PKTTAG_SIGNALONLY_SHIFT)
+#define DHD_PKTTAG_SIGNALONLY(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_SIGNALONLY_SHIFT) & DHD_PKTTAG_SIGNALONLY_MASK)
+
+#define DHD_PKTTAG_SETONETIMEPKTRQST(tag) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_ONETIMEPKTRQST_MASK << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT)) | \
+ (1 << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT)
+#define DHD_PKTTAG_ONETIMEPKTRQST(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_ONETIMEPKTRQST_SHIFT) & DHD_PKTTAG_ONETIMEPKTRQST_MASK)
+
+#define DHD_PKTTAG_SETDSTN(tag, dstn_MAC_ea) memcpy(((dhd_pkttag_t*)((tag)))->dstn_ether, \
+ (dstn_MAC_ea), ETHER_ADDR_LEN)
+#define DHD_PKTTAG_DSTN(tag) ((dhd_pkttag_t*)(tag))->dstn_ether
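+
+/* Illustrative sketch (not part of the original driver): how the accessor
+ * macros above are typically paired when tagging a packet before transmit;
+ * pkt, ifidx and fifo are hypothetical locals.
+ *
+ *	void *tag = PKTTAG(pkt);
+ *	DHD_PKTTAG_SETIF(tag, ifidx);
+ *	DHD_PKTTAG_SETFIFO(tag, fifo);
+ *	DHD_PKTTAG_SETPKTDIR(tag, 1);
+ *	if (DHD_PKTTAG_FIFO(tag) == DHD_PKTTAG_INVALID_FIFOID)
+ *		return BCME_ERROR;
+ */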
+
+typedef int (*f_commitpkt_t)(void* ctx, void* p);
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define DHD_WLFC_CTRINC_MAC_CLOSE(entry) do { (entry)->closed_ct++; } while (0)
+#define DHD_WLFC_CTRINC_MAC_OPEN(entry) do { (entry)->opened_ct++; } while (0)
+#else
+#define DHD_WLFC_CTRINC_MAC_CLOSE(entry) do {} while (0)
+#define DHD_WLFC_CTRINC_MAC_OPEN(entry) do {} while (0)
+#endif
+
+#endif /* PROP_TXSTATUS */
+
+extern void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar);
+extern void dhd_wait_event_wakeup(dhd_pub_t*dhd);
+
+#define IFLOCK_INIT(lock) *lock = 0
+#define IFLOCK(lock) while (InterlockedCompareExchange((lock), 1, 0)) \
+ NdisStallExecution(1);
+#define IFUNLOCK(lock) InterlockedExchange((lock), 0)
+#define IFLOCK_FREE(lock)
+
+#ifdef PNO_SUPPORT
+extern int dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled);
+extern int dhd_pnoenable(dhd_pub_t *dhd, int pfn_enabled);
+extern int dhd_pno_clean(dhd_pub_t *dhd);
+extern int dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid,
+ ushort scan_fr, int pno_repeat, int pno_freq_expo_max);
+extern int dhd_pno_get_status(dhd_pub_t *dhd);
+extern int dhd_pno_set_add(dhd_pub_t *dhd, wl_pfn_t *netinfo, int nssid, ushort scan_fr,
+ ushort slowscan_fr, uint8 pno_repeat, uint8 pno_freq_expo_max, int16 flags);
+extern int dhd_pno_cfg(dhd_pub_t *dhd, wl_pfn_cfg_t *pcfg);
+extern int dhd_pno_suspend(dhd_pub_t *dhd, int pfn_suspend);
+#endif /* PNO_SUPPORT */
+#ifdef ARP_OFFLOAD_SUPPORT
+#define MAX_IPV4_ENTRIES 8
+/* dhd_common ARP offload wrappers */
+void dhd_aoe_hostip_clr(dhd_pub_t *dhd);
+void dhd_aoe_arp_clr(dhd_pub_t *dhd);
+int dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen);
+void dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr);
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#endif /* _dhd_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_bta.c b/drivers/net/wireless/bcmdhd/dhd_bta.c
new file mode 100644
index 000000000000..15c605ea248f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_bta.c
@@ -0,0 +1,338 @@
+/*
+ * BT-AMP support routines
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_bta.c 303834 2011-12-20 06:17:39Z $
+ */
+#ifndef WLBTAMP
+#error "WLBTAMP is not defined"
+#endif /* WLBTAMP */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmcdc.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <proto/802.11.h>
+#include <proto/802.11_bta.h>
+#include <proto/bt_amp_hci.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhdioctl.h>
+#include <dhd_dbg.h>
+
+#include <dhd_bta.h>
+
+
+#ifdef SEND_HCI_CMD_VIA_IOCTL
+#define BTA_HCI_CMD_MAX_LEN HCI_CMD_PREAMBLE_SIZE + HCI_CMD_DATA_SIZE
+
+/* Send HCI cmd via wl iovar HCI_cmd to the dongle. */
+int
+dhd_bta_docmd(dhd_pub_t *pub, void *cmd_buf, uint cmd_len)
+{
+ amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)cmd_buf;
+ uint8 buf[BTA_HCI_CMD_MAX_LEN + 16];
+ uint len = sizeof(buf);
+ wl_ioctl_t ioc;
+
+ if (cmd_len < HCI_CMD_PREAMBLE_SIZE)
+ return BCME_BADLEN;
+
+ if ((uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE > cmd_len)
+ return BCME_BADLEN;
+
+ len = bcm_mkiovar("HCI_cmd",
+ (char *)cmd, (uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE, (char *)buf, len);
+
+
+ memset(&ioc, 0, sizeof(ioc));
+
+ ioc.cmd = WLC_SET_VAR;
+ ioc.buf = buf;
+ ioc.len = len;
+ ioc.set = TRUE;
+
+ /* dhd_wl_ioctl() takes an interface index; 0 selects the primary interface */
+ return dhd_wl_ioctl(pub, 0, &ioc, ioc.buf, ioc.len);
+}
+#else /* !SEND_HCI_CMD_VIA_IOCTL */
+
+static void
+dhd_bta_flush_hcidata(dhd_pub_t *pub, uint16 llh)
+{
+ int prec;
+ struct pktq *q;
+ uint count = 0;
+
+ q = dhd_bus_txq(pub->bus);
+ if (q == NULL)
+ return;
+
+ DHD_BTA(("dhd: flushing HCI ACL data for logical link %u...\n", llh));
+
+ dhd_os_sdlock_txq(pub);
+
+ /* Walk through the txq and toss all HCI ACL data packets */
+ PKTQ_PREC_ITER(q, prec) {
+ void *head_pkt = NULL;
+
+ while (pktq_ppeek(q, prec) != head_pkt) {
+ void *pkt = pktq_pdeq(q, prec);
+ int ifidx;
+
+ PKTPULL(pub->osh, pkt, dhd_bus_hdrlen(pub->bus));
+ dhd_prot_hdrpull(pub, &ifidx, pkt, NULL, NULL);
+
+ if (PKTLEN(pub->osh, pkt) >= RFC1042_HDR_LEN) {
+ struct ether_header *eh =
+ (struct ether_header *)PKTDATA(pub->osh, pkt);
+
+ if (ntoh16(eh->ether_type) < ETHER_TYPE_MIN) {
+ struct dot11_llc_snap_header *lsh =
+ (struct dot11_llc_snap_header *)&eh[1];
+
+ if (bcmp(lsh, BT_SIG_SNAP_MPROT,
+ DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
+ ntoh16(lsh->type) == BTA_PROT_L2CAP) {
+ amp_hci_ACL_data_t *ACL_data =
+ (amp_hci_ACL_data_t *)&lsh[1];
+ uint16 handle = ltoh16(ACL_data->handle);
+
+ if (HCI_ACL_DATA_HANDLE(handle) == llh) {
+ PKTFREE(pub->osh, pkt, TRUE);
+ count ++;
+ continue;
+ }
+ }
+ }
+ }
+
+ dhd_prot_hdrpush(pub, ifidx, pkt);
+ PKTPUSH(pub->osh, pkt, dhd_bus_hdrlen(pub->bus));
+
+ if (head_pkt == NULL)
+ head_pkt = pkt;
+ pktq_penq(q, prec, pkt);
+ }
+ }
+
+ dhd_os_sdunlock_txq(pub);
+
+ DHD_BTA(("dhd: flushed %u packet(s) for logical link %u...\n", count, llh));
+}
+
+/* Handle HCI cmd locally.
+ * Return 0: continue to send the cmd across SDIO
+ * < 0: stop, fail
+ * > 0: stop, success
+ */
+static int
+_dhd_bta_docmd(dhd_pub_t *pub, amp_hci_cmd_t *cmd)
+{
+ int status = 0;
+
+ switch (ltoh16_ua((uint8 *)&cmd->opcode)) {
+ case HCI_Enhanced_Flush: {
+ eflush_cmd_parms_t *cmdparms = (eflush_cmd_parms_t *)cmd->parms;
+ dhd_bta_flush_hcidata(pub, ltoh16_ua(cmdparms->llh));
+ break;
+ }
+ default:
+ break;
+ }
+
+ return status;
+}
+
+/* Send HCI cmd encapsulated in BT-SIG frame via data channel to the dongle. */
+int
+dhd_bta_docmd(dhd_pub_t *pub, void *cmd_buf, uint cmd_len)
+{
+ amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)cmd_buf;
+ struct ether_header *eh;
+ struct dot11_llc_snap_header *lsh;
+ osl_t *osh = pub->osh;
+ uint len;
+ void *p;
+ int status;
+
+ if (cmd_len < HCI_CMD_PREAMBLE_SIZE) {
+ DHD_ERROR(("dhd_bta_docmd: short command, cmd_len %u\n", cmd_len));
+ return BCME_BADLEN;
+ }
+
+ if ((len = (uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE) > cmd_len) {
+ DHD_ERROR(("dhd_bta_docmd: malformed command, len %u cmd_len %u\n",
+ len, cmd_len));
+ /* return BCME_BADLEN; */
+ }
+
+ /* intercept and handle the HCI cmd locally first, so an early return
+  * cannot leak the packet allocated below
+  */
+ if ((status = _dhd_bta_docmd(pub, cmd)) > 0)
+  return 0;
+ else if (status < 0)
+  return status;
+
+ p = PKTGET(osh, pub->hdrlen + RFC1042_HDR_LEN + len, TRUE);
+ if (p == NULL) {
+  DHD_ERROR(("dhd_bta_docmd: out of memory\n"));
+  return BCME_NOMEM;
+ }
+
+ /* copy in HCI cmd */
+ PKTPULL(osh, p, pub->hdrlen + RFC1042_HDR_LEN);
+ bcopy(cmd, PKTDATA(osh, p), len);
+
+ /* copy in partial Ethernet header with BT-SIG LLC/SNAP header */
+ PKTPUSH(osh, p, RFC1042_HDR_LEN);
+ eh = (struct ether_header *)PKTDATA(osh, p);
+ bzero(eh->ether_dhost, ETHER_ADDR_LEN);
+ ETHER_SET_LOCALADDR(eh->ether_dhost);
+ bcopy(&pub->mac, eh->ether_shost, ETHER_ADDR_LEN);
+ eh->ether_type = hton16(len + DOT11_LLC_SNAP_HDR_LEN);
+ lsh = (struct dot11_llc_snap_header *)&eh[1];
+ bcopy(BT_SIG_SNAP_MPROT, lsh, DOT11_LLC_SNAP_HDR_LEN - 2);
+ lsh->type = 0;
+
+ return dhd_sendpkt(pub, 0, p);
+}
+#endif /* !SEND_HCI_CMD_VIA_IOCTL */
+
+/* Send HCI ACL data to dongle via data channel */
+int
+dhd_bta_tx_hcidata(dhd_pub_t *pub, void *data_buf, uint data_len)
+{
+ amp_hci_ACL_data_t *data = (amp_hci_ACL_data_t *)data_buf;
+ struct ether_header *eh;
+ struct dot11_llc_snap_header *lsh;
+ osl_t *osh = pub->osh;
+ uint len;
+ void *p;
+
+ if (data_len < HCI_ACL_DATA_PREAMBLE_SIZE) {
+ DHD_ERROR(("dhd_bta_tx_hcidata: short data_buf, data_len %u\n", data_len));
+ return BCME_BADLEN;
+ }
+
+ if ((len = (uint)ltoh16(data->dlen) + HCI_ACL_DATA_PREAMBLE_SIZE) > data_len) {
+ DHD_ERROR(("dhd_bta_tx_hcidata: malformed hci data, len %u data_len %u\n",
+ len, data_len));
+ /* return BCME_BADLEN; */
+ }
+
+ p = PKTGET(osh, pub->hdrlen + RFC1042_HDR_LEN + len, TRUE);
+ if (p == NULL) {
+ DHD_ERROR(("dhd_bta_tx_hcidata: out of memory\n"));
+ return BCME_NOMEM;
+ }
+
+
+ /* copy in HCI ACL data header and HCI ACL data */
+ PKTPULL(osh, p, pub->hdrlen + RFC1042_HDR_LEN);
+ bcopy(data, PKTDATA(osh, p), len);
+
+ /* copy in partial Ethernet header with BT-SIG LLC/SNAP header */
+ PKTPUSH(osh, p, RFC1042_HDR_LEN);
+ eh = (struct ether_header *)PKTDATA(osh, p);
+ bzero(eh->ether_dhost, ETHER_ADDR_LEN);
+ bcopy(&pub->mac, eh->ether_shost, ETHER_ADDR_LEN);
+ eh->ether_type = hton16(len + DOT11_LLC_SNAP_HDR_LEN);
+ lsh = (struct dot11_llc_snap_header *)&eh[1];
+ bcopy(BT_SIG_SNAP_MPROT, lsh, DOT11_LLC_SNAP_HDR_LEN - 2);
+ lsh->type = HTON16(BTA_PROT_L2CAP);
+
+ return dhd_sendpkt(pub, 0, p);
+}
+
+/* txcomplete callback */
+void
+dhd_bta_tx_hcidata_complete(dhd_pub_t *dhdp, void *txp, bool success)
+{
+ uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, txp);
+ amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)(pktdata + RFC1042_HDR_LEN);
+ uint16 handle = ltoh16(ACL_data->handle);
+ uint16 llh = HCI_ACL_DATA_HANDLE(handle);
+
+ wl_event_msg_t event;
+ uint8 data[HCI_EVT_PREAMBLE_SIZE + sizeof(num_completed_data_blocks_evt_parms_t)];
+ amp_hci_event_t *evt;
+ num_completed_data_blocks_evt_parms_t *parms;
+
+ uint16 len = HCI_EVT_PREAMBLE_SIZE + sizeof(num_completed_data_blocks_evt_parms_t);
+
+ /* update the event struct */
+ memset(&event, 0, sizeof(event));
+ event.version = hton16(BCM_EVENT_MSG_VERSION);
+ event.event_type = hton32(WLC_E_BTA_HCI_EVENT);
+ event.status = 0;
+ event.reason = 0;
+ event.auth_type = 0;
+ event.datalen = hton32(len);
+ event.flags = 0;
+
+ /* generate Number of Completed Blocks event */
+ evt = (amp_hci_event_t *)data;
+ evt->ecode = HCI_Number_of_Completed_Data_Blocks;
+ evt->plen = sizeof(num_completed_data_blocks_evt_parms_t);
+
+ parms = (num_completed_data_blocks_evt_parms_t *)evt->parms;
+ htol16_ua_store(dhdp->maxdatablks, (uint8 *)&parms->num_blocks);
+ parms->num_handles = 1;
+ htol16_ua_store(llh, (uint8 *)&parms->completed[0].handle);
+ parms->completed[0].pkts = 1;
+ parms->completed[0].blocks = 1;
+
+ dhd_sendup_event_common(dhdp, &event, data);
+}
+
+/* event callback */
+void
+dhd_bta_doevt(dhd_pub_t *dhdp, void *data_buf, uint data_len)
+{
+ amp_hci_event_t *evt = (amp_hci_event_t *)data_buf;
+
+ switch (evt->ecode) {
+ case HCI_Command_Complete: {
+ cmd_complete_parms_t *parms = (cmd_complete_parms_t *)evt->parms;
+ switch (ltoh16_ua((uint8 *)&parms->opcode)) {
+ case HCI_Read_Data_Block_Size: {
+ read_data_block_size_evt_parms_t *parms2 =
+ (read_data_block_size_evt_parms_t *)parms->parms;
+ dhdp->maxdatablks = ltoh16_ua((uint8 *)&parms2->data_block_num);
+ break;
+ }
+ }
+ break;
+ }
+
+ case HCI_Flush_Occurred: {
+ flush_occurred_evt_parms_t *evt_parms = (flush_occurred_evt_parms_t *)evt->parms;
+ dhd_bta_flush_hcidata(dhdp, ltoh16_ua((uint8 *)&evt_parms->handle));
+ break;
+ }
+ default:
+ break;
+ }
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_bta.h b/drivers/net/wireless/bcmdhd/dhd_bta.h
new file mode 100644
index 000000000000..0337f15d285a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_bta.h
@@ -0,0 +1,39 @@
+/*
+ * BT-AMP support routines
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_bta.h 291086 2011-10-21 01:17:24Z $
+ */
+#ifndef __dhd_bta_h__
+#define __dhd_bta_h__
+
+struct dhd_pub;
+
+extern int dhd_bta_docmd(struct dhd_pub *pub, void *cmd_buf, uint cmd_len);
+
+extern void dhd_bta_doevt(struct dhd_pub *pub, void *data_buf, uint data_len);
+
+extern int dhd_bta_tx_hcidata(struct dhd_pub *pub, void *data_buf, uint data_len);
+extern void dhd_bta_tx_hcidata_complete(struct dhd_pub *dhdp, void *txp, bool success);
+
+
+#endif /* __dhd_bta_h__ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_bus.h b/drivers/net/wireless/bcmdhd/dhd_bus.h
new file mode 100644
index 000000000000..362677192146
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_bus.h
@@ -0,0 +1,109 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_bus.h 313456 2012-02-07 22:03:40Z $
+ */
+
+#ifndef _dhd_bus_h_
+#define _dhd_bus_h_
+
+/*
+ * Exported from dhd bus module (dhd_usb, dhd_sdio)
+ */
+
+/* Indicate (dis)interest in finding dongles. */
+extern int dhd_bus_register(void);
+extern void dhd_bus_unregister(void);
+
+/* Download firmware image and nvram image */
+extern bool dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
+ char *fw_path, char *nv_path);
+
+/* Stop bus module: clear pending frames, disable data flow */
+extern void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex);
+
+/* Initialize bus module: prepare for communication w/dongle */
+extern int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex);
+
+/* Get the Bus Idle Time */
+extern void dhd_bus_getidletime(dhd_pub_t *dhdp, int *idletime);
+
+/* Set the Bus Idle Time */
+extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time);
+
+/* Send a data frame to the dongle. Callee disposes of txp. */
+extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp);
+
+/* Send/receive a control message to/from the dongle.
+ * Expects caller to enforce a single outstanding transaction.
+ */
+extern int dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen);
+extern int dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen);
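+
+/* Illustrative sketch (not part of the original driver): "a single outstanding
+ * transaction" means one txctl is followed by one matching rxctl, serialized
+ * by the caller (see dhdcdc_msg()/dhdcdc_cmplt() in dhd_cdc.c).
+ *
+ *	if (dhd_bus_txctl(bus, (uchar *)msg, msglen) >= 0)
+ *		ret = dhd_bus_rxctl(bus, (uchar *)msg, msglen);
+ */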
+
+/* Watchdog timer function */
+extern bool dhd_bus_watchdog(dhd_pub_t *dhd);
+extern void dhd_disable_intr(dhd_pub_t *dhd);
+
+#if defined(DHD_DEBUG)
+/* Device console input function */
+extern int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen);
+#endif /* defined(DHD_DEBUG) */
+
+/* Deferred processing for the bus; returning TRUE requests a reschedule */
+extern bool dhd_bus_dpc(struct dhd_bus *bus);
+extern void dhd_bus_isr(bool * InterruptRecognized, bool * QueueMiniportHandleInterrupt, void *arg);
+
+
+/* Check for and handle local prot-specific iovar commands */
+extern int dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+ void *params, int plen, void *arg, int len, bool set);
+
+/* Add bus dump output to a buffer */
+extern void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+
+/* Clear any bus counters */
+extern void dhd_bus_clearcounts(dhd_pub_t *dhdp);
+
+/* return the dongle chipid */
+extern uint dhd_bus_chip(struct dhd_bus *bus);
+
+/* Set user-specified nvram parameters. */
+extern void dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params);
+
+extern void *dhd_bus_pub(struct dhd_bus *bus);
+extern void *dhd_bus_txq(struct dhd_bus *bus);
+extern uint dhd_bus_hdrlen(struct dhd_bus *bus);
+
+
+#define DHD_SET_BUS_STATE_DOWN(_bus) do { \
+ (_bus)->dhd->busstate = DHD_BUS_DOWN; \
+} while (0)
+
+/* Register a dummy SDIO client driver in order to be notified of new SDIO device */
+extern int dhd_bus_reg_sdio_notify(void* semaphore);
+extern void dhd_bus_unreg_sdio_notify(void);
+
+#endif /* _dhd_bus_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_cdc.c b/drivers/net/wireless/bcmdhd/dhd_cdc.c
new file mode 100644
index 000000000000..abd08731cde9
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_cdc.c
@@ -0,0 +1,2838 @@
+/*
+ * DHD Protocol Module for CDC and BDC.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_cdc.c 328424 2012-04-19 05:23:09Z $
+ *
+ * BDC is like CDC, except it includes a header for data packets to convey
+ * packet priority over the bus, and flags (e.g. to indicate checksum status
+ * for dongle offload.)
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmcdc.h>
+#include <bcmendian.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_proto.h>
+#include <dhd_bus.h>
+#include <dhd_dbg.h>
+
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+
+#define RETRIES 2 /* # of retries to retrieve matching ioctl response */
+#define BUS_HEADER_LEN (16+DHD_SDALIGN) /* Must be at least SDPCM_RESERVE
+ * defined in dhd_sdio.c (amount of header that might be added)
+ * plus any space that might be needed for alignment padding.
+ */
+#define ROUND_UP_MARGIN 2048 /* Biggest SDIO block size possible for
+ * round off at the end of buffer
+ */
+
+#define BUS_RETRIES 1 /* # of retries before aborting a bus tx operation */
+
+#ifdef PROP_TXSTATUS
+typedef struct dhd_wlfc_commit_info {
+ uint8 needs_hdr;
+ uint8 ac_fifo_credit_spent;
+ ewlfc_packet_state_t pkt_type;
+ wlfc_mac_descriptor_t* mac_entry;
+ void* p;
+} dhd_wlfc_commit_info_t;
+#endif /* PROP_TXSTATUS */
+
+typedef struct dhd_prot {
+ uint16 reqid;
+ uint8 pending;
+ uint32 lastcmd;
+ uint8 bus_header[BUS_HEADER_LEN];
+ cdc_ioctl_t msg;
+ unsigned char buf[WLC_IOCTL_MAXLEN + ROUND_UP_MARGIN];
+} dhd_prot_t;
+
+
+static int
+dhdcdc_msg(dhd_pub_t *dhd)
+{
+ int err = 0;
+ dhd_prot_t *prot = dhd->prot;
+ int len = ltoh32(prot->msg.len) + sizeof(cdc_ioctl_t);
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ DHD_OS_WAKE_LOCK(dhd);
+
+ /* NOTE : cdc->msg.len holds the desired length of the buffer to be
+ * returned. Only up to CDC_MAX_MSG_SIZE of this buffer area
+ * is actually sent to the dongle
+ */
+ if (len > CDC_MAX_MSG_SIZE)
+ len = CDC_MAX_MSG_SIZE;
+
+ /* Send request */
+ err = dhd_bus_txctl(dhd->bus, (uchar*)&prot->msg, len);
+
+ DHD_OS_WAKE_UNLOCK(dhd);
+ return err;
+}
+
+static int
+dhdcdc_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len)
+{
+ int ret;
+ int cdc_len = len + sizeof(cdc_ioctl_t);
+ dhd_prot_t *prot = dhd->prot;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ do {
+ ret = dhd_bus_rxctl(dhd->bus, (uchar*)&prot->msg, cdc_len);
+ if (ret < 0)
+ break;
+ } while (CDC_IOC_ID(ltoh32(prot->msg.flags)) != id);
+
+ return ret;
+}
+
+static int
+dhdcdc_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+ dhd_prot_t *prot = dhd->prot;
+ cdc_ioctl_t *msg = &prot->msg;
+ void *info;
+ int ret = 0, retries = 0;
+ uint32 id, flags = 0;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
+
+ /* Respond "bcmerror" and "bcmerrorstr" with local cache */
+ if (cmd == WLC_GET_VAR && buf)
+ {
+ if (!strcmp((char *)buf, "bcmerrorstr"))
+ {
+ strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN);
+ goto done;
+ }
+ else if (!strcmp((char *)buf, "bcmerror"))
+ {
+ *(int *)buf = dhd->dongle_error;
+ goto done;
+ }
+ }
+
+ memset(msg, 0, sizeof(cdc_ioctl_t));
+
+ msg->cmd = htol32(cmd);
+ msg->len = htol32(len);
+ msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT);
+ CDC_SET_IF_IDX(msg, ifidx);
+ /* add additional action bits */
+ action &= WL_IOCTL_ACTION_MASK;
+ msg->flags |= (action << CDCF_IOC_ACTION_SHIFT);
+ msg->flags = htol32(msg->flags);
+
+ if (buf)
+ memcpy(prot->buf, buf, len);
+
+ if ((ret = dhdcdc_msg(dhd)) < 0) {
+ if (!dhd->hang_was_sent)
+ DHD_ERROR(("dhdcdc_query_ioctl: dhdcdc_msg failed w/status %d\n", ret));
+ goto done;
+ }
+
+retry:
+ /* wait for interrupt and get first fragment */
+ if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0)
+ goto done;
+
+ flags = ltoh32(msg->flags);
+ id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT;
+
+ if ((id < prot->reqid) && (++retries < RETRIES))
+ goto retry;
+ if (id != prot->reqid) {
+ DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
+ dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid));
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* Check info buffer */
+ info = (void*)&msg[1];
+
+ /* Copy info buffer */
+ if (buf)
+ {
+ if (ret < (int)len)
+ len = ret;
+ memcpy(buf, info, len);
+ }
+
+ /* Check the ERROR flag */
+ if (flags & CDCF_IOC_ERROR)
+ {
+ ret = ltoh32(msg->status);
+ /* Cache error from dongle */
+ dhd->dongle_error = ret;
+ }
+
+done:
+ return ret;
+}
+
+static int
+dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+ dhd_prot_t *prot = dhd->prot;
+ cdc_ioctl_t *msg = &prot->msg;
+ int ret = 0;
+ uint32 flags, id;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
+ if (dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+ return -EIO;
+ }
+
+ /* don't talk to the dongle if fw is about to be reloaded */
+ if (dhd->hang_was_sent) {
+ DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
+ __FUNCTION__));
+ return -EIO;
+ }
+
+ memset(msg, 0, sizeof(cdc_ioctl_t));
+
+ msg->cmd = htol32(cmd);
+ msg->len = htol32(len);
+ msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT);
+ CDC_SET_IF_IDX(msg, ifidx);
+ /* add additional action bits */
+ action &= WL_IOCTL_ACTION_MASK;
+ msg->flags |= (action << CDCF_IOC_ACTION_SHIFT) | CDCF_IOC_SET;
+ msg->flags = htol32(msg->flags);
+
+ if (buf)
+ memcpy(prot->buf, buf, len);
+
+ if ((ret = dhdcdc_msg(dhd)) < 0) {
+ DHD_ERROR(("%s: dhdcdc_msg failed w/status %d\n", __FUNCTION__, ret));
+ goto done;
+ }
+
+ if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0)
+ goto done;
+
+ flags = ltoh32(msg->flags);
+ id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT;
+
+ if (id != prot->reqid) {
+ DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
+ dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid));
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* Check the ERROR flag */
+ if (flags & CDCF_IOC_ERROR)
+ {
+ ret = ltoh32(msg->status);
+ /* Cache error from dongle */
+ dhd->dongle_error = ret;
+ }
+
+done:
+ return ret;
+}
+
+
+int
+dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int ret = -1;
+ uint8 action;
+#if defined(NDIS630)
+ bool acquired = FALSE;
+#endif
+
+ if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
+ DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+ goto done;
+ }
+#if defined(NDIS630)
+ if (dhd_os_proto_block(dhd))
+ {
+ acquired = TRUE;
+ }
+ else
+ {
+ /* attempt to acquire protocol mutex timed out. */
+ ret = -1;
+ return ret;
+ }
+#endif /* NDIS630 */
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(len <= WLC_IOCTL_MAXLEN);
+
+ if (len > WLC_IOCTL_MAXLEN)
+ goto done;
+
+ if (prot->pending == TRUE) {
+ DHD_ERROR(("CDC packet is pending!!!! cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n",
+ ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd,
+ (unsigned long)prot->lastcmd));
+ if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) {
+ DHD_TRACE(("iovar cmd=%s\n", (char*)buf));
+ }
+ goto done;
+ }
+
+ prot->pending = TRUE;
+ prot->lastcmd = ioc->cmd;
+ action = ioc->set;
+ if (action & WL_IOCTL_ACTION_SET)
+ ret = dhdcdc_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+ else {
+ ret = dhdcdc_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+ if (ret > 0)
+ ioc->used = ret - sizeof(cdc_ioctl_t);
+ }
+
+ /* Too many programs assume ioctl() returns 0 on success */
+ if (ret >= 0)
+ ret = 0;
+ else {
+ cdc_ioctl_t *msg = &prot->msg;
+ ioc->needed = ltoh32(msg->len); /* len == needed when set/query fails from dongle */
+ }
+
+ /* Intercept the wme_dp ioctl here */
+ if ((!ret) && (ioc->cmd == WLC_SET_VAR) && (!strcmp(buf, "wme_dp"))) {
+ int slen, val = 0;
+
+ slen = strlen("wme_dp") + 1;
+ if (len >= (int)(slen + sizeof(int)))
+ bcopy(((char *)buf + slen), &val, sizeof(int));
+ dhd->wme_dp = (uint8) ltoh32(val);
+ }
+
+ prot->pending = FALSE;
+
+done:
+#if defined(NDIS630)
+ if (acquired)
+ dhd_os_proto_unblock(dhd);
+#endif
+ return ret;
+}
+
+int
+dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
+{
+ return BCME_UNSUPPORTED;
+}
+
+#ifdef PROP_TXSTATUS
+void
+dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+ int i;
+ uint8* ea;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhdp->wlfc_state;
+ wlfc_hanger_t* h;
+ wlfc_mac_descriptor_t* mac_table;
+ wlfc_mac_descriptor_t* interfaces;
+ char* iftypes[] = {"STA", "AP", "WDS", "p2pGO", "p2pCL"};
+
+ if (wlfc == NULL) {
+ bcm_bprintf(strbuf, "wlfc not initialized yet\n");
+ return;
+ }
+ h = (wlfc_hanger_t*)wlfc->hanger;
+ if (h == NULL) {
+ bcm_bprintf(strbuf, "wlfc-hanger not initialized yet\n");
+ }
+
+ mac_table = wlfc->destination_entries.nodes;
+ interfaces = wlfc->destination_entries.interfaces;
+ bcm_bprintf(strbuf, "---- wlfc stats ----\n");
+ if (h) {
+ bcm_bprintf(strbuf, "wlfc hanger (pushed,popped,f_push,"
+ "f_pop,f_slot, pending) = (%d,%d,%d,%d,%d,%d)\n",
+ h->pushed,
+ h->popped,
+ h->failed_to_push,
+ h->failed_to_pop,
+ h->failed_slotfind,
+ (h->pushed - h->popped));
+ }
+
+ bcm_bprintf(strbuf, "wlfc fail(tlv,credit_rqst,mac_update,psmode_update), "
+ "(dq_full,sendq_full, rollback_fail) = (%d,%d,%d,%d), (%d,%d,%d)\n",
+ wlfc->stats.tlv_parse_failed,
+ wlfc->stats.credit_request_failed,
+ wlfc->stats.mac_update_failed,
+ wlfc->stats.psmode_update_failed,
+ wlfc->stats.delayq_full_error,
+ wlfc->stats.sendq_full_error,
+ wlfc->stats.rollback_failed);
+
+ bcm_bprintf(strbuf, "SENDQ (len,credit,sent) "
+ "(AC0[%d,%d,%d],AC1[%d,%d,%d],AC2[%d,%d,%d],AC3[%d,%d,%d],BC_MC[%d,%d,%d])\n",
+ wlfc->SENDQ.q[0].len, wlfc->FIFO_credit[0], wlfc->stats.sendq_pkts[0],
+ wlfc->SENDQ.q[1].len, wlfc->FIFO_credit[1], wlfc->stats.sendq_pkts[1],
+ wlfc->SENDQ.q[2].len, wlfc->FIFO_credit[2], wlfc->stats.sendq_pkts[2],
+ wlfc->SENDQ.q[3].len, wlfc->FIFO_credit[3], wlfc->stats.sendq_pkts[3],
+ wlfc->SENDQ.q[4].len, wlfc->FIFO_credit[4], wlfc->stats.sendq_pkts[4]);
+
+#ifdef PROP_TXSTATUS_DEBUG
+ bcm_bprintf(strbuf, "SENDQ dropped: AC[0-3]:(%d,%d,%d,%d), (bcmc,atim):(%d,%d)\n",
+ wlfc->stats.dropped_qfull[0], wlfc->stats.dropped_qfull[1],
+ wlfc->stats.dropped_qfull[2], wlfc->stats.dropped_qfull[3],
+ wlfc->stats.dropped_qfull[4], wlfc->stats.dropped_qfull[5]);
+#endif
+
+ bcm_bprintf(strbuf, "\n");
+ for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+ if (interfaces[i].occupied) {
+ char* iftype_desc;
+
+ if (interfaces[i].iftype > WLC_E_IF_ROLE_P2P_CLIENT)
+ iftype_desc = "<Unknown";
+ else
+ iftype_desc = iftypes[interfaces[i].iftype];
+
+ ea = interfaces[i].ea;
+ bcm_bprintf(strbuf, "INTERFACE[%d].ea = "
+ "[%02x:%02x:%02x:%02x:%02x:%02x], if:%d, type: %s"
+ "netif_flow_control:%s\n", i,
+ ea[0], ea[1], ea[2], ea[3], ea[4], ea[5],
+ interfaces[i].interface_id,
+ iftype_desc, ((wlfc->hostif_flow_state[i] == OFF)
+ ? " OFF":" ON"));
+
+ bcm_bprintf(strbuf, "INTERFACE[%d].DELAYQ(len,state,credit)"
+ "= (%d,%s,%d)\n",
+ i,
+ interfaces[i].psq.len,
+ ((interfaces[i].state ==
+ WLFC_STATE_OPEN) ? " OPEN":"CLOSE"),
+ interfaces[i].requested_credit);
+
+ bcm_bprintf(strbuf, "INTERFACE[%d].DELAYQ"
+ "(sup,ac0),(sup,ac1),(sup,ac2),(sup,ac3) = "
+ "(%d,%d),(%d,%d),(%d,%d),(%d,%d)\n",
+ i,
+ interfaces[i].psq.q[0].len,
+ interfaces[i].psq.q[1].len,
+ interfaces[i].psq.q[2].len,
+ interfaces[i].psq.q[3].len,
+ interfaces[i].psq.q[4].len,
+ interfaces[i].psq.q[5].len,
+ interfaces[i].psq.q[6].len,
+ interfaces[i].psq.q[7].len);
+ }
+ }
+
+ bcm_bprintf(strbuf, "\n");
+ for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+ if (mac_table[i].occupied) {
+ ea = mac_table[i].ea;
+ bcm_bprintf(strbuf, "MAC_table[%d].ea = "
+ "[%02x:%02x:%02x:%02x:%02x:%02x], if:%d \n", i,
+ ea[0], ea[1], ea[2], ea[3], ea[4], ea[5],
+ mac_table[i].interface_id);
+
+ bcm_bprintf(strbuf, "MAC_table[%d].DELAYQ(len,state,credit)"
+ "= (%d,%s,%d)\n",
+ i,
+ mac_table[i].psq.len,
+ ((mac_table[i].state ==
+ WLFC_STATE_OPEN) ? " OPEN":"CLOSE"),
+ mac_table[i].requested_credit);
+#ifdef PROP_TXSTATUS_DEBUG
+ bcm_bprintf(strbuf, "MAC_table[%d]: (opened, closed) = (%d, %d)\n",
+ i, mac_table[i].opened_ct, mac_table[i].closed_ct);
+#endif
+ bcm_bprintf(strbuf, "MAC_table[%d].DELAYQ"
+ "(sup,ac0),(sup,ac1),(sup,ac2),(sup,ac3) = "
+ "(%d,%d),(%d,%d),(%d,%d),(%d,%d)\n",
+ i,
+ mac_table[i].psq.q[0].len,
+ mac_table[i].psq.q[1].len,
+ mac_table[i].psq.q[2].len,
+ mac_table[i].psq.q[3].len,
+ mac_table[i].psq.q[4].len,
+ mac_table[i].psq.q[5].len,
+ mac_table[i].psq.q[6].len,
+ mac_table[i].psq.q[7].len);
+ }
+ }
+
+#ifdef PROP_TXSTATUS_DEBUG
+ {
+ int avg;
+ int moving_avg = 0;
+ int moving_samples;
+
+ if (wlfc->stats.latency_sample_count) {
+ moving_samples = sizeof(wlfc->stats.deltas)/sizeof(uint32);
+
+ for (i = 0; i < moving_samples; i++)
+ moving_avg += wlfc->stats.deltas[i];
+ moving_avg /= moving_samples;
+
+ avg = (100 * wlfc->stats.total_status_latency) /
+ wlfc->stats.latency_sample_count;
+ bcm_bprintf(strbuf, "txstatus latency (average, last, moving[%d]) = "
+ "(%d.%d, %03d, %03d)\n",
+ moving_samples, avg/100, (avg - (avg/100)*100),
+ wlfc->stats.latency_most_recent,
+ moving_avg);
+ }
+ }
+
+ bcm_bprintf(strbuf, "wlfc- fifo[0-5] credit stats: sent = (%d,%d,%d,%d,%d,%d), "
+ "back = (%d,%d,%d,%d,%d,%d)\n",
+ wlfc->stats.fifo_credits_sent[0],
+ wlfc->stats.fifo_credits_sent[1],
+ wlfc->stats.fifo_credits_sent[2],
+ wlfc->stats.fifo_credits_sent[3],
+ wlfc->stats.fifo_credits_sent[4],
+ wlfc->stats.fifo_credits_sent[5],
+
+ wlfc->stats.fifo_credits_back[0],
+ wlfc->stats.fifo_credits_back[1],
+ wlfc->stats.fifo_credits_back[2],
+ wlfc->stats.fifo_credits_back[3],
+ wlfc->stats.fifo_credits_back[4],
+ wlfc->stats.fifo_credits_back[5]);
+ {
+ uint32 fifo_cr_sent = 0;
+ uint32 fifo_cr_acked = 0;
+ uint32 request_cr_sent = 0;
+ uint32 request_cr_ack = 0;
+ uint32 bc_mc_cr_ack = 0;
+
+ for (i = 0; i < sizeof(wlfc->stats.fifo_credits_sent)/sizeof(uint32); i++) {
+ fifo_cr_sent += wlfc->stats.fifo_credits_sent[i];
+ }
+
+ for (i = 0; i < sizeof(wlfc->stats.fifo_credits_back)/sizeof(uint32); i++) {
+ fifo_cr_acked += wlfc->stats.fifo_credits_back[i];
+ }
+
+ for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+ if (wlfc->destination_entries.nodes[i].occupied) {
+ request_cr_sent +=
+ wlfc->destination_entries.nodes[i].dstncredit_sent_packets;
+ }
+ }
+ for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+ if (wlfc->destination_entries.interfaces[i].occupied) {
+ request_cr_sent +=
+ wlfc->destination_entries.interfaces[i].dstncredit_sent_packets;
+ }
+ }
+ for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+ if (wlfc->destination_entries.nodes[i].occupied) {
+ request_cr_ack +=
+ wlfc->destination_entries.nodes[i].dstncredit_acks;
+ }
+ }
+ for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+ if (wlfc->destination_entries.interfaces[i].occupied) {
+ request_cr_ack +=
+ wlfc->destination_entries.interfaces[i].dstncredit_acks;
+ }
+ }
+ bcm_bprintf(strbuf, "wlfc- (sent, status) => pq(%d,%d), vq(%d,%d),"
+ "other:%d, bc_mc:%d, signal-only, (sent,freed): (%d,%d)",
+ fifo_cr_sent, fifo_cr_acked,
+ request_cr_sent, request_cr_ack,
+ wlfc->destination_entries.other.dstncredit_acks,
+ bc_mc_cr_ack,
+ wlfc->stats.signal_only_pkts_sent, wlfc->stats.signal_only_pkts_freed);
+ }
+#endif /* PROP_TXSTATUS_DEBUG */
+ bcm_bprintf(strbuf, "\n");
+ bcm_bprintf(strbuf, "wlfc- pkt((in,2bus,txstats,hdrpull),(dropped,hdr_only,wlc_tossed)"
+ "(freed,free_err,rollback)) = "
+ "((%d,%d,%d,%d),(%d,%d,%d),(%d,%d,%d))\n",
+ wlfc->stats.pktin,
+ wlfc->stats.pkt2bus,
+ wlfc->stats.txstatus_in,
+ wlfc->stats.dhd_hdrpulls,
+
+ wlfc->stats.pktdropped,
+ wlfc->stats.wlfc_header_only_pkt,
+ wlfc->stats.wlc_tossed_pkts,
+
+ wlfc->stats.pkt_freed,
+ wlfc->stats.pkt_free_err, wlfc->stats.rollback);
+
+ bcm_bprintf(strbuf, "wlfc- suppress((d11,wlc,err),enq(d11,wl,hq,mac?),retx(d11,wlc,hq)) = "
+ "((%d,%d,%d),(%d,%d,%d,%d),(%d,%d,%d))\n",
+
+ wlfc->stats.d11_suppress,
+ wlfc->stats.wl_suppress,
+ wlfc->stats.bad_suppress,
+
+ wlfc->stats.psq_d11sup_enq,
+ wlfc->stats.psq_wlsup_enq,
+ wlfc->stats.psq_hostq_enq,
+ wlfc->stats.mac_handle_notfound,
+
+ wlfc->stats.psq_d11sup_retx,
+ wlfc->stats.psq_wlsup_retx,
+ wlfc->stats.psq_hostq_retx);
+ return;
+}
+
+/* Create a place to store all packet pointers submitted to the firmware until
+ a status comes back, suppress or otherwise.
+
+ hang-er: noun, a contrivance on which things are hung, as a hook.
+*/
+static void*
+dhd_wlfc_hanger_create(osl_t *osh, int max_items)
+{
+ int i;
+ wlfc_hanger_t* hanger;
+
+ /* allow only up to a specific size for now */
+ ASSERT(max_items == WLFC_HANGER_MAXITEMS);
+
+ if ((hanger = (wlfc_hanger_t*)MALLOC(osh, WLFC_HANGER_SIZE(max_items))) == NULL)
+ return NULL;
+
+ memset(hanger, 0, WLFC_HANGER_SIZE(max_items));
+ hanger->max_items = max_items;
+
+ for (i = 0; i < hanger->max_items; i++) {
+ hanger->items[i].state = WLFC_HANGER_ITEM_STATE_FREE;
+ }
+ return hanger;
+}
+
+static int
+dhd_wlfc_hanger_delete(osl_t *osh, void* hanger)
+{
+ wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+ if (h) {
+ MFREE(osh, h, WLFC_HANGER_SIZE(h->max_items));
+ return BCME_OK;
+ }
+ return BCME_BADARG;
+}
+
+static uint16
+dhd_wlfc_hanger_get_free_slot(void* hanger)
+{
+ int i;
+ wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+ if (h) {
+ for (i = 0; i < h->max_items; i++) {
+ if (h->items[i].state == WLFC_HANGER_ITEM_STATE_FREE)
+ return (uint16)i;
+ }
+ h->failed_slotfind++;
+ }
+ return WLFC_HANGER_MAXITEMS;
+}
+
+static int
+dhd_wlfc_hanger_pushpkt(void* hanger, void* pkt, uint32 slot_id)
+{
+ int rc = BCME_OK;
+ wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+ if (h && (slot_id < WLFC_HANGER_MAXITEMS)) {
+ if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_FREE) {
+ h->items[slot_id].state = WLFC_HANGER_ITEM_STATE_INUSE;
+ h->items[slot_id].pkt = pkt;
+ h->items[slot_id].identifier = slot_id;
+ h->pushed++;
+ }
+ else {
+ h->failed_to_push++;
+ rc = BCME_NOTFOUND;
+ }
+ }
+ else
+ rc = BCME_BADARG;
+ return rc;
+}
+
+static int
+dhd_wlfc_hanger_poppkt(void* hanger, uint32 slot_id, void** pktout, int remove_from_hanger)
+{
+ int rc = BCME_OK;
+ wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+ /* this packet was not pushed at the time it went to the firmware */
+ if (slot_id == WLFC_HANGER_MAXITEMS)
+ return BCME_NOTFOUND;
+
+ if (h) {
+ if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_INUSE) {
+ *pktout = h->items[slot_id].pkt;
+ if (remove_from_hanger) {
+ h->items[slot_id].state =
+ WLFC_HANGER_ITEM_STATE_FREE;
+ h->items[slot_id].pkt = NULL;
+ h->items[slot_id].identifier = 0;
+ h->popped++;
+ }
+ }
+ else {
+ h->failed_to_pop++;
+ rc = BCME_NOTFOUND;
+ }
+ }
+ else
+ rc = BCME_BADARG;
+ return rc;
+}
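+
+/* Illustrative sketch (not part of the original driver): the hanger lifecycle
+ * implied by the helpers above; p and pktout are hypothetical locals. A slot
+ * is reserved and the packet parked on transmit, then popped when the
+ * firmware returns a txstatus.
+ *
+ *	uint16 slot = dhd_wlfc_hanger_get_free_slot(hanger);
+ *	if (slot != WLFC_HANGER_MAXITEMS)
+ *		dhd_wlfc_hanger_pushpkt(hanger, p, slot);
+ *	...
+ *	dhd_wlfc_hanger_poppkt(hanger, slot, &pktout, 1);
+ */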
+
+static int
+_dhd_wlfc_pushheader(athost_wl_status_info_t* ctx, void* p, bool tim_signal,
+ uint8 tim_bmp, uint8 mac_handle, uint32 htodtag)
+{
+ uint32 wl_pktinfo = 0;
+ uint8* wlh;
+ uint8 dataOffset;
+ uint8 fillers;
+ uint8 tim_signal_len = 0;
+
+ struct bdc_header *h;
+
+ if (tim_signal) {
+ tim_signal_len = 1 + 1 + WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP;
+ }
+
+ /* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */
+ dataOffset = WLFC_CTL_VALUE_LEN_PKTTAG + 2 + tim_signal_len;
+ fillers = ROUNDUP(dataOffset, 4) - dataOffset;
+ dataOffset += fillers;
+
+ PKTPUSH(ctx->osh, p, dataOffset);
+ wlh = (uint8*) PKTDATA(ctx->osh, p);
+
+ wl_pktinfo = htol32(htodtag);
+
+ wlh[0] = WLFC_CTL_TYPE_PKTTAG;
+ wlh[1] = WLFC_CTL_VALUE_LEN_PKTTAG;
+ memcpy(&wlh[2], &wl_pktinfo, sizeof(uint32));
+
+ if (tim_signal_len) {
+ wlh[dataOffset - fillers - tim_signal_len ] =
+ WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP;
+ wlh[dataOffset - fillers - tim_signal_len + 1] =
+ WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP;
+ wlh[dataOffset - fillers - tim_signal_len + 2] = mac_handle;
+ wlh[dataOffset - fillers - tim_signal_len + 3] = tim_bmp;
+ }
+ if (fillers)
+ memset(&wlh[dataOffset - fillers], WLFC_CTL_TYPE_FILLER, fillers);
+
+ PKTPUSH(ctx->osh, p, BDC_HEADER_LEN);
+ h = (struct bdc_header *)PKTDATA(ctx->osh, p);
+ h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
+ if (PKTSUMNEEDED(p))
+ h->flags |= BDC_FLAG_SUM_NEEDED;
+
+
+ h->priority = (PKTPRIO(p) & BDC_PRIORITY_MASK);
+ h->flags2 = 0;
+ h->dataOffset = dataOffset >> 2;
+ BDC_SET_IF_IDX(h, DHD_PKTTAG_IF(PKTTAG(p)));
+ return BCME_OK;
+}
+
+static int
+_dhd_wlfc_pullheader(athost_wl_status_info_t* ctx, void* pktbuf)
+{
+ struct bdc_header *h;
+
+ if (PKTLEN(ctx->osh, pktbuf) < BDC_HEADER_LEN) {
+ WLFC_DBGMESG(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+ PKTLEN(ctx->osh, pktbuf), BDC_HEADER_LEN));
+ return BCME_ERROR;
+ }
+ h = (struct bdc_header *)PKTDATA(ctx->osh, pktbuf);
+
+ /* pull BDC header */
+ PKTPULL(ctx->osh, pktbuf, BDC_HEADER_LEN);
+ /* pull wl-header */
+ PKTPULL(ctx->osh, pktbuf, (h->dataOffset << 2));
+ return BCME_OK;
+}
+
+static wlfc_mac_descriptor_t*
+_dhd_wlfc_find_table_entry(athost_wl_status_info_t* ctx, void* p)
+{
+ int i;
+ wlfc_mac_descriptor_t* table = ctx->destination_entries.nodes;
+ uint8 ifid = DHD_PKTTAG_IF(PKTTAG(p));
+ uint8* dstn = DHD_PKTTAG_DSTN(PKTTAG(p));
+
+ if (((ctx->destination_entries.interfaces[ifid].iftype == WLC_E_IF_ROLE_STA) ||
+ ETHER_ISMULTI(dstn) ||
+ (ctx->destination_entries.interfaces[ifid].iftype == WLC_E_IF_ROLE_P2P_CLIENT)) &&
+ (ctx->destination_entries.interfaces[ifid].occupied)) {
+ return &ctx->destination_entries.interfaces[ifid];
+ }
+
+ for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+ if (table[i].occupied) {
+ if (table[i].interface_id == ifid) {
+ if (!memcmp(table[i].ea, dstn, ETHER_ADDR_LEN))
+ return &table[i];
+ }
+ }
+ }
+ return &ctx->destination_entries.other;
+}
+
+static int
+_dhd_wlfc_rollback_packet_toq(athost_wl_status_info_t* ctx,
+ void* p, ewlfc_packet_state_t pkt_type, uint32 hslot)
+{
+ /*
+ put the packet back to the head of queue
+
+ - a packet from send-q must go back to send-q, not delay-q,
+ since moving it to delay-q would change the order of packets.
+ - suppressed packet goes back to suppress sub-queue
+ - pull out the header, if new or delayed packet
+
+ Note: hslot is used only when header removal is done.
+ */
+ wlfc_mac_descriptor_t* entry;
+ void* pktout;
+ int rc = BCME_OK;
+ int prec;
+
+ entry = _dhd_wlfc_find_table_entry(ctx, p);
+ prec = DHD_PKTTAG_FIFO(PKTTAG(p));
+ if (entry != NULL) {
+ if (pkt_type == eWLFC_PKTTYPE_SUPPRESSED) {
+ /* wl-header is saved for suppressed packets */
+ if (WLFC_PKTQ_PENQ_HEAD(&entry->psq, ((prec << 1) + 1), p) == NULL) {
+ WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ rc = BCME_ERROR;
+ }
+ }
+ else {
+ /* remove header first */
+ _dhd_wlfc_pullheader(ctx, p);
+
+ if (pkt_type == eWLFC_PKTTYPE_DELAYED) {
+ /* delay-q packets are going to delay-q */
+ if (WLFC_PKTQ_PENQ_HEAD(&entry->psq, (prec << 1), p) == NULL) {
+ WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ rc = BCME_ERROR;
+ }
+ }
+ else {
+ /* these are going to SENDQ */
+ if (WLFC_PKTQ_PENQ_HEAD(&ctx->SENDQ, prec, p) == NULL) {
+ WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ rc = BCME_ERROR;
+ }
+ }
+ /* free the hanger slot */
+ dhd_wlfc_hanger_poppkt(ctx->hanger, hslot, &pktout, 1);
+
+ /* decrement sequence count */
+ WLFC_DECR_SEQCOUNT(entry, prec);
+ }
+ /*
+ if this packet did not count against FIFO credit, it must have
+ taken a requested_credit from the firmware (for pspoll etc.)
+ */
+ if (!DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) {
+ entry->requested_credit++;
+ }
+ }
+ else {
+ WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ rc = BCME_ERROR;
+ }
+ if (rc != BCME_OK)
+ ctx->stats.rollback_failed++;
+ else
+ ctx->stats.rollback++;
+
+ return rc;
+}
+
+static void
+_dhd_wlfc_flow_control_check(athost_wl_status_info_t* ctx, struct pktq* pq, uint8 if_id)
+{
+ if ((pq->len <= WLFC_FLOWCONTROL_LOWATER) && (ctx->hostif_flow_state[if_id] == ON)) {
+ /* start traffic */
+ ctx->hostif_flow_state[if_id] = OFF;
+ /*
+ WLFC_DBGMESG(("qlen:%02d, if:%02d, ->OFF, start traffic %s()\n",
+ pq->len, if_id, __FUNCTION__));
+ */
+ WLFC_DBGMESG(("F"));
+ dhd_txflowcontrol(ctx->dhdp, if_id, OFF);
+ ctx->toggle_host_if = 0;
+ }
+ if ((pq->len >= WLFC_FLOWCONTROL_HIWATER) && (ctx->hostif_flow_state[if_id] == OFF)) {
+ /* stop traffic */
+ ctx->hostif_flow_state[if_id] = ON;
+ /*
+ WLFC_DBGMESG(("qlen:%02d, if:%02d, ->ON, stop traffic %s()\n",
+ pq->len, if_id, __FUNCTION__));
+ */
+ WLFC_DBGMESG(("N"));
+ dhd_txflowcontrol(ctx->dhdp, if_id, ON);
+ ctx->host_ifidx = if_id;
+ ctx->toggle_host_if = 1;
+ }
+ return;
+}
+
+static int
+_dhd_wlfc_send_signalonly_packet(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
+ uint8 ta_bmp)
+{
+ int rc = BCME_OK;
+ void* p = NULL;
+ int dummylen = ((dhd_pub_t *)ctx->dhdp)->hdrlen+ 12;
+
+ /* allocate a dummy packet */
+ p = PKTGET(ctx->osh, dummylen, TRUE);
+ if (p) {
+ PKTPULL(ctx->osh, p, dummylen);
+ DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), 0);
+ _dhd_wlfc_pushheader(ctx, p, TRUE, ta_bmp, entry->mac_handle, 0);
+ DHD_PKTTAG_SETSIGNALONLY(PKTTAG(p), 1);
+#ifdef PROP_TXSTATUS_DEBUG
+ ctx->stats.signal_only_pkts_sent++;
+#endif
+ rc = dhd_bus_txdata(((dhd_pub_t *)ctx->dhdp)->bus, p);
+ if (rc != BCME_OK) {
+ PKTFREE(ctx->osh, p, TRUE);
+ }
+ }
+ else {
+ DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n",
+ __FUNCTION__, dummylen));
+ rc = BCME_NOMEM;
+ }
+ return rc;
+}
+
+/* Return TRUE if traffic availability changed */
+static bool
+_dhd_wlfc_traffic_pending_check(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
+ int prec)
+{
+ bool rc = FALSE;
+
+ if (entry->state == WLFC_STATE_CLOSE) {
+ if ((pktq_plen(&entry->psq, (prec << 1)) == 0) &&
+ (pktq_plen(&entry->psq, ((prec << 1) + 1)) == 0)) {
+
+ if (entry->traffic_pending_bmp & NBITVAL(prec)) {
+ rc = TRUE;
+ entry->traffic_pending_bmp =
+ entry->traffic_pending_bmp & ~ NBITVAL(prec);
+ }
+ }
+ else {
+ if (!(entry->traffic_pending_bmp & NBITVAL(prec))) {
+ rc = TRUE;
+ entry->traffic_pending_bmp =
+ entry->traffic_pending_bmp | NBITVAL(prec);
+ }
+ }
+ }
+ if (rc) {
+ /* request a TIM update to firmware at the next piggyback opportunity */
+ if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp) {
+ entry->send_tim_signal = 1;
+ _dhd_wlfc_send_signalonly_packet(ctx, entry, entry->traffic_pending_bmp);
+ entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
+ entry->send_tim_signal = 0;
+ }
+ else {
+ rc = FALSE;
+ }
+ }
+ return rc;
+}
+
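+/* Queue a firmware-suppressed packet on its destination's suppress sub-queue */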
+static int
+_dhd_wlfc_enque_suppressed(athost_wl_status_info_t* ctx, int prec, void* p)
+{
+ wlfc_mac_descriptor_t* entry;
+
+ entry = _dhd_wlfc_find_table_entry(ctx, p);
+ if (entry == NULL) {
+ WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_NOTFOUND;
+ }
+ /*
+ - suppressed packets go to sub_queue[2*prec + 1] AND
+ - delayed packets go to sub_queue[2*prec + 0] to ensure
+ order of delivery.
+ */
+ if (WLFC_PKTQ_PENQ(&entry->psq, ((prec << 1) + 1), p) == NULL) {
+ ctx->stats.delayq_full_error++;
+ /* WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__)); */
+ WLFC_DBGMESG(("s"));
+ return BCME_ERROR;
+ }
+ /* A packet has been pushed, update traffic availability bitmap, if applicable */
+ _dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+ _dhd_wlfc_flow_control_check(ctx, &entry->psq, DHD_PKTTAG_IF(PKTTAG(p)));
+ return BCME_OK;
+}
+
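+/*
+	Prepare a packet for the bus: build the host-to-device tag and push the wl
+	header. A new packet also gets a hanger slot; a suppressed packet reuses its
+	existing slot and sequence counter.
+*/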
+static int
+_dhd_wlfc_pretx_pktprocess(athost_wl_status_info_t* ctx,
+ wlfc_mac_descriptor_t* entry, void* p, int header_needed, uint32* slot)
+{
+ int rc = BCME_OK;
+ int hslot = WLFC_HANGER_MAXITEMS;
+ bool send_tim_update = FALSE;
+ uint32 htod = 0;
+ uint8 free_ctr;
+
+ *slot = hslot;
+
+ if (entry == NULL) {
+ entry = _dhd_wlfc_find_table_entry(ctx, p);
+ }
+
+ if (entry == NULL) {
+ WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_ERROR;
+ }
+ if (entry->send_tim_signal) {
+ send_tim_update = TRUE;
+ entry->send_tim_signal = 0;
+ entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
+ }
+ if (header_needed) {
+ hslot = dhd_wlfc_hanger_get_free_slot(ctx->hanger);
+ free_ctr = WLFC_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p)));
+ DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), htod);
+ }
+ else {
+ hslot = WLFC_PKTID_HSLOT_GET(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+ free_ctr = WLFC_PKTID_FREERUNCTR_GET(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+ }
+ WLFC_PKTID_HSLOT_SET(htod, hslot);
+ WLFC_PKTID_FREERUNCTR_SET(htod, free_ctr);
+ DHD_PKTTAG_SETPKTDIR(PKTTAG(p), 1);
+ WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST);
+ WL_TXSTATUS_SET_FIFO(htod, DHD_PKTTAG_FIFO(PKTTAG(p)));
+ WLFC_PKTFLAG_SET_GENERATION(htod, entry->generation);
+
+ if (!DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) {
+ /*
+ Indicate that this packet is being sent in response to an
+ explicit request from the firmware side.
+ */
+ WLFC_PKTFLAG_SET_PKTREQUESTED(htod);
+ }
+ else {
+ WLFC_PKTFLAG_CLR_PKTREQUESTED(htod);
+ }
+ if (header_needed) {
+ rc = _dhd_wlfc_pushheader(ctx, p, send_tim_update,
+ entry->traffic_lastreported_bmp, entry->mac_handle, htod);
+ if (rc == BCME_OK) {
+ DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), htod);
+ /*
+ a new header was created for this packet.
+ push to hanger slot and scrub q. Since bus
+ send succeeded, increment seq number as well.
+ */
+ rc = dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot);
+ if (rc == BCME_OK) {
+ /* increment free running sequence count */
+ WLFC_INCR_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p)));
+#ifdef PROP_TXSTATUS_DEBUG
+ ((wlfc_hanger_t*)(ctx->hanger))->items[hslot].push_time =
+ OSL_SYSUPTIME();
+#endif
+ }
+ else {
+ WLFC_DBGMESG(("%s() hanger_pushpkt() failed, rc: %d\n",
+ __FUNCTION__, rc));
+ }
+ }
+ }
+ else {
+ /* remove old header */
+ _dhd_wlfc_pullheader(ctx, p);
+
+ hslot = WLFC_PKTID_HSLOT_GET(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+ free_ctr = WLFC_PKTID_FREERUNCTR_GET(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+ /* push new header */
+ _dhd_wlfc_pushheader(ctx, p, send_tim_update,
+ entry->traffic_lastreported_bmp, entry->mac_handle, htod);
+ }
+ *slot = hslot;
+ return rc;
+}
+
+static int
+_dhd_wlfc_is_destination_closed(athost_wl_status_info_t* ctx,
+ wlfc_mac_descriptor_t* entry, int prec)
+{
+ if (ctx->destination_entries.interfaces[entry->interface_id].iftype ==
+ WLC_E_IF_ROLE_P2P_GO) {
+		/* - destination interface is of type p2p GO.
+		For a p2p GO interface, if the destination is OPEN but the interface is
+		CLOSEd, do not send traffic. But if the destination is CLOSEd while it
+		still has destination-specific credit left, send packets, because the
+		firmware is holding the destination-specific-requested packets in its queue.
+		*/
+ if ((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) &&
+ (entry->requested_packet == 0))
+ return 1;
+ }
+ /* AP, p2p_go -> unicast desc entry, STA/p2p_cl -> interface desc. entry */
+ if (((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) &&
+ (entry->requested_packet == 0)) ||
+ (!(entry->ac_bitmap & (1 << prec))))
+ return 1;
+
+ return 0;
+}
+
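+/*
+	Dequeue the next packet of this precedence from the per-destination
+	delay/suppress queues, round-robin across destinations starting at the saved
+	token position. Suppressed packets are picked before delayed ones.
+*/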
+static void*
+_dhd_wlfc_deque_delayedq(athost_wl_status_info_t* ctx,
+ int prec, uint8* ac_credit_spent, uint8* needs_hdr, wlfc_mac_descriptor_t** entry_out)
+{
+ wlfc_mac_descriptor_t* entry;
+ wlfc_mac_descriptor_t* table;
+ uint8 token_pos;
+ int total_entries;
+ void* p = NULL;
+ int pout;
+ int i;
+
+ *entry_out = NULL;
+ token_pos = ctx->token_pos[prec];
+	/* in most cases a packet will count against FIFO credit */
+ *ac_credit_spent = 1;
+ *needs_hdr = 1;
+
+ /* search all entries, include nodes as well as interfaces */
+ table = (wlfc_mac_descriptor_t*)&ctx->destination_entries;
+ total_entries = sizeof(ctx->destination_entries)/sizeof(wlfc_mac_descriptor_t);
+
+ for (i = 0; i < total_entries; i++) {
+ entry = &table[(token_pos + i) % total_entries];
+ if (entry->occupied) {
+ if (!_dhd_wlfc_is_destination_closed(ctx, entry, prec)) {
+ p = pktq_mdeq(&entry->psq,
+ /* higher precedence will be picked up first,
+ i.e. suppressed packets before delayed ones
+ */
+ (NBITVAL((prec << 1) + 1) | NBITVAL((prec << 1))),
+ &pout);
+ if (p != NULL) {
+ /* did the packet come from suppress sub-queue? */
+ if (pout == ((prec << 1) + 1)) {
+ /*
+ this packet was suppressed and was sent on the bus
+ previously; this already has a header
+ */
+ *needs_hdr = 0;
+ }
+ if (entry->requested_credit > 0) {
+ entry->requested_credit--;
+#ifdef PROP_TXSTATUS_DEBUG
+ entry->dstncredit_sent_packets++;
+#endif
+ /*
+ if the packet was pulled out while destination is in
+ closed state but had a non-zero packets requested,
+ then this should not count against the FIFO credit.
+ That is due to the fact that the firmware will
+ most likely hold onto this packet until a suitable
+ time later to push it to the appropriate AC FIFO.
+ */
+ if (entry->state == WLFC_STATE_CLOSE)
+ *ac_credit_spent = 0;
+ }
+ else if (entry->requested_packet > 0) {
+ entry->requested_packet--;
+ DHD_PKTTAG_SETONETIMEPKTRQST(PKTTAG(p));
+ if (entry->state == WLFC_STATE_CLOSE)
+ *ac_credit_spent = 0;
+ }
+ /* move token to ensure fair round-robin */
+ ctx->token_pos[prec] =
+ (token_pos + i + 1) % total_entries;
+ *entry_out = entry;
+ _dhd_wlfc_flow_control_check(ctx, &entry->psq,
+ DHD_PKTTAG_IF(PKTTAG(p)));
+ /*
+ A packet has been picked up, update traffic
+ availability bitmap, if applicable
+ */
+ _dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+ return p;
+ }
+ }
+ }
+ }
+ return NULL;
+}
+
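+/*
+	Drain SENDQ for this precedence: unicast packets with a known destination are
+	moved to that destination's delay sub-queue; the first bc/mc packet or packet
+	without a table entry is returned to the caller.
+*/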
+static void*
+_dhd_wlfc_deque_sendq(athost_wl_status_info_t* ctx, int prec)
+{
+ wlfc_mac_descriptor_t* entry;
+ void* p;
+
+
+ p = pktq_pdeq(&ctx->SENDQ, prec);
+ if (p != NULL) {
+ if (ETHER_ISMULTI(DHD_PKTTAG_DSTN(PKTTAG(p))))
+ /* bc/mc packets do not have a delay queue */
+ return p;
+
+ entry = _dhd_wlfc_find_table_entry(ctx, p);
+
+ if (entry == NULL) {
+ WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return p;
+ }
+
+ while ((p != NULL)) {
+ /*
+ - suppressed packets go to sub_queue[2*prec + 1] AND
+ - delayed packets go to sub_queue[2*prec + 0] to ensure
+ order of delivery.
+ */
+ if (WLFC_PKTQ_PENQ(&entry->psq, (prec << 1), p) == NULL) {
+ WLFC_DBGMESG(("D"));
+ /* dhd_txcomplete(ctx->dhdp, p, FALSE); */
+ PKTFREE(ctx->osh, p, TRUE);
+ ctx->stats.delayq_full_error++;
+ }
+ /*
+ A packet has been pushed, update traffic availability bitmap,
+ if applicable
+ */
+ _dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+
+ p = pktq_pdeq(&ctx->SENDQ, prec);
+ if (p == NULL)
+ break;
+
+ entry = _dhd_wlfc_find_table_entry(ctx, p);
+
+ if ((entry == NULL) || (ETHER_ISMULTI(DHD_PKTTAG_DSTN(PKTTAG(p))))) {
+ return p;
+ }
+ }
+ }
+ return p;
+}
+
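+/* Add, refresh or delete a MAC descriptor entry; ADD also initializes the entry's packet queue */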
+static int
+_dhd_wlfc_mac_entry_update(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
+ ewlfc_mac_entry_action_t action, uint8 ifid, uint8 iftype, uint8* ea)
+{
+ int rc = BCME_OK;
+
+ if (action == eWLFC_MAC_ENTRY_ACTION_ADD) {
+ entry->occupied = 1;
+ entry->state = WLFC_STATE_OPEN;
+ entry->requested_credit = 0;
+ entry->interface_id = ifid;
+ entry->iftype = iftype;
+ entry->ac_bitmap = 0xff; /* update this when handling APSD */
+ /* for an interface entry we may not care about the MAC address */
+ if (ea != NULL)
+ memcpy(&entry->ea[0], ea, ETHER_ADDR_LEN);
+ pktq_init(&entry->psq, WLFC_PSQ_PREC_COUNT, WLFC_PSQ_LEN);
+ }
+ else if (action == eWLFC_MAC_ENTRY_ACTION_UPDATE) {
+ entry->occupied = 1;
+ entry->state = WLFC_STATE_OPEN;
+ entry->requested_credit = 0;
+ entry->interface_id = ifid;
+ entry->iftype = iftype;
+ entry->ac_bitmap = 0xff; /* update this when handling APSD */
+ /* for an interface entry we may not care about the MAC address */
+ if (ea != NULL)
+ memcpy(&entry->ea[0], ea, ETHER_ADDR_LEN);
+ }
+ else if (action == eWLFC_MAC_ENTRY_ACTION_DEL) {
+ entry->occupied = 0;
+ entry->state = WLFC_STATE_CLOSE;
+ entry->requested_credit = 0;
+		/* enable after packets are queued-dequeued properly.
+ pktq_flush(dhd->osh, &entry->psq, FALSE, NULL, 0);
+ */
+ }
+ return rc;
+}
+
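+/* Borrow one FIFO credit for borrower_ac from the lowest-priority AC that has credit available */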
+int
+_dhd_wlfc_borrow_credit(athost_wl_status_info_t* ctx, uint8 available_credit_map, int borrower_ac)
+{
+ int lender_ac;
+ int rc = BCME_ERROR;
+
+ if (ctx == NULL || available_credit_map == 0) {
+ WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ /* Borrow from lowest priority available AC (including BC/MC credits) */
+ for (lender_ac = 0; lender_ac <= AC_COUNT; lender_ac++) {
+		if ((available_credit_map & (1 << lender_ac)) &&
+ (ctx->FIFO_credit[lender_ac] > 0)) {
+ ctx->credits_borrowed[borrower_ac][lender_ac]++;
+ ctx->FIFO_credit[lender_ac]--;
+ rc = BCME_OK;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+int
+dhd_wlfc_interface_entry_update(void* state,
+ ewlfc_mac_entry_action_t action, uint8 ifid, uint8 iftype, uint8* ea)
+{
+ athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+ wlfc_mac_descriptor_t* entry;
+
+ if (ifid >= WLFC_MAX_IFNUM)
+ return BCME_BADARG;
+
+ entry = &ctx->destination_entries.interfaces[ifid];
+ return _dhd_wlfc_mac_entry_update(ctx, entry, action, ifid, iftype, ea);
+}
+
+int
+dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits)
+{
+ athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+
+ /* update the AC FIFO credit map */
+ ctx->FIFO_credit[0] = credits[0];
+ ctx->FIFO_credit[1] = credits[1];
+ ctx->FIFO_credit[2] = credits[2];
+ ctx->FIFO_credit[3] = credits[3];
+ /* credit for bc/mc packets */
+ ctx->FIFO_credit[4] = credits[4];
+ /* credit for ATIM FIFO is not used yet. */
+ ctx->FIFO_credit[5] = 0;
+ return BCME_OK;
+}
+
+int
+dhd_wlfc_enque_sendq(void* state, int prec, void* p)
+{
+ athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+
+ if ((state == NULL) ||
+ /* prec = AC_COUNT is used for bc/mc queue */
+ (prec > AC_COUNT) ||
+ (p == NULL)) {
+ WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+ if (FALSE == dhd_prec_enq(ctx->dhdp, &ctx->SENDQ, p, prec)) {
+ ctx->stats.sendq_full_error++;
+ /*
+ WLFC_DBGMESG(("Error: %s():%d, qlen:%d\n",
+ __FUNCTION__, __LINE__, ctx->SENDQ.len));
+ */
+ WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, prec);
+ WLFC_DBGMESG(("Q"));
+ PKTFREE(ctx->osh, p, TRUE);
+ return BCME_ERROR;
+ }
+ ctx->stats.pktin++;
+ /* _dhd_wlfc_flow_control_check(ctx, &ctx->SENDQ, DHD_PKTTAG_IF(PKTTAG(p))); */
+ return BCME_OK;
+}
+
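+/*
+	Commit a single packet to the bus: mark whether it consumed FIFO credit, run
+	pre-tx processing, and on a bus failure roll the packet back onto its
+	originating queue.
+*/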
+int
+_dhd_wlfc_handle_packet_commit(athost_wl_status_info_t* ctx, int ac,
+ dhd_wlfc_commit_info_t *commit_info, f_commitpkt_t fcommit, void* commit_ctx)
+{
+ uint32 hslot;
+ int rc;
+
+ /*
+ if ac_fifo_credit_spent = 0
+
+ This packet will not count against the FIFO credit.
+ To ensure the txstatus corresponding to this packet
+ does not provide an implied credit (default behavior)
+ mark the packet accordingly.
+
+ if ac_fifo_credit_spent = 1
+
+ This is a normal packet and it counts against the FIFO
+ credit count.
+ */
+ DHD_PKTTAG_SETCREDITCHECK(PKTTAG(commit_info->p), commit_info->ac_fifo_credit_spent);
+ rc = _dhd_wlfc_pretx_pktprocess(ctx, commit_info->mac_entry, commit_info->p,
+ commit_info->needs_hdr, &hslot);
+
+ if (rc == BCME_OK)
+ rc = fcommit(commit_ctx, commit_info->p);
+ else
+ ctx->stats.generic_error++;
+
+ if (rc == BCME_OK) {
+ ctx->stats.pkt2bus++;
+ if (commit_info->ac_fifo_credit_spent) {
+ ctx->stats.sendq_pkts[ac]++;
+ WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac);
+ }
+ }
+ else {
+ /*
+ bus commit has failed, rollback.
+ - remove wl-header for a delayed packet
+ - save wl-header header for suppressed packets
+ */
+ rc = _dhd_wlfc_rollback_packet_toq(ctx, commit_info->p,
+ (commit_info->pkt_type), hslot);
+ if (rc != BCME_OK)
+ ctx->stats.rollback_failed++;
+
+ rc = BCME_ERROR;
+ }
+
+ return rc;
+}
+
+int
+dhd_wlfc_commit_packets(void* state, f_commitpkt_t fcommit, void* commit_ctx)
+{
+ int ac;
+ int credit;
+ int rc;
+ dhd_wlfc_commit_info_t commit_info;
+ athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+ int credit_count = 0;
+ int bus_retry_count = 0;
+ uint8 ac_available = 0; /* Bitmask for 4 ACs + BC/MC */
+
+ if ((state == NULL) ||
+ (fcommit == NULL)) {
+ WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ memset(&commit_info, 0, sizeof(commit_info));
+
+ /*
+ Commit packets for regular AC traffic. Higher priority first.
+ First, use up FIFO credits available to each AC. Based on distribution
+ and credits left, borrow from other ACs as applicable
+
+ -NOTE:
+ If the bus between the host and firmware is overwhelmed by the
+ traffic from host, it is possible that higher priority traffic
+ starves the lower priority queue. If that occurs often, we may
+ have to employ weighted round-robin or ucode scheme to avoid
+ low priority packet starvation.
+ */
+
+ for (ac = AC_COUNT; ac >= 0; ac--) {
+
+ int initial_credit_count = ctx->FIFO_credit[ac];
+
+		/* packets from SENDQ are fresh; they need a header and have no MAC entry yet */
+ commit_info.needs_hdr = 1;
+ commit_info.mac_entry = NULL;
+ commit_info.pkt_type = eWLFC_PKTTYPE_NEW;
+
+ do {
+ commit_info.p = _dhd_wlfc_deque_sendq(ctx, ac);
+ if (commit_info.p == NULL)
+ break;
+ else if (ETHER_ISMULTI(DHD_PKTTAG_DSTN(PKTTAG(commit_info.p)))) {
+ ASSERT(ac == AC_COUNT);
+
+ if (ctx->FIFO_credit[ac]) {
+ rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info,
+ fcommit, commit_ctx);
+
+ /* Bus commits may fail (e.g. flow control); abort after retries */
+ if (rc == BCME_OK) {
+ if (commit_info.ac_fifo_credit_spent) {
+ (void) _dhd_wlfc_borrow_credit(ctx,
+ ac_available, ac);
+ credit_count--;
+ }
+ } else {
+ bus_retry_count++;
+ if (bus_retry_count >= BUS_RETRIES) {
+ DHD_ERROR((" %s: bus error\n",
+ __FUNCTION__));
+ return rc;
+ }
+ }
+ }
+ }
+
+ } while (commit_info.p);
+
+ for (credit = 0; credit < ctx->FIFO_credit[ac];) {
+ commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac,
+ &(commit_info.ac_fifo_credit_spent),
+ &(commit_info.needs_hdr),
+ &(commit_info.mac_entry));
+
+ if (commit_info.p == NULL)
+ break;
+
+ commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED :
+ eWLFC_PKTTYPE_SUPPRESSED;
+
+ rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info,
+ fcommit, commit_ctx);
+
+ /* Bus commits may fail (e.g. flow control); abort after retries */
+ if (rc == BCME_OK) {
+ if (commit_info.ac_fifo_credit_spent) {
+ credit++;
+ }
+ }
+ else {
+ bus_retry_count++;
+ if (bus_retry_count >= BUS_RETRIES) {
+ DHD_ERROR(("dhd_wlfc_commit_packets(): bus error\n"));
+ ctx->FIFO_credit[ac] -= credit;
+ return rc;
+ }
+ }
+ }
+
+ ctx->FIFO_credit[ac] -= credit;
+
+
+		/* If no credits were used, this AC was idle and its credits can be
+		   lent out. Note that resv credits cannot be borrowed.
+		*/
+ if (initial_credit_count == ctx->FIFO_credit[ac]) {
+ ac_available |= (1 << ac);
+ credit_count += ctx->FIFO_credit[ac];
+ }
+ }
+
+ /* We borrow only for AC_BE and only if no other traffic seen for DEFER_PERIOD
+
+ Note that (ac_available & WLFC_AC_BE_TRAFFIC_ONLY) is done to:
+ a) ignore BC/MC for deferring borrow
+ b) ignore AC_BE being available along with other ACs
+ (this should happen only for pure BC/MC traffic)
+
+ i.e. AC_VI, AC_VO, AC_BK all MUST be available (i.e. no traffic) and
+ we do not care if AC_BE and BC/MC are available or not
+ */
+ if ((ac_available & WLFC_AC_BE_TRAFFIC_ONLY) == WLFC_AC_BE_TRAFFIC_ONLY) {
+
+ if (ctx->allow_credit_borrow) {
+ ac = 1; /* Set ac to AC_BE and borrow credits */
+ }
+ else {
+ int delta;
+ int curr_t = OSL_SYSUPTIME();
+
+ if (curr_t > ctx->borrow_defer_timestamp)
+ delta = curr_t - ctx->borrow_defer_timestamp;
+ else
+ delta = 0xffffffff + curr_t - ctx->borrow_defer_timestamp;
+
+ if (delta >= WLFC_BORROW_DEFER_PERIOD_MS) {
+ /* Reset borrow but defer to next iteration (defensive borrowing) */
+ ctx->allow_credit_borrow = TRUE;
+ ctx->borrow_defer_timestamp = 0;
+ }
+ return BCME_OK;
+ }
+ }
+ else {
+ /* If we have multiple AC traffic, turn off borrowing, mark time and bail out */
+ ctx->allow_credit_borrow = FALSE;
+ ctx->borrow_defer_timestamp = OSL_SYSUPTIME();
+ return BCME_OK;
+ }
+
+ /* At this point, borrow all credits only for "ac" (which should be set above to AC_BE)
+ Generically use "ac" only in case we extend to all ACs in future
+ */
+ for (; (credit_count > 0);) {
+
+ commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac,
+ &(commit_info.ac_fifo_credit_spent),
+ &(commit_info.needs_hdr),
+ &(commit_info.mac_entry));
+ if (commit_info.p == NULL)
+ break;
+
+ commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED :
+ eWLFC_PKTTYPE_SUPPRESSED;
+
+ rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info,
+ fcommit, commit_ctx);
+
+ /* Bus commits may fail (e.g. flow control); abort after retries */
+ if (rc == BCME_OK) {
+ if (commit_info.ac_fifo_credit_spent) {
+ (void) _dhd_wlfc_borrow_credit(ctx, ac_available, ac);
+ credit_count--;
+ }
+ }
+ else {
+ bus_retry_count++;
+ if (bus_retry_count >= BUS_RETRIES) {
+ DHD_ERROR(("dhd_wlfc_commit_packets(): bus error\n"));
+ return rc;
+ }
+ }
+ }
+
+	/* packets from SENDQ are fresh; they need a header and have no MAC entry yet */
+ commit_info.needs_hdr = 1;
+ commit_info.mac_entry = NULL;
+ commit_info.pkt_type = eWLFC_PKTTYPE_NEW;
+
+ for (; (credit_count > 0);) {
+
+ commit_info.p = _dhd_wlfc_deque_sendq(ctx, ac);
+ if (commit_info.p == NULL)
+ break;
+
+ rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info,
+ fcommit, commit_ctx);
+
+ /* Bus commits may fail (e.g. flow control); abort after retries */
+ if (rc == BCME_OK) {
+ if (commit_info.ac_fifo_credit_spent) {
+ (void) _dhd_wlfc_borrow_credit(ctx, ac_available, ac);
+ credit_count--;
+ }
+ }
+ else {
+ bus_retry_count++;
+ if (bus_retry_count >= BUS_RETRIES) {
+ DHD_ERROR(("dhd_wlfc_commit_packets(): bus error\n"));
+ return rc;
+ }
+ }
+ }
+
+ return BCME_OK;
+}
+
+static uint8
+dhd_wlfc_find_mac_desc_id_from_mac(dhd_pub_t *dhdp, uint8* ea)
+{
+ wlfc_mac_descriptor_t* table =
+ ((athost_wl_status_info_t*)dhdp->wlfc_state)->destination_entries.nodes;
+ uint8 table_index;
+
+ if (ea != NULL) {
+ for (table_index = 0; table_index < WLFC_MAC_DESC_TABLE_SIZE; table_index++) {
+ if ((memcmp(ea, &table[table_index].ea[0], ETHER_ADDR_LEN) == 0) &&
+ table[table_index].occupied)
+ return table_index;
+ }
+ }
+ return WLFC_MAC_DESC_ID_INVALID;
+}
+
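+/*
+	Bus transmit completion: signal-only packets are simply freed; on a failed
+	send, pop the packet from the hanger, return its credit (repaying a borrowed
+	credit first) and report the failure upward.
+*/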
+void
+dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success)
+{
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+ void* p;
+ int fifo_id;
+
+ if (DHD_PKTTAG_SIGNALONLY(PKTTAG(txp))) {
+#ifdef PROP_TXSTATUS_DEBUG
+ wlfc->stats.signal_only_pkts_freed++;
+#endif
+		/* signal-only packet; just free it */
+ PKTFREE(wlfc->osh, txp, TRUE);
+ return;
+ }
+ if (!success) {
+ WLFC_DBGMESG(("At: %s():%d, bus_complete() failure for %p, htod_tag:0x%08x\n",
+ __FUNCTION__, __LINE__, txp, DHD_PKTTAG_H2DTAG(PKTTAG(txp))));
+ dhd_wlfc_hanger_poppkt(wlfc->hanger, WLFC_PKTID_HSLOT_GET(DHD_PKTTAG_H2DTAG
+ (PKTTAG(txp))), &p, 1);
+
+ /* indicate failure and free the packet */
+ dhd_txcomplete(dhd, txp, FALSE);
+
+ /* return the credit, if necessary */
+ if (DHD_PKTTAG_CREDITCHECK(PKTTAG(txp))) {
+ int lender, credit_returned = 0; /* Note that borrower is fifo_id */
+
+ fifo_id = DHD_PKTTAG_FIFO(PKTTAG(txp));
+
+ /* Return credits to highest priority lender first */
+ for (lender = AC_COUNT; lender >= 0; lender--) {
+ if (wlfc->credits_borrowed[fifo_id][lender] > 0) {
+ wlfc->FIFO_credit[lender]++;
+ wlfc->credits_borrowed[fifo_id][lender]--;
+ credit_returned = 1;
+ break;
+ }
+ }
+
+ if (!credit_returned) {
+ wlfc->FIFO_credit[fifo_id]++;
+ }
+ }
+
+ PKTFREE(wlfc->osh, txp, TRUE);
+ }
+ return;
+}
+
+/* Handle discard or suppress indication */
+static int
+dhd_wlfc_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info)
+{
+ uint8 status_flag;
+ uint32 status;
+ int ret;
+ int remove_from_hanger = 1;
+ void* pktbuf;
+ uint8 fifo_id;
+ wlfc_mac_descriptor_t* entry = NULL;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+
+ memcpy(&status, pkt_info, sizeof(uint32));
+ status_flag = WL_TXSTATUS_GET_FLAGS(status);
+ wlfc->stats.txstatus_in++;
+
+ if (status_flag == WLFC_CTL_PKTFLAG_DISCARD) {
+ wlfc->stats.pkt_freed++;
+ }
+
+ else if (status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) {
+ wlfc->stats.d11_suppress++;
+ remove_from_hanger = 0;
+ }
+
+ else if (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS) {
+ wlfc->stats.wl_suppress++;
+ remove_from_hanger = 0;
+ }
+
+ else if (status_flag == WLFC_CTL_PKTFLAG_TOSSED_BYWLC) {
+ wlfc->stats.wlc_tossed_pkts++;
+ }
+
+ ret = dhd_wlfc_hanger_poppkt(wlfc->hanger,
+ WLFC_PKTID_HSLOT_GET(status), &pktbuf, remove_from_hanger);
+ if (ret != BCME_OK) {
+ /* do something */
+ return ret;
+ }
+
+ if (!remove_from_hanger) {
+ /* this packet was suppressed */
+
+ entry = _dhd_wlfc_find_table_entry(wlfc, pktbuf);
+ entry->generation = WLFC_PKTID_GEN(status);
+ }
+
+#ifdef PROP_TXSTATUS_DEBUG
+ {
+ uint32 new_t = OSL_SYSUPTIME();
+ uint32 old_t;
+ uint32 delta;
+ old_t = ((wlfc_hanger_t*)(wlfc->hanger))->items[
+ WLFC_PKTID_HSLOT_GET(status)].push_time;
+
+
+ wlfc->stats.latency_sample_count++;
+ if (new_t > old_t)
+ delta = new_t - old_t;
+ else
+ delta = 0xffffffff + new_t - old_t;
+ wlfc->stats.total_status_latency += delta;
+ wlfc->stats.latency_most_recent = delta;
+
+ wlfc->stats.deltas[wlfc->stats.idx_delta++] = delta;
+ if (wlfc->stats.idx_delta == sizeof(wlfc->stats.deltas)/sizeof(uint32))
+ wlfc->stats.idx_delta = 0;
+ }
+#endif /* PROP_TXSTATUS_DEBUG */
+
+ fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pktbuf));
+
+ /* pick up the implicit credit from this packet */
+ if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pktbuf))) {
+ if (wlfc->proptxstatus_mode == WLFC_FCMODE_IMPLIED_CREDIT) {
+
+ int lender, credit_returned = 0; /* Note that borrower is fifo_id */
+
+ /* Return credits to highest priority lender first */
+ for (lender = AC_COUNT; lender >= 0; lender--) {
+ if (wlfc->credits_borrowed[fifo_id][lender] > 0) {
+ wlfc->FIFO_credit[lender]++;
+ wlfc->credits_borrowed[fifo_id][lender]--;
+ credit_returned = 1;
+ break;
+ }
+ }
+
+ if (!credit_returned) {
+ wlfc->FIFO_credit[fifo_id]++;
+ }
+ }
+ }
+ else {
+ /*
+ if this packet did not count against FIFO credit, it must have
+ taken a requested_credit from the destination entry (for pspoll etc.)
+ */
+ if (!entry) {
+
+ entry = _dhd_wlfc_find_table_entry(wlfc, pktbuf);
+ }
+ if (!DHD_PKTTAG_ONETIMEPKTRQST(PKTTAG(pktbuf)))
+ entry->requested_credit++;
+#ifdef PROP_TXSTATUS_DEBUG
+ entry->dstncredit_acks++;
+#endif
+ }
+ if ((status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) ||
+ (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS)) {
+ ret = _dhd_wlfc_enque_suppressed(wlfc, fifo_id, pktbuf);
+ if (ret != BCME_OK) {
+ /* delay q is full, drop this packet */
+ dhd_wlfc_hanger_poppkt(wlfc->hanger, WLFC_PKTID_HSLOT_GET(status),
+ &pktbuf, 1);
+
+ /* indicate failure and free the packet */
+ dhd_txcomplete(dhd, pktbuf, FALSE);
+ PKTFREE(wlfc->osh, pktbuf, TRUE);
+ }
+ }
+ else {
+ dhd_txcomplete(dhd, pktbuf, TRUE);
+ /* free the packet */
+ PKTFREE(wlfc->osh, pktbuf, TRUE);
+ }
+ return BCME_OK;
+}
+
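+/*
+	Firmware returned FIFO credits per AC: first repay any credits this AC had
+	borrowed (highest-priority lender first), then add the remainder back to the
+	AC's own credit count.
+*/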
+static int
+dhd_wlfc_fifocreditback_indicate(dhd_pub_t *dhd, uint8* credits)
+{
+ int i;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+ for (i = 0; i < WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK; i++) {
+#ifdef PROP_TXSTATUS_DEBUG
+ wlfc->stats.fifo_credits_back[i] += credits[i];
+#endif
+ /* update FIFO credits */
+ if (wlfc->proptxstatus_mode == WLFC_FCMODE_EXPLICIT_CREDIT)
+ {
+ int lender; /* Note that borrower is i */
+
+ /* Return credits to highest priority lender first */
+ for (lender = AC_COUNT; (lender >= 0) && (credits[i] > 0); lender--) {
+ if (wlfc->credits_borrowed[i][lender] > 0) {
+ if (credits[i] >= wlfc->credits_borrowed[i][lender]) {
+ credits[i] -= wlfc->credits_borrowed[i][lender];
+ wlfc->FIFO_credit[lender] +=
+ wlfc->credits_borrowed[i][lender];
+ wlfc->credits_borrowed[i][lender] = 0;
+ }
+ else {
+ wlfc->credits_borrowed[i][lender] -= credits[i];
+ wlfc->FIFO_credit[lender] += credits[i];
+ credits[i] = 0;
+ }
+ }
+ }
+
+ /* If we have more credits left over, these must belong to the AC */
+ if (credits[i] > 0) {
+ wlfc->FIFO_credit[i] += credits[i];
+ }
+ }
+ }
+
+ return BCME_OK;
+}
+
+static int
+dhd_wlfc_rssi_indicate(dhd_pub_t *dhd, uint8* rssi)
+{
+ (void)dhd;
+ (void)rssi;
+ return BCME_OK;
+}
+
+static int
+dhd_wlfc_mac_table_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+{
+ int rc;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+ wlfc_mac_descriptor_t* table;
+ uint8 existing_index;
+ uint8 table_index;
+ uint8 ifid;
+ uint8* ea;
+
+ WLFC_DBGMESG(("%s(), mac [%02x:%02x:%02x:%02x:%02x:%02x],%s,idx:%d,id:0x%02x\n",
+ __FUNCTION__, value[2], value[3], value[4], value[5], value[6], value[7],
+ ((type == WLFC_CTL_TYPE_MACDESC_ADD) ? "ADD":"DEL"),
+ WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]), value[0]));
+
+ table = wlfc->destination_entries.nodes;
+ table_index = WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]);
+ ifid = value[1];
+ ea = &value[2];
+
+ if (type == WLFC_CTL_TYPE_MACDESC_ADD) {
+ existing_index = dhd_wlfc_find_mac_desc_id_from_mac(dhd, &value[2]);
+ if (existing_index == WLFC_MAC_DESC_ID_INVALID) {
+ /* this MAC entry does not exist, create one */
+ if (!table[table_index].occupied) {
+ table[table_index].mac_handle = value[0];
+ rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index],
+ eWLFC_MAC_ENTRY_ACTION_ADD, ifid,
+ wlfc->destination_entries.interfaces[ifid].iftype,
+ ea);
+ }
+ else {
+ /* the space should have been empty, but it's not */
+ wlfc->stats.mac_update_failed++;
+ }
+ }
+ else {
+ /*
+ there is an existing entry, move it to new index
+ if necessary.
+ */
+ if (existing_index != table_index) {
+ /* if we already have an entry, free the old one */
+ table[existing_index].occupied = 0;
+ table[existing_index].state = WLFC_STATE_CLOSE;
+ table[existing_index].requested_credit = 0;
+ table[existing_index].interface_id = 0;
+				/* enable after packets are queued-dequeued properly.
+ pktq_flush(dhd->osh, &table[existing_index].psq, FALSE, NULL, 0);
+ */
+ }
+ }
+ }
+ if (type == WLFC_CTL_TYPE_MACDESC_DEL) {
+ if (table[table_index].occupied) {
+ rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index],
+ eWLFC_MAC_ENTRY_ACTION_DEL, ifid,
+ wlfc->destination_entries.interfaces[ifid].iftype,
+ ea);
+ }
+ else {
+ /* the space should have been occupied, but it's not */
+ wlfc->stats.mac_update_failed++;
+ }
+ }
+ BCM_REFERENCE(rc);
+ return BCME_OK;
+}
+
+static int
+dhd_wlfc_psmode_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+{
+ /* Handle PS on/off indication */
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+ wlfc_mac_descriptor_t* table;
+ wlfc_mac_descriptor_t* desc;
+ uint8 mac_handle = value[0];
+ int i;
+
+ table = wlfc->destination_entries.nodes;
+ desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
+ if (desc->occupied) {
+ /* a fresh PS mode should wipe old ps credits? */
+ desc->requested_credit = 0;
+ if (type == WLFC_CTL_TYPE_MAC_OPEN) {
+ desc->state = WLFC_STATE_OPEN;
+ DHD_WLFC_CTRINC_MAC_OPEN(desc);
+ }
+ else {
+ desc->state = WLFC_STATE_CLOSE;
+ DHD_WLFC_CTRINC_MAC_CLOSE(desc);
+ /*
+ Indicate to firmware if there is any traffic pending.
+ */
+ for (i = AC_BE; i < AC_COUNT; i++) {
+ _dhd_wlfc_traffic_pending_check(wlfc, desc, i);
+ }
+ }
+ }
+ else {
+ wlfc->stats.psmode_update_failed++;
+ }
+ return BCME_OK;
+}
+
+static int
+dhd_wlfc_interface_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+{
+	/* Handle interface open/close indication */
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+ wlfc_mac_descriptor_t* table;
+ uint8 if_id = value[0];
+
+ if (if_id < WLFC_MAX_IFNUM) {
+ table = wlfc->destination_entries.interfaces;
+ if (table[if_id].occupied) {
+ if (type == WLFC_CTL_TYPE_INTERFACE_OPEN) {
+ table[if_id].state = WLFC_STATE_OPEN;
+ /* WLFC_DBGMESG(("INTERFACE[%d] OPEN\n", if_id)); */
+ }
+ else {
+ table[if_id].state = WLFC_STATE_CLOSE;
+ /* WLFC_DBGMESG(("INTERFACE[%d] CLOSE\n", if_id)); */
+ }
+ return BCME_OK;
+ }
+ }
+ wlfc->stats.interface_update_failed++;
+
+ return BCME_OK;
+}
+
+static int
+dhd_wlfc_credit_request(dhd_pub_t *dhd, uint8* value)
+{
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+ wlfc_mac_descriptor_t* table;
+ wlfc_mac_descriptor_t* desc;
+ uint8 mac_handle;
+ uint8 credit;
+
+ table = wlfc->destination_entries.nodes;
+ mac_handle = value[1];
+ credit = value[0];
+
+ desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
+ if (desc->occupied) {
+ desc->requested_credit = credit;
+
+ desc->ac_bitmap = value[2];
+ }
+ else {
+ wlfc->stats.credit_request_failed++;
+ }
+ return BCME_OK;
+}
+
+static int
+dhd_wlfc_packet_request(dhd_pub_t *dhd, uint8* value)
+{
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+ wlfc_mac_descriptor_t* table;
+ wlfc_mac_descriptor_t* desc;
+ uint8 mac_handle;
+ uint8 packet_count;
+
+ table = wlfc->destination_entries.nodes;
+ mac_handle = value[1];
+ packet_count = value[0];
+
+ desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
+ if (desc->occupied) {
+ desc->requested_packet = packet_count;
+
+ desc->ac_bitmap = value[2];
+ }
+ else {
+ wlfc->stats.packet_request_failed++;
+ }
+ return BCME_OK;
+}
+
+static void
+dhd_wlfc_reorderinfo_indicate(uint8 *val, uint8 len, uchar *info_buf, uint *info_len)
+{
+ if (info_len) {
+ if (info_buf) {
+ bcopy(val, info_buf, len);
+ *info_len = len;
+ }
+ else
+ *info_len = 0;
+ }
+}
+
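+/* Walk the TLV-encoded signal header prepended by the firmware and dispatch each entry to its handler */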
+static int
+dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len, uchar *reorder_info_buf,
+ uint *reorder_info_len)
+{
+ uint8 type, len;
+ uint8* value;
+ uint8* tmpbuf;
+ uint16 remainder = tlv_hdr_len;
+ uint16 processed = 0;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+ tmpbuf = (uint8*)PKTDATA(dhd->osh, pktbuf);
+ if (remainder) {
+ while ((processed < (WLFC_MAX_PENDING_DATALEN * 2)) && (remainder > 0)) {
+ type = tmpbuf[processed];
+ if (type == WLFC_CTL_TYPE_FILLER) {
+ remainder -= 1;
+ processed += 1;
+ continue;
+ }
+
+ len = tmpbuf[processed + 1];
+ value = &tmpbuf[processed + 2];
+
+ if (remainder < (2 + len))
+ break;
+
+ remainder -= 2 + len;
+ processed += 2 + len;
+ if (type == WLFC_CTL_TYPE_TXSTATUS)
+ dhd_wlfc_txstatus_update(dhd, value);
+
+ else if (type == WLFC_CTL_TYPE_HOST_REORDER_RXPKTS)
+ dhd_wlfc_reorderinfo_indicate(value, len, reorder_info_buf,
+ reorder_info_len);
+ else if (type == WLFC_CTL_TYPE_FIFO_CREDITBACK)
+ dhd_wlfc_fifocreditback_indicate(dhd, value);
+
+ else if (type == WLFC_CTL_TYPE_RSSI)
+ dhd_wlfc_rssi_indicate(dhd, value);
+
+ else if (type == WLFC_CTL_TYPE_MAC_REQUEST_CREDIT)
+ dhd_wlfc_credit_request(dhd, value);
+
+ else if (type == WLFC_CTL_TYPE_MAC_REQUEST_PACKET)
+ dhd_wlfc_packet_request(dhd, value);
+
+ else if ((type == WLFC_CTL_TYPE_MAC_OPEN) ||
+ (type == WLFC_CTL_TYPE_MAC_CLOSE))
+ dhd_wlfc_psmode_update(dhd, value, type);
+
+ else if ((type == WLFC_CTL_TYPE_MACDESC_ADD) ||
+ (type == WLFC_CTL_TYPE_MACDESC_DEL))
+ dhd_wlfc_mac_table_update(dhd, value, type);
+
+ else if ((type == WLFC_CTL_TYPE_INTERFACE_OPEN) ||
+ (type == WLFC_CTL_TYPE_INTERFACE_CLOSE)) {
+ dhd_wlfc_interface_update(dhd, value, type);
+ }
+ }
+ if (remainder != 0) {
+ /* trouble..., something is not right */
+ wlfc->stats.tlv_parse_failed++;
+ }
+ }
+ return BCME_OK;
+}
+
+int
+dhd_wlfc_init(dhd_pub_t *dhd)
+{
+ char iovbuf[12]; /* Room for "tlv" + '\0' + parameter */
+ /* enable all signals & indicate host proptxstatus logic is active */
+ uint32 tlv = dhd->wlfc_enabled?
+ WLFC_FLAGS_RSSI_SIGNALS |
+ WLFC_FLAGS_XONXOFF_SIGNALS |
+ WLFC_FLAGS_CREDIT_STATUS_SIGNALS |
+ WLFC_FLAGS_HOST_RXRERODER_ACTIVE : 0;
+ /* WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE | WLFC_FLAGS_HOST_RXRERODER_ACTIVE : 0; */
+
+
+ /*
+	try to enable/disable signaling by sending the "tlv" iovar. If that fails,
+	fall back to no flow control? Print a message for now.
+ */
+
+	/* enable proptxstatus signaling by default */
+ bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf));
+ if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+ DHD_ERROR(("dhd_wlfc_init(): failed to enable/disable bdcv2 tlv signaling\n"));
+ }
+ else {
+ /*
+		Leaving the message for now; it should be removed once
+		the tlv situation is stable.
+ */
+ DHD_ERROR(("dhd_wlfc_init(): successfully %s bdcv2 tlv signaling, %d\n",
+ dhd->wlfc_enabled?"enabled":"disabled", tlv));
+ }
+ return BCME_OK;
+}
+
+int
+dhd_wlfc_enable(dhd_pub_t *dhd)
+{
+ int i;
+ athost_wl_status_info_t* wlfc;
+
+ if (!dhd->wlfc_enabled || dhd->wlfc_state)
+ return BCME_OK;
+
+ /* allocate space to track txstatus propagated from firmware */
+ dhd->wlfc_state = MALLOC(dhd->osh, sizeof(athost_wl_status_info_t));
+ if (dhd->wlfc_state == NULL)
+ return BCME_NOMEM;
+
+ /* initialize state space */
+ wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+ memset(wlfc, 0, sizeof(athost_wl_status_info_t));
+
+ /* remember osh & dhdp */
+ wlfc->osh = dhd->osh;
+ wlfc->dhdp = dhd;
+
+ wlfc->hanger =
+ dhd_wlfc_hanger_create(dhd->osh, WLFC_HANGER_MAXITEMS);
+ if (wlfc->hanger == NULL) {
+ MFREE(dhd->osh, dhd->wlfc_state, sizeof(athost_wl_status_info_t));
+ dhd->wlfc_state = NULL;
+ return BCME_NOMEM;
+ }
+
+ /* initialize all interfaces to accept traffic */
+ for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+ wlfc->hostif_flow_state[i] = OFF;
+ }
+
+ /*
+ create the SENDQ containing
+ sub-queues for all AC precedences + 1 for bc/mc traffic
+ */
+ pktq_init(&wlfc->SENDQ, (AC_COUNT + 1), WLFC_SENDQ_LEN);
+
+ wlfc->destination_entries.other.state = WLFC_STATE_OPEN;
+ /* bc/mc FIFO is always open [credit aside], i.e. b[5] */
+ wlfc->destination_entries.other.ac_bitmap = 0x1f;
+ wlfc->destination_entries.other.interface_id = 0;
+
+ wlfc->proptxstatus_mode = WLFC_FCMODE_EXPLICIT_CREDIT;
+
+ wlfc->allow_credit_borrow = TRUE;
+ wlfc->borrow_defer_timestamp = 0;
+
+ return BCME_OK;
+}
+
+/* release all packet resources */
+void
+dhd_wlfc_cleanup(dhd_pub_t *dhd)
+{
+ int i;
+ int total_entries;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+ wlfc_mac_descriptor_t* table;
+ wlfc_hanger_t* h;
+
+ if (dhd->wlfc_state == NULL)
+ return;
+
+ total_entries = sizeof(wlfc->destination_entries)/sizeof(wlfc_mac_descriptor_t);
+ /* search all entries, include nodes as well as interfaces */
+ table = (wlfc_mac_descriptor_t*)&wlfc->destination_entries;
+
+ for (i = 0; i < total_entries; i++) {
+ if (table[i].occupied) {
+ if (table[i].psq.len) {
+ WLFC_DBGMESG(("%s(): DELAYQ[%d].len = %d\n",
+ __FUNCTION__, i, table[i].psq.len));
+ /* release packets held in DELAYQ */
+ pktq_flush(wlfc->osh, &table[i].psq, TRUE, NULL, 0);
+ }
+ table[i].occupied = 0;
+ }
+ }
+ /* release packets held in SENDQ */
+ if (wlfc->SENDQ.len)
+ pktq_flush(wlfc->osh, &wlfc->SENDQ, TRUE, NULL, 0);
+ /* any in the hanger? */
+ h = (wlfc_hanger_t*)wlfc->hanger;
+ for (i = 0; i < h->max_items; i++) {
+ if (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) {
+ PKTFREE(wlfc->osh, h->items[i].pkt, TRUE);
+ }
+ }
+ return;
+}
+
+void
+dhd_wlfc_deinit(dhd_pub_t *dhd)
+{
+ /* cleanup all psq related resources */
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+
+ if (dhd->wlfc_state == NULL)
+ return;
+
+#ifdef PROP_TXSTATUS_DEBUG
+ {
+ int i;
+ wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger;
+ for (i = 0; i < h->max_items; i++) {
+ if (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) {
+ WLFC_DBGMESG(("%s() pkt[%d] = 0x%p, FIFO_credit_used:%d\n",
+ __FUNCTION__, i, h->items[i].pkt,
+ DHD_PKTTAG_CREDITCHECK(PKTTAG(h->items[i].pkt))));
+ }
+ }
+ }
+#endif
+ /* delete hanger */
+ dhd_wlfc_hanger_delete(dhd->osh, wlfc->hanger);
+
+ /* free top structure */
+ MFREE(dhd->osh, dhd->wlfc_state, sizeof(athost_wl_status_info_t));
+ dhd->wlfc_state = NULL;
+ return;
+}
+#endif /* PROP_TXSTATUS */
+
+void
+dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+ bcm_bprintf(strbuf, "Protocol CDC: reqid %d\n", dhdp->prot->reqid);
+#ifdef PROP_TXSTATUS
+ if (dhdp->wlfc_state)
+ dhd_wlfc_dump(dhdp, strbuf);
+#endif
+}
+
+void
+dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *pktbuf)
+{
+#ifdef BDC
+ struct bdc_header *h;
+#endif /* BDC */
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BDC
+	/* Push BDC header, used to convey priority for buses that don't carry it themselves */
+
+ PKTPUSH(dhd->osh, pktbuf, BDC_HEADER_LEN);
+
+ h = (struct bdc_header *)PKTDATA(dhd->osh, pktbuf);
+
+ h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
+ if (PKTSUMNEEDED(pktbuf))
+ h->flags |= BDC_FLAG_SUM_NEEDED;
+
+
+ h->priority = (PKTPRIO(pktbuf) & BDC_PRIORITY_MASK);
+ h->flags2 = 0;
+ h->dataOffset = 0;
+#endif /* BDC */
+ BDC_SET_IF_IDX(h, ifidx);
+}
+
+int
+dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pktbuf, uchar *reorder_buf_info,
+ uint *reorder_info_len)
+{
+#ifdef BDC
+ struct bdc_header *h;
+#endif
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BDC
+ if (reorder_info_len)
+ *reorder_info_len = 0;
+	/* Pop BDC header, used to convey priority for buses that don't carry it themselves */
+
+ if (PKTLEN(dhd->osh, pktbuf) < BDC_HEADER_LEN) {
+ DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+ PKTLEN(dhd->osh, pktbuf), BDC_HEADER_LEN));
+ return BCME_ERROR;
+ }
+
+ h = (struct bdc_header *)PKTDATA(dhd->osh, pktbuf);
+
+ if ((*ifidx = BDC_GET_IF_IDX(h)) >= DHD_MAX_IFS) {
+ DHD_ERROR(("%s: rx data ifnum out of range (%d)\n",
+ __FUNCTION__, *ifidx));
+ return BCME_ERROR;
+ }
+
+#if defined(NDIS630)
+ h->dataOffset = 0;
+#endif
+ if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) != BDC_PROTO_VER) {
+ DHD_ERROR(("%s: non-BDC packet received, flags = 0x%x\n",
+ dhd_ifname(dhd, *ifidx), h->flags));
+ if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) == BDC_PROTO_VER_1)
+ h->dataOffset = 0;
+ else
+ return BCME_ERROR;
+ }
+
+ if (h->flags & BDC_FLAG_SUM_GOOD) {
+ DHD_INFO(("%s: BDC packet received with good rx-csum, flags 0x%x\n",
+ dhd_ifname(dhd, *ifidx), h->flags));
+ PKTSETSUMGOOD(pktbuf, TRUE);
+ }
+
+ PKTSETPRIO(pktbuf, (h->priority & BDC_PRIORITY_MASK));
+ PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN);
+#endif /* BDC */
+
+#if !defined(NDIS630)
+ if (PKTLEN(dhd->osh, pktbuf) < (uint32) (h->dataOffset << 2)) {
+ DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+ PKTLEN(dhd->osh, pktbuf), (h->dataOffset * 4)));
+ return BCME_ERROR;
+ }
+#endif
+#ifdef PROP_TXSTATUS
+ if (dhd->wlfc_state &&
+ ((athost_wl_status_info_t*)dhd->wlfc_state)->proptxstatus_mode
+ != WLFC_FCMODE_NONE &&
+ (!DHD_PKTTAG_PKTDIR(PKTTAG(pktbuf)))) {
+ /*
+ - parse txstatus only for packets that came from the firmware
+ */
+ dhd_os_wlfc_block(dhd);
+ dhd_wlfc_parse_header_info(dhd, pktbuf, (h->dataOffset << 2),
+ reorder_buf_info, reorder_info_len);
+ ((athost_wl_status_info_t*)dhd->wlfc_state)->stats.dhd_hdrpulls++;
+ dhd_wlfc_commit_packets(dhd->wlfc_state, (f_commitpkt_t)dhd_bus_txdata,
+ (void *)dhd->bus);
+ dhd_os_wlfc_unblock(dhd);
+ }
+#endif /* PROP_TXSTATUS */
+#if !defined(NDIS630)
+ PKTPULL(dhd->osh, pktbuf, (h->dataOffset << 2));
+#endif
+ return 0;
+}
+
+int
+dhd_prot_attach(dhd_pub_t *dhd)
+{
+ dhd_prot_t *cdc;
+
+ if (!(cdc = (dhd_prot_t *)DHD_OS_PREALLOC(dhd->osh, DHD_PREALLOC_PROT,
+ sizeof(dhd_prot_t)))) {
+ DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ goto fail;
+ }
+ memset(cdc, 0, sizeof(dhd_prot_t));
+
+ /* ensure that the msg buf directly follows the cdc msg struct */
+ if ((uintptr)(&cdc->msg + 1) != (uintptr)cdc->buf) {
+ DHD_ERROR(("dhd_prot_t is not correctly defined\n"));
+ goto fail;
+ }
+
+ dhd->prot = cdc;
+#ifdef BDC
+ dhd->hdrlen += BDC_HEADER_LEN;
+#endif
+ dhd->maxctl = WLC_IOCTL_MAXLEN + sizeof(cdc_ioctl_t) + ROUND_UP_MARGIN;
+ return 0;
+
+fail:
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+ if (cdc != NULL)
+ MFREE(dhd->osh, cdc, sizeof(dhd_prot_t));
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+ return BCME_NOMEM;
+}
+
+/* ~NOTE~ What if another thread is waiting on the semaphore? Holding it? */
+void
+dhd_prot_detach(dhd_pub_t *dhd)
+{
+#ifdef PROP_TXSTATUS
+ dhd_wlfc_deinit(dhd);
+#endif
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+ MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t));
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+ dhd->prot = NULL;
+}
+
+void
+dhd_prot_dstats(dhd_pub_t *dhd)
+{
+ /* No stats from dongle added yet, copy bus stats */
+ dhd->dstats.tx_packets = dhd->tx_packets;
+ dhd->dstats.tx_errors = dhd->tx_errors;
+ dhd->dstats.rx_packets = dhd->rx_packets;
+ dhd->dstats.rx_errors = dhd->rx_errors;
+ dhd->dstats.rx_dropped = dhd->rx_dropped;
+ dhd->dstats.multicast = dhd->rx_multicast;
+ return;
+}
+
+int
+dhd_prot_init(dhd_pub_t *dhd)
+{
+ int ret = 0;
+ wlc_rev_info_t revinfo;
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+
+ /* Get the device rev info */
+ memset(&revinfo, 0, sizeof(revinfo));
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
+ if (ret < 0)
+ goto done;
+
+
+#ifdef PROP_TXSTATUS
+ ret = dhd_wlfc_init(dhd);
+#endif
+
+#if defined(WL_CFG80211)
+ if (dhd_download_fw_on_driverload)
+#endif /* defined(WL_CFG80211) */
+ ret = dhd_preinit_ioctls(dhd);
+
+ /* Always assumes wl for now */
+ dhd->iswl = TRUE;
+
+done:
+ return ret;
+}
+
+void
+dhd_prot_stop(dhd_pub_t *dhd)
+{
+ /* Nothing to do for CDC */
+}
+
+
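+/*
+	Collect buffered packets from the reorder ring between start and end
+	(circular; start == end means the whole ring), chain them together and
+	return head, tail and count to the caller.
+*/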
+static void
+dhd_get_hostreorder_pkts(void *osh, struct reorder_info *ptr, void **pkt,
+ uint32 *pkt_count, void **pplast, uint8 start, uint8 end)
+{
+ uint i;
+ void *plast = NULL, *p;
+ uint32 pkt_cnt = 0;
+
+ if (ptr->pend_pkts == 0) {
+ DHD_REORDER(("%s: no packets in reorder queue \n", __FUNCTION__));
+ *pplast = NULL;
+ *pkt_count = 0;
+ *pkt = NULL;
+ return;
+ }
+ if (start == end)
+ i = ptr->max_idx + 1;
+ else {
+ if (start > end)
+ i = (ptr->max_idx - end) + start;
+ else
+ i = end - start;
+ }
+ while (i) {
+ p = (void *)(ptr->p[start]);
+ ptr->p[start] = NULL;
+
+ if (p != NULL) {
+ if (plast == NULL)
+ *pkt = p;
+ else
+ PKTSETNEXT(osh, plast, p);
+
+ plast = p;
+ pkt_cnt++;
+ }
+ i--;
+ if (start++ == ptr->max_idx)
+ start = 0;
+ }
+ *pplast = plast;
+ *pkt_count = (uint32)pkt_cnt;
+}
+
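+/*
+	Apply per-flow host reorder metadata to the received packet: create or
+	delete the flow's reorder state, hold packets that fill a hole, and release
+	any in-order run (chained via PKTSETNEXT) back to the caller.
+*/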
+int
+dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, uint reorder_info_len,
+ void **pkt, uint32 *pkt_count)
+{
+ uint8 flow_id, max_idx, cur_idx, exp_idx;
+ struct reorder_info *ptr;
+ uint8 flags;
+ void *cur_pkt, *plast = NULL;
+ uint32 cnt = 0;
+
+ if (pkt == NULL) {
+ if (pkt_count != NULL)
+ *pkt_count = 0;
+ return 0;
+ }
+ cur_pkt = *pkt;
+ *pkt = NULL;
+
+ flow_id = reorder_info_buf[WLHOST_REORDERDATA_FLOWID_OFFSET];
+ flags = reorder_info_buf[WLHOST_REORDERDATA_FLAGS_OFFSET];
+
+ DHD_REORDER(("flow_id %d, flags 0x%02x, idx(%d, %d, %d)\n", flow_id, flags,
+ reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET],
+ reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET],
+ reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET]));
+
+ /* validate flags and flow id */
+ if (flags == 0xFF) {
+ DHD_ERROR(("%s: invalid flags...so ignore this packet\n", __FUNCTION__));
+ *pkt_count = 1;
+ return 0;
+ }
+
+ ptr = dhd->reorder_bufs[flow_id];
+ if (flags & WLHOST_REORDERDATA_DEL_FLOW) {
+ uint32 buf_size = sizeof(struct reorder_info);
+
+ DHD_REORDER(("%s: Flags indicating to delete a flow id %d\n",
+ __FUNCTION__, flow_id));
+
+ if (ptr == NULL) {
+ DHD_ERROR(("%s: received flags to cleanup, but no flow (%d) yet\n",
+ __FUNCTION__, flow_id));
+ *pkt_count = 1;
+ return 0;
+ }
+
+ dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+ ptr->exp_idx, ptr->exp_idx);
+		/* chain the current packet after the last flushed packet */
+ if (plast) {
+ PKTSETNEXT(dhd->osh, plast, cur_pkt);
+ cnt++;
+ }
+ else {
+ if (cnt != 0) {
+ DHD_ERROR(("%s: del flow: something fishy, pending packets %d\n",
+ __FUNCTION__, cnt));
+ }
+ *pkt = cur_pkt;
+ cnt = 1;
+ }
+ buf_size += ((ptr->max_idx + 1) * sizeof(void *));
+ MFREE(dhd->osh, ptr, buf_size);
+ dhd->reorder_bufs[flow_id] = NULL;
+ *pkt_count = cnt;
+ return 0;
+ }
+	/* all the other cases depend on the existence of the reorder struct for that flow id */
+ if (ptr == NULL) {
+ uint32 buf_size_alloc = sizeof(reorder_info_t);
+ max_idx = reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET];
+
+ buf_size_alloc += ((max_idx + 1) * sizeof(void*));
+ /* allocate space to hold the buffers, index etc */
+
+		DHD_REORDER(("%s: alloc buffer of size %d, reorder info id %d, maxidx %d\n",
+ __FUNCTION__, buf_size_alloc, flow_id, max_idx));
+ ptr = (struct reorder_info *)MALLOC(dhd->osh, buf_size_alloc);
+ if (ptr == NULL) {
+ DHD_ERROR(("%s: Malloc failed to alloc buffer\n", __FUNCTION__));
+ *pkt_count = 1;
+ return 0;
+ }
+ bzero(ptr, buf_size_alloc);
+ dhd->reorder_bufs[flow_id] = ptr;
+ ptr->p = (void *)(ptr+1);
+ ptr->max_idx = max_idx;
+ }
+ if (flags & WLHOST_REORDERDATA_NEW_HOLE) {
+ DHD_REORDER(("%s: new hole, so cleanup pending buffers\n", __FUNCTION__));
+ if (ptr->pend_pkts) {
+ dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+ ptr->exp_idx, ptr->exp_idx);
+ ptr->pend_pkts = 0;
+ }
+ ptr->cur_idx = reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET];
+ ptr->exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET];
+ ptr->max_idx = reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET];
+ ptr->p[ptr->cur_idx] = cur_pkt;
+ ptr->pend_pkts++;
+ *pkt_count = cnt;
+ }
+ else if (flags & WLHOST_REORDERDATA_CURIDX_VALID) {
+ cur_idx = reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET];
+ exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET];
+
+
+ if ((exp_idx == ptr->exp_idx) && (cur_idx != ptr->exp_idx)) {
+ /* still in the current hole */
+ /* enqueue the current on the buffer chain */
+ if (ptr->p[cur_idx] != NULL) {
+ DHD_REORDER(("%s: HOLE: ERROR buffer pending..free it\n",
+ __FUNCTION__));
+ PKTFREE(dhd->osh, ptr->p[cur_idx], TRUE);
+ ptr->p[cur_idx] = NULL;
+ }
+ ptr->p[cur_idx] = cur_pkt;
+ ptr->pend_pkts++;
+ ptr->cur_idx = cur_idx;
+ DHD_REORDER(("%s: fill up a hole..pending packets is %d\n",
+ __FUNCTION__, ptr->pend_pkts));
+ *pkt_count = 0;
+ *pkt = NULL;
+ }
+ else if (ptr->exp_idx == cur_idx) {
+ /* got the right one ..flush from cur to exp and update exp */
+ DHD_REORDER(("%s: got the right one now, cur_idx is %d\n",
+ __FUNCTION__, cur_idx));
+ if (ptr->p[cur_idx] != NULL) {
+ DHD_REORDER(("%s: Error buffer pending..free it\n",
+ __FUNCTION__));
+ PKTFREE(dhd->osh, ptr->p[cur_idx], TRUE);
+ ptr->p[cur_idx] = NULL;
+ }
+ ptr->p[cur_idx] = cur_pkt;
+ ptr->pend_pkts++;
+
+ ptr->cur_idx = cur_idx;
+ ptr->exp_idx = exp_idx;
+
+ dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+ cur_idx, exp_idx);
+ ptr->pend_pkts -= (uint8)cnt;
+ *pkt_count = cnt;
+ DHD_REORDER(("%s: freeing up buffers %d, still pending %d\n",
+ __FUNCTION__, cnt, ptr->pend_pkts));
+ }
+ else {
+ uint8 end_idx;
+ bool flush_current = FALSE;
+ /* both cur and exp are moved now .. */
+ DHD_REORDER(("%s:, flow %d, both moved, cur %d(%d), exp %d(%d)\n",
+ __FUNCTION__, flow_id, ptr->cur_idx, cur_idx,
+ ptr->exp_idx, exp_idx));
+ if (flags & WLHOST_REORDERDATA_FLUSH_ALL)
+ end_idx = ptr->exp_idx;
+ else
+ end_idx = exp_idx;
+
+ /* flush pkts first */
+ dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+ ptr->exp_idx, end_idx);
+
+ if (cur_idx == ptr->max_idx) {
+ if (exp_idx == 0)
+ flush_current = TRUE;
+ } else {
+ if (exp_idx == cur_idx + 1)
+ flush_current = TRUE;
+ }
+ if (flush_current) {
+ if (plast)
+ PKTSETNEXT(dhd->osh, plast, cur_pkt);
+ else
+ *pkt = cur_pkt;
+ cnt++;
+ }
+ else {
+ ptr->p[cur_idx] = cur_pkt;
+ ptr->pend_pkts++;
+ }
+ ptr->exp_idx = exp_idx;
+ ptr->cur_idx = cur_idx;
+ *pkt_count = cnt;
+ }
+ }
+ else {
+ uint8 end_idx;
+ /* no real packet but update to exp_seq...that means explicit window move */
+ exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET];
+
+ DHD_REORDER(("%s: move the window, cur_idx is %d, exp is %d, new exp is %d\n",
+ __FUNCTION__, ptr->cur_idx, ptr->exp_idx, exp_idx));
+ if (flags & WLHOST_REORDERDATA_FLUSH_ALL)
+ end_idx = ptr->exp_idx;
+ else
+ end_idx = exp_idx;
+
+ dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast, ptr->exp_idx, end_idx);
+ ptr->pend_pkts -= (uint8)cnt;
+ if (plast)
+ PKTSETNEXT(dhd->osh, plast, cur_pkt);
+ else
+ *pkt = cur_pkt;
+ cnt++;
+ *pkt_count = cnt;
+ /* set the new expected idx */
+ ptr->exp_idx = exp_idx;
+ }
+ return 0;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_cfg80211.c b/drivers/net/wireless/bcmdhd/dhd_cfg80211.c
new file mode 100644
index 000000000000..556459076b11
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_cfg80211.c
@@ -0,0 +1,673 @@
+/*
+ * Linux cfg80211 driver - Dongle Host Driver (DHD) related
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfg80211.c,v 1.1.4.1.2.14 2011/02/09 01:40:07 Exp $
+ */
+
+#include <net/rtnetlink.h>
+
+#include <bcmutils.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <dhd_cfg80211.h>
+
+#ifdef PKT_FILTER_SUPPORT
+#include <dngl_stats.h>
+#include <dhd.h>
+#endif
+
+extern struct wl_priv *wlcfg_drv_priv;
+
+#ifdef PKT_FILTER_SUPPORT
+extern uint dhd_pkt_filter_enable;
+extern uint dhd_master_mode;
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+#endif
+
+static int dhd_dongle_up = FALSE;
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#include <dhd_cfg80211.h>
+
+static s32 wl_dongle_up(struct net_device *ndev, u32 up);
+
+/**
+ * Function implementations
+ */
+
+s32 dhd_cfg80211_init(struct wl_priv *wl)
+{
+ dhd_dongle_up = FALSE;
+ return 0;
+}
+
+s32 dhd_cfg80211_deinit(struct wl_priv *wl)
+{
+ dhd_dongle_up = FALSE;
+ return 0;
+}
+
+s32 dhd_cfg80211_down(struct wl_priv *wl)
+{
+ dhd_dongle_up = FALSE;
+ return 0;
+}
+
+s32 dhd_cfg80211_set_p2p_info(struct wl_priv *wl, int val)
+{
+ dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub);
+ dhd->op_mode |= val;
+ WL_ERR(("Set : op_mode=%d\n", dhd->op_mode));
+
+#ifdef ARP_OFFLOAD_SUPPORT
+ /* IF P2P is enabled, disable arpoe */
+ dhd_arp_offload_set(dhd, 0);
+ dhd_arp_offload_enable(dhd, false);
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+ return 0;
+}
+
+s32 dhd_cfg80211_clean_p2p_info(struct wl_priv *wl)
+{
+ dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub);
+ dhd->op_mode &= ~CONCURENT_MASK;
+ WL_ERR(("Clean : op_mode=%d\n", dhd->op_mode));
+
+#ifdef ARP_OFFLOAD_SUPPORT
+ /* IF P2P is disabled, enable arpoe back for STA mode. */
+ dhd_arp_offload_set(dhd, dhd_arp_mode);
+ dhd_arp_offload_enable(dhd, true);
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+ return 0;
+}
+
+static s32 wl_dongle_up(struct net_device *ndev, u32 up)
+{
+ s32 err = 0;
+
+ err = wldev_ioctl(ndev, WLC_UP, &up, sizeof(up), true);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_UP error (%d)\n", err));
+ }
+ return err;
+}
+s32 dhd_config_dongle(struct wl_priv *wl, bool need_lock)
+{
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN 32
+#endif
+ struct net_device *ndev;
+ s32 err = 0;
+
+ WL_TRACE(("In\n"));
+ if (dhd_dongle_up) {
+ WL_ERR(("Dongle is already up\n"));
+ return err;
+ }
+
+ ndev = wl_to_prmry_ndev(wl);
+
+ if (need_lock)
+ rtnl_lock();
+
+ err = wl_dongle_up(ndev, 0);
+ if (unlikely(err)) {
+ WL_ERR(("wl_dongle_up failed\n"));
+ goto default_conf_out;
+ }
+ dhd_dongle_up = true;
+
+default_conf_out:
+ if (need_lock)
+ rtnl_unlock();
+ return err;
+
+}
+
+
+/* TODO: clean up the BT-Coex code, it still has some legacy ioctl/iovar functions */
+#define COEX_DHCP
+
+#if defined(COEX_DHCP)
+
+/* use New SCO/eSCO smart YG suppression */
+#define BT_DHCP_eSCO_FIX
+/* this flag boosts wifi pkt priority to max; caution: not fair to SCO */
+#define BT_DHCP_USE_FLAGS
+/* T1: start SCO/eSCO priority suppression */
+#define BT_DHCP_OPPR_WIN_TIME 2500
+/* T2: turn off SCO/eSCO suppression (timeout) */
+#define BT_DHCP_FLAG_FORCE_TIME 5500
+
+enum wl_cfg80211_btcoex_status {
+ BT_DHCP_IDLE,
+ BT_DHCP_START,
+ BT_DHCP_OPPR_WIN,
+ BT_DHCP_FLAG_FORCE_TIMEOUT
+};
+
+/*
+ * read a named driver variable at the given register index into an int and
+ * return an error indication
+ * calling example: dev_wlc_intvar_get_reg(dev, "btc_params", 66, &reg_value)
+ */
+static int
+dev_wlc_intvar_get_reg(struct net_device *dev, char *name,
+ uint reg, int *retval)
+{
+ union {
+ char buf[WLC_IOCTL_SMLEN];
+ int val;
+ } var;
+ int error;
+
+ bcm_mkiovar(name, (char *)(&reg), sizeof(reg),
+ (char *)(&var), sizeof(var.buf));
+ error = wldev_ioctl(dev, WLC_GET_VAR, (char *)(&var), sizeof(var.buf), false);
+
+ *retval = dtoh32(var.val);
+ return (error);
+}
+
+static int
+dev_wlc_bufvar_set(struct net_device *dev, char *name, char *buf, int len)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+ char ioctlbuf_local[1024];
+#else
+ static char ioctlbuf_local[1024];
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
+
+ bcm_mkiovar(name, buf, len, ioctlbuf_local, sizeof(ioctlbuf_local));
+
+ return (wldev_ioctl(dev, WLC_SET_VAR, ioctlbuf_local, sizeof(ioctlbuf_local), true));
+}
+/*
+set a named driver variable at the given register index and return error indication
+calling example: dev_wlc_intvar_set_reg(dev, "btc_params", 66, value)
+*/
+static int
+dev_wlc_intvar_set_reg(struct net_device *dev, char *name, char *addr, char * val)
+{
+ char reg_addr[8];
+
+ memset(reg_addr, 0, sizeof(reg_addr));
+ memcpy((char *)&reg_addr[0], (char *)addr, 4);
+ memcpy((char *)&reg_addr[4], (char *)val, 4);
+
+ return (dev_wlc_bufvar_set(dev, name, (char *)&reg_addr[0], sizeof(reg_addr)));
+}
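+
+/*
+ * Illustrative usage sketch, matching the save/restore pattern used elsewhere
+ * in this file (register 66 and value 0x2710 are only example values):
+ *
+ *	uint32 regaddr = 66, regval = 0x2710;
+ *	dev_wlc_intvar_set_reg(dev, "btc_params",
+ *		(char *)&regaddr, (char *)&regval);
+ */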
+
+static bool btcoex_is_sco_active(struct net_device *dev)
+{
+ int ioc_res = 0;
+ bool res = FALSE;
+ int sco_id_cnt = 0;
+ int param27;
+ int i;
+
+ for (i = 0; i < 12; i++) {
+
+ ioc_res = dev_wlc_intvar_get_reg(dev, "btc_params", 27, &param27);
+
+ WL_TRACE(("%s, sample[%d], btc params: 27:%x\n",
+ __FUNCTION__, i, param27));
+
+ if (ioc_res < 0) {
+ WL_ERR(("%s ioc read btc params error\n", __FUNCTION__));
+ break;
+ }
+
+ if ((param27 & 0x6) == 2) { /* count both sco & esco */
+ sco_id_cnt++;
+ }
+
+ if (sco_id_cnt > 2) {
+ WL_TRACE(("%s, sco/esco detected, pkt id_cnt:%d samples:%d\n",
+ __FUNCTION__, sco_id_cnt, i));
+ res = TRUE;
+ break;
+ }
+
+ msleep(5);
+ }
+
+ return res;
+}
+
+#if defined(BT_DHCP_eSCO_FIX)
+/* Enhanced BT COEX settings for eSCO compatibility during DHCP window */
+static int set_btc_esco_params(struct net_device *dev, bool trump_sco)
+{
+ static bool saved_status = FALSE;
+
+ char buf_reg50va_dhcp_on[8] =
+ { 50, 00, 00, 00, 0x22, 0x80, 0x00, 0x00 };
+ char buf_reg51va_dhcp_on[8] =
+ { 51, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ char buf_reg64va_dhcp_on[8] =
+ { 64, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ char buf_reg65va_dhcp_on[8] =
+ { 65, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ char buf_reg71va_dhcp_on[8] =
+ { 71, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ uint32 regaddr;
+ static uint32 saved_reg50;
+ static uint32 saved_reg51;
+ static uint32 saved_reg64;
+ static uint32 saved_reg65;
+ static uint32 saved_reg71;
+
+ if (trump_sco) {
+ /* this should reduce aggressive eSCO retransmissions
+ * w/o breaking it
+ */
+
+ /* 1st save current */
+ WL_TRACE(("Do new SCO/eSCO coex algo {save &"
+ "override}\n"));
+ if ((!dev_wlc_intvar_get_reg(dev, "btc_params", 50, &saved_reg50)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 51, &saved_reg51)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 64, &saved_reg64)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 65, &saved_reg65)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 71, &saved_reg71))) {
+ saved_status = TRUE;
+ WL_TRACE(("%s saved bt_params[50,51,64,65,71]:"
+ "0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ __FUNCTION__, saved_reg50, saved_reg51,
+ saved_reg64, saved_reg65, saved_reg71));
+ } else {
+ WL_ERR((":%s: save btc_params failed\n",
+ __FUNCTION__));
+ saved_status = FALSE;
+ return -1;
+ }
+
+ WL_TRACE(("override with [50,51,64,65,71]:"
+ "0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ *(u32 *)(buf_reg50va_dhcp_on+4),
+ *(u32 *)(buf_reg51va_dhcp_on+4),
+ *(u32 *)(buf_reg64va_dhcp_on+4),
+ *(u32 *)(buf_reg65va_dhcp_on+4),
+ *(u32 *)(buf_reg71va_dhcp_on+4)));
+
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg50va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg51va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg64va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg65va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg71va_dhcp_on[0], 8);
+
+ saved_status = TRUE;
+ } else if (saved_status) {
+ /* restore previously saved bt params */
+ WL_TRACE(("Do new SCO/eSCO coex algo {save &"
+ "override}\n"));
+
+ regaddr = 50;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg50);
+ regaddr = 51;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg51);
+ regaddr = 64;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg64);
+ regaddr = 65;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg65);
+ regaddr = 71;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg71);
+
+ WL_TRACE(("restore bt_params[50,51,64,65,71]:"
+ "0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ saved_reg50, saved_reg51, saved_reg64,
+ saved_reg65, saved_reg71));
+
+ saved_status = FALSE;
+ } else {
+ WL_ERR((":%s att to restore not saved BTCOEX params\n",
+ __FUNCTION__));
+ return -1;
+ }
+ return 0;
+}
+#endif /* BT_DHCP_eSCO_FIX */
+
+static void
+wl_cfg80211_bt_setflag(struct net_device *dev, bool set)
+{
+#if defined(BT_DHCP_USE_FLAGS)
+ char buf_flag7_dhcp_on[8] = { 7, 00, 00, 00, 0x1, 0x0, 0x00, 0x00 };
+ char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+#endif
+
+
+#if defined(BT_DHCP_eSCO_FIX)
+ /* set = 1: save & turn on; set = 0: turn off & restore previous settings */
+ set_btc_esco_params(dev, set);
+#endif
+
+#if defined(BT_DHCP_USE_FLAGS)
+ WL_TRACE(("WI-FI priority boost via bt flags, set:%d\n", set));
+ if (set == TRUE)
+ /* Forcing bt_flag7 */
+ dev_wlc_bufvar_set(dev, "btc_flags",
+ (char *)&buf_flag7_dhcp_on[0],
+ sizeof(buf_flag7_dhcp_on));
+ else
+ /* Restoring default bt flag7 */
+ dev_wlc_bufvar_set(dev, "btc_flags",
+ (char *)&buf_flag7_default[0],
+ sizeof(buf_flag7_default));
+#endif
+}
+
+static void wl_cfg80211_bt_timerfunc(ulong data)
+{
+ struct btcoex_info *bt_local = (struct btcoex_info *)data;
+ WL_TRACE(("%s\n", __FUNCTION__));
+ bt_local->timer_on = 0;
+ schedule_work(&bt_local->work);
+}
+
+static void wl_cfg80211_bt_handler(struct work_struct *work)
+{
+ struct btcoex_info *btcx_inf;
+
+ btcx_inf = container_of(work, struct btcoex_info, work);
+
+ if (btcx_inf->timer_on) {
+ btcx_inf->timer_on = 0;
+ del_timer_sync(&btcx_inf->timer);
+ }
+
+ switch (btcx_inf->bt_state) {
+ case BT_DHCP_START:
+ /* DHCP started
+ * provide OPPORTUNITY window to get DHCP address
+ */
+ WL_TRACE(("%s bt_dhcp stm: started \n",
+ __FUNCTION__));
+ btcx_inf->bt_state = BT_DHCP_OPPR_WIN;
+ mod_timer(&btcx_inf->timer,
+ jiffies + BT_DHCP_OPPR_WIN_TIME*HZ/1000);
+ btcx_inf->timer_on = 1;
+ break;
+
+ case BT_DHCP_OPPR_WIN:
+ if (btcx_inf->dhcp_done) {
+ WL_TRACE(("%s DHCP Done before T1 expiration\n",
+ __FUNCTION__));
+ goto btc_coex_idle;
+ }
+
+ /* DHCP is not over yet, start lowering BT priority
+ * enforce btc_params + flags if necessary
+ */
+ WL_TRACE(("%s DHCP T1:%d expired\n", __FUNCTION__,
+ BT_DHCP_OPPR_WIN_TIME));
+ if (btcx_inf->dev)
+ wl_cfg80211_bt_setflag(btcx_inf->dev, TRUE);
+ btcx_inf->bt_state = BT_DHCP_FLAG_FORCE_TIMEOUT;
+ mod_timer(&btcx_inf->timer,
+ jiffies + BT_DHCP_FLAG_FORCE_TIME*HZ/1000);
+ btcx_inf->timer_on = 1;
+ break;
+
+ case BT_DHCP_FLAG_FORCE_TIMEOUT:
+ if (btcx_inf->dhcp_done) {
+ WL_TRACE(("%s DHCP Done before T2 expiration\n",
+ __FUNCTION__));
+ } else {
+ /* No DHCP completion during T1+T2; restore BT priority */
+ WL_TRACE(("%s DHCP wait interval T2:%d"
+ "msec expired\n", __FUNCTION__,
+ BT_DHCP_FLAG_FORCE_TIME));
+ }
+
+ /* Restoring default bt priority */
+ if (btcx_inf->dev)
+ wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE);
+btc_coex_idle:
+ btcx_inf->bt_state = BT_DHCP_IDLE;
+ btcx_inf->timer_on = 0;
+ break;
+
+ default:
+ WL_ERR(("%s error g_status=%d !!!\n", __FUNCTION__,
+ btcx_inf->bt_state));
+ if (btcx_inf->dev)
+ wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE);
+ btcx_inf->bt_state = BT_DHCP_IDLE;
+ btcx_inf->timer_on = 0;
+ break;
+ }
+
+ net_os_wake_unlock(btcx_inf->dev);
+}
+
+int wl_cfg80211_btcoex_init(struct wl_priv *wl)
+{
+ struct btcoex_info *btco_inf = NULL;
+
+ btco_inf = kmalloc(sizeof(struct btcoex_info), GFP_KERNEL);
+ if (!btco_inf)
+ return -ENOMEM;
+
+ btco_inf->bt_state = BT_DHCP_IDLE;
+ btco_inf->ts_dhcp_start = 0;
+ btco_inf->ts_dhcp_ok = 0;
+ /* Set up timer for BT */
+ btco_inf->timer_ms = 10;
+ init_timer(&btco_inf->timer);
+ btco_inf->timer.data = (ulong)btco_inf;
+ btco_inf->timer.function = wl_cfg80211_bt_timerfunc;
+
+ btco_inf->dev = wl->wdev->netdev;
+
+ INIT_WORK(&btco_inf->work, wl_cfg80211_bt_handler);
+
+ wl->btcoex_info = btco_inf;
+ return 0;
+}
+
+void wl_cfg80211_btcoex_deinit(struct wl_priv *wl)
+{
+ if (!wl->btcoex_info)
+ return;
+
+ if (!wl->btcoex_info->timer_on) {
+ wl->btcoex_info->timer_on = 0;
+ del_timer_sync(&wl->btcoex_info->timer);
+ }
+
+ cancel_work_sync(&wl->btcoex_info->work);
+
+ kfree(wl->btcoex_info);
+ wl->btcoex_info = NULL;
+}
+#endif
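+
+/*
+ * Handles the "BTCOEXMODE" private command. From the offset arithmetic below,
+ * the expected form is assumed to be "BTCOEXMODE:<n>" (any single separator
+ * character works): '1' opens the DHCP coex window, '2' closes it.
+ */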
+
+int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, char *command)
+{
+
+ struct wl_priv *wl = wlcfg_drv_priv;
+ char powermode_val = 0;
+ char buf_reg66va_dhcp_on[8] = { 66, 00, 00, 00, 0x10, 0x27, 0x00, 0x00 };
+ char buf_reg41va_dhcp_on[8] = { 41, 00, 00, 00, 0x33, 0x00, 0x00, 0x00 };
+ char buf_reg68va_dhcp_on[8] = { 68, 00, 00, 00, 0x90, 0x01, 0x00, 0x00 };
+
+ uint32 regaddr;
+ static uint32 saved_reg66;
+ static uint32 saved_reg41;
+ static uint32 saved_reg68;
+ static bool saved_status = FALSE;
+
+#ifdef COEX_DHCP
+ char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+ struct btcoex_info *btco_inf = wl->btcoex_info;
+#endif /* COEX_DHCP */
+
+#ifdef PKT_FILTER_SUPPORT
+ dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub);
+ int i;
+#endif
+
+ /* Figure out the powermode (1 or 2) from the command */
+ strncpy((char *)&powermode_val, command + strlen("BTCOEXMODE") +1, 1);
+
+ if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) {
+
+ WL_TRACE(("%s: DHCP session starts\n", __FUNCTION__));
+
+#ifdef PKT_FILTER_SUPPORT
+ dhd->dhcp_in_progress = 1;
+
+ /* Disable packet filtering */
+ if (dhd_pkt_filter_enable && dhd->early_suspended) {
+ WL_TRACE(("DHCP in progressing , disable packet filter!!!\n"));
+ for (i = 0; i < dhd->pktfilter_count; i++) {
+ dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
+ 0, dhd_master_mode);
+ }
+ }
+#endif
+
+ /* Retrieve and save the original register values */
+ if ((saved_status == FALSE) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 66, &saved_reg66)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 41, &saved_reg41)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 68, &saved_reg68))) {
+ saved_status = TRUE;
+ WL_TRACE(("Saved 0x%x 0x%x 0x%x\n",
+ saved_reg66, saved_reg41, saved_reg68));
+
+ /* Disable PM mode during the DHCP session */
+#ifdef COEX_DHCP
+ /* Start BT timer only for SCO connection */
+ if (btcoex_is_sco_active(dev)) {
+ /* btc_params 66 */
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg66va_dhcp_on[0],
+ sizeof(buf_reg66va_dhcp_on));
+ /* btc_params 41 0x33 */
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg41va_dhcp_on[0],
+ sizeof(buf_reg41va_dhcp_on));
+ /* btc_params 68 0x190 */
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg68va_dhcp_on[0],
+ sizeof(buf_reg68va_dhcp_on));
+ saved_status = TRUE;
+
+ btco_inf->bt_state = BT_DHCP_START;
+ btco_inf->timer_on = 1;
+ mod_timer(&btco_inf->timer, btco_inf->timer.expires);
+ WL_TRACE(("%s enable BT DHCP Timer\n",
+ __FUNCTION__));
+ }
+#endif /* COEX_DHCP */
+ }
+ else if (saved_status == TRUE) {
+ WL_ERR(("%s was called w/o DHCP OFF. Continue\n", __FUNCTION__));
+ }
+ }
+ else if (strnicmp((char *)&powermode_val, "2", strlen("2")) == 0) {
+
+
+#ifdef PKT_FILTER_SUPPORT
+ dhd->dhcp_in_progress = 0;
+
+ /* Enable packet filtering */
+ if (dhd_pkt_filter_enable && dhd->early_suspended) {
+ WL_TRACE(("DHCP is complete , enable packet filter!!!\n"));
+ for (i = 0; i < dhd->pktfilter_count; i++) {
+ dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
+ 1, dhd_master_mode);
+ }
+ }
+#endif
+
+ /* Restoring PM mode */
+
+#ifdef COEX_DHCP
+ /* Stop any bt timer because DHCP session is done */
+ WL_TRACE(("%s disable BT DHCP Timer\n", __FUNCTION__));
+ if (btco_inf->timer_on) {
+ btco_inf->timer_on = 0;
+ del_timer_sync(&btco_inf->timer);
+
+ if (btco_inf->bt_state != BT_DHCP_IDLE) {
+ /* need to restore original btc flags & extra btc params */
+ WL_TRACE(("%s bt->bt_state:%d\n",
+ __FUNCTION__, btco_inf->bt_state));
+ /* wake up the btcoex work item to restore bt flags + params */
+ schedule_work(&btco_inf->work);
+ }
+ }
+
+ /* Restore the btc_flags parameter regardless */
+ if (saved_status == TRUE)
+ dev_wlc_bufvar_set(dev, "btc_flags",
+ (char *)&buf_flag7_default[0], sizeof(buf_flag7_default));
+#endif /* COEX_DHCP */
+
+ /* Restore original values */
+ if (saved_status == TRUE) {
+ regaddr = 66;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg66);
+ regaddr = 41;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg41);
+ regaddr = 68;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg68);
+
+ WL_TRACE(("restore regs {66,41,68} <- 0x%x 0x%x 0x%x\n",
+ saved_reg66, saved_reg41, saved_reg68));
+ }
+ saved_status = FALSE;
+
+ }
+ else {
+ WL_ERR(("%s Unkwown yet power setting, ignored\n",
+ __FUNCTION__));
+ }
+
+ snprintf(command, 3, "OK");
+
+ return (strlen("OK"));
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_cfg80211.h b/drivers/net/wireless/bcmdhd/dhd_cfg80211.h
new file mode 100644
index 000000000000..922d6edde00e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_cfg80211.h
@@ -0,0 +1,44 @@
+/*
+ * Linux cfg80211 driver - Dongle Host Driver (DHD) related
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfg80211.c,v 1.1.4.1.2.14 2011/02/09 01:40:07 Exp $
+ */
+
+
+#ifndef __DHD_CFG80211__
+#define __DHD_CFG80211__
+
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+
+s32 dhd_cfg80211_init(struct wl_priv *wl);
+s32 dhd_cfg80211_deinit(struct wl_priv *wl);
+s32 dhd_cfg80211_down(struct wl_priv *wl);
+s32 dhd_cfg80211_set_p2p_info(struct wl_priv *wl, int val);
+s32 dhd_cfg80211_clean_p2p_info(struct wl_priv *wl);
+s32 dhd_config_dongle(struct wl_priv *wl, bool need_lock);
+
+int wl_cfg80211_btcoex_init(struct wl_priv *wl);
+void wl_cfg80211_btcoex_deinit(struct wl_priv *wl);
+
+#endif /* __DHD_CFG80211__ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_common.c b/drivers/net/wireless/bcmdhd/dhd_common.c
new file mode 100644
index 000000000000..580355401c0d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_common.c
@@ -0,0 +1,2337 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), common DHD core.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_common.c 327331 2012-04-13 01:42:33Z $
+ */
+#include <typedefs.h>
+#include <osl.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+
+#include <bcmendian.h>
+#include <dngl_stats.h>
+#include <wlioctl.h>
+#include <dhd.h>
+
+#include <proto/bcmevent.h>
+
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <msgtrace.h>
+
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif
+#ifdef WLBTAMP
+#include <proto/bt_amp_hci.h>
+#include <dhd_bta.h>
+#endif
+#ifdef SET_RANDOM_MAC_SOFTAP
+#include <linux/random.h>
+#include <linux/jiffies.h>
+#endif
+
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+#define htodchanspec(i) i
+#define dtohchanspec(i) i
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+
+#ifdef WLMEDIA_HTSF
+extern void htsf_update(struct dhd_info *dhd, void *data);
+#endif
+int dhd_msg_level = DHD_ERROR_VAL;
+
+
+#include <wl_iw.h>
+
+char fw_path[MOD_PARAM_PATHLEN];
+char nv_path[MOD_PARAM_PATHLEN];
+
+#ifdef SOFTAP
+char fw_path2[MOD_PARAM_PATHLEN];
+extern bool softap_enabled;
+#endif
+
+/* Last connection success/failure status */
+uint32 dhd_conn_event;
+uint32 dhd_conn_status;
+uint32 dhd_conn_reason;
+
+extern int dhd_iscan_request(void * dhdp, uint16 action);
+extern void dhd_ind_scan_confirm(void *h, bool status);
+extern int dhd_iscan_in_progress(void *h);
+void dhd_iscan_lock(void);
+void dhd_iscan_unlock(void);
+extern int dhd_change_mtu(dhd_pub_t *dhd, int new_mtu, int ifidx);
+bool ap_cfg_running = FALSE;
+bool ap_fw_loaded = FALSE;
+
+
+#ifdef DHD_DEBUG
+const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR "\nCompiled on "
+ __DATE__ " at " __TIME__;
+#else
+const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR;
+#endif
+
+void dhd_set_timer(void *bus, uint wdtick);
+
+/* IOVar table */
+enum {
+ IOV_VERSION = 1,
+ IOV_MSGLEVEL,
+ IOV_BCMERRORSTR,
+ IOV_BCMERROR,
+ IOV_WDTICK,
+ IOV_DUMP,
+ IOV_CLEARCOUNTS,
+ IOV_LOGDUMP,
+ IOV_LOGCAL,
+ IOV_LOGSTAMP,
+ IOV_GPIOOB,
+ IOV_IOCTLTIMEOUT,
+#ifdef WLBTAMP
+ IOV_HCI_CMD, /* HCI command */
+ IOV_HCI_ACL_DATA, /* HCI data packet */
+#endif
+#if defined(DHD_DEBUG)
+ IOV_CONS,
+ IOV_DCONSOLE_POLL,
+#endif /* defined(DHD_DEBUG) */
+#ifdef PROP_TXSTATUS
+ IOV_PROPTXSTATUS_ENABLE,
+ IOV_PROPTXSTATUS_MODE,
+#endif
+ IOV_BUS_TYPE,
+#ifdef WLMEDIA_HTSF
+ IOV_WLPKTDLYSTAT_SZ,
+#endif
+ IOV_CHANGEMTU,
+ IOV_HOSTREORDER_FLOWS,
+ IOV_LAST
+};
+
+const bcm_iovar_t dhd_iovars[] = {
+ {"version", IOV_VERSION, 0, IOVT_BUFFER, sizeof(dhd_version) },
+#ifdef DHD_DEBUG
+ {"msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 },
+#endif /* DHD_DEBUG */
+ {"bcmerrorstr", IOV_BCMERRORSTR, 0, IOVT_BUFFER, BCME_STRLEN },
+ {"bcmerror", IOV_BCMERROR, 0, IOVT_INT8, 0 },
+ {"wdtick", IOV_WDTICK, 0, IOVT_UINT32, 0 },
+ {"dump", IOV_DUMP, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN },
+#ifdef DHD_DEBUG
+ {"cons", IOV_CONS, 0, IOVT_BUFFER, 0 },
+ {"dconpoll", IOV_DCONSOLE_POLL, 0, IOVT_UINT32, 0 },
+#endif
+ {"clearcounts", IOV_CLEARCOUNTS, 0, IOVT_VOID, 0 },
+ {"gpioob", IOV_GPIOOB, 0, IOVT_UINT32, 0 },
+ {"ioctl_timeout", IOV_IOCTLTIMEOUT, 0, IOVT_UINT32, 0 },
+#ifdef WLBTAMP
+ {"HCI_cmd", IOV_HCI_CMD, 0, IOVT_BUFFER, 0},
+ {"HCI_ACL_data", IOV_HCI_ACL_DATA, 0, IOVT_BUFFER, 0},
+#endif
+#ifdef PROP_TXSTATUS
+ {"proptx", IOV_PROPTXSTATUS_ENABLE, 0, IOVT_UINT32, 0 },
+ /*
+ set the proptxstatus operation mode:
+ 0 - Do not do any proptxtstatus flow control
+ 1 - Use implied credit from a packet status
+ 2 - Use explicit credit
+ */
+ {"ptxmode", IOV_PROPTXSTATUS_MODE, 0, IOVT_UINT32, 0 },
+#endif
+ {"bustype", IOV_BUS_TYPE, 0, IOVT_UINT32, 0},
+#ifdef WLMEDIA_HTSF
+ {"pktdlystatsz", IOV_WLPKTDLYSTAT_SZ, 0, IOVT_UINT8, 0 },
+#endif
+ {"changemtu", IOV_CHANGEMTU, 0, IOVT_UINT32, 0 },
+ {"host_reorder_flows", IOV_HOSTREORDER_FLOWS, 0, IOVT_BUFFER,
+ (WLHOST_REORDERDATA_MAXFLOWS + 1) },
+ {NULL, 0, 0, 0, 0 }
+};
+
+void
+dhd_common_init(osl_t *osh)
+{
+#ifdef CONFIG_BCMDHD_FW_PATH
+ bcm_strncpy_s(fw_path, sizeof(fw_path), CONFIG_BCMDHD_FW_PATH, MOD_PARAM_PATHLEN-1);
+#else /* CONFIG_BCMDHD_FW_PATH */
+ fw_path[0] = '\0';
+#endif /* CONFIG_BCMDHD_FW_PATH */
+#ifdef CONFIG_BCMDHD_NVRAM_PATH
+ bcm_strncpy_s(nv_path, sizeof(nv_path), CONFIG_BCMDHD_NVRAM_PATH, MOD_PARAM_PATHLEN-1);
+#else /* CONFIG_BCMDHD_NVRAM_PATH */
+ nv_path[0] = '\0';
+#endif /* CONFIG_BCMDHD_NVRAM_PATH */
+#ifdef SOFTAP
+ fw_path2[0] = '\0';
+#endif
+}
+
+static int
+dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen)
+{
+ char eabuf[ETHER_ADDR_STR_LEN];
+
+ struct bcmstrbuf b;
+ struct bcmstrbuf *strbuf = &b;
+
+ bcm_binit(strbuf, buf, buflen);
+
+ /* Base DHD info */
+ bcm_bprintf(strbuf, "%s\n", dhd_version);
+ bcm_bprintf(strbuf, "\n");
+ bcm_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n",
+ dhdp->up, dhdp->txoff, dhdp->busstate);
+ bcm_bprintf(strbuf, "pub.hdrlen %d pub.maxctl %d pub.rxsz %d\n",
+ dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz);
+ bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac %s\n",
+ dhdp->iswl, dhdp->drv_version, bcm_ether_ntoa(&dhdp->mac, eabuf));
+ bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %d\n", dhdp->bcmerror, dhdp->tickcnt);
+
+ bcm_bprintf(strbuf, "dongle stats:\n");
+ bcm_bprintf(strbuf, "tx_packets %ld tx_bytes %ld tx_errors %ld tx_dropped %ld\n",
+ dhdp->dstats.tx_packets, dhdp->dstats.tx_bytes,
+ dhdp->dstats.tx_errors, dhdp->dstats.tx_dropped);
+ bcm_bprintf(strbuf, "rx_packets %ld rx_bytes %ld rx_errors %ld rx_dropped %ld\n",
+ dhdp->dstats.rx_packets, dhdp->dstats.rx_bytes,
+ dhdp->dstats.rx_errors, dhdp->dstats.rx_dropped);
+ bcm_bprintf(strbuf, "multicast %ld\n", dhdp->dstats.multicast);
+
+ bcm_bprintf(strbuf, "bus stats:\n");
+ bcm_bprintf(strbuf, "tx_packets %ld tx_multicast %ld tx_errors %ld\n",
+ dhdp->tx_packets, dhdp->tx_multicast, dhdp->tx_errors);
+ bcm_bprintf(strbuf, "tx_ctlpkts %ld tx_ctlerrs %ld\n",
+ dhdp->tx_ctlpkts, dhdp->tx_ctlerrs);
+ bcm_bprintf(strbuf, "rx_packets %ld rx_multicast %ld rx_errors %ld \n",
+ dhdp->rx_packets, dhdp->rx_multicast, dhdp->rx_errors);
+ bcm_bprintf(strbuf, "rx_ctlpkts %ld rx_ctlerrs %ld rx_dropped %ld\n",
+ dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped);
+ bcm_bprintf(strbuf, "rx_readahead_cnt %ld tx_realloc %ld\n",
+ dhdp->rx_readahead_cnt, dhdp->tx_realloc);
+ bcm_bprintf(strbuf, "\n");
+
+ /* Add any prot info */
+ dhd_prot_dump(dhdp, strbuf);
+ bcm_bprintf(strbuf, "\n");
+
+ /* Add any bus info */
+ dhd_bus_dump(dhdp, strbuf);
+
+ return (!strbuf->size ? BCME_BUFTOOSHORT : 0);
+}
+
+int
+dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifindex)
+{
+ wl_ioctl_t ioc;
+
+ ioc.cmd = cmd;
+ ioc.buf = arg;
+ ioc.len = len;
+ ioc.set = set;
+
+ return dhd_wl_ioctl(dhd_pub, ifindex, &ioc, arg, len);
+}
+
+
+int
+dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len)
+{
+ int ret;
+
+ dhd_os_proto_block(dhd_pub);
+
+ ret = dhd_prot_ioctl(dhd_pub, ifindex, ioc, buf, len);
+ if (!ret)
+ dhd_os_check_hang(dhd_pub, ifindex, ret);
+
+ dhd_os_proto_unblock(dhd_pub);
+ return ret;
+}
+
+static int
+dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+ void *params, int plen, void *arg, int len, int val_size)
+{
+ int bcmerror = 0;
+ int32 int_val = 0;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ DHD_TRACE(("%s: actionid = %d; name %s\n", __FUNCTION__, actionid, name));
+
+ if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+ goto exit;
+
+ if (plen >= (int)sizeof(int_val))
+ bcopy(params, &int_val, sizeof(int_val));
+
+ switch (actionid) {
+ case IOV_GVAL(IOV_VERSION):
+ /* Need to have checked buffer length */
+ bcm_strncpy_s((char*)arg, len, dhd_version, len);
+ break;
+
+ case IOV_GVAL(IOV_MSGLEVEL):
+ int_val = (int32)dhd_msg_level;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_MSGLEVEL):
+ dhd_msg_level = int_val;
+#ifdef WL_CFG80211
+ /* Enable DHD and WL logs in oneshot */
+ if (dhd_msg_level & DHD_WL_VAL)
+ wl_cfg80211_enable_trace(dhd_msg_level);
+#endif
+ break;
+ case IOV_GVAL(IOV_BCMERRORSTR):
+ bcm_strncpy_s((char *)arg, len, bcmerrorstr(dhd_pub->bcmerror), BCME_STRLEN);
+ ((char *)arg)[BCME_STRLEN - 1] = 0x00;
+ break;
+
+ case IOV_GVAL(IOV_BCMERROR):
+ int_val = (int32)dhd_pub->bcmerror;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_WDTICK):
+ int_val = (int32)dhd_watchdog_ms;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_WDTICK):
+ if (!dhd_pub->up) {
+ bcmerror = BCME_NOTUP;
+ break;
+ }
+ dhd_os_wd_timer(dhd_pub, (uint)int_val);
+ break;
+
+ case IOV_GVAL(IOV_DUMP):
+ bcmerror = dhd_dump(dhd_pub, arg, len);
+ break;
+
+#ifdef DHD_DEBUG
+ case IOV_GVAL(IOV_DCONSOLE_POLL):
+ int_val = (int32)dhd_console_ms;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DCONSOLE_POLL):
+ dhd_console_ms = (uint)int_val;
+ break;
+
+ case IOV_SVAL(IOV_CONS):
+ if (len > 0)
+ bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1);
+ break;
+#endif /* DHD_DEBUG */
+
+ case IOV_SVAL(IOV_CLEARCOUNTS):
+ dhd_pub->tx_packets = dhd_pub->rx_packets = 0;
+ dhd_pub->tx_errors = dhd_pub->rx_errors = 0;
+ dhd_pub->tx_ctlpkts = dhd_pub->rx_ctlpkts = 0;
+ dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0;
+ dhd_pub->rx_dropped = 0;
+ dhd_pub->rx_readahead_cnt = 0;
+ dhd_pub->tx_realloc = 0;
+ dhd_pub->wd_dpc_sched = 0;
+ memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats));
+ dhd_bus_clearcounts(dhd_pub);
+#ifdef PROP_TXSTATUS
+ /* clear proptxstatus related counters */
+ if (dhd_pub->wlfc_state) {
+ athost_wl_status_info_t *wlfc =
+ (athost_wl_status_info_t*)dhd_pub->wlfc_state;
+ wlfc_hanger_t* hanger;
+
+ memset(&wlfc->stats, 0, sizeof(athost_wl_stat_counters_t));
+
+ hanger = (wlfc_hanger_t*)wlfc->hanger;
+ hanger->pushed = 0;
+ hanger->popped = 0;
+ hanger->failed_slotfind = 0;
+ hanger->failed_to_pop = 0;
+ hanger->failed_to_push = 0;
+ }
+#endif /* PROP_TXSTATUS */
+ break;
+
+
+ case IOV_GVAL(IOV_IOCTLTIMEOUT): {
+ int_val = (int32)dhd_os_get_ioctl_resp_timeout();
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+
+ case IOV_SVAL(IOV_IOCTLTIMEOUT): {
+ if (int_val <= 0)
+ bcmerror = BCME_BADARG;
+ else
+ dhd_os_set_ioctl_resp_timeout((unsigned int)int_val);
+ break;
+ }
+
+#ifdef WLBTAMP
+ case IOV_SVAL(IOV_HCI_CMD): {
+ amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)arg;
+
+ /* sanity check: command preamble present */
+ if (len < HCI_CMD_PREAMBLE_SIZE)
+ return BCME_BUFTOOSHORT;
+
+ /* sanity check: command parameters are present */
+ if (len < (int)(HCI_CMD_PREAMBLE_SIZE + cmd->plen))
+ return BCME_BUFTOOSHORT;
+
+ dhd_bta_docmd(dhd_pub, cmd, len);
+ break;
+ }
+
+ case IOV_SVAL(IOV_HCI_ACL_DATA): {
+ amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)arg;
+
+ /* sanity check: HCI header present */
+ if (len < HCI_ACL_DATA_PREAMBLE_SIZE)
+ return BCME_BUFTOOSHORT;
+
+ /* sanity check: ACL data is present */
+ if (len < (int)(HCI_ACL_DATA_PREAMBLE_SIZE + ACL_data->dlen))
+ return BCME_BUFTOOSHORT;
+
+ dhd_bta_tx_hcidata(dhd_pub, ACL_data, len);
+ break;
+ }
+#endif /* WLBTAMP */
+
+#ifdef PROP_TXSTATUS
+ case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE):
+ int_val = dhd_pub->wlfc_enabled? 1 : 0;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_PROPTXSTATUS_ENABLE):
+ dhd_pub->wlfc_enabled = int_val? 1 : 0;
+ break;
+
+ case IOV_GVAL(IOV_PROPTXSTATUS_MODE): {
+ athost_wl_status_info_t *wlfc =
+ (athost_wl_status_info_t*)dhd_pub->wlfc_state;
+ int_val = dhd_pub->wlfc_state ? (int32)wlfc->proptxstatus_mode : 0;
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+
+ case IOV_SVAL(IOV_PROPTXSTATUS_MODE):
+ if (dhd_pub->wlfc_state) {
+ athost_wl_status_info_t *wlfc =
+ (athost_wl_status_info_t*)dhd_pub->wlfc_state;
+ wlfc->proptxstatus_mode = int_val & 0xff;
+ }
+ break;
+#endif /* PROP_TXSTATUS */
+
+ case IOV_GVAL(IOV_BUS_TYPE):
+ /* The dhd application queries the driver to check whether the bus is USB or SDIO. */
+#ifdef BCMDHDUSB
+ int_val = BUS_TYPE_USB;
+#else
+ int_val = BUS_TYPE_SDIO;
+#endif
+ bcopy(&int_val, arg, val_size);
+ break;
+
+
+#ifdef WLMEDIA_HTSF
+ case IOV_GVAL(IOV_WLPKTDLYSTAT_SZ):
+ int_val = dhd_pub->htsfdlystat_sz;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_WLPKTDLYSTAT_SZ):
+ dhd_pub->htsfdlystat_sz = int_val & 0xff;
+ printf("Setting tsfdlystat_sz:%d\n", dhd_pub->htsfdlystat_sz);
+ break;
+#endif
+ case IOV_SVAL(IOV_CHANGEMTU):
+ int_val &= 0xffff;
+ bcmerror = dhd_change_mtu(dhd_pub, int_val, 0);
+ break;
+
+ case IOV_GVAL(IOV_HOSTREORDER_FLOWS):
+ {
+ uint i = 0;
+ uint8 *ptr = (uint8 *)arg;
+ uint8 count = 0;
+
+ ptr++;
+ for (i = 0; i < WLHOST_REORDERDATA_MAXFLOWS; i++) {
+ if (dhd_pub->reorder_bufs[i] != NULL) {
+ *ptr = dhd_pub->reorder_bufs[i]->flow_id;
+ ptr++;
+ count++;
+ }
+ }
+ ptr = (uint8 *)arg;
+ *ptr = count;
+ break;
+ }
+
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+
+exit:
+ DHD_TRACE(("%s: actionid %d, bcmerror %d\n", __FUNCTION__, actionid, bcmerror));
+ return bcmerror;
+}
+
+/* Store the status of a connection attempt for later retrieval by an iovar */
+void
+dhd_store_conn_status(uint32 event, uint32 status, uint32 reason)
+{
+ /* Do not overwrite a WLC_E_PRUNE with a WLC_E_SET_SSID
+ * because an encryption/rsn mismatch results in both events, and
+ * the important information is in the WLC_E_PRUNE.
+ */
+ if (!(event == WLC_E_SET_SSID && status == WLC_E_STATUS_FAIL &&
+ dhd_conn_event == WLC_E_PRUNE)) {
+ dhd_conn_event = event;
+ dhd_conn_status = status;
+ dhd_conn_reason = reason;
+ }
+}
+
+bool
+dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec)
+{
+ void *p;
+ int eprec = -1; /* precedence to evict from */
+ bool discard_oldest;
+
+ /* Fast case, precedence queue is not full and we are also not
+ * exceeding total queue length
+ */
+ if (!pktq_pfull(q, prec) && !pktq_full(q)) {
+ pktq_penq(q, prec, pkt);
+ return TRUE;
+ }
+
+ /* Determine precedence from which to evict packet, if any */
+ if (pktq_pfull(q, prec))
+ eprec = prec;
+ else if (pktq_full(q)) {
+ p = pktq_peek_tail(q, &eprec);
+ ASSERT(p);
+ if (eprec > prec || eprec < 0)
+ return FALSE;
+ }
+
+ /* Evict if needed */
+ if (eprec >= 0) {
+ /* Detect queueing to unconfigured precedence */
+ ASSERT(!pktq_pempty(q, eprec));
+ discard_oldest = AC_BITMAP_TST(dhdp->wme_dp, eprec);
+ if (eprec == prec && !discard_oldest)
+ return FALSE; /* refuse newer (incoming) packet */
+ /* Evict packet according to discard policy */
+ p = discard_oldest ? pktq_pdeq(q, eprec) : pktq_pdeq_tail(q, eprec);
+ ASSERT(p);
+
+ PKTFREE(dhdp->osh, p, TRUE);
+ }
+
+ /* Enqueue */
+ p = pktq_penq(q, prec, pkt);
+ ASSERT(p);
+
+ return TRUE;
+}
+
+static int
+dhd_iovar_op(dhd_pub_t *dhd_pub, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
+{
+ int bcmerror = 0;
+ int val_size;
+ const bcm_iovar_t *vi = NULL;
+ uint32 actionid;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(name);
+ ASSERT(len >= 0);
+
+ /* Get MUST have return space */
+ ASSERT(set || (arg && len));
+
+ /* Set does NOT take qualifiers */
+ ASSERT(!set || (!params && !plen));
+
+ if ((vi = bcm_iovar_lookup(dhd_iovars, name)) == NULL) {
+ bcmerror = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+ name, (set ? "set" : "get"), len, plen));
+
+ /* set up 'params' pointer in case this is a set command so that
+ * the convenience int and bool code can be common to set and get
+ */
+ if (params == NULL) {
+ params = arg;
+ plen = len;
+ }
+
+ if (vi->type == IOVT_VOID)
+ val_size = 0;
+ else if (vi->type == IOVT_BUFFER)
+ val_size = len;
+ else
+ /* all other types are integer sized */
+ val_size = sizeof(int);
+
+ actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+
+ bcmerror = dhd_doiovar(dhd_pub, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+ return bcmerror;
+}
+
+int
+dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen)
+{
+ int bcmerror = 0;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (!buf) {
+ return BCME_BADARG;
+ }
+
+ switch (ioc->cmd) {
+ case DHD_GET_MAGIC:
+ if (buflen < sizeof(int))
+ bcmerror = BCME_BUFTOOSHORT;
+ else
+ *(int*)buf = DHD_IOCTL_MAGIC;
+ break;
+
+ case DHD_GET_VERSION:
+ if (buflen < sizeof(int))
+ bcmerror = BCME_BUFTOOSHORT;
+ else
+ *(int*)buf = DHD_IOCTL_VERSION;
+ break;
+
+ case DHD_GET_VAR:
+ case DHD_SET_VAR: {
+ char *arg;
+ uint arglen;
+
+ /* scan past the name to any arguments */
+ for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--)
+ ;
+
+ if (*arg) {
+ bcmerror = BCME_BUFTOOSHORT;
+ break;
+ }
+
+ /* account for the NUL terminator */
+ arg++, arglen--;
+
+ /* call with the appropriate arguments */
+ if (ioc->cmd == DHD_GET_VAR)
+ bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen,
+ buf, buflen, IOV_GET);
+ else
+ bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0, arg, arglen, IOV_SET);
+ if (bcmerror != BCME_UNSUPPORTED)
+ break;
+
+ /* not in generic table, try protocol module */
+ if (ioc->cmd == DHD_GET_VAR)
+ bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg,
+ arglen, buf, buflen, IOV_GET);
+ else
+ bcmerror = dhd_prot_iovar_op(dhd_pub, buf,
+ NULL, 0, arg, arglen, IOV_SET);
+ if (bcmerror != BCME_UNSUPPORTED)
+ break;
+
+ /* if still not found, try bus module */
+ if (ioc->cmd == DHD_GET_VAR) {
+ bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
+ arg, arglen, buf, buflen, IOV_GET);
+ } else {
+ bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
+ NULL, 0, arg, arglen, IOV_SET);
+ }
+
+ break;
+ }
+
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ }
+
+ return bcmerror;
+}
+
+#ifdef SHOW_EVENTS
+static void
+wl_show_host_event(wl_event_msg_t *event, void *event_data)
+{
+ uint i, status, reason;
+ bool group = FALSE, flush_txq = FALSE, link = FALSE;
+ const char *auth_str;
+ const char *event_name;
+ uchar *buf;
+ char err_msg[256], eabuf[ETHER_ADDR_STR_LEN];
+ uint event_type, flags, auth_type, datalen;
+
+ event_type = ntoh32(event->event_type);
+ flags = ntoh16(event->flags);
+ status = ntoh32(event->status);
+ reason = ntoh32(event->reason);
+ BCM_REFERENCE(reason);
+ auth_type = ntoh32(event->auth_type);
+ datalen = ntoh32(event->datalen);
+
+ /* debug dump of event messages */
+ sprintf(eabuf, "%02x:%02x:%02x:%02x:%02x:%02x",
+ (uchar)event->addr.octet[0]&0xff,
+ (uchar)event->addr.octet[1]&0xff,
+ (uchar)event->addr.octet[2]&0xff,
+ (uchar)event->addr.octet[3]&0xff,
+ (uchar)event->addr.octet[4]&0xff,
+ (uchar)event->addr.octet[5]&0xff);
+
+ event_name = "UNKNOWN";
+ for (i = 0; i < (uint)bcmevent_names_size; i++)
+ if (bcmevent_names[i].event == event_type)
+ event_name = bcmevent_names[i].name;
+
+ if (flags & WLC_EVENT_MSG_LINK)
+ link = TRUE;
+ if (flags & WLC_EVENT_MSG_GROUP)
+ group = TRUE;
+ if (flags & WLC_EVENT_MSG_FLUSHTXQ)
+ flush_txq = TRUE;
+
+ switch (event_type) {
+ case WLC_E_START:
+ case WLC_E_DEAUTH:
+ case WLC_E_DISASSOC:
+ DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+ break;
+
+ case WLC_E_ASSOC_IND:
+ case WLC_E_REASSOC_IND:
+
+ DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+ break;
+
+ case WLC_E_ASSOC:
+ case WLC_E_REASSOC:
+ if (status == WLC_E_STATUS_SUCCESS) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n", event_name, eabuf));
+ } else if (status == WLC_E_STATUS_TIMEOUT) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf));
+ } else if (status == WLC_E_STATUS_FAIL) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, reason %d\n",
+ event_name, eabuf, (int)reason));
+ } else {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n",
+ event_name, eabuf, (int)status));
+ }
+ break;
+
+ case WLC_E_DEAUTH_IND:
+ case WLC_E_DISASSOC_IND:
+ DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name, eabuf, (int)reason));
+ break;
+
+ case WLC_E_AUTH:
+ case WLC_E_AUTH_IND:
+ if (auth_type == DOT11_OPEN_SYSTEM)
+ auth_str = "Open System";
+ else if (auth_type == DOT11_SHARED_KEY)
+ auth_str = "Shared Key";
+ else {
+ sprintf(err_msg, "AUTH unknown: %d", (int)auth_type);
+ auth_str = err_msg;
+ }
+ if (event_type == WLC_E_AUTH_IND) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name, eabuf, auth_str));
+ } else if (status == WLC_E_STATUS_SUCCESS) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n",
+ event_name, eabuf, auth_str));
+ } else if (status == WLC_E_STATUS_TIMEOUT) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
+ event_name, eabuf, auth_str));
+ } else if (status == WLC_E_STATUS_FAIL) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, reason %d\n",
+ event_name, eabuf, auth_str, (int)reason));
+ }
+ BCM_REFERENCE(auth_str);
+
+ break;
+
+ case WLC_E_JOIN:
+ case WLC_E_ROAM:
+ case WLC_E_SET_SSID:
+ if (status == WLC_E_STATUS_SUCCESS) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+ } else if (status == WLC_E_STATUS_FAIL) {
+ DHD_EVENT(("MACEVENT: %s, failed\n", event_name));
+ } else if (status == WLC_E_STATUS_NO_NETWORKS) {
+ DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name));
+ } else {
+ DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
+ event_name, (int)status));
+ }
+ break;
+
+ case WLC_E_BEACON_RX:
+ if (status == WLC_E_STATUS_SUCCESS) {
+ DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name));
+ } else if (status == WLC_E_STATUS_FAIL) {
+ DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name));
+ } else {
+ DHD_EVENT(("MACEVENT: %s, status %d\n", event_name, status));
+ }
+ break;
+
+ case WLC_E_LINK:
+ DHD_EVENT(("MACEVENT: %s %s\n", event_name, link?"UP":"DOWN"));
+ BCM_REFERENCE(link);
+ break;
+
+ case WLC_E_MIC_ERROR:
+ DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
+ event_name, eabuf, group, flush_txq));
+ BCM_REFERENCE(group);
+ BCM_REFERENCE(flush_txq);
+ break;
+
+ case WLC_E_ICV_ERROR:
+ case WLC_E_UNICAST_DECODE_ERROR:
+ case WLC_E_MULTICAST_DECODE_ERROR:
+ DHD_EVENT(("MACEVENT: %s, MAC %s\n",
+ event_name, eabuf));
+ break;
+
+ case WLC_E_TXFAIL:
+ DHD_EVENT(("MACEVENT: %s, RA %s\n", event_name, eabuf));
+ break;
+
+ case WLC_E_SCAN_COMPLETE:
+ case WLC_E_ASSOC_REQ_IE:
+ case WLC_E_ASSOC_RESP_IE:
+ case WLC_E_PMKID_CACHE:
+ DHD_EVENT(("MACEVENT: %s\n", event_name));
+ break;
+
+ case WLC_E_PFN_NET_FOUND:
+ case WLC_E_PFN_NET_LOST:
+ case WLC_E_PFN_SCAN_COMPLETE:
+ case WLC_E_PFN_SCAN_NONE:
+ case WLC_E_PFN_SCAN_ALLGONE:
+ DHD_EVENT(("PNOEVENT: %s\n", event_name));
+ break;
+
+ case WLC_E_PSK_SUP:
+ case WLC_E_PRUNE:
+ DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n",
+ event_name, (int)status, (int)reason));
+ break;
+
+#ifdef WIFI_ACT_FRAME
+ case WLC_E_ACTION_FRAME:
+ DHD_TRACE(("MACEVENT: %s Bssid %s\n", event_name, eabuf));
+ break;
+#endif /* WIFI_ACT_FRAME */
+
+ case WLC_E_TRACE: {
+ static uint32 seqnum_prev = 0;
+ msgtrace_hdr_t hdr;
+ uint32 nblost;
+ char *s, *p;
+
+ buf = (uchar *) event_data;
+ memcpy(&hdr, buf, MSGTRACE_HDRLEN);
+
+ if (hdr.version != MSGTRACE_VERSION) {
+ printf("\nMACEVENT: %s [unsupported version --> "
+ "dhd version:%d dongle version:%d]\n",
+ event_name, MSGTRACE_VERSION, hdr.version);
+ /* Reset datalen to avoid display below */
+ datalen = 0;
+ break;
+ }
+
+ /* There are 2 bytes available at the end of data */
+ buf[MSGTRACE_HDRLEN + ntoh16(hdr.len)] = '\0';
+
+ if (ntoh32(hdr.discarded_bytes) || ntoh32(hdr.discarded_printf)) {
+ printf("\nWLC_E_TRACE: [Discarded traces in dongle -->"
+ "discarded_bytes %d discarded_printf %d]\n",
+ ntoh32(hdr.discarded_bytes), ntoh32(hdr.discarded_printf));
+ }
+
+ nblost = ntoh32(hdr.seqnum) - seqnum_prev - 1;
+ if (nblost > 0) {
+ printf("\nWLC_E_TRACE: [Event lost --> seqnum %d nblost %d\n",
+ ntoh32(hdr.seqnum), nblost);
+ }
+ seqnum_prev = ntoh32(hdr.seqnum);
+
+ /* Display the trace buffer, advancing from '\n' to '\n' to avoid
+ * issuing one very large printf (an issue with the Linux printk)
+ */
+ p = (char *)&buf[MSGTRACE_HDRLEN];
+ while ((s = strstr(p, "\n")) != NULL) {
+ *s = '\0';
+ printf("%s\n", p);
+ p = s+1;
+ }
+ printf("%s\n", p);
+
+ /* Reset datalen to avoid display below */
+ datalen = 0;
+ break;
+ }
+
+
+ case WLC_E_RSSI:
+ DHD_EVENT(("MACEVENT: %s %d\n", event_name, ntoh32(*((int *)event_data))));
+ break;
+
+ default:
+ DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
+ event_name, event_type, eabuf, (int)status, (int)reason,
+ (int)auth_type));
+ break;
+ }
+
+ /* show any appended data */
+ if (datalen) {
+ buf = (uchar *) event_data;
+ DHD_EVENT((" data (%d) : ", datalen));
+ for (i = 0; i < datalen; i++)
+ DHD_EVENT((" 0x%02x ", *buf++));
+ DHD_EVENT(("\n"));
+ }
+}
+#endif /* SHOW_EVENTS */
+
+int
+wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata,
+ wl_event_msg_t *event, void **data_ptr)
+{
+ /* check whether packet is a BRCM event pkt */
+ bcm_event_t *pvt_data = (bcm_event_t *)pktdata;
+ uint8 *event_data;
+ uint32 type, status, datalen;
+ uint16 flags;
+ int evlen;
+
+ if (bcmp(BRCM_OUI, &pvt_data->bcm_hdr.oui[0], DOT11_OUI_LEN)) {
+ DHD_ERROR(("%s: mismatched OUI, bailing\n", __FUNCTION__));
+ return (BCME_ERROR);
+ }
+
+ /* BRCM event pkt may be unaligned - use xxx_ua to load user_subtype. */
+ if (ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype) != BCMILCP_BCM_SUBTYPE_EVENT) {
+ DHD_ERROR(("%s: mismatched subtype, bailing\n", __FUNCTION__));
+ return (BCME_ERROR);
+ }
+
+ *data_ptr = &pvt_data[1];
+ event_data = *data_ptr;
+
+ /* memcpy since BRCM event pkt may be unaligned. */
+ memcpy(event, &pvt_data->event, sizeof(wl_event_msg_t));
+
+ type = ntoh32_ua((void *)&event->event_type);
+ flags = ntoh16_ua((void *)&event->flags);
+ status = ntoh32_ua((void *)&event->status);
+ datalen = ntoh32_ua((void *)&event->datalen);
+ evlen = datalen + sizeof(bcm_event_t);
+
+ switch (type) {
+#ifdef PROP_TXSTATUS
+ case WLC_E_FIFO_CREDIT_MAP:
+ dhd_wlfc_event(dhd_pub->info);
+ dhd_wlfc_FIFOcreditmap_event(dhd_pub->info, event_data);
+ WLFC_DBGMESG(("WLC_E_FIFO_CREDIT_MAP:(AC0,AC1,AC2,AC3),(BC_MC),(OTHER): "
+ "(%d,%d,%d,%d),(%d),(%d)\n", event_data[0], event_data[1],
+ event_data[2],
+ event_data[3], event_data[4], event_data[5]));
+ break;
+#endif
+
+ case WLC_E_IF:
+ {
+ dhd_if_event_t *ifevent = (dhd_if_event_t *)event_data;
+#ifdef PROP_TXSTATUS
+ {
+ uint8* ea = pvt_data->eth.ether_dhost;
+ WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, "
+ "[%02x:%02x:%02x:%02x:%02x:%02x]\n",
+ ifevent->ifidx,
+ ((ifevent->action == WLC_E_IF_ADD) ? "ADD":"DEL"),
+ ((ifevent->is_AP == 0) ? "STA":"AP "),
+ ea[0], ea[1], ea[2], ea[3], ea[4], ea[5]));
+ (void)ea;
+ if (ifevent->action == WLC_E_IF_CHANGE)
+ dhd_wlfc_interface_event(dhd_pub->info,
+ eWLFC_MAC_ENTRY_ACTION_UPDATE,
+ ifevent->ifidx, ifevent->is_AP, ea);
+ else
+ dhd_wlfc_interface_event(dhd_pub->info,
+ ((ifevent->action == WLC_E_IF_ADD) ?
+ eWLFC_MAC_ENTRY_ACTION_ADD : eWLFC_MAC_ENTRY_ACTION_DEL),
+ ifevent->ifidx, ifevent->is_AP, ea);
+
+
+ /* dhd has already created the default interface (index 0) */
+ if (ifevent->ifidx == 0)
+ break;
+ }
+#endif /* PROP_TXSTATUS */
+
+#ifdef WL_CFG80211
+ if (wl_cfg80211_is_progress_ifchange()) {
+ DHD_ERROR(("%s: ifidx %d for %s action %d\n",
+ __FUNCTION__, ifevent->ifidx,
+ event->ifname, ifevent->action));
+ if (ifevent->action == WLC_E_IF_ADD)
+ wl_cfg80211_notify_ifchange();
+ return (BCME_OK);
+ }
+#endif /* WL_CFG80211 */
+ if (ifevent->ifidx > 0 && ifevent->ifidx < DHD_MAX_IFS) {
+ if (ifevent->action == WLC_E_IF_ADD) {
+ if (dhd_add_if(dhd_pub->info, ifevent->ifidx,
+ NULL, event->ifname,
+ event->addr.octet,
+ ifevent->flags, ifevent->bssidx)) {
+ DHD_ERROR(("%s: dhd_add_if failed!!"
+ " ifidx: %d for %s\n",
+ __FUNCTION__,
+ ifevent->ifidx,
+ event->ifname));
+ return (BCME_ERROR);
+ }
+ }
+ else if (ifevent->action == WLC_E_IF_DEL)
+ dhd_del_if(dhd_pub->info, ifevent->ifidx);
+ } else {
+#ifndef PROP_TXSTATUS
+ DHD_ERROR(("%s: Invalid ifidx %d for %s\n",
+ __FUNCTION__, ifevent->ifidx, event->ifname));
+#endif /* !PROP_TXSTATUS */
+ }
+ }
+ /* send up the if event: btamp user needs it */
+ *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
+ /* push up to external supp/auth */
+ dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
+ break;
+
+
+#ifdef WLMEDIA_HTSF
+ case WLC_E_HTSFSYNC:
+ htsf_update(dhd_pub->info, event_data);
+ break;
+#endif /* WLMEDIA_HTSF */
+#if defined(NDIS630)
+ case WLC_E_NDIS_LINK:
+ break;
+#else /* defined(NDIS630) && defined(BCMDONGLEHOST) */
+ case WLC_E_NDIS_LINK: {
+ uint32 temp = hton32(WLC_E_LINK);
+
+ memcpy((void *)(&pvt_data->event.event_type), &temp,
+ sizeof(pvt_data->event.event_type));
+ }
+#endif
+ /* These are what external supplicant/authenticator wants */
+ /* fall through */
+ case WLC_E_LINK:
+ case WLC_E_DEAUTH:
+ case WLC_E_DEAUTH_IND:
+ case WLC_E_DISASSOC:
+ case WLC_E_DISASSOC_IND:
+ DHD_EVENT(("%s: Link event %d, flags %x, status %x\n",
+ __FUNCTION__, type, flags, status));
+ /* fall through */
+ default:
+ *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
+ /* push up to external supp/auth */
+ dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
+ DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n",
+ __FUNCTION__, type, flags, status));
+ BCM_REFERENCE(flags);
+ BCM_REFERENCE(status);
+
+ /* put it back to WLC_E_NDIS_LINK */
+ if (type == WLC_E_NDIS_LINK) {
+ uint32 temp;
+
+ temp = ntoh32_ua((void *)&event->event_type);
+ DHD_TRACE(("Converted to WLC_E_LINK type %d\n", temp));
+
+ temp = ntoh32(WLC_E_NDIS_LINK);
+ memcpy((void *)(&pvt_data->event.event_type), &temp,
+ sizeof(pvt_data->event.event_type));
+ }
+ break;
+ }
+
+#ifdef SHOW_EVENTS
+ wl_show_host_event(event, (void *)event_data);
+#endif /* SHOW_EVENTS */
+
+ return (BCME_OK);
+}
+
+void
+wl_event_to_host_order(wl_event_msg_t * evt)
+{
+ /* Event struct members passed from dongle to host are stored in network
+ * byte order. Convert all members to host-order.
+ */
+ evt->event_type = ntoh32(evt->event_type);
+ evt->flags = ntoh16(evt->flags);
+ evt->status = ntoh32(evt->status);
+ evt->reason = ntoh32(evt->reason);
+ evt->auth_type = ntoh32(evt->auth_type);
+ evt->datalen = ntoh32(evt->datalen);
+ evt->version = ntoh16(evt->version);
+}
+
+void
+dhd_print_buf(void *pbuf, int len, int bytes_per_line)
+{
+#ifdef DHD_DEBUG
+ int i, j = 0;
+ unsigned char *buf = pbuf;
+
+ if (bytes_per_line == 0) {
+ bytes_per_line = len;
+ }
+
+ for (i = 0; i < len; i++) {
+ printf("%2.2x", *buf++);
+ j++;
+ if (j == bytes_per_line) {
+ printf("\n");
+ j = 0;
+ } else {
+ printf(":");
+ }
+ }
+ printf("\n");
+#endif /* DHD_DEBUG */
+}
+
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+
+/* Convert user's input in hex pattern to byte-size mask */
+static int
+wl_pattern_atoh(char *src, char *dst)
+{
+ int i;
+ if (strncmp(src, "0x", 2) != 0 &&
+ strncmp(src, "0X", 2) != 0) {
+ DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
+ return -1;
+ }
+ src = src + 2; /* Skip past 0x */
+ if (strlen(src) % 2 != 0) {
+ DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
+ return -1;
+ }
+ for (i = 0; *src != '\0'; i++) {
+ char num[3];
+ bcm_strncpy_s(num, sizeof(num), src, 2);
+ num[2] = '\0';
+ dst[i] = (uint8)strtoul(num, NULL, 16);
+ src += 2;
+ }
+ return i;
+}
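+
+/*
+ * Example (illustrative): wl_pattern_atoh("0xff00", dst) fills dst with
+ * { 0xff, 0x00 } and returns 2; a string without the "0x" prefix, or one of
+ * odd length, returns -1.
+ */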
+
+void
+dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode)
+{
+ char *argv[8];
+ int i = 0;
+ const char *str;
+ int buf_len;
+ int str_len;
+ char *arg_save = 0, *arg_org = 0;
+ int rc;
+ char buf[128];
+ wl_pkt_filter_enable_t enable_parm;
+ wl_pkt_filter_enable_t * pkt_filterp;
+
+ if (!arg)
+ return;
+
+ if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
+ DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ goto fail;
+ }
+ arg_org = arg_save;
+ memcpy(arg_save, arg, strlen(arg) + 1);
+
+ argv[i] = bcmstrtok(&arg_save, " ", 0);
+
+ i = 0;
+ if (argv[i] == NULL) {
+ DHD_ERROR(("No args provided\n"));
+ goto fail;
+ }
+
+ str = "pkt_filter_enable";
+ str_len = strlen(str);
+ bcm_strncpy_s(buf, sizeof(buf), str, str_len);
+ buf[str_len] = '\0';
+ buf_len = str_len + 1;
+
+ pkt_filterp = (wl_pkt_filter_enable_t *)(buf + str_len + 1);
+
+ /* Parse packet filter id. */
+ enable_parm.id = htod32(strtoul(argv[i], NULL, 0));
+
+ /* Parse enable/disable value. */
+ enable_parm.enable = htod32(enable);
+
+ buf_len += sizeof(enable_parm);
+ memcpy((char *)pkt_filterp,
+ &enable_parm,
+ sizeof(enable_parm));
+
+ /* Enable/disable the specified filter. */
+ rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+ rc = rc >= 0 ? 0 : rc;
+ if (rc)
+ DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+ __FUNCTION__, arg, rc));
+ else
+ DHD_TRACE(("%s: successfully added pktfilter %s\n",
+ __FUNCTION__, arg));
+
+ /* Control the master mode */
+ bcm_mkiovar("pkt_filter_mode", (char *)&master_mode, 4, buf, sizeof(buf));
+ rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+ rc = rc >= 0 ? 0 : rc;
+ if (rc)
+ DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+ __FUNCTION__, arg, rc));
+
+fail:
+ if (arg_org)
+ MFREE(dhd->osh, arg_org, strlen(arg) + 1);
+}
+
+void
+dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg)
+{
+ const char *str;
+ wl_pkt_filter_t pkt_filter;
+ wl_pkt_filter_t *pkt_filterp;
+ int buf_len;
+ int str_len;
+ int rc;
+ uint32 mask_size;
+ uint32 pattern_size;
+ char *argv[8], * buf = 0;
+ int i = 0;
+ char *arg_save = 0, *arg_org = 0;
+#define BUF_SIZE 2048
+
+ if (!arg)
+ return;
+
+ if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
+ DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ arg_org = arg_save;
+
+ if (!(buf = MALLOC(dhd->osh, BUF_SIZE))) {
+ DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ memcpy(arg_save, arg, strlen(arg) + 1);
+
+ if (strlen(arg) > BUF_SIZE) {
+ DHD_ERROR(("Not enough buffer %d < %d\n", (int)strlen(arg), (int)sizeof(buf)));
+ goto fail;
+ }
+
+ argv[i] = bcmstrtok(&arg_save, " ", 0);
+ while (argv[i++])
+ argv[i] = bcmstrtok(&arg_save, " ", 0);
+
+ i = 0;
+ if (argv[i] == NULL) {
+ DHD_ERROR(("No args provided\n"));
+ goto fail;
+ }
+
+ str = "pkt_filter_add";
+ str_len = strlen(str);
+ bcm_strncpy_s(buf, BUF_SIZE, str, str_len);
+ buf[ str_len ] = '\0';
+ buf_len = str_len + 1;
+
+ pkt_filterp = (wl_pkt_filter_t *) (buf + str_len + 1);
+
+ /* Parse packet filter id. */
+ pkt_filter.id = htod32(strtoul(argv[i], NULL, 0));
+
+ if (argv[++i] == NULL) {
+ DHD_ERROR(("Polarity not provided\n"));
+ goto fail;
+ }
+
+ /* Parse filter polarity. */
+ pkt_filter.negate_match = htod32(strtoul(argv[i], NULL, 0));
+
+ if (argv[++i] == NULL) {
+ DHD_ERROR(("Filter type not provided\n"));
+ goto fail;
+ }
+
+ /* Parse filter type. */
+ pkt_filter.type = htod32(strtoul(argv[i], NULL, 0));
+
+ if (argv[++i] == NULL) {
+ DHD_ERROR(("Offset not provided\n"));
+ goto fail;
+ }
+
+ /* Parse pattern filter offset. */
+ pkt_filter.u.pattern.offset = htod32(strtoul(argv[i], NULL, 0));
+
+ if (argv[++i] == NULL) {
+ DHD_ERROR(("Bitmask not provided\n"));
+ goto fail;
+ }
+
+ /* Parse pattern filter mask. */
+ mask_size =
+ htod32(wl_pattern_atoh(argv[i], (char *) pkt_filterp->u.pattern.mask_and_pattern));
+
+ if (argv[++i] == NULL) {
+ DHD_ERROR(("Pattern not provided\n"));
+ goto fail;
+ }
+
+ /* Parse pattern filter pattern. */
+ pattern_size =
+ htod32(wl_pattern_atoh(argv[i],
+ (char *) &pkt_filterp->u.pattern.mask_and_pattern[mask_size]));
+
+ if (mask_size != pattern_size) {
+ DHD_ERROR(("Mask and pattern not the same size\n"));
+ goto fail;
+ }
+
+ pkt_filter.u.pattern.size_bytes = mask_size;
+ buf_len += WL_PKT_FILTER_FIXED_LEN;
+ buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
+
+ /* Filter attributes are set in a local variable (pkt_filter), and
+ ** then memcpy'ed into the buffer (pkt_filterp) since there is no
+ ** guarantee that the buffer is properly aligned.
+ */
+ memcpy((char *)pkt_filterp,
+ &pkt_filter,
+ WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
+
+ rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+ rc = rc >= 0 ? 0 : rc;
+
+ if (rc)
+ DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+ __FUNCTION__, arg, rc));
+ else
+ DHD_TRACE(("%s: successfully added pktfilter %s\n",
+ __FUNCTION__, arg));
+
+fail:
+ if (arg_org)
+ MFREE(dhd->osh, arg_org, strlen(arg) + 1);
+
+ if (buf)
+ MFREE(dhd->osh, buf, BUF_SIZE);
+}
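+
+/*
+ * Illustrative usage sketch (the filter values are only an example): the
+ * argument string parsed above has the form
+ * "<id> <polarity> <type> <offset> <mask> <pattern>", with mask and pattern
+ * of equal hex length, e.g.
+ *
+ *	dhd_pktfilter_offload_set(dhd, "102 0 0 0 0xffffff 0x01005e");
+ *
+ * installs a pattern filter matching frames whose first three bytes are 01:00:5e.
+ */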
+
+/* ========================== */
+/* ==== ARP OFFLOAD SUPPORT = */
+/* ========================== */
+#ifdef ARP_OFFLOAD_SUPPORT
+void
+dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode)
+{
+ char iovbuf[32];
+ int retcode;
+
+ bcm_mkiovar("arp_ol", (char *)&arp_mode, 4, iovbuf, sizeof(iovbuf));
+ retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ retcode = retcode >= 0 ? 0 : retcode;
+ if (retcode)
+ DHD_TRACE(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n",
+ __FUNCTION__, arp_mode, retcode));
+ else
+ DHD_TRACE(("%s: successfully set ARP offload mode to 0x%x\n",
+ __FUNCTION__, arp_mode));
+}
+
+void
+dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable)
+{
+ char iovbuf[32];
+ int retcode;
+
+ bcm_mkiovar("arpoe", (char *)&arp_enable, 4, iovbuf, sizeof(iovbuf));
+ retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ retcode = retcode >= 0 ? 0 : retcode;
+ if (retcode)
+ DHD_TRACE(("%s: failed to enabe ARP offload to %d, retcode = %d\n",
+ __FUNCTION__, arp_enable, retcode));
+ else
+ DHD_TRACE(("%s: successfully enabed ARP offload to %d\n",
+ __FUNCTION__, arp_enable));
+}
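+
+/*
+ * Usage note: the P2P hooks earlier in this patch disable ARP offload with
+ * dhd_arp_offload_set(dhd, 0) / dhd_arp_offload_enable(dhd, false) while P2P
+ * is active, and restore it with the saved dhd_arp_mode once P2P is cleaned up.
+ */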
+
+void
+dhd_aoe_arp_clr(dhd_pub_t *dhd)
+{
+ int ret = 0;
+ int iov_len = 0;
+ char iovbuf[128];
+
+ if (dhd == NULL) return;
+
+ iov_len = bcm_mkiovar("arp_table_clear", 0, 0, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0) < 0))
+ DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+}
+
+void
+dhd_aoe_hostip_clr(dhd_pub_t *dhd)
+{
+ int ret = 0;
+ int iov_len = 0;
+ char iovbuf[128];
+
+ if (dhd == NULL) return;
+
+ iov_len = bcm_mkiovar("arp_hostip_clear", 0, 0, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0)) < 0)
+ DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+}
+
+void
+dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr)
+{
+ int iov_len = 0;
+ char iovbuf[32];
+ int retcode;
+
+ iov_len = bcm_mkiovar("arp_hostip", (char *)&ipaddr, 4, iovbuf, sizeof(iovbuf));
+ retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0);
+
+ if (retcode)
+ DHD_TRACE(("%s: ARP ip addr add failed, retcode = %d\n",
+ __FUNCTION__, retcode));
+ else
+ DHD_TRACE(("%s: sARP H ipaddr entry added \n",
+ __FUNCTION__));
+}
+
+int
+dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen)
+{
+ int retcode, i;
+ int iov_len;
+ uint32 *ptr32 = buf;
+ bool clr_bottom = FALSE;
+
+ if (!buf)
+ return -1;
+
+ iov_len = bcm_mkiovar("arp_hostip", 0, 0, buf, buflen);
+ BCM_REFERENCE(iov_len);
+ retcode = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, buflen, FALSE, 0);
+
+ if (retcode) {
+ DHD_TRACE(("%s: ioctl WLC_GET_VAR error %d\n",
+ __FUNCTION__, retcode));
+
+ return -1;
+ }
+
+ /* clear out the remainder of the buffer after the first empty entry */
+ for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
+ if (!clr_bottom) {
+ if (*ptr32 == 0)
+ clr_bottom = TRUE;
+ } else {
+ *ptr32 = 0;
+ }
+ ptr32++;
+ }
+
+ return 0;
+}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+/* send up locally generated event */
+void
+dhd_sendup_event_common(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
+{
+ switch (ntoh32(event->event_type)) {
+#ifdef WLBTAMP
+ case WLC_E_BTA_HCI_EVENT:
+ break;
+#endif /* WLBTAMP */
+ default:
+ break;
+ }
+
+ /* Call per-port handler. */
+ dhd_sendup_event(dhdp, event, data);
+}
+
+#ifdef SIMPLE_ISCAN
+
+uint iscan_thread_id = 0;
+iscan_buf_t * iscan_chain = 0;
+
+iscan_buf_t *
+dhd_iscan_allocate_buf(dhd_pub_t *dhd, iscan_buf_t **iscanbuf)
+{
+ iscan_buf_t *iscanbuf_alloc = 0;
+ iscan_buf_t *iscanbuf_head;
+
+ DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
+ dhd_iscan_lock();
+
+ iscanbuf_alloc = (iscan_buf_t*)MALLOC(dhd->osh, sizeof(iscan_buf_t));
+ if (iscanbuf_alloc == NULL)
+ goto fail;
+
+ iscanbuf_alloc->next = NULL;
+ iscanbuf_head = *iscanbuf;
+
+ DHD_ISCAN(("%s: addr of allocated node = 0x%X"
+ "addr of iscanbuf_head = 0x%X dhd = 0x%X\n",
+ __FUNCTION__, iscanbuf_alloc, iscanbuf_head, dhd));
+
+ if (iscanbuf_head == NULL) {
+ *iscanbuf = iscanbuf_alloc;
+ DHD_ISCAN(("%s: Head is allocated\n", __FUNCTION__));
+ goto fail;
+ }
+
+ while (iscanbuf_head->next)
+ iscanbuf_head = iscanbuf_head->next;
+
+ iscanbuf_head->next = iscanbuf_alloc;
+
+fail:
+ dhd_iscan_unlock();
+ return iscanbuf_alloc;
+}
+
+void
+dhd_iscan_free_buf(void *dhdp, iscan_buf_t *iscan_delete)
+{
+ iscan_buf_t *iscanbuf_free = 0;
+ iscan_buf_t *iscanbuf_prv = 0;
+ iscan_buf_t *iscanbuf_cur;
+ dhd_pub_t *dhd = dhd_bus_pub(dhdp);
+ DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
+
+ dhd_iscan_lock();
+
+ iscanbuf_cur = iscan_chain;
+
+ /* If iscan_delete is null then delete the entire
+ * chain or else delete specific one provided
+ */
+ if (!iscan_delete) {
+ while (iscanbuf_cur) {
+ iscanbuf_free = iscanbuf_cur;
+ iscanbuf_cur = iscanbuf_cur->next;
+ iscanbuf_free->next = 0;
+ MFREE(dhd->osh, iscanbuf_free, sizeof(iscan_buf_t));
+ }
+ iscan_chain = 0;
+ } else {
+ while (iscanbuf_cur) {
+ if (iscanbuf_cur == iscan_delete)
+ break;
+ iscanbuf_prv = iscanbuf_cur;
+ iscanbuf_cur = iscanbuf_cur->next;
+ }
+ if (iscanbuf_prv)
+ iscanbuf_prv->next = iscan_delete->next;
+ else
+ iscan_chain = iscan_delete->next;
+
+ iscan_delete->next = 0;
+ MFREE(dhd->osh, iscan_delete, sizeof(iscan_buf_t));
+ }
+ dhd_iscan_unlock();
+}
+
+iscan_buf_t *
+dhd_iscan_result_buf(void)
+{
+ return iscan_chain;
+}
+
+int
+dhd_iscan_issue_request(void * dhdp, wl_iscan_params_t *pParams, uint32 size)
+{
+ int rc = -1;
+ dhd_pub_t *dhd = dhd_bus_pub(dhdp);
+ char *buf = NULL;
+ char iovar[] = "iscan";
+ uint32 allocSize = 0;
+ wl_ioctl_t ioctl;
+
+ if (pParams) {
+ allocSize = (size + strlen(iovar) + 1);
+ if ((allocSize < size) || (allocSize < strlen(iovar)))
+ {
+ DHD_ERROR(("%s: overflow - allocation size too large %d < %d + %d!\n",
+ __FUNCTION__, allocSize, size, strlen(iovar)));
+ goto cleanUp;
+ }
+ buf = MALLOC(dhd->osh, allocSize);
+
+ if (buf == NULL)
+ {
+ DHD_ERROR(("%s: malloc of size %d failed!\n", __FUNCTION__, allocSize));
+ goto cleanUp;
+ }
+ ioctl.cmd = WLC_SET_VAR;
+ bcm_mkiovar(iovar, (char *)pParams, size, buf, allocSize);
+ rc = dhd_wl_ioctl(dhd, 0, &ioctl, buf, allocSize);
+ }
+
+cleanUp:
+ if (buf) {
+ MFREE(dhd->osh, buf, allocSize);
+ }
+
+ return rc;
+}
+
+static int
+dhd_iscan_get_partial_result(void *dhdp, uint *scan_count)
+{
+ wl_iscan_results_t *list_buf;
+ wl_iscan_results_t list;
+ wl_scan_results_t *results;
+ iscan_buf_t *iscan_cur;
+ int status = -1;
+ dhd_pub_t *dhd = dhd_bus_pub(dhdp);
+ int rc;
+ wl_ioctl_t ioctl;
+
+ DHD_ISCAN(("%s: Enter\n", __FUNCTION__));
+
+ iscan_cur = dhd_iscan_allocate_buf(dhd, &iscan_chain);
+ if (!iscan_cur) {
+ DHD_ERROR(("%s: Failed to allocate node\n", __FUNCTION__));
+ dhd_iscan_free_buf(dhdp, 0);
+ dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
+ dhd_ind_scan_confirm(dhdp, FALSE);
+ goto fail;
+ }
+
+ dhd_iscan_lock();
+
+ memset(iscan_cur->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
+ list_buf = (wl_iscan_results_t*)iscan_cur->iscan_buf;
+ results = &list_buf->results;
+ results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
+ results->version = 0;
+ results->count = 0;
+
+ memset(&list, 0, sizeof(list));
+ list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
+ bcm_mkiovar("iscanresults", (char *)&list, WL_ISCAN_RESULTS_FIXED_SIZE,
+ iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
+ ioctl.cmd = WLC_GET_VAR;
+ ioctl.set = FALSE;
+ rc = dhd_wl_ioctl(dhd, 0, &ioctl, iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
+
+ results->buflen = dtoh32(results->buflen);
+ results->version = dtoh32(results->version);
+ *scan_count = results->count = dtoh32(results->count);
+ status = dtoh32(list_buf->status);
+ DHD_ISCAN(("%s: Got %d resuls status = (%x)\n", __FUNCTION__, results->count, status));
+
+ dhd_iscan_unlock();
+
+ if (!(*scan_count)) {
+ /* TODO: race condition when FLUSH already called */
+ dhd_iscan_free_buf(dhdp, 0);
+ }
+fail:
+ return status;
+}
+
+#endif /* SIMPLE_ISCAN */
+
+/*
+ * returns = TRUE if associated, FALSE if not associated
+ */
+bool dhd_is_associated(dhd_pub_t *dhd, void *bss_buf)
+{
+ char bssid[6], zbuf[6];
+ int ret = -1;
+
+ bzero(bssid, 6);
+ bzero(zbuf, 6);
+
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, (char *)&bssid, ETHER_ADDR_LEN, FALSE, 0);
+ DHD_TRACE((" %s WLC_GET_BSSID ioctl res = %d\n", __FUNCTION__, ret));
+
+ if (ret == BCME_NOTASSOCIATED) {
+ DHD_TRACE(("%s: not associated! res:%d\n", __FUNCTION__, ret));
+ }
+
+ if (ret < 0)
+ return FALSE;
+
+ if ((memcmp(bssid, zbuf, ETHER_ADDR_LEN) != 0)) {
+ /* STA is associated, BSSID is non-zero */
+
+ if (bss_buf) {
+ /* return bss if caller provided buf */
+ memcpy(bss_buf, bssid, ETHER_ADDR_LEN);
+ }
+ return TRUE;
+ } else {
+ DHD_TRACE(("%s: WLC_GET_BSSID ioctl returned zero bssid\n", __FUNCTION__));
+ return FALSE;
+ }
+}
+
+
+/* Function to estimate possible DTIM_SKIP value */
+int
+dhd_get_dtim_skip(dhd_pub_t *dhd)
+{
+ int bcn_li_dtim;
+ int ret = -1;
+ int dtim_assoc = 0;
+
+ if ((dhd->dtim_skip == 0) || (dhd->dtim_skip == 1))
+ bcn_li_dtim = 3;
+ else
+ bcn_li_dtim = dhd->dtim_skip;
+
+ /* Check if associated */
+ if (dhd_is_associated(dhd, NULL) == FALSE) {
+ DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+
+ /* if assoc grab ap's dtim value */
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
+ &dtim_assoc, sizeof(dtim_assoc), FALSE, 0)) < 0) {
+ DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+
+ DHD_ERROR(("%s bcn_li_dtim=%d DTIM=%d Listen=%d\n",
+ __FUNCTION__, bcn_li_dtim, dtim_assoc, LISTEN_INTERVAL));
+
+ /* if not associated just exit */
+ if (dtim_assoc == 0) {
+ goto exit;
+ }
+
+ /* check if sta listen interval fits into AP dtim */
+ if (dtim_assoc > LISTEN_INTERVAL) {
+ /* AP DTIM too big for our Listen Interval : no dtim skipping */
+ bcn_li_dtim = 1;
+ DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
+ __FUNCTION__, dtim_assoc, LISTEN_INTERVAL));
+ goto exit;
+ }
+
+ if ((bcn_li_dtim * dtim_assoc) > LISTEN_INTERVAL) {
+ /* Round down dtim_skip to fit into the STA's Listen Interval */
+ bcn_li_dtim = (int)(LISTEN_INTERVAL / dtim_assoc);
+ DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
+ }
+
+exit:
+ return bcn_li_dtim;
+}
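+
+/* Worked example (illustrative numbers only, LISTEN_INTERVAL assumed to be 10):
+ * the function starts from bcn_li_dtim = 3 when dtim_skip is at its default.
+ * With an AP DTIM period of 2, 3 * 2 = 6 <= 10, so 3 is kept.  With an AP
+ * DTIM period of 4, 3 * 4 = 12 > 10, so the skip is reduced to 10 / 4 = 2.
+ * With an AP DTIM period of 20 (> LISTEN_INTERVAL) no skipping is done and
+ * 1 is returned.
+ */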
+
+/* Check if HostAPD or WFD mode setup */
+bool dhd_check_ap_wfd_mode_set(dhd_pub_t *dhd)
+{
+#ifdef WL_CFG80211
+#ifndef WL_ENABLE_P2P_IF
+ /* To be backward compatible with the ICS MR1 release where the p2p
+ * interface is disabled but wlan0 is used for p2p
+ */
+ if (((dhd->op_mode & HOSTAPD_MASK) == HOSTAPD_MASK) ||
+ ((dhd->op_mode & WFD_MASK) == WFD_MASK)) {
+ return TRUE;
+ }
+ else
+#else
+ /* concurrent mode with the p2p interface for wfd and wlan0 for sta */
+ if (((dhd->op_mode & P2P_GO_ENABLED) == P2P_GO_ENABLED) ||
+ ((dhd->op_mode & P2P_GC_ENABLED) == P2P_GC_ENABLED)) {
+ DHD_ERROR(("%s P2P enabled for mode=%d\n", __FUNCTION__, dhd->op_mode));
+ return TRUE;
+ }
+ else
+#endif /* WL_ENABLE_P2P_IF */
+#endif /* WL_CFG80211 */
+ return FALSE;
+}
+
+#if defined(PNO_SUPPORT)
+int
+dhd_pno_clean(dhd_pub_t *dhd)
+{
+ char iovbuf[128];
+ int pfn_enabled = 0;
+ int iov_len = 0;
+ int ret;
+
+ /* Disable pfn */
+ iov_len = bcm_mkiovar("pfn", (char *)&pfn_enabled, 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) >= 0) {
+ /* clear pfn */
+ iov_len = bcm_mkiovar("pfnclear", 0, 0, iovbuf, sizeof(iovbuf));
+ if (iov_len) {
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+ iov_len, TRUE, 0)) < 0) {
+ DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+ }
+ }
+ else {
+ ret = -1;
+ DHD_ERROR(("%s failed code %d\n", __FUNCTION__, iov_len));
+ }
+ }
+ else
+ DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+
+ return ret;
+}
+
+int
+dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled)
+{
+ char iovbuf[128];
+ int ret = -1;
+
+ if ((!dhd) && ((pfn_enabled != 0) || (pfn_enabled != 1))) {
+ DHD_ERROR(("%s error exit\n", __FUNCTION__));
+ return ret;
+ }
+
+ if (dhd_check_ap_wfd_mode_set(dhd) == TRUE)
+ return (ret);
+
+ memset(iovbuf, 0, sizeof(iovbuf));
+
+ if ((pfn_enabled) && (dhd_is_associated(dhd, NULL) == TRUE)) {
+ DHD_ERROR(("%s pno is NOT enable : called in assoc mode , ignore\n", __FUNCTION__));
+ return ret;
+ }
+
+ /* Enable/disable PNO */
+ if ((ret = bcm_mkiovar("pfn", (char *)&pfn_enabled, 4, iovbuf, sizeof(iovbuf))) > 0) {
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+ iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s failed for error=%d\n", __FUNCTION__, ret));
+ return ret;
+ }
+ else {
+ dhd->pno_enable = pfn_enabled;
+ DHD_TRACE(("%s set pno as %s\n",
+ __FUNCTION__, dhd->pno_enable ? "Enable" : "Disable"));
+ }
+ }
+ else DHD_ERROR(("%s failed err=%d\n", __FUNCTION__, ret));
+
+ return ret;
+}
+
+/* Function to execute combined scan */
+int
+dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid, ushort scan_fr,
+ int pno_repeat, int pno_freq_expo_max)
+{
+ int err = -1;
+ char iovbuf[128];
+ int k, i;
+ wl_pfn_param_t pfn_param;
+ wl_pfn_t pfn_element;
+ uint len = 0;
+
+ DHD_TRACE(("%s nssid=%d nchan=%d\n", __FUNCTION__, nssid, scan_fr));
+
+ if ((!dhd) && (!ssids_local)) {
+ DHD_ERROR(("%s error exit\n", __FUNCTION__));
+ err = -1;
+ return err;
+ }
+
+ if (dhd_check_ap_wfd_mode_set(dhd) == TRUE)
+ return (err);
+
+ /* Check for broadcast ssid */
+ for (k = 0; k < nssid; k++) {
+ if (!ssids_local[k].SSID_len) {
+ DHD_ERROR(("%d: Broadcast SSID is ilegal for PNO setting\n", k));
+ return err;
+ }
+ }
+/* #define PNO_DUMP 1 */
+#ifdef PNO_DUMP
+ {
+ int j;
+ for (j = 0; j < nssid; j++) {
+ DHD_ERROR(("%d: scan for %s size =%d\n", j,
+ ssids_local[j].SSID, ssids_local[j].SSID_len));
+ }
+ }
+#endif /* PNO_DUMP */
+
+ /* clean up everything */
+ if ((err = dhd_pno_clean(dhd)) < 0) {
+ DHD_ERROR(("%s failed error=%d\n", __FUNCTION__, err));
+ return err;
+ }
+ memset(iovbuf, 0, sizeof(iovbuf));
+ memset(&pfn_param, 0, sizeof(pfn_param));
+ memset(&pfn_element, 0, sizeof(pfn_element));
+
+ /* set pfn parameters */
+ pfn_param.version = htod32(PFN_VERSION);
+ pfn_param.flags = htod16((PFN_LIST_ORDER << SORT_CRITERIA_BIT));
+
+ /* check and set extra pno params */
+ if ((pno_repeat != 0) || (pno_freq_expo_max != 0)) {
+ pfn_param.flags |= htod16(ENABLE << ENABLE_ADAPTSCAN_BIT);
+ pfn_param.repeat = (uchar) (pno_repeat);
+ pfn_param.exp = (uchar) (pno_freq_expo_max);
+ }
+ /* set up pno scan fr */
+ if (scan_fr != 0)
+ pfn_param.scan_freq = htod32(scan_fr);
+
+ if (pfn_param.scan_freq > PNO_SCAN_MAX_FW_SEC) {
+ DHD_ERROR(("%s pno freq above %d sec\n", __FUNCTION__, PNO_SCAN_MAX_FW_SEC));
+ return err;
+ }
+ if (pfn_param.scan_freq < PNO_SCAN_MIN_FW_SEC) {
+ DHD_ERROR(("%s pno freq less %d sec\n", __FUNCTION__, PNO_SCAN_MIN_FW_SEC));
+ return err;
+ }
+
+ len = bcm_mkiovar("pfn_set", (char *)&pfn_param, sizeof(pfn_param), iovbuf, sizeof(iovbuf));
+ if ((err = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0)) < 0) {
+ DHD_ERROR(("%s pfn_set failed for error=%d\n",
+ __FUNCTION__, err));
+ return err;
+ }
+
+ /* set all pfn ssid */
+ for (i = 0; i < nssid; i++) {
+
+ pfn_element.infra = htod32(DOT11_BSSTYPE_INFRASTRUCTURE);
+ pfn_element.auth = (DOT11_OPEN_SYSTEM);
+ pfn_element.wpa_auth = htod32(WPA_AUTH_PFN_ANY);
+ pfn_element.wsec = htod32(0);
+ pfn_element.flags = htod32(ENABLE << WL_PFN_HIDDEN_BIT);
+ memcpy((char *)pfn_element.ssid.SSID, ssids_local[i].SSID, ssids_local[i].SSID_len);
+ pfn_element.ssid.SSID_len = ssids_local[i].SSID_len;
+
+ if ((len =
+ bcm_mkiovar("pfn_add", (char *)&pfn_element,
+ sizeof(pfn_element), iovbuf, sizeof(iovbuf))) > 0) {
+ if ((err =
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0)) < 0) {
+ DHD_ERROR(("%s failed for i=%d error=%d\n",
+ __FUNCTION__, i, err));
+ return err;
+ }
+ else
+ DHD_TRACE(("%s set OK with PNO time=%d repeat=%d max_adjust=%d\n",
+ __FUNCTION__, pfn_param.scan_freq,
+ pfn_param.repeat, pfn_param.exp));
+ }
+ else DHD_ERROR(("%s failed err=%d\n", __FUNCTION__, err));
+ }
+
+ /* Enable PNO */
+ /* dhd_pno_enable(dhd, 1); */
+ return err;
+}
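+
+/* Illustrative call sequence (not part of the driver, SSIDs are examples):
+ * program two SSIDs to be scanned every 60 seconds with no adaptive
+ * repeat/backoff, then turn PNO on.
+ *
+ * wlc_ssid_t ssids[2] = {{4, "ap-a"}, {4, "ap-b"}};
+ * dhd_pno_set(dhd, ssids, 2, 60, 0, 0);
+ * dhd_pno_enable(dhd, 1);
+ */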
+
+int
+dhd_pno_get_status(dhd_pub_t *dhd)
+{
+ int ret = -1;
+
+ if (!dhd)
+ return ret;
+ else
+ return (dhd->pno_enable);
+}
+
+#endif /* PNO_SUPPORT */
+
+#if defined(KEEP_ALIVE)
+int dhd_keep_alive_onoff(dhd_pub_t *dhd)
+{
+ char buf[256];
+ const char *str;
+ wl_mkeep_alive_pkt_t mkeep_alive_pkt;
+ wl_mkeep_alive_pkt_t *mkeep_alive_pktp;
+ int buf_len;
+ int str_len;
+ int res = -1;
+
+ if (dhd_check_ap_wfd_mode_set(dhd) == TRUE)
+ return (res);
+
+ DHD_TRACE(("%s execution\n", __FUNCTION__));
+
+ str = "mkeep_alive";
+ str_len = strlen(str);
+ strncpy(buf, str, str_len);
+ buf[ str_len ] = '\0';
+ mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1);
+ mkeep_alive_pkt.period_msec = KEEP_ALIVE_PERIOD;
+ buf_len = str_len + 1;
+ mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
+ mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
+ /* Setup keep alive zero for null packet generation */
+ mkeep_alive_pkt.keep_alive_id = 0;
+ mkeep_alive_pkt.len_bytes = 0;
+ buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
+ /* Keep-alive attributes are set in local variable (mkeep_alive_pkt), and
+ * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
+ * guarantee that the buffer is properly aligned.
+ */
+ memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
+
+ res = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+
+ return res;
+}
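+
+/* Sketch of the iovar buffer built above (layout only):
+ *
+ * "mkeep_alive\0" | wl_mkeep_alive_pkt_t (WL_MKEEP_ALIVE_FIXED_LEN bytes)
+ *
+ * i.e. the NUL-terminated iovar name followed by the fixed-length descriptor
+ * with len_bytes = 0, asking the firmware to generate null keep-alive frames
+ * every KEEP_ALIVE_PERIOD msec.
+ */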
+#endif /* defined(KEEP_ALIVE) */
+/* Android ComboSCAN support */
+
+/*
+ * data parsing from ComboScan tlv list
+*/
+int
+wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, const char token,
+ int input_size, int *bytes_left)
+{
+ char* str = *list_str;
+ uint16 short_temp;
+ uint32 int_temp;
+
+ if ((list_str == NULL) || (*list_str == NULL) || (bytes_left == NULL) || (*bytes_left < 0)) {
+ DHD_ERROR(("%s invalid parameters\n", __FUNCTION__));
+ return -1;
+ }
+
+ /* Clean all dest bytes */
+ memset(dst, 0, dst_size);
+ while (*bytes_left > 0) {
+
+ if (str[0] != token) {
+ DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n",
+ __FUNCTION__, token, str[0], *bytes_left));
+ return -1;
+ }
+
+ *bytes_left -= 1;
+ str += 1;
+
+ if (input_size == 1) {
+ memcpy(dst, str, input_size);
+ }
+ else if (input_size == 2) {
+ memcpy(&short_temp, str, input_size);
+ short_temp = htod16(short_temp);
+ memcpy(dst, &short_temp, input_size);
+ }
+ else if (input_size == 4) {
+ memcpy(&int_temp, str, input_size);
+ int_temp = htod32(int_temp);
+ memcpy(dst, &int_temp, input_size);
+ }
+
+ *bytes_left -= input_size;
+ str += input_size;
+ *list_str = str;
+ return 1;
+ }
+ return 1;
+}
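+
+/* Example of the TLV stream this parser expects (bytes are illustrative):
+ * each element is a one-byte type token followed by a fixed-size value of
+ * input_size bytes, e.g. for a 2-byte value with token 'T':
+ *
+ * [ 'T' ][ value lo ][ value hi ] ...
+ *
+ * The routine advances *list_str and *bytes_left past the element it consumed.
+ */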
+
+/*
+ * channel list parsing from cscan tlv list
+*/
+int
+wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list,
+ int channel_num, int *bytes_left)
+{
+ char* str = *list_str;
+ int idx = 0;
+
+ if ((list_str == NULL) || (*list_str == NULL) || (bytes_left == NULL) || (*bytes_left < 0)) {
+ DHD_ERROR(("%s invalid parameters\n", __FUNCTION__));
+ return -1;
+ }
+
+ while (*bytes_left > 0) {
+
+ if (str[0] != CSCAN_TLV_TYPE_CHANNEL_IE) {
+ *list_str = str;
+ DHD_TRACE(("End channel=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
+ return idx;
+ }
+ /* Get proper CSCAN_TLV_TYPE_CHANNEL_IE */
+ *bytes_left -= 1;
+ str += 1;
+
+ if (str[0] == 0) {
+ /* All channels */
+ channel_list[idx] = 0x0;
+ }
+ else {
+ channel_list[idx] = (uint16)str[0];
+ DHD_TRACE(("%s channel=%d \n", __FUNCTION__, channel_list[idx]));
+ }
+ *bytes_left -= 1;
+ str += 1;
+
+ if (idx++ > 255) {
+ DHD_ERROR(("%s Too many channels \n", __FUNCTION__));
+ return -1;
+ }
+ }
+
+ *list_str = str;
+ return idx;
+}
+
+/*
+ * SSIDs list parsing from cscan tlv list
+ */
+int
+wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid, int max, int *bytes_left)
+{
+ char* str;
+ int idx = 0;
+
+ if ((list_str == NULL) || (*list_str == NULL) || (*bytes_left < 0)) {
+ DHD_ERROR(("%s error paramters\n", __FUNCTION__));
+ return -1;
+ }
+ str = *list_str;
+ while (*bytes_left > 0) {
+
+ if (str[0] != CSCAN_TLV_TYPE_SSID_IE) {
+ *list_str = str;
+ DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
+ return idx;
+ }
+
+ /* Get proper CSCAN_TLV_TYPE_SSID_IE */
+ *bytes_left -= 1;
+ str += 1;
+
+ if (str[0] == 0) {
+ /* Broadcast SSID */
+ ssid[idx].SSID_len = 0;
+ memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN);
+ *bytes_left -= 1;
+ str += 1;
+
+ DHD_TRACE(("BROADCAST SCAN left=%d\n", *bytes_left));
+ }
+ else if (str[0] <= DOT11_MAX_SSID_LEN) {
+ /* Get proper SSID size */
+ ssid[idx].SSID_len = str[0];
+ *bytes_left -= 1;
+ str += 1;
+
+ /* Get SSID */
+ if (ssid[idx].SSID_len > *bytes_left) {
+ DHD_ERROR(("%s out of memory range len=%d but left=%d\n",
+ __FUNCTION__, ssid[idx].SSID_len, *bytes_left));
+ return -1;
+ }
+
+ memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len);
+
+ *bytes_left -= ssid[idx].SSID_len;
+ str += ssid[idx].SSID_len;
+
+ DHD_TRACE(("%s :size=%d left=%d\n",
+ (char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left));
+ }
+ else {
+ DHD_ERROR(("### SSID size more that %d\n", str[0]));
+ return -1;
+ }
+
+ if (idx++ > max) {
+ DHD_ERROR(("%s number of SSIDs more that %d\n", __FUNCTION__, idx));
+ return -1;
+ }
+ }
+
+ *list_str = str;
+ return idx;
+}
+
+/* Parse a comma-separated list from list_str into the ssid array, starting
+ * at index idx. max specifies the size of the ssid array. Parses the ssids
+ * and returns the updated idx; if idx >= max, not all of them fit and the
+ * excess have not been copied. Returns -1 on an empty list or an ssid that is too long.
+ */
+int
+wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max)
+{
+ char* str, *ptr;
+
+ if ((list_str == NULL) || (*list_str == NULL))
+ return -1;
+
+ for (str = *list_str; str != NULL; str = ptr) {
+
+ /* check for next TAG */
+ if (!strncmp(str, GET_CHANNEL, strlen(GET_CHANNEL))) {
+ *list_str = str + strlen(GET_CHANNEL);
+ return idx;
+ }
+
+ if ((ptr = strchr(str, ',')) != NULL) {
+ *ptr++ = '\0';
+ }
+
+ if (strlen(str) > DOT11_MAX_SSID_LEN) {
+ DHD_ERROR(("ssid <%s> exceeds %d\n", str, DOT11_MAX_SSID_LEN));
+ return -1;
+ }
+
+ if (strlen(str) == 0)
+ ssid[idx].SSID_len = 0;
+
+ if (idx < max) {
+ bcm_strcpy_s((char*)ssid[idx].SSID, sizeof(ssid[idx].SSID), str);
+ ssid[idx].SSID_len = strlen(str);
+ }
+ idx++;
+ }
+ return idx;
+}
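+
+/* Illustrative input (SSIDs and channels are examples): for a combo-scan
+ * command string such as "homeAP,guestAP" followed by the GET_CHANNEL tag and
+ * "1,6,11", this routine copies "homeAP" and "guestAP" into ssid[] and leaves
+ * *list_str just past the GET_CHANNEL tag so that wl_iw_parse_channel_list()
+ * below can pick up the channel numbers.
+ */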
+
+/*
+ * Parse channel list from iwpriv CSCAN
+ */
+int
+wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num)
+{
+ int num;
+ int val;
+ char* str;
+ char* endptr = NULL;
+
+ if ((list_str == NULL)||(*list_str == NULL))
+ return -1;
+
+ str = *list_str;
+ num = 0;
+ while (strncmp(str, GET_NPROBE, strlen(GET_NPROBE))) {
+ val = (int)strtoul(str, &endptr, 0);
+ if (endptr == str) {
+ printf("could not parse channel number starting at"
+ " substring \"%s\" in list:\n%s\n",
+ str, *list_str);
+ return -1;
+ }
+ str = endptr + strspn(endptr, " ,");
+
+ if (num == channel_num) {
+ DHD_ERROR(("too many channels (more than %d) in channel list:\n%s\n",
+ channel_num, *list_str));
+ return -1;
+ }
+
+ channel_list[num++] = (uint16)val;
+ }
+ *list_str = str;
+ return num;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c b/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c
new file mode 100644
index 000000000000..9a9d182e6b8e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c
@@ -0,0 +1,293 @@
+/*
+* Customer code to add GPIO control during WLAN start/stop
+* Copyright (C) 1999-2012, Broadcom Corporation
+*
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2 (the "GPL"),
+* available at http://www.broadcom.com/licenses/GPLv2.php, with the
+* following added to such license:
+*
+* As a special exception, the copyright holders of this software give you
+* permission to link this software with independent modules, and to copy and
+* distribute the resulting executable under terms of your choice, provided that
+* you also meet, for each linked independent module, the terms and conditions of
+* the license of that module. An independent module is a module which is not
+* derived from this software. The special exception does not apply to any
+* modifications of the software.
+*
+* Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a license
+* other than the GPL, without Broadcom's express prior written consent.
+*
+* $Id: dhd_custom_gpio.c 291086 2011-10-21 01:17:24Z $
+*/
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <bcmutils.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#include <wlioctl.h>
+#include <wl_iw.h>
+
+#define WL_ERROR(x) printf x
+#define WL_TRACE(x)
+
+#ifdef CUSTOMER_HW
+extern void bcm_wlan_power_off(int);
+extern void bcm_wlan_power_on(int);
+#endif /* CUSTOMER_HW */
+#if defined(CUSTOMER_HW2)
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+int wifi_set_power(int on, unsigned long msec);
+int wifi_get_irq_number(unsigned long *irq_flags_ptr);
+int wifi_get_mac_addr(unsigned char *buf);
+void *wifi_get_country_code(char *ccode);
+#else
+int wifi_set_power(int on, unsigned long msec) { return -1; }
+int wifi_get_irq_number(unsigned long *irq_flags_ptr) { return -1; }
+int wifi_get_mac_addr(unsigned char *buf) { return -1; }
+void *wifi_get_country_code(char *ccode) { return NULL; }
+#endif /* CONFIG_WIFI_CONTROL_FUNC */
+#endif /* CUSTOMER_HW2 */
+
+#if defined(OOB_INTR_ONLY)
+
+#if defined(BCMLXSDMMC)
+extern int sdioh_mmc_irq(int irq);
+#endif /* (BCMLXSDMMC) */
+
+#ifdef CUSTOMER_HW3
+#include <mach/gpio.h>
+#endif
+
+/* Customer specific Host GPIO definition */
+static int dhd_oob_gpio_num = -1;
+
+module_param(dhd_oob_gpio_num, int, 0644);
+MODULE_PARM_DESC(dhd_oob_gpio_num, "DHD oob gpio number");
+
+/* This function will return:
+ * 1) return : Host gpio interrupt number per customer platform
+ * 2) irq_flags_ptr : Type of Host interrupt as Level or Edge
+ *
+ * NOTE :
+ * Customer should check his platform definitions
+ * and his Host Interrupt spec
+ * to figure out the proper setting for his platform.
+ * Broadcom provides just reference settings as example.
+ *
+ */
+int dhd_customer_oob_irq_map(unsigned long *irq_flags_ptr)
+{
+ int host_oob_irq = 0;
+
+#ifdef CUSTOMER_HW2
+ host_oob_irq = wifi_get_irq_number(irq_flags_ptr);
+
+#else
+#if defined(CUSTOM_OOB_GPIO_NUM)
+ if (dhd_oob_gpio_num < 0) {
+ dhd_oob_gpio_num = CUSTOM_OOB_GPIO_NUM;
+ }
+#endif /* CUSTOM_OOB_GPIO_NUM */
+
+ if (dhd_oob_gpio_num < 0) {
+ WL_ERROR(("%s: ERROR customer specific Host GPIO is NOT defined \n",
+ __FUNCTION__));
+ return (dhd_oob_gpio_num);
+ }
+
+ WL_ERROR(("%s: customer specific Host GPIO number is (%d)\n",
+ __FUNCTION__, dhd_oob_gpio_num));
+
+#if defined CUSTOMER_HW
+ host_oob_irq = MSM_GPIO_TO_INT(dhd_oob_gpio_num);
+#elif defined CUSTOMER_HW3
+ gpio_request(dhd_oob_gpio_num, "oob irq");
+ host_oob_irq = gpio_to_irq(dhd_oob_gpio_num);
+ gpio_direction_input(dhd_oob_gpio_num);
+#endif /* CUSTOMER_HW */
+#endif /* CUSTOMER_HW2 */
+
+ return (host_oob_irq);
+}
+#endif /* defined(OOB_INTR_ONLY) */
+
+/* Customer function to control hw specific wlan gpios */
+void
+dhd_customer_gpio_wlan_ctrl(int onoff)
+{
+ switch (onoff) {
+ case WLAN_RESET_OFF:
+ WL_TRACE(("%s: call customer specific GPIO to insert WLAN RESET\n",
+ __FUNCTION__));
+#ifdef CUSTOMER_HW
+ bcm_wlan_power_off(2);
+#endif /* CUSTOMER_HW */
+#ifdef CUSTOMER_HW2
+ wifi_set_power(0, 0);
+#endif
+ WL_ERROR(("=========== WLAN placed in RESET ========\n"));
+ break;
+
+ case WLAN_RESET_ON:
+ WL_TRACE(("%s: callc customer specific GPIO to remove WLAN RESET\n",
+ __FUNCTION__));
+#ifdef CUSTOMER_HW
+ bcm_wlan_power_on(2);
+#endif /* CUSTOMER_HW */
+#ifdef CUSTOMER_HW2
+ wifi_set_power(1, 0);
+#endif
+ WL_ERROR(("=========== WLAN going back to live ========\n"));
+ break;
+
+ case WLAN_POWER_OFF:
+ WL_TRACE(("%s: call customer specific GPIO to turn off WL_REG_ON\n",
+ __FUNCTION__));
+#ifdef CUSTOMER_HW
+ bcm_wlan_power_off(1);
+#endif /* CUSTOMER_HW */
+ break;
+
+ case WLAN_POWER_ON:
+ WL_TRACE(("%s: call customer specific GPIO to turn on WL_REG_ON\n",
+ __FUNCTION__));
+#ifdef CUSTOMER_HW
+ bcm_wlan_power_on(1);
+ /* Let the customer power supply stabilize */
+ OSL_DELAY(200);
+#endif /* CUSTOMER_HW */
+ break;
+ }
+}
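+
+/* Typical call ordering from the platform glue (illustrative only):
+ *
+ * dhd_customer_gpio_wlan_ctrl(WLAN_POWER_ON); (raise WL_REG_ON, let it settle)
+ * dhd_customer_gpio_wlan_ctrl(WLAN_RESET_ON); (take WLAN out of reset)
+ * ...
+ * dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF); (put WLAN back in reset)
+ * dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF); (drop WL_REG_ON)
+ */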
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+/* Function to get custom MAC address */
+int
+dhd_custom_get_mac_address(unsigned char *buf)
+{
+ int ret = 0;
+
+ WL_TRACE(("%s Enter\n", __FUNCTION__));
+ if (!buf)
+ return -EINVAL;
+
+ /* Customer access to MAC address stored outside of DHD driver */
+#if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
+ ret = wifi_get_mac_addr(buf);
+#endif
+
+#ifdef EXAMPLE_GET_MAC
+ /* EXAMPLE code */
+ {
+ struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}};
+ bcopy((char *)&ea_example, buf, sizeof(struct ether_addr));
+ }
+#endif /* EXAMPLE_GET_MAC */
+
+ return ret;
+}
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+/* Customized Locale table : OPTIONAL feature */
+const struct cntry_locales_custom translate_custom_table[] = {
+/* Table should be filled out based on custom platform regulatory requirement */
+#ifdef EXAMPLE_TABLE
+ {"", "XY", 4}, /* Universal if Country code is unknown or empty */
+ {"US", "US", 69}, /* input ISO "US" to : US regrev 69 */
+ {"CA", "US", 69}, /* input ISO "CA" to : US regrev 69 */
+ {"EU", "EU", 5}, /* European union countries to : EU regrev 05 */
+ {"AT", "EU", 5},
+ {"BE", "EU", 5},
+ {"BG", "EU", 5},
+ {"CY", "EU", 5},
+ {"CZ", "EU", 5},
+ {"DK", "EU", 5},
+ {"EE", "EU", 5},
+ {"FI", "EU", 5},
+ {"FR", "EU", 5},
+ {"DE", "EU", 5},
+ {"GR", "EU", 5},
+ {"HU", "EU", 5},
+ {"IE", "EU", 5},
+ {"IT", "EU", 5},
+ {"LV", "EU", 5},
+ {"LI", "EU", 5},
+ {"LT", "EU", 5},
+ {"LU", "EU", 5},
+ {"MT", "EU", 5},
+ {"NL", "EU", 5},
+ {"PL", "EU", 5},
+ {"PT", "EU", 5},
+ {"RO", "EU", 5},
+ {"SK", "EU", 5},
+ {"SI", "EU", 5},
+ {"ES", "EU", 5},
+ {"SE", "EU", 5},
+ {"GB", "EU", 5},
+ {"KR", "XY", 3},
+ {"AU", "XY", 3},
+ {"CN", "XY", 3}, /* input ISO "CN" to : XY regrev 03 */
+ {"TW", "XY", 3},
+ {"AR", "XY", 3},
+ {"MX", "XY", 3},
+ {"IL", "IL", 0},
+ {"CH", "CH", 0},
+ {"TR", "TR", 0},
+ {"NO", "NO", 0},
+#endif /* EXAMPLE_TABLE */
+};
+
+
+/* Customized Locale convertor
+* input : ISO 3166-1 country abbreviation
+* output: customized cspec
+*/
+void get_customized_country_code(char *country_iso_code, wl_country_t *cspec)
+{
+#if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+
+ struct cntry_locales_custom *cloc_ptr;
+
+ if (!cspec)
+ return;
+
+ cloc_ptr = wifi_get_country_code(country_iso_code);
+ if (cloc_ptr) {
+ strlcpy(cspec->ccode, cloc_ptr->custom_locale, WLC_CNTRY_BUF_SZ);
+ cspec->rev = cloc_ptr->custom_locale_rev;
+ }
+ return;
+#else
+ int size, i;
+
+ size = ARRAYSIZE(translate_custom_table);
+
+ if (cspec == 0)
+ return;
+
+ if (size == 0)
+ return;
+
+ for (i = 0; i < size; i++) {
+ if (strcmp(country_iso_code, translate_custom_table[i].iso_abbrev) == 0) {
+ memcpy(cspec->ccode,
+ translate_custom_table[i].custom_locale, WLC_CNTRY_BUF_SZ);
+ cspec->rev = translate_custom_table[i].custom_locale_rev;
+ return;
+ }
+ }
+#ifdef EXAMPLE_TABLE
+ /* if no country code matched return first universal code from translate_custom_table */
+ memcpy(cspec->ccode, translate_custom_table[0].custom_locale, WLC_CNTRY_BUF_SZ);
+ cspec->rev = translate_custom_table[0].custom_locale_rev;
+#endif /* EXAMPLE_TABLE */
+ return;
+#endif /* defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */
+}
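+
+/* Illustrative use (not part of the driver): translate an ISO country code
+ * before pushing it to the firmware.  The country string is an example; with
+ * the EXAMPLE_TABLE above, "US" would map to ccode "US" and rev 69.
+ *
+ * wl_country_t cspec;
+ * memset(&cspec, 0, sizeof(cspec));
+ * get_customized_country_code("US", &cspec);
+ */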
diff --git a/drivers/net/wireless/bcmdhd/dhd_dbg.h b/drivers/net/wireless/bcmdhd/dhd_dbg.h
new file mode 100644
index 000000000000..2b40d2738c9d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_dbg.h
@@ -0,0 +1,110 @@
+/*
+ * Debug/trace/assert driver definitions for Dongle Host Driver.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_dbg.h 308299 2012-01-14 01:36:58Z $
+ */
+
+#ifndef _dhd_dbg_
+#define _dhd_dbg_
+
+#if defined(DHD_DEBUG)
+
+#define DHD_ERROR(args) do {if ((dhd_msg_level & DHD_ERROR_VAL) && (net_ratelimit())) \
+ printf args;} while (0)
+#define DHD_TRACE(args) do {if (dhd_msg_level & DHD_TRACE_VAL) printf args;} while (0)
+#define DHD_INFO(args) do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0)
+#define DHD_DATA(args) do {if (dhd_msg_level & DHD_DATA_VAL) printf args;} while (0)
+#define DHD_CTL(args) do {if (dhd_msg_level & DHD_CTL_VAL) printf args;} while (0)
+#define DHD_TIMER(args) do {if (dhd_msg_level & DHD_TIMER_VAL) printf args;} while (0)
+#define DHD_HDRS(args) do {if (dhd_msg_level & DHD_HDRS_VAL) printf args;} while (0)
+#define DHD_BYTES(args) do {if (dhd_msg_level & DHD_BYTES_VAL) printf args;} while (0)
+#define DHD_INTR(args) do {if (dhd_msg_level & DHD_INTR_VAL) printf args;} while (0)
+#define DHD_GLOM(args) do {if (dhd_msg_level & DHD_GLOM_VAL) printf args;} while (0)
+#define DHD_EVENT(args) do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0)
+#define DHD_BTA(args) do {if (dhd_msg_level & DHD_BTA_VAL) printf args;} while (0)
+#define DHD_ISCAN(args) do {if (dhd_msg_level & DHD_ISCAN_VAL) printf args;} while (0)
+#define DHD_ARPOE(args) do {if (dhd_msg_level & DHD_ARPOE_VAL) printf args;} while (0)
+#define DHD_REORDER(args) do {if (dhd_msg_level & DHD_REORDER_VAL) printf args;} while (0)
+
+#define DHD_ERROR_ON() (dhd_msg_level & DHD_ERROR_VAL)
+#define DHD_TRACE_ON() (dhd_msg_level & DHD_TRACE_VAL)
+#define DHD_INFO_ON() (dhd_msg_level & DHD_INFO_VAL)
+#define DHD_DATA_ON() (dhd_msg_level & DHD_DATA_VAL)
+#define DHD_CTL_ON() (dhd_msg_level & DHD_CTL_VAL)
+#define DHD_TIMER_ON() (dhd_msg_level & DHD_TIMER_VAL)
+#define DHD_HDRS_ON() (dhd_msg_level & DHD_HDRS_VAL)
+#define DHD_BYTES_ON() (dhd_msg_level & DHD_BYTES_VAL)
+#define DHD_INTR_ON() (dhd_msg_level & DHD_INTR_VAL)
+#define DHD_GLOM_ON() (dhd_msg_level & DHD_GLOM_VAL)
+#define DHD_EVENT_ON() (dhd_msg_level & DHD_EVENT_VAL)
+#define DHD_BTA_ON() (dhd_msg_level & DHD_BTA_VAL)
+#define DHD_ISCAN_ON() (dhd_msg_level & DHD_ISCAN_VAL)
+#define DHD_ARPOE_ON() (dhd_msg_level & DHD_ARPOE_VAL)
+#define DHD_REORDER_ON() (dhd_msg_level & DHD_REORDER_VAL)
+
+#else /* DHD_DEBUG */
+
+#define DHD_ERROR(args) do {if (net_ratelimit()) printf args;} while (0)
+#define DHD_TRACE(args)
+#define DHD_INFO(args)
+#define DHD_DATA(args)
+#define DHD_CTL(args)
+#define DHD_TIMER(args)
+#define DHD_HDRS(args)
+#define DHD_BYTES(args)
+#define DHD_INTR(args)
+#define DHD_GLOM(args)
+#define DHD_EVENT(args)
+#define DHD_BTA(args)
+#define DHD_ISCAN(args)
+#define DHD_ARPOE(args)
+#define DHD_REORDER(args)
+
+#define DHD_ERROR_ON() 0
+#define DHD_TRACE_ON() 0
+#define DHD_INFO_ON() 0
+#define DHD_DATA_ON() 0
+#define DHD_CTL_ON() 0
+#define DHD_TIMER_ON() 0
+#define DHD_HDRS_ON() 0
+#define DHD_BYTES_ON() 0
+#define DHD_INTR_ON() 0
+#define DHD_GLOM_ON() 0
+#define DHD_EVENT_ON() 0
+#define DHD_BTA_ON() 0
+#define DHD_ISCAN_ON() 0
+#define DHD_ARPOE_ON() 0
+#define DHD_REORDER_ON() 0
+#endif /* DHD_DEBUG */
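+
+/* Usage note: every macro above takes its arguments wrapped in an extra set
+ * of parentheses so that "printf args" expands into a complete call, e.g.
+ *
+ * DHD_ERROR(("%s: ioctl failed, ret=%d\n", __FUNCTION__, ret));
+ *
+ * When DHD_DEBUG is not defined, the trace-level macros compile away to
+ * nothing and only rate-limited DHD_ERROR output remains.
+ */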
+
+#define DHD_LOG(args)
+
+#define DHD_BLOG(cp, size)
+
+#define DHD_NONE(args)
+extern int dhd_msg_level;
+
+/* Defines msg bits */
+#include <dhdioctl.h>
+
+#endif /* _dhd_dbg_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux.c b/drivers/net/wireless/bcmdhd/dhd_linux.c
new file mode 100644
index 000000000000..0d1ba9d92ee1
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux.c
@@ -0,0 +1,5331 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
+ * Basically selected code segments from usb-cdc.c and usb-rndis.c
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_linux.c 329678 2012-04-26 08:51:32Z $
+ */
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+
+#include <proto/ethernet.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#ifdef CONFIG_HAS_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif
+
+#ifdef WLBTAMP
+#include <proto/802.11_bta.h>
+#include <proto/bt_amp_hci.h>
+#include <dhd_bta.h>
+#endif
+
+#ifdef WLMEDIA_HTSF
+#include <linux/time.h>
+#include <htsf.h>
+
+#define HTSF_MINLEN 200 /* min. packet length to timestamp */
+#define HTSF_BUS_DELAY 150 /* assume a fixed propagation delay in us */
+#define TSMAX 1000 /* max no. of timing records kept */
+#define NUMBIN 34
+
+static uint32 tsidx = 0;
+static uint32 htsf_seqnum = 0;
+uint32 tsfsync;
+struct timeval tsync;
+static uint32 tsport = 5010;
+
+typedef struct histo_ {
+ uint32 bin[NUMBIN];
+} histo_t;
+
+#if !ISPOWEROF2(DHD_SDALIGN)
+#error DHD_SDALIGN is not a power of 2!
+#endif
+
+static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
+#endif /* WLMEDIA_HTSF */
+
+#if defined(SOFTAP)
+extern bool ap_cfg_running;
+extern bool ap_fw_loaded;
+#endif
+
+/* enable HOSTIP cache update from the host side when an eth0:N is up */
+#define AOE_IP_ALIAS_SUPPORT 1
+
+#ifdef BCM_FD_AGGR
+#include <bcm_rpc.h>
+#include <bcm_rpc_tp.h>
+#endif
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+#include <wl_android.h>
+
+#ifdef ARP_OFFLOAD_SUPPORT
+void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add);
+static int dhd_device_event(struct notifier_block *this,
+ unsigned long event,
+ void *ptr);
+
+static struct notifier_block dhd_notifier = {
+ .notifier_call = dhd_device_event
+};
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+#include <linux/suspend.h>
+volatile bool dhd_mmc_suspend = FALSE;
+DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+#if defined(OOB_INTR_ONLY)
+extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
+#endif /* defined(OOB_INTR_ONLY) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+MODULE_LICENSE("GPL v2");
+#endif /* LinuxVer */
+
+#include <dhd_bus.h>
+
+#ifdef BCM_FD_AGGR
+#define DBUS_RX_BUFFER_SIZE_DHD(net) (BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
+#else
+#ifndef PROP_TXSTATUS
+#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
+#else
+#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
+#endif
+#endif /* BCM_FD_AGGR */
+
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
+const char *
+print_tainted()
+{
+ return "";
+}
+#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
+
+/* Linux wireless extension support */
+#if defined(CONFIG_WIRELESS_EXT)
+#include <wl_iw.h>
+extern wl_iw_extra_params_t g_wl_iw_params;
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+extern int dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len);
+extern int dhd_get_dtim_skip(dhd_pub_t *dhd);
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
+
+#ifdef PKT_FILTER_SUPPORT
+extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+#endif
+
+/* Interface control information */
+typedef struct dhd_if {
+ struct dhd_info *info; /* back pointer to dhd_info */
+ /* OS/stack specifics */
+ struct net_device *net;
+ struct net_device_stats stats;
+ int idx; /* iface idx in dongle */
+ dhd_if_state_t state; /* interface state */
+ uint subunit; /* subunit */
+ uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */
+ bool attached; /* Delayed attachment when unset */
+ bool txflowcontrol; /* Per interface flow control indicator */
+ char name[IFNAMSIZ+1]; /* linux interface name */
+ uint8 bssidx; /* bsscfg index for the interface */
+ bool set_multicast;
+ bool event2cfg80211; /* To determine if pass event to cfg80211 */
+} dhd_if_t;
+
+#ifdef WLMEDIA_HTSF
+typedef struct {
+ uint32 low;
+ uint32 high;
+} tsf_t;
+
+typedef struct {
+ uint32 last_cycle;
+ uint32 last_sec;
+ uint32 last_tsf;
+ uint32 coef; /* scaling factor */
+ uint32 coefdec1; /* first decimal */
+ uint32 coefdec2; /* second decimal */
+} htsf_t;
+
+typedef struct {
+ uint32 t1;
+ uint32 t2;
+ uint32 t3;
+ uint32 t4;
+} tstamp_t;
+
+static tstamp_t ts[TSMAX];
+static tstamp_t maxdelayts;
+static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;
+
+#endif /* WLMEDIA_HTSF */
+
+/* Local private structure (extension of pub) */
+typedef struct dhd_info {
+#if defined(CONFIG_WIRELESS_EXT)
+ wl_iw_t iw; /* wireless extensions state (must be first) */
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+ dhd_pub_t pub;
+
+ /* For supporting multiple interfaces */
+ dhd_if_t *iflist[DHD_MAX_IFS];
+
+ struct semaphore proto_sem;
+#ifdef PROP_TXSTATUS
+ spinlock_t wlfc_spinlock;
+#endif /* PROP_TXSTATUS */
+#ifdef WLMEDIA_HTSF
+ htsf_t htsf;
+#endif
+ wait_queue_head_t ioctl_resp_wait;
+ struct timer_list timer;
+ bool wd_timer_valid;
+ struct tasklet_struct tasklet;
+ spinlock_t sdlock;
+ spinlock_t txqlock;
+ spinlock_t dhd_lock;
+#ifdef DHDTHREAD
+ /* Thread based operation */
+ bool threads_only;
+ struct semaphore sdsem;
+
+ tsk_ctl_t thr_dpc_ctl;
+ tsk_ctl_t thr_wdt_ctl;
+#endif /* DHDTHREAD */
+ bool dhd_tasklet_create;
+ tsk_ctl_t thr_sysioc_ctl;
+
+ /* Wakelocks */
+#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ struct wake_lock wl_wifi; /* Wifi wakelock */
+ struct wake_lock wl_rxwake; /* Wifi rx wakelock */
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+ /* net_device interface lock, prevent race conditions among net_dev interface
+ * calls and wifi_on or wifi_off
+ */
+ struct mutex dhd_net_if_mutex;
+#endif
+ spinlock_t wakelock_spinlock;
+ int wakelock_counter;
+ int wakelock_timeout_enable;
+
+ /* Thread to issue ioctl for multicast */
+ unsigned char set_macaddress;
+ struct ether_addr macvalue;
+ wait_queue_head_t ctrl_wait;
+ atomic_t pend_8021x_cnt;
+ dhd_attach_states_t dhd_state;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend early_suspend;
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+#ifdef ARP_OFFLOAD_SUPPORT
+ u32 pend_ipaddr;
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef BCM_FD_AGGR
+ void *rpc_th;
+ void *rpc_osh;
+ struct timer_list rpcth_timer;
+ bool rpcth_timer_active;
+ bool fdaggr;
+#endif
+} dhd_info_t;
+
+
+/* Definitions to provide path to the firmware and nvram
+ * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
+ */
+char firmware_path[MOD_PARAM_PATHLEN];
+char nvram_path[MOD_PARAM_PATHLEN];
+
+int op_mode = 0;
+module_param(op_mode, int, 0644);
+extern int wl_control_wl_start(struct net_device *dev);
+extern int net_os_send_hang_message(struct net_device *dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+struct semaphore dhd_registration_sem;
+struct semaphore dhd_chipup_sem;
+
+#define DHD_REGISTRATION_TIMEOUT 12000 /* msec : allowed time to finish dhd registration */
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+
+/* Spawn a thread for system ioctls (set mac, set mcast) */
+uint dhd_sysioc = TRUE;
+module_param(dhd_sysioc, uint, 0);
+
+/* Error bits */
+module_param(dhd_msg_level, int, 0);
+
+/* load firmware and/or nvram values from the filesystem */
+module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
+module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0);
+
+/* Watchdog interval */
+uint dhd_watchdog_ms = 0;
+module_param(dhd_watchdog_ms, uint, 0);
+
+#if defined(DHD_DEBUG)
+/* Console poll interval */
+uint dhd_console_ms = 0;
+module_param(dhd_console_ms, uint, 0644);
+#endif /* defined(DHD_DEBUG) */
+
+uint dhd_slpauto = TRUE;
+module_param(dhd_slpauto, uint, 0);
+
+/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
+uint dhd_arp_mode = 0xb;
+module_param(dhd_arp_mode, uint, 0);
+
+/* ARP offload enable */
+uint dhd_arp_enable = TRUE;
+module_param(dhd_arp_enable, uint, 0);
+
+#ifdef PKT_FILTER_SUPPORT
+/* Global Pkt filter enable control */
+uint dhd_pkt_filter_enable = TRUE;
+module_param(dhd_pkt_filter_enable, uint, 0);
+#endif
+
+/* Pkt filter init setup */
+uint dhd_pkt_filter_init = 0;
+module_param(dhd_pkt_filter_init, uint, 0);
+
+/* Pkt filter mode control */
+uint dhd_master_mode = TRUE;
+module_param(dhd_master_mode, uint, 0);
+
+#ifdef DHDTHREAD
+/* Watchdog thread priority, -1 to use kernel timer */
+int dhd_watchdog_prio = 97;
+module_param(dhd_watchdog_prio, int, 0);
+
+/* DPC thread priority, -1 to use tasklet */
+int dhd_dpc_prio = 98;
+module_param(dhd_dpc_prio, int, 0);
+
+/* Dongle memory size override */
+extern int dhd_dongle_memsize;
+module_param(dhd_dongle_memsize, int, 0);
+#endif /* DHDTHREAD */
+/* Control fw roaming */
+uint dhd_roam_disable = 0;
+
+/* Control radio state */
+uint dhd_radio_up = 1;
+
+/* Network interface name */
+char iface_name[IFNAMSIZ] = {'\0'};
+module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define BLOCKABLE() (!in_atomic())
+#else
+#define BLOCKABLE() (!in_interrupt())
+#endif
+
+/* The following are specific to the SDIO dongle */
+
+/* IOCTL response timeout */
+int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
+
+/* Idle timeout for backplane clock */
+int dhd_idletime = DHD_IDLETIME_TICKS;
+module_param(dhd_idletime, int, 0);
+
+/* Use polling */
+uint dhd_poll = FALSE;
+module_param(dhd_poll, uint, 0);
+
+/* Use interrupts */
+uint dhd_intr = TRUE;
+module_param(dhd_intr, uint, 0);
+
+/* SDIO Drive Strength (in milliamps) */
+uint dhd_sdiod_drive_strength = 6;
+module_param(dhd_sdiod_drive_strength, uint, 0);
+
+/* Tx/Rx bounds */
+extern uint dhd_txbound;
+extern uint dhd_rxbound;
+module_param(dhd_txbound, uint, 0);
+module_param(dhd_rxbound, uint, 0);
+
+/* Deferred transmits */
+extern uint dhd_deferred_tx;
+module_param(dhd_deferred_tx, uint, 0);
+
+#ifdef BCMDBGFS
+extern void dhd_dbg_init(dhd_pub_t *dhdp);
+extern void dhd_dbg_remove(void);
+#endif /* BCMDBGFS */
+
+
+
+#ifdef SDTEST
+/* Echo packet generator (pkts/s) */
+uint dhd_pktgen = 0;
+module_param(dhd_pktgen, uint, 0);
+
+/* Echo packet len (0 => sawtooth, max 2040) */
+uint dhd_pktgen_len = 0;
+module_param(dhd_pktgen_len, uint, 0);
+#endif /* SDTEST */
+
+/* Version string to report */
+#ifdef DHD_DEBUG
+#ifndef SRCBASE
+#define SRCBASE "drivers/net/wireless/bcmdhd"
+#endif
+#define DHD_COMPILED "\nCompiled in " SRCBASE
+#else
+#define DHD_COMPILED
+#endif /* DHD_DEBUG */
+
+static char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR
+#ifdef DHD_DEBUG
+"\nCompiled in " SRCBASE " on " __DATE__ " at " __TIME__
+#endif
+;
+static void dhd_net_if_lock_local(dhd_info_t *dhd);
+static void dhd_net_if_unlock_local(dhd_info_t *dhd);
+
+#ifdef WLMEDIA_HTSF
+void htsf_update(dhd_info_t *dhd, void *data);
+tsf_t prev_tsf, cur_tsf;
+
+uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
+static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
+static void dhd_dump_latency(void);
+static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
+static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
+static void dhd_dump_htsfhisto(histo_t *his, char *s);
+#endif /* WLMEDIA_HTSF */
+
+/* Monitor interface */
+int dhd_monitor_init(void *dhd_pub);
+int dhd_monitor_uninit(void);
+
+
+#if defined(CONFIG_WIRELESS_EXT)
+struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+static void dhd_dpc(ulong data);
+/* forward decl */
+extern int dhd_wait_pend8021x(struct net_device *dev);
+
+#ifdef TOE
+#ifndef BDC
+#error TOE requires BDC
+#endif /* !BDC */
+static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
+static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
+#endif /* TOE */
+
+static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
+ wl_event_msg_t *event_ptr, void **data_ptr);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+static int dhd_sleep_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
+{
+ int ret = NOTIFY_DONE;
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39))
+ switch (action) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ dhd_mmc_suspend = TRUE;
+ ret = NOTIFY_OK;
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ dhd_mmc_suspend = FALSE;
+ ret = NOTIFY_OK;
+ break;
+ }
+ smp_mb();
+#endif
+ return ret;
+}
+
+static struct notifier_block dhd_sleep_pm_notifier = {
+ .notifier_call = dhd_sleep_pm_callback,
+ .priority = 0
+};
+extern int register_pm_notifier(struct notifier_block *nb);
+extern int unregister_pm_notifier(struct notifier_block *nb);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+static void dhd_set_packet_filter(int value, dhd_pub_t *dhd)
+{
+#ifdef PKT_FILTER_SUPPORT
+ DHD_TRACE(("%s: %d\n", __FUNCTION__, value));
+ /* 1 - Enable packet filter, only allow unicast packet to send up */
+ /* 0 - Disable packet filter */
+ if (dhd_pkt_filter_enable && !dhd->dhcp_in_progress) {
+ int i;
+
+ for (i = 0; i < dhd->pktfilter_count; i++) {
+ dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
+ dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
+ value, dhd_master_mode);
+ }
+ }
+#endif
+}
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+static int dhd_set_suspend(int value, dhd_pub_t *dhd)
+{
+ int power_mode = PM_MAX;
+ /* wl_pkt_filter_enable_t enable_parm; */
+ char iovbuf[32];
+ int bcn_li_dtim = 3;
+ uint roamvar = 1;
+
+ DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
+ __FUNCTION__, value, dhd->in_suspend));
+
+ if (dhd && dhd->up) {
+ if (value && dhd->in_suspend) {
+#ifdef PKT_FILTER_SUPPORT
+ dhd->early_suspended = 1;
+#endif
+ /* Kernel suspended */
+ DHD_ERROR(("%s: force extra Suspend setting \n", __FUNCTION__));
+
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
+ sizeof(power_mode), TRUE, 0);
+
+ /* Enable packet filter, only allow unicast packet to send up */
+ dhd_set_packet_filter(1, dhd);
+
+ /* If DTIM skip is set up as default, force it to wake
+ * each third DTIM for better power savings. Note that
+ * one side effect is a chance to miss BC/MC packets.
+ */
+ bcn_li_dtim = dhd_get_dtim_skip(dhd);
+ bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
+ 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+
+ /* Disable firmware roaming during suspend */
+ bcm_mkiovar("roam_off", (char *)&roamvar, 4,
+ iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ } else {
+#ifdef PKT_FILTER_SUPPORT
+ dhd->early_suspended = 0;
+#endif
+ /* Kernel resumed */
+ DHD_TRACE(("%s: Remove extra suspend setting \n", __FUNCTION__));
+
+ power_mode = PM_FAST;
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
+ sizeof(power_mode), TRUE, 0);
+
+ /* disable pkt filter */
+ dhd_set_packet_filter(0, dhd);
+
+ /* restore pre-suspend setting for dtim_skip */
+ bcm_mkiovar("bcn_li_dtim", (char *)&dhd->dtim_skip,
+ 4, iovbuf, sizeof(iovbuf));
+
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ roamvar = dhd_roam_disable;
+ bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf,
+ sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ }
+ }
+
+ return 0;
+}
+
+static void dhd_suspend_resume_helper(struct dhd_info *dhd, int val)
+{
+ dhd_pub_t *dhdp = &dhd->pub;
+
+ DHD_OS_WAKE_LOCK(dhdp);
+ /* Set flag when early suspend was called */
+ dhdp->in_suspend = val;
+ if ((!dhdp->suspend_disable_flag) && (dhd_check_ap_wfd_mode_set(dhdp) == FALSE))
+ dhd_set_suspend(val, dhdp);
+ DHD_OS_WAKE_UNLOCK(dhdp);
+}
+
+static void dhd_early_suspend(struct early_suspend *h)
+{
+ struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
+
+ DHD_TRACE(("%s: enter\n", __FUNCTION__));
+
+ if (dhd)
+ dhd_suspend_resume_helper(dhd, 1);
+}
+
+static void dhd_late_resume(struct early_suspend *h)
+{
+ struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
+
+ DHD_TRACE(("%s: enter\n", __FUNCTION__));
+
+ if (dhd)
+ dhd_suspend_resume_helper(dhd, 0);
+}
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
+
+/*
+ * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
+ * the sleep time reaches one jiffy, then switches over to task delay. Usage:
+ *
+ * dhd_timeout_start(&tmo, usec);
+ * while (!dhd_timeout_expired(&tmo))
+ * if (poll_something())
+ * break;
+ * if (dhd_timeout_expired(&tmo))
+ * fatal();
+ */
+
+void
+dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
+{
+ tmo->limit = usec;
+ tmo->increment = 0;
+ tmo->elapsed = 0;
+ tmo->tick = 1000000 / HZ;
+}
+
+int
+dhd_timeout_expired(dhd_timeout_t *tmo)
+{
+ /* Does nothing on the first call */
+ if (tmo->increment == 0) {
+ tmo->increment = 1;
+ return 0;
+ }
+
+ if (tmo->elapsed >= tmo->limit)
+ return 1;
+
+ /* Add the delay that's about to take place */
+ tmo->elapsed += tmo->increment;
+
+ if (tmo->increment < tmo->tick) {
+ OSL_DELAY(tmo->increment);
+ tmo->increment *= 2;
+ if (tmo->increment > tmo->tick)
+ tmo->increment = tmo->tick;
+ } else {
+ wait_queue_head_t delay_wait;
+ DECLARE_WAITQUEUE(wait, current);
+ int pending;
+ init_waitqueue_head(&delay_wait);
+ add_wait_queue(&delay_wait, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(1);
+ pending = signal_pending(current);
+ remove_wait_queue(&delay_wait, &wait);
+ set_current_state(TASK_RUNNING);
+ if (pending)
+ return 1; /* Interrupted */
+ }
+
+ return 0;
+}
+
+int
+dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
+{
+ int i = 0;
+
+ ASSERT(dhd);
+ while (i < DHD_MAX_IFS) {
+ if (dhd->iflist[i] && (dhd->iflist[i]->net == net))
+ return i;
+ i++;
+ }
+
+ return DHD_BAD_IF;
+}
+
+struct net_device * dhd_idx2net(void *pub, int ifidx)
+{
+ struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
+ struct dhd_info *dhd_info;
+
+ if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
+ return NULL;
+ dhd_info = dhd_pub->info;
+ if (dhd_info && dhd_info->iflist[ifidx])
+ return dhd_info->iflist[ifidx]->net;
+ return NULL;
+}
+
+int
+dhd_ifname2idx(dhd_info_t *dhd, char *name)
+{
+ int i = DHD_MAX_IFS;
+
+ ASSERT(dhd);
+
+ if (name == NULL || *name == '\0')
+ return 0;
+
+ while (--i > 0)
+ if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->name, name, IFNAMSIZ))
+ break;
+
+ DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
+
+ return i; /* default - the primary interface */
+}
+
+char *
+dhd_ifname(dhd_pub_t *dhdp, int ifidx)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+ ASSERT(dhd);
+
+ if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
+ DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
+ return "<if_bad>";
+ }
+
+ if (dhd->iflist[ifidx] == NULL) {
+ DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
+ return "<if_null>";
+ }
+
+ if (dhd->iflist[ifidx]->net)
+ return dhd->iflist[ifidx]->net->name;
+
+ return "<if_none>";
+}
+
+uint8 *
+dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
+{
+ int i;
+ dhd_info_t *dhd = (dhd_info_t *)dhdp;
+
+ ASSERT(dhd);
+ for (i = 0; i < DHD_MAX_IFS; i++)
+ if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
+ return dhd->iflist[i]->mac_addr;
+
+ return NULL;
+}
+
+
+static void
+_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
+{
+ struct net_device *dev;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+ struct netdev_hw_addr *ha;
+#else
+ struct dev_mc_list *mclist;
+#endif
+ uint32 allmulti, cnt;
+
+ wl_ioctl_t ioc;
+ char *buf, *bufp;
+ uint buflen;
+ int ret;
+
+ ASSERT(dhd && dhd->iflist[ifidx]);
+ dev = dhd->iflist[ifidx]->net;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+ netif_addr_lock_bh(dev);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+ cnt = netdev_mc_count(dev);
+#else
+ cnt = dev->mc_count;
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+ netif_addr_unlock_bh(dev);
+#endif
+
+ /* Determine initial value of allmulti flag */
+ allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
+
+ /* Send down the multicast list first. */
+
+
+ buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
+ if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
+ DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
+ dhd_ifname(&dhd->pub, ifidx), cnt));
+ return;
+ }
+
+ strcpy(bufp, "mcast_list");
+ bufp += strlen("mcast_list") + 1;
+
+ cnt = htol32(cnt);
+ memcpy(bufp, &cnt, sizeof(cnt));
+ bufp += sizeof(cnt);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+ netif_addr_lock_bh(dev);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+ netdev_for_each_mc_addr(ha, dev) {
+ if (!cnt)
+ break;
+ memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
+ bufp += ETHER_ADDR_LEN;
+ cnt--;
+ }
+#else
+ for (mclist = dev->mc_list; (mclist && (cnt > 0)); cnt--, mclist = mclist->next) {
+ memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
+ bufp += ETHER_ADDR_LEN;
+ }
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+ netif_addr_unlock_bh(dev);
+#endif
+
+ memset(&ioc, 0, sizeof(ioc));
+ ioc.cmd = WLC_SET_VAR;
+ ioc.buf = buf;
+ ioc.len = buflen;
+ ioc.set = TRUE;
+
+ ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+ if (ret < 0) {
+ DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
+ dhd_ifname(&dhd->pub, ifidx), cnt));
+ allmulti = cnt ? TRUE : allmulti;
+ }
+
+ MFREE(dhd->pub.osh, buf, buflen);
+
+ /* Now send the allmulti setting. This is based on the setting in the
+ * net_device flags, but might be modified above to be turned on if we
+ * were trying to set some addresses and dongle rejected it...
+ */
+
+ buflen = sizeof("allmulti") + sizeof(allmulti);
+ if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
+ DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
+ return;
+ }
+ allmulti = htol32(allmulti);
+
+ if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
+ DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
+ dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
+ MFREE(dhd->pub.osh, buf, buflen);
+ return;
+ }
+
+
+ memset(&ioc, 0, sizeof(ioc));
+ ioc.cmd = WLC_SET_VAR;
+ ioc.buf = buf;
+ ioc.len = buflen;
+ ioc.set = TRUE;
+
+ ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+ if (ret < 0) {
+ DHD_ERROR(("%s: set allmulti %d failed\n",
+ dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
+ }
+
+ MFREE(dhd->pub.osh, buf, buflen);
+
+ /* Finally, pick up the PROMISC flag as well, like the NIC driver does */
+
+ allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
+ allmulti = htol32(allmulti);
+
+ memset(&ioc, 0, sizeof(ioc));
+ ioc.cmd = WLC_SET_PROMISC;
+ ioc.buf = &allmulti;
+ ioc.len = sizeof(allmulti);
+ ioc.set = TRUE;
+
+ ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+ if (ret < 0) {
+ DHD_ERROR(("%s: set promisc %d failed\n",
+ dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
+ }
+}
+
+static int
+_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, struct ether_addr *addr)
+{
+ char buf[32];
+ wl_ioctl_t ioc;
+ int ret;
+
+ if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
+ DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
+ return -1;
+ }
+ memset(&ioc, 0, sizeof(ioc));
+ ioc.cmd = WLC_SET_VAR;
+ ioc.buf = buf;
+ ioc.len = 32;
+ ioc.set = TRUE;
+
+ ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+ if (ret < 0) {
+ DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
+ } else {
+ memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
+ memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
+ }
+
+ return ret;
+}
+
+#ifdef SOFTAP
+extern struct net_device *ap_net_dev;
+extern tsk_ctl_t ap_eth_ctl; /* ap netdev helper thread ctl */
+#endif
+
+static void
+dhd_op_if(dhd_if_t *ifp)
+{
+ dhd_info_t *dhd;
+ int ret = 0, err = 0;
+#ifdef SOFTAP
+ unsigned long flags;
+#endif
+
+ if (!ifp || !ifp->info || !ifp->idx)
+ return;
+ ASSERT(ifp && ifp->info && ifp->idx); /* Virtual interfaces only */
+ dhd = ifp->info;
+
+ DHD_TRACE(("%s: idx %d, state %d\n", __FUNCTION__, ifp->idx, ifp->state));
+
+#ifdef WL_CFG80211
+ if (wl_cfg80211_is_progress_ifchange())
+ return;
+
+#endif
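+ /* Interface state machine, driven from _dhd_sysioc_thread: DHD_IF_ADD
+ * allocates and registers a net_device for the virtual interface;
+ * DHD_IF_DEL unregisters it and relies on the cleanup below to free
+ * the net_device and the iflist slot.
+ */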
+ switch (ifp->state) {
+ case DHD_IF_ADD:
+ /*
+ * Delete the existing interface before overwriting it
+ * in case we missed the WLC_E_IF_DEL event.
+ */
+ if (ifp->net != NULL) {
+ DHD_ERROR(("%s: ERROR: netdev:%s already exists, try free & unregister \n",
+ __FUNCTION__, ifp->net->name));
+ netif_stop_queue(ifp->net);
+ unregister_netdev(ifp->net);
+ free_netdev(ifp->net);
+ }
+ /* Allocate etherdev, including space for private structure */
+ if (!(ifp->net = alloc_etherdev(sizeof(dhd)))) {
+ DHD_ERROR(("%s: OOM - alloc_etherdev\n", __FUNCTION__));
+ ret = -ENOMEM;
+ }
+ if (ret == 0) {
+ strncpy(ifp->net->name, ifp->name, IFNAMSIZ);
+ ifp->net->name[IFNAMSIZ - 1] = '\0';
+ memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
+#ifdef WL_CFG80211
+ if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)
+ if (!wl_cfg80211_notify_ifadd(ifp->net, ifp->idx, ifp->bssidx,
+ (void*)dhd_net_attach)) {
+ ifp->state = DHD_IF_NONE;
+ ifp->event2cfg80211 = TRUE;
+ return;
+ }
+#endif
+ if ((err = dhd_net_attach(&dhd->pub, ifp->idx)) != 0) {
+ DHD_ERROR(("%s: dhd_net_attach failed, err %d\n",
+ __FUNCTION__, err));
+ ret = -EOPNOTSUPP;
+ } else {
+#if defined(SOFTAP)
+ if (ap_fw_loaded && !(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
+ /* semaphore that the soft AP CODE waits on */
+ flags = dhd_os_spin_lock(&dhd->pub);
+
+ /* save ptr to wl0.1 netdev for use in wl_iw.c */
+ ap_net_dev = ifp->net;
+ /* signal to the SOFTAP 'sleeper' thread, wl0.1 is ready */
+ up(&ap_eth_ctl.sema);
+ dhd_os_spin_unlock(&dhd->pub, flags);
+ }
+#endif
+ DHD_TRACE(("\n ==== pid:%x, net_device for if:%s created ===\n\n",
+ current->pid, ifp->net->name));
+ ifp->state = DHD_IF_NONE;
+ }
+ }
+ break;
+ case DHD_IF_DEL:
+ /* Make sure that we don't enter here again if dhd_op_if
+ * is called again from some other context
+ */
+ ifp->state = DHD_IF_DELETING;
+ if (ifp->net != NULL) {
+ DHD_TRACE(("\n%s: got 'DHD_IF_DEL' state\n", __FUNCTION__));
+#ifdef WL_CFG80211
+ if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
+ wl_cfg80211_notify_ifdel(ifp->net);
+ }
+#endif
+ netif_stop_queue(ifp->net);
+ unregister_netdev(ifp->net);
+ ret = DHD_DEL_IF; /* Make sure the free_netdev() is called */
+ }
+ break;
+ case DHD_IF_DELETING:
+ break;
+ default:
+ DHD_ERROR(("%s: bad op %d\n", __FUNCTION__, ifp->state));
+ ASSERT(!ifp->state);
+ break;
+ }
+
+ if (ret < 0) {
+ ifp->set_multicast = FALSE;
+ if (ifp->net) {
+ free_netdev(ifp->net);
+ ifp->net = NULL;
+ }
+ dhd->iflist[ifp->idx] = NULL;
+#ifdef SOFTAP
+ flags = dhd_os_spin_lock(&dhd->pub);
+ if (ifp->net == ap_net_dev)
+ ap_net_dev = NULL; /* NULL SOFTAP global wl0.1 as well */
+ dhd_os_spin_unlock(&dhd->pub, flags);
+#endif /* SOFTAP */
+ MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
+ }
+}
+
+static int
+_dhd_sysioc_thread(void *data)
+{
+ tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+ dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+
+
+ int i;
+#ifdef SOFTAP
+ bool in_ap = FALSE;
+ unsigned long flags;
+#endif
+
+ DAEMONIZE("dhd_sysioc");
+
+ complete(&tsk->completed);
+
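+ /* Worker loop: each up() on tsk->sema signals that some interface has
+ * deferred work pending (add/delete, multicast list refresh, or MAC
+ * address change) that could not be performed in the caller's context.
+ */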
+ while (down_interruptible(&tsk->sema) == 0) {
+
+ SMP_RD_BARRIER_DEPENDS();
+ if (tsk->terminated) {
+ break;
+ }
+
+ dhd_net_if_lock_local(dhd);
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ if (dhd->iflist[i]) {
+ DHD_TRACE(("%s: interface %d\n", __FUNCTION__, i));
+#ifdef SOFTAP
+ flags = dhd_os_spin_lock(&dhd->pub);
+ in_ap = (ap_net_dev != NULL);
+ dhd_os_spin_unlock(&dhd->pub, flags);
+#endif /* SOFTAP */
+ if (dhd->iflist[i] && dhd->iflist[i]->state)
+ dhd_op_if(dhd->iflist[i]);
+
+ if (dhd->iflist[i] == NULL) {
+ DHD_TRACE(("\n\n %s: interface %d just been removed,"
+ "!\n\n", __FUNCTION__, i));
+ continue;
+ }
+#ifdef SOFTAP
+ if (in_ap && dhd->set_macaddress == i+1) {
+ DHD_TRACE(("attempt to set MAC for %s in AP Mode,"
+ "blocked. \n", dhd->iflist[i]->net->name));
+ dhd->set_macaddress = 0;
+ continue;
+ }
+
+ if (in_ap && dhd->iflist[i]->set_multicast) {
+ DHD_TRACE(("attempt to set MULTICAST list for %s"
+ "in AP Mode, blocked. \n", dhd->iflist[i]->net->name));
+ dhd->iflist[i]->set_multicast = FALSE;
+ continue;
+ }
+#endif /* SOFTAP */
+ if (dhd->iflist[i]->set_multicast) {
+ dhd->iflist[i]->set_multicast = FALSE;
+ _dhd_set_multicast_list(dhd, i);
+ }
+ if (dhd->set_macaddress == i+1) {
+ dhd->set_macaddress = 0;
+ _dhd_set_mac_address(dhd, i, &dhd->macvalue);
+ }
+ }
+ }
+
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ dhd_net_if_unlock_local(dhd);
+ }
+ DHD_TRACE(("%s: stopped\n", __FUNCTION__));
+ complete_and_exit(&tsk->completed, 0);
+}
+
+static int
+dhd_set_mac_address(struct net_device *dev, void *addr)
+{
+ int ret = 0;
+
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ struct sockaddr *sa = (struct sockaddr *)addr;
+ int ifidx;
+
+ ifidx = dhd_net2idx(dhd, dev);
+ if (ifidx == DHD_BAD_IF)
+ return -1;
+
+ ASSERT(&dhd->thr_sysioc_ctl.thr_pid >= 0);
+ memcpy(&dhd->macvalue, sa->sa_data, ETHER_ADDR_LEN);
+ dhd->set_macaddress = ifidx+1;
+ up(&dhd->thr_sysioc_ctl.sema);
+
+ return ret;
+}
+
+static void
+dhd_set_multicast_list(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int ifidx;
+
+ ifidx = dhd_net2idx(dhd, dev);
+ if (ifidx == DHD_BAD_IF)
+ return;
+
+ ASSERT(&dhd->thr_sysioc_ctl.thr_pid >= 0);
+ dhd->iflist[ifidx]->set_multicast = TRUE;
+ up(&dhd->thr_sysioc_ctl.sema);
+}
+
+#ifdef PROP_TXSTATUS
+int
+dhd_os_wlfc_block(dhd_pub_t *pub)
+{
+ dhd_info_t *di = (dhd_info_t *)(pub->info);
+ ASSERT(di != NULL);
+ spin_lock_bh(&di->wlfc_spinlock);
+ return 1;
+}
+
+int
+dhd_os_wlfc_unblock(dhd_pub_t *pub)
+{
+ dhd_info_t *di = (dhd_info_t *)(pub->info);
+
+ (void)di;
+ ASSERT(di != NULL);
+ spin_unlock_bh(&di->wlfc_spinlock);
+ return 1;
+}
+
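+/* Map an 802.1D priority to a WMM access category: prio2fifo selects the
+ * FIFO for a given priority and wme_fifo2ac maps that FIFO back to an AC.
+ */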
+const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
+uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
+#define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
+
+#endif /* PROP_TXSTATUS */
+int
+dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
+{
+ int ret;
+ dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+ struct ether_header *eh = NULL;
+
+ /* Reject if down */
+ if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
+ /* free the packet here since the caller won't */
+ PKTFREE(dhdp->osh, pktbuf, TRUE);
+ return -ENODEV;
+ }
+
+ /* Update multicast statistic */
+ if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
+ uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
+ eh = (struct ether_header *)pktdata;
+
+ if (ETHER_ISMULTI(eh->ether_dhost))
+ dhdp->tx_multicast++;
+ if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
+ atomic_inc(&dhd->pend_8021x_cnt);
+ } else {
+ PKTFREE(dhd->pub.osh, pktbuf, TRUE);
+ return BCME_ERROR;
+ }
+
+ /* Look into the packet and update the packet priority */
+ if (PKTPRIO(pktbuf) == 0)
+ pktsetprio(pktbuf, FALSE);
+
+#ifdef PROP_TXSTATUS
+ if (dhdp->wlfc_state) {
+ /* store the interface ID */
+ DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
+
+ /* store destination MAC in the tag as well */
+ DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
+
+ /* decide which FIFO this packet belongs to */
+ if (ETHER_ISMULTI(eh->ether_dhost))
+ /* one additional queue index (highest AC + 1) is used for bc/mc queue */
+ DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
+ else
+ DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
+ } else
+#endif /* PROP_TXSTATUS */
+ /* If the protocol uses a data header, apply it */
+ dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
+
+ /* Use bus module to send data frame */
+#ifdef WLMEDIA_HTSF
+ dhd_htsf_addtxts(dhdp, pktbuf);
+#endif
+#ifdef PROP_TXSTATUS
+ if (dhdp->wlfc_state && ((athost_wl_status_info_t*)dhdp->wlfc_state)->proptxstatus_mode
+ != WLFC_FCMODE_NONE) {
+ dhd_os_wlfc_block(dhdp);
+ ret = dhd_wlfc_enque_sendq(dhdp->wlfc_state, DHD_PKTTAG_FIFO(PKTTAG(pktbuf)),
+ pktbuf);
+ dhd_wlfc_commit_packets(dhdp->wlfc_state, (f_commitpkt_t)dhd_bus_txdata,
+ dhdp->bus);
+ if (((athost_wl_status_info_t*)dhdp->wlfc_state)->toggle_host_if) {
+ ((athost_wl_status_info_t*)dhdp->wlfc_state)->toggle_host_if = 0;
+ }
+ dhd_os_wlfc_unblock(dhdp);
+ }
+ else
+ /* non-proptxstatus way */
+ ret = dhd_bus_txdata(dhdp->bus, pktbuf);
+#else
+ ret = dhd_bus_txdata(dhdp->bus, pktbuf);
+#endif /* PROP_TXSTATUS */
+
+ return ret;
+}
+
+int
+dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
+{
+ int ret;
+ void *pktbuf;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+ int ifidx;
+#ifdef WLMEDIA_HTSF
+ uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz;
+#else
+ uint8 htsfdlystat_sz = 0;
+#endif
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+
+ /* Reject if down */
+ if (!dhd->pub.up || (dhd->pub.busstate == DHD_BUS_DOWN)) {
+ DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
+ __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
+ netif_stop_queue(net);
+ /* Send Event when bus down detected during data session */
+ if (dhd->pub.busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
+ net_os_send_hang_message(net);
+ }
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+ return -ENODEV;
+#else
+ return NETDEV_TX_BUSY;
+#endif
+ }
+
+ ifidx = dhd_net2idx(dhd, net);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
+ netif_stop_queue(net);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+ return -ENODEV;
+#else
+ return NETDEV_TX_BUSY;
+#endif
+ }
+
+ /* Make sure there's enough room for any header */
+
+ if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
+ struct sk_buff *skb2;
+
+ DHD_INFO(("%s: insufficient headroom\n",
+ dhd_ifname(&dhd->pub, ifidx)));
+ dhd->pub.tx_realloc++;
+
+ skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
+
+ dev_kfree_skb(skb);
+ if ((skb = skb2) == NULL) {
+ DHD_ERROR(("%s: skb_realloc_headroom failed\n",
+ dhd_ifname(&dhd->pub, ifidx)));
+ ret = -ENOMEM;
+ goto done;
+ }
+ }
+
+ /* Convert to packet */
+ if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
+ DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
+ dhd_ifname(&dhd->pub, ifidx)));
+ dev_kfree_skb_any(skb);
+ ret = -ENOMEM;
+ goto done;
+ }
+#ifdef WLMEDIA_HTSF
+ if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) {
+ uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf);
+ struct ether_header *eh = (struct ether_header *)pktdata;
+
+ if (!ETHER_ISMULTI(eh->ether_dhost) &&
+ (ntoh16(eh->ether_type) == ETHER_TYPE_IP)) {
+ eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS);
+ }
+ }
+#endif
+
+ ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
+
+
+done:
+ if (ret)
+ dhd->pub.dstats.tx_dropped++;
+ else
+ dhd->pub.tx_packets++;
+
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+ /* Return ok: we always eat the packet */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+ return 0;
+#else
+ return NETDEV_TX_OK;
+#endif
+}
+
+void
+dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
+{
+ struct net_device *net;
+ dhd_info_t *dhd = dhdp->info;
+ int i;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ dhdp->txoff = state;
+ ASSERT(dhd);
+
+ if (ifidx == ALL_INTERFACES) {
+ /* Flow control on all active interfaces */
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ if (dhd->iflist[i]) {
+ net = dhd->iflist[i]->net;
+ if (state == ON)
+ netif_stop_queue(net);
+ else
+ netif_wake_queue(net);
+ }
+ }
+ }
+ else {
+ if (dhd->iflist[ifidx]) {
+ net = dhd->iflist[ifidx]->net;
+ if (state == ON)
+ netif_stop_queue(net);
+ else
+ netif_wake_queue(net);
+ }
+ }
+}
+
+void
+dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+ struct sk_buff *skb;
+ uchar *eth;
+ uint len;
+ void *data, *pnext = NULL;
+ int i;
+ dhd_if_t *ifp;
+ wl_event_msg_t event;
+ int tout = DHD_PACKET_TIMEOUT_MS;
+
+
+ BCM_REFERENCE(tout);
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
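+ /* Walk the (possibly chained) packet list: unchain each buffer, convert
+ * it to a native skb, intercept firmware event packets, and hand the
+ * rest up to the network stack.
+ */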
+ for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
+#ifdef WLBTAMP
+ struct ether_header *eh;
+ struct dot11_llc_snap_header *lsh;
+#endif
+
+ ifp = dhd->iflist[ifidx];
+ if (ifp == NULL) {
+ DHD_ERROR(("%s: ifp is NULL. drop packet\n",
+ __FUNCTION__));
+ PKTFREE(dhdp->osh, pktbuf, TRUE);
+ continue;
+ }
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+ /* Dropping packets before registering net device to avoid kernel panic */
+ if (!ifp->net || ifp->net->reg_state != NETREG_REGISTERED ||
+ !dhd->pub.up) {
+ DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
+ __FUNCTION__));
+ PKTFREE(dhdp->osh, pktbuf, TRUE);
+ continue;
+ }
+#endif
+
+ pnext = PKTNEXT(dhdp->osh, pktbuf);
+ PKTSETNEXT(dhdp->osh, pktbuf, NULL);
+
+#ifdef WLBTAMP
+ eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
+ lsh = (struct dot11_llc_snap_header *)&eh[1];
+
+ if ((ntoh16(eh->ether_type) < ETHER_TYPE_MIN) &&
+ (PKTLEN(dhdp->osh, pktbuf) >= RFC1042_HDR_LEN) &&
+ bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
+ lsh->type == HTON16(BTA_PROT_L2CAP)) {
+ amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)
+ ((uint8 *)eh + RFC1042_HDR_LEN);
+ ACL_data = NULL;
+ }
+#endif /* WLBTAMP */
+
+#ifdef PROP_TXSTATUS
+ if (dhdp->wlfc_state && PKTLEN(wl->sh.osh, pktbuf) == 0) {
+ /* WLFC may send header only packet when
+ there is an urgent message but no packet to
+ piggy-back on
+ */
+ ((athost_wl_status_info_t*)dhdp->wlfc_state)->stats.wlfc_header_only_pkt++;
+ PKTFREE(dhdp->osh, pktbuf, TRUE);
+ continue;
+ }
+#endif
+
+ skb = PKTTONATIVE(dhdp->osh, pktbuf);
+
+ /* Get the protocol, preserving skb->data/len around eth_type_trans().
+ * This works around a Linux 2.4 limitation where eth_type_trans() pulls
+ * 'net->hard_header_len' bytes instead of ETH_HLEN. To avoid copying
+ * packets coming from the network stack when adding the BDC and hardware
+ * headers, we register the interface with 'net->hard_header_len' set to
+ * ETH_HLEN plus the extra space needed for those headers, not just ETH_HLEN.
+ */
+ eth = skb->data;
+ len = skb->len;
+
+ ifp = dhd->iflist[ifidx];
+ if (ifp == NULL)
+ ifp = dhd->iflist[0];
+
+ ASSERT(ifp);
+ skb->dev = ifp->net;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if (skb->pkt_type == PACKET_MULTICAST) {
+ dhd->pub.rx_multicast++;
+ }
+
+ skb->data = eth;
+ skb->len = len;
+
+#ifdef WLMEDIA_HTSF
+ dhd_htsf_addrxts(dhdp, pktbuf);
+#endif
+ /* Strip header, count, deliver upward */
+ skb_pull(skb, ETH_HLEN);
+
+ /* Process special event packets and then discard them */
+ if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
+ dhd_wl_host_event(dhd, &ifidx,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+ skb->mac_header,
+#else
+ skb->mac.raw,
+#endif
+ &event,
+ &data);
+
+#ifdef WLBTAMP
+ wl_event_to_host_order(&event);
+ if (event.event_type == WLC_E_BTA_HCI_EVENT) {
+ dhd_bta_doevt(dhdp, data, event.datalen);
+ }
+ tout = DHD_EVENT_TIMEOUT_MS;
+#endif /* WLBTAMP */
+ }
+
+ ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
+ if (dhd->iflist[ifidx] && !dhd->iflist[ifidx]->state)
+ ifp = dhd->iflist[ifidx];
+
+ if (ifp->net)
+ ifp->net->last_rx = jiffies;
+
+ dhdp->dstats.rx_bytes += skb->len;
+ dhdp->rx_packets++; /* Local count */
+
+ if (in_interrupt()) {
+ netif_rx(skb);
+ } else {
+ /* If the receive is not processed inside an ISR,
+ * the softirqd must be woken explicitly to service
+ * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
+ * by netif_rx_ni(), but in earlier kernels, we need
+ * to do it manually.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+ netif_rx_ni(skb);
+#else
+ ulong flags;
+ netif_rx(skb);
+ local_irq_save(flags);
+ RAISE_RX_SOFTIRQ();
+ local_irq_restore(flags);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
+ }
+ }
+ DHD_OS_WAKE_LOCK_TIMEOUT_ENABLE(dhdp, tout);
+}
+
+void
+dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
+{
+ /* Linux version has nothing to do */
+ return;
+}
+
+void
+dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
+{
+ uint ifidx;
+ dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+ struct ether_header *eh;
+ uint16 type;
+#ifdef WLBTAMP
+ uint len;
+#endif
+
+ dhd_prot_hdrpull(dhdp, &ifidx, txp, NULL, NULL);
+
+ eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
+ type = ntoh16(eh->ether_type);
+
+ if (type == ETHER_TYPE_802_1X)
+ atomic_dec(&dhd->pend_8021x_cnt);
+
+#ifdef WLBTAMP
+ /* Crack open the packet and check whether it is a BT HCI ACL data packet.
+ * If so, generate a packet completion event.
+ */
+ len = PKTLEN(dhdp->osh, txp);
+
+ /* Generate ACL data tx completion event locally to avoid SDIO bus transaction */
+ if ((type < ETHER_TYPE_MIN) && (len >= RFC1042_HDR_LEN)) {
+ struct dot11_llc_snap_header *lsh = (struct dot11_llc_snap_header *)&eh[1];
+
+ if (bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
+ ntoh16(lsh->type) == BTA_PROT_L2CAP) {
+
+ dhd_bta_tx_hcidata_complete(dhdp, txp, success);
+ }
+ }
+#endif /* WLBTAMP */
+}
+
+static struct net_device_stats *
+dhd_get_stats(struct net_device *net)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+ dhd_if_t *ifp;
+ int ifidx;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ifidx = dhd_net2idx(dhd, net);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
+ return NULL;
+ }
+
+ ifp = dhd->iflist[ifidx];
+ ASSERT(dhd && ifp);
+
+ if (dhd->pub.up) {
+ /* Use the protocol to get dongle stats */
+ dhd_prot_dstats(&dhd->pub);
+ }
+
+ /* Copy dongle stats to net device stats */
+ ifp->stats.rx_packets = dhd->pub.dstats.rx_packets;
+ ifp->stats.tx_packets = dhd->pub.dstats.tx_packets;
+ ifp->stats.rx_bytes = dhd->pub.dstats.rx_bytes;
+ ifp->stats.tx_bytes = dhd->pub.dstats.tx_bytes;
+ ifp->stats.rx_errors = dhd->pub.dstats.rx_errors;
+ ifp->stats.tx_errors = dhd->pub.dstats.tx_errors;
+ ifp->stats.rx_dropped = dhd->pub.dstats.rx_dropped;
+ ifp->stats.tx_dropped = dhd->pub.dstats.tx_dropped;
+ ifp->stats.multicast = dhd->pub.dstats.multicast;
+
+ return &ifp->stats;
+}
+
+#ifdef DHDTHREAD
+static int
+dhd_watchdog_thread(void *data)
+{
+ tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+ dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+ /* This thread doesn't need any user-level access,
+ * so get rid of all our resources
+ */
+ if (dhd_watchdog_prio > 0) {
+ struct sched_param param;
+ param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
+ dhd_watchdog_prio:(MAX_RT_PRIO-1);
+ setScheduler(current, SCHED_FIFO, &param);
+ }
+
+ DAEMONIZE("dhd_watchdog");
+
+ /* Run until signal received */
+ complete(&tsk->completed);
+
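+ /* Each up() on tsk->sema, posted from the dhd_watchdog() timer callback,
+ * runs one bus watchdog pass and re-arms the timer.
+ */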
+ while (1)
+ if (down_interruptible (&tsk->sema) == 0) {
+ unsigned long flags;
+
+ SMP_RD_BARRIER_DEPENDS();
+ if (tsk->terminated) {
+ break;
+ }
+
+ dhd_os_sdlock(&dhd->pub);
+ if (dhd->pub.dongle_reset == FALSE) {
+ DHD_TIMER(("%s:\n", __FUNCTION__));
+
+ /* Call the bus module watchdog */
+ dhd_bus_watchdog(&dhd->pub);
+
+ flags = dhd_os_spin_lock(&dhd->pub);
+ /* Count the tick for reference */
+ dhd->pub.tickcnt++;
+ /* Reschedule the watchdog */
+ if (dhd->wd_timer_valid)
+ mod_timer(&dhd->timer,
+ jiffies + dhd_watchdog_ms * HZ / 1000);
+ dhd_os_spin_unlock(&dhd->pub, flags);
+ }
+ dhd_os_sdunlock(&dhd->pub);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ } else {
+ break;
+ }
+
+ complete_and_exit(&tsk->completed, 0);
+}
+#endif /* DHDTHREAD */
+
+static void dhd_watchdog(ulong data)
+{
+ dhd_info_t *dhd = (dhd_info_t *)data;
+ unsigned long flags;
+
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+ if (dhd->pub.dongle_reset) {
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return;
+ }
+
+#ifdef DHDTHREAD
+ if (dhd->thr_wdt_ctl.thr_pid >= 0) {
+ up(&dhd->thr_wdt_ctl.sema);
+ return;
+ }
+#endif /* DHDTHREAD */
+
+ dhd_os_sdlock(&dhd->pub);
+ /* Call the bus module watchdog */
+ dhd_bus_watchdog(&dhd->pub);
+
+ flags = dhd_os_spin_lock(&dhd->pub);
+ /* Count the tick for reference */
+ dhd->pub.tickcnt++;
+
+ /* Reschedule the watchdog */
+ if (dhd->wd_timer_valid)
+ mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000);
+ dhd_os_spin_unlock(&dhd->pub, flags);
+ dhd_os_sdunlock(&dhd->pub);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+}
+
+#ifdef DHDTHREAD
+static int
+dhd_dpc_thread(void *data)
+{
+ tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+ dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+
+ /* This thread doesn't need any user-level access,
+ * so get rid of all our resources
+ */
+ if (dhd_dpc_prio > 0)
+ {
+ struct sched_param param;
+ param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
+ setScheduler(current, SCHED_FIFO, &param);
+ }
+
+ DAEMONIZE("dhd_dpc");
+ /* DHD_OS_WAKE_LOCK is called in dhd_sched_dpc[dhd_linux.c] down below */
+
+ /* signal: thread has started */
+ complete(&tsk->completed);
+
+ /* Run until signal received */
+ while (1) {
+ if (down_interruptible(&tsk->sema) == 0) {
+
+ SMP_RD_BARRIER_DEPENDS();
+ if (tsk->terminated) {
+ break;
+ }
+
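+ /* dhd_bus_dpc() returns TRUE while more work is pending, in which case
+ * the semaphore is re-posted for another pass; otherwise release the
+ * wake lock taken in dhd_sched_dpc().
+ */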
+ /* Call bus dpc unless it indicated down (then clean stop) */
+ if (dhd->pub.busstate != DHD_BUS_DOWN) {
+ if (dhd_bus_dpc(dhd->pub.bus)) {
+ up(&tsk->sema);
+ }
+ else {
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ }
+ } else {
+ if (dhd->pub.up)
+ dhd_bus_stop(dhd->pub.bus, TRUE);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ }
+ }
+ else
+ break;
+ }
+
+ complete_and_exit(&tsk->completed, 0);
+}
+#endif /* DHDTHREAD */
+
+static void
+dhd_dpc(ulong data)
+{
+ dhd_info_t *dhd;
+
+ dhd = (dhd_info_t *)data;
+
+ /* This tasklet is scheduled from dhd_sched_dpc() below, which takes the
+ * wake lock; the tasklet itself is initialized in dhd_attach().
+ */
+ /* Call bus dpc unless it indicated down (then clean stop) */
+ if (dhd->pub.busstate != DHD_BUS_DOWN) {
+ if (dhd_bus_dpc(dhd->pub.bus))
+ tasklet_schedule(&dhd->tasklet);
+ else
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ } else {
+ dhd_bus_stop(dhd->pub.bus, TRUE);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ }
+}
+
+void
+dhd_sched_dpc(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+ DHD_OS_WAKE_LOCK(dhdp);
+#ifdef DHDTHREAD
+ if (dhd->thr_dpc_ctl.thr_pid >= 0) {
+ up(&dhd->thr_dpc_ctl.sema);
+ return;
+ }
+#endif /* DHDTHREAD */
+
+ if (dhd->dhd_tasklet_create)
+ tasklet_schedule(&dhd->tasklet);
+}
+
+#ifdef TOE
+/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
+static int
+dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
+{
+ wl_ioctl_t ioc;
+ char buf[32];
+ int ret;
+
+ memset(&ioc, 0, sizeof(ioc));
+
+ ioc.cmd = WLC_GET_VAR;
+ ioc.buf = buf;
+ ioc.len = (uint)sizeof(buf);
+ ioc.set = FALSE;
+
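+ /* For a GET iovar the NUL-terminated name is written into the ioctl
+ * buffer; on return the dongle has overwritten it with the value.
+ */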
+ strcpy(buf, "toe_ol");
+ if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+ /* Check for older dongle image that doesn't support toe_ol */
+ if (ret == -EIO) {
+ DHD_ERROR(("%s: toe not supported by device\n",
+ dhd_ifname(&dhd->pub, ifidx)));
+ return -EOPNOTSUPP;
+ }
+
+ DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
+ return ret;
+ }
+
+ memcpy(toe_ol, buf, sizeof(uint32));
+ return 0;
+}
+
+/* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
+static int
+dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
+{
+ wl_ioctl_t ioc;
+ char buf[32];
+ int toe, ret;
+
+ memset(&ioc, 0, sizeof(ioc));
+
+ ioc.cmd = WLC_SET_VAR;
+ ioc.buf = buf;
+ ioc.len = (uint)sizeof(buf);
+ ioc.set = TRUE;
+
+ /* Set toe_ol as requested */
+
+ strcpy(buf, "toe_ol");
+ memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32));
+
+ if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+ DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
+ dhd_ifname(&dhd->pub, ifidx), ret));
+ return ret;
+ }
+
+ /* Enable toe globally only if any components are enabled. */
+
+ toe = (toe_ol != 0);
+
+ strcpy(buf, "toe");
+ memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32));
+
+ if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+ DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
+ return ret;
+ }
+
+ return 0;
+}
+#endif /* TOE */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+static void
+dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+
+ sprintf(info->driver, "wl");
+ sprintf(info->version, "%lu", dhd->pub.drv_version);
+}
+
+struct ethtool_ops dhd_ethtool_ops = {
+ .get_drvinfo = dhd_ethtool_get_drvinfo
+};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
+static int
+dhd_ethtool(dhd_info_t *dhd, void *uaddr)
+{
+ struct ethtool_drvinfo info;
+ char drvname[sizeof(info.driver)];
+ uint32 cmd;
+#ifdef TOE
+ struct ethtool_value edata;
+ uint32 toe_cmpnt, csum_dir;
+ int ret;
+#endif
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* all ethtool calls start with a cmd word */
+ if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case ETHTOOL_GDRVINFO:
+ /* Copy out any request driver name */
+ if (copy_from_user(&info, uaddr, sizeof(info)))
+ return -EFAULT;
+ strncpy(drvname, info.driver, sizeof(info.driver));
+ drvname[sizeof(info.driver)-1] = '\0';
+
+ /* clear struct for return */
+ memset(&info, 0, sizeof(info));
+ info.cmd = cmd;
+
+ /* if dhd requested, identify ourselves */
+ if (strcmp(drvname, "?dhd") == 0) {
+ sprintf(info.driver, "dhd");
+ strcpy(info.version, EPI_VERSION_STR);
+ }
+
+ /* otherwise, require dongle to be up */
+ else if (!dhd->pub.up) {
+ DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ /* finally, report dongle driver type */
+ else if (dhd->pub.iswl)
+ sprintf(info.driver, "wl");
+ else
+ sprintf(info.driver, "xx");
+
+ sprintf(info.version, "%lu", dhd->pub.drv_version);
+ if (copy_to_user(uaddr, &info, sizeof(info)))
+ return -EFAULT;
+ DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
+ (int)sizeof(drvname), drvname, info.driver));
+ break;
+
+#ifdef TOE
+ /* Get toe offload components from dongle */
+ case ETHTOOL_GRXCSUM:
+ case ETHTOOL_GTXCSUM:
+ if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
+ return ret;
+
+ csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+ edata.cmd = cmd;
+ edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
+
+ if (copy_to_user(uaddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ break;
+
+ /* Set toe offload components in dongle */
+ case ETHTOOL_SRXCSUM:
+ case ETHTOOL_STXCSUM:
+ if (copy_from_user(&edata, uaddr, sizeof(edata)))
+ return -EFAULT;
+
+ /* Read the current settings, update and write back */
+ if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
+ return ret;
+
+ csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+ if (edata.data != 0)
+ toe_cmpnt |= csum_dir;
+ else
+ toe_cmpnt &= ~csum_dir;
+
+ if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
+ return ret;
+
+ /* If setting TX checksum mode, tell Linux the new mode */
+ if (cmd == ETHTOOL_STXCSUM) {
+ if (edata.data)
+ dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
+ else
+ dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
+ }
+
+ break;
+#endif /* TOE */
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
+
+static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
+{
+ if (!dhdp)
+ return FALSE;
+ if ((error == -ETIMEDOUT) || ((dhdp->busstate == DHD_BUS_DOWN) &&
+ (!dhdp->dongle_reset))) {
+ DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__,
+ dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
+ net_os_send_hang_message(net);
+ return TRUE;
+ }
+ return FALSE;
+}
+
+static int
+dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+ dhd_ioctl_t ioc;
+ int bcmerror = 0;
+ int buflen = 0;
+ void *buf = NULL;
+ uint driver = 0;
+ int ifidx;
+ int ret;
+
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+
+ /* send to dongle only if we are not waiting for reload already */
+ if (dhd->pub.hang_was_sent) {
+ DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
+ DHD_OS_WAKE_LOCK_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return OSL_ERROR(BCME_DONGLE_DOWN);
+ }
+
+ ifidx = dhd_net2idx(dhd, net);
+ DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
+
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return -1;
+ }
+
+#if defined(CONFIG_WIRELESS_EXT)
+ /* linux wireless extensions */
+ if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
+ /* may recurse, do NOT lock */
+ ret = wl_iw_ioctl(net, ifr, cmd);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return ret;
+ }
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
+ if (cmd == SIOCETHTOOL) {
+ ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return ret;
+ }
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
+
+ if (cmd == SIOCDEVPRIVATE+1) {
+ ret = wl_android_priv_cmd(net, ifr, cmd);
+ dhd_check_hang(net, &dhd->pub, ret);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return ret;
+ }
+
+ if (cmd != SIOCDEVPRIVATE) {
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return -EOPNOTSUPP;
+ }
+
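+ /* Everything below handles SIOCDEVPRIVATE: copy in the ioctl header and
+ * any user buffer, then peek one extra word after the header to tell
+ * local DHD ioctls (DHD_IOCTL_MAGIC) from wl ioctls forwarded to the
+ * dongle.
+ */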
+ memset(&ioc, 0, sizeof(ioc));
+
+ /* Copy the ioc control structure part of ioctl request */
+ if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
+ bcmerror = -BCME_BADADDR;
+ goto done;
+ }
+
+ /* Copy out any buffer passed */
+ if (ioc.buf) {
+ if (ioc.len == 0) {
+ DHD_TRACE(("%s: ioc.len=0, returns BCME_BADARG \n", __FUNCTION__));
+ bcmerror = -BCME_BADARG;
+ goto done;
+ }
+ buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
+ /* optimization for direct ioctl calls from kernel */
+ /*
+ if (segment_eq(get_fs(), KERNEL_DS)) {
+ buf = ioc.buf;
+ } else {
+ */
+ {
+ if (!(buf = (char*)MALLOC(dhd->pub.osh, buflen))) {
+ bcmerror = -BCME_NOMEM;
+ goto done;
+ }
+ if (copy_from_user(buf, ioc.buf, buflen)) {
+ bcmerror = -BCME_BADADDR;
+ goto done;
+ }
+ }
+ }
+
+ /* To differentiate between wl and dhd read 4 more bytes */
+ if ((copy_from_user(&driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
+ sizeof(uint)) != 0)) {
+ bcmerror = -BCME_BADADDR;
+ goto done;
+ }
+
+ if (!capable(CAP_NET_ADMIN)) {
+ bcmerror = -BCME_EPERM;
+ goto done;
+ }
+
+ /* check for local dhd ioctl and handle it */
+ if (driver == DHD_IOCTL_MAGIC) {
+ bcmerror = dhd_ioctl((void *)&dhd->pub, &ioc, buf, buflen);
+ if (bcmerror)
+ dhd->pub.bcmerror = bcmerror;
+ goto done;
+ }
+
+ /* send to dongle (must be up, and wl). */
+ if (dhd->pub.busstate != DHD_BUS_DATA) {
+ bcmerror = BCME_DONGLE_DOWN;
+ goto done;
+ }
+
+ if (!dhd->pub.iswl) {
+ bcmerror = BCME_DONGLE_DOWN;
+ goto done;
+ }
+
+ /*
+ * Flush the TX queue if required for proper message serialization:
+ * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
+ * prevent M4 encryption and
+ * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
+ * prevent disassoc frame being sent before WPS-DONE frame.
+ */
+ if (ioc.cmd == WLC_SET_KEY ||
+ (ioc.cmd == WLC_SET_VAR && ioc.buf != NULL &&
+ strncmp("wsec_key", ioc.buf, 9) == 0) ||
+ (ioc.cmd == WLC_SET_VAR && ioc.buf != NULL &&
+ strncmp("bsscfg:wsec_key", ioc.buf, 15) == 0) ||
+ ioc.cmd == WLC_DISASSOC)
+ dhd_wait_pend8021x(net);
+
+#ifdef WLMEDIA_HTSF
+ if (ioc.buf) {
+ /* short cut wl ioctl calls here */
+ if (strcmp("htsf", ioc.buf) == 0) {
+ dhd_ioctl_htsf_get(dhd, 0);
+ return BCME_OK;
+ }
+
+ if (strcmp("htsflate", ioc.buf) == 0) {
+ if (ioc.set) {
+ memset(ts, 0, sizeof(tstamp_t)*TSMAX);
+ memset(&maxdelayts, 0, sizeof(tstamp_t));
+ maxdelay = 0;
+ tspktcnt = 0;
+ maxdelaypktno = 0;
+ memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
+ memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
+ memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
+ memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
+ } else {
+ dhd_dump_latency();
+ }
+ return BCME_OK;
+ }
+ if (strcmp("htsfclear", ioc.buf) == 0) {
+ memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
+ memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
+ memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
+ memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
+ htsf_seqnum = 0;
+ return BCME_OK;
+ }
+ if (strcmp("htsfhis", ioc.buf) == 0) {
+ dhd_dump_htsfhisto(&vi_d1, "H to D");
+ dhd_dump_htsfhisto(&vi_d2, "D to D");
+ dhd_dump_htsfhisto(&vi_d3, "D to H");
+ dhd_dump_htsfhisto(&vi_d4, "H to H");
+ return BCME_OK;
+ }
+ if (strcmp("tsport", ioc.buf) == 0) {
+ if (ioc.set) {
+ memcpy(&tsport, ioc.buf + 7, 4);
+ } else {
+ DHD_ERROR(("current timestamp port: %d \n", tsport));
+ }
+ return BCME_OK;
+ }
+ }
+#endif /* WLMEDIA_HTSF */
+
+ if ((ioc.cmd == WLC_SET_VAR || ioc.cmd == WLC_GET_VAR) &&
+ ioc.buf != NULL && strncmp("rpc_", ioc.buf, 4) == 0) {
+#ifdef BCM_FD_AGGR
+ bcmerror = dhd_fdaggr_ioctl(&dhd->pub, ifidx, (wl_ioctl_t *)&ioc, buf, buflen);
+#else
+ bcmerror = BCME_UNSUPPORTED;
+#endif
+ goto done;
+ }
+ bcmerror = dhd_wl_ioctl(&dhd->pub, ifidx, (wl_ioctl_t *)&ioc, buf, buflen);
+
+done:
+ dhd_check_hang(net, &dhd->pub, bcmerror);
+
+ if (!bcmerror && buf && ioc.buf) {
+ if (copy_to_user(ioc.buf, buf, buflen))
+ bcmerror = -EFAULT;
+ }
+
+ if (buf)
+ MFREE(dhd->pub.osh, buf, buflen);
+
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+ return OSL_ERROR(bcmerror);
+}
+
+#ifdef WL_CFG80211
+static int
+dhd_cleanup_virt_ifaces(dhd_info_t *dhd)
+{
+ int i = 1; /* Leave ifidx 0 [Primary Interface] */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ int rollback_lock = FALSE;
+#endif
+
+ DHD_TRACE(("%s: Enter \n", __func__));
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ /* release lock for unregister_netdev */
+ if (rtnl_is_locked()) {
+ rtnl_unlock();
+ rollback_lock = TRUE;
+ }
+#endif
+
+ for (i = 1; i < DHD_MAX_IFS; i++) {
+ if (dhd->iflist[i]) {
+ DHD_TRACE(("Deleting IF: %d \n", i));
+ if ((dhd->iflist[i]->state != DHD_IF_DEL) &&
+ (dhd->iflist[i]->state != DHD_IF_DELETING)) {
+ dhd->iflist[i]->state = DHD_IF_DEL;
+ dhd->iflist[i]->idx = i;
+ dhd_net_if_lock_local(dhd);
+ dhd_op_if(dhd->iflist[i]);
+ dhd_net_if_unlock_local(dhd);
+ }
+ }
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ if (rollback_lock)
+ rtnl_lock();
+#endif
+
+ return 0;
+}
+#endif /* WL_CFG80211 */
+
+static int
+dhd_stop(struct net_device *net)
+{
+ int ifidx;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+ DHD_TRACE(("%s: Enter %p\n", __FUNCTION__, net));
+ if (dhd->pub.up == 0) {
+ goto exit;
+ }
+ ifidx = dhd_net2idx(dhd, net);
+ BCM_REFERENCE(ifidx);
+
+#ifdef WL_CFG80211
+ if (ifidx == 0) {
+ wl_cfg80211_down(NULL);
+
+ /*
+ * For CFG80211: Clean up all the left over virtual interfaces
+ * when the primary Interface is brought down. [ifconfig wlan0 down]
+ */
+ if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
+ (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
+ dhd_cleanup_virt_ifaces(dhd);
+ }
+ }
+#endif
+
+#ifdef PROP_TXSTATUS
+ dhd_wlfc_cleanup(&dhd->pub);
+#endif
+ /* Set state and stop OS transmissions */
+ dhd->pub.up = 0;
+ netif_stop_queue(net);
+
+ /* Stop the protocol module */
+ dhd_prot_stop(&dhd->pub);
+
+#if defined(WL_CFG80211)
+ if (ifidx == 0 && !dhd_download_fw_on_driverload)
+ wl_android_wifi_off(net);
+#endif
+ dhd->pub.hang_was_sent = 0;
+ dhd->pub.rxcnt_timeout = 0;
+ dhd->pub.txcnt_timeout = 0;
+ OLD_MOD_DEC_USE_COUNT;
+exit:
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return 0;
+}
+
+static int
+dhd_open(struct net_device *net)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+ uint up = 0;
+#ifdef TOE
+ uint32 toe_ol;
+#endif
+ int ifidx;
+ int32 ret = 0;
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+ /* Update FW path if it was changed */
+ if ((firmware_path != NULL) && (firmware_path[0] != '\0')) {
+ if (firmware_path[strlen(firmware_path)-1] == '\n')
+ firmware_path[strlen(firmware_path)-1] = '\0';
+ strcpy(fw_path, firmware_path);
+ firmware_path[0] = '\0';
+ }
+
+#if !defined(WL_CFG80211)
+ /*
+ * Force start if ifconfig_up gets called before START command
+ * We keep WEXT's wl_control_wl_start to provide backward compatibility
+ * This should be removed in the future
+ */
+ wl_control_wl_start(net);
+#endif
+
+ ifidx = dhd_net2idx(dhd, net);
+ DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
+
+ if (ifidx < 0) {
+ DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
+ ret = -1;
+ goto exit;
+ }
+
+ if (!dhd->iflist[ifidx] || dhd->iflist[ifidx]->state == DHD_IF_DEL) {
+ DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
+ ret = -1;
+ goto exit;
+ }
+
+ if (ifidx == 0) {
+ atomic_set(&dhd->pend_8021x_cnt, 0);
+#if defined(WL_CFG80211)
+ DHD_ERROR(("\n%s\n", dhd_version));
+ if (!dhd_download_fw_on_driverload) {
+ ret = wl_android_wifi_on(net);
+ if (ret != 0) {
+ DHD_ERROR(("wl_android_wifi_on failed (%d)\n", ret));
+ goto exit;
+ }
+ }
+#endif
+
+ if (dhd->pub.busstate != DHD_BUS_DATA) {
+
+ /* try to bring up bus */
+ if ((ret = dhd_bus_start(&dhd->pub)) != 0) {
+ DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
+ ret = -1;
+ goto exit;
+ }
+
+ }
+
+ /* dhd_prot_init has been called in dhd_bus_start or wl_android_wifi_on */
+ memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
+
+#ifdef TOE
+ /* Get current TOE mode from dongle */
+ if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0)
+ dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
+ else
+ dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
+#endif /* TOE */
+
+#if defined(WL_CFG80211)
+ if (unlikely(wl_cfg80211_up(NULL))) {
+ DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
+ ret = -1;
+ goto exit;
+ }
+#endif /* WL_CFG80211 */
+ }
+
+ /* Allow transmit calls */
+ netif_start_queue(net);
+ dhd->pub.up = 1;
+
+ /* Fire a WLC_UP for primary interface to enable RF */
+ if (ifidx == 0)
+ dhd_wl_ioctl_cmd(&dhd->pub, WLC_UP, (char *)&up, sizeof(up), TRUE, 0);
+
+#ifdef BCMDBGFS
+ dhd_dbg_init(&dhd->pub);
+#endif
+
+ OLD_MOD_INC_USE_COUNT;
+exit:
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return ret;
+}
+
+int dhd_do_driver_init(struct net_device *net)
+{
+ dhd_info_t *dhd = NULL;
+
+ if (!net) {
+ DHD_ERROR(("Primary Interface not initialized \n"));
+ return -EINVAL;
+ }
+
+ dhd = *(dhd_info_t **)netdev_priv(net);
+
+ /* If driver is already initialized, do nothing
+ */
+ if (dhd->pub.busstate == DHD_BUS_DATA) {
+ DHD_TRACE(("Driver already Inititalized. Nothing to do"));
+ return 0;
+ }
+
+ if (dhd_open(net) < 0) {
+ DHD_ERROR(("Driver Init Failed \n"));
+ return -1;
+ }
+
+ return 0;
+}
+
+osl_t *
+dhd_osl_attach(void *pdev, uint bustype)
+{
+ return osl_attach(pdev, bustype, TRUE);
+}
+
+void
+dhd_osl_detach(osl_t *osh)
+{
+ if (MALLOCED(osh)) {
+ DHD_ERROR(("%s: MEMORY LEAK %d bytes\n", __FUNCTION__, MALLOCED(osh)));
+ }
+ osl_detach(osh);
+#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ up(&dhd_registration_sem);
+#if defined(BCMLXSDMMC)
+ up(&dhd_chipup_sem);
+#endif
+#endif
+}
+
+int
+dhd_add_if(dhd_info_t *dhd, int ifidx, void *handle, char *name,
+ uint8 *mac_addr, uint32 flags, uint8 bssidx)
+{
+ dhd_if_t *ifp;
+
+ DHD_TRACE(("%s: idx %d, handle->%p\n", __FUNCTION__, ifidx, handle));
+
+ ASSERT(dhd && (ifidx < DHD_MAX_IFS));
+
+ ifp = dhd->iflist[ifidx];
+ if (ifp != NULL) {
+ if (ifp->net != NULL) {
+ netif_stop_queue(ifp->net);
+ unregister_netdev(ifp->net);
+ free_netdev(ifp->net);
+ }
+ } else
+ if ((ifp = MALLOC(dhd->pub.osh, sizeof(dhd_if_t))) == NULL) {
+ DHD_ERROR(("%s: OOM - dhd_if_t\n", __FUNCTION__));
+ return -ENOMEM;
+ }
+
+ memset(ifp, 0, sizeof(dhd_if_t));
+ ifp->event2cfg80211 = FALSE;
+ ifp->info = dhd;
+ dhd->iflist[ifidx] = ifp;
+ strncpy(ifp->name, name, IFNAMSIZ);
+ ifp->name[IFNAMSIZ] = '\0';
+ if (mac_addr != NULL)
+ memcpy(&ifp->mac_addr, mac_addr, ETHER_ADDR_LEN);
+
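+ /* With no pre-allocated net_device, defer creation to the sysioc
+ * thread: mark the slot DHD_IF_ADD and kick the thread. Otherwise
+ * adopt the caller-supplied net_device directly.
+ */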
+ if (handle == NULL) {
+ ifp->state = DHD_IF_ADD;
+ ifp->idx = ifidx;
+ ifp->bssidx = bssidx;
+ ASSERT(&dhd->thr_sysioc_ctl.thr_pid >= 0);
+ up(&dhd->thr_sysioc_ctl.sema);
+ } else
+ ifp->net = (struct net_device *)handle;
+
+ if (ifidx == 0) {
+ ifp->event2cfg80211 = TRUE;
+ }
+
+ return 0;
+}
+
+void
+dhd_del_if(dhd_info_t *dhd, int ifidx)
+{
+ dhd_if_t *ifp;
+
+ DHD_TRACE(("%s: idx %d\n", __FUNCTION__, ifidx));
+
+ ASSERT(dhd && ifidx && (ifidx < DHD_MAX_IFS));
+ ifp = dhd->iflist[ifidx];
+ if (!ifp) {
+ DHD_ERROR(("%s: Null interface\n", __FUNCTION__));
+ return;
+ }
+
+ ifp->state = DHD_IF_DEL;
+ ifp->idx = ifidx;
+ ASSERT(&dhd->thr_sysioc_ctl.thr_pid >= 0);
+ up(&dhd->thr_sysioc_ctl.sema);
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+static struct net_device_ops dhd_ops_pri = {
+ .ndo_open = dhd_open,
+ .ndo_stop = dhd_stop,
+ .ndo_get_stats = dhd_get_stats,
+ .ndo_do_ioctl = dhd_ioctl_entry,
+ .ndo_start_xmit = dhd_start_xmit,
+ .ndo_set_mac_address = dhd_set_mac_address,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+ .ndo_set_rx_mode = dhd_set_multicast_list,
+#else
+ .ndo_set_multicast_list = dhd_set_multicast_list,
+#endif
+};
+
+static struct net_device_ops dhd_ops_virt = {
+ .ndo_get_stats = dhd_get_stats,
+ .ndo_do_ioctl = dhd_ioctl_entry,
+ .ndo_start_xmit = dhd_start_xmit,
+ .ndo_set_mac_address = dhd_set_mac_address,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+ .ndo_set_rx_mode = dhd_set_multicast_list,
+#else
+ .ndo_set_multicast_list = dhd_set_multicast_list,
+#endif
+};
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
+
+dhd_pub_t *
+dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
+{
+ dhd_info_t *dhd = NULL;
+ struct net_device *net = NULL;
+
+ dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
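+ /* dhd_state accumulates DHD_ATTACH_STATE_* bits as each stage completes
+ * so the fail path (and dhd_detach) can unwind only what was set up.
+ */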
+ /* updates firmware nvram path if it was provided as module parameters */
+ if ((firmware_path != NULL) && (firmware_path[0] != '\0'))
+ strcpy(fw_path, firmware_path);
+ if ((nvram_path != NULL) && (nvram_path[0] != '\0'))
+ strcpy(nv_path, nvram_path);
+
+ /* Allocate etherdev, including space for private structure */
+ if (!(net = alloc_etherdev(sizeof(dhd)))) {
+ DHD_ERROR(("%s: OOM - alloc_etherdev\n", __FUNCTION__));
+ goto fail;
+ }
+ dhd_state |= DHD_ATTACH_STATE_NET_ALLOC;
+
+ /* Allocate primary dhd_info */
+ if (!(dhd = MALLOC(osh, sizeof(dhd_info_t)))) {
+ DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
+ goto fail;
+ }
+ memset(dhd, 0, sizeof(dhd_info_t));
+
+#ifdef DHDTHREAD
+ dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
+ dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
+#endif /* DHDTHREAD */
+ dhd->dhd_tasklet_create = FALSE;
+ dhd->thr_sysioc_ctl.thr_pid = DHD_PID_KT_INVALID;
+ dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
+
+ /*
+ * Save the dhd_info into the priv
+ */
+ memcpy((void *)netdev_priv(net), &dhd, sizeof(dhd));
+ dhd->pub.osh = osh;
+
+ /* Link to info module */
+ dhd->pub.info = dhd;
+ /* Link to bus module */
+ dhd->pub.bus = bus;
+ dhd->pub.hdrlen = bus_hdrlen;
+
+ /* Set network interface name if it was provided as module parameter */
+ if (iface_name[0]) {
+ int len;
+ char ch;
+ strncpy(net->name, iface_name, IFNAMSIZ);
+ net->name[IFNAMSIZ - 1] = 0;
+ len = strlen(net->name);
+ ch = net->name[len - 1];
+ if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
+ strcat(net->name, "%d");
+ }
+
+ if (dhd_add_if(dhd, 0, (void *)net, net->name, NULL, 0, 0) == DHD_BAD_IF)
+ goto fail;
+ dhd_state |= DHD_ATTACH_STATE_ADD_IF;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+ net->open = NULL;
+#else
+ net->netdev_ops = NULL;
+#endif
+
+ sema_init(&dhd->proto_sem, 1);
+
+#ifdef PROP_TXSTATUS
+ spin_lock_init(&dhd->wlfc_spinlock);
+ dhd->pub.wlfc_enabled = TRUE;
+#endif /* PROP_TXSTATUS */
+
+ /* Initialize other structure content */
+ init_waitqueue_head(&dhd->ioctl_resp_wait);
+ init_waitqueue_head(&dhd->ctrl_wait);
+
+ /* Initialize the spinlocks */
+ spin_lock_init(&dhd->sdlock);
+ spin_lock_init(&dhd->txqlock);
+ spin_lock_init(&dhd->dhd_lock);
+
+ /* Initialize Wakelock stuff */
+ spin_lock_init(&dhd->wakelock_spinlock);
+ dhd->wakelock_counter = 0;
+ dhd->wakelock_timeout_enable = 0;
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
+ wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+ mutex_init(&dhd->dhd_net_if_mutex);
+#endif
+ dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
+
+ /* Attach and link in the protocol */
+ if (dhd_prot_attach(&dhd->pub) != 0) {
+ DHD_ERROR(("dhd_prot_attach failed\n"));
+ goto fail;
+ }
+ dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
+
+#ifdef WL_CFG80211
+ /* Attach and link in the cfg80211 */
+ if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
+ DHD_ERROR(("wl_cfg80211_attach failed\n"));
+ goto fail;
+ }
+
+ dhd_monitor_init(&dhd->pub);
+ dhd_state |= DHD_ATTACH_STATE_CFG80211;
+#endif
+#if defined(CONFIG_WIRELESS_EXT)
+ /* Attach and link in the iw */
+ if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) {
+ if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
+ DHD_ERROR(("wl_iw_attach failed\n"));
+ goto fail;
+ }
+ dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
+ }
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+
+ /* Set up the watchdog timer */
+ init_timer(&dhd->timer);
+ dhd->timer.data = (ulong)dhd;
+ dhd->timer.function = dhd_watchdog;
+
+#ifdef DHDTHREAD
+ /* Initialize thread based operation and lock */
+ sema_init(&dhd->sdsem, 1);
+ if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0)) {
+ dhd->threads_only = TRUE;
+ }
+ else {
+ dhd->threads_only = FALSE;
+ }
+
+ if (dhd_watchdog_prio >= 0) {
+ /* Initialize watchdog thread */
+ PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0);
+ } else {
+ dhd->thr_wdt_ctl.thr_pid = -1;
+ }
+
+ /* Set up the bottom half handler */
+ if (dhd_dpc_prio >= 0) {
+ /* Initialize DPC thread */
+ PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0);
+ } else {
+ /* use tasklet for dpc */
+ tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
+ dhd->thr_dpc_ctl.thr_pid = -1;
+ }
+#else
+ /* Set up the bottom half handler */
+ tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
+ dhd->dhd_tasklet_create = TRUE;
+#endif /* DHDTHREAD */
+
+ if (dhd_sysioc) {
+ PROC_START(_dhd_sysioc_thread, dhd, &dhd->thr_sysioc_ctl, 0);
+ } else {
+ dhd->thr_sysioc_ctl.thr_pid = -1;
+ }
+ dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
+
+ /*
+ * Save the dhd_info into the priv
+ */
+ memcpy(netdev_priv(net), &dhd, sizeof(dhd));
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+ register_pm_notifier(&dhd_sleep_pm_notifier);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
+ dhd->early_suspend.suspend = dhd_early_suspend;
+ dhd->early_suspend.resume = dhd_late_resume;
+ register_early_suspend(&dhd->early_suspend);
+ dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
+#endif
+
+#ifdef ARP_OFFLOAD_SUPPORT
+ dhd->pend_ipaddr = 0;
+ register_inetaddr_notifier(&dhd_notifier);
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+ dhd_state |= DHD_ATTACH_STATE_DONE;
+ dhd->dhd_state = dhd_state;
+ return &dhd->pub;
+
+fail:
+ if (dhd_state < DHD_ATTACH_STATE_DHD_ALLOC) {
+ if (net) free_netdev(net);
+ } else {
+ DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
+ __FUNCTION__, dhd_state, &dhd->pub));
+ dhd->dhd_state = dhd_state;
+ dhd_detach(&dhd->pub);
+ dhd_free(&dhd->pub);
+ }
+
+ return NULL;
+}
+
+int
+dhd_bus_start(dhd_pub_t *dhdp)
+{
+ int ret = -1;
+ dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
+ unsigned long flags;
+
+ ASSERT(dhd);
+
+ DHD_TRACE(("Enter %s:\n", __FUNCTION__));
+
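+ /* Bring-up sequence: download firmware/NVRAM if the bus is still down,
+ * arm the watchdog timer, initialize the bus, optionally register the
+ * out-of-band interrupt, then run protocol init once the bus reaches
+ * the DATA state.
+ */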
+#ifdef DHDTHREAD
+ if (dhd->threads_only)
+ dhd_os_sdlock(dhdp);
+#endif /* DHDTHREAD */
+
+
+ /* try to download image and nvram to the dongle */
+ if ((dhd->pub.busstate == DHD_BUS_DOWN) &&
+ (fw_path != NULL) && (fw_path[0] != '\0') &&
+ (nv_path != NULL) && (nv_path[0] != '\0')) {
+ /* wake lock moved to dhdsdio_download_firmware */
+ if (!(dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
+ fw_path, nv_path))) {
+ DHD_ERROR(("%s: dhdsdio_probe_download failed. firmware = %s nvram = %s\n",
+ __FUNCTION__, fw_path, nv_path));
+#ifdef DHDTHREAD
+ if (dhd->threads_only)
+ dhd_os_sdunlock(dhdp);
+#endif /* DHDTHREAD */
+ return -1;
+ }
+ }
+ if (dhd->pub.busstate != DHD_BUS_LOAD) {
+#ifdef DHDTHREAD
+ if (dhd->threads_only)
+ dhd_os_sdunlock(dhdp);
+#endif /* DHDTHREAD */
+ return -ENETDOWN;
+ }
+
+ /* Start the watchdog timer */
+ dhd->pub.tickcnt = 0;
+ dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
+
+ /* Bring up the bus */
+ if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
+
+ DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
+#ifdef DHDTHREAD
+ if (dhd->threads_only)
+ dhd_os_sdunlock(dhdp);
+#endif /* DHDTHREAD */
+ return ret;
+ }
+#if defined(OOB_INTR_ONLY)
+ /* Host registration for OOB interrupt */
+ if (bcmsdh_register_oob_intr(dhdp)) {
+ /* deactivate timer and wait for the handler to finish */
+
+ flags = dhd_os_spin_lock(&dhd->pub);
+ dhd->wd_timer_valid = FALSE;
+ dhd_os_spin_unlock(&dhd->pub, flags);
+ del_timer_sync(&dhd->timer);
+
+ DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
+#ifdef DHDTHREAD
+ if (dhd->threads_only)
+ dhd_os_sdunlock(dhdp);
+#endif /* DHDTHREAD */
+ return -ENODEV;
+ }
+
+ /* Enable oob at firmware */
+ dhd_enable_oob_intr(dhd->pub.bus, TRUE);
+#endif /* defined(OOB_INTR_ONLY) */
+
+ /* If bus is not ready, can't come up */
+ if (dhd->pub.busstate != DHD_BUS_DATA) {
+ flags = dhd_os_spin_lock(&dhd->pub);
+ dhd->wd_timer_valid = FALSE;
+ dhd_os_spin_unlock(&dhd->pub, flags);
+ del_timer_sync(&dhd->timer);
+ DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
+#ifdef DHDTHREAD
+ if (dhd->threads_only)
+ dhd_os_sdunlock(dhdp);
+#endif /* DHDTHREAD */
+ return -ENODEV;
+ }
+
+#ifdef DHDTHREAD
+ if (dhd->threads_only)
+ dhd_os_sdunlock(dhdp);
+#endif /* DHDTHREAD */
+
+#ifdef READ_MACADDR
+ dhd_read_macaddr(dhd);
+#endif
+
+ /* Bus is ready, do any protocol initialization */
+ if ((ret = dhd_prot_init(&dhd->pub)) < 0)
+ return ret;
+
+#ifdef WRITE_MACADDR
+ dhd_write_macaddr(dhd->pub.mac.octet);
+#endif
+
+#ifdef ARP_OFFLOAD_SUPPORT
+ if (dhd->pend_ipaddr) {
+#ifdef AOE_IP_ALIAS_SUPPORT
+ aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE);
+#endif /* AOE_IP_ALIAS_SUPPORT */
+ dhd->pend_ipaddr = 0;
+ }
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+ return 0;
+}
+
+#if !defined(AP) && defined(WLP2P)
+/* For the Android ICS MR2 release, concurrent mode is enabled by default and the firmware
+ * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
+ * firmware and, if so, enable concurrent mode (apply P2P settings). SoftAP firmware
+ * would still be named fw_bcmdhd_apsta.
+ */
+static u32
+dhd_concurrent_fw(dhd_pub_t *dhd)
+{
+ int ret = 0;
+ char buf[WLC_IOCTL_SMLEN];
+
+ if ((!op_mode) && (strstr(fw_path, "_p2p") == NULL) &&
+ (strstr(fw_path, "_apsta") == NULL)) {
+ /* Given path is for the STA firmware. Check whether P2P support is present in
+ * the firmware. If so, set mode as P2P (concurrent support).
+ */
+ memset(buf, 0, sizeof(buf));
+ bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
+ FALSE, 0)) < 0) {
+ DHD_TRACE(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
+ } else if (buf[0] == 1) {
+ DHD_TRACE(("%s: P2P is supported\n", __FUNCTION__));
+ return 1;
+ }
+ }
+ return 0;
+}
+#endif
+int
+dhd_preinit_ioctls(dhd_pub_t *dhd)
+{
+ int ret = 0;
+ char eventmask[WL_EVENTING_MASK_LEN];
+ char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
+
+ uint power_mode = PM_OFF; /* PM_FAST; */
+ uint32 dongle_align = DHD_SDALIGN;
+ uint32 glom = 0;
+ uint bcn_timeout = 4;
+ uint retry_max = 3;
+#if defined(ARP_OFFLOAD_SUPPORT)
+ int arpoe = 1;
+#endif
+ int scan_assoc_time = DHD_SCAN_ACTIVE_TIME;
+ int scan_unassoc_time = 40;
+ int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
+ char buf[WLC_IOCTL_SMLEN];
+ char *ptr;
+ uint32 listen_interval = LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
+ uint16 chipID;
+#if defined(SOFTAP)
+ uint dtim = 1;
+#endif
+#if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
+ uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */
+#endif
+
+#if defined(AP) || defined(WLP2P)
+ uint32 apsta = 1; /* Enable APSTA mode */
+#endif /* defined(AP) || defined(WLP2P) */
+#ifdef GET_CUSTOM_MAC_ENABLE
+ struct ether_addr ea_addr;
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+ DHD_TRACE(("Enter %s\n", __FUNCTION__));
+ dhd->op_mode = 0;
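+ /* Determine the operating mode (STA, WFD or HostAPD) from the firmware
+ * image name and the op_mode setting, then push the basic tuning iovars
+ * (listen interval, power save, glom, timeouts, event mask) to the dongle.
+ */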
+#ifdef GET_CUSTOM_MAC_ENABLE
+ ret = dhd_custom_get_mac_address(ea_addr.octet);
+ if (!ret) {
+ memset(buf, 0, sizeof(buf));
+ bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+ if (ret < 0) {
+ DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
+ return BCME_NOTUP;
+ }
+ } else {
+#endif /* GET_CUSTOM_MAC_ENABLE */
+ /* Get the default device MAC address directly from firmware */
+ memset(buf, 0, sizeof(buf));
+ bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
+ FALSE, 0)) < 0) {
+ DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
+ return BCME_NOTUP;
+ }
+ /* Update public MAC address after reading from Firmware */
+ memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+ }
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+#ifdef SET_RANDOM_MAC_SOFTAP
+ if ((!op_mode && strstr(fw_path, "_apsta") != NULL) || (op_mode == 0x02)) {
+ uint rand_mac;
+
+ srandom32((uint)jiffies);
+ rand_mac = random32();
+ iovbuf[0] = 0x02; /* locally administered bit */
+ iovbuf[1] = 0x1A;
+ iovbuf[2] = 0x11;
+ iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
+ iovbuf[4] = (unsigned char)(rand_mac >> 8);
+ iovbuf[5] = (unsigned char)(rand_mac >> 16);
+
+ bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf));
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+ if (ret < 0) {
+ DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
+ } else
+ memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
+ }
+#endif /* SET_RANDOM_MAC_SOFTAP */
+
+ DHD_TRACE(("Firmware = %s\n", fw_path));
+#if !defined(AP) && defined(WLP2P)
+ /* Check if firmware with WFD support used */
+ if ((!op_mode && strstr(fw_path, "_p2p") != NULL) || (op_mode == 0x04) ||
+ (dhd_concurrent_fw(dhd))) {
+ bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+ iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s APSTA for WFD failed ret= %d\n", __FUNCTION__, ret));
+ } else {
+ dhd->op_mode |= WFD_MASK;
+#if defined(ARP_OFFLOAD_SUPPORT)
+ arpoe = 0;
+#endif /* (ARP_OFFLOAD_SUPPORT) */
+#ifdef PKT_FILTER_SUPPORT
+ dhd_pkt_filter_enable = FALSE;
+#endif
+ }
+ }
+#endif
+
+#if !defined(AP) && defined(WL_CFG80211)
+ /* Check if firmware with HostAPD support used */
+ if ((!op_mode && strstr(fw_path, "_apsta") != NULL) || (op_mode == 0x02)) {
+ /* Turn off MPC in AP mode */
+ bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+ sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s mpc for HostAPD failed %d\n", __FUNCTION__, ret));
+ } else {
+ dhd->op_mode |= HOSTAPD_MASK;
+#if defined(ARP_OFFLOAD_SUPPORT)
+ arpoe = 0;
+#endif /* (ARP_OFFLOAD_SUPPORT) */
+#ifdef PKT_FILTER_SUPPORT
+ dhd_pkt_filter_enable = FALSE;
+#endif
+ }
+ }
+#endif
+
+ if ((dhd->op_mode != WFD_MASK) && (dhd->op_mode != HOSTAPD_MASK)) {
+ /* STA only operation mode */
+ dhd->op_mode |= STA_MASK;
+#ifdef PKT_FILTER_SUPPORT
+ dhd_pkt_filter_enable = TRUE;
+#endif
+ }
+
+ DHD_ERROR(("Firmware up: op_mode=%d, "
+ "Broadcom Dongle Host Driver mac=%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
+ dhd->op_mode,
+ dhd->mac.octet[0], dhd->mac.octet[1], dhd->mac.octet[2],
+ dhd->mac.octet[3], dhd->mac.octet[4], dhd->mac.octet[5]));
+
+ /* Set Country code */
+ if (dhd->dhd_cspec.ccode[0] != 0) {
+ bcm_mkiovar("country", (char *)&dhd->dhd_cspec,
+ sizeof(wl_country_t), iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+ DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
+ }
+
+ /* Set Listen Interval */
+ bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+ DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
+
+ /* Set PowerSave mode */
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
+
+ /* Match Host and Dongle rx alignment */
+ bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+
+ /* disable glom option for some chips */
+ chipID = (uint16)dhd_bus_chip_id(dhd);
+ if ((chipID == BCM4330_CHIP_ID) || (chipID == BCM4329_CHIP_ID)) {
+ DHD_INFO(("%s disable glom for chipID=0x%X\n", __FUNCTION__, chipID));
+ bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ }
+
+ /* Set a beacon-loss timeout so link down is reported when roaming is off */
+ bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ /* Set assoc_retry_max, the number of association retries the dongle makes to the target AP */
+ bcm_mkiovar("assoc_retry_max", (char *)&retry_max, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#if defined(AP) && !defined(WLP2P)
+ /* Turn off MPC in AP mode */
+ bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif /* defined(AP) && !defined(WLP2P) */
+
+#if defined(SOFTAP)
+ if (ap_fw_loaded == TRUE) {
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
+ }
+#endif
+
+#if defined(KEEP_ALIVE)
+ {
+ /* Set Keep Alive: be sure to use FW with -keepalive */
+ int res;
+
+#if defined(SOFTAP)
+ if (ap_fw_loaded == FALSE)
+#endif
+ if ((res = dhd_keep_alive_onoff(dhd)) < 0)
+ DHD_ERROR(("%s set keeplive failed %d\n",
+ __FUNCTION__, res));
+ }
+#endif /* defined(KEEP_ALIVE) */
+
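+ /*
+ * The dongle event mask is updated read-modify-write: fetch the current
+ * event_msgs bitmap, set the bits for every event the host wants forwarded,
+ * then write the mask back so the firmware only reports those events.
+ */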
+ /* Read event_msgs mask */
+ bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
+ DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
+ goto done;
+ }
+ bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
+
+ /* Setup event_msgs */
+ setbit(eventmask, WLC_E_SET_SSID);
+ setbit(eventmask, WLC_E_PRUNE);
+ setbit(eventmask, WLC_E_AUTH);
+ setbit(eventmask, WLC_E_REASSOC);
+ setbit(eventmask, WLC_E_REASSOC_IND);
+ setbit(eventmask, WLC_E_DEAUTH);
+ setbit(eventmask, WLC_E_DEAUTH_IND);
+ setbit(eventmask, WLC_E_DISASSOC_IND);
+ setbit(eventmask, WLC_E_DISASSOC);
+ setbit(eventmask, WLC_E_JOIN);
+ setbit(eventmask, WLC_E_ASSOC_IND);
+ setbit(eventmask, WLC_E_PSK_SUP);
+ setbit(eventmask, WLC_E_LINK);
+ setbit(eventmask, WLC_E_NDIS_LINK);
+ setbit(eventmask, WLC_E_MIC_ERROR);
+ setbit(eventmask, WLC_E_ASSOC_REQ_IE);
+ setbit(eventmask, WLC_E_ASSOC_RESP_IE);
+ setbit(eventmask, WLC_E_PMKID_CACHE);
+ setbit(eventmask, WLC_E_TXFAIL);
+ setbit(eventmask, WLC_E_JOIN_START);
+ setbit(eventmask, WLC_E_SCAN_COMPLETE);
+#ifdef WLMEDIA_HTSF
+ setbit(eventmask, WLC_E_HTSFSYNC);
+#endif /* WLMEDIA_HTSF */
+#ifdef PNO_SUPPORT
+ setbit(eventmask, WLC_E_PFN_NET_FOUND);
+#endif /* PNO_SUPPORT */
+ /* enable dongle roaming event */
+ setbit(eventmask, WLC_E_ROAM);
+#ifdef WL_CFG80211
+ setbit(eventmask, WLC_E_ESCAN_RESULT);
+ if ((dhd->op_mode & WFD_MASK) == WFD_MASK) {
+ setbit(eventmask, WLC_E_ACTION_FRAME_RX);
+ setbit(eventmask, WLC_E_ACTION_FRAME_COMPLETE);
+ setbit(eventmask, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE);
+ setbit(eventmask, WLC_E_P2P_PROBREQ_MSG);
+ setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
+ }
+#endif /* WL_CFG80211 */
+
+ /* Write updated Event mask */
+ bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
+ goto done;
+ }
+
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
+ sizeof(scan_assoc_time), TRUE, 0);
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
+ sizeof(scan_unassoc_time), TRUE, 0);
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
+ sizeof(scan_passive_time), TRUE, 0);
+
+#ifdef ARP_OFFLOAD_SUPPORT
+ /* Set and enable ARP offload feature for STA only */
+#if defined(SOFTAP)
+ if (arpoe && !ap_fw_loaded) {
+#else
+ if (arpoe) {
+#endif
+ dhd_arp_offload_enable(dhd, TRUE);
+ dhd_arp_offload_set(dhd, dhd_arp_mode);
+ } else {
+ dhd_arp_offload_enable(dhd, FALSE);
+ dhd_arp_offload_set(dhd, 0);
+ }
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#ifdef PKT_FILTER_SUPPORT
+ /* Set up pktfilter definitions; they are enabled in suspend */
+ dhd->pktfilter_count = 4;
+ /* Setup filter to allow only unicast */
+ dhd->pktfilter[0] = "100 0 0 0 0x01 0x00";
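+ /*
+ * The filter string appears to encode "<id> <polarity> <type> <offset>
+ * <mask> <pattern>": filter 100 checks offset 0 with mask 0x01 against
+ * pattern 0x00, i.e. it passes only frames whose destination multicast
+ * bit is clear (unicast).
+ */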
+ dhd->pktfilter[1] = NULL;
+ dhd->pktfilter[2] = NULL;
+ dhd->pktfilter[3] = NULL;
+#if defined(SOFTAP)
+ if (ap_fw_loaded) {
+ int i;
+ for (i = 0; i < dhd->pktfilter_count; i++) {
+ dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
+ 0, dhd_master_mode);
+ }
+ }
+#endif /* defined(SOFTAP) */
+#endif /* PKT_FILTER_SUPPORT */
+
+ /* query for 'ver' to get version info from firmware */
+ memset(buf, 0, sizeof(buf));
+ ptr = buf;
+ bcm_mkiovar("ver", (char *)&buf, 4, buf, sizeof(buf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0)
+ DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+ else {
+ bcmstrtok(&ptr, "\n", 0);
+ /* Print fw version info */
+ DHD_ERROR(("Firmware version = %s\n", buf));
+ }
+
+done:
+ return ret;
+}
+
+
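+/*
+ * Generic iovar helper: packs "name" plus cmd_buf into one buffer with
+ * bcm_mkiovar(), issues WLC_SET_VAR or WLC_GET_VAR depending on 'set',
+ * and, for a GET, copies the firmware response back into cmd_buf.
+ */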
+int
+dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set)
+{
+ char buf[strlen(name) + 1 + cmd_len];
+ int len = sizeof(buf);
+ wl_ioctl_t ioc;
+ int ret;
+
+ len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
+
+ memset(&ioc, 0, sizeof(ioc));
+
+ ioc.cmd = set? WLC_SET_VAR : WLC_GET_VAR;
+ ioc.buf = buf;
+ ioc.len = len;
+ ioc.set = TRUE;
+
+ ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
+ if (!set && ret >= 0)
+ memcpy(cmd_buf, buf, cmd_len);
+
+ return ret;
+}
+
+int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
+{
+ struct dhd_info *dhd = dhdp->info;
+ struct net_device *dev = NULL;
+
+ ASSERT(dhd && dhd->iflist[ifidx]);
+ dev = dhd->iflist[ifidx]->net;
+ ASSERT(dev);
+
+ if (netif_running(dev)) {
+ DHD_ERROR(("%s: Must be down to change its MTU", dev->name));
+ return BCME_NOTDOWN;
+ }
+
+#define DHD_MIN_MTU 1500
+#define DHD_MAX_MTU 1752
+
+ if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
+ DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
+ return BCME_BADARG;
+ }
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+#ifdef ARP_OFFLOAD_SUPPORT
+/* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
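+/*
+ * Approach: read the dongle's current host_ip table into a local buffer,
+ * clear the table in the dongle, add or remove 'ipa' in the local copy,
+ * then push the surviving entries back one by one.
+ */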
+void
+aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add)
+{
+ u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
+ int i;
+ int ret;
+
+ bzero(ipv4_buf, sizeof(ipv4_buf));
+
+ /* display what we've got */
+ ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf));
+ DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
+#ifdef AOE_DBG
+ dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
+#endif
+ /* now that we have saved the host_ip table, clear it in the dongle AOE */
+ dhd_aoe_hostip_clr(dhd_pub);
+
+ if (ret) {
+ DHD_ERROR(("%s failed\n", __FUNCTION__));
+ return;
+ }
+
+ for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
+ if (add && (ipv4_buf[i] == 0)) {
+ ipv4_buf[i] = ipa;
+ add = FALSE; /* added ipa to local table */
+ DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
+ __FUNCTION__, i));
+ } else if (ipv4_buf[i] == ipa) {
+ ipv4_buf[i] = 0;
+ DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
+ __FUNCTION__, ipa, i));
+ }
+
+ if (ipv4_buf[i] != 0) {
+ /* add back host_ip entries from our local cache */
+ dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i]);
+ DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
+ __FUNCTION__, ipv4_buf[i], i));
+ }
+ }
+#ifdef AOE_DBG
+ /* see the resulting hostip table */
+ dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf));
+ DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
+ dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
+#endif
+}
+
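+/*
+ * inetaddr notifier: keeps the ARP-offload host IP table in sync with IPv4
+ * address changes on the primary interface.  If the bus is not up yet the
+ * address is recorded in pend_ipaddr instead, to be applied later.
+ */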
+static int dhd_device_event(struct notifier_block *this,
+ unsigned long event,
+ void *ptr)
+{
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+
+ dhd_info_t *dhd;
+ dhd_pub_t *dhd_pub;
+
+ if (!ifa)
+ return NOTIFY_DONE;
+
+ dhd = *(dhd_info_t **)netdev_priv(ifa->ifa_dev->dev);
+ dhd_pub = &dhd->pub;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+ if (ifa->ifa_dev->dev->netdev_ops == &dhd_ops_pri) {
+#else
+ if (ifa->ifa_dev->dev) {
+#endif
+ switch (event) {
+ case NETDEV_UP:
+ DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
+ __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+
+ if (dhd->pub.busstate != DHD_BUS_DATA) {
+ DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__));
+ if (dhd->pend_ipaddr) {
+ DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
+ __FUNCTION__, dhd->pend_ipaddr));
+ }
+ dhd->pend_ipaddr = ifa->ifa_address;
+ break;
+ }
+
+#ifdef AOE_IP_ALIAS_SUPPORT
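+ /*
+ * 0x3a is ':'; a label such as "wlan0:0" indicates an aliased
+ * (secondary) address on the interface.
+ */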
+ if (ifa->ifa_label[strlen(ifa->ifa_label)-2] == 0x3a) {
+ DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
+ __FUNCTION__));
+ aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE);
+ }
+ else
+ aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE);
+#endif
+ break;
+
+ case NETDEV_DOWN:
+ DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
+ __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+ dhd->pend_ipaddr = 0;
+#ifdef AOE_IP_ALIAS_SUPPORT
+ if (!(ifa->ifa_label[strlen(ifa->ifa_label)-2] == 0x3a)) {
+ DHD_ARPOE(("%s: primary interface is down, AOE clr all\n",
+ __FUNCTION__));
+ dhd_aoe_hostip_clr(&dhd->pub);
+ dhd_aoe_arp_clr(&dhd->pub);
+ } else
+ aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE);
+#else
+ dhd_aoe_hostip_clr(&dhd->pub);
+ dhd_aoe_arp_clr(&dhd->pub);
+#endif
+ break;
+
+ default:
+ DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
+ __func__, ifa->ifa_label, event));
+ break;
+ }
+ }
+ return NOTIFY_DONE;
+}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+int
+dhd_net_attach(dhd_pub_t *dhdp, int ifidx)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+ struct net_device *net = NULL;
+ int err = 0;
+ uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
+
+ DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
+
+ ASSERT(dhd && dhd->iflist[ifidx]);
+
+ net = dhd->iflist[ifidx]->net;
+ ASSERT(net);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+ ASSERT(!net->open);
+ net->get_stats = dhd_get_stats;
+ net->do_ioctl = dhd_ioctl_entry;
+ net->hard_start_xmit = dhd_start_xmit;
+ net->set_mac_address = dhd_set_mac_address;
+ net->set_multicast_list = dhd_set_multicast_list;
+ net->open = net->stop = NULL;
+#else
+ ASSERT(!net->netdev_ops);
+ net->netdev_ops = &dhd_ops_virt;
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
+
+ /* Ok, link into the network layer... */
+ if (ifidx == 0) {
+ /*
+ * device functions for the primary interface only
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+ net->open = dhd_open;
+ net->stop = dhd_stop;
+#else
+ net->netdev_ops = &dhd_ops_pri;
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
+ } else {
+ /*
+ * We have to use the primary MAC for virtual interfaces
+ */
+ memcpy(temp_addr, dhd->iflist[ifidx]->mac_addr, ETHER_ADDR_LEN);
+ /*
+ * Android sets the locally administered bit to indicate that this is a
+ * portable hotspot. This will not work in simultaneous AP/STA mode,
+ * nor with P2P. Need to set the Dongle's MAC address, and then use that.
+ */
+ if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
+ ETHER_ADDR_LEN)) {
+ DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
+ __func__, net->name));
+ temp_addr[0] |= 0x02;
+ }
+ }
+
+ net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+ net->ethtool_ops = &dhd_ethtool_ops;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+#if defined(CONFIG_WIRELESS_EXT)
+#if WIRELESS_EXT < 19
+ net->get_wireless_stats = dhd_get_wireless_stats;
+#endif /* WIRELESS_EXT < 19 */
+#if WIRELESS_EXT > 12
+ net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
+#endif /* WIRELESS_EXT > 12 */
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+ dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
+
+ memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
+
+ if ((err = register_netdev(net)) != 0) {
+ DHD_ERROR(("couldn't register the net device, err %d\n", err));
+ goto fail;
+ }
+ printf("Broadcom Dongle Host Driver: register interface [%s]"
+ " MAC: %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
+ net->name,
+ net->dev_addr[0], net->dev_addr[1], net->dev_addr[2],
+ net->dev_addr[3], net->dev_addr[4], net->dev_addr[5]);
+
+#if defined(SOFTAP) && defined(CONFIG_WIRELESS_EXT) && !defined(WL_CFG80211)
+ wl_iw_iscan_set_scan_broadcast_prep(net, 1);
+#endif
+
+#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ if (ifidx == 0) {
+ up(&dhd_registration_sem);
+ }
+#endif
+ return 0;
+
+fail:
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+ net->open = NULL;
+#else
+ net->netdev_ops = NULL;
+#endif
+ return err;
+}
+
+void
+dhd_bus_detach(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (dhdp) {
+ dhd = (dhd_info_t *)dhdp->info;
+ if (dhd) {
+
+ /*
+ * In case of Android cfg80211 driver, the bus is down in dhd_stop,
+ * calling stop again will cause SD read/write errors.
+ */
+ if (dhd->pub.busstate != DHD_BUS_DOWN) {
+ /* Stop the protocol module */
+ dhd_prot_stop(&dhd->pub);
+
+ /* Stop the bus module */
+ dhd_bus_stop(dhd->pub.bus, TRUE);
+ }
+
+#if defined(OOB_INTR_ONLY)
+ bcmsdh_unregister_oob_intr();
+#endif /* defined(OOB_INTR_ONLY) */
+ }
+ }
+}
+
+
+void dhd_detach(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd;
+ unsigned long flags;
+ int timer_valid = FALSE;
+
+ if (!dhdp)
+ return;
+
+ dhd = (dhd_info_t *)dhdp->info;
+ if (!dhd)
+ return;
+
+ DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
+
+ if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
+ /* Give sufficient time for threads to start running in case
+ * dhd_attach() has failed
+ */
+ osl_delay(1000*100);
+ }
+
+#ifdef ARP_OFFLOAD_SUPPORT
+ unregister_inetaddr_notifier(&dhd_notifier);
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+ if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
+ if (dhd->early_suspend.suspend)
+ unregister_early_suspend(&dhd->early_suspend);
+ }
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
+
+
+#if defined(CONFIG_WIRELESS_EXT)
+ if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
+ /* Detach and unlink from the iw layer */
+ wl_iw_detach();
+ }
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+ if (dhd->thr_sysioc_ctl.thr_pid >= 0) {
+ PROC_STOP(&dhd->thr_sysioc_ctl);
+ }
+
+ /* delete all interfaces, start with virtual */
+ if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
+ int i = 1;
+ dhd_if_t *ifp;
+
+ /* Cleanup virtual interfaces */
+ for (i = 1; i < DHD_MAX_IFS; i++)
+ if (dhd->iflist[i]) {
+ dhd->iflist[i]->state = DHD_IF_DEL;
+ dhd->iflist[i]->idx = i;
+ dhd_op_if(dhd->iflist[i]);
+ }
+
+ /* delete primary interface 0 */
+ ifp = dhd->iflist[0];
+ ASSERT(ifp);
+ ASSERT(ifp->net);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+ if (ifp->net->open)
+#else
+ if (ifp->net->netdev_ops == &dhd_ops_pri)
+#endif
+ {
+ if (ifp->net) {
+ unregister_netdev(ifp->net);
+ free_netdev(ifp->net);
+ ifp->net = NULL;
+ }
+ MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
+ dhd->iflist[0] = NULL;
+
+ }
+ }
+
+ /* Clear the watchdog timer */
+ flags = dhd_os_spin_lock(&dhd->pub);
+ timer_valid = dhd->wd_timer_valid;
+ dhd->wd_timer_valid = FALSE;
+ dhd_os_spin_unlock(&dhd->pub, flags);
+ if (timer_valid)
+ del_timer_sync(&dhd->timer);
+
+ if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
+#ifdef DHDTHREAD
+ if (dhd->thr_wdt_ctl.thr_pid >= 0) {
+ PROC_STOP(&dhd->thr_wdt_ctl);
+ }
+
+ if (dhd->thr_dpc_ctl.thr_pid >= 0) {
+ PROC_STOP(&dhd->thr_dpc_ctl);
+ }
+ else
+#endif /* DHDTHREAD */
+ tasklet_kill(&dhd->tasklet);
+ }
+ if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
+ dhd_bus_detach(dhdp);
+
+ if (dhdp->prot)
+ dhd_prot_detach(dhdp);
+ }
+
+#ifdef WL_CFG80211
+ if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
+ wl_cfg80211_detach(NULL);
+ dhd_monitor_uninit();
+ }
+#endif
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+ unregister_pm_notifier(&dhd_sleep_pm_notifier);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+ if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_destroy(&dhd->wl_wifi);
+ wake_lock_destroy(&dhd->wl_rxwake);
+#endif
+ }
+}
+
+
+void
+dhd_free(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd;
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (dhdp) {
+ int i;
+ for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
+ if (dhdp->reorder_bufs[i]) {
+ reorder_info_t *ptr;
+ uint32 buf_size = sizeof(struct reorder_info);
+
+ ptr = dhdp->reorder_bufs[i];
+
+ buf_size += ((ptr->max_idx + 1) * sizeof(void*));
+ DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
+ i, ptr->max_idx, buf_size));
+
+ MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
+ dhdp->reorder_bufs[i] = NULL;
+ }
+ }
+ dhd = (dhd_info_t *)dhdp->info;
+ if (dhd)
+ MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
+ }
+}
+
+static void __exit
+dhd_module_cleanup(void)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ dhd_bus_unregister();
+
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+ wl_android_wifictrl_func_del();
+#endif /* CONFIG_WIFI_CONTROL_FUNC */
+ wl_android_exit();
+
+ /* Call customer gpio to turn off power with WL_REG_ON signal */
+ dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF);
+}
+
+
+static int __init
+dhd_module_init(void)
+{
+ int error = 0;
+
+#if 1 && defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ int retry = POWERUP_MAX_RETRY;
+ int chip_up = 0;
+#endif
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ wl_android_init();
+
+#if defined(DHDTHREAD)
+ /* Sanity check on the module parameters */
+ do {
+ /* Both watchdog and DPC as tasklets are ok */
+ if ((dhd_watchdog_prio < 0) && (dhd_dpc_prio < 0))
+ break;
+
+ /* If both watchdog and DPC are threads, TX must be deferred */
+ if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0) && dhd_deferred_tx)
+ break;
+
+ DHD_ERROR(("Invalid module parameters.\n"));
+ return -EINVAL;
+ } while (0);
+#endif
+
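+ /*
+ * Power-up sequence: register for the SDIO card-detect notification,
+ * power the chip on via the customer GPIO hook, then wait up to
+ * POWERUP_WAIT_MS on dhd_chipup_sem for the chip to show up on the bus.
+ * On timeout the chip is powered off and the cycle is retried, up to
+ * POWERUP_MAX_RETRY times.
+ */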
+#if 1 && defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ do {
+ sema_init(&dhd_chipup_sem, 0);
+ dhd_bus_reg_sdio_notify(&dhd_chipup_sem);
+ dhd_customer_gpio_wlan_ctrl(WLAN_POWER_ON);
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+ if (wl_android_wifictrl_func_add() < 0)
+ goto fail_1;
+#endif /* defined(CONFIG_WIFI_CONTROL_FUNC) */
+ if (down_timeout(&dhd_chipup_sem,
+ msecs_to_jiffies(POWERUP_WAIT_MS)) == 0) {
+ dhd_bus_unreg_sdio_notify();
+ chip_up = 1;
+ break;
+ }
+ DHD_ERROR(("\nfailed to power up wifi chip, retry again (%d left) **\n\n",
+ retry+1));
+ dhd_bus_unreg_sdio_notify();
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+ wl_android_wifictrl_func_del();
+#endif /* defined(CONFIG_WIFI_CONTROL_FUNC) */
+ dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF);
+ } while (retry-- > 0);
+
+ if (!chip_up) {
+ DHD_ERROR(("\nfailed to power up wifi chip, max retry reached, exits **\n\n"));
+ return -ENODEV;
+ }
+#else
+ dhd_customer_gpio_wlan_ctrl(WLAN_POWER_ON);
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+ if (wl_android_wifictrl_func_add() < 0)
+ goto fail_1;
+#endif /* defined(CONFIG_WIFI_CONTROL_FUNC) */
+
+#endif
+
+#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ sema_init(&dhd_registration_sem, 0);
+#endif
+
+
+ error = dhd_bus_register();
+
+ if (!error)
+ printf("\n%s\n", dhd_version);
+ else {
+ DHD_ERROR(("%s: sdio_register_driver failed\n", __FUNCTION__));
+ goto fail_1;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ /*
+ * Wait until the MMC sdio_register_driver callback has run and the driver
+ * attach has completed.  This synchronizes the exit of dhd insmod with the
+ * kernel MMC SDIO device callback registration.
+ */
+ if (down_timeout(&dhd_registration_sem, msecs_to_jiffies(DHD_REGISTRATION_TIMEOUT)) != 0) {
+ error = -ENODEV;
+ DHD_ERROR(("%s: sdio_register_driver timeout\n", __FUNCTION__));
+ goto fail_2;
+ }
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+#if defined(WL_CFG80211)
+ wl_android_post_init();
+#endif /* defined(WL_CFG80211) */
+
+ return error;
+
+#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+fail_2:
+ dhd_bus_unregister();
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+
+fail_1:
+
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+ wl_android_wifictrl_func_del();
+#endif
+
+ /* Call customer gpio to turn off power with WL_REG_ON signal */
+ dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF);
+
+ return error;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+late_initcall(dhd_module_init);
+#else
+module_init(dhd_module_init);
+#endif
+
+module_exit(dhd_module_cleanup);
+
+/*
+ * OS specific functions required to implement DHD driver in OS independent way
+ */
+int
+dhd_os_proto_block(dhd_pub_t *pub)
+{
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ down(&dhd->proto_sem);
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+dhd_os_proto_unblock(dhd_pub_t *pub)
+{
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ up(&dhd->proto_sem);
+ return 1;
+ }
+
+ return 0;
+}
+
+unsigned int
+dhd_os_get_ioctl_resp_timeout(void)
+{
+ return ((unsigned int)dhd_ioctl_timeout_msec);
+}
+
+void
+dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
+{
+ dhd_ioctl_timeout_msec = (int)timeout_msec;
+}
+
+int
+dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool *pending)
+{
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+ DECLARE_WAITQUEUE(wait, current);
+ int timeout = dhd_ioctl_timeout_msec;
+
+ /* Convert timeout from milliseconds to jiffies */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ timeout = msecs_to_jiffies(timeout);
+#else
+ timeout = timeout * HZ / 1000;
+#endif
+
+ /* Wait until control frame is available */
+ add_wait_queue(&dhd->ioctl_resp_wait, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ /* Memory barrier to support multi-processing:
+ * the variable "condition" points to dhd->rxlen (dhd_bus_rxctl() in
+ * dhd_sdio.c) and can be changed by another processor.
+ */
+ smp_mb();
+ while (!(*condition) && (!signal_pending(current) && timeout)) {
+ timeout = schedule_timeout(timeout);
+ smp_mb();
+ }
+
+ if (signal_pending(current))
+ *pending = TRUE;
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&dhd->ioctl_resp_wait, &wait);
+
+ return timeout;
+}
+
+int
+dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (waitqueue_active(&dhd->ioctl_resp_wait)) {
+ wake_up_interruptible(&dhd->ioctl_resp_wait);
+ }
+
+ return 0;
+}
+
+void
+dhd_os_wd_timer(void *bus, uint wdtick)
+{
+ dhd_pub_t *pub = bus;
+ dhd_info_t *dhd = (dhd_info_t *)pub->info;
+ unsigned long flags;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ flags = dhd_os_spin_lock(pub);
+
+ /* don't start the wd until fw is loaded */
+ if (pub->busstate == DHD_BUS_DOWN) {
+ dhd_os_spin_unlock(pub, flags);
+ return;
+ }
+
+ /* Totally stop the timer */
+ if (!wdtick && dhd->wd_timer_valid == TRUE) {
+ dhd->wd_timer_valid = FALSE;
+ dhd_os_spin_unlock(pub, flags);
+#ifdef DHDTHREAD
+ del_timer_sync(&dhd->timer);
+#else
+ del_timer(&dhd->timer);
+#endif /* DHDTHREAD */
+ return;
+ }
+
+ if (wdtick) {
+ dhd_watchdog_ms = (uint)wdtick;
+ /* Re-arm the timer with the latest watchdog period */
+ mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000);
+ dhd->wd_timer_valid = TRUE;
+ }
+ dhd_os_spin_unlock(pub, flags);
+}
+
+void *
+dhd_os_open_image(char *filename)
+{
+ struct file *fp;
+
+ fp = filp_open(filename, O_RDONLY, 0);
+ /*
+ * 2.6.11 (FC4) supports filp_open() but later revs don't?
+ * Alternative:
+ * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
+ * ???
+ */
+ if (IS_ERR(fp))
+ fp = NULL;
+
+ return fp;
+}
+
+int
+dhd_os_get_image_block(char *buf, int len, void *image)
+{
+ struct file *fp = (struct file *)image;
+ int rdlen;
+
+ if (!image)
+ return 0;
+
+ rdlen = kernel_read(fp, fp->f_pos, buf, len);
+ if (rdlen > 0)
+ fp->f_pos += rdlen;
+
+ return rdlen;
+}
+
+void
+dhd_os_close_image(void *image)
+{
+ if (image)
+ filp_close((struct file *)image, NULL);
+}
+
+
+void
+dhd_os_sdlock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
+
+ dhd = (dhd_info_t *)(pub->info);
+
+#ifdef DHDTHREAD
+ if (dhd->threads_only)
+ down(&dhd->sdsem);
+ else
+#endif /* DHDTHREAD */
+ spin_lock_bh(&dhd->sdlock);
+}
+
+void
+dhd_os_sdunlock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
+
+ dhd = (dhd_info_t *)(pub->info);
+
+#ifdef DHDTHREAD
+ if (dhd->threads_only)
+ up(&dhd->sdsem);
+ else
+#endif /* DHDTHREAD */
+ spin_unlock_bh(&dhd->sdlock);
+}
+
+void
+dhd_os_sdlock_txq(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
+
+ dhd = (dhd_info_t *)(pub->info);
+ spin_lock_bh(&dhd->txqlock);
+}
+
+void
+dhd_os_sdunlock_txq(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
+
+ dhd = (dhd_info_t *)(pub->info);
+ spin_unlock_bh(&dhd->txqlock);
+}
+
+void
+dhd_os_sdlock_rxq(dhd_pub_t *pub)
+{
+}
+
+void
+dhd_os_sdunlock_rxq(dhd_pub_t *pub)
+{
+}
+
+void
+dhd_os_sdtxlock(dhd_pub_t *pub)
+{
+ dhd_os_sdlock(pub);
+}
+
+void
+dhd_os_sdtxunlock(dhd_pub_t *pub)
+{
+ dhd_os_sdunlock(pub);
+}
+
+#if defined(CONFIG_DHD_USE_STATIC_BUF)
+uint8* dhd_os_prealloc(void *osh, int section, uint size)
+{
+ return (uint8*)wl_android_prealloc(section, size);
+}
+
+void dhd_os_prefree(void *osh, void *addr, uint size)
+{
+}
+#endif /* defined(CONFIG_DHD_USE_STATIC_BUF) */
+
+#if defined(CONFIG_WIRELESS_EXT)
+struct iw_statistics *
+dhd_get_wireless_stats(struct net_device *dev)
+{
+ int res = 0;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ if (!dhd->pub.up) {
+ return NULL;
+ }
+
+ res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
+
+ if (res == 0)
+ return &dhd->iw.wstats;
+ else
+ return NULL;
+}
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+static int
+dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
+ wl_event_msg_t *event, void **data)
+{
+ int bcmerror = 0;
+ ASSERT(dhd != NULL);
+
+ bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data);
+ if (bcmerror != BCME_OK)
+ return (bcmerror);
+
+#if defined(CONFIG_WIRELESS_EXT)
+ if (event->bsscfgidx == 0) {
+ /*
+ * Wireless ext is on primary interface only
+ */
+
+ ASSERT(dhd->iflist[*ifidx] != NULL);
+ ASSERT(dhd->iflist[*ifidx]->net != NULL);
+
+ if (dhd->iflist[*ifidx]->net) {
+ wl_iw_event(dhd->iflist[*ifidx]->net, event, *data);
+ }
+ }
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+#ifdef WL_CFG80211
+ if ((ntoh32(event->event_type) == WLC_E_IF) &&
+ (((dhd_if_event_t *)*data)->action == WLC_E_IF_ADD))
+ /* If ADD_IF was called directly by the wl utility we should not
+ * report this.  Even when ADD_IF was called from the CFG stack,
+ * this event does not need to be reported back.
+ */
+ return (BCME_OK);
+ if ((wl_cfg80211_is_progress_ifchange() ||
+ wl_cfg80211_is_progress_ifadd()) && (*ifidx != 0)) {
+ /*
+ * If IF_ADD/CHANGE operation is going on,
+ * discard any event received on the virtual I/F
+ */
+ return (BCME_OK);
+ }
+
+ ASSERT(dhd->iflist[*ifidx] != NULL);
+ ASSERT(dhd->iflist[*ifidx]->net != NULL);
+ if (dhd->iflist[*ifidx]->event2cfg80211 && dhd->iflist[*ifidx]->net) {
+ wl_cfg80211_event(dhd->iflist[*ifidx]->net, event, *data);
+ }
+#endif /* defined(WL_CFG80211) */
+
+ return (bcmerror);
+}
+
+/* send up locally generated event */
+void
+dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
+{
+ switch (ntoh32(event->event_type)) {
+#ifdef WLBTAMP
+ /* Send up locally generated AMP HCI Events */
+ case WLC_E_BTA_HCI_EVENT: {
+ struct sk_buff *p, *skb;
+ bcm_event_t *msg;
+ wl_event_msg_t *p_bcm_event;
+ char *ptr;
+ uint32 len;
+ uint32 pktlen;
+ dhd_if_t *ifp;
+ dhd_info_t *dhd;
+ uchar *eth;
+ int ifidx;
+
+ len = ntoh32(event->datalen);
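+ /*
+ * Locally generated HCI events are wrapped in a bcm_event_t frame: an
+ * Ethernet header addressed to ourselves, the BRCM vendor-specific header,
+ * the wl_event_msg_t, the HCI payload and a two-byte 0x0000 terminator,
+ * then injected into the stack with netif_rx()/netif_rx_ni().
+ */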
+ pktlen = sizeof(bcm_event_t) + len + 2;
+ dhd = dhdp->info;
+ ifidx = dhd_ifname2idx(dhd, event->ifname);
+
+ if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
+ ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
+
+ msg = (bcm_event_t *) PKTDATA(dhdp->osh, p);
+
+ bcopy(&dhdp->mac, &msg->eth.ether_dhost, ETHER_ADDR_LEN);
+ bcopy(&dhdp->mac, &msg->eth.ether_shost, ETHER_ADDR_LEN);
+ ETHER_TOGGLE_LOCALADDR(&msg->eth.ether_shost);
+
+ msg->eth.ether_type = hton16(ETHER_TYPE_BRCM);
+
+ /* BCM Vendor specific header... */
+ msg->bcm_hdr.subtype = hton16(BCMILCP_SUBTYPE_VENDOR_LONG);
+ msg->bcm_hdr.version = BCMILCP_BCM_SUBTYPEHDR_VERSION;
+ bcopy(BRCM_OUI, &msg->bcm_hdr.oui[0], DOT11_OUI_LEN);
+
+ /* vendor spec header length + pvt data length (private indication
+ * hdr + actual message itself)
+ */
+ msg->bcm_hdr.length = hton16(BCMILCP_BCM_SUBTYPEHDR_MINLENGTH +
+ BCM_MSG_LEN + sizeof(wl_event_msg_t) + (uint16)len);
+ msg->bcm_hdr.usr_subtype = hton16(BCMILCP_BCM_SUBTYPE_EVENT);
+
+ PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2));
+
+ /* copy wl_event_msg_t into sk_buf */
+
+ /* pointer to wl_event_msg_t in sk_buf */
+ p_bcm_event = &msg->event;
+ bcopy(event, p_bcm_event, sizeof(wl_event_msg_t));
+
+ /* copy hci event into sk_buf */
+ bcopy(data, (p_bcm_event + 1), len);
+
+ msg->bcm_hdr.length = hton16(sizeof(wl_event_msg_t) +
+ ntoh16(msg->bcm_hdr.length));
+ PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2));
+
+ ptr = (char *)(msg + 1);
+ /* The last 2 bytes of the message are 0x00 0x00 to signal that
+ * no further ethertypes follow.
+ */
+ ptr[len+0] = 0x00;
+ ptr[len+1] = 0x00;
+
+ skb = PKTTONATIVE(dhdp->osh, p);
+ eth = skb->data;
+ len = skb->len;
+
+ ifp = dhd->iflist[ifidx];
+ if (ifp == NULL)
+ ifp = dhd->iflist[0];
+
+ ASSERT(ifp);
+ skb->dev = ifp->net;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ skb->data = eth;
+ skb->len = len;
+
+ /* Strip header, count, deliver upward */
+ skb_pull(skb, ETH_HLEN);
+
+ /* Send the packet */
+ if (in_interrupt()) {
+ netif_rx(skb);
+ } else {
+ netif_rx_ni(skb);
+ }
+ }
+ else {
+ /* Could not allocate a sk_buf */
+ DHD_ERROR(("%s: unable to alloc sk_buf", __FUNCTION__));
+ }
+ break;
+ } /* case WLC_E_BTA_HCI_EVENT */
+#endif /* WLBTAMP */
+
+ default:
+ break;
+ }
+}
+
+void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
+{
+#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+ struct dhd_info *dhdinfo = dhd->info;
+ dhd_os_sdunlock(dhd);
+ wait_event_interruptible_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), HZ * 2);
+ dhd_os_sdlock(dhd);
+#endif
+ return;
+}
+
+void dhd_wait_event_wakeup(dhd_pub_t *dhd)
+{
+#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+ struct dhd_info *dhdinfo = dhd->info;
+ if (waitqueue_active(&dhdinfo->ctrl_wait))
+ wake_up_interruptible(&dhdinfo->ctrl_wait);
+#endif
+ return;
+}
+
+int
+dhd_dev_reset(struct net_device *dev, uint8 flag)
+{
+ int ret;
+
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ ret = dhd_bus_devreset(&dhd->pub, flag);
+ if (ret) {
+ DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
+ return ret;
+ }
+
+ return ret;
+}
+
+int net_os_set_suspend_disable(struct net_device *dev, int val)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int ret = 0;
+
+ if (dhd) {
+ ret = dhd->pub.suspend_disable_flag;
+ dhd->pub.suspend_disable_flag = val;
+ }
+ return ret;
+}
+
+int net_os_set_suspend(struct net_device *dev, int val)
+{
+ int ret = 0;
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ if (dhd) {
+ ret = dhd_set_suspend(val, &dhd->pub);
+ }
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
+ return ret;
+}
+
+int net_os_set_dtim_skip(struct net_device *dev, int val)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ if (dhd)
+ dhd->pub.dtim_skip = val;
+
+ return 0;
+}
+
+int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ char *filterp = NULL;
+ int ret = 0;
+
+ if (!dhd || (num == DHD_UNICAST_FILTER_NUM))
+ return ret;
+ if (num >= dhd->pub.pktfilter_count)
+ return -EINVAL;
+ if (add_remove) {
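+ /*
+ * The filter strings appear to encode "<id> <polarity> <type> <offset>
+ * <mask> <pattern>" over the destination MAC: 101 matches the broadcast
+ * address, 102 the 01:00:5E IPv4-multicast prefix, and 103 the 33:33
+ * IPv6-multicast prefix.
+ */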
+ switch (num) {
+ case DHD_BROADCAST_FILTER_NUM:
+ filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
+ break;
+ case DHD_MULTICAST4_FILTER_NUM:
+ filterp = "102 0 0 0 0xFFFFFF 0x01005E";
+ break;
+ case DHD_MULTICAST6_FILTER_NUM:
+ filterp = "103 0 0 0 0xFFFF 0x3333";
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ dhd->pub.pktfilter[num] = filterp;
+ return ret;
+}
+
+int net_os_set_packet_filter(struct net_device *dev, int val)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int ret = 0;
+
+ /* Packet filtering is changed only while we are still in early-suspend and
+ * need to turn it either ON or OFF.  It can always be turned OFF in
+ * early-suspend, but it is turned back ON only if suspend_disable_flag
+ * was not set.
+ */
+ if (dhd && dhd->pub.up) {
+ if (dhd->pub.in_suspend) {
+ if (!val || (val && !dhd->pub.suspend_disable_flag))
+ dhd_set_packet_filter(val, &dhd->pub);
+ }
+ }
+ return ret;
+}
+
+
+void
+dhd_dev_init_ioctl(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ dhd_preinit_ioctls(&dhd->pub);
+}
+
+#ifdef PNO_SUPPORT
+/* Linux wrapper to call common dhd_pno_clean */
+int
+dhd_dev_pno_reset(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_pno_clean(&dhd->pub));
+}
+
+
+/* Linux wrapper to call common dhd_pno_enable */
+int
+dhd_dev_pno_enable(struct net_device *dev, int pfn_enabled)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_pno_enable(&dhd->pub, pfn_enabled));
+}
+
+
+/* Linux wrapper to call common dhd_pno_set */
+int
+dhd_dev_pno_set(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid,
+ ushort scan_fr, int pno_repeat, int pno_freq_expo_max)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_pno_set(&dhd->pub, ssids_local, nssid, scan_fr, pno_repeat, pno_freq_expo_max));
+}
+
+/* Linux wrapper to get pno status */
+int
+dhd_dev_get_pno_status(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_pno_get_status(&dhd->pub));
+}
+
+#endif /* PNO_SUPPORT */
+
+int net_os_send_hang_message(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int ret = 0;
+
+ if (dhd) {
+ if (!dhd->pub.hang_was_sent) {
+ dhd->pub.hang_was_sent = 1;
+#if defined(CONFIG_WIRELESS_EXT)
+ ret = wl_iw_send_priv_event(dev, "HANG");
+#endif
+#if defined(WL_CFG80211)
+ ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
+ dev_close(dev);
+ dev_open(dev);
+#endif
+ }
+ }
+ return ret;
+}
+
+void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ if (dhd && dhd->pub.up)
+ memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
+}
+
+void dhd_net_if_lock(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_net_if_lock_local(dhd);
+}
+
+void dhd_net_if_unlock(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_net_if_unlock_local(dhd);
+}
+
+static void dhd_net_if_lock_local(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+ if (dhd)
+ mutex_lock(&dhd->dhd_net_if_mutex);
+#endif
+}
+
+static void dhd_net_if_unlock_local(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+ if (dhd)
+ mutex_unlock(&dhd->dhd_net_if_mutex);
+#endif
+}
+
+unsigned long dhd_os_spin_lock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags = 0;
+
+ if (dhd)
+ spin_lock_irqsave(&dhd->dhd_lock, flags);
+
+ return flags;
+}
+
+void dhd_os_spin_unlock(dhd_pub_t *pub, unsigned long flags)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd)
+ spin_unlock_irqrestore(&dhd->dhd_lock, flags);
+}
+
+static int
+dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
+{
+ return (atomic_read(&dhd->pend_8021x_cnt));
+}
+
+#define MAX_WAIT_FOR_8021X_TX 10
+
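+/*
+ * Poll the pending-802.1X counter, sleeping roughly 10 ms per iteration for
+ * at most MAX_WAIT_FOR_8021X_TX rounds, so in-flight EAPOL frames can drain;
+ * returns the number of frames still pending.
+ */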
+int
+dhd_wait_pend8021x(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int timeout = 10 * HZ / 1000;
+ int ntimes = MAX_WAIT_FOR_8021X_TX;
+ int pend = dhd_get_pend_8021x_cnt(dhd);
+
+ while (ntimes && pend) {
+ if (pend) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(timeout);
+ set_current_state(TASK_RUNNING);
+ ntimes--;
+ }
+ pend = dhd_get_pend_8021x_cnt(dhd);
+ }
+ return pend;
+}
+
+#ifdef DHD_DEBUG
+int
+write_to_file(dhd_pub_t *dhd, uint8 *buf, int size)
+{
+ int ret = 0;
+ struct file *fp;
+ mm_segment_t old_fs;
+ loff_t pos = 0;
+
+ /* change to KERNEL_DS address limit */
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ /* open file to write */
+ fp = filp_open("/tmp/mem_dump", O_WRONLY|O_CREAT, 0640);
+ if (!fp) {
+ printf("%s: open file error\n", __FUNCTION__);
+ ret = -1;
+ goto exit;
+ }
+
+ /* Write buf to file */
+ fp->f_op->write(fp, buf, size, &pos);
+
+exit:
+ /* free buf before return */
+ MFREE(dhd->osh, buf, size);
+ /* close file before return */
+ if (fp)
+ filp_close(fp, current->files);
+ /* restore previous address limit */
+ set_fs(old_fs);
+
+ return ret;
+}
+#endif /* DHD_DEBUG */
+
+int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
+
+ if (dhd) {
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ ret = dhd->wakelock_timeout_enable;
+#ifdef CONFIG_HAS_WAKELOCK
+ if (dhd->wakelock_timeout_enable)
+ wake_lock_timeout(&dhd->wl_rxwake,
+ msecs_to_jiffies(dhd->wakelock_timeout_enable));
+#endif
+ dhd->wakelock_timeout_enable = 0;
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ }
+ return ret;
+}
+
+int net_os_wake_lock_timeout(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int ret = 0;
+
+ if (dhd)
+ ret = dhd_os_wake_lock_timeout(&dhd->pub);
+ return ret;
+}
+
+int dhd_os_wake_lock_timeout_enable(dhd_pub_t *pub, int val)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+
+ if (dhd) {
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ if (val > dhd->wakelock_timeout_enable)
+ dhd->wakelock_timeout_enable = val;
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ }
+ return 0;
+}
+
+int net_os_wake_lock_timeout_enable(struct net_device *dev, int val)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int ret = 0;
+
+ if (dhd)
+ ret = dhd_os_wake_lock_timeout_enable(&dhd->pub, val);
+ return ret;
+}
+
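+/*
+ * The wake-lock helpers reference-count a single Android wake lock (wl_wifi):
+ * it is taken on the 0 -> 1 transition in dhd_os_wake_lock() and released on
+ * the 1 -> 0 transition in dhd_os_wake_unlock(), both under wakelock_spinlock.
+ */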
+int dhd_os_wake_lock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
+
+ if (dhd) {
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+#ifdef CONFIG_HAS_WAKELOCK
+ if (!dhd->wakelock_counter)
+ wake_lock(&dhd->wl_wifi);
+#endif
+ dhd->wakelock_counter++;
+ ret = dhd->wakelock_counter;
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ }
+ return ret;
+}
+
+int net_os_wake_lock(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int ret = 0;
+
+ if (dhd)
+ ret = dhd_os_wake_lock(&dhd->pub);
+ return ret;
+}
+
+int dhd_os_wake_unlock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
+
+ dhd_os_wake_lock_timeout(pub);
+ if (dhd) {
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ if (dhd->wakelock_counter) {
+ dhd->wakelock_counter--;
+#ifdef CONFIG_HAS_WAKELOCK
+ if (!dhd->wakelock_counter)
+ wake_unlock(&dhd->wl_wifi);
+#endif
+ ret = dhd->wakelock_counter;
+ }
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ }
+ return ret;
+}
+
+int dhd_os_check_wakelock(void *dhdp)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_pub_t *pub = (dhd_pub_t *)dhdp;
+ dhd_info_t *dhd;
+
+ if (!pub)
+ return 0;
+ dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd && wake_lock_active(&dhd->wl_wifi))
+ return 1;
+#endif
+ return 0;
+}
+int net_os_wake_unlock(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int ret = 0;
+
+ if (dhd)
+ ret = dhd_os_wake_unlock(&dhd->pub);
+ return ret;
+}
+
+int dhd_os_check_if_up(void *dhdp)
+{
+ dhd_pub_t *pub = (dhd_pub_t *)dhdp;
+
+ if (!pub)
+ return 0;
+ return pub->up;
+}
+int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
+{
+ int ifidx;
+ int ret = 0;
+ dhd_info_t *dhd = NULL;
+
+ if (!net || !netdev_priv(net)) {
+ DHD_ERROR(("%s invalid parameter\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ dhd = *(dhd_info_t **)netdev_priv(net);
+ ifidx = dhd_net2idx(dhd, net);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+ ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
+ dhd_check_hang(net, &dhd->pub, ret);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+ return ret;
+}
+
+bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
+{
+ struct net_device *net;
+
+ net = dhd_idx2net(dhdp, ifidx);
+ return dhd_check_hang(net, dhdp, ret);
+}
+
+#ifdef PROP_TXSTATUS
+extern int dhd_wlfc_interface_entry_update(void* state, ewlfc_mac_entry_action_t action, uint8 ifid,
+ uint8 iftype, uint8* ea);
+extern int dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits);
+
+int dhd_wlfc_interface_event(struct dhd_info *dhd,
+ ewlfc_mac_entry_action_t action, uint8 ifid, uint8 iftype, uint8* ea)
+{
+ if (dhd->pub.wlfc_state == NULL)
+ return BCME_OK;
+
+ return dhd_wlfc_interface_entry_update(dhd->pub.wlfc_state, action, ifid, iftype, ea);
+}
+
+int dhd_wlfc_FIFOcreditmap_event(struct dhd_info *dhd, uint8* event_data)
+{
+ if (dhd->pub.wlfc_state == NULL)
+ return BCME_OK;
+
+ return dhd_wlfc_FIFOcreditmap_update(dhd->pub.wlfc_state, event_data);
+}
+
+int dhd_wlfc_event(struct dhd_info *dhd)
+{
+ return dhd_wlfc_enable(&dhd->pub);
+}
+#endif /* PROP_TXSTATUS */
+
+#ifdef BCMDBGFS
+
+#include <linux/debugfs.h>
+
+extern uint32 dhd_readregl(void *bp, uint32 addr);
+extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
+
+typedef struct dhd_dbgfs {
+ struct dentry *debugfs_dir;
+ struct dentry *debugfs_mem;
+ dhd_pub_t *dhdp;
+ uint32 size;
+} dhd_dbgfs_t;
+
+dhd_dbgfs_t g_dbgfs;
+
+static int
+dhd_dbg_state_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t
+dhd_dbg_state_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ ssize_t rval;
+ uint32 tmp;
+ loff_t pos = *ppos;
+ size_t ret;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= g_dbgfs.size || !count)
+ return 0;
+ if (count > g_dbgfs.size - pos)
+ count = g_dbgfs.size - pos;
+
+ /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
+ tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
+
+ ret = copy_to_user(ubuf, &tmp, 4);
+ if (ret == count)
+ return -EFAULT;
+
+ count -= ret;
+ *ppos = pos + count;
+ rval = count;
+
+ return rval;
+}
+
+
+static ssize_t
+dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ loff_t pos = *ppos;
+ size_t ret;
+ uint32 buf;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= g_dbgfs.size || !count)
+ return 0;
+ if (count > g_dbgfs.size - pos)
+ count = g_dbgfs.size - pos;
+
+ ret = copy_from_user(&buf, ubuf, sizeof(uint32));
+ if (ret == count)
+ return -EFAULT;
+
+ /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
+ dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
+
+ return count;
+}
+
+
+loff_t
+dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
+{
+ loff_t pos = -1;
+
+ switch (whence) {
+ case 0:
+ pos = off;
+ break;
+ case 1:
+ pos = file->f_pos + off;
+ break;
+ case 2:
+ pos = g_dbgfs.size - off;
+ }
+ return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
+}
+
+static const struct file_operations dhd_dbg_state_ops = {
+ .read = dhd_dbg_state_read,
+ .write = dhd_debugfs_write,
+ .open = dhd_dbg_state_open,
+ .llseek = dhd_debugfs_lseek
+};
+
+static void dhd_dbg_create(void)
+{
+ if (g_dbgfs.debugfs_dir) {
+ g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
+ NULL, &dhd_dbg_state_ops);
+ }
+}
+
+void dhd_dbg_init(dhd_pub_t *dhdp)
+{
+ int err;
+
+ g_dbgfs.dhdp = dhdp;
+ g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
+
+ g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
+ if (IS_ERR(g_dbgfs.debugfs_dir)) {
+ err = PTR_ERR(g_dbgfs.debugfs_dir);
+ g_dbgfs.debugfs_dir = NULL;
+ return;
+ }
+
+ dhd_dbg_create();
+
+ return;
+}
+
+void dhd_dbg_remove(void)
+{
+ debugfs_remove(g_dbgfs.debugfs_mem);
+ debugfs_remove(g_dbgfs.debugfs_dir);
+
+ bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
+
+}
+#endif /* ifdef BCMDBGFS */
+
+#ifdef WLMEDIA_HTSF
+
+static
+void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+ struct sk_buff *skb;
+ uint32 htsf = 0;
+ uint16 dport = 0, oldmagic = 0xACAC;
+ char *p1;
+ htsfts_t ts;
+
+ /* timestamp packet */
+
+ p1 = (char*) PKTDATA(dhdp->osh, pktbuf);
+
+ if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
+/* memcpy(&proto, p1+26, 4); */
+ memcpy(&dport, p1+40, 2);
+/* proto = ((ntoh32(proto))>> 16) & 0xFF; */
+ dport = ntoh16(dport);
+ }
+
+ /* timestamp only if icmp or udp iperf with port 5555 */
+/* if (proto == 17 && dport == tsport) { */
+ if (dport >= tsport && dport <= tsport + 20) {
+
+ skb = (struct sk_buff *) pktbuf;
+
+ htsf = dhd_get_htsf(dhd, 0);
+ memset(skb->data + 44, 0, 2); /* clear checksum */
+ memcpy(skb->data+82, &oldmagic, 2);
+ memcpy(skb->data+84, &htsf, 4);
+
+ memset(&ts, 0, sizeof(htsfts_t));
+ ts.magic = HTSFMAGIC;
+ ts.prio = PKTPRIO(pktbuf);
+ ts.seqnum = htsf_seqnum++;
+ ts.c10 = get_cycles();
+ ts.t10 = htsf;
+ ts.endmagic = HTSFENDMAGIC;
+
+ memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts));
+ }
+}
+
+static void dhd_dump_htsfhisto(histo_t *his, char *s)
+{
+ int pktcnt = 0, curval = 0, i;
+ for (i = 0; i < (NUMBIN-2); i++) {
+ curval += 500;
+ printf("%d ", his->bin[i]);
+ pktcnt += his->bin[i];
+ }
+ printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt,
+ his->bin[NUMBIN-1], s);
+}
+
+static
+void sorttobin(int value, histo_t *histo)
+{
+ int i, binval = 0;
+
+ if (value < 0) {
+ histo->bin[NUMBIN-1]++;
+ return;
+ }
+ if (value > histo->bin[NUMBIN-2]) /* store the max value */
+ histo->bin[NUMBIN-2] = value;
+
+ for (i = 0; i < (NUMBIN-2); i++) {
+ binval += 500; /* 500 ms bins */
+ if (value <= binval) {
+ histo->bin[i]++;
+ return;
+ }
+ }
+ histo->bin[NUMBIN-3]++;
+}
+
+static
+void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+ struct sk_buff *skb;
+ char *p1;
+ uint16 old_magic;
+ int d1, d2, d3, end2end;
+ htsfts_t *htsf_ts;
+ uint32 htsf;
+
+ skb = PKTTONATIVE(dhdp->osh, pktbuf);
+ p1 = (char*)PKTDATA(dhdp->osh, pktbuf);
+
+ if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
+ memcpy(&old_magic, p1+78, 2);
+ htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4);
+ }
+ else
+ return;
+
+ if (htsf_ts->magic == HTSFMAGIC) {
+ htsf_ts->tE0 = dhd_get_htsf(dhd, 0);
+ htsf_ts->cE0 = get_cycles();
+ }
+
+ if (old_magic == 0xACAC) {
+
+ tspktcnt++;
+ htsf = dhd_get_htsf(dhd, 0);
+ memcpy(skb->data+92, &htsf, sizeof(uint32));
+
+ memcpy(&ts[tsidx].t1, skb->data+80, 16);
+
+ d1 = ts[tsidx].t2 - ts[tsidx].t1;
+ d2 = ts[tsidx].t3 - ts[tsidx].t2;
+ d3 = ts[tsidx].t4 - ts[tsidx].t3;
+ end2end = ts[tsidx].t4 - ts[tsidx].t1;
+
+ sorttobin(d1, &vi_d1);
+ sorttobin(d2, &vi_d2);
+ sorttobin(d3, &vi_d3);
+ sorttobin(end2end, &vi_d4);
+
+ if (end2end > 0 && end2end > maxdelay) {
+ maxdelay = end2end;
+ maxdelaypktno = tspktcnt;
+ memcpy(&maxdelayts, &ts[tsidx], 16);
+ }
+ if (++tsidx >= TSMAX)
+ tsidx = 0;
+ }
+}
+
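+/*
+ * Extrapolate the dongle TSF from the host cycle counter: the cycles elapsed
+ * since the last TSF sample are scaled by the calibrated coefficient
+ * (coef.coefdec1coefdec2, maintained by htsf_update()) and added to the last
+ * known TSF value, plus a fixed bus-delay allowance (HTSF_BUS_DELAY).
+ */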
+uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx)
+{
+ uint32 htsf = 0, cur_cycle, delta, delta_us;
+ uint32 factor, baseval, baseval2;
+ cycles_t t;
+
+ t = get_cycles();
+ cur_cycle = t;
+
+ if (cur_cycle > dhd->htsf.last_cycle)
+ delta = cur_cycle - dhd->htsf.last_cycle;
+ else {
+ delta = cur_cycle + (0xFFFFFFFF - dhd->htsf.last_cycle);
+ }
+
+ delta = delta >> 4;
+
+ if (dhd->htsf.coef) {
+ /* times ten to get the first digit */
+ factor = (dhd->htsf.coef*10 + dhd->htsf.coefdec1);
+ baseval = (delta*10)/factor;
+ baseval2 = (delta*10)/(factor+1);
+ delta_us = (baseval - (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10);
+ htsf = (delta_us << 4) + dhd->htsf.last_tsf + HTSF_BUS_DELAY;
+ }
+ else {
+ DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
+ }
+
+ return htsf;
+}
+
+static void dhd_dump_latency(void)
+{
+ int i, max = 0;
+ int d1, d2, d3, d4, d5;
+
+ printf("T1 T2 T3 T4 d1 d2 t4-t1 i \n");
+ for (i = 0; i < TSMAX; i++) {
+ d1 = ts[i].t2 - ts[i].t1;
+ d2 = ts[i].t3 - ts[i].t2;
+ d3 = ts[i].t4 - ts[i].t3;
+ d4 = ts[i].t4 - ts[i].t1;
+ d5 = ts[max].t4-ts[max].t1;
+ if (d4 > d5 && d4 > 0) {
+ max = i;
+ }
+ printf("%08X %08X %08X %08X \t%d %d %d %d i=%d\n",
+ ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4,
+ d1, d2, d3, d4, i);
+ }
+
+ printf("current idx = %d \n", tsidx);
+
+ printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt);
+ printf("%08X %08X %08X %08X \t%d %d %d %d\n",
+ maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4,
+ maxdelayts.t2 - maxdelayts.t1,
+ maxdelayts.t3 - maxdelayts.t2,
+ maxdelayts.t4 - maxdelayts.t3,
+ maxdelayts.t4 - maxdelayts.t1);
+}
+
+
+static int
+dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx)
+{
+ wl_ioctl_t ioc;
+ char buf[32];
+ int ret;
+ uint32 s1, s2;
+
+ struct tsf {
+ uint32 low;
+ uint32 high;
+ } tsf_buf;
+
+ memset(&ioc, 0, sizeof(ioc));
+ memset(&tsf_buf, 0, sizeof(tsf_buf));
+
+ ioc.cmd = WLC_GET_VAR;
+ ioc.buf = buf;
+ ioc.len = (uint)sizeof(buf);
+ ioc.set = FALSE;
+
+ strcpy(buf, "tsf");
+ s1 = dhd_get_htsf(dhd, 0);
+ if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+ if (ret == -EIO) {
+ DHD_ERROR(("%s: tsf is not supported by device\n",
+ dhd_ifname(&dhd->pub, ifidx)));
+ return -EOPNOTSUPP;
+ }
+ return ret;
+ }
+ s2 = dhd_get_htsf(dhd, 0);
+
+ memcpy(&tsf_buf, buf, sizeof(tsf_buf));
+ printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
+ tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1,
+ dhd->htsf.coefdec2, s2-tsf_buf.low);
+ printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle);
+ return 0;
+}
+
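+/*
+ * Recompute the host-cycles-to-TSF coefficient from two successive TSF
+ * samples: hfactor is the integer ratio and dec1/dec2 carry two extra
+ * decimal digits, with dec3 used only for rounding.  Both counters are
+ * treated as wrapping 32-bit values.
+ */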
+void htsf_update(dhd_info_t *dhd, void *data)
+{
+ static ulong cur_cycle = 0, prev_cycle = 0;
+ uint32 htsf, tsf_delta = 0;
+ uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp;
+ ulong b, a;
+ cycles_t t;
+
+ /* cycles_t in include/mips/timex.h */
+
+ t = get_cycles();
+
+ prev_cycle = cur_cycle;
+ cur_cycle = t;
+
+ if (cur_cycle > prev_cycle)
+ cyc_delta = cur_cycle - prev_cycle;
+ else {
+ b = cur_cycle;
+ a = prev_cycle;
+ cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle);
+ }
+
+ if (data == NULL)
+ printf(" tsf update ata point er is null \n");
+
+ memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t));
+ memcpy(&cur_tsf, data, sizeof(tsf_t));
+
+ if (cur_tsf.low == 0) {
+ DHD_INFO((" ---- 0 TSF, do not update, return\n"));
+ return;
+ }
+
+ if (cur_tsf.low > prev_tsf.low)
+ tsf_delta = (cur_tsf.low - prev_tsf.low);
+ else {
+ DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
+ cur_tsf.low, prev_tsf.low));
+ if (cur_tsf.high > prev_tsf.high) {
+ tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low);
+ DHD_INFO((" ---- Wrap around tsf coutner adjusted TSF=%08X\n", tsf_delta));
+ }
+ else
+ return; /* do not update */
+ }
+
+ if (tsf_delta) {
+ hfactor = cyc_delta / tsf_delta;
+ tmp = (cyc_delta - (hfactor * tsf_delta))*10;
+ dec1 = tmp/tsf_delta;
+ dec2 = ((tmp - dec1*tsf_delta)*10) / tsf_delta;
+ tmp = (tmp - (dec1*tsf_delta))*10;
+ dec3 = ((tmp - dec2*tsf_delta)*10) / tsf_delta;
+
+ if (dec3 > 4) {
+ if (dec2 == 9) {
+ dec2 = 0;
+ if (dec1 == 9) {
+ dec1 = 0;
+ hfactor++;
+ }
+ else {
+ dec1++;
+ }
+ }
+ else
+ dec2++;
+ }
+ }
+
+ if (hfactor) {
+ htsf = ((cyc_delta * 10) / (hfactor*10+dec1)) + prev_tsf.low;
+ dhd->htsf.coef = hfactor;
+ dhd->htsf.last_cycle = cur_cycle;
+ dhd->htsf.last_tsf = cur_tsf.low;
+ dhd->htsf.coefdec1 = dec1;
+ dhd->htsf.coefdec2 = dec2;
+ }
+ else {
+ htsf = prev_tsf.low;
+ }
+}
+
+#endif /* WLMEDIA_HTSF */
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_sched.c b/drivers/net/wireless/bcmdhd/dhd_linux_sched.c
new file mode 100644
index 000000000000..290caf7e658c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux_sched.c
@@ -0,0 +1,39 @@
+/*
+ * Expose some of the kernel scheduler routines
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_linux_sched.c 291086 2011-10-21 01:17:24Z $
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <typedefs.h>
+#include <linuxver.h>
+
+int setScheduler(struct task_struct *p, int policy, struct sched_param *param)
+{
+ int rc = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+ rc = sched_setscheduler(p, policy, param);
+#endif /* LinuxVer */
+ return rc;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_proto.h b/drivers/net/wireless/bcmdhd/dhd_proto.h
new file mode 100644
index 000000000000..e420166a95a9
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_proto.h
@@ -0,0 +1,109 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_proto.h 303834 2011-12-20 06:17:39Z $
+ */
+
+#ifndef _dhd_proto_h_
+#define _dhd_proto_h_
+
+#include <dhdioctl.h>
+#include <wlioctl.h>
+
+#ifndef IOCTL_RESP_TIMEOUT
+#define IOCTL_RESP_TIMEOUT 20000 /* in milliseconds */
+#endif /* IOCTL_RESP_TIMEOUT */
+
+/*
+ * Exported from the dhd protocol module (dhd_cdc, dhd_rndis)
+ */
+
+/* Linkage, sets prot link and updates hdrlen in pub */
+extern int dhd_prot_attach(dhd_pub_t *dhdp);
+
+/* Unlink, frees allocated protocol memory (including dhd_prot) */
+extern void dhd_prot_detach(dhd_pub_t *dhdp);
+
+/* Initialize protocol: sync w/dongle state.
+ * Sets dongle media info (iswl, drv_version, mac address).
+ */
+extern int dhd_prot_init(dhd_pub_t *dhdp);
+
+/* Stop protocol: sync w/dongle state. */
+extern void dhd_prot_stop(dhd_pub_t *dhdp);
+
+/* Add any protocol-specific data header.
+ * Caller must reserve prot_hdrlen prepend space.
+ */
+extern void dhd_prot_hdrpush(dhd_pub_t *, int ifidx, void *txp);
+
+/* Remove any protocol-specific data header. */
+extern int dhd_prot_hdrpull(dhd_pub_t *, int *ifidx, void *rxp, uchar *buf, uint *len);
+
+/* Use protocol to issue ioctl to dongle */
+extern int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len);
+
+/* Handles a protocol control response asynchronously */
+extern int dhd_prot_ctl_complete(dhd_pub_t *dhd);
+
+/* Check for and handle local prot-specific iovar commands */
+extern int dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name,
+ void *params, int plen, void *arg, int len, bool set);
+
+/* Add prot dump output to a buffer */
+extern void dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+
+/* Update local copy of dongle statistics */
+extern void dhd_prot_dstats(dhd_pub_t *dhdp);
+
+extern int dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen);
+
+extern int dhd_preinit_ioctls(dhd_pub_t *dhd);
+
+#ifdef PROP_TXSTATUS
+extern int dhd_wlfc_enque_sendq(void* state, int prec, void* p);
+extern int dhd_wlfc_commit_packets(void* state, f_commitpkt_t fcommit, void* commit_ctx);
+extern void dhd_wlfc_cleanup(dhd_pub_t *dhd);
+#endif /* PROP_TXSTATUS */
+
+extern int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
+ uint reorder_info_len, void **pkt, uint32 *free_buf_count);
+
+
+/********************************
+ * For version-string expansion *
+ */
+#if defined(BDC)
+#define DHD_PROTOCOL "bdc"
+#elif defined(CDC)
+#define DHD_PROTOCOL "cdc"
+#elif defined(RNDIS)
+#define DHD_PROTOCOL "rndis"
+#else
+#define DHD_PROTOCOL "unknown"
+#endif /* proto */
+
+#endif /* _dhd_proto_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_sdio.c b/drivers/net/wireless/bcmdhd/dhd_sdio.c
new file mode 100644
index 000000000000..0ec95d265f3b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_sdio.c
@@ -0,0 +1,6962 @@
+/*
+ * DHD Bus Module for SDIO
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_sdio.c 329638 2012-04-26 05:41:43Z $
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmsdh.h>
+
+#ifdef BCMEMBEDIMAGE
+#include BCMEMBEDIMAGE
+#endif /* BCMEMBEDIMAGE */
+
+#include <bcmdefs.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+
+#include <siutils.h>
+#include <hndpmu.h>
+#include <hndsoc.h>
+#include <bcmsdpcm.h>
+#if defined(DHD_DEBUG)
+#include <hndrte_armtrap.h>
+#include <hndrte_cons.h>
+#endif /* defined(DHD_DEBUG) */
+#include <sbchipc.h>
+#include <sbhnddma.h>
+
+#include <sdio.h>
+#include <sbsdio.h>
+#include <sbsdpcmdev.h>
+#include <bcmsdpcm.h>
+#include <bcmsdbus.h>
+
+#include <proto/ethernet.h>
+#include <proto/802.1d.h>
+#include <proto/802.11.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <dhdioctl.h>
+#include <sdiovar.h>
+
+#ifndef DHDSDIO_MEM_DUMP_FNAME
+#define DHDSDIO_MEM_DUMP_FNAME "mem_dump"
+#endif
+
+#define QLEN 256 /* bulk rx and tx queue lengths */
+#define FCHI (QLEN - 10)
+#define FCLOW (FCHI / 2)
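+/* Illustrative watermark arithmetic with the defaults above: QLEN = 256 gives
+ * FCHI = 246 and FCLOW = 123, so dhd_bus_txdata() asserts host flow control
+ * once the tx queue reaches 246 packets and dhdsdio_sendfromq() releases it
+ * again when the queue drains below 123.
+ */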
+#define PRIOMASK 7
+
+#define TXRETRIES 2 /* # of retries for tx frames */
+
+#define DHD_RXBOUND 50 /* Default for max rx frames in one scheduling */
+
+#define DHD_TXBOUND 20 /* Default for max tx frames in one scheduling */
+
+#define DHD_TXMINMAX 1 /* Max tx frames if rx still pending */
+
+#define MEMBLOCK 2048 /* Block size used for downloading of dongle image */
+#define MAX_NVRAMBUF_SIZE 4096 /* max nvram buf size */
+#define MAX_DATA_BUF (32 * 1024) /* Must be large enough to hold biggest possible glom */
+
+#ifndef DHD_FIRSTREAD
+#define DHD_FIRSTREAD 32
+#endif
+#if !ISPOWEROF2(DHD_FIRSTREAD)
+#error DHD_FIRSTREAD is not a power of 2!
+#endif
+
+/* Total length of frame header for dongle protocol */
+#define SDPCM_HDRLEN (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
+#ifdef SDTEST
+#define SDPCM_RESERVE (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN)
+#else
+#define SDPCM_RESERVE (SDPCM_HDRLEN + DHD_SDALIGN)
+#endif
+
+/* Space for header read, limit for data packets */
+#ifndef MAX_HDR_READ
+#define MAX_HDR_READ 32
+#endif
+#if !ISPOWEROF2(MAX_HDR_READ)
+#error MAX_HDR_READ is not a power of 2!
+#endif
+
+#define MAX_RX_DATASZ 2048
+
+/* Maximum milliseconds to wait for F2 to come up */
+#define DHD_WAIT_F2RDY 3000
+
+/* Bump up limit on waiting for HT to account for first startup;
+ * if the image is doing a CRC calculation before programming the PMU
+ * for HT availability, it could take a couple hundred ms more, so
+ * max out at 1 second (1000000 us).
+ */
+#if (PMU_MAX_TRANSITION_DLY <= 1000000)
+#undef PMU_MAX_TRANSITION_DLY
+#define PMU_MAX_TRANSITION_DLY 1000000
+#endif
+
+/* Value for ChipClockCSR during initial setup */
+#define DHD_INIT_CLKCTL1 (SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ)
+#define DHD_INIT_CLKCTL2 (SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP)
+
+/* Flags for SDH calls */
+#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
+
+/* Packet free is unconditional for SDIO and SD-SPI buses; for the gSPI bus it
+ * is conditional on a buffer pool being present.
+ */
+#define PKTFREE2() if ((bus->bus != SPI_BUS) || bus->usebufpool) \
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep);
+#if defined(OOB_INTR_ONLY)
+extern void bcmsdh_set_irq(int flag);
+#endif /* defined(OOB_INTR_ONLY) */
+#ifdef PROP_TXSTATUS
+extern void dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success);
+#endif
+
+#ifdef DHD_DEBUG
+/* Device console log buffer state */
+#define CONSOLE_LINE_MAX 192
+#define CONSOLE_BUFFER_MAX 2024
+typedef struct dhd_console {
+ uint count; /* Poll interval msec counter */
+ uint log_addr; /* Log struct address (fixed) */
+ hndrte_log_t log; /* Log struct (host copy) */
+ uint bufsize; /* Size of log buffer */
+ uint8 *buf; /* Log buffer (host copy) */
+ uint last; /* Last buffer read index */
+} dhd_console_t;
+#endif /* DHD_DEBUG */
+
+#define REMAP_ENAB(bus) ((bus)->remap)
+#define REMAP_ISADDR(bus, a) (((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize)))
+#define KSO_ENAB(bus) ((bus)->kso)
+#define SR_ENAB(bus) ((bus)->_srenab)
+#define SLPAUTO_ENAB(bus) ((SR_ENAB(bus)) && ((bus)->_slpauto))
+#define MIN_RSRC_ADDR (SI_ENUM_BASE + 0x618)
+#define MIN_RSRC_SR 0x3
+#define CORE_CAPEXT_ADDR (SI_ENUM_BASE + 0x64c)
+#define CORE_CAPEXT_SR_SUPPORTED_MASK (1 << 1)
+
+#define OOB_WAKEUP_ENAB(bus) ((bus)->_oobwakeup)
+#define GPIO_DEV_SRSTATE 16 /* Host gpio17 mapped to device gpio0 SR state */
+#define GPIO_DEV_SRSTATE_TIMEOUT 320000 /* 320ms */
+#define GPIO_DEV_WAKEUP 17 /* Host gpio17 mapped to device gpio1 wakeup */
+#define CC_CHIPCTRL2_GPIO1_WAKEUP (1 << 0)
+
+
+/* Private data for SDIO bus interaction */
+typedef struct dhd_bus {
+ dhd_pub_t *dhd;
+
+ bcmsdh_info_t *sdh; /* Handle for BCMSDH calls */
+ si_t *sih; /* Handle for SI calls */
+ char *vars; /* Variables (from CIS and/or other) */
+ uint varsz; /* Size of variables buffer */
+ uint32 sbaddr; /* Current SB window pointer (-1, invalid) */
+
+ sdpcmd_regs_t *regs; /* Registers for SDIO core */
+ uint sdpcmrev; /* SDIO core revision */
+ uint armrev; /* CPU core revision */
+ uint ramrev; /* SOCRAM core revision */
+ uint32 ramsize; /* Size of RAM in SOCRAM (bytes) */
+ uint32 orig_ramsize; /* Size of RAM in SOCRAM (bytes) */
+ uint32 srmemsize; /* Size of SRMEM */
+
+ uint32 bus; /* gSPI or SDIO bus */
+ uint32 hostintmask; /* Copy of Host Interrupt Mask */
+ uint32 intstatus; /* Intstatus bits (events) pending */
+ bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */
+ bool fcstate; /* State of dongle flow-control */
+
+ uint16 cl_devid; /* cached devid for dhdsdio_probe_attach() */
+ char *fw_path; /* module_param: path to firmware image */
+ char *nv_path; /* module_param: path to nvram vars file */
+ const char *nvram_params; /* user specified nvram params. */
+
+ uint blocksize; /* Block size of SDIO transfers */
+ uint roundup; /* Max roundup limit */
+
+ struct pktq txq; /* Queue length used for flow-control */
+ uint8 flowcontrol; /* per prio flow control bitmask */
+ uint8 tx_seq; /* Transmit sequence number (next) */
+ uint8 tx_max; /* Maximum transmit sequence allowed */
+
+ uint8 hdrbuf[MAX_HDR_READ + DHD_SDALIGN];
+ uint8 *rxhdr; /* Header of current rx frame (in hdrbuf) */
+ uint16 nextlen; /* Next Read Len from last header */
+ uint8 rx_seq; /* Receive sequence number (expected) */
+ bool rxskip; /* Skip receive (awaiting NAK ACK) */
+
+ void *glomd; /* Packet containing glomming descriptor */
+ void *glom; /* Packet chain for glommed superframe */
+ uint glomerr; /* Glom packet read errors */
+
+ uint8 *rxbuf; /* Buffer for receiving control packets */
+ uint rxblen; /* Allocated length of rxbuf */
+ uint8 *rxctl; /* Aligned pointer into rxbuf */
+ uint8 *databuf; /* Buffer for receiving big glom packet */
+ uint8 *dataptr; /* Aligned pointer into databuf */
+ uint rxlen; /* Length of valid data in buffer */
+
+ uint8 sdpcm_ver; /* Bus protocol reported by dongle */
+
+ bool intr; /* Use interrupts */
+ bool poll; /* Use polling */
+ bool ipend; /* Device interrupt is pending */
+ bool intdis; /* Interrupts disabled by isr */
+ uint intrcount; /* Count of device interrupt callbacks */
+ uint lastintrs; /* Count as of last watchdog timer */
+ uint spurious; /* Count of spurious interrupts */
+ uint pollrate; /* Ticks between device polls */
+ uint polltick; /* Tick counter */
+ uint pollcnt; /* Count of active polls */
+
+#ifdef DHD_DEBUG
+ dhd_console_t console; /* Console output polling support */
+ uint console_addr; /* Console address from shared struct */
+#endif /* DHD_DEBUG */
+
+ uint regfails; /* Count of R_REG/W_REG failures */
+
+ uint clkstate; /* State of sd and backplane clock(s) */
+ bool activity; /* Activity flag for clock down */
+ int32 idletime; /* Control for activity timeout */
+ int32 idlecount; /* Activity timeout counter */
+ int32 idleclock; /* How to set bus driver when idle */
+ int32 sd_divisor; /* Speed control to bus driver */
+ int32 sd_mode; /* Mode control to bus driver */
+ int32 sd_rxchain; /* If bcmsdh api accepts PKT chains */
+ bool use_rxchain; /* If dhd should use PKT chains */
+ bool sleeping; /* Is SDIO bus sleeping? */
+ uint rxflow_mode; /* Rx flow control mode */
+ bool rxflow; /* Is rx flow control on */
+ uint prev_rxlim_hit; /* Is prev rx limit exceeded (per dpc schedule) */
+ bool alp_only; /* Don't use HT clock (ALP only) */
+	/* Field to decide if rx of control frames happens in rxbuf or lb-pool */
+ bool usebufpool;
+
+#ifdef SDTEST
+ /* external loopback */
+ bool ext_loop;
+ uint8 loopid;
+
+ /* pktgen configuration */
+ uint pktgen_freq; /* Ticks between bursts */
+ uint pktgen_count; /* Packets to send each burst */
+ uint pktgen_print; /* Bursts between count displays */
+ uint pktgen_total; /* Stop after this many */
+ uint pktgen_minlen; /* Minimum packet data len */
+ uint pktgen_maxlen; /* Maximum packet data len */
+ uint pktgen_mode; /* Configured mode: tx, rx, or echo */
+ uint pktgen_stop; /* Number of tx failures causing stop */
+
+ /* active pktgen fields */
+ uint pktgen_tick; /* Tick counter for bursts */
+ uint pktgen_ptick; /* Burst counter for printing */
+ uint pktgen_sent; /* Number of test packets generated */
+ uint pktgen_rcvd; /* Number of test packets received */
+ uint pktgen_fail; /* Number of failed send attempts */
+ uint16 pktgen_len; /* Length of next packet to send */
+#define PKTGEN_RCV_IDLE (0)
+#define PKTGEN_RCV_ONGOING (1)
+ uint16 pktgen_rcv_state; /* receive state */
+ uint pktgen_rcvd_rcvsession; /* test pkts rcvd per rcv session. */
+#endif /* SDTEST */
+
+ /* Some additional counters */
+ uint tx_sderrs; /* Count of tx attempts with sd errors */
+ uint fcqueued; /* Tx packets that got queued */
+ uint rxrtx; /* Count of rtx requests (NAK to dongle) */
+ uint rx_toolong; /* Receive frames too long to receive */
+ uint rxc_errors; /* SDIO errors when reading control frames */
+ uint rx_hdrfail; /* SDIO errors on header reads */
+ uint rx_badhdr; /* Bad received headers (roosync?) */
+ uint rx_badseq; /* Mismatched rx sequence number */
+ uint fc_rcvd; /* Number of flow-control events received */
+ uint fc_xoff; /* Number which turned on flow-control */
+ uint fc_xon; /* Number which turned off flow-control */
+ uint rxglomfail; /* Failed deglom attempts */
+ uint rxglomframes; /* Number of glom frames (superframes) */
+ uint rxglompkts; /* Number of packets from glom frames */
+ uint f2rxhdrs; /* Number of header reads */
+ uint f2rxdata; /* Number of frame data reads */
+ uint f2txdata; /* Number of f2 frame writes */
+ uint f1regdata; /* Number of f1 register accesses */
+
+ uint8 *ctrl_frame_buf;
+ uint32 ctrl_frame_len;
+ bool ctrl_frame_stat;
+ uint32 rxint_mode; /* rx interrupt mode */
+ bool remap; /* Contiguous 1MB RAM: 512K socram + 512K devram
+ * Available with socram rev 16
+ * Remap region not DMA-able
+ */
+ bool kso;
+ bool _slpauto;
+ bool _oobwakeup;
+ bool _srenab;
+ bool readframes;
+ bool reqbussleep;
+} dhd_bus_t;
+
+/* clkstate */
+#define CLK_NONE 0
+#define CLK_SDONLY 1
+#define CLK_PENDING 2 /* Not used yet */
+#define CLK_AVAIL 3
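+/* Rough sketch of the clock state machine driven by dhdsdio_clkctl() below:
+ * CLK_NONE (SD clock idled) -> CLK_SDONLY (SD bus clock up, via
+ * dhdsdio_sdclk()) -> CLK_AVAIL (backplane HT clock requested, via
+ * dhdsdio_htclk()). CLK_PENDING is entered by dhdsdio_htclk() when it elects
+ * to wait for the clock-available interrupt instead of polling.
+ */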
+
+#define DHD_NOPMU(dhd) (FALSE)
+
+#ifdef DHD_DEBUG
+static int qcount[NUMPRIO];
+static int tx_packets[NUMPRIO];
+#endif /* DHD_DEBUG */
+
+/* Deferred transmit */
+const uint dhd_deferred_tx = 1;
+
+extern uint dhd_watchdog_ms;
+extern void dhd_os_wd_timer(void *bus, uint wdtick);
+
+/* Tx/Rx bounds */
+uint dhd_txbound;
+uint dhd_rxbound;
+uint dhd_txminmax = DHD_TXMINMAX;
+
+/* override the RAM size if possible */
+#define DONGLE_MIN_MEMSIZE (128 * 1024)
+int dhd_dongle_memsize;
+
+static bool dhd_doflow;
+static bool dhd_alignctl;
+
+static bool sd1idle;
+
+static bool retrydata;
+#define RETRYCHAN(chan) (((chan) == SDPCM_EVENT_CHANNEL) || retrydata)
+
+static const uint watermark = 8;
+static const uint mesbusyctrl = 0;
+static const uint firstread = DHD_FIRSTREAD;
+
+#define HDATLEN (firstread - (SDPCM_HDRLEN))
+
+/* Retry count for register access failures */
+static const uint retry_limit = 2;
+
+/* Force even SD lengths (some host controllers mess up on odd bytes) */
+static bool forcealign;
+
+/* Flag to indicate if we should download firmware on driver load */
+uint dhd_download_fw_on_driverload = TRUE;
+
+#define ALIGNMENT 4
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+extern void bcmsdh_enable_hw_oob_intr(void *sdh, bool enable);
+#endif
+
+#if defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD)
+#error OOB_INTR_ONLY is NOT working with SDIO_ISR_THREAD
+#endif /* defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) */
+#define PKTALIGN(osh, p, len, align) \
+ do { \
+ uint datalign; \
+ datalign = (uintptr)PKTDATA((osh), (p)); \
+ datalign = ROUNDUP(datalign, (align)) - datalign; \
+ ASSERT(datalign < (align)); \
+ ASSERT(PKTLEN((osh), (p)) >= ((len) + datalign)); \
+ if (datalign) \
+ PKTPULL((osh), (p), datalign); \
+ PKTSETLEN((osh), (p), (len)); \
+ } while (0)
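+/* Worked example of PKTALIGN (hypothetical pointer values): with the packet
+ * data pointer at 0x1003 and align = 4, datalign = ROUNDUP(0x1003, 4) - 0x1003
+ * = 1, so one byte is pulled and the data now starts at the 4-byte-aligned
+ * address 0x1004 before the packet length is set to len.
+ */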
+
+/* Limit on rounding up frames */
+static const uint max_roundup = 512;
+
+/* Try doing readahead */
+static bool dhd_readahead;
+
+
+/* To check if there's a window offered */
+#define DATAOK(bus) \
+ (((uint8)(bus->tx_max - bus->tx_seq) > 1) && \
+ (((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
+
+/* To check if there's a window offered for a ctrl frame */
+#define TXCTLOK(bus) \
+ (((uint8)(bus->tx_max - bus->tx_seq) != 0) && \
+ (((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
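+/* Worked example of the 8-bit credit window above (hypothetical sequence
+ * numbers): with tx_max = 0x05 and tx_seq = 0xFB, (uint8)(tx_max - tx_seq)
+ * wraps to 10, so both DATAOK and TXCTLOK are true with a 10-frame window.
+ * A difference of exactly 1 leaves room for a control frame (TXCTLOK) but not
+ * for data (DATAOK); with tx_max == tx_seq the difference is 0 and both are
+ * false, and if tx_seq has run slightly ahead of tx_max the difference has
+ * bit 7 set, which is also treated as "no window".
+ */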
+
+/* Macros to get register read/write status */
+/* NOTE: these assume a local dhd_bus_t *bus! */
+#define R_SDREG(regvar, regaddr, retryvar) \
+do { \
+ retryvar = 0; \
+ do { \
+ regvar = R_REG(bus->dhd->osh, regaddr); \
+ } while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
+ if (retryvar) { \
+ bus->regfails += (retryvar-1); \
+ if (retryvar > retry_limit) { \
+			DHD_ERROR(("%s: FAILED " #regvar " READ, LINE %d\n", \
+ __FUNCTION__, __LINE__)); \
+ regvar = 0; \
+ } \
+ } \
+} while (0)
+
+#define W_SDREG(regval, regaddr, retryvar) \
+do { \
+ retryvar = 0; \
+ do { \
+ W_REG(bus->dhd->osh, regaddr, regval); \
+ } while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
+ if (retryvar) { \
+ bus->regfails += (retryvar-1); \
+ if (retryvar > retry_limit) \
+ DHD_ERROR(("%s: FAILED REGISTER WRITE, LINE %d\n", \
+ __FUNCTION__, __LINE__)); \
+ } \
+} while (0)
+
+#define BUS_WAKE(bus) \
+ do { \
+ if ((bus)->sleeping) \
+ dhdsdio_bussleep((bus), FALSE); \
+ } while (0);
+
+/*
+ * pktavail interrupts from dongle to host can be managed in 3 different ways
+ * whenever there is a packet available in dongle to transmit to host.
+ *
+ * Mode 0: Dongle writes the software host mailbox and host is interrupted.
+ * Mode 1: (sdiod core rev >= 4)
+ * Device sets a new bit in the intstatus whenever there is a packet
+ * available in fifo. Host can't clear this specific status bit until all the
+ * packets are read from the FIFO. No need to ack dongle intstatus.
+ * Mode 2: (sdiod core rev >= 4)
+ * Device sets a bit in the intstatus, and host acks this by writing
+ *	one to this bit. Dongle won't generate any more packet interrupts
+ *	until the host reads all the packets from the dongle and reads a zero to
+ *	confirm that there are no more packets. No need to disable host ints.
+ * Need to ack the intstatus.
+ */
+
+#define SDIO_DEVICE_HMB_RXINT 0 /* default old way */
+#define SDIO_DEVICE_RXDATAINT_MODE_0 1 /* from sdiod rev 4 */
+#define SDIO_DEVICE_RXDATAINT_MODE_1 2 /* from sdiod rev 4 */
+
+
+#define FRAME_AVAIL_MASK(bus) \
+ ((bus->rxint_mode == SDIO_DEVICE_HMB_RXINT) ? I_HMB_FRAME_IND : I_XMTDATA_AVAIL)
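+/* In other words: in the legacy SDIO_DEVICE_HMB_RXINT mode the host watches
+ * the I_HMB_FRAME_IND mailbox bit, while both RXDATAINT modes (sdiod core
+ * rev >= 4) watch I_XMTDATA_AVAIL instead; only the second RXDATAINT mode
+ * additionally requires the host to ack the intstatus bit, as described in
+ * the comment block above.
+ */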
+
+#define DHD_BUS SDIO_BUS
+
+#define PKT_AVAILABLE(bus, intstatus) ((intstatus) & (FRAME_AVAIL_MASK(bus)))
+
+#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
+
+#define GSPI_PR55150_BAILOUT
+
+#ifdef SDTEST
+static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq);
+static void dhdsdio_sdtest_set(dhd_bus_t *bus, uint8 count);
+#endif
+
+#ifdef DHD_DEBUG
+static int dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size);
+static int dhd_serialconsole(dhd_bus_t *bus, bool get, bool enable, int *bcmerror);
+#endif /* DHD_DEBUG */
+
+static int dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap);
+static int dhdsdio_download_state(dhd_bus_t *bus, bool enter);
+
+static void dhdsdio_release(dhd_bus_t *bus, osl_t *osh);
+static void dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh);
+static void dhdsdio_disconnect(void *ptr);
+static bool dhdsdio_chipmatch(uint16 chipid);
+static bool dhdsdio_probe_attach(dhd_bus_t *bus, osl_t *osh, void *sdh,
+ void * regsva, uint16 devid);
+static bool dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static bool dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static void dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation,
+ bool reset_flag);
+
+static void dhd_dongle_setmemsize(struct dhd_bus *bus, int mem_size);
+static int dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes,
+ void *pkt, bcmsdh_cmplt_fn_t complete, void *handle);
+static int dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes,
+ void *pkt, bcmsdh_cmplt_fn_t complete, void *handle);
+
+static bool dhdsdio_download_firmware(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static int _dhdsdio_download_firmware(dhd_bus_t *bus);
+
+static int dhdsdio_download_code_file(dhd_bus_t *bus, char *image_path);
+static int dhdsdio_download_nvram(dhd_bus_t *bus);
+#ifdef BCMEMBEDIMAGE
+static int dhdsdio_download_code_array(dhd_bus_t *bus);
+#endif
+static int dhdsdio_bussleep(dhd_bus_t *bus, bool sleep);
+static int dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok);
+static uint8 dhdsdio_sleepcsr_get(dhd_bus_t *bus);
+
+#ifdef WLMEDIA_HTSF
+#include <htsf.h>
+extern uint32 dhd_get_htsf(void *dhd, int ifidx);
+#endif /* WLMEDIA_HTSF */
+
+static void
+dhd_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
+{
+ int32 min_size = DONGLE_MIN_MEMSIZE;
+ /* Restrict the memsize to user specified limit */
+ DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
+ dhd_dongle_memsize, min_size));
+ if ((dhd_dongle_memsize > min_size) &&
+ (dhd_dongle_memsize < (int32)bus->orig_ramsize))
+ bus->ramsize = dhd_dongle_memsize;
+}
+
+static int
+dhdsdio_set_siaddr_window(dhd_bus_t *bus, uint32 address)
+{
+ int err = 0;
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+ (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
+ if (!err)
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
+ (address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
+ if (!err)
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
+ (address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);
+ return err;
+}
+
+
+#ifdef USE_OOB_GPIO1
+static int
+dhdsdio_oobwakeup_init(dhd_bus_t *bus)
+{
+ uint32 val, addr, data;
+
+ bcmsdh_gpioouten(bus->sdh, GPIO_DEV_WAKEUP);
+
+ addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
+ data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
+
+ /* Set device for gpio1 wakeup */
+ bcmsdh_reg_write(bus->sdh, addr, 4, 2);
+ val = bcmsdh_reg_read(bus->sdh, data, 4);
+ val |= CC_CHIPCTRL2_GPIO1_WAKEUP;
+ bcmsdh_reg_write(bus->sdh, data, 4, val);
+
+ bus->_oobwakeup = TRUE;
+
+ return 0;
+}
+#endif /* USE_OOB_GPIO1 */
+
+/*
+ * Query if FW is in SR mode
+ */
+static bool
+dhdsdio_sr_cap(dhd_bus_t *bus)
+{
+ bool cap = FALSE;
+ uint32 min = 0, core_capext, addr, data;
+ if (bus->sih->chip == BCM4324_CHIP_ID) {
+ addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
+ data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
+ bcmsdh_reg_write(bus->sdh, addr, 4, 3);
+ core_capext = bcmsdh_reg_read(bus->sdh, data, 4);
+ } else {
+ core_capext = bcmsdh_reg_read(bus->sdh, CORE_CAPEXT_ADDR, 4);
+ core_capext = (core_capext & CORE_CAPEXT_SR_SUPPORTED_MASK);
+ }
+ if (!(core_capext))
+ return FALSE;
+
+ min = bcmsdh_reg_read(bus->sdh, MIN_RSRC_ADDR, 4);
+ if (min == MIN_RSRC_SR) {
+ cap = TRUE;
+
+ if ((bus->sih->chip == BCM4334_CHIP_ID) && (bus->sih->chiprev < 3)) {
+ cap = FALSE;
+
+ DHD_ERROR(("Only 4334 >= B2 supports SR: curr rev %d\n",
+ bus->sih->chiprev));
+ }
+ }
+
+ return cap;
+}
+
+static int
+dhdsdio_srwar_init(dhd_bus_t *bus)
+{
+
+ bcmsdh_gpio_init(bus->sdh);
+
+#ifdef USE_OOB_GPIO1
+ dhdsdio_oobwakeup_init(bus);
+#endif
+
+
+ return 0;
+}
+
+static int
+dhdsdio_sr_init(dhd_bus_t *bus)
+{
+ uint8 val;
+ int err = 0;
+
+ if ((bus->sih->chip == BCM4334_CHIP_ID) && (bus->sih->chiprev == 2))
+ dhdsdio_srwar_init(bus);
+
+ val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL);
+ val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL,
+ 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT, &err);
+ val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL);
+
+ /* Add CMD14 Support */
+ dhdsdio_devcap_set(bus,
+ (SDIOD_CCCR_BRCM_CARDCAP_CMD14_SUPPORT | SDIOD_CCCR_BRCM_CARDCAP_CMD14_EXT));
+
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, SBSDIO_FORCE_HT, &err);
+
+ bus->_slpauto = dhd_slpauto ? TRUE : FALSE;
+
+ bus->_srenab = TRUE;
+
+ return 0;
+}
+
+/*
+ * FIX: Be sure KSO bit is enabled
+ * Currently it defaults to 0, but it should be 1.
+ */
+static int
+dhdsdio_clk_kso_init(dhd_bus_t *bus)
+{
+ uint8 val;
+ int err = 0;
+
+ /* set flag */
+ bus->kso = TRUE;
+
+ /*
+ * Enable KeepSdioOn (KSO) bit for normal operation
+ * Default is 0 (4334A0) so set it. Fixed in B0.
+ */
+ val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, NULL);
+ if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
+ val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, val, &err);
+ if (err)
+ DHD_ERROR(("%s: SBSDIO_FUNC1_SLEEPCSR err: 0x%x\n", __FUNCTION__, err));
+ }
+
+ return 0;
+}
+
+static int
+dhdsdio_clk_kso_enab(dhd_bus_t *bus, bool on)
+{
+ uint8 val = 0;
+ int err = 0;
+
+	/* Don't read here since SDIO could be off; just write */
+ val |= (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, val, &err);
+
+ if (err)
+ DHD_TRACE(("%s: KSO toggle %d failed: %d\n", __FUNCTION__, on, err));
+ return err;
+}
+
+static int
+dhdsdio_clk_kso_iovar(dhd_bus_t *bus, bool on)
+{
+ int err = 0;
+
+ if (on == FALSE) {
+
+ BUS_WAKE(bus);
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ DHD_ERROR(("%s: KSO disable clk: 0x%x\n", __FUNCTION__,
+ bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err)));
+ dhdsdio_clk_kso_enab(bus, FALSE);
+ } else {
+ DHD_ERROR(("%s: KSO enable\n", __FUNCTION__));
+
+ /* Make sure we have SD bus access */
+ if (bus->clkstate == CLK_NONE) {
+ DHD_ERROR(("%s: Request SD clk\n", __FUNCTION__));
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+ }
+
+		/* Double-write to be safe in case of an AOS transition */
+ dhdsdio_clk_kso_enab(bus, TRUE);
+ dhdsdio_clk_kso_enab(bus, TRUE);
+ OSL_DELAY(4000);
+
+ /* Wait for device ready during transition to wake-up */
+ SPINWAIT(((dhdsdio_sleepcsr_get(bus)) !=
+ (SBSDIO_FUNC1_SLEEPCSR_KSO_MASK |
+ SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK)),
+ (10000));
+
+ DHD_ERROR(("%s: sleepcsr: 0x%x\n", __FUNCTION__,
+ dhdsdio_sleepcsr_get(bus)));
+ }
+
+ bus->kso = on;
+ BCM_REFERENCE(err);
+
+ return 0;
+}
+
+static uint8
+dhdsdio_sleepcsr_get(dhd_bus_t *bus)
+{
+ int err = 0;
+ uint8 val = 0;
+
+ val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, &err);
+ if (err)
+ DHD_TRACE(("Failed to read SLEEPCSR: %d\n", err));
+
+ return val;
+}
+
+uint8
+dhdsdio_devcap_get(dhd_bus_t *bus)
+{
+ return bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_BRCM_CARDCAP, NULL);
+}
+
+static int
+dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap)
+{
+ int err = 0;
+
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_BRCM_CARDCAP, cap, &err);
+ if (err)
+ DHD_ERROR(("%s: devcap set err: 0x%x\n", __FUNCTION__, err));
+
+ return 0;
+}
+
+static int
+dhdsdio_clk_devsleep_iovar(dhd_bus_t *bus, bool on)
+{
+ int err = 0, retry;
+ uint8 val;
+
+ retry = 0;
+ if (on == TRUE) {
+ /* Enter Sleep */
+
+		/* Be sure we request the clock before going to sleep
+		 * so we wake up with the clock request already set;
+		 * otherwise the device can go back to sleep immediately.
+ */
+ if (!SLPAUTO_ENAB(bus))
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ else {
+ val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if ((val & SBSDIO_CSR_MASK) == 0) {
+ DHD_ERROR(("%s: No clock before enter sleep:0x%x\n",
+ __FUNCTION__, val));
+
+ /* Reset clock request */
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ SBSDIO_ALP_AVAIL_REQ, &err);
+ DHD_ERROR(("%s: clock before sleep:0x%x\n", __FUNCTION__,
+ bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err)));
+ }
+ }
+
+ DHD_TRACE(("%s: clk before sleep: 0x%x\n", __FUNCTION__,
+ bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err)));
+#ifdef USE_CMD14
+ err = bcmsdh_sleep(bus->sdh, TRUE);
+#else
+ err = dhdsdio_clk_kso_enab(bus, FALSE);
+ if (OOB_WAKEUP_ENAB(bus))
+ err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, FALSE); /* GPIO_1 is off */
+#endif
+ } else {
+ /* Exit Sleep */
+ /* Make sure we have SD bus access */
+ if (bus->clkstate == CLK_NONE) {
+ DHD_TRACE(("%s: Request SD clk\n", __FUNCTION__));
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+ }
+
+ if ((bus->sih->chip == BCM4334_CHIP_ID) && (bus->sih->chiprev == 2)) {
+ SPINWAIT((bcmsdh_gpioin(bus->sdh, GPIO_DEV_SRSTATE) != TRUE),
+ GPIO_DEV_SRSTATE_TIMEOUT);
+
+ if (bcmsdh_gpioin(bus->sdh, GPIO_DEV_SRSTATE) == FALSE) {
+ DHD_ERROR(("ERROR: GPIO_DEV_SRSTATE still low!\n"));
+ }
+ }
+#ifdef USE_CMD14
+ err = bcmsdh_sleep(bus->sdh, FALSE);
+ if (SLPAUTO_ENAB(bus) && (err != 0)) {
+ OSL_DELAY(10000);
+ DHD_TRACE(("%s: Resync device sleep\n", __FUNCTION__));
+
+ /* Toggle sleep to resync with host and device */
+ err = bcmsdh_sleep(bus->sdh, TRUE);
+ OSL_DELAY(10000);
+ err = bcmsdh_sleep(bus->sdh, FALSE);
+
+ if (err) {
+ OSL_DELAY(10000);
+ DHD_ERROR(("%s: CMD14 exit failed again!\n", __FUNCTION__));
+
+ /* Toggle sleep to resync with host and device */
+ err = bcmsdh_sleep(bus->sdh, TRUE);
+ OSL_DELAY(10000);
+ err = bcmsdh_sleep(bus->sdh, FALSE);
+ if (err) {
+ DHD_ERROR(("%s: CMD14 exit failed twice!\n", __FUNCTION__));
+ DHD_ERROR(("%s: FATAL: Device non-response!\n",
+ __FUNCTION__));
+ err = 0;
+ }
+ }
+ }
+#else
+ if (OOB_WAKEUP_ENAB(bus))
+ err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, TRUE); /* GPIO_1 is on */
+
+ do {
+ err = dhdsdio_clk_kso_enab(bus, TRUE);
+ OSL_DELAY(10000);
+ } while ((err != 0) && (++retry < 3));
+
+ if (err != 0) {
+ DHD_ERROR(("ERROR: kso set failed retry: %d\n", retry));
+ err = 0; /* continue anyway */
+ }
+#endif /* !USE_CMD14 */
+
+ if (err == 0) {
+ uint8 csr;
+
+ /* Wait for device ready during transition to wake-up */
+ SPINWAIT((((csr = dhdsdio_sleepcsr_get(bus)) &
+ SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK) !=
+ (SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK)), (10000));
+
+ DHD_TRACE(("%s: ExitSleep sleepcsr: 0x%x\n", __FUNCTION__, csr));
+
+ if (!(csr & SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK)) {
+ DHD_ERROR(("%s:ERROR: ExitSleep device NOT Ready! 0x%x\n",
+ __FUNCTION__, csr));
+ err = BCME_NODEVICE;
+ }
+
+ SPINWAIT((((csr = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err)) & SBSDIO_HT_AVAIL) !=
+ (SBSDIO_HT_AVAIL)), (10000));
+
+ }
+ }
+
+ /* Update if successful */
+ if (err == 0)
+ bus->kso = on ? FALSE : TRUE;
+ else {
+ DHD_ERROR(("%s: Sleep request failed: on:%d err:%d\n", __FUNCTION__, on, err));
+ }
+
+ return err;
+}
+
+/* Turn backplane clock on or off */
+static int
+dhdsdio_htclk(dhd_bus_t *bus, bool on, bool pendok)
+{
+ int err;
+ uint8 clkctl, clkreq, devctl;
+ bcmsdh_info_t *sdh;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#if defined(OOB_INTR_ONLY)
+ pendok = FALSE;
+#endif
+ clkctl = 0;
+ sdh = bus->sdh;
+
+
+ if (!KSO_ENAB(bus))
+ return BCME_OK;
+
+ if (SLPAUTO_ENAB(bus)) {
+ bus->clkstate = (on ? CLK_AVAIL : CLK_SDONLY);
+ return BCME_OK;
+ }
+
+ if (on) {
+ /* Request HT Avail */
+ clkreq = bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
+
+
+
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+ if (err) {
+ DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+
+ if (pendok &&
+ ((bus->sih->buscoretype == PCMCIA_CORE_ID) && (bus->sih->buscorerev == 9))) {
+ uint32 dummy, retries;
+ R_SDREG(dummy, &bus->regs->clockctlstatus, retries);
+ BCM_REFERENCE(dummy);
+ }
+
+ /* Check current status */
+ clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (err) {
+ DHD_ERROR(("%s: HT Avail read error: %d\n", __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+
+ /* Go to pending and await interrupt if appropriate */
+ if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
+ /* Allow only clock-available interrupt */
+ devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+ if (err) {
+ DHD_ERROR(("%s: Devctl access error setting CA: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+
+ devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+ DHD_INFO(("CLKCTL: set PENDING\n"));
+ bus->clkstate = CLK_PENDING;
+ return BCME_OK;
+ } else if (bus->clkstate == CLK_PENDING) {
+ /* Cancel CA-only interrupt filter */
+ devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+ devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+ }
+
+ /* Otherwise, wait here (polling) for HT Avail */
+ if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+ SPINWAIT_SLEEP(sdioh_spinwait_sleep,
+ ((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err)),
+ !SBSDIO_CLKAV(clkctl, bus->alp_only)), PMU_MAX_TRANSITION_DLY);
+ }
+ if (err) {
+ DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+ DHD_ERROR(("%s: HT Avail timeout (%d): clkctl 0x%02x\n",
+ __FUNCTION__, PMU_MAX_TRANSITION_DLY, clkctl));
+ return BCME_ERROR;
+ }
+
+ /* Mark clock available */
+ bus->clkstate = CLK_AVAIL;
+ DHD_INFO(("CLKCTL: turned ON\n"));
+
+#if defined(DHD_DEBUG)
+ if (bus->alp_only == TRUE) {
+#if !defined(BCMLXSDMMC)
+ if (!SBSDIO_ALPONLY(clkctl)) {
+ DHD_ERROR(("%s: HT Clock, when ALP Only\n", __FUNCTION__));
+ }
+#endif /* !defined(BCMLXSDMMC) */
+ } else {
+ if (SBSDIO_ALPONLY(clkctl)) {
+ DHD_ERROR(("%s: HT Clock should be on.\n", __FUNCTION__));
+ }
+ }
+#endif /* defined (DHD_DEBUG) */
+
+ bus->activity = TRUE;
+ } else {
+ clkreq = 0;
+ if (bus->clkstate == CLK_PENDING) {
+ /* Cancel CA-only interrupt filter */
+ devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+ devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+ }
+
+ bus->clkstate = CLK_SDONLY;
+ if (!SR_ENAB(bus)) {
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+ DHD_INFO(("CLKCTL: turned OFF\n"));
+ if (err) {
+ DHD_ERROR(("%s: Failed access turning clock off: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ }
+ }
+ return BCME_OK;
+}
+
+/* Change idle/active SD state */
+static int
+dhdsdio_sdclk(dhd_bus_t *bus, bool on)
+{
+ int err;
+ int32 iovalue;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (on) {
+ if (bus->idleclock == DHD_IDLE_STOP) {
+ /* Turn on clock and restore mode */
+ iovalue = 1;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+ DHD_ERROR(("%s: error enabling sd_clock: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+
+ iovalue = bus->sd_mode;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+ DHD_ERROR(("%s: error changing sd_mode: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ } else if (bus->idleclock != DHD_IDLE_ACTIVE) {
+ /* Restore clock speed */
+ iovalue = bus->sd_divisor;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+ DHD_ERROR(("%s: error restoring sd_divisor: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ }
+ bus->clkstate = CLK_SDONLY;
+ } else {
+ /* Stop or slow the SD clock itself */
+ if ((bus->sd_divisor == -1) || (bus->sd_mode == -1)) {
+ DHD_TRACE(("%s: can't idle clock, divisor %d mode %d\n",
+ __FUNCTION__, bus->sd_divisor, bus->sd_mode));
+ return BCME_ERROR;
+ }
+ if (bus->idleclock == DHD_IDLE_STOP) {
+ if (sd1idle) {
+ /* Change to SD1 mode and turn off clock */
+ iovalue = 1;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+					DHD_ERROR(("%s: error changing sd_mode: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ }
+
+ iovalue = 0;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+ DHD_ERROR(("%s: error disabling sd_clock: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ } else if (bus->idleclock != DHD_IDLE_ACTIVE) {
+ /* Set divisor to idle value */
+ iovalue = bus->idleclock;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+ DHD_ERROR(("%s: error changing sd_divisor: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ }
+ bus->clkstate = CLK_NONE;
+ }
+
+ return BCME_OK;
+}
+
+/* Transition SD and backplane clock readiness */
+static int
+dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok)
+{
+ int ret = BCME_OK;
+#ifdef DHD_DEBUG
+ uint oldstate = bus->clkstate;
+#endif /* DHD_DEBUG */
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* Early exit if we're already there */
+ if (bus->clkstate == target) {
+ if (target == CLK_AVAIL) {
+ dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+ bus->activity = TRUE;
+ }
+ return ret;
+ }
+
+ switch (target) {
+ case CLK_AVAIL:
+ /* Make sure SD clock is available */
+ if (bus->clkstate == CLK_NONE)
+ dhdsdio_sdclk(bus, TRUE);
+ /* Now request HT Avail on the backplane */
+ ret = dhdsdio_htclk(bus, TRUE, pendok);
+ if (ret == BCME_OK) {
+ dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+ bus->activity = TRUE;
+ }
+ break;
+
+ case CLK_SDONLY:
+ /* Remove HT request, or bring up SD clock */
+ if (bus->clkstate == CLK_NONE)
+ ret = dhdsdio_sdclk(bus, TRUE);
+ else if (bus->clkstate == CLK_AVAIL)
+ ret = dhdsdio_htclk(bus, FALSE, FALSE);
+ else
+ DHD_ERROR(("dhdsdio_clkctl: request for %d -> %d\n",
+ bus->clkstate, target));
+ if (ret == BCME_OK) {
+ dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+ }
+ break;
+
+ case CLK_NONE:
+ /* Make sure to remove HT request */
+ if (bus->clkstate == CLK_AVAIL)
+ ret = dhdsdio_htclk(bus, FALSE, FALSE);
+ /* Now remove the SD clock */
+ ret = dhdsdio_sdclk(bus, FALSE);
+#ifdef DHD_DEBUG
+ if (dhd_console_ms == 0)
+#endif /* DHD_DEBUG */
+ if (bus->poll == 0)
+ dhd_os_wd_timer(bus->dhd, 0);
+ break;
+ }
+#ifdef DHD_DEBUG
+ DHD_INFO(("dhdsdio_clkctl: %d -> %d\n", oldstate, bus->clkstate));
+#endif /* DHD_DEBUG */
+
+ return ret;
+}
+
+static int
+dhdsdio_bussleep(dhd_bus_t *bus, bool sleep)
+{
+ int err = 0;
+ bcmsdh_info_t *sdh = bus->sdh;
+ sdpcmd_regs_t *regs = bus->regs;
+ uint retries = 0;
+
+ DHD_INFO(("dhdsdio_bussleep: request %s (currently %s)\n",
+ (sleep ? "SLEEP" : "WAKE"),
+ (bus->sleeping ? "SLEEP" : "WAKE")));
+
+ /* Done if we're already in the requested state */
+ if (sleep == bus->sleeping)
+ return BCME_OK;
+
+ /* Going to sleep: set the alarm and turn off the lights... */
+ if (sleep) {
+ /* Don't sleep if something is pending */
+ if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq))
+ return BCME_BUSY;
+
+
+ if (!SLPAUTO_ENAB(bus)) {
+ /* Disable SDIO interrupts (no longer interested) */
+ bcmsdh_intr_disable(bus->sdh);
+
+ /* Make sure the controller has the bus up */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ /* Tell device to start using OOB wakeup */
+ W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+ if (retries > retry_limit)
+ DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+
+ /* Turn off our contribution to the HT clock request */
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
+
+ /* Isolate the bus */
+ if (bus->sih->chip != BCM4329_CHIP_ID &&
+ bus->sih->chip != BCM4319_CHIP_ID) {
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL,
+ SBSDIO_DEVCTL_PADS_ISO, NULL);
+ }
+ } else {
+ /* Leave interrupts enabled since device can exit sleep and
+ * interrupt host
+ */
+ err = dhdsdio_clk_devsleep_iovar(bus, TRUE /* sleep */);
+ }
+
+ /* Change state */
+ bus->sleeping = TRUE;
+
+ } else {
+ /* Waking up: bus power up is ok, set local state */
+
+ if (!SLPAUTO_ENAB(bus)) {
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, &err);
+
+ /* Force pad isolation off if possible (in case power never toggled) */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, 0, NULL);
+
+
+ /* Make sure the controller has the bus up */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ /* Send misc interrupt to indicate OOB not needed */
+ W_SDREG(0, &regs->tosbmailboxdata, retries);
+ if (retries <= retry_limit)
+ W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+
+ if (retries > retry_limit)
+ DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n"));
+
+ /* Make sure we have SD bus access */
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+ /* Enable interrupts again */
+ if (bus->intr && (bus->dhd->busstate == DHD_BUS_DATA)) {
+ bus->intdis = FALSE;
+ bcmsdh_intr_enable(bus->sdh);
+ }
+ } else {
+ err = dhdsdio_clk_devsleep_iovar(bus, FALSE /* wake */);
+ }
+
+ if (err == 0) {
+ /* Change state */
+ bus->sleeping = FALSE;
+ }
+ }
+
+ return err;
+}
+
+#if defined(OOB_INTR_ONLY)
+void
+dhd_enable_oob_intr(struct dhd_bus *bus, bool enable)
+{
+#if defined(HW_OOB)
+ bcmsdh_enable_hw_oob_intr(bus->sdh, enable);
+#else
+ sdpcmd_regs_t *regs = bus->regs;
+ uint retries = 0;
+
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ if (enable == TRUE) {
+
+ /* Tell device to start using OOB wakeup */
+ W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+ if (retries > retry_limit)
+ DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+
+ } else {
+ /* Send misc interrupt to indicate OOB not needed */
+ W_SDREG(0, &regs->tosbmailboxdata, retries);
+ if (retries <= retry_limit)
+ W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+ }
+
+ /* Turn off our contribution to the HT clock request */
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+#endif /* !defined(HW_OOB) */
+}
+#endif /* defined(OOB_INTR_ONLY) */
+
+/* Writes a HW/SW header into the packet and sends it. */
+/* Assumes: (a) header space already there, (b) caller holds lock */
+static int
+dhdsdio_txpkt(dhd_bus_t *bus, void *pkt, uint chan, bool free_pkt)
+{
+ int ret;
+ osl_t *osh;
+ uint8 *frame;
+ uint16 len, pad1 = 0;
+ uint32 swheader;
+ uint retries = 0;
+ bcmsdh_info_t *sdh;
+ void *new;
+ int i;
+#ifdef WLMEDIA_HTSF
+ char *p;
+ htsfts_t *htsf_ts;
+#endif
+
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ sdh = bus->sdh;
+ osh = bus->dhd->osh;
+
+ if (bus->dhd->dongle_reset) {
+ ret = BCME_NOTREADY;
+ goto done;
+ }
+
+ frame = (uint8*)PKTDATA(osh, pkt);
+
+#ifdef WLMEDIA_HTSF
+ if (PKTLEN(osh, pkt) >= 100) {
+ p = PKTDATA(osh, pkt);
+ htsf_ts = (htsfts_t*) (p + HTSF_HOSTOFFSET + 12);
+ if (htsf_ts->magic == HTSFMAGIC) {
+ htsf_ts->c20 = get_cycles();
+ htsf_ts->t20 = dhd_get_htsf(bus->dhd->info, 0);
+ }
+ }
+#endif /* WLMEDIA_HTSF */
+
+ /* Add alignment padding, allocate new packet if needed */
+ if ((pad1 = ((uintptr)frame % DHD_SDALIGN))) {
+ if (PKTHEADROOM(osh, pkt) < pad1) {
+ DHD_INFO(("%s: insufficient headroom %d for %d pad1\n",
+ __FUNCTION__, (int)PKTHEADROOM(osh, pkt), pad1));
+ bus->dhd->tx_realloc++;
+ new = PKTGET(osh, (PKTLEN(osh, pkt) + DHD_SDALIGN), TRUE);
+ if (!new) {
+ DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n",
+ __FUNCTION__, PKTLEN(osh, pkt) + DHD_SDALIGN));
+ ret = BCME_NOMEM;
+ goto done;
+ }
+
+ PKTALIGN(osh, new, PKTLEN(osh, pkt), DHD_SDALIGN);
+ bcopy(PKTDATA(osh, pkt), PKTDATA(osh, new), PKTLEN(osh, pkt));
+ if (free_pkt)
+ PKTFREE(osh, pkt, TRUE);
+ /* free the pkt if canned one is not used */
+ free_pkt = TRUE;
+ pkt = new;
+ frame = (uint8*)PKTDATA(osh, pkt);
+ ASSERT(((uintptr)frame % DHD_SDALIGN) == 0);
+ pad1 = 0;
+ } else {
+ PKTPUSH(osh, pkt, pad1);
+ frame = (uint8*)PKTDATA(osh, pkt);
+
+ ASSERT((pad1 + SDPCM_HDRLEN) <= (int) PKTLEN(osh, pkt));
+ bzero(frame, pad1 + SDPCM_HDRLEN);
+ }
+ }
+ ASSERT(pad1 < DHD_SDALIGN);
+
+ /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
+ len = (uint16)PKTLEN(osh, pkt);
+ *(uint16*)frame = htol16(len);
+ *(((uint16*)frame) + 1) = htol16(~len);
+
+ /* Software tag: channel, sequence number, data offset */
+ swheader = ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) | bus->tx_seq |
+ (((pad1 + SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+ htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN);
+ htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
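+	/* Illustrative frame tag (hypothetical length): for len = 0x0040 the
+	 * hardware tag bytes are 40 00 bf ff (little-endian len then ~len), and
+	 * the software header that follows packs the channel, the current
+	 * tx_seq and the data offset (pad1 + SDPCM_HDRLEN) into one 32-bit word.
+	 */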
+
+#ifdef DHD_DEBUG
+ if (PKTPRIO(pkt) < ARRAYSIZE(tx_packets)) {
+ tx_packets[PKTPRIO(pkt)]++;
+ }
+ if (DHD_BYTES_ON() &&
+ (((DHD_CTL_ON() && (chan == SDPCM_CONTROL_CHANNEL)) ||
+ (DHD_DATA_ON() && (chan != SDPCM_CONTROL_CHANNEL))))) {
+ prhex("Tx Frame", frame, len);
+ } else if (DHD_HDRS_ON()) {
+ prhex("TxHdr", frame, MIN(len, 16));
+ }
+#endif
+
+ /* Raise len to next SDIO block to eliminate tail command */
+ if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+ uint16 pad2 = bus->blocksize - (len % bus->blocksize);
+ if ((pad2 <= bus->roundup) && (pad2 < bus->blocksize))
+#ifdef NOTUSED
+ if (pad2 <= PKTTAILROOM(osh, pkt))
+#endif /* NOTUSED */
+ len += pad2;
+ } else if (len % DHD_SDALIGN) {
+ len += DHD_SDALIGN - (len % DHD_SDALIGN);
+ }
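+	/* Example of the rounding above (hypothetical sizes): with blocksize =
+	 * 512, a 600-byte frame gets pad2 = 512 - (600 % 512) = 424, and if the
+	 * roundup limit permits it the send length becomes 1024; frames that
+	 * already fit in one block are only padded up to a DHD_SDALIGN multiple.
+	 */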
+
+ /* Some controllers have trouble with odd bytes -- round to even */
+ if (forcealign && (len & (ALIGNMENT - 1))) {
+#ifdef NOTUSED
+ if (PKTTAILROOM(osh, pkt))
+#endif
+ len = ROUNDUP(len, ALIGNMENT);
+#ifdef NOTUSED
+ else
+ DHD_ERROR(("%s: sending unrounded %d-byte packet\n", __FUNCTION__, len));
+#endif
+ }
+
+ do {
+ ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ frame, len, pkt, NULL, NULL);
+ bus->f2txdata++;
+ ASSERT(ret != BCME_PENDING);
+
+ if (ret == BCME_NODEVICE) {
+ DHD_ERROR(("%s: Device asleep already\n", __FUNCTION__));
+ } else if (ret < 0) {
+ /* On failure, abort the command and terminate the frame */
+ DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n",
+ __FUNCTION__, ret));
+ bus->tx_sderrs++;
+
+ bcmsdh_abort(sdh, SDIO_FUNC_2);
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, NULL);
+ bus->f1regdata++;
+
+ for (i = 0; i < 3; i++) {
+ uint8 hi, lo;
+ hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+ lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+ bus->f1regdata += 2;
+ if ((hi == 0) && (lo == 0))
+ break;
+ }
+ }
+ if (ret == 0) {
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+ }
+ } while ((ret < 0) && retrydata && retries++ < TXRETRIES);
+
+done:
+ /* restore pkt buffer pointer before calling tx complete routine */
+ PKTPULL(osh, pkt, SDPCM_HDRLEN + pad1);
+#ifdef PROP_TXSTATUS
+ if (bus->dhd->wlfc_state) {
+ dhd_os_sdunlock(bus->dhd);
+ dhd_wlfc_txcomplete(bus->dhd, pkt, ret == 0);
+ dhd_os_sdlock(bus->dhd);
+ } else {
+#endif /* PROP_TXSTATUS */
+ dhd_txcomplete(bus->dhd, pkt, ret != 0);
+ if (free_pkt)
+ PKTFREE(osh, pkt, TRUE);
+
+#ifdef PROP_TXSTATUS
+ }
+#endif
+ return ret;
+}
+
+int
+dhd_bus_txdata(struct dhd_bus *bus, void *pkt)
+{
+ int ret = BCME_ERROR;
+ osl_t *osh;
+ uint datalen, prec;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ osh = bus->dhd->osh;
+ datalen = PKTLEN(osh, pkt);
+
+#ifdef SDTEST
+ /* Push the test header if doing loopback */
+ if (bus->ext_loop) {
+ uint8* data;
+ PKTPUSH(osh, pkt, SDPCM_TEST_HDRLEN);
+ data = PKTDATA(osh, pkt);
+ *data++ = SDPCM_TEST_ECHOREQ;
+ *data++ = (uint8)bus->loopid++;
+ *data++ = (datalen >> 0);
+ *data++ = (datalen >> 8);
+ datalen += SDPCM_TEST_HDRLEN;
+ }
+#endif /* SDTEST */
+
+ /* Add space for the header */
+ PKTPUSH(osh, pkt, SDPCM_HDRLEN);
+ ASSERT(ISALIGNED((uintptr)PKTDATA(osh, pkt), 2));
+
+ prec = PRIO2PREC((PKTPRIO(pkt) & PRIOMASK));
+#ifndef DHDTHREAD
+ /* Lock: we're about to use shared data/code (and SDIO) */
+ dhd_os_sdlock(bus->dhd);
+#endif /* DHDTHREAD */
+
+ /* Check for existing queue, current flow-control, pending event, or pending clock */
+ if (dhd_deferred_tx || bus->fcstate || pktq_len(&bus->txq) || bus->dpc_sched ||
+ (!DATAOK(bus)) || (bus->flowcontrol & NBITVAL(prec)) ||
+ (bus->clkstate != CLK_AVAIL)) {
+ DHD_TRACE(("%s: deferring pktq len %d\n", __FUNCTION__,
+ pktq_len(&bus->txq)));
+ bus->fcqueued++;
+
+ /* Priority based enq */
+ dhd_os_sdlock_txq(bus->dhd);
+ if (dhd_prec_enq(bus->dhd, &bus->txq, pkt, prec) == FALSE) {
+ PKTPULL(osh, pkt, SDPCM_HDRLEN);
+#ifndef DHDTHREAD
+			/* Release txqlock as well before releasing sdlock.
+			 * If this thread kept txqlock while dropping sdlock,
+			 * dpc() could grab sdlock first and then deadlock when
+			 * it attempts to grab txqlock.
+ */
+ dhd_os_sdunlock_txq(bus->dhd);
+ dhd_os_sdunlock(bus->dhd);
+#endif
+#ifdef PROP_TXSTATUS
+ if (bus->dhd->wlfc_state)
+ dhd_wlfc_txcomplete(bus->dhd, pkt, FALSE);
+ else
+#endif
+ dhd_txcomplete(bus->dhd, pkt, FALSE);
+#ifndef DHDTHREAD
+ dhd_os_sdlock(bus->dhd);
+ dhd_os_sdlock_txq(bus->dhd);
+#endif
+#ifdef PROP_TXSTATUS
+ /* let the caller decide whether to free the packet */
+ if (!bus->dhd->wlfc_state)
+#endif
+ PKTFREE(osh, pkt, TRUE);
+ ret = BCME_NORESOURCE;
+ }
+ else
+ ret = BCME_OK;
+ dhd_os_sdunlock_txq(bus->dhd);
+
+ if ((pktq_len(&bus->txq) >= FCHI) && dhd_doflow)
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
+
+#ifdef DHD_DEBUG
+ if (pktq_plen(&bus->txq, prec) > qcount[prec])
+ qcount[prec] = pktq_plen(&bus->txq, prec);
+#endif
+ /* Schedule DPC if needed to send queued packet(s) */
+ if (dhd_deferred_tx && !bus->dpc_sched) {
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+ }
+ } else {
+#ifdef DHDTHREAD
+ /* Lock: we're about to use shared data/code (and SDIO) */
+ dhd_os_sdlock(bus->dhd);
+#endif /* DHDTHREAD */
+
+ /* Otherwise, send it now */
+ BUS_WAKE(bus);
+		/* Make sure backplane HT clock is on; no pending allowed */
+ dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
+#ifndef SDTEST
+ ret = dhdsdio_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, TRUE);
+#else
+ ret = dhdsdio_txpkt(bus, pkt,
+ (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL), TRUE);
+#endif
+ if (ret)
+ bus->dhd->tx_errors++;
+ else
+ bus->dhd->dstats.tx_bytes += datalen;
+
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ bus->activity = FALSE;
+ dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+ }
+
+#ifdef DHDTHREAD
+ dhd_os_sdunlock(bus->dhd);
+#endif /* DHDTHREAD */
+ }
+
+#ifndef DHDTHREAD
+ dhd_os_sdunlock(bus->dhd);
+#endif /* DHDTHREAD */
+
+ return ret;
+}
+
+static uint
+dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes)
+{
+ void *pkt;
+ uint32 intstatus = 0;
+ uint retries = 0;
+ int ret = 0, prec_out;
+ uint cnt = 0;
+ uint datalen;
+ uint8 tx_prec_map;
+
+ dhd_pub_t *dhd = bus->dhd;
+ sdpcmd_regs_t *regs = bus->regs;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (!KSO_ENAB(bus)) {
+ DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+ return BCME_NODEVICE;
+ }
+
+ tx_prec_map = ~bus->flowcontrol;
+
+ /* Send frames until the limit or some other event */
+ for (cnt = 0; (cnt < maxframes) && DATAOK(bus); cnt++) {
+ dhd_os_sdlock_txq(bus->dhd);
+ if ((pkt = pktq_mdeq(&bus->txq, tx_prec_map, &prec_out)) == NULL) {
+ dhd_os_sdunlock_txq(bus->dhd);
+ break;
+ }
+ dhd_os_sdunlock_txq(bus->dhd);
+ datalen = PKTLEN(bus->dhd->osh, pkt) - SDPCM_HDRLEN;
+
+#ifndef SDTEST
+ ret = dhdsdio_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, TRUE);
+#else
+ ret = dhdsdio_txpkt(bus, pkt,
+ (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL), TRUE);
+#endif
+ if (ret)
+ bus->dhd->tx_errors++;
+ else
+ bus->dhd->dstats.tx_bytes += datalen;
+
+ /* In poll mode, need to check for other events */
+ if (!bus->intr && cnt)
+ {
+ /* Check device status, signal pending interrupt */
+ R_SDREG(intstatus, &regs->intstatus, retries);
+ bus->f2txdata++;
+ if (bcmsdh_regfail(bus->sdh))
+ break;
+ if (intstatus & bus->hostintmask)
+ bus->ipend = TRUE;
+ }
+ }
+
+	/* Release stack flow control if needed */
+ if (dhd_doflow && dhd->up && (dhd->busstate == DHD_BUS_DATA) &&
+ dhd->txoff && (pktq_len(&bus->txq) < FCLOW))
+ dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
+
+ return cnt;
+}
+
+int
+dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen)
+{
+ uint8 *frame;
+ uint16 len;
+ uint32 swheader;
+ uint retries = 0;
+ bcmsdh_info_t *sdh = bus->sdh;
+ uint8 doff = 0;
+ int ret = -1;
+ int i;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus->dhd->dongle_reset)
+ return -EIO;
+
+	/* Back up the pointer to make room for the bus header */
+ frame = msg - SDPCM_HDRLEN;
+ len = (msglen += SDPCM_HDRLEN);
+
+ /* Add alignment padding (optional for ctl frames) */
+ if (dhd_alignctl) {
+ if ((doff = ((uintptr)frame % DHD_SDALIGN))) {
+ frame -= doff;
+ len += doff;
+ msglen += doff;
+ bzero(frame, doff + SDPCM_HDRLEN);
+ }
+ ASSERT(doff < DHD_SDALIGN);
+ }
+ doff += SDPCM_HDRLEN;
+
+ /* Round send length to next SDIO block */
+ if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+ uint16 pad = bus->blocksize - (len % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize))
+ len += pad;
+ } else if (len % DHD_SDALIGN) {
+ len += DHD_SDALIGN - (len % DHD_SDALIGN);
+ }
+
+ /* Satisfy length-alignment requirements */
+ if (forcealign && (len & (ALIGNMENT - 1)))
+ len = ROUNDUP(len, ALIGNMENT);
+
+ ASSERT(ISALIGNED((uintptr)frame, 2));
+
+
+ /* Need to lock here to protect txseq and SDIO tx calls */
+ dhd_os_sdlock(bus->dhd);
+
+ BUS_WAKE(bus);
+
+ /* Make sure backplane clock is on */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
+ *(uint16*)frame = htol16((uint16)msglen);
+ *(((uint16*)frame) + 1) = htol16(~msglen);
+
+ /* Software tag: channel, sequence number, data offset */
+ swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
+ | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+ htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN);
+ htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
+
+ if (!TXCTLOK(bus)) {
+ DHD_INFO(("%s: No bus credit bus->tx_max %d, bus->tx_seq %d\n",
+ __FUNCTION__, bus->tx_max, bus->tx_seq));
+ bus->ctrl_frame_stat = TRUE;
+ /* Send from dpc */
+ bus->ctrl_frame_buf = frame;
+ bus->ctrl_frame_len = len;
+
+ dhd_wait_for_event(bus->dhd, &bus->ctrl_frame_stat);
+
+ if (bus->ctrl_frame_stat == FALSE) {
+ DHD_INFO(("%s: ctrl_frame_stat == FALSE\n", __FUNCTION__));
+ ret = 0;
+ } else {
+ bus->dhd->txcnt_timeout++;
+ if (!bus->dhd->hang_was_sent)
+ DHD_ERROR(("%s: ctrl_frame_stat == TRUE txcnt_timeout=%d\n",
+ __FUNCTION__, bus->dhd->txcnt_timeout));
+ ret = -1;
+ bus->ctrl_frame_stat = FALSE;
+ goto done;
+ }
+ }
+
+ bus->dhd->txcnt_timeout = 0;
+
+ if (ret == -1) {
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() && DHD_CTL_ON()) {
+ prhex("Tx Frame", frame, len);
+ } else if (DHD_HDRS_ON()) {
+ prhex("TxHdr", frame, MIN(len, 16));
+ }
+#endif
+
+ do {
+ ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ frame, len, NULL, NULL, NULL);
+ ASSERT(ret != BCME_PENDING);
+
+ if (ret == BCME_NODEVICE) {
+ DHD_ERROR(("%s: Device asleep already\n", __FUNCTION__));
+ } else if (ret < 0) {
+ /* On failure, abort the command and terminate the frame */
+ DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n",
+ __FUNCTION__, ret));
+ bus->tx_sderrs++;
+
+ bcmsdh_abort(sdh, SDIO_FUNC_2);
+
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, NULL);
+ bus->f1regdata++;
+
+ for (i = 0; i < 3; i++) {
+ uint8 hi, lo;
+ hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+ lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+ bus->f1regdata += 2;
+ if ((hi == 0) && (lo == 0))
+ break;
+ }
+ }
+ if (ret == 0) {
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+ }
+ } while ((ret < 0) && retries++ < TXRETRIES);
+ }
+
+done:
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ bus->activity = FALSE;
+ dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+ }
+
+ dhd_os_sdunlock(bus->dhd);
+
+ if (ret)
+ bus->dhd->tx_ctlerrs++;
+ else
+ bus->dhd->tx_ctlpkts++;
+
+ if (bus->dhd->txcnt_timeout >= MAX_CNTL_TIMEOUT)
+ return -ETIMEDOUT;
+
+ return ret ? -EIO : 0;
+}
+
+int
+dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen)
+{
+ int timeleft;
+ uint rxlen = 0;
+ bool pending;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus->dhd->dongle_reset)
+ return -EIO;
+
+ /* Wait until control frame is available */
+ timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, &pending);
+
+ dhd_os_sdlock(bus->dhd);
+ rxlen = bus->rxlen;
+ bcopy(bus->rxctl, msg, MIN(msglen, rxlen));
+ bus->rxlen = 0;
+ dhd_os_sdunlock(bus->dhd);
+
+ if (rxlen) {
+ DHD_CTL(("%s: resumed on rxctl frame, got %d expected %d\n",
+ __FUNCTION__, rxlen, msglen));
+ } else if (timeleft == 0) {
+ DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
+#ifdef DHD_DEBUG
+ if (!SLPAUTO_ENAB(bus)) {
+ dhd_os_sdlock(bus->dhd);
+ dhdsdio_checkdied(bus, NULL, 0);
+ dhd_os_sdunlock(bus->dhd);
+ }
+#endif /* DHD_DEBUG */
+ } else if (pending == TRUE) {
+ DHD_CTL(("%s: canceled\n", __FUNCTION__));
+ return -ERESTARTSYS;
+ } else {
+ DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__));
+#ifdef DHD_DEBUG
+ dhd_os_sdlock(bus->dhd);
+ dhdsdio_checkdied(bus, NULL, 0);
+ dhd_os_sdunlock(bus->dhd);
+#endif /* DHD_DEBUG */
+ }
+ if (timeleft == 0) {
+ bus->dhd->rxcnt_timeout++;
+ DHD_ERROR(("%s: rxcnt_timeout=%d\n", __FUNCTION__, bus->dhd->rxcnt_timeout));
+ } else
+ bus->dhd->rxcnt_timeout = 0;
+
+ if (rxlen)
+ bus->dhd->rx_ctlpkts++;
+ else
+ bus->dhd->rx_ctlerrs++;
+
+ if (bus->dhd->rxcnt_timeout >= MAX_CNTL_TIMEOUT)
+ return -ETIMEDOUT;
+
+ return rxlen ? (int)rxlen : -EIO;
+}
+
+/* IOVar table */
+enum {
+ IOV_INTR = 1,
+ IOV_POLLRATE,
+ IOV_SDREG,
+ IOV_SBREG,
+ IOV_SDCIS,
+ IOV_MEMBYTES,
+ IOV_MEMSIZE,
+#ifdef DHD_DEBUG
+ IOV_CHECKDIED,
+ IOV_SERIALCONS,
+#endif /* DHD_DEBUG */
+ IOV_SET_DOWNLOAD_STATE,
+ IOV_SOCRAM_STATE,
+ IOV_FORCEEVEN,
+ IOV_SDIOD_DRIVE,
+ IOV_READAHEAD,
+ IOV_SDRXCHAIN,
+ IOV_ALIGNCTL,
+ IOV_SDALIGN,
+ IOV_DEVRESET,
+ IOV_CPU,
+#ifdef SDTEST
+ IOV_PKTGEN,
+ IOV_EXTLOOP,
+#endif /* SDTEST */
+ IOV_SPROM,
+ IOV_TXBOUND,
+ IOV_RXBOUND,
+ IOV_TXMINMAX,
+ IOV_IDLETIME,
+ IOV_IDLECLOCK,
+ IOV_SD1IDLE,
+ IOV_SLEEP,
+ IOV_DONGLEISOLATION,
+ IOV_KSO,
+ IOV_DEVSLEEP,
+ IOV_DEVCAP,
+ IOV_VARS,
+#ifdef SOFTAP
+ IOV_FWPATH
+#endif
+};
+
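+/* Each entry: iovar name, id, flags, value type, minimum buffer length */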
+const bcm_iovar_t dhdsdio_iovars[] = {
+ {"intr", IOV_INTR, 0, IOVT_BOOL, 0 },
+ {"sleep", IOV_SLEEP, 0, IOVT_BOOL, 0 },
+ {"pollrate", IOV_POLLRATE, 0, IOVT_UINT32, 0 },
+ {"idletime", IOV_IDLETIME, 0, IOVT_INT32, 0 },
+ {"idleclock", IOV_IDLECLOCK, 0, IOVT_INT32, 0 },
+ {"sd1idle", IOV_SD1IDLE, 0, IOVT_BOOL, 0 },
+ {"membytes", IOV_MEMBYTES, 0, IOVT_BUFFER, 2 * sizeof(int) },
+ {"memsize", IOV_MEMSIZE, 0, IOVT_UINT32, 0 },
+ {"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, IOVT_BOOL, 0 },
+ {"socram_state", IOV_SOCRAM_STATE, 0, IOVT_BOOL, 0 },
+ {"vars", IOV_VARS, 0, IOVT_BUFFER, 0 },
+ {"sdiod_drive", IOV_SDIOD_DRIVE, 0, IOVT_UINT32, 0 },
+ {"readahead", IOV_READAHEAD, 0, IOVT_BOOL, 0 },
+ {"sdrxchain", IOV_SDRXCHAIN, 0, IOVT_BOOL, 0 },
+ {"alignctl", IOV_ALIGNCTL, 0, IOVT_BOOL, 0 },
+ {"sdalign", IOV_SDALIGN, 0, IOVT_BOOL, 0 },
+ {"devreset", IOV_DEVRESET, 0, IOVT_BOOL, 0 },
+#ifdef DHD_DEBUG
+ {"sdreg", IOV_SDREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sbreg", IOV_SBREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sd_cis", IOV_SDCIS, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN },
+ {"forcealign", IOV_FORCEEVEN, 0, IOVT_BOOL, 0 },
+ {"txbound", IOV_TXBOUND, 0, IOVT_UINT32, 0 },
+ {"rxbound", IOV_RXBOUND, 0, IOVT_UINT32, 0 },
+ {"txminmax", IOV_TXMINMAX, 0, IOVT_UINT32, 0 },
+ {"cpu", IOV_CPU, 0, IOVT_BOOL, 0 },
+#ifdef DHD_DEBUG
+ {"checkdied", IOV_CHECKDIED, 0, IOVT_BUFFER, 0 },
+ {"serial", IOV_SERIALCONS, 0, IOVT_UINT32, 0 },
+#endif /* DHD_DEBUG */
+#endif /* DHD_DEBUG */
+#ifdef SDTEST
+ {"extloop", IOV_EXTLOOP, 0, IOVT_BOOL, 0 },
+ {"pktgen", IOV_PKTGEN, 0, IOVT_BUFFER, sizeof(dhd_pktgen_t) },
+#endif /* SDTEST */
+ {"devcap", IOV_DEVCAP, 0, IOVT_UINT32, 0 },
+ {"dngl_isolation", IOV_DONGLEISOLATION, 0, IOVT_UINT32, 0 },
+ {"kso", IOV_KSO, 0, IOVT_UINT32, 0 },
+ {"devsleep", IOV_DEVSLEEP, 0, IOVT_UINT32, 0 },
+#ifdef SOFTAP
+ {"fwpath", IOV_FWPATH, 0, IOVT_BUFFER, 0 },
+#endif
+ {NULL, 0, 0, 0, 0 }
+};
+
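+/* Print "<desc> num/div" as a fixed-point value with two decimal places ("N/A" if div is zero) */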
+static void
+dhd_dump_pct(struct bcmstrbuf *strbuf, char *desc, uint num, uint div)
+{
+ uint q1, q2;
+
+ if (!div) {
+ bcm_bprintf(strbuf, "%s N/A", desc);
+ } else {
+ q1 = num / div;
+ q2 = (100 * (num - (q1 * div))) / div;
+ bcm_bprintf(strbuf, "%s %d.%02d", desc, q1, q2);
+ }
+}
+
+void
+dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+ dhd_bus_t *bus = dhdp->bus;
+
+ bcm_bprintf(strbuf, "Bus SDIO structure:\n");
+ bcm_bprintf(strbuf, "hostintmask 0x%08x intstatus 0x%08x sdpcm_ver %d\n",
+ bus->hostintmask, bus->intstatus, bus->sdpcm_ver);
+ bcm_bprintf(strbuf, "fcstate %d qlen %d tx_seq %d, max %d, rxskip %d rxlen %d rx_seq %d\n",
+ bus->fcstate, pktq_len(&bus->txq), bus->tx_seq, bus->tx_max, bus->rxskip,
+ bus->rxlen, bus->rx_seq);
+ bcm_bprintf(strbuf, "intr %d intrcount %d lastintrs %d spurious %d\n",
+ bus->intr, bus->intrcount, bus->lastintrs, bus->spurious);
+ bcm_bprintf(strbuf, "pollrate %d pollcnt %d regfails %d\n",
+ bus->pollrate, bus->pollcnt, bus->regfails);
+
+ bcm_bprintf(strbuf, "\nAdditional counters:\n");
+ bcm_bprintf(strbuf, "tx_sderrs %d fcqueued %d rxrtx %d rx_toolong %d rxc_errors %d\n",
+ bus->tx_sderrs, bus->fcqueued, bus->rxrtx, bus->rx_toolong,
+ bus->rxc_errors);
+ bcm_bprintf(strbuf, "rx_hdrfail %d badhdr %d badseq %d\n",
+ bus->rx_hdrfail, bus->rx_badhdr, bus->rx_badseq);
+ bcm_bprintf(strbuf, "fc_rcvd %d, fc_xoff %d, fc_xon %d\n",
+ bus->fc_rcvd, bus->fc_xoff, bus->fc_xon);
+ bcm_bprintf(strbuf, "rxglomfail %d, rxglomframes %d, rxglompkts %d\n",
+ bus->rxglomfail, bus->rxglomframes, bus->rxglompkts);
+ bcm_bprintf(strbuf, "f2rx (hdrs/data) %d (%d/%d), f2tx %d f1regs %d\n",
+ (bus->f2rxhdrs + bus->f2rxdata), bus->f2rxhdrs, bus->f2rxdata,
+ bus->f2txdata, bus->f1regdata);
+ {
+ dhd_dump_pct(strbuf, "\nRx: pkts/f2rd", bus->dhd->rx_packets,
+ (bus->f2rxhdrs + bus->f2rxdata));
+ dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->rx_packets, bus->f1regdata);
+ dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->rx_packets,
+ (bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata));
+ dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->rx_packets, bus->intrcount);
+ bcm_bprintf(strbuf, "\n");
+
+ dhd_dump_pct(strbuf, "Rx: glom pct", (100 * bus->rxglompkts),
+ bus->dhd->rx_packets);
+ dhd_dump_pct(strbuf, ", pkts/glom", bus->rxglompkts, bus->rxglomframes);
+ bcm_bprintf(strbuf, "\n");
+
+ dhd_dump_pct(strbuf, "Tx: pkts/f2wr", bus->dhd->tx_packets, bus->f2txdata);
+ dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->tx_packets, bus->f1regdata);
+ dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->tx_packets,
+ (bus->f2txdata + bus->f1regdata));
+ dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->tx_packets, bus->intrcount);
+ bcm_bprintf(strbuf, "\n");
+
+ dhd_dump_pct(strbuf, "Total: pkts/f2rw",
+ (bus->dhd->tx_packets + bus->dhd->rx_packets),
+ (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata));
+ dhd_dump_pct(strbuf, ", pkts/f1sd",
+ (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->f1regdata);
+ dhd_dump_pct(strbuf, ", pkts/sd",
+ (bus->dhd->tx_packets + bus->dhd->rx_packets),
+ (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata));
+ dhd_dump_pct(strbuf, ", pkts/int",
+ (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->intrcount);
+ bcm_bprintf(strbuf, "\n\n");
+ }
+
+#ifdef SDTEST
+ if (bus->pktgen_count) {
+ bcm_bprintf(strbuf, "pktgen config and count:\n");
+ bcm_bprintf(strbuf, "freq %d count %d print %d total %d min %d len %d\n",
+ bus->pktgen_freq, bus->pktgen_count, bus->pktgen_print,
+ bus->pktgen_total, bus->pktgen_minlen, bus->pktgen_maxlen);
+ bcm_bprintf(strbuf, "send attempts %d rcvd %d fail %d\n",
+ bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail);
+ }
+#endif /* SDTEST */
+#ifdef DHD_DEBUG
+ bcm_bprintf(strbuf, "dpc_sched %d host interrupt%spending\n",
+ bus->dpc_sched, (bcmsdh_intr_pending(bus->sdh) ? " " : " not "));
+ bcm_bprintf(strbuf, "blocksize %d roundup %d\n", bus->blocksize, bus->roundup);
+#endif /* DHD_DEBUG */
+ bcm_bprintf(strbuf, "clkstate %d activity %d idletime %d idlecount %d sleeping %d\n",
+ bus->clkstate, bus->activity, bus->idletime, bus->idlecount, bus->sleeping);
+}
+
+void
+dhd_bus_clearcounts(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus;
+
+ bus->intrcount = bus->lastintrs = bus->spurious = bus->regfails = 0;
+ bus->rxrtx = bus->rx_toolong = bus->rxc_errors = 0;
+ bus->rx_hdrfail = bus->rx_badhdr = bus->rx_badseq = 0;
+ bus->tx_sderrs = bus->fc_rcvd = bus->fc_xoff = bus->fc_xon = 0;
+ bus->rxglomfail = bus->rxglomframes = bus->rxglompkts = 0;
+ bus->f2rxhdrs = bus->f2rxdata = bus->f2txdata = bus->f1regdata = 0;
+}
+
+#ifdef SDTEST
+static int
+dhdsdio_pktgen_get(dhd_bus_t *bus, uint8 *arg)
+{
+ dhd_pktgen_t pktgen;
+
+ pktgen.version = DHD_PKTGEN_VERSION;
+ pktgen.freq = bus->pktgen_freq;
+ pktgen.count = bus->pktgen_count;
+ pktgen.print = bus->pktgen_print;
+ pktgen.total = bus->pktgen_total;
+ pktgen.minlen = bus->pktgen_minlen;
+ pktgen.maxlen = bus->pktgen_maxlen;
+ pktgen.numsent = bus->pktgen_sent;
+ pktgen.numrcvd = bus->pktgen_rcvd;
+ pktgen.numfail = bus->pktgen_fail;
+ pktgen.mode = bus->pktgen_mode;
+ pktgen.stop = bus->pktgen_stop;
+
+ bcopy(&pktgen, arg, sizeof(pktgen));
+
+ return 0;
+}
+
+static int
+dhdsdio_pktgen_set(dhd_bus_t *bus, uint8 *arg)
+{
+ dhd_pktgen_t pktgen;
+ uint oldcnt, oldmode;
+
+ bcopy(arg, &pktgen, sizeof(pktgen));
+ if (pktgen.version != DHD_PKTGEN_VERSION)
+ return BCME_BADARG;
+
+ oldcnt = bus->pktgen_count;
+ oldmode = bus->pktgen_mode;
+
+ bus->pktgen_freq = pktgen.freq;
+ bus->pktgen_count = pktgen.count;
+ bus->pktgen_print = pktgen.print;
+ bus->pktgen_total = pktgen.total;
+ bus->pktgen_minlen = pktgen.minlen;
+ bus->pktgen_maxlen = pktgen.maxlen;
+ bus->pktgen_mode = pktgen.mode;
+ bus->pktgen_stop = pktgen.stop;
+
+ bus->pktgen_tick = bus->pktgen_ptick = 0;
+ bus->pktgen_len = MAX(bus->pktgen_len, bus->pktgen_minlen);
+ bus->pktgen_len = MIN(bus->pktgen_len, bus->pktgen_maxlen);
+
+ /* Clear counts for a new pktgen (mode change, or was stopped) */
+ if (bus->pktgen_count && (!oldcnt || oldmode != bus->pktgen_mode))
+ bus->pktgen_sent = bus->pktgen_rcvd = bus->pktgen_fail = 0;
+
+ return 0;
+}
+#endif /* SDTEST */
+
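+/* Read the current socdevram settings and write them back with the remap bit set or cleared */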
+static void
+dhdsdio_devram_remap(dhd_bus_t *bus, bool val)
+{
+ uint8 enable, protect, remap;
+
+ si_socdevram(bus->sih, FALSE, &enable, &protect, &remap);
+ remap = val ? TRUE : FALSE;
+ si_socdevram(bus->sih, TRUE, &enable, &protect, &remap);
+}
+
+static int
+dhdsdio_membytes(dhd_bus_t *bus, bool write, uint32 address, uint8 *data, uint size)
+{
+ int bcmerror = 0;
+ uint32 sdaddr;
+ uint dsize;
+
+ /* In remap mode, adjust addresses beyond socram and redirect them
+ * to devram at SOCDEVRAM_BP_ADDR, since remap addresses above
+ * orig_ramsize are not backplane accessible.
+ */
+ if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address)) {
+ address -= bus->orig_ramsize;
+ address += SOCDEVRAM_BP_ADDR;
+ }
+
+ /* Determine initial transfer parameters */
+ sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
+ if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
+ dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
+ else
+ dsize = size;
+
+ /* Set the backplane window to include the start address */
+ if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) {
+ DHD_ERROR(("%s: window change failed\n", __FUNCTION__));
+ goto xfer_done;
+ }
+
+ /* Do the transfer(s) */
+ while (size) {
+ DHD_INFO(("%s: %s %d bytes at offset 0x%08x in window 0x%08x\n",
+ __FUNCTION__, (write ? "write" : "read"), dsize, sdaddr,
+ (address & SBSDIO_SBWINDOW_MASK)));
+ if ((bcmerror = bcmsdh_rwdata(bus->sdh, write, sdaddr, data, dsize))) {
+ DHD_ERROR(("%s: membytes transfer failed\n", __FUNCTION__));
+ break;
+ }
+
+ /* Adjust for next transfer (if any) */
+ if ((size -= dsize)) {
+ data += dsize;
+ address += dsize;
+ if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) {
+ DHD_ERROR(("%s: window change failed\n", __FUNCTION__));
+ break;
+ }
+ sdaddr = 0;
+ dsize = MIN(SBSDIO_SB_OFT_ADDR_LIMIT, size);
+ }
+
+ }
+
+xfer_done:
+ /* Return the window to backplane enumeration space for core access */
+ if (dhdsdio_set_siaddr_window(bus, bcmsdh_cur_sbwad(bus->sdh))) {
+ DHD_ERROR(("%s: FAILED to set window back to 0x%x\n", __FUNCTION__,
+ bcmsdh_cur_sbwad(bus->sdh)));
+ }
+
+ return bcmerror;
+}
+
+#ifdef DHD_DEBUG
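+/* Locate the sdpcm_shared structure in dongle RAM (its address is kept in the last word of memory) and read it out */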
+static int
+dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh)
+{
+ uint32 addr;
+ int rv, i;
+ uint32 shaddr = 0;
+
+ shaddr = bus->ramsize - 4;
+
+ i = 0;
+ do {
+ /* Read last word in memory to determine address of sdpcm_shared structure */
+ if ((rv = dhdsdio_membytes(bus, FALSE, shaddr, (uint8 *)&addr, 4)) < 0)
+ return rv;
+
+ addr = ltoh32(addr);
+
+ DHD_INFO(("sdpcm_shared address 0x%08X\n", addr));
+
+ /*
+ * Check if addr is valid.
+ * NVRAM length at the end of memory should have been overwritten.
+ */
+ if (addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff)) {
+ if ((bus->srmemsize > 0) && (i++ == 0)) {
+ shaddr -= bus->srmemsize;
+ } else {
+ DHD_ERROR(("%s: address (0x%08x) of sdpcm_shared invalid\n",
+ __FUNCTION__, addr));
+ return BCME_ERROR;
+ }
+ } else
+ break;
+ } while (i < 2);
+
+ /* Read hndrte_shared structure */
+ if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)sh, sizeof(sdpcm_shared_t))) < 0)
+ return rv;
+
+ /* Endianness */
+ sh->flags = ltoh32(sh->flags);
+ sh->trap_addr = ltoh32(sh->trap_addr);
+ sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
+ sh->assert_file_addr = ltoh32(sh->assert_file_addr);
+ sh->assert_line = ltoh32(sh->assert_line);
+ sh->console_addr = ltoh32(sh->console_addr);
+ sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
+
+ if ((sh->flags & SDPCM_SHARED_VERSION_MASK) == 3 && SDPCM_SHARED_VERSION == 1)
+ return BCME_OK;
+
+ if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) {
+ DHD_ERROR(("%s: sdpcm_shared version %d in dhd "
+ "is different than sdpcm_shared version %d in dongle\n",
+ __FUNCTION__, SDPCM_SHARED_VERSION,
+ sh->flags & SDPCM_SHARED_VERSION_MASK));
+ return BCME_ERROR;
+ }
+
+ return BCME_OK;
+}
+
+#define CONSOLE_LINE_MAX 192
+
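+/* Read the dongle's circular console log buffer from RAM and print any complete new lines */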
+static int
+dhdsdio_readconsole(dhd_bus_t *bus)
+{
+ dhd_console_t *c = &bus->console;
+ uint8 line[CONSOLE_LINE_MAX], ch;
+ uint32 n, idx, addr;
+ int rv;
+
+ /* Don't do anything until FWREADY updates console address */
+ if (bus->console_addr == 0)
+ return 0;
+
+ if (!KSO_ENAB(bus))
+ return 0;
+
+ /* Read console log struct */
+ addr = bus->console_addr + OFFSETOF(hndrte_cons_t, log);
+ if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
+ return rv;
+
+ /* Allocate console buffer (one time only) */
+ if (c->buf == NULL) {
+ c->bufsize = ltoh32(c->log.buf_size);
+ if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
+ return BCME_NOMEM;
+ }
+
+ idx = ltoh32(c->log.idx);
+
+ /* Protect against corrupt value */
+ if (idx > c->bufsize)
+ return BCME_ERROR;
+
+ /* Skip reading the console buffer if the index pointer has not moved */
+ if (idx == c->last)
+ return BCME_OK;
+
+ /* Read the console buffer */
+ addr = ltoh32(c->log.buf);
+ if ((rv = dhdsdio_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0)
+ return rv;
+
+ while (c->last != idx) {
+ for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+ if (c->last == idx) {
+ /* This would output a partial line. Instead, back up
+ * the buffer pointer and output this line next time around.
+ */
+ if (c->last >= n)
+ c->last -= n;
+ else
+ c->last = c->bufsize - n;
+ goto break2;
+ }
+ ch = c->buf[c->last];
+ c->last = (c->last + 1) % c->bufsize;
+ if (ch == '\n')
+ break;
+ line[n] = ch;
+ }
+
+ if (n > 0) {
+ if (line[n - 1] == '\r')
+ n--;
+ line[n] = 0;
+ printf("CONSOLE: %s\n", line);
+ }
+ }
+break2:
+
+ return BCME_OK;
+}
+
+static int
+dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size)
+{
+ int bcmerror = 0;
+ uint msize = 512;
+ char *mbuffer = NULL;
+ char *console_buffer = NULL;
+ uint maxstrlen = 256;
+ char *str = NULL;
+ trap_t tr;
+ sdpcm_shared_t sdpcm_shared;
+ struct bcmstrbuf strbuf;
+ uint32 console_ptr, console_size, console_index;
+ uint8 line[CONSOLE_LINE_MAX], ch;
+ uint32 n, i, addr;
+ int rv;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (data == NULL) {
+ /*
+ * Called after an rx ctrl timeout, so "data" is NULL;
+ * allocate memory to trace the trap or assert.
+ */
+ size = msize;
+ mbuffer = data = MALLOC(bus->dhd->osh, msize);
+ if (mbuffer == NULL) {
+ DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
+ bcmerror = BCME_NOMEM;
+ goto done;
+ }
+ }
+
+ if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
+ DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
+ bcmerror = BCME_NOMEM;
+ goto done;
+ }
+
+ if ((bcmerror = dhdsdio_readshared(bus, &sdpcm_shared)) < 0)
+ goto done;
+
+ bcm_binit(&strbuf, data, size);
+
+ bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n",
+ sdpcm_shared.msgtrace_addr, sdpcm_shared.console_addr);
+
+ if ((sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
+ /* NOTE: Misspelled assert is intentional - DO NOT FIX.
+ * (Avoids conflict with real asserts for programmatic parsing of output.)
+ */
+ bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
+ }
+
+ if ((sdpcm_shared.flags & (SDPCM_SHARED_ASSERT|SDPCM_SHARED_TRAP)) == 0) {
+ /* NOTE: Misspelled assert is intentional - DO NOT FIX.
+ * (Avoids conflict with real asserts for programmatic parsing of output.)
+ */
+ bcm_bprintf(&strbuf, "No trap%s in dongle",
+ (sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT)
+ ?"/assrt" :"");
+ } else {
+ if (sdpcm_shared.flags & SDPCM_SHARED_ASSERT) {
+ /* Download assert */
+ bcm_bprintf(&strbuf, "Dongle assert");
+ if (sdpcm_shared.assert_exp_addr != 0) {
+ str[0] = '\0';
+ if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+ sdpcm_shared.assert_exp_addr,
+ (uint8 *)str, maxstrlen)) < 0)
+ goto done;
+
+ str[maxstrlen - 1] = '\0';
+ bcm_bprintf(&strbuf, " expr \"%s\"", str);
+ }
+
+ if (sdpcm_shared.assert_file_addr != 0) {
+ str[0] = '\0';
+ if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+ sdpcm_shared.assert_file_addr,
+ (uint8 *)str, maxstrlen)) < 0)
+ goto done;
+
+ str[maxstrlen - 1] = '\0';
+ bcm_bprintf(&strbuf, " file \"%s\"", str);
+ }
+
+ bcm_bprintf(&strbuf, " line %d ", sdpcm_shared.assert_line);
+ }
+
+ if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) {
+ if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+ sdpcm_shared.trap_addr,
+ (uint8*)&tr, sizeof(trap_t))) < 0)
+ goto done;
+
+ bcm_bprintf(&strbuf,
+ "Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
+ "lp 0x%x, rpc 0x%x Trap offset 0x%x, "
+ "r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
+ "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n",
+ ltoh32(tr.type), ltoh32(tr.epc), ltoh32(tr.cpsr), ltoh32(tr.spsr),
+ ltoh32(tr.r13), ltoh32(tr.r14), ltoh32(tr.pc),
+ ltoh32(sdpcm_shared.trap_addr),
+ ltoh32(tr.r0), ltoh32(tr.r1), ltoh32(tr.r2), ltoh32(tr.r3),
+ ltoh32(tr.r4), ltoh32(tr.r5), ltoh32(tr.r6), ltoh32(tr.r7));
+
+ addr = sdpcm_shared.console_addr + OFFSETOF(hndrte_cons_t, log);
+ if ((rv = dhdsdio_membytes(bus, FALSE, addr,
+ (uint8 *)&console_ptr, sizeof(console_ptr))) < 0)
+ goto printbuf;
+
+ addr = sdpcm_shared.console_addr + OFFSETOF(hndrte_cons_t, log.buf_size);
+ if ((rv = dhdsdio_membytes(bus, FALSE, addr,
+ (uint8 *)&console_size, sizeof(console_size))) < 0)
+ goto printbuf;
+
+ addr = sdpcm_shared.console_addr + OFFSETOF(hndrte_cons_t, log.idx);
+ if ((rv = dhdsdio_membytes(bus, FALSE, addr,
+ (uint8 *)&console_index, sizeof(console_index))) < 0)
+ goto printbuf;
+
+ console_ptr = ltoh32(console_ptr);
+ console_size = ltoh32(console_size);
+ console_index = ltoh32(console_index);
+
+ if (console_size > CONSOLE_BUFFER_MAX ||
+ !(console_buffer = MALLOC(bus->dhd->osh, console_size)))
+ goto printbuf;
+
+ if ((rv = dhdsdio_membytes(bus, FALSE, console_ptr,
+ (uint8 *)console_buffer, console_size)) < 0)
+ goto printbuf;
+
+ for (i = 0, n = 0; i < console_size; i += n + 1) {
+ for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+ ch = console_buffer[(console_index + i + n) % console_size];
+ if (ch == '\n')
+ break;
+ line[n] = ch;
+ }
+
+
+ if (n > 0) {
+ if (line[n - 1] == '\r')
+ n--;
+ line[n] = 0;
+ /* Don't use DHD_ERROR macro since we print
+ * a lot of information quickly. The macro
+ * will truncate a lot of the printfs
+ */
+
+ if (dhd_msg_level & DHD_ERROR_VAL)
+ printf("CONSOLE: %s\n", line);
+ }
+ }
+ }
+ }
+
+printbuf:
+ if (sdpcm_shared.flags & (SDPCM_SHARED_ASSERT | SDPCM_SHARED_TRAP)) {
+ DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf));
+ }
+
+
+done:
+ if (mbuffer)
+ MFREE(bus->dhd->osh, mbuffer, msize);
+ if (str)
+ MFREE(bus->dhd->osh, str, maxstrlen);
+ if (console_buffer)
+ MFREE(bus->dhd->osh, console_buffer, console_size);
+
+ return bcmerror;
+}
+#endif /* #ifdef DHD_DEBUG */
+
+
+int
+dhdsdio_downloadvars(dhd_bus_t *bus, void *arg, int len)
+{
+ int bcmerror = BCME_OK;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* Basic sanity checks */
+ if (bus->dhd->up) {
+ bcmerror = BCME_NOTDOWN;
+ goto err;
+ }
+ if (!len) {
+ bcmerror = BCME_BUFTOOSHORT;
+ goto err;
+ }
+
+ /* Free the old ones and replace with passed variables */
+ if (bus->vars)
+ MFREE(bus->dhd->osh, bus->vars, bus->varsz);
+
+ bus->vars = MALLOC(bus->dhd->osh, len);
+ bus->varsz = bus->vars ? len : 0;
+ if (bus->vars == NULL) {
+ bcmerror = BCME_NOMEM;
+ goto err;
+ }
+
+ /* Copy the passed variables, which should include the terminating double-null */
+ bcopy(arg, bus->vars, bus->varsz);
+err:
+ return bcmerror;
+}
+
+#ifdef DHD_DEBUG
+
+#define CC_PLL_CHIPCTRL_SERIAL_ENAB (1 << 24)
+#define CC_CHIPCTRL_JTAG_SEL (1 << 3)
+#define CC_CHIPCTRL_GPIO_SEL (0x3)
+#define CC_PLL_CHIPCTRL_SERIAL_ENAB_4334 (1 << 28)
+
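+/* Query or configure the dongle serial console enable bit via the chipcontrol registers; with set==FALSE only the current state is returned */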
+static int
+dhd_serialconsole(dhd_bus_t *bus, bool set, bool enable, int *bcmerror)
+{
+ int int_val;
+ uint32 addr, data, uart_enab = 0;
+ uint32 jtag_sel = CC_CHIPCTRL_JTAG_SEL;
+ uint32 gpio_sel = CC_CHIPCTRL_GPIO_SEL;
+
+ addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
+ data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
+ *bcmerror = 0;
+
+ bcmsdh_reg_write(bus->sdh, addr, 4, 1);
+ if (bcmsdh_regfail(bus->sdh)) {
+ *bcmerror = BCME_SDIO_ERROR;
+ return -1;
+ }
+ int_val = bcmsdh_reg_read(bus->sdh, data, 4);
+ if (bcmsdh_regfail(bus->sdh)) {
+ *bcmerror = BCME_SDIO_ERROR;
+ return -1;
+ }
+ if (bus->sih->chip == BCM4330_CHIP_ID) {
+ uart_enab = CC_PLL_CHIPCTRL_SERIAL_ENAB;
+ } else if (bus->sih->chip == BCM4334_CHIP_ID) {
+ if (enable) {
+ /* Moved to PMU chipcontrol 1 from 4330 */
+ int_val &= ~gpio_sel;
+ int_val |= jtag_sel;
+ } else {
+ int_val |= gpio_sel;
+ int_val &= ~jtag_sel;
+ }
+ uart_enab = CC_PLL_CHIPCTRL_SERIAL_ENAB_4334;
+ }
+
+ if (!set)
+ return (int_val & uart_enab);
+ if (enable)
+ int_val |= uart_enab;
+ else
+ int_val &= ~uart_enab;
+ bcmsdh_reg_write(bus->sdh, data, 4, int_val);
+ if (bcmsdh_regfail(bus->sdh)) {
+ *bcmerror = BCME_SDIO_ERROR;
+ return -1;
+ }
+ if (bus->sih->chip == BCM4330_CHIP_ID) {
+ uint32 chipcontrol;
+ addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol);
+ chipcontrol = bcmsdh_reg_read(bus->sdh, addr, 4);
+ chipcontrol &= ~jtag_sel;
+ if (enable) {
+ chipcontrol |= jtag_sel;
+ chipcontrol &= ~gpio_sel;
+ }
+ bcmsdh_reg_write(bus->sdh, addr, 4, chipcontrol);
+ }
+
+ return (int_val & uart_enab);
+}
+#endif
+
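+/* Handle a single bus-layer iovar get/set matched against dhdsdio_iovars */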
+static int
+dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+ void *params, int plen, void *arg, int len, int val_size)
+{
+ int bcmerror = 0;
+ int32 int_val = 0;
+ bool bool_val = 0;
+
+ DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
+ __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
+
+ if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+ goto exit;
+
+ if (plen >= (int)sizeof(int_val))
+ bcopy(params, &int_val, sizeof(int_val));
+
+ bool_val = (int_val != 0) ? TRUE : FALSE;
+
+
+ /* Some ioctls use the bus */
+ dhd_os_sdlock(bus->dhd);
+
+ /* Check if dongle is in reset. If so, only allow DEVRESET iovars */
+ if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
+ actionid == IOV_GVAL(IOV_DEVRESET))) {
+ bcmerror = BCME_NOTREADY;
+ goto exit;
+ }
+
+ /*
+ * Special handling for keepSdioOn: New SDIO Wake-up Mechanism
+ */
+ if ((vi->varid == IOV_KSO) && (IOV_ISSET(actionid))) {
+ dhdsdio_clk_kso_iovar(bus, bool_val);
+ goto exit;
+ } else if ((vi->varid == IOV_DEVSLEEP) && (IOV_ISSET(actionid))) {
+ {
+ dhdsdio_clk_devsleep_iovar(bus, bool_val);
+ if (!SLPAUTO_ENAB(bus) && (bool_val == FALSE) && (bus->ipend)) {
+ DHD_ERROR(("INT pending in devsleep 1, dpc_sched: %d\n",
+ bus->dpc_sched));
+ if (!bus->dpc_sched) {
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+ }
+ }
+ }
+ goto exit;
+ }
+
+ /* Handle sleep stuff before any clock mucking */
+ if (vi->varid == IOV_SLEEP) {
+ if (IOV_ISSET(actionid)) {
+ bcmerror = dhdsdio_bussleep(bus, bool_val);
+ } else {
+ int_val = (int32)bus->sleeping;
+ bcopy(&int_val, arg, val_size);
+ }
+ goto exit;
+ }
+
+ /* Request clock to allow SDIO accesses */
+ if (!bus->dhd->dongle_reset) {
+ BUS_WAKE(bus);
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ }
+
+ switch (actionid) {
+ case IOV_GVAL(IOV_INTR):
+ int_val = (int32)bus->intr;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_INTR):
+ bus->intr = bool_val;
+ bus->intdis = FALSE;
+ if (bus->dhd->up) {
+ if (bus->intr) {
+ DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__));
+ bcmsdh_intr_enable(bus->sdh);
+ } else {
+ DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+ bcmsdh_intr_disable(bus->sdh);
+ }
+ }
+ break;
+
+ case IOV_GVAL(IOV_POLLRATE):
+ int_val = (int32)bus->pollrate;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_POLLRATE):
+ bus->pollrate = (uint)int_val;
+ bus->poll = (bus->pollrate != 0);
+ break;
+
+ case IOV_GVAL(IOV_IDLETIME):
+ int_val = bus->idletime;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_IDLETIME):
+ if ((int_val < 0) && (int_val != DHD_IDLE_IMMEDIATE)) {
+ bcmerror = BCME_BADARG;
+ } else {
+ bus->idletime = int_val;
+ }
+ break;
+
+ case IOV_GVAL(IOV_IDLECLOCK):
+ int_val = (int32)bus->idleclock;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_IDLECLOCK):
+ bus->idleclock = int_val;
+ break;
+
+ case IOV_GVAL(IOV_SD1IDLE):
+ int_val = (int32)sd1idle;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SD1IDLE):
+ sd1idle = bool_val;
+ break;
+
+
+ case IOV_SVAL(IOV_MEMBYTES):
+ case IOV_GVAL(IOV_MEMBYTES):
+ {
+ uint32 address;
+ uint size, dsize;
+ uint8 *data;
+
+ bool set = (actionid == IOV_SVAL(IOV_MEMBYTES));
+
+ ASSERT(plen >= 2*sizeof(int));
+
+ address = (uint32)int_val;
+ bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val));
+ size = (uint)int_val;
+
+ /* Do some validation */
+ dsize = set ? plen - (2 * sizeof(int)) : len;
+ if (dsize < size) {
+ DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n",
+ __FUNCTION__, (set ? "set" : "get"), address, size, dsize));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x\n", __FUNCTION__,
+ (set ? "write" : "read"), size, address));
+
+ /* If we know about SOCRAM, check for a fit */
+ if ((bus->orig_ramsize) &&
+ ((address > bus->orig_ramsize) || (address + size > bus->orig_ramsize)))
+ {
+ uint8 enable, protect, remap;
+ si_socdevram(bus->sih, FALSE, &enable, &protect, &remap);
+ if (!enable || protect) {
+ DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d bytes at 0x%08x\n",
+ __FUNCTION__, bus->orig_ramsize, size, address));
+ DHD_ERROR(("%s: socram enable %d, protect %d\n",
+ __FUNCTION__, enable, protect));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ if (!REMAP_ENAB(bus) && (address >= SOCDEVRAM_ARM_ADDR)) {
+ uint32 devramsize = si_socdevram_size(bus->sih);
+ if ((address < SOCDEVRAM_ARM_ADDR) ||
+ (address + size > (SOCDEVRAM_ARM_ADDR + devramsize))) {
+ DHD_ERROR(("%s: bad address 0x%08x, size 0x%08x\n",
+ __FUNCTION__, address, size));
+ DHD_ERROR(("%s: socram range 0x%08x,size 0x%08x\n",
+ __FUNCTION__, SOCDEVRAM_ARM_ADDR, devramsize));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ /* Translate the ARM address into the real backplane address */
+ address -= SOCDEVRAM_ARM_ADDR;
+ address += SOCDEVRAM_BP_ADDR;
+ DHD_INFO(("%s: Request to %s %d bytes @ Mapped address 0x%08x\n",
+ __FUNCTION__, (set ? "write" : "read"), size, address));
+ } else if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address) && remap) {
+ /* Cannot access the remap region while the devram remap bit is set;
+ * ROM content would be returned in that case.
+ */
+ DHD_ERROR(("%s: Need to disable remap for address 0x%08x\n",
+ __FUNCTION__, address));
+ bcmerror = BCME_ERROR;
+ break;
+ }
+ }
+
+ /* Generate the actual data pointer */
+ data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg;
+
+ /* Call to do the transfer */
+ bcmerror = dhdsdio_membytes(bus, set, address, data, size);
+
+ break;
+ }
+
+ case IOV_GVAL(IOV_MEMSIZE):
+ int_val = (int32)bus->ramsize;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_SDIOD_DRIVE):
+ int_val = (int32)dhd_sdiod_drive_strength;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SDIOD_DRIVE):
+ dhd_sdiod_drive_strength = int_val;
+ si_sdiod_drive_strength_init(bus->sih, bus->dhd->osh, dhd_sdiod_drive_strength);
+ break;
+
+ case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
+ bcmerror = dhdsdio_download_state(bus, bool_val);
+ break;
+
+ case IOV_SVAL(IOV_SOCRAM_STATE):
+ bcmerror = dhdsdio_download_state(bus, bool_val);
+ break;
+
+ case IOV_SVAL(IOV_VARS):
+ bcmerror = dhdsdio_downloadvars(bus, arg, len);
+ break;
+
+ case IOV_GVAL(IOV_READAHEAD):
+ int_val = (int32)dhd_readahead;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_READAHEAD):
+ if (bool_val && !dhd_readahead)
+ bus->nextlen = 0;
+ dhd_readahead = bool_val;
+ break;
+
+ case IOV_GVAL(IOV_SDRXCHAIN):
+ int_val = (int32)bus->use_rxchain;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SDRXCHAIN):
+ if (bool_val && !bus->sd_rxchain)
+ bcmerror = BCME_UNSUPPORTED;
+ else
+ bus->use_rxchain = bool_val;
+ break;
+ case IOV_GVAL(IOV_ALIGNCTL):
+ int_val = (int32)dhd_alignctl;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_ALIGNCTL):
+ dhd_alignctl = bool_val;
+ break;
+
+ case IOV_GVAL(IOV_SDALIGN):
+ int_val = DHD_SDALIGN;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+#ifdef DHD_DEBUG
+ case IOV_GVAL(IOV_VARS):
+ if (bus->varsz < (uint)len)
+ bcopy(bus->vars, arg, bus->varsz);
+ else
+ bcmerror = BCME_BUFTOOSHORT;
+ break;
+#endif /* DHD_DEBUG */
+
+#ifdef DHD_DEBUG
+ case IOV_GVAL(IOV_SDREG):
+ {
+ sdreg_t *sd_ptr;
+ uint32 addr, size;
+
+ sd_ptr = (sdreg_t *)params;
+
+ addr = (uintptr)bus->regs + sd_ptr->offset;
+ size = sd_ptr->func;
+ int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
+ if (bcmsdh_regfail(bus->sdh))
+ bcmerror = BCME_SDIO_ERROR;
+ bcopy(&int_val, arg, sizeof(int32));
+ break;
+ }
+
+ case IOV_SVAL(IOV_SDREG):
+ {
+ sdreg_t *sd_ptr;
+ uint32 addr, size;
+
+ sd_ptr = (sdreg_t *)params;
+
+ addr = (uintptr)bus->regs + sd_ptr->offset;
+ size = sd_ptr->func;
+ bcmsdh_reg_write(bus->sdh, addr, size, sd_ptr->value);
+ if (bcmsdh_regfail(bus->sdh))
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+
+ /* Same as above, but offset is not backplane (not SDIO core) */
+ case IOV_GVAL(IOV_SBREG):
+ {
+ sdreg_t sdreg;
+ uint32 addr, size;
+
+ bcopy(params, &sdreg, sizeof(sdreg));
+
+ addr = SI_ENUM_BASE + sdreg.offset;
+ size = sdreg.func;
+ int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
+ if (bcmsdh_regfail(bus->sdh))
+ bcmerror = BCME_SDIO_ERROR;
+ bcopy(&int_val, arg, sizeof(int32));
+ break;
+ }
+
+ case IOV_SVAL(IOV_SBREG):
+ {
+ sdreg_t sdreg;
+ uint32 addr, size;
+
+ bcopy(params, &sdreg, sizeof(sdreg));
+
+ addr = SI_ENUM_BASE + sdreg.offset;
+ size = sdreg.func;
+ bcmsdh_reg_write(bus->sdh, addr, size, sdreg.value);
+ if (bcmsdh_regfail(bus->sdh))
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+
+ case IOV_GVAL(IOV_SDCIS):
+ {
+ *(char *)arg = 0;
+
+ bcmstrcat(arg, "\nFunc 0\n");
+ bcmsdh_cis_read(bus->sdh, 0x10, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+ bcmstrcat(arg, "\nFunc 1\n");
+ bcmsdh_cis_read(bus->sdh, 0x11, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+ bcmstrcat(arg, "\nFunc 2\n");
+ bcmsdh_cis_read(bus->sdh, 0x12, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+ break;
+ }
+
+ case IOV_GVAL(IOV_FORCEEVEN):
+ int_val = (int32)forcealign;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_FORCEEVEN):
+ forcealign = bool_val;
+ break;
+
+ case IOV_GVAL(IOV_TXBOUND):
+ int_val = (int32)dhd_txbound;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_TXBOUND):
+ dhd_txbound = (uint)int_val;
+ break;
+
+ case IOV_GVAL(IOV_RXBOUND):
+ int_val = (int32)dhd_rxbound;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_RXBOUND):
+ dhd_rxbound = (uint)int_val;
+ break;
+
+ case IOV_GVAL(IOV_TXMINMAX):
+ int_val = (int32)dhd_txminmax;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_TXMINMAX):
+ dhd_txminmax = (uint)int_val;
+ break;
+
+ case IOV_GVAL(IOV_SERIALCONS):
+ int_val = dhd_serialconsole(bus, FALSE, 0, &bcmerror);
+ if (bcmerror != 0)
+ break;
+
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SERIALCONS):
+ dhd_serialconsole(bus, TRUE, bool_val, &bcmerror);
+ break;
+
+
+
+#endif /* DHD_DEBUG */
+
+
+#ifdef SDTEST
+ case IOV_GVAL(IOV_EXTLOOP):
+ int_val = (int32)bus->ext_loop;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_EXTLOOP):
+ bus->ext_loop = bool_val;
+ break;
+
+ case IOV_GVAL(IOV_PKTGEN):
+ bcmerror = dhdsdio_pktgen_get(bus, arg);
+ break;
+
+ case IOV_SVAL(IOV_PKTGEN):
+ bcmerror = dhdsdio_pktgen_set(bus, arg);
+ break;
+#endif /* SDTEST */
+
+
+ case IOV_GVAL(IOV_DONGLEISOLATION):
+ int_val = bus->dhd->dongle_isolation;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DONGLEISOLATION):
+ bus->dhd->dongle_isolation = bool_val;
+ break;
+
+ case IOV_SVAL(IOV_DEVRESET):
+ DHD_TRACE(("%s: Called set IOV_DEVRESET=%d dongle_reset=%d busstate=%d\n",
+ __FUNCTION__, bool_val, bus->dhd->dongle_reset,
+ bus->dhd->busstate));
+
+ ASSERT(bus->dhd->osh);
+ /* ASSERT(bus->cl_devid); */
+
+ dhd_bus_devreset(bus->dhd, (uint8)bool_val);
+
+ break;
+#ifdef SOFTAP
+ case IOV_GVAL(IOV_FWPATH):
+ {
+ uint32 fw_path_len;
+
+ fw_path_len = strlen(bus->fw_path);
+ DHD_INFO(("[softap] get fwpath, l=%d\n", len));
+
+ if (fw_path_len > len-1) {
+ bcmerror = BCME_BUFTOOSHORT;
+ break;
+ }
+
+ if (fw_path_len) {
+ bcopy(bus->fw_path, arg, fw_path_len);
+ ((uchar*)arg)[fw_path_len] = 0;
+ }
+ break;
+ }
+
+ case IOV_SVAL(IOV_FWPATH):
+ DHD_INFO(("[softap] set fwpath, idx=%d\n", int_val));
+
+ switch (int_val) {
+ case 1:
+ bus->fw_path = fw_path; /* ordinary one */
+ break;
+ case 2:
+ bus->fw_path = fw_path2;
+ break;
+ default:
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ DHD_INFO(("[softap] new fw path: %s\n", (bus->fw_path[0] ? bus->fw_path : "NULL")));
+ break;
+
+#endif /* SOFTAP */
+ case IOV_GVAL(IOV_DEVRESET):
+ DHD_TRACE(("%s: Called get IOV_DEVRESET\n", __FUNCTION__));
+
+ /* Get its status */
+ int_val = (bool) bus->dhd->dongle_reset;
+ bcopy(&int_val, arg, val_size);
+
+ break;
+
+ case IOV_GVAL(IOV_KSO):
+ int_val = dhdsdio_sleepcsr_get(bus);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_DEVCAP):
+ int_val = dhdsdio_devcap_get(bus);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DEVCAP):
+ dhdsdio_devcap_set(bus, (uint8) int_val);
+ break;
+
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+
+exit:
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ bus->activity = FALSE;
+ dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+ }
+
+ dhd_os_sdunlock(bus->dhd);
+
+ return bcmerror;
+}
+
+static int
+dhdsdio_write_vars(dhd_bus_t *bus)
+{
+ int bcmerror = 0;
+ uint32 varsize, phys_size;
+ uint32 varaddr;
+ uint8 *vbuffer;
+ uint32 varsizew;
+#ifdef DHD_DEBUG
+ uint8 *nvram_ularray;
+#endif /* DHD_DEBUG */
+
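+ /* Vars are placed at the top of usable RAM, just below the last word, which holds a length/checksum token */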
+ /* Even if there are no vars to be written, we still need to set the ramsize. */
+ varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
+ varaddr = (bus->ramsize - 4) - varsize;
+
+ if (bus->vars) {
+ if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 7)) {
+ if (((varaddr & 0x3C) == 0x3C) && (varsize > 4)) {
+ DHD_ERROR(("PR85623WAR in place\n"));
+ varsize += 4;
+ varaddr -= 4;
+ }
+ }
+
+ vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
+ if (!vbuffer)
+ return BCME_NOMEM;
+
+ bzero(vbuffer, varsize);
+ bcopy(bus->vars, vbuffer, bus->varsz);
+
+ /* Write the vars list */
+ bcmerror = dhdsdio_membytes(bus, TRUE, varaddr, vbuffer, varsize);
+#ifdef DHD_DEBUG
+ /* Verify NVRAM bytes */
+ DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize));
+ nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
+ if (!nvram_ularray)
+ return BCME_NOMEM;
+
+ /* Upload image to verify downloaded contents. */
+ memset(nvram_ularray, 0xaa, varsize);
+
+ /* Read the vars list to temp buffer for comparison */
+ bcmerror = dhdsdio_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, varsize, varaddr));
+ }
+ /* Compare the org NVRAM with the one read from RAM */
+ if (memcmp(vbuffer, nvram_ularray, varsize)) {
+ DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
+ } else
+ DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
+ __FUNCTION__));
+
+ MFREE(bus->dhd->osh, nvram_ularray, varsize);
+#endif /* DHD_DEBUG */
+
+ MFREE(bus->dhd->osh, vbuffer, varsize);
+ }
+
+ phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
+
+ /* adjust to the user specified RAM */
+ DHD_INFO(("Physical memory size: %d, usable memory size: %d\n",
+ phys_size, bus->ramsize));
+ DHD_INFO(("Vars are at %d, orig varsize is %d\n",
+ varaddr, varsize));
+ varsize = ((phys_size - 4) - varaddr);
+
+ /*
+ * Determine the length token:
+ * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
+ */
+ if (bcmerror) {
+ varsizew = 0;
+ } else {
+ varsizew = varsize / 4;
+ varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
+ varsizew = htol32(varsizew);
+ }
+
+ DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew));
+
+ /* Write the length token to the last word */
+ bcmerror = dhdsdio_membytes(bus, TRUE, (phys_size - 4),
+ (uint8*)&varsizew, 4);
+
+ return bcmerror;
+}
+
+static int
+dhdsdio_download_state(dhd_bus_t *bus, bool enter)
+{
+ uint retries;
+ int bcmerror = 0;
+
+ /* To enter download state, disable ARM and reset SOCRAM.
+ * To exit download state, simply reset ARM (default is RAM boot).
+ */
+ if (enter) {
+ bus->alp_only = TRUE;
+
+ if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+ !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ si_core_disable(bus->sih, 0);
+ if (bcmsdh_regfail(bus->sdh)) {
+ bcmerror = BCME_SDIO_ERROR;
+ goto fail;
+ }
+
+ if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ si_core_reset(bus->sih, 0, 0);
+ if (bcmsdh_regfail(bus->sdh)) {
+ DHD_ERROR(("%s: Failure trying reset SOCRAM core?\n", __FUNCTION__));
+ bcmerror = BCME_SDIO_ERROR;
+ goto fail;
+ }
+
+ /* Disable remap for download */
+ if (REMAP_ENAB(bus) && si_socdevram_remap_isenb(bus->sih))
+ dhdsdio_devram_remap(bus, FALSE);
+
+ /* Clear the top word of memory */
+ if (bus->ramsize) {
+ uint32 zeros = 0;
+ if (dhdsdio_membytes(bus, TRUE, bus->ramsize - 4, (uint8*)&zeros, 4) < 0) {
+ bcmerror = BCME_SDIO_ERROR;
+ goto fail;
+ }
+ }
+ } else {
+ if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ if (!si_iscoreup(bus->sih)) {
+ DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ if ((bcmerror = dhdsdio_write_vars(bus))) {
+ DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
+ goto fail;
+ }
+
+ /* Enable remap before ARM reset but after vars.
+ * No backplane access in remap mode
+ */
+ if (REMAP_ENAB(bus) && !si_socdevram_remap_isenb(bus->sih))
+ dhdsdio_devram_remap(bus, TRUE);
+
+ if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
+ !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
+ DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+ W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries);
+
+
+ if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+ !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ si_core_reset(bus->sih, 0, 0);
+ if (bcmsdh_regfail(bus->sdh)) {
+ DHD_ERROR(("%s: Failure trying to reset ARM core?\n", __FUNCTION__));
+ bcmerror = BCME_SDIO_ERROR;
+ goto fail;
+ }
+
+ /* Allow HT Clock now that the ARM is running. */
+ bus->alp_only = FALSE;
+
+ bus->dhd->busstate = DHD_BUS_LOAD;
+ }
+
+fail:
+ /* Always return to SDIOD core */
+ if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0))
+ si_setcore(bus->sih, SDIOD_CORE_ID, 0);
+
+ return bcmerror;
+}
+
+int
+dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ const bcm_iovar_t *vi = NULL;
+ int bcmerror = 0;
+ int val_size;
+ uint32 actionid;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(name);
+ ASSERT(len >= 0);
+
+ /* Get MUST have return space */
+ ASSERT(set || (arg && len));
+
+ /* Set does NOT take qualifiers */
+ ASSERT(!set || (!params && !plen));
+
+ /* Look up var locally; if not found pass to host driver */
+ if ((vi = bcm_iovar_lookup(dhdsdio_iovars, name)) == NULL) {
+ dhd_os_sdlock(bus->dhd);
+
+ BUS_WAKE(bus);
+
+ /* Turn on clock in case SD command needs backplane */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ bcmerror = bcmsdh_iovar_op(bus->sdh, name, params, plen, arg, len, set);
+
+ /* Check for bus configuration changes of interest */
+
+ /* If it was divisor change, read the new one */
+ if (set && strcmp(name, "sd_divisor") == 0) {
+ if (bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+ &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) {
+ bus->sd_divisor = -1;
+ DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name));
+ } else {
+ DHD_INFO(("%s: noted %s update, value now %d\n",
+ __FUNCTION__, name, bus->sd_divisor));
+ }
+ }
+ /* If it was a mode change, read the new one */
+ if (set && strcmp(name, "sd_mode") == 0) {
+ if (bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+ &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) {
+ bus->sd_mode = -1;
+ DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name));
+ } else {
+ DHD_INFO(("%s: noted %s update, value now %d\n",
+ __FUNCTION__, name, bus->sd_mode));
+ }
+ }
+ /* Similar check for blocksize change */
+ if (set && strcmp(name, "sd_blocksize") == 0) {
+ int32 fnum = 2;
+ if (bcmsdh_iovar_op(bus->sdh, "sd_blocksize", &fnum, sizeof(int32),
+ &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
+ bus->blocksize = 0;
+ DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
+ } else {
+ DHD_INFO(("%s: noted %s update, value now %d\n",
+ __FUNCTION__, "sd_blocksize", bus->blocksize));
+ }
+ }
+ bus->roundup = MIN(max_roundup, bus->blocksize);
+
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ bus->activity = FALSE;
+ dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+ }
+
+ dhd_os_sdunlock(bus->dhd);
+ goto exit;
+ }
+
+ DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+ name, (set ? "set" : "get"), len, plen));
+
+ /* set up 'params' pointer in case this is a set command so that
+ * the convenience int and bool code can be common to set and get
+ */
+ if (params == NULL) {
+ params = arg;
+ plen = len;
+ }
+
+ if (vi->type == IOVT_VOID)
+ val_size = 0;
+ else if (vi->type == IOVT_BUFFER)
+ val_size = len;
+ else
+ /* all other types are integer sized */
+ val_size = sizeof(int);
+
+ actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+ bcmerror = dhdsdio_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+ return bcmerror;
+}
+
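+/* Take the bus down: mask and clear dongle interrupts, disable F2, and flush queued tx/rx state */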
+void
+dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
+{
+ osl_t *osh;
+ uint32 local_hostintmask;
+ uint8 saveclk;
+ uint retries;
+ int err;
+ if (!bus->dhd)
+ return;
+
+ osh = bus->dhd->osh;
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ bcmsdh_waitlockfree(NULL);
+
+ if (enforce_mutex)
+ dhd_os_sdlock(bus->dhd);
+
+ BUS_WAKE(bus);
+
+ /* Change our idea of bus state */
+ bus->dhd->busstate = DHD_BUS_DOWN;
+
+ if (KSO_ENAB(bus)) {
+
+ /* Enable clock for device interrupts */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ /* Disable and clear interrupts at the chip level also */
+ W_SDREG(0, &bus->regs->hostintmask, retries);
+ local_hostintmask = bus->hostintmask;
+ bus->hostintmask = 0;
+
+ /* Force clocks on backplane to be sure F2 interrupt propagates */
+ saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (!err) {
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
+ }
+ if (err) {
+ DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err));
+ }
+
+ /* Turn off the bus (F2), free any pending packets */
+ DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+ bcmsdh_intr_disable(bus->sdh);
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
+
+ /* Clear any pending interrupts now that F2 is disabled */
+ W_SDREG(local_hostintmask, &bus->regs->intstatus, retries);
+ }
+
+ /* Turn off the backplane clock (only) */
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+ /* Clear the data packet queues */
+ pktq_flush(osh, &bus->txq, TRUE, NULL, 0);
+
+ /* Clear any held glomming stuff */
+ if (bus->glomd)
+ PKTFREE(osh, bus->glomd, FALSE);
+
+ if (bus->glom)
+ PKTFREE(osh, bus->glom, FALSE);
+
+ bus->glom = bus->glomd = NULL;
+
+ /* Clear rx control and wake any waiters */
+ bus->rxlen = 0;
+ dhd_os_ioctl_resp_wake(bus->dhd);
+
+ /* Reset some F2 state stuff */
+ bus->rxskip = FALSE;
+ bus->tx_seq = bus->rx_seq = 0;
+
+ if (enforce_mutex)
+ dhd_os_sdunlock(bus->dhd);
+}
+
+
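+/* Bring the bus up: force the backplane clock, enable SDIO function 2, and enable interrupts once the dongle reports F2 ready */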
+int
+dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ dhd_timeout_t tmo;
+ uint retries = 0;
+ uint8 ready, enable;
+ int err, ret = 0;
+ uint8 saveclk;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(bus->dhd);
+ if (!bus->dhd)
+ return 0;
+
+ if (enforce_mutex)
+ dhd_os_sdlock(bus->dhd);
+
+ /* Make sure backplane clock is on, needed to generate F2 interrupt */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ if (bus->clkstate != CLK_AVAIL) {
+ DHD_ERROR(("%s: clock state is wrong. state = %d\n", __FUNCTION__, bus->clkstate));
+ goto exit;
+ }
+
+
+ /* Force clocks on backplane to be sure F2 interrupt propagates */
+ saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (!err) {
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
+ }
+ if (err) {
+ DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err));
+ goto exit;
+ }
+
+ /* Enable function 2 (frame transfers) */
+ W_SDREG((SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT),
+ &bus->regs->tosbmailboxdata, retries);
+ enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
+
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL);
+
+ /* Give the dongle some time to do its thing and set IOR2 */
+ dhd_timeout_start(&tmo, DHD_WAIT_F2RDY * 1000);
+
+ ready = 0;
+ while (ready != enable && !dhd_timeout_expired(&tmo))
+ ready = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY, NULL);
+
+ DHD_INFO(("%s: enable 0x%02x, ready 0x%02x (waited %uus)\n",
+ __FUNCTION__, enable, ready, tmo.elapsed));
+
+
+ /* If F2 successfully enabled, set core and enable interrupts */
+ if (ready == enable) {
+ /* Make sure we're talking to the core. */
+ if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)))
+ bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0);
+ ASSERT(bus->regs != NULL);
+
+ /* Set up the interrupt mask and enable interrupts */
+ bus->hostintmask = HOSTINTMASK;
+ /* corerev 4 could use the newer interrupt logic to detect the frames */
+ if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 4) &&
+ (bus->rxint_mode != SDIO_DEVICE_HMB_RXINT)) {
+ bus->hostintmask &= ~I_HMB_FRAME_IND;
+ bus->hostintmask |= I_XMTDATA_AVAIL;
+ }
+ W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries);
+
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, (uint8)watermark, &err);
+
+ /* Set bus state according to enable result */
+ dhdp->busstate = DHD_BUS_DATA;
+
+ /* bcmsdh_intr_unmask(bus->sdh); */
+
+ bus->intdis = FALSE;
+ if (bus->intr) {
+ DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__));
+ bcmsdh_intr_enable(bus->sdh);
+ } else {
+ DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+ bcmsdh_intr_disable(bus->sdh);
+ }
+
+ }
+
+
+ else {
+ /* Disable F2 again */
+ enable = SDIO_FUNC_ENABLE_1;
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL);
+ }
+
+ if (dhdsdio_sr_cap(bus))
+ dhdsdio_sr_init(bus);
+ else
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
+
+ /* If we didn't come up, turn off backplane clock */
+ if (dhdp->busstate != DHD_BUS_DATA)
+ dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+
+exit:
+ if (enforce_mutex)
+ dhd_os_sdunlock(bus->dhd);
+
+ return ret;
+}
+
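+/* Recover from an rx error: optionally abort the F2 transfer, terminate the pending frame, and optionally NAK so the dongle retransmits */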
+static void
+dhdsdio_rxfail(dhd_bus_t *bus, bool abort, bool rtx)
+{
+ bcmsdh_info_t *sdh = bus->sdh;
+ sdpcmd_regs_t *regs = bus->regs;
+ uint retries = 0;
+ uint16 lastrbc;
+ uint8 hi, lo;
+ int err;
+
+ DHD_ERROR(("%s: %sterminate frame%s\n", __FUNCTION__,
+ (abort ? "abort command, " : ""), (rtx ? ", send NAK" : "")));
+
+ if (!KSO_ENAB(bus)) {
+ DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+ return;
+ }
+
+ if (abort) {
+ bcmsdh_abort(sdh, SDIO_FUNC_2);
+ }
+
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM, &err);
+ bus->f1regdata++;
+
+ /* Wait until the packet has been flushed (device/FIFO stable) */
+ for (lastrbc = retries = 0xffff; retries > 0; retries--) {
+ hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCHI, NULL);
+ lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCLO, NULL);
+ bus->f1regdata += 2;
+
+ if ((hi == 0) && (lo == 0))
+ break;
+
+ if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
+ DHD_ERROR(("%s: count growing: last 0x%04x now 0x%04x\n",
+ __FUNCTION__, lastrbc, ((hi << 8) + lo)));
+ }
+ lastrbc = (hi << 8) + lo;
+ }
+
+ if (!retries) {
+ DHD_ERROR(("%s: count never zeroed: last 0x%04x\n", __FUNCTION__, lastrbc));
+ } else {
+ DHD_INFO(("%s: flush took %d iterations\n", __FUNCTION__, (0xffff - retries)));
+ }
+
+ if (rtx) {
+ bus->rxrtx++;
+ W_SDREG(SMB_NAK, &regs->tosbmailbox, retries);
+ bus->f1regdata++;
+ if (retries <= retry_limit) {
+ bus->rxskip = TRUE;
+ }
+ }
+
+ /* Clear partial in any case */
+ bus->nextlen = 0;
+
+ /* If we can't reach the device, signal failure */
+ if (err || bcmsdh_regfail(sdh))
+ bus->dhd->busstate = DHD_BUS_DOWN;
+}
+
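+/* Read the remainder of a control frame into the (aligned) rxctl buffer and wake any ioctl waiters */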
+static void
+dhdsdio_read_control(dhd_bus_t *bus, uint8 *hdr, uint len, uint doff)
+{
+ bcmsdh_info_t *sdh = bus->sdh;
+ uint rdlen, pad;
+
+ int sdret;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* Control data already received in aligned rxctl */
+ if ((bus->bus == SPI_BUS) && (!bus->usebufpool))
+ goto gotpkt;
+
+ ASSERT(bus->rxbuf);
+ /* Set rxctl for frame (w/optional alignment) */
+ bus->rxctl = bus->rxbuf;
+ if (dhd_alignctl) {
+ bus->rxctl += firstread;
+ if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN)))
+ bus->rxctl += (DHD_SDALIGN - pad);
+ bus->rxctl -= firstread;
+ }
+ ASSERT(bus->rxctl >= bus->rxbuf);
+
+ /* Copy the already-read portion over */
+ bcopy(hdr, bus->rxctl, firstread);
+ if (len <= firstread)
+ goto gotpkt;
+
+ /* Copy the full data pkt in gSPI case and process ioctl. */
+ if (bus->bus == SPI_BUS) {
+ bcopy(hdr, bus->rxctl, len);
+ goto gotpkt;
+ }
+
+ /* Raise rdlen to next SDIO block to avoid tail command */
+ rdlen = len - firstread;
+ if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+ pad = bus->blocksize - (rdlen % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+ ((len + pad) < bus->dhd->maxctl))
+ rdlen += pad;
+ } else if (rdlen % DHD_SDALIGN) {
+ rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+ }
+
+ /* Satisfy length-alignment requirements */
+ if (forcealign && (rdlen & (ALIGNMENT - 1)))
+ rdlen = ROUNDUP(rdlen, ALIGNMENT);
+
+ /* Drop if the read is too big or it exceeds our maximum */
+ if ((rdlen + firstread) > bus->dhd->maxctl) {
+ DHD_ERROR(("%s: %d-byte control read exceeds %d-byte buffer\n",
+ __FUNCTION__, rdlen, bus->dhd->maxctl));
+ bus->dhd->rx_errors++;
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ goto done;
+ }
+
+ if ((len - doff) > bus->dhd->maxctl) {
+ DHD_ERROR(("%s: %d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
+ __FUNCTION__, len, (len - doff), bus->dhd->maxctl));
+ bus->dhd->rx_errors++; bus->rx_toolong++;
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ goto done;
+ }
+
+
+ /* Read remainder of frame body into the rxctl buffer */
+ sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ (bus->rxctl + firstread), rdlen, NULL, NULL, NULL);
+ bus->f2rxdata++;
+ ASSERT(sdret != BCME_PENDING);
+
+ /* Control frame failures need retransmission */
+ if (sdret < 0) {
+ DHD_ERROR(("%s: read %d control bytes failed: %d\n", __FUNCTION__, rdlen, sdret));
+ bus->rxc_errors++; /* dhd.rx_ctlerrs is higher level */
+ dhdsdio_rxfail(bus, TRUE, TRUE);
+ goto done;
+ }
+
+gotpkt:
+
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() && DHD_CTL_ON()) {
+ prhex("RxCtrl", bus->rxctl, len);
+ }
+#endif
+
+ /* Point to valid data and indicate its length */
+ bus->rxctl += doff;
+ bus->rxlen = len - doff;
+
+done:
+ /* Wake any waiters */
+ dhd_os_ioctl_resp_wake(bus->dhd);
+}
+
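+/* If a glom descriptor was received, build a packet chain for the subframes, then read the whole superframe in one SDIO transfer */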
+static uint8
+dhdsdio_rxglom(dhd_bus_t *bus, uint8 rxseq)
+{
+ uint16 dlen, totlen;
+ uint8 *dptr, num = 0;
+
+ uint16 sublen, check;
+ void *pfirst, *plast, *pnext, *save_pfirst;
+ osl_t *osh = bus->dhd->osh;
+
+ int errcode;
+ uint8 chan, seq, doff, sfdoff;
+ uint8 txmax;
+ uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN];
+ uint reorder_info_len;
+
+ int ifidx = 0;
+ bool usechain = bus->use_rxchain;
+
+ /* If packets, issue read(s) and send up packet chain */
+ /* Return sequence numbers consumed? */
+
+ DHD_TRACE(("dhdsdio_rxglom: start: glomd %p glom %p\n", bus->glomd, bus->glom));
+
+ /* If there's a descriptor, generate the packet chain */
+ if (bus->glomd) {
+ dhd_os_sdlock_rxq(bus->dhd);
+
+ pfirst = plast = pnext = NULL;
+ dlen = (uint16)PKTLEN(osh, bus->glomd);
+ dptr = PKTDATA(osh, bus->glomd);
+ if (!dlen || (dlen & 1)) {
+ DHD_ERROR(("%s: bad glomd len (%d), ignore descriptor\n",
+ __FUNCTION__, dlen));
+ dlen = 0;
+ }
+
+ for (totlen = num = 0; dlen; num++) {
+ /* Get (and move past) next length */
+ sublen = ltoh16_ua(dptr);
+ dlen -= sizeof(uint16);
+ dptr += sizeof(uint16);
+ if ((sublen < SDPCM_HDRLEN) ||
+ ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
+ DHD_ERROR(("%s: descriptor len %d bad: %d\n",
+ __FUNCTION__, num, sublen));
+ pnext = NULL;
+ break;
+ }
+ if (sublen % DHD_SDALIGN) {
+ DHD_ERROR(("%s: sublen %d not a multiple of %d\n",
+ __FUNCTION__, sublen, DHD_SDALIGN));
+ usechain = FALSE;
+ }
+ totlen += sublen;
+
+ /* For last frame, adjust read len so total is a block multiple */
+ if (!dlen) {
+ sublen += (ROUNDUP(totlen, bus->blocksize) - totlen);
+ totlen = ROUNDUP(totlen, bus->blocksize);
+ }
+
+ /* Allocate/chain packet for next subframe */
+ if ((pnext = PKTGET(osh, sublen + DHD_SDALIGN, FALSE)) == NULL) {
+ DHD_ERROR(("%s: PKTGET failed, num %d len %d\n",
+ __FUNCTION__, num, sublen));
+ break;
+ }
+ ASSERT(!PKTLINK(pnext));
+ if (!pfirst) {
+ ASSERT(!plast);
+ pfirst = plast = pnext;
+ } else {
+ ASSERT(plast);
+ PKTSETNEXT(osh, plast, pnext);
+ plast = pnext;
+ }
+
+ /* Adhere to start alignment requirements */
+ PKTALIGN(osh, pnext, sublen, DHD_SDALIGN);
+ }
+
+ /* If all allocations succeeded, save packet chain in bus structure */
+ if (pnext) {
+ DHD_GLOM(("%s: allocated %d-byte packet chain for %d subframes\n",
+ __FUNCTION__, totlen, num));
+ if (DHD_GLOM_ON() && bus->nextlen) {
+ if (totlen != bus->nextlen) {
+ DHD_GLOM(("%s: glomdesc mismatch: nextlen %d glomdesc %d "
+ "rxseq %d\n", __FUNCTION__, bus->nextlen,
+ totlen, rxseq));
+ }
+ }
+ bus->glom = pfirst;
+ pfirst = pnext = NULL;
+ } else {
+ if (pfirst)
+ PKTFREE(osh, pfirst, FALSE);
+ bus->glom = NULL;
+ num = 0;
+ }
+
+ /* Done with descriptor packet */
+ PKTFREE(osh, bus->glomd, FALSE);
+ bus->glomd = NULL;
+ bus->nextlen = 0;
+
+ dhd_os_sdunlock_rxq(bus->dhd);
+ }
+
+ /* Ok -- either we just generated a packet chain, or had one from before */
+ if (bus->glom) {
+ if (DHD_GLOM_ON()) {
+ DHD_GLOM(("%s: attempt superframe read, packet chain:\n", __FUNCTION__));
+ for (pnext = bus->glom; pnext; pnext = PKTNEXT(osh, pnext)) {
+ DHD_GLOM((" %p: %p len 0x%04x (%d)\n",
+ pnext, (uint8*)PKTDATA(osh, pnext),
+ PKTLEN(osh, pnext), PKTLEN(osh, pnext)));
+ }
+ }
+
+ pfirst = bus->glom;
+ dlen = (uint16)pkttotlen(osh, pfirst);
+
+		/* Do an SDIO read for the superframe.  A configurable iovar selects
+		 * whether to read directly into the chained packet, or to allocate a
+		 * large packet and copy into the chain.
+		 */
+ if (usechain) {
+ errcode = dhd_bcmsdh_recv_buf(bus,
+ bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
+ F2SYNC, (uint8*)PKTDATA(osh, pfirst),
+ dlen, pfirst, NULL, NULL);
+ } else if (bus->dataptr) {
+ errcode = dhd_bcmsdh_recv_buf(bus,
+ bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
+ F2SYNC, bus->dataptr,
+ dlen, NULL, NULL, NULL);
+ sublen = (uint16)pktfrombuf(osh, pfirst, 0, dlen, bus->dataptr);
+ if (sublen != dlen) {
+ DHD_ERROR(("%s: FAILED TO COPY, dlen %d sublen %d\n",
+ __FUNCTION__, dlen, sublen));
+ errcode = -1;
+ }
+ pnext = NULL;
+ } else {
+ DHD_ERROR(("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n", dlen));
+ errcode = -1;
+ }
+ bus->f2rxdata++;
+ ASSERT(errcode != BCME_PENDING);
+
+ /* On failure, kill the superframe, allow a couple retries */
+ if (errcode < 0) {
+ DHD_ERROR(("%s: glom read of %d bytes failed: %d\n",
+ __FUNCTION__, dlen, errcode));
+ bus->dhd->rx_errors++;
+
+ if (bus->glomerr++ < 3) {
+ dhdsdio_rxfail(bus, TRUE, TRUE);
+ } else {
+ bus->glomerr = 0;
+ dhdsdio_rxfail(bus, TRUE, FALSE);
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(osh, bus->glom, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ bus->rxglomfail++;
+ bus->glom = NULL;
+ }
+ return 0;
+ }
+
+#ifdef DHD_DEBUG
+ if (DHD_GLOM_ON()) {
+ prhex("SUPERFRAME", PKTDATA(osh, pfirst),
+ MIN(PKTLEN(osh, pfirst), 48));
+ }
+#endif
+
+ /* Validate the superframe header */
+ dptr = (uint8 *)PKTDATA(osh, pfirst);
+ sublen = ltoh16_ua(dptr);
+ check = ltoh16_ua(dptr + sizeof(uint16));
+
+ chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+ seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
+ bus->nextlen = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+ if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+ DHD_INFO(("%s: got frame w/nextlen too large (%d) seq %d\n",
+ __FUNCTION__, bus->nextlen, seq));
+ bus->nextlen = 0;
+ }
+ doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+ txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+
+ errcode = 0;
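+		/* The 16-bit check field is the bitwise complement of the length,
+		 * so len ^ check must be all ones for a valid hardware header.
+		 */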
+ if ((uint16)~(sublen^check)) {
+ DHD_ERROR(("%s (superframe): HW hdr error: len/check 0x%04x/0x%04x\n",
+ __FUNCTION__, sublen, check));
+ errcode = -1;
+ } else if (ROUNDUP(sublen, bus->blocksize) != dlen) {
+ DHD_ERROR(("%s (superframe): len 0x%04x, rounded 0x%04x, expect 0x%04x\n",
+ __FUNCTION__, sublen, ROUNDUP(sublen, bus->blocksize), dlen));
+ errcode = -1;
+ } else if (SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]) != SDPCM_GLOM_CHANNEL) {
+ DHD_ERROR(("%s (superframe): bad channel %d\n", __FUNCTION__,
+ SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN])));
+ errcode = -1;
+ } else if (SDPCM_GLOMDESC(&dptr[SDPCM_FRAMETAG_LEN])) {
+ DHD_ERROR(("%s (superframe): got second descriptor?\n", __FUNCTION__));
+ errcode = -1;
+ } else if ((doff < SDPCM_HDRLEN) ||
+ (doff > (PKTLEN(osh, pfirst) - SDPCM_HDRLEN))) {
+ DHD_ERROR(("%s (superframe): Bad data offset %d: HW %d pkt %d min %d\n",
+ __FUNCTION__, doff, sublen, PKTLEN(osh, pfirst), SDPCM_HDRLEN));
+ errcode = -1;
+ }
+
+ /* Check sequence number of superframe SW header */
+ if (rxseq != seq) {
+ DHD_INFO(("%s: (superframe) rx_seq %d, expected %d\n",
+ __FUNCTION__, seq, rxseq));
+ bus->rx_badseq++;
+ rxseq = seq;
+ }
+
+ /* Check window for sanity */
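+		/* The advertised tx window should never run more than 0x40 ahead of
+		 * tx_seq (mod-256 arithmetic); if it does, keep the previous maximum.
+		 */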
+ if ((uint8)(txmax - bus->tx_seq) > 0x40) {
+ DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+ __FUNCTION__, txmax, bus->tx_seq));
+ txmax = bus->tx_max;
+ }
+ bus->tx_max = txmax;
+
+ /* Remove superframe header, remember offset */
+ PKTPULL(osh, pfirst, doff);
+ sfdoff = doff;
+
+ /* Validate all the subframe headers */
+ for (num = 0, pnext = pfirst; pnext && !errcode;
+ num++, pnext = PKTNEXT(osh, pnext)) {
+ dptr = (uint8 *)PKTDATA(osh, pnext);
+ dlen = (uint16)PKTLEN(osh, pnext);
+ sublen = ltoh16_ua(dptr);
+ check = ltoh16_ua(dptr + sizeof(uint16));
+ chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+ doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+#ifdef DHD_DEBUG
+ if (DHD_GLOM_ON()) {
+ prhex("subframe", dptr, 32);
+ }
+#endif
+
+ if ((uint16)~(sublen^check)) {
+ DHD_ERROR(("%s (subframe %d): HW hdr error: "
+ "len/check 0x%04x/0x%04x\n",
+ __FUNCTION__, num, sublen, check));
+ errcode = -1;
+ } else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN)) {
+ DHD_ERROR(("%s (subframe %d): length mismatch: "
+ "len 0x%04x, expect 0x%04x\n",
+ __FUNCTION__, num, sublen, dlen));
+ errcode = -1;
+ } else if ((chan != SDPCM_DATA_CHANNEL) &&
+ (chan != SDPCM_EVENT_CHANNEL)) {
+ DHD_ERROR(("%s (subframe %d): bad channel %d\n",
+ __FUNCTION__, num, chan));
+ errcode = -1;
+ } else if ((doff < SDPCM_HDRLEN) || (doff > sublen)) {
+ DHD_ERROR(("%s (subframe %d): Bad data offset %d: HW %d min %d\n",
+ __FUNCTION__, num, doff, sublen, SDPCM_HDRLEN));
+ errcode = -1;
+ }
+ }
+
+ if (errcode) {
+ /* Terminate frame on error, request a couple retries */
+ if (bus->glomerr++ < 3) {
+ /* Restore superframe header space */
+ PKTPUSH(osh, pfirst, sfdoff);
+ dhdsdio_rxfail(bus, TRUE, TRUE);
+ } else {
+ bus->glomerr = 0;
+ dhdsdio_rxfail(bus, TRUE, FALSE);
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(osh, bus->glom, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ bus->rxglomfail++;
+ bus->glom = NULL;
+ }
+ bus->nextlen = 0;
+ return 0;
+ }
+
+ /* Basic SD framing looks ok - process each packet (header) */
+ save_pfirst = pfirst;
+ bus->glom = NULL;
+ plast = NULL;
+
+ dhd_os_sdlock_rxq(bus->dhd);
+ for (num = 0; pfirst; rxseq++, pfirst = pnext) {
+ pnext = PKTNEXT(osh, pfirst);
+ PKTSETNEXT(osh, pfirst, NULL);
+
+ dptr = (uint8 *)PKTDATA(osh, pfirst);
+ sublen = ltoh16_ua(dptr);
+ chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+ seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
+ doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+
+ DHD_GLOM(("%s: Get subframe %d, %p(%p/%d), sublen %d chan %d seq %d\n",
+ __FUNCTION__, num, pfirst, PKTDATA(osh, pfirst),
+ PKTLEN(osh, pfirst), sublen, chan, seq));
+
+ ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL));
+
+ if (rxseq != seq) {
+ DHD_GLOM(("%s: rx_seq %d, expected %d\n",
+ __FUNCTION__, seq, rxseq));
+ bus->rx_badseq++;
+ rxseq = seq;
+ }
+
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+ prhex("Rx Subframe Data", dptr, dlen);
+ }
+#endif
+
+ PKTSETLEN(osh, pfirst, sublen);
+ PKTPULL(osh, pfirst, doff);
+
+ reorder_info_len = sizeof(reorder_info_buf);
+
+ if (PKTLEN(osh, pfirst) == 0) {
+ PKTFREE(bus->dhd->osh, pfirst, FALSE);
+ if (plast) {
+ PKTSETNEXT(osh, plast, pnext);
+ } else {
+ ASSERT(save_pfirst == pfirst);
+ save_pfirst = pnext;
+ }
+ continue;
+ } else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pfirst, reorder_info_buf,
+ &reorder_info_len) != 0) {
+ DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__));
+ bus->dhd->rx_errors++;
+ PKTFREE(osh, pfirst, FALSE);
+ if (plast) {
+ PKTSETNEXT(osh, plast, pnext);
+ } else {
+ ASSERT(save_pfirst == pfirst);
+ save_pfirst = pnext;
+ }
+ continue;
+ }
+ if (reorder_info_len) {
+ uint32 free_buf_count;
+ void *ppfirst;
+
+ ppfirst = pfirst;
+ /* Reordering info from the firmware */
+ dhd_process_pkt_reorder_info(bus->dhd, reorder_info_buf,
+ reorder_info_len, &ppfirst, &free_buf_count);
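+				/* free_buf_count packets (chained at ppfirst) are ready for
+				 * delivery; zero means nothing goes up for this subframe yet.
+				 */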
+
+ if (free_buf_count == 0) {
+ if (plast) {
+ PKTSETNEXT(osh, plast, pnext);
+ } else {
+ ASSERT(save_pfirst == pfirst);
+ save_pfirst = pnext;
+ }
+ continue;
+				} else {
+ void *temp;
+
+ /* go to the end of the chain and attach the pnext there */
+ temp = ppfirst;
+ while (PKTNEXT(osh, temp) != NULL) {
+ temp = PKTNEXT(osh, temp);
+ }
+ pfirst = temp;
+ if (plast) {
+ PKTSETNEXT(osh, plast, ppfirst);
+					} else {
+ /* first one in the chain */
+ save_pfirst = ppfirst;
+ }
+
+ PKTSETNEXT(osh, pfirst, pnext);
+ plast = pfirst;
+ }
+
+ num += (uint8)free_buf_count;
+			} else {
+ /* this packet will go up, link back into chain and count it */
+ PKTSETNEXT(osh, pfirst, pnext);
+ plast = pfirst;
+ num++;
+ }
+#ifdef DHD_DEBUG
+ if (DHD_GLOM_ON()) {
+ DHD_GLOM(("%s subframe %d to stack, %p(%p/%d) nxt/lnk %p/%p\n",
+ __FUNCTION__, num, pfirst,
+ PKTDATA(osh, pfirst), PKTLEN(osh, pfirst),
+ PKTNEXT(osh, pfirst), PKTLINK(pfirst)));
+ prhex("", (uint8 *)PKTDATA(osh, pfirst),
+ MIN(PKTLEN(osh, pfirst), 32));
+ }
+#endif /* DHD_DEBUG */
+ }
+ dhd_os_sdunlock_rxq(bus->dhd);
+ if (num) {
+ dhd_os_sdunlock(bus->dhd);
+ dhd_rx_frame(bus->dhd, ifidx, save_pfirst, num, 0);
+ dhd_os_sdlock(bus->dhd);
+ }
+
+ bus->rxglomframes++;
+ bus->rxglompkts += num;
+ }
+ return num;
+}
+
+
+/* Read up to maxframes frames; returns the number read and sets *finished
+ * when the dongle indicates that no more frames are pending.
+ */
+static uint
+dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
+{
+ osl_t *osh = bus->dhd->osh;
+ bcmsdh_info_t *sdh = bus->sdh;
+
+ uint16 len, check; /* Extracted hardware header fields */
+ uint8 chan, seq, doff; /* Extracted software header fields */
+ uint8 fcbits; /* Extracted fcbits from software header */
+ uint8 delta;
+
+ void *pkt; /* Packet for event or data frames */
+ uint16 pad; /* Number of pad bytes to read */
+ uint16 rdlen; /* Total number of bytes to read */
+ uint8 rxseq; /* Next sequence number to expect */
+ uint rxleft = 0; /* Remaining number of frames allowed */
+ int sdret; /* Return code from bcmsdh calls */
+ uint8 txmax; /* Maximum tx sequence offered */
+ bool len_consistent; /* Result of comparing readahead len and len from hw-hdr */
+ uint8 *rxbuf;
+ int ifidx = 0;
+ uint rxcount = 0; /* Total frames read */
+ uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN];
+ uint reorder_info_len;
+ uint pkt_count;
+
+#if defined(DHD_DEBUG) || defined(SDTEST)
+ bool sdtest = FALSE; /* To limit message spew from test mode */
+#endif
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ bus->readframes = TRUE;
+
+ if (!KSO_ENAB(bus)) {
+ DHD_ERROR(("%s: KSO off\n", __FUNCTION__));
+ bus->readframes = FALSE;
+ return 0;
+ }
+
+ ASSERT(maxframes);
+
+#ifdef SDTEST
+ /* Allow pktgen to override maxframes */
+ if (bus->pktgen_count && (bus->pktgen_mode == DHD_PKTGEN_RECV)) {
+ maxframes = bus->pktgen_count;
+ sdtest = TRUE;
+ }
+#endif
+
+ /* Not finished unless we encounter no more frames indication */
+ *finished = FALSE;
+
+ for (rxseq = bus->rx_seq, rxleft = maxframes;
+ !bus->rxskip && rxleft && bus->dhd->busstate != DHD_BUS_DOWN;
+ rxseq++, rxleft--) {
+
+#ifdef DHDTHREAD
+ /* tx more to improve rx performance */
+ if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate &&
+ pktq_mlen(&bus->txq, ~bus->flowcontrol) && DATAOK(bus)) {
+ dhdsdio_sendfromq(bus, dhd_txbound);
+ }
+#endif /* DHDTHREAD */
+
+ /* Handle glomming separately */
+ if (bus->glom || bus->glomd) {
+ uint8 cnt;
+ DHD_GLOM(("%s: calling rxglom: glomd %p, glom %p\n",
+ __FUNCTION__, bus->glomd, bus->glom));
+ cnt = dhdsdio_rxglom(bus, rxseq);
+ DHD_GLOM(("%s: rxglom returned %d\n", __FUNCTION__, cnt));
+ rxseq += cnt - 1;
+ rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
+ continue;
+ }
+
+ /* Try doing single read if we can */
+ if (dhd_readahead && bus->nextlen) {
+ uint16 nextlen = bus->nextlen;
+ bus->nextlen = 0;
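+			/* The readahead length from the previous software header is in
+			 * bytes for gSPI but in 16-byte units for SDIO (hence the << 4).
+			 */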
+
+ if (bus->bus == SPI_BUS) {
+ rdlen = len = nextlen;
+ }
+ else {
+ rdlen = len = nextlen << 4;
+
+ /* Pad read to blocksize for efficiency */
+ if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+ pad = bus->blocksize - (rdlen % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+ ((rdlen + pad + firstread) < MAX_RX_DATASZ))
+ rdlen += pad;
+ } else if (rdlen % DHD_SDALIGN) {
+ rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+ }
+ }
+
+			/* We use the bus->rxctl buffer in WinXP for initial control packet
+			 * receives.  Later we use buffer-poll for data as well as control
+			 * packets.  This is required because, unlike SDIO, the dhd receives the
+			 * full frame over gSPI, and only after the frame is received can we
+			 * distinguish whether it is a data or non-data frame.
+			 */
+ /* Allocate a packet buffer */
+ dhd_os_sdlock_rxq(bus->dhd);
+ if (!(pkt = PKTGET(osh, rdlen + DHD_SDALIGN, FALSE))) {
+ if (bus->bus == SPI_BUS) {
+ bus->usebufpool = FALSE;
+ bus->rxctl = bus->rxbuf;
+ if (dhd_alignctl) {
+ bus->rxctl += firstread;
+ if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN)))
+ bus->rxctl += (DHD_SDALIGN - pad);
+ bus->rxctl -= firstread;
+ }
+ ASSERT(bus->rxctl >= bus->rxbuf);
+ rxbuf = bus->rxctl;
+ /* Read the entire frame */
+ sdret = dhd_bcmsdh_recv_buf(bus,
+ bcmsdh_cur_sbwad(sdh),
+ SDIO_FUNC_2,
+ F2SYNC, rxbuf, rdlen,
+ NULL, NULL, NULL);
+ bus->f2rxdata++;
+ ASSERT(sdret != BCME_PENDING);
+
+ /* Control frame failures need retransmission */
+ if (sdret < 0) {
+ DHD_ERROR(("%s: read %d control bytes failed: %d\n",
+ __FUNCTION__, rdlen, sdret));
+ /* dhd.rx_ctlerrs is higher level */
+ bus->rxc_errors++;
+ dhd_os_sdunlock_rxq(bus->dhd);
+ dhdsdio_rxfail(bus, TRUE,
+ (bus->bus == SPI_BUS) ? FALSE : TRUE);
+ continue;
+ }
+ } else {
+ /* Give up on data, request rtx of events */
+ DHD_ERROR(("%s (nextlen): PKTGET failed: len %d rdlen %d "
+ "expected rxseq %d\n",
+ __FUNCTION__, len, rdlen, rxseq));
+ /* Just go try again w/normal header read */
+ dhd_os_sdunlock_rxq(bus->dhd);
+ continue;
+ }
+ } else {
+ if (bus->bus == SPI_BUS)
+ bus->usebufpool = TRUE;
+
+ ASSERT(!PKTLINK(pkt));
+ PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
+ rxbuf = (uint8 *)PKTDATA(osh, pkt);
+ /* Read the entire frame */
+ sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh),
+ SDIO_FUNC_2,
+ F2SYNC, rxbuf, rdlen,
+ pkt, NULL, NULL);
+ bus->f2rxdata++;
+ ASSERT(sdret != BCME_PENDING);
+
+ if (sdret < 0) {
+ DHD_ERROR(("%s (nextlen): read %d bytes failed: %d\n",
+ __FUNCTION__, rdlen, sdret));
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+ bus->dhd->rx_errors++;
+ dhd_os_sdunlock_rxq(bus->dhd);
+ /* Force retry w/normal header read. Don't attempt NAK for
+ * gSPI
+ */
+ dhdsdio_rxfail(bus, TRUE,
+ (bus->bus == SPI_BUS) ? FALSE : TRUE);
+ continue;
+ }
+ }
+ dhd_os_sdunlock_rxq(bus->dhd);
+
+ /* Now check the header */
+ bcopy(rxbuf, bus->rxhdr, SDPCM_HDRLEN);
+
+ /* Extract hardware header fields */
+ len = ltoh16_ua(bus->rxhdr);
+ check = ltoh16_ua(bus->rxhdr + sizeof(uint16));
+
+ /* All zeros means readahead info was bad */
+ if (!(len|check)) {
+ DHD_INFO(("%s (nextlen): read zeros in HW header???\n",
+ __FUNCTION__));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ GSPI_PR55150_BAILOUT;
+ continue;
+ }
+
+ /* Validate check bytes */
+ if ((uint16)~(len^check)) {
+ DHD_ERROR(("%s (nextlen): HW hdr error: nextlen/len/check"
+ " 0x%04x/0x%04x/0x%04x\n", __FUNCTION__, nextlen,
+ len, check));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ bus->rx_badhdr++;
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ GSPI_PR55150_BAILOUT;
+ continue;
+ }
+
+ /* Validate frame length */
+ if (len < SDPCM_HDRLEN) {
+ DHD_ERROR(("%s (nextlen): HW hdr length invalid: %d\n",
+ __FUNCTION__, len));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ GSPI_PR55150_BAILOUT;
+ continue;
+ }
+
+ /* Check for consistency with readahead info */
+ len_consistent = (nextlen != (ROUNDUP(len, 16) >> 4));
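+			/* Note: len_consistent is TRUE when the readahead length and the
+			 * hardware header length disagree.
+			 */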
+ if (len_consistent) {
+ /* Mismatch, force retry w/normal header (may be >4K) */
+ DHD_ERROR(("%s (nextlen): mismatch, nextlen %d len %d rnd %d; "
+ "expected rxseq %d\n",
+ __FUNCTION__, nextlen, len, ROUNDUP(len, 16), rxseq));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ dhdsdio_rxfail(bus, TRUE, (bus->bus == SPI_BUS) ? FALSE : TRUE);
+ GSPI_PR55150_BAILOUT;
+ continue;
+ }
+
+ /* Extract software header fields */
+ chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+ bus->nextlen =
+ bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+ if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+ DHD_INFO(("%s (nextlen): got frame w/nextlen too large"
+ " (%d), seq %d\n", __FUNCTION__, bus->nextlen,
+ seq));
+ bus->nextlen = 0;
+ }
+
+			bus->dhd->rx_readahead_cnt++;
+ /* Handle Flow Control */
+ fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
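+			/* Bits newly set relative to bus->flowcontrol count as XOFF events;
+			 * bits newly cleared count as XON events.
+			 */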
+
+ delta = 0;
+ if (~bus->flowcontrol & fcbits) {
+ bus->fc_xoff++;
+ delta = 1;
+ }
+ if (bus->flowcontrol & ~fcbits) {
+ bus->fc_xon++;
+ delta = 1;
+ }
+
+ if (delta) {
+ bus->fc_rcvd++;
+ bus->flowcontrol = fcbits;
+ }
+
+ /* Check and update sequence number */
+ if (rxseq != seq) {
+ DHD_INFO(("%s (nextlen): rx_seq %d, expected %d\n",
+ __FUNCTION__, seq, rxseq));
+ bus->rx_badseq++;
+ rxseq = seq;
+ }
+
+ /* Check window for sanity */
+ if ((uint8)(txmax - bus->tx_seq) > 0x40) {
+ DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+ __FUNCTION__, txmax, bus->tx_seq));
+ txmax = bus->tx_max;
+ }
+ bus->tx_max = txmax;
+
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+ prhex("Rx Data", rxbuf, len);
+ } else if (DHD_HDRS_ON()) {
+ prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN);
+ }
+#endif
+
+ if (chan == SDPCM_CONTROL_CHANNEL) {
+ if (bus->bus == SPI_BUS) {
+ dhdsdio_read_control(bus, rxbuf, len, doff);
+ if (bus->usebufpool) {
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ }
+ continue;
+ } else {
+ DHD_ERROR(("%s (nextlen): readahead on control"
+ " packet %d?\n", __FUNCTION__, seq));
+ /* Force retry w/normal header read */
+ bus->nextlen = 0;
+ dhdsdio_rxfail(bus, FALSE, TRUE);
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ continue;
+ }
+ }
+
+ if ((bus->bus == SPI_BUS) && !bus->usebufpool) {
+			DHD_ERROR(("Received %d bytes on channel %d; out of rx packet "
+				"buffers or buffer pool not yet allocated.\n", len, chan));
+ continue;
+ }
+
+ /* Validate data offset */
+ if ((doff < SDPCM_HDRLEN) || (doff > len)) {
+ DHD_ERROR(("%s (nextlen): bad data offset %d: HW len %d min %d\n",
+ __FUNCTION__, doff, len, SDPCM_HDRLEN));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ ASSERT(0);
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ continue;
+ }
+
+ /* All done with this one -- now deliver the packet */
+ goto deliver;
+ }
+ /* gSPI frames should not be handled in fractions */
+ if (bus->bus == SPI_BUS) {
+ break;
+ }
+
+ /* Read frame header (hardware and software) */
+ sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ bus->rxhdr, firstread, NULL, NULL, NULL);
+ bus->f2rxhdrs++;
+ ASSERT(sdret != BCME_PENDING);
+
+ if (sdret < 0) {
+ DHD_ERROR(("%s: RXHEADER FAILED: %d\n", __FUNCTION__, sdret));
+ bus->rx_hdrfail++;
+ dhdsdio_rxfail(bus, TRUE, TRUE);
+ continue;
+ }
+
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() || DHD_HDRS_ON()) {
+ prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN);
+ }
+#endif
+
+ /* Extract hardware header fields */
+ len = ltoh16_ua(bus->rxhdr);
+ check = ltoh16_ua(bus->rxhdr + sizeof(uint16));
+
+ /* All zeros means no more frames */
+ if (!(len|check)) {
+ *finished = TRUE;
+ break;
+ }
+
+ /* Validate check bytes */
+ if ((uint16)~(len^check)) {
+ DHD_ERROR(("%s: HW hdr error: len/check 0x%04x/0x%04x\n",
+ __FUNCTION__, len, check));
+ bus->rx_badhdr++;
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ continue;
+ }
+
+ /* Validate frame length */
+ if (len < SDPCM_HDRLEN) {
+ DHD_ERROR(("%s: HW hdr length invalid: %d\n", __FUNCTION__, len));
+ continue;
+ }
+
+ /* Extract software header fields */
+ chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+ /* Validate data offset */
+ if ((doff < SDPCM_HDRLEN) || (doff > len)) {
+ DHD_ERROR(("%s: Bad data offset %d: HW len %d, min %d seq %d\n",
+ __FUNCTION__, doff, len, SDPCM_HDRLEN, seq));
+ bus->rx_badhdr++;
+ ASSERT(0);
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ continue;
+ }
+
+ /* Save the readahead length if there is one */
+ bus->nextlen = bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+ if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+ DHD_INFO(("%s (nextlen): got frame w/nextlen too large (%d), seq %d\n",
+ __FUNCTION__, bus->nextlen, seq));
+ bus->nextlen = 0;
+ }
+
+ /* Handle Flow Control */
+ fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+ delta = 0;
+ if (~bus->flowcontrol & fcbits) {
+ bus->fc_xoff++;
+ delta = 1;
+ }
+ if (bus->flowcontrol & ~fcbits) {
+ bus->fc_xon++;
+ delta = 1;
+ }
+
+ if (delta) {
+ bus->fc_rcvd++;
+ bus->flowcontrol = fcbits;
+ }
+
+ /* Check and update sequence number */
+ if (rxseq != seq) {
+ DHD_INFO(("%s: rx_seq %d, expected %d\n", __FUNCTION__, seq, rxseq));
+ bus->rx_badseq++;
+ rxseq = seq;
+ }
+
+ /* Check window for sanity */
+ if ((uint8)(txmax - bus->tx_seq) > 0x40) {
+ DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+ __FUNCTION__, txmax, bus->tx_seq));
+ txmax = bus->tx_max;
+ }
+ bus->tx_max = txmax;
+
+ /* Call a separate function for control frames */
+ if (chan == SDPCM_CONTROL_CHANNEL) {
+ dhdsdio_read_control(bus, bus->rxhdr, len, doff);
+ continue;
+ }
+
+ ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL) ||
+ (chan == SDPCM_TEST_CHANNEL) || (chan == SDPCM_GLOM_CHANNEL));
+
+ /* Length to read */
+ rdlen = (len > firstread) ? (len - firstread) : 0;
+
+ /* May pad read to blocksize for efficiency */
+ if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+ pad = bus->blocksize - (rdlen % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+ ((rdlen + pad + firstread) < MAX_RX_DATASZ))
+ rdlen += pad;
+ } else if (rdlen % DHD_SDALIGN) {
+ rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+ }
+
+ /* Satisfy length-alignment requirements */
+ if (forcealign && (rdlen & (ALIGNMENT - 1)))
+ rdlen = ROUNDUP(rdlen, ALIGNMENT);
+
+ if ((rdlen + firstread) > MAX_RX_DATASZ) {
+ /* Too long -- skip this frame */
+ DHD_ERROR(("%s: too long: len %d rdlen %d\n", __FUNCTION__, len, rdlen));
+ bus->dhd->rx_errors++; bus->rx_toolong++;
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ continue;
+ }
+
+ dhd_os_sdlock_rxq(bus->dhd);
+ if (!(pkt = PKTGET(osh, (rdlen + firstread + DHD_SDALIGN), FALSE))) {
+ /* Give up on data, request rtx of events */
+ DHD_ERROR(("%s: PKTGET failed: rdlen %d chan %d\n",
+ __FUNCTION__, rdlen, chan));
+ bus->dhd->rx_dropped++;
+ dhd_os_sdunlock_rxq(bus->dhd);
+ dhdsdio_rxfail(bus, FALSE, RETRYCHAN(chan));
+ continue;
+ }
+ dhd_os_sdunlock_rxq(bus->dhd);
+
+ ASSERT(!PKTLINK(pkt));
+
+ /* Leave room for what we already read, and align remainder */
+ ASSERT(firstread < (PKTLEN(osh, pkt)));
+ PKTPULL(osh, pkt, firstread);
+ PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
+
+ /* Read the remaining frame data */
+ sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ ((uint8 *)PKTDATA(osh, pkt)), rdlen, pkt, NULL, NULL);
+ bus->f2rxdata++;
+ ASSERT(sdret != BCME_PENDING);
+
+ if (sdret < 0) {
+ DHD_ERROR(("%s: read %d %s bytes failed: %d\n", __FUNCTION__, rdlen,
+ ((chan == SDPCM_EVENT_CHANNEL) ? "event" :
+ ((chan == SDPCM_DATA_CHANNEL) ? "data" : "test")), sdret));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ bus->dhd->rx_errors++;
+ dhdsdio_rxfail(bus, TRUE, RETRYCHAN(chan));
+ continue;
+ }
+
+ /* Copy the already-read portion */
+ PKTPUSH(osh, pkt, firstread);
+ bcopy(bus->rxhdr, PKTDATA(osh, pkt), firstread);
+
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+ prhex("Rx Data", PKTDATA(osh, pkt), len);
+ }
+#endif
+
+deliver:
+ /* Save superframe descriptor and allocate packet frame */
+ if (chan == SDPCM_GLOM_CHANNEL) {
+ if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
+ DHD_GLOM(("%s: got glom descriptor, %d bytes:\n",
+ __FUNCTION__, len));
+#ifdef DHD_DEBUG
+ if (DHD_GLOM_ON()) {
+ prhex("Glom Data", PKTDATA(osh, pkt), len);
+ }
+#endif
+ PKTSETLEN(osh, pkt, len);
+ ASSERT(doff == SDPCM_HDRLEN);
+ PKTPULL(osh, pkt, SDPCM_HDRLEN);
+ bus->glomd = pkt;
+ } else {
+ DHD_ERROR(("%s: glom superframe w/o descriptor!\n", __FUNCTION__));
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ }
+ continue;
+ }
+
+ /* Fill in packet len and prio, deliver upward */
+ PKTSETLEN(osh, pkt, len);
+ PKTPULL(osh, pkt, doff);
+
+#ifdef SDTEST
+ /* Test channel packets are processed separately */
+ if (chan == SDPCM_TEST_CHANNEL) {
+ dhdsdio_testrcv(bus, pkt, seq);
+ continue;
+ }
+#endif /* SDTEST */
+
+ if (PKTLEN(osh, pkt) == 0) {
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ continue;
+ } else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pkt, reorder_info_buf,
+ &reorder_info_len) != 0) {
+ DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ bus->dhd->rx_errors++;
+ continue;
+ }
+ if (reorder_info_len) {
+ /* Reordering info from the firmware */
+ dhd_process_pkt_reorder_info(bus->dhd, reorder_info_buf, reorder_info_len,
+ &pkt, &pkt_count);
+ if (pkt_count == 0)
+ continue;
+			} else
+				pkt_count = 1;
+
+ /* Unlock during rx call */
+ dhd_os_sdunlock(bus->dhd);
+ dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, chan);
+ dhd_os_sdlock(bus->dhd);
+ }
+ rxcount = maxframes - rxleft;
+#ifdef DHD_DEBUG
+ /* Message if we hit the limit */
+ if (!rxleft && !sdtest)
+ DHD_DATA(("%s: hit rx limit of %d frames\n", __FUNCTION__, maxframes));
+ else
+#endif /* DHD_DEBUG */
+ DHD_DATA(("%s: processed %d frames\n", __FUNCTION__, rxcount));
+ /* Back off rxseq if awaiting rtx, update rx_seq */
+ if (bus->rxskip)
+ rxseq--;
+ bus->rx_seq = rxseq;
+
+	if (bus->reqbussleep) {
+ dhdsdio_bussleep(bus, TRUE);
+ bus->reqbussleep = FALSE;
+ }
+ bus->readframes = FALSE;
+
+ return rxcount;
+}
+
+static uint32
+dhdsdio_hostmail(dhd_bus_t *bus)
+{
+ sdpcmd_regs_t *regs = bus->regs;
+ uint32 intstatus = 0;
+ uint32 hmb_data;
+ uint8 fcbits;
+ uint retries = 0;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
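+	/* Bits accumulated in intstatus here (e.g. frame-available once a NAK has
+	 * been handled) are returned for dhdsdio_dpc to merge into its pending status.
+	 */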
+ /* Read mailbox data and ack that we did so */
+ R_SDREG(hmb_data, &regs->tohostmailboxdata, retries);
+ if (retries <= retry_limit)
+ W_SDREG(SMB_INT_ACK, &regs->tosbmailbox, retries);
+ bus->f1regdata += 2;
+
+ /* Dongle recomposed rx frames, accept them again */
+ if (hmb_data & HMB_DATA_NAKHANDLED) {
+ DHD_INFO(("Dongle reports NAK handled, expect rtx of %d\n", bus->rx_seq));
+ if (!bus->rxskip) {
+ DHD_ERROR(("%s: unexpected NAKHANDLED!\n", __FUNCTION__));
+ }
+ bus->rxskip = FALSE;
+ intstatus |= FRAME_AVAIL_MASK(bus);
+ }
+
+ /*
+ * DEVREADY does not occur with gSPI.
+ */
+ if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
+ bus->sdpcm_ver = (hmb_data & HMB_DATA_VERSION_MASK) >> HMB_DATA_VERSION_SHIFT;
+ if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
+ DHD_ERROR(("Version mismatch, dongle reports %d, expecting %d\n",
+ bus->sdpcm_ver, SDPCM_PROT_VERSION));
+ else
+ DHD_INFO(("Dongle ready, protocol version %d\n", bus->sdpcm_ver));
+ /* make sure for the SDIO_DEVICE_RXDATAINT_MODE_1 corecontrol is proper */
+ if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) &&
+ (bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_1)) {
+ uint32 val;
+
+ val = R_REG(bus->dhd->osh, &bus->regs->corecontrol);
+ val &= ~CC_XMTDATAAVAIL_MODE;
+ val |= CC_XMTDATAAVAIL_CTRL;
+ W_REG(bus->dhd->osh, &bus->regs->corecontrol, val);
+
+ val = R_REG(bus->dhd->osh, &bus->regs->corecontrol);
+ }
+
+#ifdef DHD_DEBUG
+ /* Retrieve console state address now that firmware should have updated it */
+ {
+ sdpcm_shared_t shared;
+ if (dhdsdio_readshared(bus, &shared) == 0)
+ bus->console_addr = shared.console_addr;
+ }
+#endif /* DHD_DEBUG */
+ }
+
+ /*
+	 * Flow control has been moved into the RX headers and this out-of-band
+	 * method is no longer used.  Leave this here in case it is still needed
+	 * for backward compatibility with older dongles.
+ */
+ if (hmb_data & HMB_DATA_FC) {
+ fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >> HMB_DATA_FCDATA_SHIFT;
+
+ if (fcbits & ~bus->flowcontrol)
+ bus->fc_xoff++;
+ if (bus->flowcontrol & ~fcbits)
+ bus->fc_xon++;
+
+ bus->fc_rcvd++;
+ bus->flowcontrol = fcbits;
+ }
+
+#ifdef DHD_DEBUG
+ /* At least print a message if FW halted */
+ if (hmb_data & HMB_DATA_FWHALT) {
+ DHD_ERROR(("INTERNAL ERROR: FIRMWARE HALTED\n"));
+ dhdsdio_checkdied(bus, NULL, 0);
+ }
+#endif /* DHD_DEBUG */
+
+ /* Shouldn't be any others */
+ if (hmb_data & ~(HMB_DATA_DEVREADY |
+ HMB_DATA_FWHALT |
+ HMB_DATA_NAKHANDLED |
+ HMB_DATA_FC |
+ HMB_DATA_FWREADY |
+ HMB_DATA_FCDATA_MASK |
+ HMB_DATA_VERSION_MASK)) {
+ DHD_ERROR(("Unknown mailbox data content: 0x%02x\n", hmb_data));
+ }
+
+ return intstatus;
+}
+
+static bool
+dhdsdio_dpc(dhd_bus_t *bus)
+{
+ bcmsdh_info_t *sdh = bus->sdh;
+ sdpcmd_regs_t *regs = bus->regs;
+ uint32 intstatus, newstatus = 0;
+ uint retries = 0;
+ uint rxlimit = dhd_rxbound; /* Rx frames to read before resched */
+ uint txlimit = dhd_txbound; /* Tx frames to send before resched */
+ uint framecnt = 0; /* Temporary counter of tx/rx frames */
+ bool rxdone = TRUE; /* Flag for no more read data */
+ bool resched = FALSE; /* Flag indicating resched wanted */
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus->dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
+ bus->intstatus = 0;
+ return 0;
+ }
+
+ /* Start with leftover status bits */
+ intstatus = bus->intstatus;
+
+ dhd_os_sdlock(bus->dhd);
+
+ if (!SLPAUTO_ENAB(bus) && !KSO_ENAB(bus)) {
+ DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+ goto exit;
+ }
+
+ /* If waiting for HTAVAIL, check status */
+ if (!SLPAUTO_ENAB(bus) && (bus->clkstate == CLK_PENDING)) {
+ int err;
+ uint8 clkctl, devctl = 0;
+
+#ifdef DHD_DEBUG
+ /* Check for inconsistent device control */
+ devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+ if (err) {
+ DHD_ERROR(("%s: error reading DEVCTL: %d\n", __FUNCTION__, err));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ } else {
+ ASSERT(devctl & SBSDIO_DEVCTL_CA_INT_ONLY);
+ }
+#endif /* DHD_DEBUG */
+
+ /* Read CSR, if clock on switch to AVAIL, else ignore */
+ clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (err) {
+ DHD_ERROR(("%s: error reading CSR: %d\n", __FUNCTION__, err));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ }
+
+ DHD_INFO(("DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n", devctl, clkctl));
+
+ if (SBSDIO_HTAV(clkctl)) {
+ devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+ if (err) {
+ DHD_ERROR(("%s: error reading DEVCTL: %d\n",
+ __FUNCTION__, err));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ }
+ devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+ if (err) {
+ DHD_ERROR(("%s: error writing DEVCTL: %d\n",
+ __FUNCTION__, err));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ }
+ bus->clkstate = CLK_AVAIL;
+ } else {
+ goto clkwait;
+ }
+ }
+
+ BUS_WAKE(bus);
+
+ /* Make sure backplane clock is on */
+ dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
+ if (bus->clkstate != CLK_AVAIL)
+ goto clkwait;
+
+ /* Pending interrupt indicates new device status */
+ if (bus->ipend) {
+ bus->ipend = FALSE;
+ R_SDREG(newstatus, &regs->intstatus, retries);
+ bus->f1regdata++;
+ if (bcmsdh_regfail(bus->sdh))
+ newstatus = 0;
+ newstatus &= bus->hostintmask;
+ bus->fcstate = !!(newstatus & I_HMB_FC_STATE);
+ if (newstatus) {
+ bus->f1regdata++;
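+			/* In rx-int mode 0, skip the intstatus write-back when only
+			 * XMTDATA_AVAIL is indicated.
+			 */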
+			if ((bus->rxint_mode != SDIO_DEVICE_RXDATAINT_MODE_0) ||
+			    (newstatus != I_XMTDATA_AVAIL))
+				W_SDREG(newstatus, &regs->intstatus, retries);
+ }
+ }
+
+ /* Merge new bits with previous */
+ intstatus |= newstatus;
+ bus->intstatus = 0;
+
+ /* Handle flow-control change: read new state in case our ack
+ * crossed another change interrupt. If change still set, assume
+ * FC ON for safety, let next loop through do the debounce.
+ */
+ if (intstatus & I_HMB_FC_CHANGE) {
+ intstatus &= ~I_HMB_FC_CHANGE;
+ W_SDREG(I_HMB_FC_CHANGE, &regs->intstatus, retries);
+ R_SDREG(newstatus, &regs->intstatus, retries);
+ bus->f1regdata += 2;
+ bus->fcstate = !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
+ intstatus |= (newstatus & bus->hostintmask);
+ }
+
+ /* Just being here means nothing more to do for chipactive */
+ if (intstatus & I_CHIPACTIVE) {
+ /* ASSERT(bus->clkstate == CLK_AVAIL); */
+ intstatus &= ~I_CHIPACTIVE;
+ }
+
+ /* Handle host mailbox indication */
+ if (intstatus & I_HMB_HOST_INT) {
+ intstatus &= ~I_HMB_HOST_INT;
+ intstatus |= dhdsdio_hostmail(bus);
+ }
+
+ /* Generally don't ask for these, can get CRC errors... */
+ if (intstatus & I_WR_OOSYNC) {
+ DHD_ERROR(("Dongle reports WR_OOSYNC\n"));
+ intstatus &= ~I_WR_OOSYNC;
+ }
+
+ if (intstatus & I_RD_OOSYNC) {
+ DHD_ERROR(("Dongle reports RD_OOSYNC\n"));
+ intstatus &= ~I_RD_OOSYNC;
+ }
+
+ if (intstatus & I_SBINT) {
+ DHD_ERROR(("Dongle reports SBINT\n"));
+ intstatus &= ~I_SBINT;
+ }
+
+ /* Would be active due to wake-wlan in gSPI */
+ if (intstatus & I_CHIPACTIVE) {
+ DHD_INFO(("Dongle reports CHIPACTIVE\n"));
+ intstatus &= ~I_CHIPACTIVE;
+ }
+
+ /* Ignore frame indications if rxskip is set */
+ if (bus->rxskip) {
+ intstatus &= ~FRAME_AVAIL_MASK(bus);
+ }
+
+ /* On frame indication, read available frames */
+ if (PKT_AVAILABLE(bus, intstatus)) {
+ framecnt = dhdsdio_readframes(bus, rxlimit, &rxdone);
+ if (rxdone || bus->rxskip)
+ intstatus &= ~FRAME_AVAIL_MASK(bus);
+ rxlimit -= MIN(framecnt, rxlimit);
+ }
+
+ /* Keep still-pending events for next scheduling */
+ bus->intstatus = intstatus;
+
+clkwait:
+ /* Re-enable interrupts to detect new device events (mailbox, rx frame)
+ * or clock availability. (Allows tx loop to check ipend if desired.)
+ * (Unless register access seems hosed, as we may not be able to ACK...)
+ */
+ if (bus->intr && bus->intdis && !bcmsdh_regfail(sdh)) {
+ DHD_INTR(("%s: enable SDIO interrupts, rxdone %d framecnt %d\n",
+ __FUNCTION__, rxdone, framecnt));
+ bus->intdis = FALSE;
+#if defined(OOB_INTR_ONLY)
+ bcmsdh_oob_intr_set(1);
+#endif /* (OOB_INTR_ONLY) */
+ bcmsdh_intr_enable(sdh);
+ }
+
+ if (TXCTLOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL)) {
+ int ret, i;
+
+ ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ (uint8 *)bus->ctrl_frame_buf, (uint32)bus->ctrl_frame_len,
+ NULL, NULL, NULL);
+ ASSERT(ret != BCME_PENDING);
+ if (ret == BCME_NODEVICE) {
+ DHD_ERROR(("%s: Device asleep already\n", __FUNCTION__));
+ } else if (ret < 0) {
+ /* On failure, abort the command and terminate the frame */
+ DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n",
+ __FUNCTION__, ret));
+ bus->tx_sderrs++;
+
+ bcmsdh_abort(sdh, SDIO_FUNC_2);
+
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, NULL);
+ bus->f1regdata++;
+
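+			/* Poll the write-frame byte count registers (up to 3 times)
+			 * until both read back zero.
+			 */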
+ for (i = 0; i < 3; i++) {
+ uint8 hi, lo;
+ hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+ lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+ bus->f1regdata += 2;
+ if ((hi == 0) && (lo == 0))
+ break;
+ }
+ }
+ if (ret == 0) {
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+ }
+
+ bus->ctrl_frame_stat = FALSE;
+ dhd_wait_event_wakeup(bus->dhd);
+ }
+ /* Send queued frames (limit 1 if rx may still be pending) */
+ else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate &&
+ pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit && DATAOK(bus)) {
+ framecnt = rxdone ? txlimit : MIN(txlimit, dhd_txminmax);
+ framecnt = dhdsdio_sendfromq(bus, framecnt);
+ txlimit -= framecnt;
+ }
+ /* Resched the DPC if ctrl cmd is pending on bus credit */
+ if (bus->ctrl_frame_stat)
+ resched = TRUE;
+
+ /* Resched if events or tx frames are pending, else await next interrupt */
+ /* On failed register access, all bets are off: no resched or interrupts */
+ if ((bus->dhd->busstate == DHD_BUS_DOWN) || bcmsdh_regfail(sdh)) {
+ if ((bus->sih->buscorerev >= 12) && !(dhdsdio_sleepcsr_get(bus) &
+ SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
+ /* Bus failed because of KSO */
+ DHD_ERROR(("%s: Bus failed due to KSO\n", __FUNCTION__));
+ bus->kso = FALSE;
+ } else {
+ DHD_ERROR(("%s: failed backplane access over SDIO, halting operation\n",
+ __FUNCTION__));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ bus->intstatus = 0;
+ }
+ } else if (bus->clkstate == CLK_PENDING) {
+ /* Awaiting I_CHIPACTIVE; don't resched */
+ } else if (bus->intstatus || bus->ipend ||
+ (!bus->fcstate && pktq_mlen(&bus->txq, ~bus->flowcontrol) && DATAOK(bus)) ||
+ PKT_AVAILABLE(bus, bus->intstatus)) { /* Read multiple frames */
+ resched = TRUE;
+ }
+
+ bus->dpc_sched = resched;
+
+ /* If we're done for now, turn off clock request. */
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && (bus->clkstate != CLK_PENDING)) {
+ bus->activity = FALSE;
+ dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+ }
+
+exit:
+ dhd_os_sdunlock(bus->dhd);
+ return resched;
+}
+
+bool
+dhd_bus_dpc(struct dhd_bus *bus)
+{
+ bool resched;
+
+ /* Call the DPC directly. */
+ DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__));
+ resched = dhdsdio_dpc(bus);
+
+ return resched;
+}
+
+void
+dhdsdio_isr(void *arg)
+{
+ dhd_bus_t *bus = (dhd_bus_t*)arg;
+ bcmsdh_info_t *sdh;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (!bus) {
+		DHD_ERROR(("%s: bus is null pointer, exit\n", __FUNCTION__));
+ return;
+ }
+ sdh = bus->sdh;
+
+ if (bus->dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+ return;
+ }
+
+ /* Count the interrupt call */
+ bus->intrcount++;
+ bus->ipend = TRUE;
+
+ /* Shouldn't get this interrupt if we're sleeping? */
+ if (!SLPAUTO_ENAB(bus)) {
+ if (bus->sleeping) {
+ DHD_ERROR(("INTERRUPT WHILE SLEEPING??\n"));
+ return;
+ } else if (!KSO_ENAB(bus)) {
+ DHD_ERROR(("ISR in devsleep 1\n"));
+ }
+ }
+
+ /* Disable additional interrupts (is this needed now)? */
+ if (bus->intr) {
+ DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+ } else {
+ DHD_ERROR(("dhdsdio_isr() w/o interrupt configured!\n"));
+ }
+
+ bcmsdh_intr_disable(sdh);
+ bus->intdis = TRUE;
+
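+	/* In SDIO_ISR_THREAD builds the DPC runs inline here under a wake lock
+	 * until it reports no reschedule is needed; otherwise it is scheduled.
+	 */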
+#if defined(SDIO_ISR_THREAD)
+ DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__));
+ DHD_OS_WAKE_LOCK(bus->dhd);
+ while (dhdsdio_dpc(bus));
+ DHD_OS_WAKE_UNLOCK(bus->dhd);
+#else
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+#endif
+
+}
+
+#ifdef SDTEST
+static void
+dhdsdio_pktgen_init(dhd_bus_t *bus)
+{
+ /* Default to specified length, or full range */
+ if (dhd_pktgen_len) {
+ bus->pktgen_maxlen = MIN(dhd_pktgen_len, MAX_PKTGEN_LEN);
+ bus->pktgen_minlen = bus->pktgen_maxlen;
+ } else {
+ bus->pktgen_maxlen = MAX_PKTGEN_LEN;
+ bus->pktgen_minlen = 0;
+ }
+ bus->pktgen_len = (uint16)bus->pktgen_minlen;
+
+ /* Default to per-watchdog burst with 10s print time */
+ bus->pktgen_freq = 1;
+ bus->pktgen_print = dhd_watchdog_ms ? 10000 / dhd_watchdog_ms : 0;
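+	/* Scale the configured pktgen rate by the watchdog interval, rounding up */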
+ bus->pktgen_count = (dhd_pktgen * dhd_watchdog_ms + 999) / 1000;
+
+ /* Default to echo mode */
+ bus->pktgen_mode = DHD_PKTGEN_ECHO;
+ bus->pktgen_stop = 1;
+}
+
+static void
+dhdsdio_pktgen(dhd_bus_t *bus)
+{
+ void *pkt;
+ uint8 *data;
+ uint pktcount;
+ uint fillbyte;
+ osl_t *osh = bus->dhd->osh;
+ uint16 len;
+
+ /* Display current count if appropriate */
+ if (bus->pktgen_print && (++bus->pktgen_ptick >= bus->pktgen_print)) {
+ bus->pktgen_ptick = 0;
+ printf("%s: send attempts %d rcvd %d\n",
+ __FUNCTION__, bus->pktgen_sent, bus->pktgen_rcvd);
+ }
+
+ /* For recv mode, just make sure dongle has started sending */
+ if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
+ if (bus->pktgen_rcv_state == PKTGEN_RCV_IDLE) {
+ bus->pktgen_rcv_state = PKTGEN_RCV_ONGOING;
+ dhdsdio_sdtest_set(bus, (uint8)bus->pktgen_total);
+ }
+ return;
+ }
+
+ /* Otherwise, generate or request the specified number of packets */
+ for (pktcount = 0; pktcount < bus->pktgen_count; pktcount++) {
+ /* Stop if total has been reached */
+ if (bus->pktgen_total && (bus->pktgen_sent >= bus->pktgen_total)) {
+ bus->pktgen_count = 0;
+ break;
+ }
+
+ /* Allocate an appropriate-sized packet */
+ len = bus->pktgen_len;
+ if (!(pkt = PKTGET(osh, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN),
+		    TRUE))) {
+ DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__));
+ break;
+ }
+ PKTALIGN(osh, pkt, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN);
+ data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+
+ /* Write test header cmd and extra based on mode */
+ switch (bus->pktgen_mode) {
+ case DHD_PKTGEN_ECHO:
+ *data++ = SDPCM_TEST_ECHOREQ;
+ *data++ = (uint8)bus->pktgen_sent;
+ break;
+
+ case DHD_PKTGEN_SEND:
+ *data++ = SDPCM_TEST_DISCARD;
+ *data++ = (uint8)bus->pktgen_sent;
+ break;
+
+ case DHD_PKTGEN_RXBURST:
+ *data++ = SDPCM_TEST_BURST;
+ *data++ = (uint8)bus->pktgen_count;
+ break;
+
+ default:
+ DHD_ERROR(("Unrecognized pktgen mode %d\n", bus->pktgen_mode));
+ PKTFREE(osh, pkt, TRUE);
+ bus->pktgen_count = 0;
+ return;
+ }
+
+ /* Write test header length field */
+ *data++ = (len >> 0);
+ *data++ = (len >> 8);
+
+ /* Then fill in the remainder -- N/A for burst, but who cares... */
+ for (fillbyte = 0; fillbyte < len; fillbyte++)
+ *data++ = SDPCM_TEST_FILL(fillbyte, (uint8)bus->pktgen_sent);
+
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+ data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+ prhex("dhdsdio_pktgen: Tx Data", data, PKTLEN(osh, pkt) - SDPCM_HDRLEN);
+ }
+#endif
+
+ /* Send it */
+ if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE)) {
+ bus->pktgen_fail++;
+ if (bus->pktgen_stop && bus->pktgen_stop == bus->pktgen_fail)
+ bus->pktgen_count = 0;
+ }
+ bus->pktgen_sent++;
+
+ /* Bump length if not fixed, wrap at max */
+ if (++bus->pktgen_len > bus->pktgen_maxlen)
+ bus->pktgen_len = (uint16)bus->pktgen_minlen;
+
+ /* Special case for burst mode: just send one request! */
+ if (bus->pktgen_mode == DHD_PKTGEN_RXBURST)
+ break;
+ }
+}
+
+static void
+dhdsdio_sdtest_set(dhd_bus_t *bus, uint8 count)
+{
+ void *pkt;
+ uint8 *data;
+ osl_t *osh = bus->dhd->osh;
+
+ /* Allocate the packet */
+ if (!(pkt = PKTGET(osh, SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN, TRUE))) {
+ DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__));
+ return;
+ }
+ PKTALIGN(osh, pkt, (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN);
+ data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+
+ /* Fill in the test header */
+ *data++ = SDPCM_TEST_SEND;
+ *data++ = count;
+ *data++ = (bus->pktgen_maxlen >> 0);
+ *data++ = (bus->pktgen_maxlen >> 8);
+
+ /* Send it */
+ if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE))
+ bus->pktgen_fail++;
+}
+
+
+static void
+dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq)
+{
+ osl_t *osh = bus->dhd->osh;
+ uint8 *data;
+ uint pktlen;
+
+ uint8 cmd;
+ uint8 extra;
+ uint16 len;
+ uint16 offset;
+
+ /* Check for min length */
+ if ((pktlen = PKTLEN(osh, pkt)) < SDPCM_TEST_HDRLEN) {
+		DHD_ERROR(("dhdsdio_testrcv: toss runt frame, pktlen %d\n", pktlen));
+ PKTFREE(osh, pkt, FALSE);
+ return;
+ }
+
+ /* Extract header fields */
+ data = PKTDATA(osh, pkt);
+ cmd = *data++;
+ extra = *data++;
+ len = *data++; len += *data++ << 8;
+ DHD_TRACE(("%s:cmd:%d, xtra:%d,len:%d\n", __FUNCTION__, cmd, extra, len));
+ /* Check length for relevant commands */
+ if (cmd == SDPCM_TEST_DISCARD || cmd == SDPCM_TEST_ECHOREQ || cmd == SDPCM_TEST_ECHORSP) {
+ if (pktlen != len + SDPCM_TEST_HDRLEN) {
+ DHD_ERROR(("dhdsdio_testrcv: frame length mismatch, pktlen %d seq %d"
+ " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len));
+ PKTFREE(osh, pkt, FALSE);
+ return;
+ }
+ }
+
+ /* Process as per command */
+ switch (cmd) {
+ case SDPCM_TEST_ECHOREQ:
+ /* Rx->Tx turnaround ok (even on NDIS w/current implementation) */
+ *(uint8 *)(PKTDATA(osh, pkt)) = SDPCM_TEST_ECHORSP;
+ if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE) == 0) {
+ bus->pktgen_sent++;
+ } else {
+ bus->pktgen_fail++;
+ PKTFREE(osh, pkt, FALSE);
+ }
+ bus->pktgen_rcvd++;
+ break;
+
+ case SDPCM_TEST_ECHORSP:
+ if (bus->ext_loop) {
+ PKTFREE(osh, pkt, FALSE);
+ bus->pktgen_rcvd++;
+ break;
+ }
+
+ for (offset = 0; offset < len; offset++, data++) {
+ if (*data != SDPCM_TEST_FILL(offset, extra)) {
+ DHD_ERROR(("dhdsdio_testrcv: echo data mismatch: "
+ "offset %d (len %d) expect 0x%02x rcvd 0x%02x\n",
+ offset, len, SDPCM_TEST_FILL(offset, extra), *data));
+ break;
+ }
+ }
+ PKTFREE(osh, pkt, FALSE);
+ bus->pktgen_rcvd++;
+ break;
+
+ case SDPCM_TEST_DISCARD:
+ {
+ int i = 0;
+ uint8 *prn = data;
+ uint8 testval = extra;
+				for (i = 0; i < len; i++, prn++, testval++) {
+					if (*prn != testval) {
+						DHD_ERROR(("DIErr@Pkt#:%d,Ix:%d, expected:0x%x, got:0x%x\n",
+							bus->pktgen_rcvd_rcvsession, i, testval, *prn));
+					}
+				}
+ }
+ PKTFREE(osh, pkt, FALSE);
+ bus->pktgen_rcvd++;
+ break;
+
+ case SDPCM_TEST_BURST:
+ case SDPCM_TEST_SEND:
+ default:
+ DHD_INFO(("dhdsdio_testrcv: unsupported or unknown command, pktlen %d seq %d"
+ " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len));
+ PKTFREE(osh, pkt, FALSE);
+ break;
+ }
+
+ /* For recv mode, stop at limit (and tell dongle to stop sending) */
+ if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
+ if (bus->pktgen_rcv_state != PKTGEN_RCV_IDLE) {
+ bus->pktgen_rcvd_rcvsession++;
+
+ if (bus->pktgen_total &&
+ (bus->pktgen_rcvd_rcvsession >= bus->pktgen_total)) {
+ bus->pktgen_count = 0;
+ DHD_ERROR(("Pktgen:rcv test complete!\n"));
+ bus->pktgen_rcv_state = PKTGEN_RCV_IDLE;
+ dhdsdio_sdtest_set(bus, FALSE);
+ bus->pktgen_rcvd_rcvsession = 0;
+ }
+ }
+ }
+}
+#endif /* SDTEST */
+
+extern void
+dhd_disable_intr(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus;
+ bus = dhdp->bus;
+ bcmsdh_intr_disable(bus->sdh);
+}
+
+extern bool
+dhd_bus_watchdog(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus;
+
+ DHD_TIMER(("%s: Enter\n", __FUNCTION__));
+
+ bus = dhdp->bus;
+
+ if (bus->dhd->dongle_reset)
+ return FALSE;
+
+ /* Ignore the timer if simulating bus down */
+ if (!SLPAUTO_ENAB(bus) && bus->sleeping)
+ return FALSE;
+
+ if (dhdp->busstate == DHD_BUS_DOWN)
+ return FALSE;
+
+ /* Poll period: check device if appropriate. */
+ if (!SLPAUTO_ENAB(bus) && (bus->poll && (++bus->polltick >= bus->pollrate))) {
+ uint32 intstatus = 0;
+
+ /* Reset poll tick */
+ bus->polltick = 0;
+
+ /* Check device if no interrupts */
+ if (!bus->intr || (bus->intrcount == bus->lastintrs)) {
+
+ if (!bus->dpc_sched) {
+ uint8 devpend;
+ devpend = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0,
+ SDIOD_CCCR_INTPEND, NULL);
+ intstatus = devpend & (INTR_STATUS_FUNC1 | INTR_STATUS_FUNC2);
+ }
+
+ /* If there is something, make like the ISR and schedule the DPC */
+ if (intstatus) {
+ bus->pollcnt++;
+ bus->ipend = TRUE;
+ if (bus->intr) {
+ bcmsdh_intr_disable(bus->sdh);
+ }
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+
+ }
+ }
+
+ /* Update interrupt tracking */
+ bus->lastintrs = bus->intrcount;
+ }
+
+#ifdef DHD_DEBUG
+ /* Poll for console output periodically */
+ if (dhdp->busstate == DHD_BUS_DATA && dhd_console_ms != 0) {
+ bus->console.count += dhd_watchdog_ms;
+ if (bus->console.count >= dhd_console_ms) {
+ bus->console.count -= dhd_console_ms;
+ /* Make sure backplane clock is on */
+ if (SLPAUTO_ENAB(bus))
+ dhdsdio_bussleep(bus, FALSE);
+ else
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ if (dhdsdio_readconsole(bus) < 0)
+ dhd_console_ms = 0; /* On error, stop trying */
+ }
+ }
+#endif /* DHD_DEBUG */
+
+#ifdef SDTEST
+ /* Generate packets if configured */
+ if (bus->pktgen_count && (++bus->pktgen_tick >= bus->pktgen_freq)) {
+ /* Make sure backplane clock is on */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ bus->pktgen_tick = 0;
+ dhdsdio_pktgen(bus);
+ }
+#endif
+
+ /* On idle timeout clear activity flag and/or turn off clock */
+ if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) {
+ if (++bus->idlecount >= bus->idletime) {
+ bus->idlecount = 0;
+ if (bus->activity) {
+ bus->activity = FALSE;
+ if (SLPAUTO_ENAB(bus)) {
+ if (!bus->readframes)
+ dhdsdio_bussleep(bus, TRUE);
+ else
+ bus->reqbussleep = TRUE;
+ }
+ else
+ dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+ }
+ }
+ }
+
+ return bus->ipend;
+}
+
+#ifdef DHD_DEBUG
+extern int
+dhd_bus_console_in(dhd_pub_t *dhdp, uchar *msg, uint msglen)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ uint32 addr, val;
+ int rv;
+ void *pkt;
+
+ /* Address could be zero if CONSOLE := 0 in dongle Makefile */
+ if (bus->console_addr == 0)
+ return BCME_UNSUPPORTED;
+
+ /* Exclusive bus access */
+ dhd_os_sdlock(bus->dhd);
+
+ /* Don't allow input if dongle is in reset */
+ if (bus->dhd->dongle_reset) {
+ dhd_os_sdunlock(bus->dhd);
+ return BCME_NOTREADY;
+ }
+
+ /* Request clock to allow SDIO accesses */
+ BUS_WAKE(bus);
+ /* No pend allowed since txpkt is called later, ht clk has to be on */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ /* Zero cbuf_index */
+ addr = bus->console_addr + OFFSETOF(hndrte_cons_t, cbuf_idx);
+ val = htol32(0);
+ if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+ goto done;
+
+ /* Write message into cbuf */
+ addr = bus->console_addr + OFFSETOF(hndrte_cons_t, cbuf);
+ if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
+ goto done;
+
+ /* Write length into vcons_in */
+ addr = bus->console_addr + OFFSETOF(hndrte_cons_t, vcons_in);
+ val = htol32(msglen);
+ if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+ goto done;
+
+ /* Bump dongle by sending an empty packet on the event channel.
+ * sdpcm_sendup (RX) checks for virtual console input.
+ */
+ if ((pkt = PKTGET(bus->dhd->osh, 4 + SDPCM_RESERVE, TRUE)) != NULL)
+ dhdsdio_txpkt(bus, pkt, SDPCM_EVENT_CHANNEL, TRUE);
+
+done:
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ bus->activity = FALSE;
+ dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+ }
+
+ dhd_os_sdunlock(bus->dhd);
+
+ return rv;
+}
+#endif /* DHD_DEBUG */
+
+#ifdef DHD_DEBUG
+static void
+dhd_dump_cis(uint fn, uint8 *cis)
+{
+ uint byte, tag, tdata;
+ DHD_INFO(("Function %d CIS:\n", fn));
+
+ for (tdata = byte = 0; byte < SBSDIO_CIS_SIZE_LIMIT; byte++) {
+ if ((byte % 16) == 0)
+ DHD_INFO((" "));
+ DHD_INFO(("%02x ", cis[byte]));
+ if ((byte % 16) == 15)
+ DHD_INFO(("\n"));
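+		/* The CIS is a chain of tag/length tuples: when tdata reaches zero the
+		 * current byte is a tag (0xff terminates, 0x00 is a null tuple), and a
+		 * non-null tag is followed by a one-byte tuple length.
+		 */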
+ if (!tdata--) {
+ tag = cis[byte];
+ if (tag == 0xff)
+ break;
+ else if (!tag)
+ tdata = 0;
+ else if ((byte + 1) < SBSDIO_CIS_SIZE_LIMIT)
+ tdata = cis[byte + 1] + 1;
+ else
+ DHD_INFO(("]"));
+ }
+ }
+ if ((byte % 16) != 15)
+ DHD_INFO(("\n"));
+}
+#endif /* DHD_DEBUG */
+
+static bool
+dhdsdio_chipmatch(uint16 chipid)
+{
+ if (chipid == BCM4325_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM4329_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM4315_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM4319_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM4336_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM4330_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM43237_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM43362_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM4314_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM4334_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM43239_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM4324_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM4335_CHIP_ID)
+ return TRUE;
+ return FALSE;
+}
+
+static void *
+dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot,
+ uint16 func, uint bustype, void *regsva, osl_t * osh, void *sdh)
+{
+ int ret;
+ dhd_bus_t *bus;
+#ifdef GET_CUSTOM_MAC_ENABLE
+ struct ether_addr ea_addr;
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+ /* Init global variables at run-time, not as part of the declaration.
+ * This is required to support init/de-init of the driver. Initialization
+ * of globals as part of the declaration results in non-deterministic
+ * behavior since the value of the globals may be different on the
+ * first time that the driver is initialized vs subsequent initializations.
+ */
+ dhd_txbound = DHD_TXBOUND;
+ dhd_rxbound = DHD_RXBOUND;
+ dhd_alignctl = TRUE;
+ sd1idle = TRUE;
+ dhd_readahead = TRUE;
+ retrydata = FALSE;
+ dhd_doflow = FALSE;
+ dhd_dongle_memsize = 0;
+ dhd_txminmax = DHD_TXMINMAX;
+
+ forcealign = TRUE;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ DHD_INFO(("%s: venid 0x%04x devid 0x%04x\n", __FUNCTION__, venid, devid));
+
+ /* We make assumptions about address window mappings */
+ ASSERT((uintptr)regsva == SI_ENUM_BASE);
+
+ /* BCMSDH passes venid and devid based on CIS parsing -- but low-power start
+ * means early parse could fail, so here we should get either an ID
+ * we recognize OR (-1) indicating we must request power first.
+ */
+ /* Check the Vendor ID */
+ switch (venid) {
+ case 0x0000:
+ case VENDOR_BROADCOM:
+ break;
+ default:
+ DHD_ERROR(("%s: unknown vendor: 0x%04x\n",
+ __FUNCTION__, venid));
+ return NULL;
+ }
+
+ /* Check the Device ID and make sure it's one that we support */
+ switch (devid) {
+ case BCM4325_D11DUAL_ID: /* 4325 802.11a/g id */
+		case BCM4325_D11G_ID:		/* 4325 802.11g 2.4GHz band id */
+		case BCM4325_D11A_ID:		/* 4325 802.11a 5GHz band id */
+ DHD_INFO(("%s: found 4325 Dongle\n", __FUNCTION__));
+ break;
+ case BCM4329_D11N_ID: /* 4329 802.11n dualband device */
+ case BCM4329_D11N2G_ID: /* 4329 802.11n 2.4G device */
+ case BCM4329_D11N5G_ID: /* 4329 802.11n 5G device */
+ case 0x4329:
+ DHD_INFO(("%s: found 4329 Dongle\n", __FUNCTION__));
+ break;
+ case BCM4315_D11DUAL_ID: /* 4315 802.11a/g id */
+ case BCM4315_D11G_ID: /* 4315 802.11g id */
+ case BCM4315_D11A_ID: /* 4315 802.11a id */
+ DHD_INFO(("%s: found 4315 Dongle\n", __FUNCTION__));
+ break;
+ case BCM4319_D11N_ID: /* 4319 802.11n id */
+ case BCM4319_D11N2G_ID: /* 4319 802.11n2g id */
+ case BCM4319_D11N5G_ID: /* 4319 802.11n5g id */
+ DHD_INFO(("%s: found 4319 Dongle\n", __FUNCTION__));
+ break;
+ case 0:
+ DHD_INFO(("%s: allow device id 0, will check chip internals\n",
+ __FUNCTION__));
+ break;
+
+ default:
+ DHD_ERROR(("%s: skipping 0x%04x/0x%04x, not a dongle\n",
+ __FUNCTION__, venid, devid));
+ return NULL;
+ }
+
+ if (osh == NULL) {
+ /* Ask the OS interface part for an OSL handle */
+ if (!(osh = dhd_osl_attach(sdh, DHD_BUS))) {
+ DHD_ERROR(("%s: osl_attach failed!\n", __FUNCTION__));
+ return NULL;
+ }
+ }
+
+ /* Allocate private bus interface state */
+ if (!(bus = MALLOC(osh, sizeof(dhd_bus_t)))) {
+ DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
+ goto fail;
+ }
+ bzero(bus, sizeof(dhd_bus_t));
+ bus->sdh = sdh;
+ bus->cl_devid = (uint16)devid;
+ bus->bus = DHD_BUS;
+ bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
+ bus->usebufpool = FALSE; /* Use bufpool if allocated, else use locally malloced rxbuf */
+
+ /* attach the common module */
+ dhd_common_init(osh);
+
+ /* attempt to attach to the dongle */
+ if (!(dhdsdio_probe_attach(bus, osh, sdh, regsva, devid))) {
+ DHD_ERROR(("%s: dhdsdio_probe_attach failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ /* Attach to the dhd/OS/network interface */
+ if (!(bus->dhd = dhd_attach(osh, bus, SDPCM_RESERVE))) {
+ DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ /* Allocate buffers */
+ if (!(dhdsdio_probe_malloc(bus, osh, sdh))) {
+ DHD_ERROR(("%s: dhdsdio_probe_malloc failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ if (!(dhdsdio_probe_init(bus, osh, sdh))) {
+ DHD_ERROR(("%s: dhdsdio_probe_init failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ if (bus->intr) {
+ /* Register interrupt callback, but mask it (not operational yet). */
+ DHD_INTR(("%s: disable SDIO interrupts (not interested yet)\n", __FUNCTION__));
+ bcmsdh_intr_disable(sdh);
+ if ((ret = bcmsdh_intr_reg(sdh, dhdsdio_isr, bus)) != 0) {
+ DHD_ERROR(("%s: FAILED: bcmsdh_intr_reg returned %d\n",
+ __FUNCTION__, ret));
+ goto fail;
+ }
+ DHD_INTR(("%s: registered SDIO interrupt function ok\n", __FUNCTION__));
+ } else {
+ DHD_INFO(("%s: SDIO interrupt function is NOT registered due to polling mode\n",
+ __FUNCTION__));
+ }
+
+ DHD_INFO(("%s: completed!!\n", __FUNCTION__));
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+	/* Read the MAC address from an external (customer/platform-specific) source */
+ memset(&ea_addr, 0, sizeof(ea_addr));
+ ret = dhd_custom_get_mac_address(ea_addr.octet);
+ if (!ret) {
+ memcpy(bus->dhd->mac.octet, (void *)&ea_addr, ETHER_ADDR_LEN);
+ }
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+ /* if firmware path present try to download and bring up bus */
+ if (dhd_download_fw_on_driverload && (ret = dhd_bus_start(bus->dhd)) != 0) {
+ DHD_ERROR(("%s: dhd_bus_start failed\n", __FUNCTION__));
+ if (ret == BCME_NOTUP)
+ goto fail;
+ }
+ /* Ok, have the per-port tell the stack we're open for business */
+ if (dhd_net_attach(bus->dhd, 0) != 0) {
+ DHD_ERROR(("%s: Net attach failed!!\n", __FUNCTION__));
+ goto fail;
+ }
+
+ return bus;
+
+fail:
+ dhdsdio_release(bus, osh);
+ return NULL;
+}
+
+static bool
+dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva,
+ uint16 devid)
+{
+ int err = 0;
+ uint8 clkctl = 0;
+
+ bus->alp_only = TRUE;
+ bus->sih = NULL;
+
+ /* Return the window to backplane enumeration space for core access */
+ if (dhdsdio_set_siaddr_window(bus, SI_ENUM_BASE)) {
+ DHD_ERROR(("%s: FAILED to return to SI_ENUM_BASE\n", __FUNCTION__));
+ }
+
+#ifdef DHD_DEBUG
+ DHD_ERROR(("F1 signature read @0x18000000=0x%4x\n",
+ bcmsdh_reg_read(bus->sdh, SI_ENUM_BASE, 4)));
+
+#endif /* DHD_DEBUG */
+
+
+	/* Force PLL off until si_attach() programs PLL control regs */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, DHD_INIT_CLKCTL1, &err);
+ if (!err)
+ clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+
+ if (err || ((clkctl & ~SBSDIO_AVBITS) != DHD_INIT_CLKCTL1)) {
+ DHD_ERROR(("dhdsdio_probe: ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
+ err, DHD_INIT_CLKCTL1, clkctl));
+ goto fail;
+ }
+
+#ifdef DHD_DEBUG
+ if (DHD_INFO_ON()) {
+ uint fn, numfn;
+ uint8 *cis[SDIOD_MAX_IOFUNCS];
+ int err = 0;
+
+ numfn = bcmsdh_query_iofnum(sdh);
+ ASSERT(numfn <= SDIOD_MAX_IOFUNCS);
+
+ /* Make sure ALP is available before trying to read CIS */
+ SPINWAIT(((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+ !SBSDIO_ALPAV(clkctl)), PMU_MAX_TRANSITION_DLY);
+
+ /* Now request ALP be put on the bus */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ DHD_INIT_CLKCTL2, &err);
+ OSL_DELAY(65);
+
+ for (fn = 0; fn <= numfn; fn++) {
+ if (!(cis[fn] = MALLOC(osh, SBSDIO_CIS_SIZE_LIMIT))) {
+ DHD_INFO(("dhdsdio_probe: fn %d cis malloc failed\n", fn));
+ break;
+ }
+ bzero(cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+
+ if ((err = bcmsdh_cis_read(sdh, fn, cis[fn], SBSDIO_CIS_SIZE_LIMIT))) {
+ DHD_INFO(("dhdsdio_probe: fn %d cis read err %d\n", fn, err));
+ MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+ break;
+ }
+ dhd_dump_cis(fn, cis[fn]);
+ }
+
+ while (fn-- > 0) {
+ ASSERT(cis[fn]);
+ MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+ }
+
+ if (err) {
+ DHD_ERROR(("dhdsdio_probe: failure reading or parsing CIS\n"));
+ goto fail;
+ }
+ }
+#endif /* DHD_DEBUG */
+
+ /* si_attach() will provide an SI handle and scan the backplane */
+ if (!(bus->sih = si_attach((uint)devid, osh, regsva, DHD_BUS, sdh,
+ &bus->vars, &bus->varsz))) {
+ DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
+ goto fail;
+ }
+
+ bcmsdh_chipinfo(sdh, bus->sih->chip, bus->sih->chiprev);
+
+ if (!dhdsdio_chipmatch((uint16)bus->sih->chip)) {
+ DHD_ERROR(("%s: unsupported chip: 0x%04x\n",
+ __FUNCTION__, bus->sih->chip));
+ goto fail;
+ }
+
+ if (bus->sih->buscorerev >= 12)
+ dhdsdio_clk_kso_init(bus);
+ else
+ bus->kso = TRUE;
+
+ if (CST4330_CHIPMODE_SDIOD(bus->sih->chipst)) {
+ }
+
+ si_sdiod_drive_strength_init(bus->sih, osh, dhd_sdiod_drive_strength);
+
+
+ /* Get info on the ARM and SOCRAM cores... */
+ if (!DHD_NOPMU(bus)) {
+ if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
+ (si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+ bus->armrev = si_corerev(bus->sih);
+ } else {
+ DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
+ goto fail;
+ }
+ if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
+ DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
+ goto fail;
+ }
+
+ bus->ramsize = bus->orig_ramsize;
+ if (dhd_dongle_memsize)
+ dhd_dongle_setmemsize(bus, dhd_dongle_memsize);
+
+ DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d)\n",
+ bus->ramsize, bus->orig_ramsize));
+
+ bus->srmemsize = si_socram_srmem_size(bus->sih);
+ }
+
+ /* ...but normally deal with the SDPCMDEV core */
+ if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)) &&
+ !(bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0))) {
+ DHD_ERROR(("%s: failed to find SDIODEV core!\n", __FUNCTION__));
+ goto fail;
+ }
+ bus->sdpcmrev = si_corerev(bus->sih);
+
+ /* Set core control so an SDIO reset does a backplane reset */
+ OR_REG(osh, &bus->regs->corecontrol, CC_BPRESEN);
+ bus->rxint_mode = SDIO_DEVICE_HMB_RXINT;
+
+ if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) &&
+ (bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_1))
+ {
+ uint32 val;
+
+ val = R_REG(osh, &bus->regs->corecontrol);
+ val &= ~CC_XMTDATAAVAIL_MODE;
+ val |= CC_XMTDATAAVAIL_CTRL;
+ W_REG(osh, &bus->regs->corecontrol, val);
+ }
+
+
+ pktq_init(&bus->txq, (PRIOMASK + 1), QLEN);
+
+ /* Locate an appropriately-aligned portion of hdrbuf */
+ bus->rxhdr = (uint8 *)ROUNDUP((uintptr)&bus->hdrbuf[0], DHD_SDALIGN);
+
+ /* Set the poll and/or interrupt flags */
+ bus->intr = (bool)dhd_intr;
+ if ((bus->poll = (bool)dhd_poll))
+ bus->pollrate = 1;
+
+ return TRUE;
+
+fail:
+ if (bus->sih != NULL)
+ si_detach(bus->sih);
+ return FALSE;
+}
+
+static bool
+dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus->dhd->maxctl) {
+ bus->rxblen = ROUNDUP((bus->dhd->maxctl + SDPCM_HDRLEN), ALIGNMENT) + DHD_SDALIGN;
+ if (!(bus->rxbuf = DHD_OS_PREALLOC(osh, DHD_PREALLOC_RXBUF, bus->rxblen))) {
+ DHD_ERROR(("%s: MALLOC of %d-byte rxbuf failed\n",
+ __FUNCTION__, bus->rxblen));
+ goto fail;
+ }
+ }
+ /* Allocate buffer to receive glomed packet */
+ if (!(bus->databuf = DHD_OS_PREALLOC(osh, DHD_PREALLOC_DATABUF, MAX_DATA_BUF))) {
+ DHD_ERROR(("%s: MALLOC of %d-byte databuf failed\n",
+ __FUNCTION__, MAX_DATA_BUF));
+		/* release rxbuf which was already allocated above */
+		if (bus->rxbuf)
+			DHD_OS_PREFREE(osh, bus->rxbuf, bus->rxblen);
+ goto fail;
+ }
+
+ /* Align the buffer */
+ if ((uintptr)bus->databuf % DHD_SDALIGN)
+ bus->dataptr = bus->databuf + (DHD_SDALIGN - ((uintptr)bus->databuf % DHD_SDALIGN));
+ else
+ bus->dataptr = bus->databuf;
+
+ return TRUE;
+
+fail:
+ return FALSE;
+}
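+
+/* Illustrative sketch, not part of the original driver: the DHD_SDALIGN
+ * alignment arithmetic used for bus->rxhdr and bus->dataptr above, shown as a
+ * standalone helper. It assumes DHD_SDALIGN is a small power-of-two SDIO
+ * alignment (e.g. 32); the function name is hypothetical.
+ */
+#if 0	/* example only, never compiled */
+static uint8 *
+dhd_align_example(uint8 *p, uintptr align)
+{
+	uintptr rem = (uintptr)p % align;
+
+	/* e.g. p == 0x1004 with align == 32 gives rem == 4, result 0x1020 */
+	return rem ? (p + (align - rem)) : p;
+}
+#endif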
+
+static bool
+dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh)
+{
+ int32 fnum;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef SDTEST
+ dhdsdio_pktgen_init(bus);
+#endif /* SDTEST */
+
+ /* Disable F2 to clear any intermediate frame state on the dongle */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
+
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ bus->sleeping = FALSE;
+ bus->rxflow = FALSE;
+ bus->prev_rxlim_hit = 0;
+
+ /* Done with backplane-dependent accesses, can drop clock... */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+
+ /* ...and initialize clock/power states */
+ bus->clkstate = CLK_SDONLY;
+ bus->idletime = (int32)dhd_idletime;
+ bus->idleclock = DHD_IDLE_ACTIVE;
+
+ /* Query the SD clock speed */
+ if (bcmsdh_iovar_op(sdh, "sd_divisor", NULL, 0,
+ &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) {
+ DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_divisor"));
+ bus->sd_divisor = -1;
+ } else {
+ DHD_INFO(("%s: Initial value for %s is %d\n",
+ __FUNCTION__, "sd_divisor", bus->sd_divisor));
+ }
+
+ /* Query the SD bus mode */
+ if (bcmsdh_iovar_op(sdh, "sd_mode", NULL, 0,
+ &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) {
+ DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_mode"));
+ bus->sd_mode = -1;
+ } else {
+ DHD_INFO(("%s: Initial value for %s is %d\n",
+ __FUNCTION__, "sd_mode", bus->sd_mode));
+ }
+
+ /* Query the F2 block size, set roundup accordingly */
+ fnum = 2;
+ if (bcmsdh_iovar_op(sdh, "sd_blocksize", &fnum, sizeof(int32),
+ &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
+ bus->blocksize = 0;
+ DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
+ } else {
+ DHD_INFO(("%s: Initial value for %s is %d\n",
+ __FUNCTION__, "sd_blocksize", bus->blocksize));
+ }
+ bus->roundup = MIN(max_roundup, bus->blocksize);
+
+ /* Query if bus module supports packet chaining, default to use if supported */
+ if (bcmsdh_iovar_op(sdh, "sd_rxchain", NULL, 0,
+ &bus->sd_rxchain, sizeof(int32), FALSE) != BCME_OK) {
+ bus->sd_rxchain = FALSE;
+ } else {
+ DHD_INFO(("%s: bus module (through bcmsdh API) %s chaining\n",
+ __FUNCTION__, (bus->sd_rxchain ? "supports" : "does not support")));
+ }
+ bus->use_rxchain = (bool)bus->sd_rxchain;
+
+ return TRUE;
+}
+
+bool
+dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
+ char *pfw_path, char *pnv_path)
+{
+ bool ret;
+ bus->fw_path = pfw_path;
+ bus->nv_path = pnv_path;
+
+ ret = dhdsdio_download_firmware(bus, osh, bus->sdh);
+
+
+ return ret;
+}
+
+static bool
+dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh)
+{
+ bool ret;
+
+ DHD_OS_WAKE_LOCK(bus->dhd);
+
+ /* Download the firmware */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ ret = _dhdsdio_download_firmware(bus) == 0;
+
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+ DHD_OS_WAKE_UNLOCK(bus->dhd);
+ return ret;
+}
+
+/* Detach and free everything */
+static void
+dhdsdio_release(dhd_bus_t *bus, osl_t *osh)
+{
+ bool dongle_isolation = FALSE;
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus) {
+ ASSERT(osh);
+
+ if (bus->dhd) {
+ dongle_isolation = bus->dhd->dongle_isolation;
+ dhd_detach(bus->dhd);
+ }
+
+ /* De-register interrupt handler */
+ bcmsdh_intr_disable(bus->sdh);
+ bcmsdh_intr_dereg(bus->sdh);
+
+ if (bus->dhd) {
+ dhdsdio_release_dongle(bus, osh, dongle_isolation, TRUE);
+ dhd_free(bus->dhd);
+ bus->dhd = NULL;
+ }
+
+ dhdsdio_release_malloc(bus, osh);
+
+#ifdef DHD_DEBUG
+ if (bus->console.buf != NULL)
+ MFREE(osh, bus->console.buf, bus->console.bufsize);
+#endif
+
+ MFREE(osh, bus, sizeof(dhd_bus_t));
+ }
+
+ if (osh)
+ dhd_osl_detach(osh);
+
+ DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+static void
+dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus->dhd && bus->dhd->dongle_reset)
+ return;
+
+ if (bus->rxbuf) {
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+ MFREE(osh, bus->rxbuf, bus->rxblen);
+#endif
+ bus->rxctl = bus->rxbuf = NULL;
+ bus->rxlen = 0;
+ }
+
+ if (bus->databuf) {
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+ MFREE(osh, bus->databuf, MAX_DATA_BUF);
+#endif
+ bus->databuf = NULL;
+ }
+
+ if (bus->vars && bus->varsz) {
+ MFREE(osh, bus->vars, bus->varsz);
+ bus->vars = NULL;
+ }
+
+}
+
+
+static void
+dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
+{
+ DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
+ bus->dhd, bus->dhd->dongle_reset));
+
+ if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag)
+ return;
+
+ if (bus->sih) {
+ if (bus->dhd) {
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ }
+#if !defined(BCMLXSDMMC)
+ if (KSO_ENAB(bus) && (dongle_isolation == FALSE))
+ si_watchdog(bus->sih, 4);
+#endif /* !defined(BCMLXSDMMC) */
+ if (bus->dhd) {
+ dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+ }
+ si_detach(bus->sih);
+ if (bus->vars && bus->varsz)
+ MFREE(osh, bus->vars, bus->varsz);
+ bus->vars = NULL;
+ }
+
+ DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+static void
+dhdsdio_disconnect(void *ptr)
+{
+ dhd_bus_t *bus = (dhd_bus_t *)ptr;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus) {
+ ASSERT(bus->dhd);
+ dhdsdio_release(bus, bus->dhd->osh);
+ }
+
+ DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+
+/* Register/Unregister functions are called by the main DHD entry
+ * point (e.g. module insertion) to link with the bus driver, in
+ * order to look for or await the device.
+ */
+
+static bcmsdh_driver_t dhd_sdio = {
+ dhdsdio_probe,
+ dhdsdio_disconnect
+};
+
+int
+dhd_bus_register(void)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ return bcmsdh_register(&dhd_sdio);
+}
+
+void
+dhd_bus_unregister(void)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ bcmsdh_unregister();
+}
+
+#if defined(BCMLXSDMMC)
+/* Register a dummy SDIO client driver in order to be notified of new SDIO devices */
+int dhd_bus_reg_sdio_notify(void* semaphore)
+{
+ return bcmsdh_reg_sdio_notify(semaphore);
+}
+
+void dhd_bus_unreg_sdio_notify(void)
+{
+ bcmsdh_unreg_sdio_notify();
+}
+#endif /* defined(BCMLXSDMMC) */
+
+#ifdef BCMEMBEDIMAGE
+static int
+dhdsdio_download_code_array(struct dhd_bus *bus)
+{
+ int bcmerror = -1;
+ int offset = 0;
+ unsigned char *ularray = NULL;
+
+ DHD_INFO(("%s: download embedded firmware...\n", __FUNCTION__));
+
+ /* Download image */
+ while ((offset + MEMBLOCK) < sizeof(dlarray)) {
+ bcmerror = dhdsdio_membytes(bus, TRUE, offset,
+ (uint8 *) (dlarray + offset), MEMBLOCK);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, MEMBLOCK, offset));
+ goto err;
+ }
+
+ offset += MEMBLOCK;
+ }
+
+ if (offset < sizeof(dlarray)) {
+ bcmerror = dhdsdio_membytes(bus, TRUE, offset,
+ (uint8 *) (dlarray + offset), sizeof(dlarray) - offset);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset));
+ goto err;
+ }
+ }
+
+#ifdef DHD_DEBUG
+ /* Upload and compare the downloaded code */
+ {
+ ularray = MALLOC(bus->dhd->osh, bus->ramsize);
+ /* Upload image to verify downloaded contents. */
+ offset = 0;
+ memset(ularray, 0xaa, bus->ramsize);
+ while ((offset + MEMBLOCK) < sizeof(dlarray)) {
+ bcmerror = dhdsdio_membytes(bus, FALSE, offset, ularray + offset, MEMBLOCK);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, MEMBLOCK, offset));
+ goto err;
+ }
+
+ offset += MEMBLOCK;
+ }
+
+ if (offset < sizeof(dlarray)) {
+ bcmerror = dhdsdio_membytes(bus, FALSE, offset,
+ ularray + offset, sizeof(dlarray) - offset);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset));
+ goto err;
+ }
+ }
+
+ if (memcmp(dlarray, ularray, sizeof(dlarray))) {
+ DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n",
+ __FUNCTION__, dlimagename, dlimagever, dlimagedate));
+ goto err;
+ } else
+ DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n",
+ __FUNCTION__, dlimagename, dlimagever, dlimagedate));
+
+ }
+#endif /* DHD_DEBUG */
+
+err:
+ if (ularray)
+ MFREE(bus->dhd->osh, ularray, bus->ramsize);
+ return bcmerror;
+}
+#endif /* BCMEMBEDIMAGE */
+
+static int
+dhdsdio_download_code_file(struct dhd_bus *bus, char *pfw_path)
+{
+ int bcmerror = -1;
+ int offset = 0;
+ uint len;
+ void *image = NULL;
+ uint8 *memblock = NULL, *memptr;
+
+ DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
+
+ image = dhd_os_open_image(pfw_path);
+ if (image == NULL)
+ goto err;
+
+ memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+ if (memblock == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+ goto err;
+ }
+ if ((uint32)(uintptr)memblock % DHD_SDALIGN)
+ memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
+
+ /* Download image */
+ while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, image))) {
+ bcmerror = dhdsdio_membytes(bus, TRUE, offset, memptr, len);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, MEMBLOCK, offset));
+ goto err;
+ }
+
+ offset += MEMBLOCK;
+ }
+
+err:
+ if (memblock)
+ MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
+
+ if (image)
+ dhd_os_close_image(image);
+
+ return bcmerror;
+}
+
+/*
+ EXAMPLE: nvram_array
+ nvram_array format:
+ name=value
+ Terminate each assignment with a newline, and end the array with an empty
+ string followed by a newline.
+
+ For example:
+ unsigned char nvram_array[] = {"name1=value1\n", "name2=value2\n", "\n"};
+ Hex values start with 0x; the MAC address format is xx:xx:xx:xx:xx:xx.
+
+ Search "EXAMPLE: nvram_array" to see how the array is activated.
+*/
+
+void
+dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params)
+{
+ bus->nvram_params = nvram_params;
+}
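+
+/* Illustrative sketch, not part of the original driver: one way the
+ * "EXAMPLE: nvram_array" format described above could be defined and handed
+ * to the bus before firmware download. The array name and variable contents
+ * here are hypothetical.
+ */
+#if 0	/* example only, never compiled */
+static const char my_nvram_array[] =
+	"manfid=0x2d0\n"
+	"macaddr=00:11:22:33:44:55\n"
+	"\n";				/* empty string ends the array */
+
+static void
+dhd_nvram_array_example(struct dhd_bus *bus)
+{
+	/* Must run before _dhdsdio_download_firmware() downloads the vars */
+	dhd_bus_set_nvram_params(bus, my_nvram_array);
+}
+#endif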
+
+static int
+dhdsdio_download_nvram(struct dhd_bus *bus)
+{
+ int bcmerror = -1;
+ uint len;
+ void * image = NULL;
+ char * memblock = NULL;
+ char *bufp;
+ char *pnv_path;
+ bool nvram_file_exists;
+
+ pnv_path = bus->nv_path;
+
+ nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
+ if (!nvram_file_exists && (bus->nvram_params == NULL))
+ return (0);
+
+ if (nvram_file_exists) {
+ image = dhd_os_open_image(pnv_path);
+ if (image == NULL)
+ goto err;
+ }
+
+ memblock = MALLOC(bus->dhd->osh, MAX_NVRAMBUF_SIZE);
+ if (memblock == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
+ __FUNCTION__, MAX_NVRAMBUF_SIZE));
+ goto err;
+ }
+
+ /* Download variables */
+ if (nvram_file_exists) {
+ len = dhd_os_get_image_block(memblock, MAX_NVRAMBUF_SIZE, image);
+ }
+ else {
+ len = strlen(bus->nvram_params);
+ ASSERT(len <= MAX_NVRAMBUF_SIZE);
+ memcpy(memblock, bus->nvram_params, len);
+ }
+ if (len > 0 && len < MAX_NVRAMBUF_SIZE) {
+ bufp = (char *)memblock;
+ bufp[len] = 0;
+ len = process_nvram_vars(bufp, len);
+ if (len % 4) {
+ len += 4 - (len % 4);
+ }
+ bufp += len;
+ *bufp++ = 0;
+ if (len)
+ bcmerror = dhdsdio_downloadvars(bus, memblock, len + 1);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error downloading vars: %d\n",
+ __FUNCTION__, bcmerror));
+ }
+ }
+ else {
+ DHD_ERROR(("%s: error reading nvram file: %d\n",
+ __FUNCTION__, len));
+ bcmerror = BCME_SDIO_ERROR;
+ }
+
+err:
+ if (memblock)
+ MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
+
+ if (image)
+ dhd_os_close_image(image);
+
+ return bcmerror;
+}
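+
+/* Illustrative sketch, not part of the original driver: the length handling
+ * performed above before dhdsdio_downloadvars(). process_nvram_vars() is
+ * assumed to return the byte count of the packed variable stream; the helper
+ * name below is hypothetical.
+ */
+#if 0	/* example only, never compiled */
+static uint
+dhd_nvram_download_len_example(uint len)
+{
+	/* Round up to a 4-byte boundary, e.g. 13 -> 16, 16 stays 16 */
+	if (len % 4)
+		len += 4 - (len % 4);
+
+	/* One extra NUL terminator is appended at offset len, so len + 1
+	 * bytes (17 in the 13-byte case) are passed to dhdsdio_downloadvars().
+	 */
+	return len + 1;
+}
+#endif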
+
+static int
+_dhdsdio_download_firmware(struct dhd_bus *bus)
+{
+ int bcmerror = -1;
+
+ bool embed = FALSE; /* download embedded firmware */
+ bool dlok = FALSE; /* download firmware succeeded */
+
+ /* Out immediately if no image to download */
+ if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
+#ifdef BCMEMBEDIMAGE
+ embed = TRUE;
+#else
+ return 0;
+#endif
+ }
+
+ /* Keep arm in reset */
+ if (dhdsdio_download_state(bus, TRUE)) {
+ DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
+ goto err;
+ }
+
+ /* External image takes precedence if specified */
+ if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
+ if (dhdsdio_download_code_file(bus, bus->fw_path)) {
+ DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__));
+#ifdef BCMEMBEDIMAGE
+ embed = TRUE;
+#else
+ goto err;
+#endif
+ }
+ else {
+ embed = FALSE;
+ dlok = TRUE;
+ }
+ }
+#ifdef BCMEMBEDIMAGE
+ if (embed) {
+ if (dhdsdio_download_code_array(bus)) {
+ DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
+ goto err;
+ }
+ else {
+ dlok = TRUE;
+ }
+ }
+#else
+ BCM_REFERENCE(embed);
+#endif
+ if (!dlok) {
+ DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ /* EXAMPLE: nvram_array */
+	/* If a valid nvram_array is specified as above, it can be passed down to the dongle */
+ /* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
+
+ /* External nvram takes precedence if specified */
+ if (dhdsdio_download_nvram(bus)) {
+ DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ /* Take arm out of reset */
+ if (dhdsdio_download_state(bus, FALSE)) {
+ DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
+ goto err;
+ }
+
+ bcmerror = 0;
+
+err:
+ return bcmerror;
+}
+
+static int
+dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes,
+ void *pkt, bcmsdh_cmplt_fn_t complete, void *handle)
+{
+ int status;
+
+ if (!KSO_ENAB(bus)) {
+ DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+ return BCME_NODEVICE;
+ }
+
+ status = bcmsdh_recv_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete, handle);
+
+ return status;
+}
+
+static int
+dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes,
+ void *pkt, bcmsdh_cmplt_fn_t complete, void *handle)
+{
+ if (!KSO_ENAB(bus)) {
+ DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+ return BCME_NODEVICE;
+ }
+
+ return (bcmsdh_send_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete, handle));
+}
+
+uint
+dhd_bus_chip(struct dhd_bus *bus)
+{
+ ASSERT(bus->sih != NULL);
+ return bus->sih->chip;
+}
+
+void *
+dhd_bus_pub(struct dhd_bus *bus)
+{
+ return bus->dhd;
+}
+
+void *
+dhd_bus_txq(struct dhd_bus *bus)
+{
+ return &bus->txq;
+}
+
+uint
+dhd_bus_hdrlen(struct dhd_bus *bus)
+{
+ return SDPCM_HDRLEN;
+}
+
+int
+dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
+{
+ int bcmerror = 0;
+ dhd_bus_t *bus;
+
+ bus = dhdp->bus;
+
+ if (flag == TRUE) {
+ if (!bus->dhd->dongle_reset) {
+ dhd_os_sdlock(dhdp);
+ dhd_os_wd_timer(dhdp, 0);
+#if !defined(IGNORE_ETH0_DOWN)
+ /* Force flow control as protection when stop come before ifconfig_down */
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
+#endif /* !defined(IGNORE_ETH0_DOWN) */
+ /* Expect app to have torn down any connection before calling */
+ /* Stop the bus, disable F2 */
+ dhd_bus_stop(bus, FALSE);
+
+#if defined(OOB_INTR_ONLY)
+ /* Clean up any pending IRQ */
+ bcmsdh_set_irq(FALSE);
+#endif /* defined(OOB_INTR_ONLY) */
+
+ /* Clean tx/rx buffer pointers, detach from the dongle */
+ dhdsdio_release_dongle(bus, bus->dhd->osh, TRUE, TRUE);
+
+ bus->dhd->dongle_reset = TRUE;
+ bus->dhd->up = FALSE;
+ dhd_os_sdunlock(dhdp);
+
+ DHD_TRACE(("%s: WLAN OFF DONE\n", __FUNCTION__));
+ /* App can now remove power from device */
+ } else
+ bcmerror = BCME_SDIO_ERROR;
+ } else {
+ /* App must have restored power to device before calling */
+
+ DHD_TRACE(("\n\n%s: == WLAN ON ==\n", __FUNCTION__));
+
+ if (bus->dhd->dongle_reset) {
+ /* Turn on WLAN */
+#ifdef DHDTHREAD
+ dhd_os_sdlock(dhdp);
+#endif /* DHDTHREAD */
+ /* Reset SD client */
+ bcmsdh_reset(bus->sdh);
+
+ /* Attempt to re-attach & download */
+ if (dhdsdio_probe_attach(bus, bus->dhd->osh, bus->sdh,
+ (uint32 *)SI_ENUM_BASE,
+ bus->cl_devid)) {
+ /* Attempt to download binary to the dongle */
+ if (dhdsdio_probe_init(bus, bus->dhd->osh, bus->sdh) &&
+ dhdsdio_download_firmware(bus, bus->dhd->osh, bus->sdh)) {
+
+ /* Re-init bus, enable F2 transfer */
+ bcmerror = dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE);
+ if (bcmerror == BCME_OK) {
+#if defined(OOB_INTR_ONLY)
+ bcmsdh_set_irq(TRUE);
+ dhd_enable_oob_intr(bus, TRUE);
+#endif /* defined(OOB_INTR_ONLY) */
+
+ bus->dhd->dongle_reset = FALSE;
+ bus->dhd->up = TRUE;
+
+#if !defined(IGNORE_ETH0_DOWN)
+ /* Restore flow control */
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
+#endif
+ dhd_os_wd_timer(dhdp, dhd_watchdog_ms);
+
+ DHD_TRACE(("%s: WLAN ON DONE\n", __FUNCTION__));
+ } else {
+ dhd_bus_stop(bus, FALSE);
+ dhdsdio_release_dongle(bus, bus->dhd->osh,
+ TRUE, FALSE);
+ }
+ } else
+ bcmerror = BCME_SDIO_ERROR;
+ } else
+ bcmerror = BCME_SDIO_ERROR;
+
+#ifdef DHDTHREAD
+ dhd_os_sdunlock(dhdp);
+#endif /* DHDTHREAD */
+ } else {
+ bcmerror = BCME_SDIO_ERROR;
+ DHD_INFO(("%s called when dongle is not in reset\n",
+ __FUNCTION__));
+ DHD_INFO(("Will call dhd_bus_start instead\n"));
+ sdioh_start(NULL, 1);
+ if ((bcmerror = dhd_bus_start(dhdp)) != 0)
+ DHD_ERROR(("%s: dhd_bus_start fail with %d\n",
+ __FUNCTION__, bcmerror));
+ }
+ }
+ return bcmerror;
+}
+
+/* Get Chip ID version */
+uint dhd_bus_chip_id(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = dhdp->bus;
+
+ return bus->sih->chip;
+}
+int
+dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size)
+{
+ dhd_bus_t *bus;
+
+ bus = dhdp->bus;
+ return dhdsdio_membytes(bus, set, address, data, size);
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_wlfc.h b/drivers/net/wireless/bcmdhd/dhd_wlfc.h
new file mode 100644
index 000000000000..09275a2824e7
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_wlfc.h
@@ -0,0 +1,278 @@
+/*
+* Copyright (C) 1999-2012, Broadcom Corporation
+*
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2 (the "GPL"),
+* available at http://www.broadcom.com/licenses/GPLv2.php, with the
+* following added to such license:
+*
+* As a special exception, the copyright holders of this software give you
+* permission to link this software with independent modules, and to copy and
+* distribute the resulting executable under terms of your choice, provided that
+* you also meet, for each linked independent module, the terms and conditions of
+* the license of that module. An independent module is a module which is not
+* derived from this software. The special exception does not apply to any
+* modifications of the software.
+*
+* Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a license
+* other than the GPL, without Broadcom's express prior written consent.
+* $Id: dhd_wlfc.h 328424 2012-04-19 05:23:09Z $
+*
+*/
+#ifndef __wlfc_host_driver_definitions_h__
+#define __wlfc_host_driver_definitions_h__
+
+/* 16 bits will provide an absolute max of 65536 slots */
+#define WLFC_HANGER_MAXITEMS 1024
+
+#define WLFC_HANGER_ITEM_STATE_FREE 1
+#define WLFC_HANGER_ITEM_STATE_INUSE 2
+
+#define WLFC_PKTID_HSLOT_MASK 0xffff /* allow 16 bits only */
+#define WLFC_PKTID_HSLOT_SHIFT 8
+
+/* x -> TXSTATUS TAG to/from firmware */
+#define WLFC_PKTID_HSLOT_GET(x) \
+ (((x) >> WLFC_PKTID_HSLOT_SHIFT) & WLFC_PKTID_HSLOT_MASK)
+#define WLFC_PKTID_HSLOT_SET(var, slot) \
+ ((var) = ((var) & ~(WLFC_PKTID_HSLOT_MASK << WLFC_PKTID_HSLOT_SHIFT)) | \
+ (((slot) & WLFC_PKTID_HSLOT_MASK) << WLFC_PKTID_HSLOT_SHIFT))
+
+#define WLFC_PKTID_FREERUNCTR_MASK 0xff
+
+#define WLFC_PKTID_FREERUNCTR_GET(x) ((x) & WLFC_PKTID_FREERUNCTR_MASK)
+#define WLFC_PKTID_FREERUNCTR_SET(var, ctr) \
+ ((var) = (((var) & ~WLFC_PKTID_FREERUNCTR_MASK) | \
+ (((ctr) & WLFC_PKTID_FREERUNCTR_MASK))))
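+
+/* Illustrative sketch, not part of the original header: how the TXSTATUS tag
+ * macros above pack a hanger slot (bits 8..23) and a free-running counter
+ * (bits 0..7) into one 32-bit packet id. The function name is hypothetical.
+ */
+#if 0	/* example only, never compiled */
+static void
+wlfc_pktid_example(void)
+{
+	uint32 htod_tag = 0;
+
+	WLFC_PKTID_HSLOT_SET(htod_tag, 0x12a);		/* hanger slot 298 */
+	WLFC_PKTID_FREERUNCTR_SET(htod_tag, 0x5c);	/* counter value 92 */
+
+	/* htod_tag is now 0x00012a5c */
+	ASSERT(WLFC_PKTID_HSLOT_GET(htod_tag) == 0x12a);
+	ASSERT(WLFC_PKTID_FREERUNCTR_GET(htod_tag) == 0x5c);
+}
+#endif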
+
+#define WLFC_PKTQ_PENQ(pq, prec, p) ((pktq_full((pq)) || pktq_pfull((pq), (prec)))? \
+ NULL : pktq_penq((pq), (prec), (p)))
+#define WLFC_PKTQ_PENQ_HEAD(pq, prec, p) ((pktq_full((pq)) || pktq_pfull((pq), (prec))) ? \
+ NULL : pktq_penq_head((pq), (prec), (p)))
+
+typedef enum ewlfc_packet_state {
+ eWLFC_PKTTYPE_NEW,
+ eWLFC_PKTTYPE_DELAYED,
+ eWLFC_PKTTYPE_SUPPRESSED,
+ eWLFC_PKTTYPE_MAX
+} ewlfc_packet_state_t;
+
+typedef enum ewlfc_mac_entry_action {
+ eWLFC_MAC_ENTRY_ACTION_ADD,
+ eWLFC_MAC_ENTRY_ACTION_DEL,
+ eWLFC_MAC_ENTRY_ACTION_UPDATE,
+ eWLFC_MAC_ENTRY_ACTION_MAX
+} ewlfc_mac_entry_action_t;
+
+typedef struct wlfc_hanger_item {
+ uint8 state;
+ uint8 pad[3];
+ uint32 identifier;
+ void* pkt;
+#ifdef PROP_TXSTATUS_DEBUG
+ uint32 push_time;
+#endif
+} wlfc_hanger_item_t;
+
+typedef struct wlfc_hanger {
+ int max_items;
+ uint32 pushed;
+ uint32 popped;
+ uint32 failed_to_push;
+ uint32 failed_to_pop;
+ uint32 failed_slotfind;
+ wlfc_hanger_item_t items[1];
+} wlfc_hanger_t;
+
+#define WLFC_HANGER_SIZE(n) ((sizeof(wlfc_hanger_t) - \
+ sizeof(wlfc_hanger_item_t)) + ((n)*sizeof(wlfc_hanger_item_t)))
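+
+/* Illustrative sketch, not part of the original header: items[1] in
+ * wlfc_hanger_t is the usual variable-length-struct idiom, so
+ * WLFC_HANGER_SIZE(n) is the fixed header plus n item slots. The helper name
+ * below is hypothetical.
+ */
+#if 0	/* example only, never compiled */
+static wlfc_hanger_t *
+wlfc_hanger_alloc_example(osl_t *osh)
+{
+	/* e.g. one contiguous block sized for WLFC_HANGER_MAXITEMS (1024) items */
+	return (wlfc_hanger_t *)MALLOC(osh, WLFC_HANGER_SIZE(WLFC_HANGER_MAXITEMS));
+}
+#endif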
+
+#define WLFC_STATE_OPEN 1
+#define WLFC_STATE_CLOSE 2
+
+#define WLFC_PSQ_PREC_COUNT		((AC_COUNT + 1) * 2) /* 2 precedences per AC plus 2 for bc/mc */
+#define WLFC_PSQ_LEN 256
+#define WLFC_SENDQ_LEN 128
+
+
+#define WLFC_FLOWCONTROL_HIWATER 128
+#define WLFC_FLOWCONTROL_LOWATER 64
+
+
+typedef struct wlfc_mac_descriptor {
+ uint8 occupied;
+ uint8 interface_id;
+ uint8 iftype;
+ uint8 state;
+ uint8 ac_bitmap; /* for APSD */
+ uint8 requested_credit;
+ uint8 requested_packet;
+ uint8 ea[ETHER_ADDR_LEN];
+	/*
+	Maintain a per-(MAC, AC) sequence count for packets going to the
+	device, plus one for bc/mc traffic.
+	*/
+ uint8 seq[AC_COUNT + 1];
+ uint8 generation;
+ struct pktq psq;
+ /* The AC pending bitmap that was reported to the fw at last change */
+ uint8 traffic_lastreported_bmp;
+ /* The new AC pending bitmap */
+ uint8 traffic_pending_bmp;
+ /* 1= send on next opportunity */
+ uint8 send_tim_signal;
+ uint8 mac_handle;
+#ifdef PROP_TXSTATUS_DEBUG
+ uint32 dstncredit_sent_packets;
+ uint32 dstncredit_acks;
+ uint32 opened_ct;
+ uint32 closed_ct;
+#endif
+} wlfc_mac_descriptor_t;
+
+#define WLFC_DECR_SEQCOUNT(entry, prec) do { if (entry->seq[(prec)] == 0) {\
+ entry->seq[prec] = 0xff; } else entry->seq[prec]--;} while (0)
+
+#define WLFC_INCR_SEQCOUNT(entry, prec) entry->seq[(prec)]++
+#define WLFC_SEQCOUNT(entry, prec) entry->seq[(prec)]
+
+typedef struct athost_wl_stat_counters {
+ uint32 pktin;
+ uint32 pkt2bus;
+ uint32 pktdropped;
+ uint32 tlv_parse_failed;
+ uint32 rollback;
+ uint32 rollback_failed;
+ uint32 sendq_full_error;
+ uint32 delayq_full_error;
+ uint32 credit_request_failed;
+ uint32 packet_request_failed;
+ uint32 mac_update_failed;
+ uint32 psmode_update_failed;
+ uint32 interface_update_failed;
+ uint32 wlfc_header_only_pkt;
+ uint32 txstatus_in;
+ uint32 d11_suppress;
+ uint32 wl_suppress;
+ uint32 bad_suppress;
+ uint32 pkt_freed;
+ uint32 pkt_free_err;
+ uint32 psq_wlsup_retx;
+ uint32 psq_wlsup_enq;
+ uint32 psq_d11sup_retx;
+ uint32 psq_d11sup_enq;
+ uint32 psq_hostq_retx;
+ uint32 psq_hostq_enq;
+ uint32 mac_handle_notfound;
+ uint32 wlc_tossed_pkts;
+ uint32 dhd_hdrpulls;
+ uint32 generic_error;
+ /* an extra one for bc/mc traffic */
+ uint32 sendq_pkts[AC_COUNT + 1];
+#ifdef PROP_TXSTATUS_DEBUG
+ /* all pkt2bus -> txstatus latency accumulated */
+ uint32 latency_sample_count;
+ uint32 total_status_latency;
+ uint32 latency_most_recent;
+ int idx_delta;
+ uint32 deltas[10];
+ uint32 fifo_credits_sent[6];
+ uint32 fifo_credits_back[6];
+ uint32 dropped_qfull[6];
+ uint32 signal_only_pkts_sent;
+ uint32 signal_only_pkts_freed;
+#endif
+} athost_wl_stat_counters_t;
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do { \
+ (ctx)->stats.fifo_credits_sent[(ac)]++;} while (0)
+#define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do { \
+ (ctx)->stats.fifo_credits_back[(ac)]++;} while (0)
+#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do { \
+ (ctx)->stats.dropped_qfull[(ac)]++;} while (0)
+#else
+#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do {} while (0)
+#define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do {} while (0)
+#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do {} while (0)
+#endif
+
+#define WLFC_FCMODE_NONE 0
+#define WLFC_FCMODE_IMPLIED_CREDIT 1
+#define WLFC_FCMODE_EXPLICIT_CREDIT 2
+
+/* How long to defer borrowing in milliseconds */
+#define WLFC_BORROW_DEFER_PERIOD_MS 100
+
+/* Mask to represent available ACs (note: BC/MC is ignored) */
+#define WLFC_AC_MASK 0xF
+
+/* Mask to check for only on-going AC_BE traffic */
+#define WLFC_AC_BE_TRAFFIC_ONLY 0xD
+
+typedef struct athost_wl_status_info {
+ uint8 last_seqid_to_wlc;
+
+ /* OSL handle */
+ osl_t* osh;
+ /* dhd pub */
+ void* dhdp;
+
+ /* stats */
+ athost_wl_stat_counters_t stats;
+
+ /* the additional ones are for bc/mc and ATIM FIFO */
+ int FIFO_credit[AC_COUNT + 2];
+
+ /* Credit borrow counts for each FIFO from each of the other FIFOs */
+ int credits_borrowed[AC_COUNT + 2][AC_COUNT + 2];
+
+ struct pktq SENDQ;
+
+ /* packet hanger and MAC->handle lookup table */
+ void* hanger;
+ struct {
+ /* table for individual nodes */
+ wlfc_mac_descriptor_t nodes[WLFC_MAC_DESC_TABLE_SIZE];
+ /* table for interfaces */
+ wlfc_mac_descriptor_t interfaces[WLFC_MAX_IFNUM];
+ /* OS may send packets to unknown (unassociated) destinations */
+ /* A place holder for bc/mc and packets to unknown destinations */
+ wlfc_mac_descriptor_t other;
+ } destination_entries;
+ /* token position for different priority packets */
+ uint8 token_pos[AC_COUNT+1];
+ /* ON/OFF state for flow control to the host network interface */
+ uint8 hostif_flow_state[WLFC_MAX_IFNUM];
+ uint8 host_ifidx;
+ /* to flow control an OS interface */
+ uint8 toggle_host_if;
+
+ /*
+ Mode in which the dhd flow control shall operate. Must be set before
+ traffic starts to the device.
+	0 - Do not do any proptxstatus flow control
+ 1 - Use implied credit from a packet status
+ 2 - Use explicit credit
+ */
+ uint8 proptxstatus_mode;
+
+ /* To borrow credits */
+ uint8 allow_credit_borrow;
+
+ /* Timestamp to compute how long to defer borrowing for */
+ uint32 borrow_defer_timestamp;
+
+} athost_wl_status_info_t;
+
+int dhd_wlfc_enable(dhd_pub_t *dhd);
+int dhd_wlfc_interface_event(struct dhd_info *,
+ ewlfc_mac_entry_action_t action, uint8 ifid, uint8 iftype, uint8* ea);
+int dhd_wlfc_FIFOcreditmap_event(struct dhd_info *dhd, uint8* event_data);
+int dhd_wlfc_event(struct dhd_info *dhd);
+int dhd_os_wlfc_block(dhd_pub_t *pub);
+int dhd_os_wlfc_unblock(dhd_pub_t *pub);
+
+#endif /* __wlfc_host_driver_definitions_h__ */
diff --git a/drivers/net/wireless/bcmdhd/dngl_stats.h b/drivers/net/wireless/bcmdhd/dngl_stats.h
new file mode 100644
index 000000000000..5e5a2e2e5314
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dngl_stats.h
@@ -0,0 +1,43 @@
+/*
+ * Common stats definitions for clients of dongle
+ * ports
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dngl_stats.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _dngl_stats_h_
+#define _dngl_stats_h_
+
+typedef struct {
+ unsigned long rx_packets; /* total packets received */
+ unsigned long tx_packets; /* total packets transmitted */
+ unsigned long rx_bytes; /* total bytes received */
+ unsigned long tx_bytes; /* total bytes transmitted */
+ unsigned long rx_errors; /* bad packets received */
+ unsigned long tx_errors; /* packet transmit problems */
+ unsigned long rx_dropped; /* packets dropped by dongle */
+ unsigned long tx_dropped; /* packets dropped by dongle */
+ unsigned long multicast; /* multicast packets received */
+} dngl_stats_t;
+
+#endif /* _dngl_stats_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dngl_wlhdr.h b/drivers/net/wireless/bcmdhd/dngl_wlhdr.h
new file mode 100644
index 000000000000..0e37df6e19ef
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dngl_wlhdr.h
@@ -0,0 +1,40 @@
+/*
+ * Dongle WL Header definitions
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dngl_wlhdr.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _dngl_wlhdr_h_
+#define _dngl_wlhdr_h_
+
+typedef struct wl_header {
+ uint8 type; /* Header type */
+ uint8 version; /* Header version */
+ int8 rssi; /* RSSI */
+ uint8 pad; /* Unused */
+} wl_header_t;
+
+#define WL_HEADER_LEN sizeof(wl_header_t)
+#define WL_HEADER_TYPE 0
+#define WL_HEADER_VER 1
+#endif /* _dngl_wlhdr_h_ */
diff --git a/drivers/net/wireless/bcmdhd/hndpmu.c b/drivers/net/wireless/bcmdhd/hndpmu.c
new file mode 100644
index 000000000000..eeee5ea9888a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/hndpmu.c
@@ -0,0 +1,197 @@
+/*
+ * Misc utility routines for accessing PMU corerev specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndpmu.c 324060 2012-03-27 23:26:47Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <hndpmu.h>
+
+#define PMU_ERROR(args)
+
+#define PMU_MSG(args)
+
+/* Used for verbose debugging messages that are not intended
+ * to be enabled except in private builds.
+ */
+#define PMU_NONE(args)
+
+
+/* SDIO Pad drive strength to select value mappings.
+ * The last strength value in each table must be 0 (the tri-state value).
+ */
+typedef struct {
+ uint8 strength; /* Pad Drive Strength in mA */
+ uint8 sel; /* Chip-specific select value */
+} sdiod_drive_str_t;
+
+/* SDIO Drive Strength to sel value table for PMU Rev 1 */
+static const sdiod_drive_str_t sdiod_drive_strength_tab1[] = {
+ {4, 0x2},
+ {2, 0x3},
+ {1, 0x0},
+ {0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */
+static const sdiod_drive_str_t sdiod_drive_strength_tab2[] = {
+ {12, 0x7},
+ {10, 0x6},
+ {8, 0x5},
+ {6, 0x4},
+ {4, 0x2},
+ {2, 0x1},
+ {0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab3[] = {
+ {32, 0x7},
+ {26, 0x6},
+ {22, 0x5},
+ {16, 0x4},
+ {12, 0x3},
+ {8, 0x2},
+ {4, 0x1},
+ {0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab4_1v8[] = {
+ {32, 0x6},
+ {26, 0x7},
+ {22, 0x4},
+ {16, 0x5},
+ {12, 0x2},
+ {8, 0x3},
+ {4, 0x0},
+ {0, 0x1} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.2v) */
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (2.5v) */
+
+/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab5_1v8[] = {
+ {6, 0x7},
+ {5, 0x6},
+ {4, 0x5},
+ {3, 0x4},
+ {2, 0x2},
+ {1, 0x1},
+ {0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 13 (3.3v) */
+
+
+#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
+
+void
+si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength)
+{
+ chipcregs_t *cc;
+ uint origidx, intr_val = 0;
+ sdiod_drive_str_t *str_tab = NULL;
+ uint32 str_mask = 0;
+ uint32 str_shift = 0;
+
+ if (!(sih->cccaps & CC_CAP_PMU)) {
+ return;
+ }
+
+ /* Remember original core before switch to chipc */
+ cc = (chipcregs_t *) si_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
+
+ switch (SDIOD_DRVSTR_KEY(sih->chip, sih->pmurev)) {
+ case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1):
+ str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab1;
+ str_mask = 0x30000000;
+ str_shift = 28;
+ break;
+ case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2):
+ case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3):
+ case SDIOD_DRVSTR_KEY(BCM4315_CHIP_ID, 4):
+ str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab2;
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+ case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8):
+ case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 11):
+ if (sih->pmurev == 8) {
+ str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab3;
+ }
+ else if (sih->pmurev == 11) {
+ str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab4_1v8;
+ }
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+ case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
+ str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab4_1v8;
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+ case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
+ str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab5_1v8;
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+ default:
+ PMU_MSG(("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
+ bcm_chipname(sih->chip, chn, 8), sih->chiprev, sih->pmurev));
+
+ break;
+ }
+
+ if (str_tab != NULL) {
+ uint32 cc_data_temp;
+ int i;
+
+		/* Pick the lowest available drive strength equal to or greater than
+		 * the requested strength. A drive strength of 0 requests tri-state.
+		 */
+ for (i = 0; drivestrength < str_tab[i].strength; i++)
+ ;
+
+ if (i > 0 && drivestrength > str_tab[i].strength)
+ i--;
+
+ W_REG(osh, &cc->chipcontrol_addr, 1);
+ cc_data_temp = R_REG(osh, &cc->chipcontrol_data);
+ cc_data_temp &= ~str_mask;
+ cc_data_temp |= str_tab[i].sel << str_shift;
+ W_REG(osh, &cc->chipcontrol_data, cc_data_temp);
+
+ PMU_MSG(("SDIO: %dmA drive strength requested; set to %dmA\n",
+ drivestrength, str_tab[i].strength));
+ }
+
+ /* Return to original core */
+ si_restore_core(sih, origidx, intr_val);
+}
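+
+/* Illustrative walkthrough, not part of the original file: how the selection
+ * loop above behaves for a hypothetical BCM4325 PMU rev 2 part
+ * (sdiod_drive_strength_tab2) when 7 mA is requested. The tables are sorted
+ * by descending strength and end with the 0 mA (tri-state) entry.
+ */
+#if 0	/* example only, never compiled */
+static uint8
+sdiod_drive_sel_example(uint32 drivestrength)
+{
+	const sdiod_drive_str_t *str_tab = sdiod_drive_strength_tab2;
+	int i;
+
+	/* For drivestrength == 7 the loop stops at the 6 mA entry (i == 3)... */
+	for (i = 0; drivestrength < str_tab[i].strength; i++)
+		;
+
+	/* ...then steps back to the 8 mA entry: the lowest strength that is
+	 * >= the request. Its sel value (0x5) is what gets programmed.
+	 */
+	if (i > 0 && drivestrength > str_tab[i].strength)
+		i--;
+
+	return str_tab[i].sel;
+}
+#endif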
diff --git a/drivers/net/wireless/bcmdhd/include/Makefile b/drivers/net/wireless/bcmdhd/include/Makefile
new file mode 100644
index 000000000000..42b3b689b561
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/Makefile
@@ -0,0 +1,53 @@
+#!/bin/bash
+#
+# This script serves the following purposes:
+#
+# 1. It generates native version information by querying the
+#    automerger-maintained database to see where src/include
+#    came from.
+# 2. For select components, as listed in compvers.sh,
+#    it generates component version files.
+#
+# Copyright 2005, Broadcom, Inc.
+#
+# $Id: Makefile 241686 2011-02-19 00:22:45Z $
+#
+
+SRCBASE := ..
+
+TARGETS := epivers.h
+
+ifdef VERBOSE
+export VERBOSE
+endif
+
+all release: epivers compvers
+
+# Generate epivers.h for native branch version
+epivers:
+ bash epivers.sh
+
+# Generate component version files, if any
+compvers:
+ @if [ -s "compvers.sh" ]; then \
+ echo "Generating component versions, if any"; \
+ bash compvers.sh; \
+ else \
+ echo "Skipping component version generation"; \
+ fi
+
+# Clean up generated component version files
+clean_compvers:
+ @if [ -s "compvers.sh" ]; then \
+ echo "bash compvers.sh clean"; \
+ bash compvers.sh clean; \
+ else \
+ echo "Skipping component version clean"; \
+ fi
+
+clean:
+ rm -f $(TARGETS) *.prev
+
+clean_all: clean clean_compvers
+
+.PHONY: all release clean epivers compvers clean_compvers
diff --git a/drivers/net/wireless/bcmdhd/include/aidmp.h b/drivers/net/wireless/bcmdhd/include/aidmp.h
new file mode 100644
index 000000000000..63513e6f6e29
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/aidmp.h
@@ -0,0 +1,375 @@
+/*
+ * Broadcom AMBA Interconnect definitions.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: aidmp.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _AIDMP_H
+#define _AIDMP_H
+
+
+#define MFGID_ARM 0x43b
+#define MFGID_BRCM 0x4bf
+#define MFGID_MIPS 0x4a7
+
+
+#define CC_SIM 0
+#define CC_EROM 1
+#define CC_CORESIGHT 9
+#define CC_VERIF 0xb
+#define CC_OPTIMO 0xd
+#define CC_GEN 0xe
+#define CC_PRIMECELL 0xf
+
+
+#define ER_EROMENTRY 0x000
+#define ER_REMAPCONTROL 0xe00
+#define ER_REMAPSELECT 0xe04
+#define ER_MASTERSELECT 0xe10
+#define ER_ITCR 0xf00
+#define ER_ITIP 0xf04
+
+
+#define ER_TAG 0xe
+#define ER_TAG1 0x6
+#define ER_VALID 1
+#define ER_CI 0
+#define ER_MP 2
+#define ER_ADD 4
+#define ER_END 0xe
+#define ER_BAD 0xffffffff
+
+
+#define CIA_MFG_MASK 0xfff00000
+#define CIA_MFG_SHIFT 20
+#define CIA_CID_MASK 0x000fff00
+#define CIA_CID_SHIFT 8
+#define CIA_CCL_MASK 0x000000f0
+#define CIA_CCL_SHIFT 4
+
+
+#define CIB_REV_MASK 0xff000000
+#define CIB_REV_SHIFT 24
+#define CIB_NSW_MASK 0x00f80000
+#define CIB_NSW_SHIFT 19
+#define CIB_NMW_MASK 0x0007c000
+#define CIB_NMW_SHIFT 14
+#define CIB_NSP_MASK 0x00003e00
+#define CIB_NSP_SHIFT 9
+#define CIB_NMP_MASK 0x000001f0
+#define CIB_NMP_SHIFT 4
+
+
+#define MPD_MUI_MASK 0x0000ff00
+#define MPD_MUI_SHIFT 8
+#define MPD_MP_MASK 0x000000f0
+#define MPD_MP_SHIFT 4
+
+
+#define AD_ADDR_MASK 0xfffff000
+#define AD_SP_MASK 0x00000f00
+#define AD_SP_SHIFT 8
+#define AD_ST_MASK 0x000000c0
+#define AD_ST_SHIFT 6
+#define AD_ST_SLAVE 0x00000000
+#define AD_ST_BRIDGE 0x00000040
+#define AD_ST_SWRAP 0x00000080
+#define AD_ST_MWRAP 0x000000c0
+#define AD_SZ_MASK 0x00000030
+#define AD_SZ_SHIFT 4
+#define AD_SZ_4K 0x00000000
+#define AD_SZ_8K 0x00000010
+#define AD_SZ_16K 0x00000020
+#define AD_SZ_SZD 0x00000030
+#define AD_AG32 0x00000008
+#define AD_ADDR_ALIGN 0x00000fff
+#define AD_SZ_BASE 0x00001000
+
+
+#define SD_SZ_MASK 0xfffff000
+#define SD_SG32 0x00000008
+#define SD_SZ_ALIGN 0x00000fff
+
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+typedef volatile struct _aidmp {
+ uint32 oobselina30;
+ uint32 oobselina74;
+ uint32 PAD[6];
+ uint32 oobselinb30;
+ uint32 oobselinb74;
+ uint32 PAD[6];
+ uint32 oobselinc30;
+ uint32 oobselinc74;
+ uint32 PAD[6];
+ uint32 oobselind30;
+ uint32 oobselind74;
+ uint32 PAD[38];
+ uint32 oobselouta30;
+ uint32 oobselouta74;
+ uint32 PAD[6];
+ uint32 oobseloutb30;
+ uint32 oobseloutb74;
+ uint32 PAD[6];
+ uint32 oobseloutc30;
+ uint32 oobseloutc74;
+ uint32 PAD[6];
+ uint32 oobseloutd30;
+ uint32 oobseloutd74;
+ uint32 PAD[38];
+ uint32 oobsynca;
+ uint32 oobseloutaen;
+ uint32 PAD[6];
+ uint32 oobsyncb;
+ uint32 oobseloutben;
+ uint32 PAD[6];
+ uint32 oobsyncc;
+ uint32 oobseloutcen;
+ uint32 PAD[6];
+ uint32 oobsyncd;
+ uint32 oobseloutden;
+ uint32 PAD[38];
+ uint32 oobaextwidth;
+ uint32 oobainwidth;
+ uint32 oobaoutwidth;
+ uint32 PAD[5];
+ uint32 oobbextwidth;
+ uint32 oobbinwidth;
+ uint32 oobboutwidth;
+ uint32 PAD[5];
+ uint32 oobcextwidth;
+ uint32 oobcinwidth;
+ uint32 oobcoutwidth;
+ uint32 PAD[5];
+ uint32 oobdextwidth;
+ uint32 oobdinwidth;
+ uint32 oobdoutwidth;
+ uint32 PAD[37];
+ uint32 ioctrlset;
+ uint32 ioctrlclear;
+ uint32 ioctrl;
+ uint32 PAD[61];
+ uint32 iostatus;
+ uint32 PAD[127];
+ uint32 ioctrlwidth;
+ uint32 iostatuswidth;
+ uint32 PAD[62];
+ uint32 resetctrl;
+ uint32 resetstatus;
+ uint32 resetreadid;
+ uint32 resetwriteid;
+ uint32 PAD[60];
+ uint32 errlogctrl;
+ uint32 errlogdone;
+ uint32 errlogstatus;
+ uint32 errlogaddrlo;
+ uint32 errlogaddrhi;
+ uint32 errlogid;
+ uint32 errloguser;
+ uint32 errlogflags;
+ uint32 PAD[56];
+ uint32 intstatus;
+ uint32 PAD[255];
+ uint32 config;
+ uint32 PAD[63];
+ uint32 itcr;
+ uint32 PAD[3];
+ uint32 itipooba;
+ uint32 itipoobb;
+ uint32 itipoobc;
+ uint32 itipoobd;
+ uint32 PAD[4];
+ uint32 itipoobaout;
+ uint32 itipoobbout;
+ uint32 itipoobcout;
+ uint32 itipoobdout;
+ uint32 PAD[4];
+ uint32 itopooba;
+ uint32 itopoobb;
+ uint32 itopoobc;
+ uint32 itopoobd;
+ uint32 PAD[4];
+ uint32 itopoobain;
+ uint32 itopoobbin;
+ uint32 itopoobcin;
+ uint32 itopoobdin;
+ uint32 PAD[4];
+ uint32 itopreset;
+ uint32 PAD[15];
+ uint32 peripherialid4;
+ uint32 peripherialid5;
+ uint32 peripherialid6;
+ uint32 peripherialid7;
+ uint32 peripherialid0;
+ uint32 peripherialid1;
+ uint32 peripherialid2;
+ uint32 peripherialid3;
+ uint32 componentid0;
+ uint32 componentid1;
+ uint32 componentid2;
+ uint32 componentid3;
+} aidmp_t;
+
+#endif
+
+
+#define OOB_BUSCONFIG 0x020
+#define OOB_STATUSA 0x100
+#define OOB_STATUSB 0x104
+#define OOB_STATUSC 0x108
+#define OOB_STATUSD 0x10c
+#define OOB_ENABLEA0 0x200
+#define OOB_ENABLEA1 0x204
+#define OOB_ENABLEA2 0x208
+#define OOB_ENABLEA3 0x20c
+#define OOB_ENABLEB0 0x280
+#define OOB_ENABLEB1 0x284
+#define OOB_ENABLEB2 0x288
+#define OOB_ENABLEB3 0x28c
+#define OOB_ENABLEC0 0x300
+#define OOB_ENABLEC1 0x304
+#define OOB_ENABLEC2 0x308
+#define OOB_ENABLEC3 0x30c
+#define OOB_ENABLED0 0x380
+#define OOB_ENABLED1 0x384
+#define OOB_ENABLED2 0x388
+#define OOB_ENABLED3 0x38c
+#define OOB_ITCR 0xf00
+#define OOB_ITIPOOBA 0xf10
+#define OOB_ITIPOOBB 0xf14
+#define OOB_ITIPOOBC 0xf18
+#define OOB_ITIPOOBD 0xf1c
+#define OOB_ITOPOOBA 0xf30
+#define OOB_ITOPOOBB 0xf34
+#define OOB_ITOPOOBC 0xf38
+#define OOB_ITOPOOBD 0xf3c
+
+
+#define AI_OOBSELINA30 0x000
+#define AI_OOBSELINA74 0x004
+#define AI_OOBSELINB30 0x020
+#define AI_OOBSELINB74 0x024
+#define AI_OOBSELINC30 0x040
+#define AI_OOBSELINC74 0x044
+#define AI_OOBSELIND30 0x060
+#define AI_OOBSELIND74 0x064
+#define AI_OOBSELOUTA30 0x100
+#define AI_OOBSELOUTA74 0x104
+#define AI_OOBSELOUTB30 0x120
+#define AI_OOBSELOUTB74 0x124
+#define AI_OOBSELOUTC30 0x140
+#define AI_OOBSELOUTC74 0x144
+#define AI_OOBSELOUTD30 0x160
+#define AI_OOBSELOUTD74 0x164
+#define AI_OOBSYNCA 0x200
+#define AI_OOBSELOUTAEN 0x204
+#define AI_OOBSYNCB 0x220
+#define AI_OOBSELOUTBEN 0x224
+#define AI_OOBSYNCC 0x240
+#define AI_OOBSELOUTCEN 0x244
+#define AI_OOBSYNCD 0x260
+#define AI_OOBSELOUTDEN 0x264
+#define AI_OOBAEXTWIDTH 0x300
+#define AI_OOBAINWIDTH 0x304
+#define AI_OOBAOUTWIDTH 0x308
+#define AI_OOBBEXTWIDTH 0x320
+#define AI_OOBBINWIDTH 0x324
+#define AI_OOBBOUTWIDTH 0x328
+#define AI_OOBCEXTWIDTH 0x340
+#define AI_OOBCINWIDTH 0x344
+#define AI_OOBCOUTWIDTH 0x348
+#define AI_OOBDEXTWIDTH 0x360
+#define AI_OOBDINWIDTH 0x364
+#define AI_OOBDOUTWIDTH 0x368
+
+
+#define AI_IOCTRLSET 0x400
+#define AI_IOCTRLCLEAR 0x404
+#define AI_IOCTRL 0x408
+#define AI_IOSTATUS 0x500
+#define AI_RESETCTRL 0x800
+#define AI_RESETSTATUS 0x804
+
+#define AI_IOCTRLWIDTH 0x700
+#define AI_IOSTATUSWIDTH 0x704
+
+#define AI_RESETREADID 0x808
+#define AI_RESETWRITEID 0x80c
+#define AI_ERRLOGCTRL 0xa00
+#define AI_ERRLOGDONE 0xa04
+#define AI_ERRLOGSTATUS 0xa08
+#define AI_ERRLOGADDRLO 0xa0c
+#define AI_ERRLOGADDRHI 0xa10
+#define AI_ERRLOGID 0xa14
+#define AI_ERRLOGUSER 0xa18
+#define AI_ERRLOGFLAGS 0xa1c
+#define AI_INTSTATUS 0xa00
+#define AI_CONFIG 0xe00
+#define AI_ITCR 0xf00
+#define AI_ITIPOOBA 0xf10
+#define AI_ITIPOOBB 0xf14
+#define AI_ITIPOOBC 0xf18
+#define AI_ITIPOOBD 0xf1c
+#define AI_ITIPOOBAOUT 0xf30
+#define AI_ITIPOOBBOUT 0xf34
+#define AI_ITIPOOBCOUT 0xf38
+#define AI_ITIPOOBDOUT 0xf3c
+#define AI_ITOPOOBA 0xf50
+#define AI_ITOPOOBB 0xf54
+#define AI_ITOPOOBC 0xf58
+#define AI_ITOPOOBD 0xf5c
+#define AI_ITOPOOBAIN 0xf70
+#define AI_ITOPOOBBIN 0xf74
+#define AI_ITOPOOBCIN 0xf78
+#define AI_ITOPOOBDIN 0xf7c
+#define AI_ITOPRESET 0xf90
+#define AI_PERIPHERIALID4 0xfd0
+#define AI_PERIPHERIALID5 0xfd4
+#define AI_PERIPHERIALID6 0xfd8
+#define AI_PERIPHERIALID7 0xfdc
+#define AI_PERIPHERIALID0 0xfe0
+#define AI_PERIPHERIALID1 0xfe4
+#define AI_PERIPHERIALID2 0xfe8
+#define AI_PERIPHERIALID3 0xfec
+#define AI_COMPONENTID0 0xff0
+#define AI_COMPONENTID1 0xff4
+#define AI_COMPONENTID2 0xff8
+#define AI_COMPONENTID3 0xffc
+
+
+#define AIRC_RESET 1
+
+
+#define AICFG_OOB 0x00000020
+#define AICFG_IOS 0x00000010
+#define AICFG_IOC 0x00000008
+#define AICFG_TO 0x00000004
+#define AICFG_ERRL 0x00000002
+#define AICFG_RST 0x00000001
+
+
+#define OOB_SEL_OUTEN_B_5 15
+#define OOB_SEL_OUTEN_B_6 23
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/bcm_android_types.h b/drivers/net/wireless/bcmdhd/include/bcm_android_types.h
new file mode 100644
index 000000000000..fd8aea725392
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcm_android_types.h
@@ -0,0 +1,44 @@
+/*
+ * Android related remote wl declarations
+ *
+ * Copyright (C) 2012, Broadcom Corporation
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
+ * the contents of this file may not be disclosed to third parties, copied
+ * or duplicated in any form, in whole or in part, without the prior
+ * written permission of Broadcom Corporation.
+ * $Id: bcm_android_types.h 241182 2011-02-17 21:50:03Z $
+ *
+ */
+#ifndef _wlu_android_h
+#define _wlu_android_h
+#define __fd_mask unsigned long
+typedef struct
+ {
+
+#ifdef __USE_XOPEN
+ __fd_mask fds_bits[__FD_SETSIZE / __NFDBITS];
+# define __FDS_BITS(set) ((set)->fds_bits)
+#else
+ __fd_mask __fds_bits[__FD_SETSIZE / __NFDBITS];
+# define __FDS_BITS(set) ((set)->__fds_bits)
+#endif
+ } fd_set1;
+#define fd_set fd_set1
+
+#define htons(x) BCMSWAP16(x)
+#define htonl(x) BCMSWAP32(x)
+#define __FD_ZERO(s) \
+ do { \
+ unsigned int __i; \
+ fd_set *__arr = (s); \
+ for (__i = 0; __i < sizeof (fd_set) / sizeof (__fd_mask); ++__i) \
+ __FDS_BITS(__arr)[__i] = 0; \
+ } while (0)
+#define __FD_SET(d, s) (__FDS_BITS (s)[__FDELT(d)] |= __FDMASK(d))
+#define __FD_CLR(d, s) (__FDS_BITS (s)[__FDELT(d)] &= ~__FDMASK(d))
+#define __FD_ISSET(d, s) ((__FDS_BITS (s)[__FDELT(d)] & __FDMASK(d)) != 0)
+#define MCL_CURRENT 1
+#define MCL_FUTURE 2
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/bcm_cfg.h b/drivers/net/wireless/bcmdhd/include/bcm_cfg.h
new file mode 100644
index 000000000000..26da752186a2
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcm_cfg.h
@@ -0,0 +1,29 @@
+/*
+ * BCM common config options
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcm_cfg.h 294399 2011-11-07 03:31:22Z $
+ */
+
+#ifndef _bcm_cfg_h_
+#define _bcm_cfg_h_
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h b/drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h
new file mode 100644
index 000000000000..8fe3de7afb72
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h
@@ -0,0 +1,361 @@
+/*
+ * Memory pools library, Public interface
+ *
+ * API Overview
+ *
+ * This package provides a memory allocation subsystem based on pools of
+ * homogeneous objects.
+ *
+ * Instrumentation is available for reporting memory utilization both
+ * on a per-data-structure basis and system wide.
+ *
+ * There are two main types defined in this API.
+ *
+ * pool manager: A singleton object that acts as a factory for
+ * pool allocators. It also is used for global
+ * instrumentation, such as reporting all blocks
+ * in use across all data structures. The pool manager
+ * creates and provides individual memory pools
+ * upon request to application code.
+ *
+ * memory pool: An object for allocating homogeneous memory blocks.
+ *
+ * Global identifiers in this module use the following prefixes:
+ * bcm_mpm_* Memory pool manager
+ * bcm_mp_* Memory pool
+ *
+ * There are two main types of memory pools:
+ *
+ * prealloc: The contiguous memory block of objects can either be supplied
+ * by the client or malloc'ed by the memory manager. The objects are
+ * allocated out of a block of memory and freed back to the block.
+ *
+ * heap: The memory pool allocator uses the heap (malloc/free) for memory.
+ * In this case, the pool allocator is just providing statistics
+ * and instrumentation on top of the heap, without modifying the heap
+ * allocation implementation.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id$
+ */
+
+#ifndef _BCM_MPOOL_PUB_H
+#define _BCM_MPOOL_PUB_H 1
+
+#include <typedefs.h> /* needed for uint16 */
+
+
+/*
+**************************************************************************
+*
+* Type definitions, handles
+*
+**************************************************************************
+*/
+
+/* Forward declaration of OSL handle. */
+struct osl_info;
+
+/* Forward declaration of string buffer. */
+struct bcmstrbuf;
+
+/*
+ * Opaque type definition for the pool manager handle. This object is used for global
+ * memory pool operations such as obtaining a new pool, deleting a pool, iterating and
+ * instrumentation/debugging.
+ */
+struct bcm_mpm_mgr;
+typedef struct bcm_mpm_mgr *bcm_mpm_mgr_h;
+
+/*
+ * Opaque type definition for an instance of a pool. This handle is used for allocating
+ * and freeing memory through the pool, as well as management/instrumentation on this
+ * specific pool.
+ */
+struct bcm_mp_pool;
+typedef struct bcm_mp_pool *bcm_mp_pool_h;
+
+
+/*
+ * To make instrumentation more readable, every memory
+ * pool must have a readable name. Pool names are up to
+ * 8 bytes including '\0' termination. (7 printable characters.)
+ */
+#define BCM_MP_NAMELEN 8
+
+
+/*
+ * Type definition for pool statistics.
+ */
+typedef struct bcm_mp_stats {
+ char name[BCM_MP_NAMELEN]; /* Name of this pool. */
+ unsigned int objsz; /* Object size allocated in this pool */
+ uint16 nobj; /* Total number of objects in this pool */
+ uint16 num_alloc; /* Number of objects currently allocated */
+ uint16 high_water; /* Max number of allocated objects. */
+ uint16 failed_alloc; /* Failed allocations. */
+} bcm_mp_stats_t;
+
+
+/*
+**************************************************************************
+*
+* API Routines on the pool manager.
+*
+**************************************************************************
+*/
+
+/*
+ * bcm_mpm_init() - initialize the whole memory pool system.
+ *
+ * Parameters:
+ * osh: INPUT Operating system handle. Needed for heap memory allocation.
+ * max_pools: INPUT Maximum number of mempools supported.
+ * mgr: OUTPUT The handle is written with the new pools manager object/handle.
+ *
+ * Returns:
+ * BCME_OK Object initialized successfully. May be used.
+ * BCME_NOMEM Initialization failed due to no memory. Object must not be used.
+ */
+int bcm_mpm_init(struct osl_info *osh, int max_pools, bcm_mpm_mgr_h *mgrp);
+
+
+/*
+ * bcm_mpm_deinit() - de-initialize the whole memory pool system.
+ *
+ * Parameters:
+ * mgr: INPUT Pointer to pool manager handle.
+ *
+ * Returns:
+ * BCME_OK Memory pool manager successfully de-initialized.
+ * other Indicated error occurred during de-initialization.
+ */
+int bcm_mpm_deinit(bcm_mpm_mgr_h *mgrp);
+
+/*
+ * bcm_mpm_create_prealloc_pool() - Create a new pool for fixed size objects. The
+ * pool uses a contiguous block of pre-alloced
+ * memory. The memory block may either be provided
+ * by the client or dynamically allocated by the
+ * pool manager.
+ *
+ * Parameters:
+ * mgr: INPUT The handle to the pool manager
+ * obj_sz: INPUT Size of objects that will be allocated by the new pool
+ * Must be >= sizeof(void *).
+ * nobj: INPUT Maximum number of concurrently existing objects to support
+ * memstart INPUT Pointer to the memory to use, or NULL to malloc()
+ * memsize INPUT Number of bytes referenced from memstart (for error checking).
+ * Must be 0 if 'memstart' is NULL.
+ * poolname INPUT For instrumentation, the name of the pool
+ * newp: OUTPUT The handle for the new pool, if creation is successful
+ *
+ * Returns:
+ * BCME_OK Pool created ok.
+ * other Pool not created due to indicated error. newpoolp set to NULL.
+ *
+ *
+ */
+int bcm_mpm_create_prealloc_pool(bcm_mpm_mgr_h mgr,
+ unsigned int obj_sz,
+ int nobj,
+ void *memstart,
+ unsigned int memsize,
+ char poolname[BCM_MP_NAMELEN],
+ bcm_mp_pool_h *newp);
+
+
+/*
+ * bcm_mpm_delete_prealloc_pool() - Delete a memory pool. This should only be called after
+ * all memory objects have been freed back to the pool.
+ *
+ * Parameters:
+ * mgr: INPUT The handle to the pools manager
+ * pool: INPUT The handle of the pool to delete
+ *
+ * Returns:
+ * BCME_OK Pool deleted ok.
+ * other Pool not deleted due to indicated error.
+ *
+ */
+int bcm_mpm_delete_prealloc_pool(bcm_mpm_mgr_h mgr, bcm_mp_pool_h *poolp);
+
+/*
+ * bcm_mpm_create_heap_pool() - Create a new pool for fixed size objects. The memory
+ * pool allocator uses the heap (malloc/free) for memory.
+ * In this case, the pool allocator is just providing
+ * statistics and instrumentation on top of the heap,
+ * without modifying the heap allocation implementation.
+ *
+ * Parameters:
+ * mgr: INPUT The handle to the pool manager
+ * obj_sz: INPUT Size of objects that will be allocated by the new pool
+ * poolname INPUT For instrumentation, the name of the pool
+ * newp: OUTPUT The handle for the new pool, if creation is successful
+ *
+ * Returns:
+ * BCME_OK Pool created ok.
+ * other Pool not created due to indicated error. newpoolp set to NULL.
+ *
+ *
+ */
+int bcm_mpm_create_heap_pool(bcm_mpm_mgr_h mgr, unsigned int obj_sz,
+ char poolname[BCM_MP_NAMELEN],
+ bcm_mp_pool_h *newp);
+
+
+/*
+ * bcm_mpm_delete_heap_pool() - Delete a memory pool. This should only be called after
+ * all memory objects have been freed back to the pool.
+ *
+ * Parameters:
+ * mgr: INPUT The handle to the pools manager
+ * pool: INPUT The handle of the pool to delete
+ *
+ * Returns:
+ * BCME_OK Pool deleted ok.
+ * other Pool not deleted due to indicated error.
+ *
+ */
+int bcm_mpm_delete_heap_pool(bcm_mpm_mgr_h mgr, bcm_mp_pool_h *poolp);
+
+
+/*
+ * bcm_mpm_stats() - Return stats for all pools
+ *
+ * Parameters:
+ * mgr: INPUT The handle to the pools manager
+ * stats: OUTPUT Array of pool statistics.
+ * nentries: MOD Max elements in 'stats' array on INPUT. Actual number
+ * of array elements copied to 'stats' on OUTPUT.
+ *
+ * Returns:
+ * BCME_OK Ok
+ * other Error getting stats.
+ *
+ */
+int bcm_mpm_stats(bcm_mpm_mgr_h mgr, bcm_mp_stats_t *stats, int *nentries);
+
+
+/*
+ * bcm_mpm_dump() - Display statistics on all pools
+ *
+ * Parameters:
+ * mgr: INPUT The handle to the pools manager
+ * b: OUTPUT Output buffer.
+ *
+ * Returns:
+ * BCME_OK Ok
+ * other Error during dump.
+ *
+ */
+int bcm_mpm_dump(bcm_mpm_mgr_h mgr, struct bcmstrbuf *b);
+
+
+/*
+ * bcm_mpm_get_obj_size() - The size of memory objects may need to be padded to
+ * compensate for alignment requirements of the objects.
+ * This function provides the padded object size. If clients
+ * pre-allocate a memory slab for a memory pool, the
+ * padded object size should be used by the client to allocate
+ * the memory slab (in order to provide sufficient space for
+ * the maximum number of objects).
+ *
+ * Parameters:
+ * mgr: INPUT The handle to the pools manager.
+ * obj_sz: INPUT Input object size.
+ * padded_obj_sz: OUTPUT Padded object size.
+ *
+ * Returns:
+ * BCME_OK Ok
+ * BCME_BADARG Bad arguments.
+ *
+ */
+int bcm_mpm_get_obj_size(bcm_mpm_mgr_h mgr, unsigned int obj_sz, unsigned int *padded_obj_sz);
+
+
+/*
+***************************************************************************
+*
+* API Routines on a specific pool.
+*
+***************************************************************************
+*/
+
+
+/*
+ * bcm_mp_alloc() - Allocate a memory pool object.
+ *
+ * Parameters:
+ * pool: INPUT The handle to the pool.
+ *
+ * Returns:
+ * A pointer to the new object. NULL on error.
+ *
+ */
+void* bcm_mp_alloc(bcm_mp_pool_h pool);
+
+/*
+ * bcm_mp_free() - Free a memory pool object.
+ *
+ * Parameters:
+ * pool: INPUT The handle to the pool.
+ * objp: INPUT A pointer to the object to free.
+ *
+ * Returns:
+ * BCME_OK Ok
+ * other Error during free.
+ *
+ */
+int bcm_mp_free(bcm_mp_pool_h pool, void *objp);
+
+/*
+ * bcm_mp_stats() - Return stats for this pool
+ *
+ * Parameters:
+ * pool: INPUT The handle to the pool
+ * stats: OUTPUT Pool statistics
+ *
+ * Returns:
+ * BCME_OK Ok
+ * other Error getting statistics.
+ *
+ */
+int bcm_mp_stats(bcm_mp_pool_h pool, bcm_mp_stats_t *stats);
+
+
+/*
+ * bcm_mp_dump() - Dump a pool
+ *
+ * Parameters:
+ * pool: INPUT The handle to the pool
+ * b OUTPUT Output buffer
+ *
+ * Returns:
+ * BCME_OK Ok
+ * other Error during dump.
+ *
+ */
+int bcm_mp_dump(bcm_mp_pool_h pool, struct bcmstrbuf *b);
+
+
+#endif /* _BCM_MPOOL_PUB_H */
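
Taken together, the declarations above form a small lifecycle: create the manager, carve a pool out of it, allocate and free objects, then tear everything down. A hedged usage sketch follows (my_obj_t, the osh argument and the 16-object pool size are illustration only; BCME_OK, uint32 and struct osl_info come from Broadcom headers included elsewhere in the driver):

	typedef struct { uint32 seq; uint32 len; } my_obj_t;    /* hypothetical pool object */

	static int mpool_demo(struct osl_info *osh)
	{
		bcm_mpm_mgr_h mgr = NULL;
		bcm_mp_pool_h pool = NULL;
		char name[BCM_MP_NAMELEN] = "demo";
		my_obj_t *obj;
		int err;

		err = bcm_mpm_init(osh, 4, &mgr);                /* up to 4 pools */
		if (err != BCME_OK)
			return err;

		/* Let the manager malloc the backing slab: memstart=NULL, memsize=0. */
		err = bcm_mpm_create_prealloc_pool(mgr, sizeof(my_obj_t), 16,
		                                   NULL, 0, name, &pool);
		if (err != BCME_OK)
			goto out;

		obj = (my_obj_t *)bcm_mp_alloc(pool);            /* NULL on exhaustion */
		if (obj != NULL) {
			obj->seq = 1;
			bcm_mp_free(pool, obj);                  /* return it to the pool */
		}

		bcm_mpm_delete_prealloc_pool(mgr, &pool);        /* only after all frees */
	out:
		bcm_mpm_deinit(&mgr);
		return err;
	}
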
diff --git a/drivers/net/wireless/bcmdhd/include/bcmcdc.h b/drivers/net/wireless/bcmdhd/include/bcmcdc.h
new file mode 100644
index 000000000000..9bae1c20a9bf
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmcdc.h
@@ -0,0 +1,126 @@
+/*
+ * CDC network driver ioctl/indication encoding
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmcdc.h 318308 2012-03-02 02:23:42Z $
+ */
+#ifndef _bcmcdc_h_
+#define _bcmcdc_h_
+#include <proto/ethernet.h>
+
+typedef struct cdc_ioctl {
+ uint32 cmd;
+ uint32 len;
+ uint32 flags;
+ uint32 status;
+} cdc_ioctl_t;
+
+
+#define CDC_MAX_MSG_SIZE ETHER_MAX_LEN
+
+
+#define CDCL_IOC_OUTLEN_MASK 0x0000FFFF
+
+#define CDCL_IOC_OUTLEN_SHIFT 0
+#define CDCL_IOC_INLEN_MASK 0xFFFF0000
+#define CDCL_IOC_INLEN_SHIFT 16
+
+
+#define CDCF_IOC_ERROR 0x01
+#define CDCF_IOC_SET 0x02
+#define CDCF_IOC_OVL_IDX_MASK 0x3c
+#define CDCF_IOC_OVL_RSV 0x40
+#define CDCF_IOC_OVL 0x80
+#define CDCF_IOC_ACTION_MASK 0xfe
+#define CDCF_IOC_ACTION_SHIFT 1
+#define CDCF_IOC_IF_MASK 0xF000
+#define CDCF_IOC_IF_SHIFT 12
+#define CDCF_IOC_ID_MASK 0xFFFF0000
+#define CDCF_IOC_ID_SHIFT 16
+
+#define CDC_IOC_IF_IDX(flags) (((flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT)
+#define CDC_IOC_ID(flags) (((flags) & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT)
+
+#define CDC_GET_IF_IDX(hdr) \
+ ((int)((((hdr)->flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT))
+#define CDC_SET_IF_IDX(hdr, idx) \
+ ((hdr)->flags = (((hdr)->flags & ~CDCF_IOC_IF_MASK) | ((idx) << CDCF_IOC_IF_SHIFT)))
+
+
+
+struct bdc_header {
+ uint8 flags;
+ uint8 priority;
+ uint8 flags2;
+ uint8 dataOffset;
+};
+
+#define BDC_HEADER_LEN 4
+
+
+#define BDC_FLAG_80211_PKT 0x01
+#define BDC_FLAG_SUM_GOOD 0x04
+#define BDC_FLAG_SUM_NEEDED 0x08
+#define BDC_FLAG_EVENT_MSG 0x08
+#define BDC_FLAG_VER_MASK 0xf0
+#define BDC_FLAG_VER_SHIFT 4
+
+
+#define BDC_PRIORITY_MASK 0x07
+#define BDC_PRIORITY_FC_MASK 0xf0
+#define BDC_PRIORITY_FC_SHIFT 4
+
+
+#define BDC_FLAG2_IF_MASK 0x0f
+#define BDC_FLAG2_IF_SHIFT 0
+#define BDC_FLAG2_FC_FLAG 0x10
+
+
+
+#define BDC_PROTO_VER_1 1
+#define BDC_PROTO_VER 2
+
+
+#define BDC_GET_IF_IDX(hdr) \
+ ((int)((((hdr)->flags2) & BDC_FLAG2_IF_MASK) >> BDC_FLAG2_IF_SHIFT))
+#define BDC_SET_IF_IDX(hdr, idx) \
+ ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | ((idx) << BDC_FLAG2_IF_SHIFT)))
+
+#define BDC_FLAG2_PAD_MASK 0xf0
+#define BDC_FLAG_PAD_MASK 0x03
+#define BDC_FLAG2_PAD_SHIFT 2
+#define BDC_FLAG_PAD_SHIFT 0
+#define BDC_FLAG2_PAD_IDX 0x3c
+#define BDC_FLAG_PAD_IDX 0x03
+#define BDC_GET_PAD_LEN(hdr) \
+ ((int)(((((hdr)->flags2) & BDC_FLAG2_PAD_MASK) >> BDC_FLAG2_PAD_SHIFT) | \
+ ((((hdr)->flags) & BDC_FLAG_PAD_MASK) >> BDC_FLAG_PAD_SHIFT)))
+#define BDC_SET_PAD_LEN(hdr, idx) \
+ ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_PAD_MASK) | \
+ (((idx) & BDC_FLAG2_PAD_IDX) << BDC_FLAG2_PAD_SHIFT))); \
+ ((hdr)->flags = (((hdr)->flags & ~BDC_FLAG_PAD_MASK) | \
+ (((idx) & BDC_FLAG_PAD_IDX) << BDC_FLAG_PAD_SHIFT)))
+
+#endif
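
The flags word of cdc_ioctl_t packs three fields behind the masks above: bit 1 marks a set (as opposed to query) operation, bits 12..15 carry the interface index, and the upper 16 bits carry a per-request ID that the firmware echoes back. A short sketch of encoding and then decoding one request (the request ID 0x1234 and interface index 1 are arbitrary illustration values; uint32 comes from typedefs.h):

	cdc_ioctl_t msg;
	uint32 reqid = 0x1234, ifidx = 1;

	msg.cmd    = 0;            /* driver-specific ioctl command code */
	msg.len    = 0;
	msg.status = 0;
	msg.flags  = CDCF_IOC_SET |
	             ((ifidx << CDCF_IOC_IF_SHIFT) & CDCF_IOC_IF_MASK) |
	             ((reqid << CDCF_IOC_ID_SHIFT) & CDCF_IOC_ID_MASK);

	/* On completion the echoed header decodes back to the same values: */
	/*   CDC_IOC_ID(msg.flags)     == 0x1234                            */
	/*   CDC_IOC_IF_IDX(msg.flags) == 1                                 */
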
diff --git a/drivers/net/wireless/bcmdhd/include/bcmdefs.h b/drivers/net/wireless/bcmdhd/include/bcmdefs.h
new file mode 100644
index 000000000000..a35ed72aa857
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmdefs.h
@@ -0,0 +1,239 @@
+/*
+ * Misc system wide definitions
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmdefs.h 316830 2012-02-23 20:29:22Z $
+ */
+
+#ifndef _bcmdefs_h_
+#define _bcmdefs_h_
+
+
+
+
+#define BCM_REFERENCE(data) ((void)(data))
+
+
+#define STATIC_ASSERT(expr) { \
+ \
+ typedef enum { _STATIC_ASSERT_NOT_CONSTANT = (expr) } _static_assert_e; \
+ \
+ typedef char STATIC_ASSERT_FAIL[(expr) ? 1 : -1]; \
+}
+
+
+
+#define bcmreclaimed 0
+#define _data _data
+#define _fn _fn
+#define BCMPREATTACHDATA(_data) _data
+#define BCMPREATTACHFN(_fn) _fn
+#define _data _data
+#define _fn _fn
+#define _fn _fn
+#define BCMNMIATTACHFN(_fn) _fn
+#define BCMNMIATTACHDATA(_data) _data
+#define CONST const
+#ifndef BCMFASTPATH
+#define BCMFASTPATH
+#define BCMFASTPATH_HOST
+#endif
+
+
+
+#define _data _data
+#define BCMROMDAT_NAME(_data) _data
+#define _fn _fn
+#define _fn _fn
+#define STATIC static
+#define BCMROMDAT_ARYSIZ(data) ARRAYSIZE(data)
+#define BCMROMDAT_SIZEOF(data) sizeof(data)
+#define BCMROMDAT_APATCH(data)
+#define BCMROMDAT_SPATCH(data)
+
+
+#define SI_BUS 0
+#define PCI_BUS 1
+#define PCMCIA_BUS 2
+#define SDIO_BUS 3
+#define JTAG_BUS 4
+#define USB_BUS 5
+#define SPI_BUS 6
+#define RPC_BUS 7
+
+
+#ifdef BCMBUSTYPE
+#define BUSTYPE(bus) (BCMBUSTYPE)
+#else
+#define BUSTYPE(bus) (bus)
+#endif
+
+
+#ifdef BCMCHIPTYPE
+#define CHIPTYPE(bus) (BCMCHIPTYPE)
+#else
+#define CHIPTYPE(bus) (bus)
+#endif
+
+
+
+#if defined(BCMSPROMBUS)
+#define SPROMBUS (BCMSPROMBUS)
+#elif defined(SI_PCMCIA_SROM)
+#define SPROMBUS (PCMCIA_BUS)
+#else
+#define SPROMBUS (PCI_BUS)
+#endif
+
+
+#ifdef BCMCHIPID
+#define CHIPID(chip) (BCMCHIPID)
+#else
+#define CHIPID(chip) (chip)
+#endif
+
+#ifdef BCMCHIPREV
+#define CHIPREV(rev) (BCMCHIPREV)
+#else
+#define CHIPREV(rev) (rev)
+#endif
+
+
+#define DMADDR_MASK_32 0x0
+#define DMADDR_MASK_30 0xc0000000
+#define DMADDR_MASK_0 0xffffffff
+
+#define DMADDRWIDTH_30 30
+#define DMADDRWIDTH_32 32
+#define DMADDRWIDTH_63 63
+#define DMADDRWIDTH_64 64
+
+#ifdef BCMDMA64OSL
+typedef struct {
+ uint32 loaddr;
+ uint32 hiaddr;
+} dma64addr_t;
+
+typedef dma64addr_t dmaaddr_t;
+#define PHYSADDRHI(_pa) ((_pa).hiaddr)
+#define PHYSADDRHISET(_pa, _val) \
+ do { \
+ (_pa).hiaddr = (_val); \
+ } while (0)
+#define PHYSADDRLO(_pa) ((_pa).loaddr)
+#define PHYSADDRLOSET(_pa, _val) \
+ do { \
+ (_pa).loaddr = (_val); \
+ } while (0)
+
+#else
+typedef unsigned long dmaaddr_t;
+#define PHYSADDRHI(_pa) (0)
+#define PHYSADDRHISET(_pa, _val)
+#define PHYSADDRLO(_pa) ((_pa))
+#define PHYSADDRLOSET(_pa, _val) \
+ do { \
+ (_pa) = (_val); \
+ } while (0)
+#endif
+
+
+typedef struct {
+ dmaaddr_t addr;
+ uint32 length;
+} hnddma_seg_t;
+
+#define MAX_DMA_SEGS 4
+
+
+typedef struct {
+ void *oshdmah;
+ uint origsize;
+ uint nsegs;
+ hnddma_seg_t segs[MAX_DMA_SEGS];
+} hnddma_seg_map_t;
+
+
+
+
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RCP_TXNOCOPY)
+
+#define BCMEXTRAHDROOM 220
+#else
+#define BCMEXTRAHDROOM 172
+#endif
+
+
+#ifndef SDALIGN
+#define SDALIGN 32
+#endif
+
+
+#define BCMDONGLEHDRSZ 12
+#define BCMDONGLEPADSZ 16
+
+#define BCMDONGLEOVERHEAD (BCMDONGLEHDRSZ + BCMDONGLEPADSZ)
+
+
+#if defined(NO_BCMDBG_ASSERT)
+# undef BCMDBG_ASSERT
+# undef BCMASSERT_LOG
+#endif
+
+#if defined(BCMASSERT_LOG)
+#define BCMASSERT_SUPPORT
+#endif
+
+
+#define BITFIELD_MASK(width) \
+ (((unsigned)1 << (width)) - 1)
+#define GFIELD(val, field) \
+ (((val) >> field ## _S) & field ## _M)
+#define SFIELD(val, field, bits) \
+ (((val) & (~(field ## _M << field ## _S))) | \
+ ((unsigned)(bits) << field ## _S))
+
+
+#ifdef BCMSMALL
+#undef BCMSPACE
+#define bcmspace FALSE
+#else
+#define BCMSPACE
+#define bcmspace TRUE
+#endif
+
+
+#define MAXSZ_NVRAM_VARS 4096
+
+
+
+#ifdef DL_NVRAM
+#define NVRAM_ARRAY_MAXSIZE DL_NVRAM
+#else
+#define NVRAM_ARRAY_MAXSIZE MAXSZ_NVRAM_VARS
+#endif
+
+#ifdef BCMUSBDEV_ENABLED
+extern uint32 gFWID;
+#endif
+
+#endif
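
GFIELD() and SFIELD() rely on token pasting: a field FOO is described by a companion pair FOO_M (right-justified mask) and FOO_S (shift). A short sketch with a hypothetical 4-bit CHAN field occupying bits 8..11 of a register word (CHAN_M/CHAN_S are illustration only, not defined by this header; uint32 comes from typedefs.h):

	#define CHAN_M  0xf     /* 4-bit field...        */
	#define CHAN_S  8       /* ...starting at bit 8  */

	static uint32 chan_demo(void)
	{
		uint32 reg = 0;

		STATIC_ASSERT(sizeof(uint32) == 4);   /* compile-time size check */
		reg = SFIELD(reg, CHAN, 6);           /* reg is now 0x00000600   */
		return GFIELD(reg, CHAN);             /* yields 6                */
	}
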
diff --git a/drivers/net/wireless/bcmdhd/include/bcmdevs.h b/drivers/net/wireless/bcmdhd/include/bcmdevs.h
new file mode 100644
index 000000000000..7b0f9b165b3f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmdevs.h
@@ -0,0 +1,487 @@
+/*
+ * Broadcom device-specific manifest constants.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmdevs.h 327007 2012-04-11 22:45:50Z $
+ */
+
+#ifndef _BCMDEVS_H
+#define _BCMDEVS_H
+
+
+#define VENDOR_EPIGRAM 0xfeda
+#define VENDOR_BROADCOM 0x14e4
+#define VENDOR_3COM 0x10b7
+#define VENDOR_NETGEAR 0x1385
+#define VENDOR_DIAMOND 0x1092
+#define VENDOR_INTEL 0x8086
+#define VENDOR_DELL 0x1028
+#define VENDOR_HP 0x103c
+#define VENDOR_HP_COMPAQ 0x0e11
+#define VENDOR_APPLE 0x106b
+#define VENDOR_SI_IMAGE 0x1095
+#define VENDOR_BUFFALO 0x1154
+#define VENDOR_TI 0x104c
+#define VENDOR_RICOH 0x1180
+#define VENDOR_JMICRON 0x197b
+
+
+
+#define VENDOR_BROADCOM_PCMCIA 0x02d0
+
+
+#define VENDOR_BROADCOM_SDIO 0x00BF
+
+
+#define BCM_DNGL_VID 0x0a5c
+#define BCM_DNGL_BL_PID_4328 0xbd12
+#define BCM_DNGL_BL_PID_4322 0xbd13
+#define BCM_DNGL_BL_PID_4319 0xbd16
+#define BCM_DNGL_BL_PID_43236 0xbd17
+#define BCM_DNGL_BL_PID_4332 0xbd18
+#define BCM_DNGL_BL_PID_4330 0xbd19
+#define BCM_DNGL_BL_PID_4334 0xbd1a
+#define BCM_DNGL_BL_PID_43239 0xbd1b
+#define BCM_DNGL_BL_PID_4324 0xbd1c
+#define BCM_DNGL_BL_PID_4360 0xbd1d
+
+#define BCM_DNGL_BDC_PID 0x0bdc
+#define BCM_DNGL_JTAG_PID 0x4a44
+
+
+#define BCM_HWUSB_PID_43239 43239
+
+
+#define BCM4210_DEVICE_ID 0x1072
+#define BCM4230_DEVICE_ID 0x1086
+#define BCM4401_ENET_ID 0x170c
+#define BCM3352_DEVICE_ID 0x3352
+#define BCM3360_DEVICE_ID 0x3360
+#define BCM4211_DEVICE_ID 0x4211
+#define BCM4231_DEVICE_ID 0x4231
+#define BCM4303_D11B_ID 0x4303
+#define BCM4311_D11G_ID 0x4311
+#define BCM4311_D11DUAL_ID 0x4312
+#define BCM4311_D11A_ID 0x4313
+#define BCM4328_D11DUAL_ID 0x4314
+#define BCM4328_D11G_ID 0x4315
+#define BCM4328_D11A_ID 0x4316
+#define BCM4318_D11G_ID 0x4318
+#define BCM4318_D11DUAL_ID 0x4319
+#define BCM4318_D11A_ID 0x431a
+#define BCM4325_D11DUAL_ID 0x431b
+#define BCM4325_D11G_ID 0x431c
+#define BCM4325_D11A_ID 0x431d
+#define BCM4306_D11G_ID 0x4320
+#define BCM4306_D11A_ID 0x4321
+#define BCM4306_UART_ID 0x4322
+#define BCM4306_V90_ID 0x4323
+#define BCM4306_D11DUAL_ID 0x4324
+#define BCM4306_D11G_ID2 0x4325
+#define BCM4321_D11N_ID 0x4328
+#define BCM4321_D11N2G_ID 0x4329
+#define BCM4321_D11N5G_ID 0x432a
+#define BCM4322_D11N_ID 0x432b
+#define BCM4322_D11N2G_ID 0x432c
+#define BCM4322_D11N5G_ID 0x432d
+#define BCM4329_D11N_ID 0x432e
+#define BCM4329_D11N2G_ID 0x432f
+#define BCM4329_D11N5G_ID 0x4330
+#define BCM4315_D11DUAL_ID 0x4334
+#define BCM4315_D11G_ID 0x4335
+#define BCM4315_D11A_ID 0x4336
+#define BCM4319_D11N_ID 0x4337
+#define BCM4319_D11N2G_ID 0x4338
+#define BCM4319_D11N5G_ID 0x4339
+#define BCM43231_D11N2G_ID 0x4340
+#define BCM43221_D11N2G_ID 0x4341
+#define BCM43222_D11N_ID 0x4350
+#define BCM43222_D11N2G_ID 0x4351
+#define BCM43222_D11N5G_ID 0x4352
+#define BCM43224_D11N_ID 0x4353
+#define BCM43224_D11N_ID_VEN1 0x0576
+#define BCM43226_D11N_ID 0x4354
+#define BCM43236_D11N_ID 0x4346
+#define BCM43236_D11N2G_ID 0x4347
+#define BCM43236_D11N5G_ID 0x4348
+#define BCM43225_D11N2G_ID 0x4357
+#define BCM43421_D11N_ID 0xA99D
+#define BCM4313_D11N2G_ID 0x4727
+#define BCM4330_D11N_ID 0x4360
+#define BCM4330_D11N2G_ID 0x4361
+#define BCM4330_D11N5G_ID 0x4362
+#define BCM4336_D11N_ID 0x4343
+#define BCM6362_D11N_ID 0x435f
+#define BCM4331_D11N_ID 0x4331
+#define BCM4331_D11N2G_ID 0x4332
+#define BCM4331_D11N5G_ID 0x4333
+#define BCM43237_D11N_ID 0x4355
+#define BCM43237_D11N5G_ID 0x4356
+#define BCM43227_D11N2G_ID 0x4358
+#define BCM43228_D11N_ID 0x4359
+#define BCM43228_D11N5G_ID 0x435a
+#define BCM43362_D11N_ID 0x4363
+#define BCM43239_D11N_ID 0x4370
+#define BCM4324_D11N_ID 0x4374
+#define BCM43217_D11N2G_ID 0x43a9
+#define BCM43131_D11N2G_ID 0x43aa
+#define BCM4314_D11N2G_ID 0x4364
+#define BCM43142_D11N2G_ID 0x4365
+#define BCM4334_D11N_ID 0x4380
+#define BCM4334_D11N2G_ID 0x4381
+#define BCM4334_D11N5G_ID 0x4382
+#define BCM4360_D11AC_ID 0x43a0
+#define BCM4360_D11AC2G_ID 0x43a1
+#define BCM4360_D11AC5G_ID 0x43a2
+
+
+#define BCM943228HMB_SSID_VEN1 0x0607
+#define BCM94313HMGBL_SSID_VEN1 0x0608
+#define BCM94313HMG_SSID_VEN1 0x0609
+
+
+#define BCM4335_D11AC_ID 0x43ae
+#define BCM4335_D11AC2G_ID 0x43af
+#define BCM4335_D11AC5G_ID 0x43b0
+#define BCM4352_D11AC_ID 0x43b1
+#define BCM4352_D11AC2G_ID 0x43b2
+#define BCM4352_D11AC5G_ID 0x43b3
+
+#define BCMGPRS_UART_ID 0x4333
+#define BCMGPRS2_UART_ID 0x4344
+#define FPGA_JTAGM_ID 0x43f0
+#define BCM_JTAGM_ID 0x43f1
+#define SDIOH_FPGA_ID 0x43f2
+#define BCM_SDIOH_ID 0x43f3
+#define SDIOD_FPGA_ID 0x43f4
+#define SPIH_FPGA_ID 0x43f5
+#define BCM_SPIH_ID 0x43f6
+#define MIMO_FPGA_ID 0x43f8
+#define BCM_JTAGM2_ID 0x43f9
+#define SDHCI_FPGA_ID 0x43fa
+#define BCM4402_ENET_ID 0x4402
+#define BCM4402_V90_ID 0x4403
+#define BCM4410_DEVICE_ID 0x4410
+#define BCM4412_DEVICE_ID 0x4412
+#define BCM4430_DEVICE_ID 0x4430
+#define BCM4432_DEVICE_ID 0x4432
+#define BCM4704_ENET_ID 0x4706
+#define BCM4710_DEVICE_ID 0x4710
+#define BCM47XX_AUDIO_ID 0x4711
+#define BCM47XX_V90_ID 0x4712
+#define BCM47XX_ENET_ID 0x4713
+#define BCM47XX_EXT_ID 0x4714
+#define BCM47XX_GMAC_ID 0x4715
+#define BCM47XX_USBH_ID 0x4716
+#define BCM47XX_USBD_ID 0x4717
+#define BCM47XX_IPSEC_ID 0x4718
+#define BCM47XX_ROBO_ID 0x4719
+#define BCM47XX_USB20H_ID 0x471a
+#define BCM47XX_USB20D_ID 0x471b
+#define BCM47XX_ATA100_ID 0x471d
+#define BCM47XX_SATAXOR_ID 0x471e
+#define BCM47XX_GIGETH_ID 0x471f
+#define BCM4712_MIPS_ID 0x4720
+#define BCM4716_DEVICE_ID 0x4722
+#define BCM47XX_SMBUS_EMU_ID 0x47fe
+#define BCM47XX_XOR_EMU_ID 0x47ff
+#define EPI41210_DEVICE_ID 0xa0fa
+#define EPI41230_DEVICE_ID 0xa10e
+#define JINVANI_SDIOH_ID 0x4743
+#define BCM27XX_SDIOH_ID 0x2702
+#define PCIXX21_FLASHMEDIA_ID 0x803b
+#define PCIXX21_SDIOH_ID 0x803c
+#define R5C822_SDIOH_ID 0x0822
+#define JMICRON_SDIOH_ID 0x2381
+
+
+#define BCM4306_CHIP_ID 0x4306
+#define BCM4311_CHIP_ID 0x4311
+#define BCM43111_CHIP_ID 43111
+#define BCM43112_CHIP_ID 43112
+#define BCM4312_CHIP_ID 0x4312
+#define BCM4313_CHIP_ID 0x4313
+#define BCM43131_CHIP_ID 43131
+#define BCM4315_CHIP_ID 0x4315
+#define BCM4318_CHIP_ID 0x4318
+#define BCM4319_CHIP_ID 0x4319
+#define BCM4320_CHIP_ID 0x4320
+#define BCM4321_CHIP_ID 0x4321
+#define BCM43217_CHIP_ID 43217
+#define BCM4322_CHIP_ID 0x4322
+#define BCM43221_CHIP_ID 43221
+#define BCM43222_CHIP_ID 43222
+#define BCM43224_CHIP_ID 43224
+#define BCM43225_CHIP_ID 43225
+#define BCM43227_CHIP_ID 43227
+#define BCM43228_CHIP_ID 43228
+#define BCM43226_CHIP_ID 43226
+#define BCM43231_CHIP_ID 43231
+#define BCM43234_CHIP_ID 43234
+#define BCM43235_CHIP_ID 43235
+#define BCM43236_CHIP_ID 43236
+#define BCM43237_CHIP_ID 43237
+#define BCM43238_CHIP_ID 43238
+#define BCM43239_CHIP_ID 43239
+#define BCM43420_CHIP_ID 43420
+#define BCM43421_CHIP_ID 43421
+#define BCM43428_CHIP_ID 43428
+#define BCM43431_CHIP_ID 43431
+#define BCM43460_CHIP_ID 43460
+#define BCM4325_CHIP_ID 0x4325
+#define BCM4328_CHIP_ID 0x4328
+#define BCM4329_CHIP_ID 0x4329
+#define BCM4331_CHIP_ID 0x4331
+#define BCM4336_CHIP_ID 0x4336
+#define BCM43362_CHIP_ID 43362
+#define BCM4330_CHIP_ID 0x4330
+#define BCM6362_CHIP_ID 0x6362
+#define BCM4314_CHIP_ID 0x4314
+#define BCM43142_CHIP_ID 43142
+#define BCM4324_CHIP_ID 0x4324
+#define BCM43242_CHIP_ID 43242
+#define BCM4334_CHIP_ID 0x4334
+#define BCM4360_CHIP_ID 0x4360
+#define BCM4352_CHIP_ID 0x4352
+#define BCM43526_CHIP_ID 0xAA06
+
+#define BCM4335_CHIP_ID 0x4335
+
+#define BCM4342_CHIP_ID 4342
+#define BCM4402_CHIP_ID 0x4402
+#define BCM4704_CHIP_ID 0x4704
+#define BCM4706_CHIP_ID 0x5300
+#define BCM4710_CHIP_ID 0x4710
+#define BCM4712_CHIP_ID 0x4712
+#define BCM4716_CHIP_ID 0x4716
+#define BCM47162_CHIP_ID 47162
+#define BCM4748_CHIP_ID 0x4748
+#define BCM4749_CHIP_ID 0x4749
+#define BCM4785_CHIP_ID 0x4785
+#define BCM5350_CHIP_ID 0x5350
+#define BCM5352_CHIP_ID 0x5352
+#define BCM5354_CHIP_ID 0x5354
+#define BCM5365_CHIP_ID 0x5365
+#define BCM5356_CHIP_ID 0x5356
+#define BCM5357_CHIP_ID 0x5357
+#define BCM53572_CHIP_ID 53572
+
+
+#define BCM4303_PKG_ID 2
+#define BCM4309_PKG_ID 1
+#define BCM4712LARGE_PKG_ID 0
+#define BCM4712SMALL_PKG_ID 1
+#define BCM4712MID_PKG_ID 2
+#define BCM4328USBD11G_PKG_ID 2
+#define BCM4328USBDUAL_PKG_ID 3
+#define BCM4328SDIOD11G_PKG_ID 4
+#define BCM4328SDIODUAL_PKG_ID 5
+#define BCM4329_289PIN_PKG_ID 0
+#define BCM4329_182PIN_PKG_ID 1
+#define BCM5354E_PKG_ID 1
+#define BCM4716_PKG_ID 8
+#define BCM4717_PKG_ID 9
+#define BCM4718_PKG_ID 10
+#define BCM5356_PKG_NONMODE 1
+#define BCM5358U_PKG_ID 8
+#define BCM5358_PKG_ID 9
+#define BCM47186_PKG_ID 10
+#define BCM5357_PKG_ID 11
+#define BCM5356U_PKG_ID 12
+#define BCM53572_PKG_ID 8
+#define BCM5357C0_PKG_ID 8
+#define BCM47188_PKG_ID 9
+#define BCM5358C0_PKG_ID 0xa
+#define BCM5356C0_PKG_ID 0xb
+#define BCM4331TT_PKG_ID 8
+#define BCM4331TN_PKG_ID 9
+#define BCM4331TNA0_PKG_ID 0xb
+#define BCM4706L_PKG_ID 1
+
+#define HDLSIM5350_PKG_ID 1
+#define HDLSIM_PKG_ID 14
+#define HWSIM_PKG_ID 15
+#define BCM43224_FAB_CSM 0x8
+#define BCM43224_FAB_SMIC 0xa
+#define BCM4336_WLBGA_PKG_ID 0x8
+#define BCM4330_WLBGA_PKG_ID 0x0
+#define BCM4314PCIE_ARM_PKG_ID (8 | 0)
+#define BCM4314SDIO_PKG_ID (8 | 1)
+#define BCM4314PCIE_PKG_ID (8 | 2)
+#define BCM4314SDIO_ARM_PKG_ID (8 | 3)
+#define BCM4314SDIO_FPBGA_PKG_ID (8 | 4)
+#define BCM4314DEV_PKG_ID (8 | 6)
+
+#define PCIXX21_FLASHMEDIA0_ID 0x8033
+#define PCIXX21_SDIOH0_ID 0x8034
+
+
+#define BFL_BTC2WIRE 0x00000001
+#define BFL_BTCOEX 0x00000001
+#define BFL_PACTRL 0x00000002
+#define BFL_AIRLINEMODE 0x00000004
+#define BFL_ADCDIV 0x00000008
+#define BFL_RFPLL 0x00000008
+#define BFL_ENETROBO 0x00000010
+#define BFL_NOPLLDOWN 0x00000020
+#define BFL_CCKHIPWR 0x00000040
+#define BFL_ENETADM 0x00000080
+#define BFL_ENETVLAN 0x00000100
+#define BFL_UNUSED 0x00000200
+#define BFL_NOPCI 0x00000400
+#define BFL_FEM 0x00000800
+#define BFL_EXTLNA 0x00001000
+#define BFL_HGPA 0x00002000
+#define BFL_BTC2WIRE_ALTGPIO 0x00004000
+#define BFL_ALTIQ 0x00008000
+#define BFL_NOPA 0x00010000
+#define BFL_RSSIINV 0x00020000
+#define BFL_PAREF 0x00040000
+#define BFL_3TSWITCH 0x00080000
+#define BFL_PHASESHIFT 0x00100000
+#define BFL_BUCKBOOST 0x00200000
+#define BFL_FEM_BT 0x00400000
+#define BFL_NOCBUCK 0x00800000
+#define BFL_CCKFAVOREVM 0x01000000
+#define BFL_PALDO 0x02000000
+#define BFL_LNLDO2_2P5 0x04000000
+#define BFL_FASTPWR 0x08000000
+#define BFL_UCPWRCTL_MININDX 0x08000000
+#define BFL_EXTLNA_5GHz 0x10000000
+#define BFL_TRSW_1by2 0x20000000
+#define BFL_LO_TRSW_R_5GHz 0x40000000
+#define BFL_ELNA_GAINDEF 0x80000000
+#define BFL_EXTLNA_TX 0x20000000
+
+
+#define BFL2_RXBB_INT_REG_DIS 0x00000001
+#define BFL2_APLL_WAR 0x00000002
+#define BFL2_TXPWRCTRL_EN 0x00000004
+#define BFL2_2X4_DIV 0x00000008
+#define BFL2_5G_PWRGAIN 0x00000010
+#define BFL2_PCIEWAR_OVR 0x00000020
+#define BFL2_CAESERS_BRD 0x00000040
+#define BFL2_BTC3WIRE 0x00000080
+#define BFL2_BTCLEGACY 0x00000080
+#define BFL2_SKWRKFEM_BRD 0x00000100
+#define BFL2_SPUR_WAR 0x00000200
+#define BFL2_GPLL_WAR 0x00000400
+#define BFL2_TRISTATE_LED 0x00000800
+#define BFL2_SINGLEANT_CCK 0x00001000
+#define BFL2_2G_SPUR_WAR 0x00002000
+#define BFL2_BPHY_ALL_TXCORES 0x00004000
+#define BFL2_FCC_BANDEDGE_WAR 0x00008000
+#define BFL2_GPLL_WAR2 0x00010000
+#define BFL2_IPALVLSHIFT_3P3 0x00020000
+#define BFL2_INTERNDET_TXIQCAL 0x00040000
+#define BFL2_XTALBUFOUTEN 0x00080000
+
+
+
+#define BFL2_ANAPACTRL_2G 0x00100000
+#define BFL2_ANAPACTRL_5G 0x00200000
+#define BFL2_ELNACTRL_TRSW_2G 0x00400000
+#define BFL2_BT_SHARE_ANT0 0x00800000
+#define BFL2_TEMPSENSE_HIGHER 0x01000000
+#define BFL2_BTC3WIREONLY 0x02000000
+#define BFL2_PWR_NOMINAL 0x04000000
+#define BFL2_EXTLNA_PWRSAVE 0x08000000
+
+#define BFL2_4313_RADIOREG 0x10000000
+
+#define BFL2_SDR_EN 0x20000000
+
+
+#define BOARD_GPIO_BTC3W_IN 0x850
+#define BOARD_GPIO_BTC3W_OUT 0x020
+#define BOARD_GPIO_BTCMOD_IN 0x010
+#define BOARD_GPIO_BTCMOD_OUT 0x020
+#define BOARD_GPIO_BTC_IN 0x080
+#define BOARD_GPIO_BTC_OUT 0x100
+#define BOARD_GPIO_PACTRL 0x200
+#define BOARD_GPIO_12 0x1000
+#define BOARD_GPIO_13 0x2000
+#define BOARD_GPIO_BTC4_IN 0x0800
+#define BOARD_GPIO_BTC4_BT 0x2000
+#define BOARD_GPIO_BTC4_STAT 0x4000
+#define BOARD_GPIO_BTC4_WLAN 0x8000
+#define BOARD_GPIO_1_WLAN_PWR 0x02
+#define BOARD_GPIO_3_WLAN_PWR 0x08
+#define BOARD_GPIO_4_WLAN_PWR 0x10
+
+#define GPIO_BTC4W_OUT_4312 0x010
+#define GPIO_BTC4W_OUT_43224 0x020
+#define GPIO_BTC4W_OUT_43224_SHARED 0x0e0
+#define GPIO_BTC4W_OUT_43225 0x0e0
+#define GPIO_BTC4W_OUT_43421 0x020
+#define GPIO_BTC4W_OUT_4313 0x060
+#define GPIO_BTC4W_OUT_4331_SHARED 0x010
+
+#define PCI_CFG_GPIO_SCS 0x10
+#define PCI_CFG_GPIO_HWRAD 0x20
+#define PCI_CFG_GPIO_XTAL 0x40
+#define PCI_CFG_GPIO_PLL 0x80
+
+
+#define PLL_DELAY 150
+#define FREF_DELAY 200
+#define MIN_SLOW_CLK 32
+#define XTAL_ON_DELAY 1000
+
+
+
+#define GPIO_NUMPINS 32
+
+
+#define RDL_RAM_BASE_4319 0x60000000
+#define RDL_RAM_BASE_4329 0x60000000
+#define RDL_RAM_SIZE_4319 0x48000
+#define RDL_RAM_SIZE_4329 0x48000
+#define RDL_RAM_SIZE_43236 0x70000
+#define RDL_RAM_BASE_43236 0x60000000
+#define RDL_RAM_SIZE_4328 0x60000
+#define RDL_RAM_BASE_4328 0x80000000
+#define RDL_RAM_SIZE_4322 0x60000
+#define RDL_RAM_BASE_4322 0x60000000
+
+
+#define MUXENAB_UART 0x00000001
+#define MUXENAB_GPIO 0x00000002
+#define MUXENAB_ERCX 0x00000004
+#define MUXENAB_JTAG 0x00000008
+#define MUXENAB_HOST_WAKE 0x00000010
+#define MUXENAB_I2S_EN 0x00000020
+#define MUXENAB_I2S_MASTER 0x00000040
+#define MUXENAB_I2S_FULL 0x00000080
+#define MUXENAB_SFLASH 0x00000100
+#define MUXENAB_RFSWCTRL0 0x00000200
+#define MUXENAB_RFSWCTRL1 0x00000400
+#define MUXENAB_RFSWCTRL2 0x00000800
+#define MUXENAB_SECI 0x00001000
+#define MUXENAB_BT_LEGACY 0x00002000
+#define MUXENAB_HOST_WAKE1 0x00004000
+
+
+#define FLASH_KERNEL_NFLASH 0x00000001
+#define FLASH_BOOT_NFLASH 0x00000002
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/bcmendian.h b/drivers/net/wireless/bcmdhd/include/bcmendian.h
new file mode 100644
index 000000000000..22eb7dbcb952
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmendian.h
@@ -0,0 +1,278 @@
+/*
+ * Byte order utilities
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmendian.h 241182 2011-02-17 21:50:03Z $
+ *
+ * This file by default provides proper behavior on little-endian architectures.
+ * On big-endian architectures, IL_BIGENDIAN should be defined.
+ */
+
+#ifndef _BCMENDIAN_H_
+#define _BCMENDIAN_H_
+
+#include <typedefs.h>
+
+
+#define BCMSWAP16(val) \
+ ((uint16)((((uint16)(val) & (uint16)0x00ffU) << 8) | \
+ (((uint16)(val) & (uint16)0xff00U) >> 8)))
+
+
+#define BCMSWAP32(val) \
+ ((uint32)((((uint32)(val) & (uint32)0x000000ffU) << 24) | \
+ (((uint32)(val) & (uint32)0x0000ff00U) << 8) | \
+ (((uint32)(val) & (uint32)0x00ff0000U) >> 8) | \
+ (((uint32)(val) & (uint32)0xff000000U) >> 24)))
+
+
+#define BCMSWAP32BY16(val) \
+ ((uint32)((((uint32)(val) & (uint32)0x0000ffffU) << 16) | \
+ (((uint32)(val) & (uint32)0xffff0000U) >> 16)))
+
+
+#ifndef hton16
+#define HTON16(i) BCMSWAP16(i)
+#define hton16(i) bcmswap16(i)
+#define HTON32(i) BCMSWAP32(i)
+#define hton32(i) bcmswap32(i)
+#define NTOH16(i) BCMSWAP16(i)
+#define ntoh16(i) bcmswap16(i)
+#define NTOH32(i) BCMSWAP32(i)
+#define ntoh32(i) bcmswap32(i)
+#define LTOH16(i) (i)
+#define ltoh16(i) (i)
+#define LTOH32(i) (i)
+#define ltoh32(i) (i)
+#define HTOL16(i) (i)
+#define htol16(i) (i)
+#define HTOL32(i) (i)
+#define htol32(i) (i)
+#endif
+
+#define ltoh16_buf(buf, i)
+#define htol16_buf(buf, i)
+
+
+#define load32_ua(a) ltoh32_ua(a)
+#define store32_ua(a, v) htol32_ua_store(v, a)
+#define load16_ua(a) ltoh16_ua(a)
+#define store16_ua(a, v) htol16_ua_store(v, a)
+
+#define _LTOH16_UA(cp) ((cp)[0] | ((cp)[1] << 8))
+#define _LTOH32_UA(cp) ((cp)[0] | ((cp)[1] << 8) | ((cp)[2] << 16) | ((cp)[3] << 24))
+#define _NTOH16_UA(cp) (((cp)[0] << 8) | (cp)[1])
+#define _NTOH32_UA(cp) (((cp)[0] << 24) | ((cp)[1] << 16) | ((cp)[2] << 8) | (cp)[3])
+
+#define ltoh_ua(ptr) \
+ (sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \
+ sizeof(*(ptr)) == sizeof(uint16) ? _LTOH16_UA((const uint8 *)(ptr)) : \
+ sizeof(*(ptr)) == sizeof(uint32) ? _LTOH32_UA((const uint8 *)(ptr)) : \
+ *(uint8 *)0)
+
+#define ntoh_ua(ptr) \
+ (sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \
+ sizeof(*(ptr)) == sizeof(uint16) ? _NTOH16_UA((const uint8 *)(ptr)) : \
+ sizeof(*(ptr)) == sizeof(uint32) ? _NTOH32_UA((const uint8 *)(ptr)) : \
+ *(uint8 *)0)
+
+#ifdef __GNUC__
+
+
+
+#define bcmswap16(val) ({ \
+ uint16 _val = (val); \
+ BCMSWAP16(_val); \
+})
+
+#define bcmswap32(val) ({ \
+ uint32 _val = (val); \
+ BCMSWAP32(_val); \
+})
+
+#define bcmswap32by16(val) ({ \
+ uint32 _val = (val); \
+ BCMSWAP32BY16(_val); \
+})
+
+#define bcmswap16_buf(buf, len) ({ \
+ uint16 *_buf = (uint16 *)(buf); \
+ uint _wds = (len) / 2; \
+ while (_wds--) { \
+ *_buf = bcmswap16(*_buf); \
+ _buf++; \
+ } \
+})
+
+#define htol16_ua_store(val, bytes) ({ \
+ uint16 _val = (val); \
+ uint8 *_bytes = (uint8 *)(bytes); \
+ _bytes[0] = _val & 0xff; \
+ _bytes[1] = _val >> 8; \
+})
+
+#define htol32_ua_store(val, bytes) ({ \
+ uint32 _val = (val); \
+ uint8 *_bytes = (uint8 *)(bytes); \
+ _bytes[0] = _val & 0xff; \
+ _bytes[1] = (_val >> 8) & 0xff; \
+ _bytes[2] = (_val >> 16) & 0xff; \
+ _bytes[3] = _val >> 24; \
+})
+
+#define hton16_ua_store(val, bytes) ({ \
+ uint16 _val = (val); \
+ uint8 *_bytes = (uint8 *)(bytes); \
+ _bytes[0] = _val >> 8; \
+ _bytes[1] = _val & 0xff; \
+})
+
+#define hton32_ua_store(val, bytes) ({ \
+ uint32 _val = (val); \
+ uint8 *_bytes = (uint8 *)(bytes); \
+ _bytes[0] = _val >> 24; \
+ _bytes[1] = (_val >> 16) & 0xff; \
+ _bytes[2] = (_val >> 8) & 0xff; \
+ _bytes[3] = _val & 0xff; \
+})
+
+#define ltoh16_ua(bytes) ({ \
+ const uint8 *_bytes = (const uint8 *)(bytes); \
+ _LTOH16_UA(_bytes); \
+})
+
+#define ltoh32_ua(bytes) ({ \
+ const uint8 *_bytes = (const uint8 *)(bytes); \
+ _LTOH32_UA(_bytes); \
+})
+
+#define ntoh16_ua(bytes) ({ \
+ const uint8 *_bytes = (const uint8 *)(bytes); \
+ _NTOH16_UA(_bytes); \
+})
+
+#define ntoh32_ua(bytes) ({ \
+ const uint8 *_bytes = (const uint8 *)(bytes); \
+ _NTOH32_UA(_bytes); \
+})
+
+#else
+
+
+static INLINE uint16
+bcmswap16(uint16 val)
+{
+ return BCMSWAP16(val);
+}
+
+static INLINE uint32
+bcmswap32(uint32 val)
+{
+ return BCMSWAP32(val);
+}
+
+static INLINE uint32
+bcmswap32by16(uint32 val)
+{
+ return BCMSWAP32BY16(val);
+}
+
+
+
+
+static INLINE void
+bcmswap16_buf(uint16 *buf, uint len)
+{
+ len = len / 2;
+
+ while (len--) {
+ *buf = bcmswap16(*buf);
+ buf++;
+ }
+}
+
+
+static INLINE void
+htol16_ua_store(uint16 val, uint8 *bytes)
+{
+ bytes[0] = val & 0xff;
+ bytes[1] = val >> 8;
+}
+
+
+static INLINE void
+htol32_ua_store(uint32 val, uint8 *bytes)
+{
+ bytes[0] = val & 0xff;
+ bytes[1] = (val >> 8) & 0xff;
+ bytes[2] = (val >> 16) & 0xff;
+ bytes[3] = val >> 24;
+}
+
+
+static INLINE void
+hton16_ua_store(uint16 val, uint8 *bytes)
+{
+ bytes[0] = val >> 8;
+ bytes[1] = val & 0xff;
+}
+
+
+static INLINE void
+hton32_ua_store(uint32 val, uint8 *bytes)
+{
+ bytes[0] = val >> 24;
+ bytes[1] = (val >> 16) & 0xff;
+ bytes[2] = (val >> 8) & 0xff;
+ bytes[3] = val & 0xff;
+}
+
+
+static INLINE uint16
+ltoh16_ua(const void *bytes)
+{
+ return _LTOH16_UA((const uint8 *)bytes);
+}
+
+
+static INLINE uint32
+ltoh32_ua(const void *bytes)
+{
+ return _LTOH32_UA((const uint8 *)bytes);
+}
+
+
+static INLINE uint16
+ntoh16_ua(const void *bytes)
+{
+ return _NTOH16_UA((const uint8 *)bytes);
+}
+
+
+static INLINE uint32
+ntoh32_ua(const void *bytes)
+{
+ return _NTOH32_UA((const uint8 *)bytes);
+}
+
+#endif
+#endif
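
Because the build targets little-endian hosts unless IL_BIGENDIAN is defined, hton16()/hton32() resolve to byte swaps while the htol/ltoh family collapse to identity operations; the *_ua variants exist for buffers that may be unaligned. A small sketch of storing and reloading a 16-bit field through a byte buffer (the values are illustration only; uint8/uint16 come from typedefs.h):

	uint8 buf[4];
	uint16 seq;

	htol16_ua_store(0x1234, buf);   /* little-endian: buf[0]=0x34, buf[1]=0x12 */
	seq = ltoh16_ua(buf);           /* seq == 0x1234, works on unaligned buf   */

	hton16_ua_store(0x1234, buf);   /* network order: buf[0]=0x12, buf[1]=0x34 */
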
diff --git a/drivers/net/wireless/bcmdhd/include/bcmnvram.h b/drivers/net/wireless/bcmdhd/include/bcmnvram.h
new file mode 100644
index 000000000000..ce0e03508743
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmnvram.h
@@ -0,0 +1,179 @@
+/*
+ * NVRAM variable manipulation
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmnvram.h 320632 2012-03-12 19:22:42Z $
+ */
+
+#ifndef _bcmnvram_h_
+#define _bcmnvram_h_
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+
+struct nvram_header {
+ uint32 magic;
+ uint32 len;
+ uint32 crc_ver_init;
+ uint32 config_refresh;
+ uint32 config_ncdl;
+};
+
+struct nvram_tuple {
+ char *name;
+ char *value;
+ struct nvram_tuple *next;
+};
+
+
+extern char *nvram_default_get(const char *name);
+
+
+extern int nvram_init(void *sih);
+
+
+extern int nvram_append(void *si, char *vars, uint varsz);
+
+extern void nvram_get_global_vars(char **varlst, uint *varsz);
+
+
+
+extern int nvram_reset(void *sih);
+
+
+extern void nvram_exit(void *sih);
+
+
+extern char * nvram_get(const char *name);
+
+
+extern int nvram_resetgpio_init(void *sih);
+
+
+static INLINE char *
+nvram_safe_get(const char *name)
+{
+ char *p = nvram_get(name);
+ return p ? p : "";
+}
+
+
+static INLINE int
+nvram_match(char *name, char *match)
+{
+ const char *value = nvram_get(name);
+ return (value && !strcmp(value, match));
+}
+
+
+static INLINE int
+nvram_invmatch(char *name, char *invmatch)
+{
+ const char *value = nvram_get(name);
+ return (value && strcmp(value, invmatch));
+}
+
+
+extern int nvram_set(const char *name, const char *value);
+
+
+extern int nvram_unset(const char *name);
+
+
+extern int nvram_commit(void);
+
+
+extern int nvram_getall(char *nvram_buf, int count);
+
+
+uint8 nvram_calc_crc(struct nvram_header * nvh);
+
+#endif
+
+
+#define NVRAM_SOFTWARE_VERSION "1"
+
+#define NVRAM_MAGIC 0x48534C46
+#define NVRAM_CLEAR_MAGIC 0x0
+#define NVRAM_INVALID_MAGIC 0xFFFFFFFF
+#define NVRAM_VERSION 1
+#define NVRAM_HEADER_SIZE 20
+#define NVRAM_SPACE 0x8000
+
+#define NVRAM_MAX_VALUE_LEN 255
+#define NVRAM_MAX_PARAM_LEN 64
+
+#define NVRAM_CRC_START_POSITION 9
+#define NVRAM_CRC_VER_MASK 0xffffff00
+
+
+#define NVRAM_START_COMPRESSED 0x400
+#define NVRAM_START 0x1000
+
+#define BCM_JUMBO_NVRAM_DELIMIT '\n'
+#define BCM_JUMBO_START "Broadcom Jumbo Nvram file"
+
+#if (defined(FAILSAFE_UPGRADE) || defined(CONFIG_FAILSAFE_UPGRADE) || \
+ defined(__CONFIG_FAILSAFE_UPGRADE_SUPPORT__))
+#define IMAGE_SIZE "image_size"
+#define BOOTPARTITION "bootpartition"
+#define IMAGE_BOOT BOOTPARTITION
+#define PARTIALBOOTS "partialboots"
+#define MAXPARTIALBOOTS "maxpartialboots"
+#define IMAGE_1ST_FLASH_TRX "flash0.trx"
+#define IMAGE_1ST_FLASH_OS "flash0.os"
+#define IMAGE_2ND_FLASH_TRX "flash0.trx2"
+#define IMAGE_2ND_FLASH_OS "flash0.os2"
+#define IMAGE_FIRST_OFFSET "image_first_offset"
+#define IMAGE_SECOND_OFFSET "image_second_offset"
+#define LINUX_FIRST "linux"
+#define LINUX_SECOND "linux2"
+#endif
+
+#if (defined(DUAL_IMAGE) || defined(CONFIG_DUAL_IMAGE) || \
+ defined(__CONFIG_DUAL_IMAGE_FLASH_SUPPORT__))
+
+#define IMAGE_BOOT "image_boot"
+#define BOOTPARTITION IMAGE_BOOT
+
+#define IMAGE_1ST_FLASH_TRX "flash0.trx"
+#define IMAGE_1ST_FLASH_OS "flash0.os"
+#define IMAGE_2ND_FLASH_TRX "flash0.trx2"
+#define IMAGE_2ND_FLASH_OS "flash0.os2"
+#define IMAGE_SIZE "image_size"
+
+
+#define IMAGE_FIRST_OFFSET "image_first_offset"
+#define IMAGE_SECOND_OFFSET "image_second_offset"
+
+
+#define LINUX_FIRST "linux"
+#define LINUX_SECOND "linux2"
+#define POLICY_TOGGLE "toggle"
+#define LINUX_PART_TO_FLASH "linux_to_flash"
+#define LINUX_FLASH_POLICY "linux_flash_policy"
+
+#endif
+
+#endif
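
The inline wrappers above are the ones most callers use: nvram_safe_get() never returns NULL and nvram_match() folds the lookup and comparison into one call. A hedged usage sketch (the "boardtype" and "wl0_ssid" variable names are illustration only, and treating a zero return from nvram_set() as success is an assumption; this header does not document its return codes):

	const char *board = nvram_safe_get("boardtype");     /* "" when unset, never NULL */

	if (board[0] != '\0' && nvram_match("wl0_ssid", "broadcom")) {
		/* the variable exists and equals "broadcom" */
	}

	if (nvram_set("wl0_ssid", "mynet") == 0)              /* assumed: 0 == success   */
		nvram_commit();                               /* persist pending changes */
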
diff --git a/drivers/net/wireless/bcmdhd/include/bcmpcispi.h b/drivers/net/wireless/bcmdhd/include/bcmpcispi.h
new file mode 100644
index 000000000000..44b263cec6a1
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmpcispi.h
@@ -0,0 +1,181 @@
+/*
+ * Broadcom PCI-SPI Host Controller Register Definitions
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmpcispi.h 241182 2011-02-17 21:50:03Z $
+ */
+#ifndef _BCM_PCI_SPI_H
+#define _BCM_PCI_SPI_H
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif /* PAD */
+
+
+typedef volatile struct {
+ uint32 spih_ctrl; /* 0x00 SPI Control Register */
+ uint32 spih_stat; /* 0x04 SPI Status Register */
+ uint32 spih_data; /* 0x08 SPI Data Register, 32-bits wide */
+ uint32 spih_ext; /* 0x0C SPI Extension Register */
+ uint32 PAD[4]; /* 0x10-0x1F PADDING */
+
+ uint32 spih_gpio_ctrl; /* 0x20 SPI GPIO Control Register */
+ uint32 spih_gpio_data; /* 0x24 SPI GPIO Data Register */
+ uint32 PAD[6]; /* 0x28-0x3F PADDING */
+
+ uint32 spih_int_edge; /* 0x40 SPI Interrupt Edge Register (0=Level, 1=Edge) */
+ uint32 spih_int_pol; /* 0x44 SPI Interrupt Polarity Register (0=Active Low, */
+ /* 1=Active High) */
+ uint32 spih_int_mask; /* 0x48 SPI Interrupt Mask */
+ uint32 spih_int_status; /* 0x4C SPI Interrupt Status */
+ uint32 PAD[4]; /* 0x50-0x5F PADDING */
+
+ uint32 spih_hex_disp; /* 0x60 SPI 4-digit hex display value */
+ uint32 spih_current_ma; /* 0x64 SPI SD card current consumption in mA */
+ uint32 PAD[1]; /* 0x68 PADDING */
+ uint32 spih_disp_sel; /* 0x6c SPI 4-digit hex display mode select (1=current) */
+ uint32 PAD[4]; /* 0x70-0x7F PADDING */
+ uint32 PAD[8]; /* 0x80-0x9F PADDING */
+ uint32 PAD[8]; /* 0xA0-0xBF PADDING */
+ uint32 spih_pll_ctrl; /* 0xC0 PLL Control Register */
+ uint32 spih_pll_status; /* 0xC4 PLL Status Register */
+ uint32 spih_xtal_freq; /* 0xC8 External Clock Frequency in units of 10000Hz */
+ uint32 spih_clk_count; /* 0xCC External Clock Count Register */
+
+} spih_regs_t;
+
+typedef volatile struct {
+ uint32 cfg_space[0x40]; /* 0x000-0x0FF PCI Configuration Space (Read Only) */
+ uint32 P_IMG_CTRL0; /* 0x100 PCI Image0 Control Register */
+
+ uint32 P_BA0; /* 0x104 32 R/W PCI Image0 Base Address register */
+ uint32 P_AM0; /* 0x108 32 R/W PCI Image0 Address Mask register */
+ uint32 P_TA0; /* 0x10C 32 R/W PCI Image0 Translation Address register */
+ uint32 P_IMG_CTRL1; /* 0x110 32 R/W PCI Image1 Control register */
+ uint32 P_BA1; /* 0x114 32 R/W PCI Image1 Base Address register */
+ uint32 P_AM1; /* 0x118 32 R/W PCI Image1 Address Mask register */
+ uint32 P_TA1; /* 0x11C 32 R/W PCI Image1 Translation Address register */
+ uint32 P_IMG_CTRL2; /* 0x120 32 R/W PCI Image2 Control register */
+ uint32 P_BA2; /* 0x124 32 R/W PCI Image2 Base Address register */
+ uint32 P_AM2; /* 0x128 32 R/W PCI Image2 Address Mask register */
+ uint32 P_TA2; /* 0x12C 32 R/W PCI Image2 Translation Address register */
+ uint32 P_IMG_CTRL3; /* 0x130 32 R/W PCI Image3 Control register */
+ uint32 P_BA3; /* 0x134 32 R/W PCI Image3 Base Address register */
+ uint32 P_AM3; /* 0x138 32 R/W PCI Image3 Address Mask register */
+ uint32 P_TA3; /* 0x13C 32 R/W PCI Image3 Translation Address register */
+ uint32 P_IMG_CTRL4; /* 0x140 32 R/W PCI Image4 Control register */
+ uint32 P_BA4; /* 0x144 32 R/W PCI Image4 Base Address register */
+ uint32 P_AM4; /* 0x148 32 R/W PCI Image4 Address Mask register */
+ uint32 P_TA4; /* 0x14C 32 R/W PCI Image4 Translation Address register */
+ uint32 P_IMG_CTRL5; /* 0x150 32 R/W PCI Image5 Control register */
+ uint32 P_BA5; /* 0x154 32 R/W PCI Image5 Base Address register */
+ uint32 P_AM5; /* 0x158 32 R/W PCI Image5 Address Mask register */
+ uint32 P_TA5; /* 0x15C 32 R/W PCI Image5 Translation Address register */
+ uint32 P_ERR_CS; /* 0x160 32 R/W PCI Error Control and Status register */
+ uint32 P_ERR_ADDR; /* 0x164 32 R PCI Erroneous Address register */
+ uint32 P_ERR_DATA; /* 0x168 32 R PCI Erroneous Data register */
+
+ uint32 PAD[5]; /* 0x16C-0x17F PADDING */
+
+ uint32 WB_CONF_SPC_BAR; /* 0x180 32 R WISHBONE Configuration Space Base Address */
+ uint32 W_IMG_CTRL1; /* 0x184 32 R/W WISHBONE Image1 Control register */
+ uint32 W_BA1; /* 0x188 32 R/W WISHBONE Image1 Base Address register */
+ uint32 W_AM1; /* 0x18C 32 R/W WISHBONE Image1 Address Mask register */
+ uint32 W_TA1; /* 0x190 32 R/W WISHBONE Image1 Translation Address reg */
+ uint32 W_IMG_CTRL2; /* 0x194 32 R/W WISHBONE Image2 Control register */
+ uint32 W_BA2; /* 0x198 32 R/W WISHBONE Image2 Base Address register */
+ uint32 W_AM2; /* 0x19C 32 R/W WISHBONE Image2 Address Mask register */
+ uint32 W_TA2; /* 0x1A0 32 R/W WISHBONE Image2 Translation Address reg */
+ uint32 W_IMG_CTRL3; /* 0x1A4 32 R/W WISHBONE Image3 Control register */
+ uint32 W_BA3; /* 0x1A8 32 R/W WISHBONE Image3 Base Address register */
+ uint32 W_AM3; /* 0x1AC 32 R/W WISHBONE Image3 Address Mask register */
+ uint32 W_TA3; /* 0x1B0 32 R/W WISHBONE Image3 Translation Address reg */
+ uint32 W_IMG_CTRL4; /* 0x1B4 32 R/W WISHBONE Image4 Control register */
+ uint32 W_BA4; /* 0x1B8 32 R/W WISHBONE Image4 Base Address register */
+ uint32 W_AM4; /* 0x1BC 32 R/W WISHBONE Image4 Address Mask register */
+ uint32 W_TA4; /* 0x1C0 32 R/W WISHBONE Image4 Translation Address reg */
+ uint32 W_IMG_CTRL5; /* 0x1C4 32 R/W WISHBONE Image5 Control register */
+ uint32 W_BA5; /* 0x1C8 32 R/W WISHBONE Image5 Base Address register */
+ uint32 W_AM5; /* 0x1CC 32 R/W WISHBONE Image5 Address Mask register */
+ uint32 W_TA5; /* 0x1D0 32 R/W WISHBONE Image5 Translation Address reg */
+ uint32 W_ERR_CS; /* 0x1D4 32 R/W WISHBONE Error Control and Status reg */
+ uint32 W_ERR_ADDR; /* 0x1D8 32 R WISHBONE Erroneous Address register */
+ uint32 W_ERR_DATA; /* 0x1DC 32 R WISHBONE Erroneous Data register */
+ uint32 CNF_ADDR; /* 0x1E0 32 R/W Configuration Cycle register */
+ uint32 CNF_DATA; /* 0x1E4 32 R/W Configuration Cycle Generation Data reg */
+
+ uint32 INT_ACK; /* 0x1E8 32 R Interrupt Acknowledge register */
+ uint32 ICR; /* 0x1EC 32 R/W Interrupt Control register */
+ uint32 ISR; /* 0x1F0 32 R/W Interrupt Status register */
+} spih_pciregs_t;
+
+/*
+ * PCI Core interrupt enable and status bit definitions.
+ */
+
+/* PCI Core ICR Register bit definitions */
+#define PCI_INT_PROP_EN (1 << 0) /* Interrupt Propagation Enable */
+#define PCI_WB_ERR_INT_EN (1 << 1) /* Wishbone Error Interrupt Enable */
+#define PCI_PCI_ERR_INT_EN (1 << 2) /* PCI Error Interrupt Enable */
+#define PCI_PAR_ERR_INT_EN (1 << 3) /* Parity Error Interrupt Enable */
+#define PCI_SYS_ERR_INT_EN (1 << 4) /* System Error Interrupt Enable */
+#define PCI_SOFTWARE_RESET (1U << 31) /* Software reset of the PCI Core. */
+
+
+/* PCI Core ISR Register bit definitions */
+#define PCI_INT_PROP_ST (1 << 0) /* Interrupt Propagation Status */
+#define PCI_WB_ERR_INT_ST (1 << 1) /* Wishbone Error Interrupt Status */
+#define PCI_PCI_ERR_INT_ST (1 << 2) /* PCI Error Interrupt Status */
+#define PCI_PAR_ERR_INT_ST (1 << 3) /* Parity Error Interrupt Status */
+#define PCI_SYS_ERR_INT_ST (1 << 4) /* System Error Interrupt Status */
+
+
+/* Registers on the Wishbone bus */
+#define SPIH_CTLR_INTR (1 << 0) /* SPI Host Controller Core Interrupt */
+#define SPIH_DEV_INTR (1 << 1) /* SPI Device Interrupt */
+#define SPIH_WFIFO_INTR (1 << 2) /* SPI Tx FIFO Empty Intr (FPGA Rev >= 8) */
+
+/* GPIO Bit definitions */
+#define SPIH_CS (1 << 0) /* SPI Chip Select (active low) */
+#define SPIH_SLOT_POWER (1 << 1) /* SD Card Slot Power Enable */
+#define SPIH_CARD_DETECT (1 << 2) /* SD Card Detect */
+
+/* SPI Status Register Bit definitions */
+#define SPIH_STATE_MASK 0x30 /* SPI Transfer State Machine state mask */
+#define SPIH_STATE_SHIFT 4 /* SPI Transfer State Machine state shift */
+#define SPIH_WFFULL (1 << 3) /* SPI Write FIFO Full */
+#define SPIH_WFEMPTY (1 << 2) /* SPI Write FIFO Empty */
+#define SPIH_RFFULL (1 << 1) /* SPI Read FIFO Full */
+#define SPIH_RFEMPTY (1 << 0) /* SPI Read FIFO Empty */
+
+#define SPIH_EXT_CLK (1U << 31) /* Use External Clock as PLL Clock source. */
+
+#define SPIH_PLL_NO_CLK (1 << 1) /* Set to 1 if the PLL's input clock is lost. */
+#define SPIH_PLL_LOCKED (1 << 3) /* Set to 1 when the PLL is locked. */
+
+/* Spin bit loop bound check */
+#define SPI_SPIN_BOUND 0xf4240 /* 1 million */
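+
+/* Illustrative sketch: bounded polling against the status bits above. The
+ * accessor spih_status_read() is a hypothetical placeholder for however the
+ * port reads the SPI host status register.
+ *
+ *	uint32 spins = 0;
+ *	while ((spih_status_read(sd) & SPIH_RFEMPTY) && (spins < SPI_SPIN_BOUND))
+ *		spins++;
+ *	if (spins >= SPI_SPIN_BOUND)
+ *		goto timeout;	(no read-FIFO data arrived within the bound)
+ */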
+
+#endif /* _BCM_PCI_SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmperf.h b/drivers/net/wireless/bcmdhd/include/bcmperf.h
new file mode 100644
index 000000000000..743830768899
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmperf.h
@@ -0,0 +1,36 @@
+/*
+ * Performance counters software interface.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmperf.h 241182 2011-02-17 21:50:03Z $
+ */
+/* test */
+#ifndef _BCMPERF_H_
+#define _BCMPERF_H_
+/* get cache hits and misses */
+#define BCMPERF_ENABLE_INSTRCOUNT()
+#define BCMPERF_ENABLE_ICACHE_MISS()
+#define BCMPERF_ENABLE_ICACHE_HIT()
+#define BCMPERF_GETICACHE_MISS(x) ((x) = 0)
+#define BCMPERF_GETICACHE_HIT(x) ((x) = 0)
+#define BCMPERF_GETINSTRCOUNT(x) ((x) = 0)
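+
+/* Illustrative sketch: with the stub macros above, instrumentation of the
+ * following shape compiles away to nothing and always reports zero counts.
+ *
+ *	uint32 misses, hits;
+ *	BCMPERF_ENABLE_ICACHE_MISS();
+ *	BCMPERF_ENABLE_ICACHE_HIT();
+ *	...code under measurement...
+ *	BCMPERF_GETICACHE_MISS(misses);	(misses == 0 with these stubs)
+ *	BCMPERF_GETICACHE_HIT(hits);
+ */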
+#endif /* _BCMPERF_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdbus.h b/drivers/net/wireless/bcmdhd/include/bcmsdbus.h
new file mode 100644
index 000000000000..36c3604ca504
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdbus.h
@@ -0,0 +1,131 @@
+/*
+ * Definitions for API from sdio common code (bcmsdh) to individual
+ * host controller drivers.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdbus.h 320190 2012-03-09 19:13:53Z $
+ */
+
+#ifndef _sdio_api_h_
+#define _sdio_api_h_
+
+
+#define SDIOH_API_RC_SUCCESS (0x00)
+#define SDIOH_API_RC_FAIL (0x01)
+#define SDIOH_API_SUCCESS(status) (status == 0)
+
+#define SDIOH_READ 0 /* Read request */
+#define SDIOH_WRITE 1 /* Write request */
+
+#define SDIOH_DATA_FIX 0 /* Fixed addressing */
+#define SDIOH_DATA_INC 1 /* Incremental addressing */
+
+#define SDIOH_CMD_TYPE_NORMAL 0 /* Normal command */
+#define SDIOH_CMD_TYPE_APPEND 1 /* Append command */
+#define SDIOH_CMD_TYPE_CUTTHRU 2 /* Cut-through command */
+
+#define SDIOH_DATA_PIO 0 /* PIO mode */
+#define SDIOH_DATA_DMA 1 /* DMA mode */
+
+
+typedef int SDIOH_API_RC;
+
+/* SDio Host structure */
+typedef struct sdioh_info sdioh_info_t;
+
+/* callback function, taking one arg */
+typedef void (*sdioh_cb_fn_t)(void *);
+
+/* Attach; returns a handle on success, NULL on failure.
+ * The handle must be passed to all subsequent calls. There is no local
+ * caching; cfghdl points to the starting address of PCI device mapped memory.
+ */
+extern sdioh_info_t * sdioh_attach(osl_t *osh, void *cfghdl, uint irq);
+extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *si);
+extern SDIOH_API_RC sdioh_interrupt_register(sdioh_info_t *si, sdioh_cb_fn_t fn, void *argh);
+extern SDIOH_API_RC sdioh_interrupt_deregister(sdioh_info_t *si);
+
+/* query whether SD interrupt is enabled or not */
+extern SDIOH_API_RC sdioh_interrupt_query(sdioh_info_t *si, bool *onoff);
+
+/* enable or disable SD interrupt */
+extern SDIOH_API_RC sdioh_interrupt_set(sdioh_info_t *si, bool enable_disable);
+
+#if defined(DHD_DEBUG)
+extern bool sdioh_interrupt_pending(sdioh_info_t *si);
+#endif
+
+/* read or write one byte using cmd52 */
+extern SDIOH_API_RC sdioh_request_byte(sdioh_info_t *si, uint rw, uint fnc, uint addr, uint8 *byte);
+
+/* read or write 2/4 bytes using cmd53 */
+extern SDIOH_API_RC sdioh_request_word(sdioh_info_t *si, uint cmd_type, uint rw, uint fnc,
+ uint addr, uint32 *word, uint nbyte);
+
+/* read or write any buffer using cmd53 */
+extern SDIOH_API_RC sdioh_request_buffer(sdioh_info_t *si, uint pio_dma, uint fix_inc,
+ uint rw, uint fnc_num, uint32 addr, uint regwidth, uint32 buflen, uint8 *buffer,
+ void *pkt);
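+
+/* Illustrative sketch: a CMD52 single-byte read through the API above.
+ * The function number (1) and "addr" are arbitrary example values.
+ *
+ *	uint8 val;
+ *	SDIOH_API_RC rc = sdioh_request_byte(si, SDIOH_READ, 1, addr, &val);
+ *	if (!SDIOH_API_SUCCESS(rc))
+ *		...the transfer failed, val is not valid...
+ */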
+
+/* get cis data */
+extern SDIOH_API_RC sdioh_cis_read(sdioh_info_t *si, uint fuc, uint8 *cis, uint32 length);
+
+extern SDIOH_API_RC sdioh_cfg_read(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data);
+extern SDIOH_API_RC sdioh_cfg_write(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data);
+
+/* query number of io functions */
+extern uint sdioh_query_iofnum(sdioh_info_t *si);
+
+/* handle iovars */
+extern int sdioh_iovar_op(sdioh_info_t *si, const char *name,
+ void *params, int plen, void *arg, int len, bool set);
+
+/* Issue abort to the specified function and clear controller as needed */
+extern int sdioh_abort(sdioh_info_t *si, uint fnc);
+
+/* Start and Stop SDIO without re-enumerating the SD card. */
+extern int sdioh_start(sdioh_info_t *si, int stage);
+extern int sdioh_stop(sdioh_info_t *si);
+
+/* Wait system lock free */
+extern int sdioh_waitlockfree(sdioh_info_t *si);
+
+/* Reset and re-initialize the device */
+extern int sdioh_sdio_reset(sdioh_info_t *si);
+
+/* Helper function */
+void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh);
+
+
+
+#if defined(BCMSDIOH_STD)
+ #define SDIOH_SLEEP_ENABLED
+#endif
+extern SDIOH_API_RC sdioh_sleep(sdioh_info_t *si, bool enab);
+
+/* GPIO support */
+extern SDIOH_API_RC sdioh_gpio_init(sdioh_info_t *sd);
+extern bool sdioh_gpioin(sdioh_info_t *sd, uint32 gpio);
+extern SDIOH_API_RC sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio);
+extern SDIOH_API_RC sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab);
+
+#endif /* _sdio_api_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdh.h b/drivers/net/wireless/bcmdhd/include/bcmsdh.h
new file mode 100644
index 000000000000..b1d9355f8bc1
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdh.h
@@ -0,0 +1,238 @@
+/*
+ * SDIO host client driver interface of Broadcom HNBU
+ * export functions to client drivers
+ * abstract OS and BUS specific details of SDIO
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh.h 327460 2012-04-13 18:38:41Z $
+ */
+
+/**
+ * @file bcmsdh.h
+ */
+
+#ifndef _bcmsdh_h_
+#define _bcmsdh_h_
+
+#define BCMSDH_ERROR_VAL 0x0001 /* Error */
+#define BCMSDH_INFO_VAL 0x0002 /* Info */
+extern const uint bcmsdh_msglevel;
+
+#define BCMSDH_ERROR(x)
+#define BCMSDH_INFO(x)
+
+#if (defined(BCMSDIOH_STD) || defined(BCMSDIOH_BCM) || defined(BCMSDIOH_SPI))
+#define BCMSDH_ADAPTER
+#endif /* BCMSDIOH_STD || BCMSDIOH_BCM || BCMSDIOH_SPI */
+
+/* forward declarations */
+typedef struct bcmsdh_info bcmsdh_info_t;
+typedef void (*bcmsdh_cb_fn_t)(void *);
+
+/* Attach and build an interface to the underlying SD host driver.
+ * - Allocates resources (structs, arrays, mem, OS handles, etc) needed by bcmsdh.
+ * - Returns the bcmsdh handle and virtual address base for register access.
+ * The returned handle should be used in all subsequent calls, but the bcmsdh
+ * implementation may maintain a single "default" handle (e.g. the first or
+ * most recent one) so that single-instance implementations can pass NULL.
+ */
+
+#if 0 && (NDISVER >= 0x0630) && 1
+extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *cfghdl,
+ void **regsva, uint irq, shared_info_t *sh);
+#else
+extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *cfghdl, void **regsva, uint irq);
+#endif
+
+/* Detach - freeup resources allocated in attach */
+extern int bcmsdh_detach(osl_t *osh, void *sdh);
+
+/* Query if SD device interrupts are enabled */
+extern bool bcmsdh_intr_query(void *sdh);
+
+/* Enable/disable SD interrupt */
+extern int bcmsdh_intr_enable(void *sdh);
+extern int bcmsdh_intr_disable(void *sdh);
+
+/* Register/deregister device interrupt handler. */
+extern int bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh);
+extern int bcmsdh_intr_dereg(void *sdh);
+/* Enable/disable SD card interrupt forward */
+extern void bcmsdh_intr_forward(void *sdh, bool pass);
+
+#if defined(DHD_DEBUG)
+/* Query pending interrupt status from the host controller */
+extern bool bcmsdh_intr_pending(void *sdh);
+#endif
+
+/* Register a callback to be called if and when bcmsdh detects
+ * device removal. No-op in the case of non-removable/hardwired devices.
+ */
+extern int bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh);
+
+/* Access SDIO address space (e.g. CCCR) using CMD52 (single-byte interface).
+ * fn: function number
+ * addr: unmodified SDIO-space address
+ * data: data byte to write
+ * err: pointer to error code (or NULL)
+ */
+extern uint8 bcmsdh_cfg_read(void *sdh, uint func, uint32 addr, int *err);
+extern void bcmsdh_cfg_write(void *sdh, uint func, uint32 addr, uint8 data, int *err);
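+
+/* Illustrative sketch: reading one byte of SDIO config space (function 0,
+ * e.g. a CCCR register) with the accessor above; "cccr_addr" is an example
+ * address.
+ *
+ *	int err = 0;
+ *	uint8 v = bcmsdh_cfg_read(sdh, 0, cccr_addr, &err);
+ *	if (err)
+ *		...the CMD52 access failed, v is not valid...
+ */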
+
+/* Read/Write 4bytes from/to cfg space */
+extern uint32 bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err);
+extern void bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err);
+
+/* Read CIS content for specified function.
+ * fn: function whose CIS is being requested (0 is common CIS)
+ * cis: pointer to memory location to place results
+ * length: number of bytes to read
+ * Internally, this routine uses the values from the cis base regs (0x9-0xB)
+ * to form an SDIO-space address to read the data from.
+ */
+extern int bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length);
+
+/* Synchronous access to device (client) core registers via CMD53 to F1.
+ * addr: backplane address (i.e. >= regsva from attach)
+ * size: register width in bytes (2 or 4)
+ * data: data for register write
+ */
+extern uint32 bcmsdh_reg_read(void *sdh, uint32 addr, uint size);
+extern uint32 bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data);
+
+/* set sb address window */
+extern int bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set);
+
+/* Indicate if last reg read/write failed */
+extern bool bcmsdh_regfail(void *sdh);
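+
+/* Illustrative sketch: a 4-byte synchronous core register read through the
+ * CMD53/F1 path above, followed by the failure check; "reg_addr" is an
+ * example backplane address inside the current window.
+ *
+ *	uint32 v = bcmsdh_reg_read(sdh, reg_addr, 4);
+ *	if (bcmsdh_regfail(sdh))
+ *		...the access failed, v is not valid...
+ */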
+
+/* Buffer transfer to/from device (client) core via cmd53.
+ * fn: function number
+ * addr: backplane address (i.e. >= regsva from attach)
+ * flags: backplane width, address increment, sync/async
+ * buf: pointer to memory data buffer
+ * nbytes: number of bytes to transfer to/from buf
+ * pkt: pointer to packet associated with buf (if any)
+ * complete: callback function for command completion (async only)
+ * handle: handle for completion callback (first arg in callback)
+ * Returns 0 or error code.
+ * NOTE: Async operation is not currently supported.
+ */
+typedef void (*bcmsdh_cmplt_fn_t)(void *handle, int status, bool sync_waiting);
+extern int bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes, void *pkt,
+ bcmsdh_cmplt_fn_t complete_fn, void *handle);
+extern int bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes, void *pkt,
+ bcmsdh_cmplt_fn_t complete_fn, void *handle);
+
+/* Flags bits */
+#define SDIO_REQ_4BYTE 0x1 /* Four-byte target (backplane) width (vs. two-byte) */
+#define SDIO_REQ_FIXED 0x2 /* Fixed address (FIFO) (vs. incrementing address) */
+#define SDIO_REQ_ASYNC 0x4 /* Async request (vs. sync request) */
+#define SDIO_BYTE_MODE 0x8 /* Byte mode request(non-block mode) */
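+
+/* Illustrative sketch: a synchronous CMD53 receive using the flag bits above
+ * (4-byte backplane width, fixed FIFO address). Since async operation is
+ * noted above as unsupported, no completion callback is passed; the function
+ * number (2) and "addr"/"buf"/"nbytes" are example values.
+ *
+ *	uint flags = SDIO_REQ_4BYTE | SDIO_REQ_FIXED;
+ *	int err = bcmsdh_recv_buf(sdh, addr, 2, flags, buf, nbytes, NULL,
+ *	        NULL, NULL);
+ */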
+
+/* Pending (non-error) return code */
+#define BCME_PENDING 1
+
+/* Read/write to memory block (F1, no FIFO) via CMD53 (sync only).
+ * rw: read or write (0/1)
+ * addr: direct SDIO address
+ * buf: pointer to memory data buffer
+ * nbytes: number of bytes to transfer to/from buf
+ * Returns 0 or error code.
+ */
+extern int bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes);
+
+/* Issue an abort to the specified function */
+extern int bcmsdh_abort(void *sdh, uint fn);
+
+/* Start SDIO Host Controller communication */
+extern int bcmsdh_start(void *sdh, int stage);
+
+/* Stop SDIO Host Controller communication */
+extern int bcmsdh_stop(void *sdh);
+
+/* Wait system lock free */
+extern int bcmsdh_waitlockfree(void *sdh);
+
+/* Returns the "Device ID" of target device on the SDIO bus. */
+extern int bcmsdh_query_device(void *sdh);
+
+/* Returns the number of IO functions reported by the device */
+extern uint bcmsdh_query_iofnum(void *sdh);
+
+/* Miscellaneous knob tweaker. */
+extern int bcmsdh_iovar_op(void *sdh, const char *name,
+ void *params, int plen, void *arg, int len, bool set);
+
+/* Reset and reinitialize the device */
+extern int bcmsdh_reset(bcmsdh_info_t *sdh);
+
+/* helper functions */
+
+extern void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh);
+
+/* callback functions */
+typedef struct {
+ /* attach to device */
+ void *(*attach)(uint16 vend_id, uint16 dev_id, uint16 bus, uint16 slot,
+ uint16 func, uint bustype, void * regsva, osl_t * osh,
+ void * param);
+ /* detach from device */
+ void (*detach)(void *ch);
+} bcmsdh_driver_t;
+
+/* platform specific/high level functions */
+extern int bcmsdh_register(bcmsdh_driver_t *driver);
+extern void bcmsdh_unregister(void);
+extern bool bcmsdh_chipmatch(uint16 vendor, uint16 device);
+extern void bcmsdh_device_remove(void * sdh);
+
+extern int bcmsdh_reg_sdio_notify(void* semaphore);
+extern void bcmsdh_unreg_sdio_notify(void);
+
+#if defined(OOB_INTR_ONLY)
+extern int bcmsdh_register_oob_intr(void * dhdp);
+extern void bcmsdh_unregister_oob_intr(void);
+extern void bcmsdh_oob_intr_set(bool enable);
+#endif /* defined(OOB_INTR_ONLY) */
+
+/* Function to pass device-status bits to DHD. */
+extern uint32 bcmsdh_get_dstatus(void *sdh);
+
+/* Function to return current window addr */
+extern uint32 bcmsdh_cur_sbwad(void *sdh);
+
+/* Function to pass chipid and rev to lower layers for controlling pr's */
+extern void bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev);
+
+
+extern int bcmsdh_sleep(void *sdh, bool enab);
+
+/* GPIO support */
+extern int bcmsdh_gpio_init(void *sd);
+extern bool bcmsdh_gpioin(void *sd, uint32 gpio);
+extern int bcmsdh_gpioouten(void *sd, uint32 gpio);
+extern int bcmsdh_gpioout(void *sd, uint32 gpio, bool enab);
+
+#endif /* _bcmsdh_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h b/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h
new file mode 100644
index 000000000000..80f39003ea4a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h
@@ -0,0 +1,123 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_sdmmc.h 313732 2012-02-08 19:49:00Z $
+ */
+
+#ifndef __BCMSDH_SDMMC_H__
+#define __BCMSDH_SDMMC_H__
+
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+
+#define sd_sync_dma(sd, read, nbytes)
+#define sd_init_dma(sd)
+#define sd_ack_intr(sd)
+#define sd_wakeup(sd)
+
+/* Allocate/init/free per-OS private data */
+extern int sdioh_sdmmc_osinit(sdioh_info_t *sd);
+extern void sdioh_sdmmc_osfree(sdioh_info_t *sd);
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+ do { if (!(exp)) \
+ printf("!!!ASSERT fail: file %s, line %d", __FILE__, __LINE__); \
+ } while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS 0
+#define ERROR 1
+
+/* private bus modes */
+#define SDIOH_MODE_SD4 2
+#define CLIENT_INTR 0x100 /* Get rid of this! */
+
+struct sdioh_info {
+ osl_t *osh; /* osh handler */
+ bool client_intr_enabled; /* interrupt connected flag */
+ bool intr_handler_valid; /* client driver interrupt handler valid */
+ sdioh_cb_fn_t intr_handler; /* registered interrupt handler */
+ void *intr_handler_arg; /* argument to call interrupt handler */
+ uint16 intmask; /* Current active interrupts */
+ void *sdos_info; /* Pointer to per-OS private data */
+
+ uint irq; /* Client irq */
+ int intrcount; /* Client interrupts */
+
+ bool sd_use_dma; /* DMA on CMD53 */
+ bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+ /* Must be on for sd_multiblock to be effective */
+ bool use_client_ints; /* If this is false, make sure to restore */
+ int sd_mode; /* SD1/SD4/SPI */
+ int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */
+ uint8 num_funcs; /* Supported funcs on client */
+ uint32 com_cis_ptr;
+ uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS];
+
+#define SDIOH_SDMMC_MAX_SG_ENTRIES 32
+ struct scatterlist sg_list[SDIOH_SDMMC_MAX_SG_ENTRIES];
+ bool use_rxchain;
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdh_sdmmc.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/* OS-independent interrupt handler */
+extern bool check_client_intr(sdioh_info_t *sd);
+
+/* Core interrupt enable/disable of device interrupts */
+extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
+extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
+
+
+/**************************************************************
+ * Internal interfaces: bcmsdh_sdmmc.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *sdioh_sdmmc_reg_map(osl_t *osh, int32 addr, int size);
+extern void sdioh_sdmmc_reg_unmap(osl_t *osh, int32 addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int sdioh_sdmmc_register_irq(sdioh_info_t *sd, uint irq);
+extern void sdioh_sdmmc_free_irq(uint irq, sdioh_info_t *sd);
+
+typedef struct _BCMSDH_SDMMC_INSTANCE {
+ sdioh_info_t *sd;
+ struct sdio_func *func[SDIOD_MAX_IOFUNCS];
+} BCMSDH_SDMMC_INSTANCE, *PBCMSDH_SDMMC_INSTANCE;
+
+#endif /* __BCMSDH_SDMMC_H__ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h b/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h
new file mode 100644
index 000000000000..80c0a3d13eea
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h
@@ -0,0 +1,274 @@
+/*
+ * Broadcom SDIO/PCMCIA
+ * Software-specific definitions shared between device and host side
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdpcm.h 291086 2011-10-21 01:17:24Z $
+ */
+
+#ifndef _bcmsdpcm_h_
+#define _bcmsdpcm_h_
+
+/*
+ * Software allocation of To SB Mailbox resources
+ */
+
+/* intstatus bits */
+#define I_SMB_NAK I_SMB_SW0 /* To SB Mailbox Frame NAK */
+#define I_SMB_INT_ACK I_SMB_SW1 /* To SB Mailbox Host Interrupt ACK */
+#define I_SMB_USE_OOB I_SMB_SW2 /* To SB Mailbox Use OOB Wakeup */
+#define I_SMB_DEV_INT I_SMB_SW3 /* To SB Mailbox Miscellaneous Interrupt */
+
+#define I_TOSBMAIL (I_SMB_NAK | I_SMB_INT_ACK | I_SMB_USE_OOB | I_SMB_DEV_INT)
+
+/* tosbmailbox bits corresponding to intstatus bits */
+#define SMB_NAK (1 << 0) /* To SB Mailbox Frame NAK */
+#define SMB_INT_ACK (1 << 1) /* To SB Mailbox Host Interrupt ACK */
+#define SMB_USE_OOB (1 << 2) /* To SB Mailbox Use OOB Wakeup */
+#define SMB_DEV_INT (1 << 3) /* To SB Mailbox Miscellaneous Interrupt */
+#define SMB_MASK 0x0000000f /* To SB Mailbox Mask */
+
+/* tosbmailboxdata */
+#define SMB_DATA_VERSION_MASK 0x00ff0000 /* host protocol version (sent with F2 enable) */
+#define SMB_DATA_VERSION_SHIFT 16 /* host protocol version (sent with F2 enable) */
+
+/*
+ * Software allocation of To Host Mailbox resources
+ */
+
+/* intstatus bits */
+#define I_HMB_FC_STATE I_HMB_SW0 /* To Host Mailbox Flow Control State */
+#define I_HMB_FC_CHANGE I_HMB_SW1 /* To Host Mailbox Flow Control State Changed */
+#define I_HMB_FRAME_IND I_HMB_SW2 /* To Host Mailbox Frame Indication */
+#define I_HMB_HOST_INT I_HMB_SW3 /* To Host Mailbox Miscellaneous Interrupt */
+
+#define I_TOHOSTMAIL (I_HMB_FC_CHANGE | I_HMB_FRAME_IND | I_HMB_HOST_INT)
+
+/* tohostmailbox bits corresponding to intstatus bits */
+#define HMB_FC_ON (1 << 0) /* To Host Mailbox Flow Control State */
+#define HMB_FC_CHANGE (1 << 1) /* To Host Mailbox Flow Control State Changed */
+#define HMB_FRAME_IND (1 << 2) /* To Host Mailbox Frame Indication */
+#define HMB_HOST_INT (1 << 3) /* To Host Mailbox Miscellaneous Interrupt */
+#define HMB_MASK 0x0000000f /* To Host Mailbox Mask */
+
+/* tohostmailboxdata */
+#define HMB_DATA_NAKHANDLED 0x01 /* we're ready to retransmit NAK'd frame to host */
+#define HMB_DATA_DEVREADY 0x02 /* we're ready to talk to host after enable */
+#define HMB_DATA_FC 0x04 /* per prio flowcontrol update flag to host */
+#define HMB_DATA_FWREADY 0x08 /* firmware is ready for protocol activity */
+#define HMB_DATA_FWHALT 0x10 /* firmware has halted operation */
+
+#define HMB_DATA_FCDATA_MASK 0xff000000 /* per prio flowcontrol data */
+#define HMB_DATA_FCDATA_SHIFT 24 /* per prio flowcontrol data */
+
+#define HMB_DATA_VERSION_MASK 0x00ff0000 /* device protocol version (with devready) */
+#define HMB_DATA_VERSION_SHIFT 16 /* device protocol version (with devready) */
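+
+/* Illustrative sketch: decoding a tohostmailboxdata word with the masks
+ * above; "hmbdata" stands for the value read from the register.
+ *
+ *	if (hmbdata & HMB_DATA_FC) {
+ *		uint8 fcbits = (hmbdata & HMB_DATA_FCDATA_MASK) >>
+ *		        HMB_DATA_FCDATA_SHIFT;
+ *		...update per-priority flow control from fcbits...
+ *	}
+ *	if (hmbdata & HMB_DATA_DEVREADY) {
+ *		uint proto = (hmbdata & HMB_DATA_VERSION_MASK) >>
+ *		        HMB_DATA_VERSION_SHIFT;
+ *		...compare proto against the host's supported protocol version...
+ *	}
+ */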
+
+/*
+ * Software-defined protocol header
+ */
+
+/* Current protocol version */
+#define SDPCM_PROT_VERSION 4
+
+/* SW frame header */
+#define SDPCM_SEQUENCE_MASK 0x000000ff /* Sequence Number Mask */
+#define SDPCM_PACKET_SEQUENCE(p) (((uint8 *)p)[0] & 0xff) /* p starts w/SW Header */
+
+#define SDPCM_CHANNEL_MASK 0x00000f00 /* Channel Number Mask */
+#define SDPCM_CHANNEL_SHIFT 8 /* Channel Number Shift */
+#define SDPCM_PACKET_CHANNEL(p) (((uint8 *)p)[1] & 0x0f) /* p starts w/SW Header */
+
+#define SDPCM_FLAGS_MASK 0x0000f000 /* Mask of flag bits */
+#define SDPCM_FLAGS_SHIFT 12 /* Flag bits shift */
+#define SDPCM_PACKET_FLAGS(p) ((((uint8 *)p)[1] & 0xf0) >> 4) /* p starts w/SW Header */
+
+/* Next Read Len: lookahead length of next frame, in 16-byte units (rounded up) */
+#define SDPCM_NEXTLEN_MASK 0x00ff0000 /* Next Read Len Mask */
+#define SDPCM_NEXTLEN_SHIFT 16 /* Next Read Len Shift */
+#define SDPCM_NEXTLEN_VALUE(p) ((((uint8 *)p)[2] & 0xff) << 4) /* p starts w/SW Header */
+#define SDPCM_NEXTLEN_OFFSET 2
+
+/* Data Offset from SOF (HW Tag, SW Tag, Pad) */
+#define SDPCM_DOFFSET_OFFSET 3 /* Data Offset */
+#define SDPCM_DOFFSET_VALUE(p) (((uint8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
+#define SDPCM_DOFFSET_MASK 0xff000000
+#define SDPCM_DOFFSET_SHIFT 24
+
+#define SDPCM_FCMASK_OFFSET 4 /* Flow control */
+#define SDPCM_FCMASK_VALUE(p) (((uint8 *)p)[SDPCM_FCMASK_OFFSET ] & 0xff)
+#define SDPCM_WINDOW_OFFSET 5 /* Credit based fc */
+#define SDPCM_WINDOW_VALUE(p) (((uint8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff)
+#define SDPCM_VERSION_OFFSET 6 /* Version # */
+#define SDPCM_VERSION_VALUE(p) (((uint8 *)p)[SDPCM_VERSION_OFFSET] & 0xff)
+#define SDPCM_UNUSED_OFFSET 7 /* Spare */
+#define SDPCM_UNUSED_VALUE(p) (((uint8 *)p)[SDPCM_UNUSED_OFFSET] & 0xff)
+
+#define SDPCM_SWHEADER_LEN 8 /* SW header is 64 bits */
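+
+/* Illustrative sketch: picking apart the 8-byte software header of a
+ * received frame with the accessor macros above; "hdr" points at the first
+ * byte of the SW header.
+ *
+ *	uint8 seq      = SDPCM_PACKET_SEQUENCE(hdr);
+ *	uint8 chan     = SDPCM_PACKET_CHANNEL(hdr);
+ *	uint16 nextlen = SDPCM_NEXTLEN_VALUE(hdr);	(lookahead, in bytes)
+ *	uint8 doff     = SDPCM_DOFFSET_VALUE(hdr);	(data offset from SOF)
+ */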
+
+/* logical channel numbers */
+#define SDPCM_CONTROL_CHANNEL 0 /* Control Request/Response Channel Id */
+#define SDPCM_EVENT_CHANNEL 1 /* Async Event Indication Channel Id */
+#define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv Channel Id */
+#define SDPCM_GLOM_CHANNEL 3 /* For coalesced packets (superframes) */
+#define SDPCM_TEST_CHANNEL 15 /* Reserved for test/debug packets */
+#define SDPCM_MAX_CHANNEL 15
+
+#define SDPCM_SEQUENCE_WRAP 256 /* wrap-around val for eight-bit frame seq number */
+
+#define SDPCM_FLAG_RESVD0 0x01
+#define SDPCM_FLAG_RESVD1 0x02
+#define SDPCM_FLAG_GSPI_TXENAB 0x04
+#define SDPCM_FLAG_GLOMDESC 0x08 /* Superframe descriptor mask */
+
+/* For GLOM_CHANNEL frames, use a flag to indicate descriptor frame */
+#define SDPCM_GLOMDESC_FLAG (SDPCM_FLAG_GLOMDESC << SDPCM_FLAGS_SHIFT)
+
+#define SDPCM_GLOMDESC(p) (((uint8 *)p)[1] & 0x80)
+
+/* For TEST_CHANNEL packets, define another 4-byte header */
+#define SDPCM_TEST_HDRLEN 4 /* Generally: Cmd(1), Ext(1), Len(2);
+ * Semantics of Ext byte depend on command.
+ * Len is current or requested frame length, not
+ * including test header; sent little-endian.
+ */
+#define SDPCM_TEST_DISCARD 0x01 /* Receiver discards. Ext is a pattern id. */
+#define SDPCM_TEST_ECHOREQ 0x02 /* Echo request. Ext is a pattern id. */
+#define SDPCM_TEST_ECHORSP 0x03 /* Echo response. Ext is a pattern id. */
+#define SDPCM_TEST_BURST 0x04 /* Receiver to send a burst. Ext is a frame count */
+#define SDPCM_TEST_SEND 0x05 /* Receiver sets send mode. Ext is boolean on/off */
+
+/* Handy macro for filling in datagen packets with a pattern */
+#define SDPCM_TEST_FILL(byteno, id) ((uint8)(id + byteno))
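+
+/* Illustrative sketch: filling a test-channel payload with the generator
+ * macro above; "body", "len" and "pattern_id" are example names.
+ *
+ *	for (i = 0; i < len; i++)
+ *		body[i] = SDPCM_TEST_FILL(i, pattern_id);
+ */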
+
+/*
+ * Software counters (first part matches hardware counters)
+ */
+
+typedef volatile struct {
+ uint32 cmd52rd; /* Cmd52RdCount, SDIO: cmd52 reads */
+ uint32 cmd52wr; /* Cmd52WrCount, SDIO: cmd52 writes */
+ uint32 cmd53rd; /* Cmd53RdCount, SDIO: cmd53 reads */
+ uint32 cmd53wr; /* Cmd53WrCount, SDIO: cmd53 writes */
+ uint32 abort; /* AbortCount, SDIO: aborts */
+ uint32 datacrcerror; /* DataCrcErrorCount, SDIO: frames w/CRC error */
+ uint32 rdoutofsync; /* RdOutOfSyncCount, SDIO/PCMCIA: Rd Frm out of sync */
+ uint32 wroutofsync; /* WrOutOfSyncCount, SDIO/PCMCIA: Wr Frm out of sync */
+ uint32 writebusy; /* WriteBusyCount, SDIO: device asserted "busy" */
+ uint32 readwait; /* ReadWaitCount, SDIO: no data ready for a read cmd */
+ uint32 readterm; /* ReadTermCount, SDIO: read frame termination cmds */
+ uint32 writeterm; /* WriteTermCount, SDIO: write frames termination cmds */
+ uint32 rxdescuflo; /* receive descriptor underflows */
+ uint32 rxfifooflo; /* receive fifo overflows */
+ uint32 txfifouflo; /* transmit fifo underflows */
+ uint32 runt; /* runt (too short) frames recv'd from bus */
+ uint32 badlen; /* frame's rxh len does not match its hw tag len */
+ uint32 badcksum; /* frame's hw tag chksum doesn't agree with len value */
+ uint32 seqbreak; /* break in sequence # space from one rx frame to the next */
+ uint32 rxfcrc; /* frame rx header indicates crc error */
+ uint32 rxfwoos; /* frame rx header indicates write out of sync */
+ uint32 rxfwft; /* frame rx header indicates write frame termination */
+ uint32 rxfabort; /* frame rx header indicates frame aborted */
+ uint32 woosint; /* write out of sync interrupt */
+ uint32 roosint; /* read out of sync interrupt */
+ uint32 rftermint; /* read frame terminate interrupt */
+ uint32 wftermint; /* write frame terminate interrupt */
+} sdpcmd_cnt_t;
+
+/*
+ * Register Access Macros
+ */
+
+#define SDIODREV_IS(var, val) ((var) == (val))
+#define SDIODREV_GE(var, val) ((var) >= (val))
+#define SDIODREV_GT(var, val) ((var) > (val))
+#define SDIODREV_LT(var, val) ((var) < (val))
+#define SDIODREV_LE(var, val) ((var) <= (val))
+
+#define SDIODDMAREG32(h, dir, chnl) \
+ ((dir) == DMA_TX ? \
+ (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].xmt) : \
+ (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].rcv))
+
+#define SDIODDMAREG64(h, dir, chnl) \
+ ((dir) == DMA_TX ? \
+ (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].xmt) : \
+ (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].rcv))
+
+#define SDIODDMAREG(h, dir, chnl) \
+ (SDIODREV_LT((h)->corerev, 1) ? \
+ SDIODDMAREG32((h), (dir), (chnl)) : \
+ SDIODDMAREG64((h), (dir), (chnl)))
+
+#define PCMDDMAREG(h, dir, chnl) \
+ ((dir) == DMA_TX ? \
+ (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.xmt) : \
+ (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.rcv))
+
+#define SDPCMDMAREG(h, dir, chnl, coreid) \
+ ((coreid) == SDIOD_CORE_ID ? \
+ SDIODDMAREG(h, dir, chnl) : \
+ PCMDDMAREG(h, dir, chnl))
+
+#define SDIODFIFOREG(h, corerev) \
+ (SDIODREV_LT((corerev), 1) ? \
+ ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod32.dmafifo)) : \
+ ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod64.dmafifo)))
+
+#define PCMDFIFOREG(h) \
+ ((dma32diag_t *)(uintptr)&((h)->regs->dma.pcm32.dmafifo))
+
+#define SDPCMFIFOREG(h, coreid, corerev) \
+ ((coreid) == SDIOD_CORE_ID ? \
+ SDIODFIFOREG(h, corerev) : \
+ PCMDFIFOREG(h))
+
+/*
+ * Shared structure between dongle and the host.
+ * The structure contains pointers to trap or assert information.
+ */
+#define SDPCM_SHARED_VERSION 0x0001
+#define SDPCM_SHARED_VERSION_MASK 0x00FF
+#define SDPCM_SHARED_ASSERT_BUILT 0x0100
+#define SDPCM_SHARED_ASSERT 0x0200
+#define SDPCM_SHARED_TRAP 0x0400
+#define SDPCM_SHARED_IN_BRPT 0x0800
+#define SDPCM_SHARED_SET_BRPT 0x1000
+#define SDPCM_SHARED_PENDING_BRPT 0x2000
+
+typedef struct {
+ uint32 flags;
+ uint32 trap_addr;
+ uint32 assert_exp_addr;
+ uint32 assert_file_addr;
+ uint32 assert_line;
+ uint32 console_addr; /* Address of hndrte_cons_t */
+ uint32 msgtrace_addr;
+ uint32 brpt_addr;
+} sdpcm_shared_t;
+
+extern sdpcm_shared_t sdpcm_shared;
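+
+/* Illustrative sketch: host-side checks on an sdpcm_shared_t image read back
+ * from the dongle; "sh" is an example local copy.
+ *
+ *	if ((sh.flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION)
+ *		...mismatched host/dongle shared-structure version...
+ *	if (sh.flags & SDPCM_SHARED_TRAP)
+ *		...fetch trap information from sh.trap_addr...
+ *	if ((sh.flags & SDPCM_SHARED_ASSERT_BUILT) && (sh.flags & SDPCM_SHARED_ASSERT))
+ *		...fetch the assert expression/file/line via the assert_* fields...
+ */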
+
+/* Function can be used to notify host of FW halt */
+extern void sdpcmd_fwhalt(void);
+
+#endif /* _bcmsdpcm_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdspi.h b/drivers/net/wireless/bcmdhd/include/bcmsdspi.h
new file mode 100644
index 000000000000..3d444f3ba265
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdspi.h
@@ -0,0 +1,135 @@
+/*
+ * SD-SPI Protocol Conversion - BCMSDH->SPI Translation Layer
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdspi.h 294363 2011-11-06 23:02:20Z $
+ */
+#ifndef _BCM_SD_SPI_H
+#define _BCM_SD_SPI_H
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+ do { if (!(exp)) \
+ printf("!!!ASSERT fail: file %s, line %d", __FILE__, __LINE__); \
+ } while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS 0
+#undef ERROR
+#define ERROR 1
+
+/* private bus modes */
+#define SDIOH_MODE_SPI 0
+
+#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */
+#define USE_MULTIBLOCK 0x4
+
+struct sdioh_info {
+ uint cfg_bar; /* pci cfg address for bar */
+ uint32 caps; /* cached value of capabilities reg */
+ uint bar0; /* BAR0 for PCI Device */
+ osl_t *osh; /* osh handler */
+ void *controller; /* Pointer to SPI Controller's private data struct */
+
+ uint lockcount; /* nest count of sdspi_lock() calls */
+ bool client_intr_enabled; /* interrupt connected flag */
+ bool intr_handler_valid; /* client driver interrupt handler valid */
+ sdioh_cb_fn_t intr_handler; /* registered interrupt handler */
+ void *intr_handler_arg; /* argument to call interrupt handler */
+ bool initialized; /* card initialized */
+ uint32 target_dev; /* Target device ID */
+ uint32 intmask; /* Current active interrupts */
+ void *sdos_info; /* Pointer to per-OS private data */
+
+ uint32 controller_type; /* Host controller type */
+ uint8 version; /* Host Controller Spec Compliance Version */
+ uint irq; /* Client irq */
+ uint32 intrcount; /* Client interrupts */
+ uint32 local_intrcount; /* Controller interrupts */
+ bool host_init_done; /* Controller initted */
+ bool card_init_done; /* Client SDIO interface initted */
+ bool polled_mode; /* polling for command completion */
+
+ bool sd_use_dma; /* DMA on CMD53 */
+ bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+ /* Must be on for sd_multiblock to be effective */
+ bool use_client_ints; /* If this is false, make sure to restore */
+ bool got_hcint; /* Host Controller interrupt. */
+ /* polling hack in wl_linux.c:wl_timer() */
+ int adapter_slot; /* Maybe dealing with multiple slots/controllers */
+ int sd_mode; /* SD1/SD4/SPI */
+ int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */
+ uint32 data_xfer_count; /* Current register transfer size */
+ uint32 cmd53_wr_data; /* Used to pass CMD53 write data */
+ uint32 card_response; /* Used to pass back response status byte */
+ uint32 card_rsp_data; /* Used to pass back response data word */
+ uint16 card_rca; /* Current Address */
+ uint8 num_funcs; /* Supported funcs on client */
+ uint32 com_cis_ptr;
+ uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS];
+ void *dma_buf;
+ ulong dma_phys;
+ int r_cnt; /* rx count */
+ int t_cnt; /* tx_count */
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdspi.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/**************************************************************
+ * Internal interfaces: bcmsdspi.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *spi_reg_map(osl_t *osh, uintptr addr, int size);
+extern void spi_reg_unmap(osl_t *osh, uintptr addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int spi_register_irq(sdioh_info_t *sd, uint irq);
+extern void spi_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void spi_lock(sdioh_info_t *sd);
+extern void spi_unlock(sdioh_info_t *sd);
+
+/* Allocate/init/free per-OS private data */
+extern int spi_osinit(sdioh_info_t *sd);
+extern void spi_osfree(sdioh_info_t *sd);
+
+#endif /* _BCM_SD_SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdstd.h b/drivers/net/wireless/bcmdhd/include/bcmsdstd.h
new file mode 100644
index 000000000000..8acc0044d502
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdstd.h
@@ -0,0 +1,248 @@
+/*
+ * 'Standard' SDIO HOST CONTROLLER driver
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdstd.h 324797 2012-03-30 11:02:00Z $
+ */
+#ifndef _BCM_SD_STD_H
+#define _BCM_SD_STD_H
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+#define sd_dma(x)
+
+#define sd_sync_dma(sd, read, nbytes)
+#define sd_init_dma(sd)
+#define sd_ack_intr(sd)
+#define sd_wakeup(sd)
+/* Allocate/init/free per-OS private data */
+extern int sdstd_osinit(sdioh_info_t *sd);
+extern void sdstd_osfree(sdioh_info_t *sd);
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+ do { if (!(exp)) \
+ printf("!!!ASSERT fail: file %s, line %d", __FILE__, __LINE__); \
+ } while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS 0
+#define ERROR 1
+
+/* private bus modes */
+#define SDIOH_MODE_SPI 0
+#define SDIOH_MODE_SD1 1
+#define SDIOH_MODE_SD4 2
+
+#define MAX_SLOTS 6 /* For PCI: Only 6 BAR entries => 6 slots */
+#define SDIOH_REG_WINSZ 0x100 /* Number of registers in Standard Host Controller */
+
+#define SDIOH_TYPE_ARASAN_HDK 1
+#define SDIOH_TYPE_BCM27XX 2
+#define SDIOH_TYPE_TI_PCIXX21 4 /* TI PCIxx21 Standard Host Controller */
+#define SDIOH_TYPE_RICOH_R5C822 5 /* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter */
+#define SDIOH_TYPE_JMICRON 6 /* JMicron Standard SDIO Host Controller */
+
+/* For linux, allow yielding for dongle */
+#define BCMSDYIELD
+
+/* Expected card status value for CMD7 */
+#define SDIOH_CMD7_EXP_STATUS 0x00001E00
+
+#define RETRIES_LARGE 100000
+#define sdstd_os_yield(sd) do {} while (0)
+#define RETRIES_SMALL 100
+
+
+#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */
+#define USE_MULTIBLOCK 0x4
+
+#define USE_FIFO 0x8 /* Fifo vs non-fifo */
+
+#define CLIENT_INTR 0x100 /* Get rid of this! */
+
+#define HC_INTR_RETUNING 0x1000
+
+
+struct sdioh_info {
+ uint cfg_bar; /* pci cfg address for bar */
+ uint32 caps; /* cached value of capabilities reg */
+ uint32 curr_caps; /* max current capabilities reg */
+
+ osl_t *osh; /* osh handler */
+ volatile char *mem_space; /* pci device memory va */
+ uint lockcount; /* nest count of sdstd_lock() calls */
+ bool client_intr_enabled; /* interrupt connected flag */
+ bool intr_handler_valid; /* client driver interrupt handler valid */
+ sdioh_cb_fn_t intr_handler; /* registered interrupt handler */
+ void *intr_handler_arg; /* argument to call interrupt handler */
+ bool initialized; /* card initialized */
+ uint target_dev; /* Target device ID */
+ uint16 intmask; /* Current active interrupts */
+ void *sdos_info; /* Pointer to per-OS private data */
+
+ uint32 controller_type; /* Host controller type */
+ uint8 version; /* Host Controller Spec Compliance Version */
+ uint irq; /* Client irq */
+ int intrcount; /* Client interrupts */
+ int local_intrcount; /* Controller interrupts */
+ bool host_init_done; /* Controller initted */
+ bool card_init_done; /* Client SDIO interface initted */
+ bool polled_mode; /* polling for command completion */
+
+ bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+ /* Must be on for sd_multiblock to be effective */
+ bool use_client_ints; /* If this is false, make sure to restore */
+ /* polling hack in wl_linux.c:wl_timer() */
+ int adapter_slot; /* Maybe dealing with multiple slots/controllers */
+ int sd_mode; /* SD1/SD4/SPI */
+ int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */
+ uint32 data_xfer_count; /* Current transfer */
+ uint16 card_rca; /* Current Address */
+ int8 sd_dma_mode; /* DMA Mode (PIO, SDMA, ... ADMA2) on CMD53 */
+ uint8 num_funcs; /* Supported funcs on client */
+ uint32 com_cis_ptr;
+ uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS];
+ void *dma_buf; /* DMA Buffer virtual address */
+ ulong dma_phys; /* DMA Buffer physical address */
+ void *adma2_dscr_buf; /* ADMA2 Descriptor Buffer virtual address */
+ ulong adma2_dscr_phys; /* ADMA2 Descriptor Buffer physical address */
+
+ /* adjustments needed to make the dma align properly */
+ void *dma_start_buf;
+ ulong dma_start_phys;
+ uint alloced_dma_size;
+ void *adma2_dscr_start_buf;
+ ulong adma2_dscr_start_phys;
+ uint alloced_adma2_dscr_size;
+
+ int r_cnt; /* rx count */
+ int t_cnt; /* tx_count */
+ bool got_hcint; /* local interrupt flag */
+ uint16 last_intrstatus; /* to cache intrstatus */
+ int host_UHSISupported; /* whether UHSI is supported for HC. */
+ int card_UHSI_voltage_Supported; /* whether UHSI is supported by the
+ * card, in terms of voltage (1.8 or 3.3 V).
+ */
+ int global_UHSI_Supp; /* type of UHSI support in both host and card.
+ * HOST_SDR_UNSUPP: capabilities not supported/matched
+ * HOST_SDR_12_25: SDR12 and SDR25 supported
+ * HOST_SDR_50_104_DDR: one of SDR50/SDR104 or DDR50 supptd
+ */
+ volatile int sd3_dat_state; /* data transfer state used for retuning check */
+ volatile int sd3_tun_state; /* tuning state used for retuning check */
+ bool sd3_tuning_reqd; /* tuning requirement parameter */
+ uint32 caps3; /* cached value of 32 MSbits capabilities reg (SDIO 3.0) */
+};
+
+#define DMA_MODE_NONE 0
+#define DMA_MODE_SDMA 1
+#define DMA_MODE_ADMA1 2
+#define DMA_MODE_ADMA2 3
+#define DMA_MODE_ADMA2_64 4
+#define DMA_MODE_AUTO -1
+
+#define USE_DMA(sd) ((bool)((sd->sd_dma_mode > 0) ? TRUE : FALSE))
+
+/* States for Tuning and corr data */
+#define TUNING_IDLE 0
+#define TUNING_START 1
+#define TUNING_START_AFTER_DAT 2
+#define TUNING_ONGOING 3
+
+#define DATA_TRANSFER_IDLE 0
+#define DATA_TRANSFER_ONGOING 1
+
+#define CHECK_TUNING_PRE_DATA 1
+#define CHECK_TUNING_POST_DATA 2
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdstd.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/* OS-independent interrupt handler */
+extern bool check_client_intr(sdioh_info_t *sd);
+
+/* Core interrupt enable/disable of device interrupts */
+extern void sdstd_devintr_on(sdioh_info_t *sd);
+extern void sdstd_devintr_off(sdioh_info_t *sd);
+
+/* Enable/disable interrupts for local controller events */
+extern void sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err);
+extern void sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err);
+
+/* Wait for specified interrupt and error bits to be set */
+extern void sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err);
+
+
+/**************************************************************
+ * Internal interfaces: bcmsdstd.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *sdstd_reg_map(osl_t *osh, int32 addr, int size);
+extern void sdstd_reg_unmap(osl_t *osh, int32 addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int sdstd_register_irq(sdioh_info_t *sd, uint irq);
+extern void sdstd_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void sdstd_lock(sdioh_info_t *sd);
+extern void sdstd_unlock(sdioh_info_t *sd);
+extern void sdstd_waitlockfree(sdioh_info_t *sd);
+
+/* OS-specific wait-for-interrupt-or-status */
+extern int sdstd_waitbits(sdioh_info_t *sd, uint16 norm, uint16 err, bool yield, uint16 *bits);
+
+/* used by bcmsdstd_linux [implemented in sdstd] */
+extern void sdstd_3_enable_retuning_int(sdioh_info_t *sd);
+extern void sdstd_3_disable_retuning_int(sdioh_info_t *sd);
+extern bool sdstd_3_is_retuning_int_set(sdioh_info_t *sd);
+extern void sdstd_3_check_and_do_tuning(sdioh_info_t *sd, int tuning_param);
+extern bool sdstd_3_check_and_set_retuning(sdioh_info_t *sd);
+extern int sdstd_3_get_tune_state(sdioh_info_t *sd);
+extern int sdstd_3_get_data_state(sdioh_info_t *sd);
+extern void sdstd_3_set_tune_state(sdioh_info_t *sd, int state);
+extern void sdstd_3_set_data_state(sdioh_info_t *sd, int state);
+extern uint8 sdstd_3_get_tuning_exp(sdioh_info_t *sd);
+extern uint32 sdstd_3_get_uhsi_clkmode(sdioh_info_t *sd);
+extern int sdstd_3_clk_tuning(sdioh_info_t *sd, uint32 sd3ClkMode);
+
+/* used by sdstd [implemented in bcmsdstd_linux/ndis] */
+extern void sdstd_3_start_tuning(sdioh_info_t *sd);
+extern void sdstd_3_osinit_tuning(sdioh_info_t *sd);
+extern void sdstd_3_osclean_tuning(sdioh_info_t *sd);
+
+#endif /* _BCM_SD_STD_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmspi.h b/drivers/net/wireless/bcmdhd/include/bcmspi.h
new file mode 100644
index 000000000000..e226cb102292
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmspi.h
@@ -0,0 +1,40 @@
+/*
+ * Broadcom SPI Low-Level Hardware Driver API
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmspi.h 241182 2011-02-17 21:50:03Z $
+ */
+#ifndef _BCM_SPI_H
+#define _BCM_SPI_H
+
+extern void spi_devintr_off(sdioh_info_t *sd);
+extern void spi_devintr_on(sdioh_info_t *sd);
+extern bool spi_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor);
+extern bool spi_controller_highspeed_mode(sdioh_info_t *sd, bool hsmode);
+extern bool spi_check_client_intr(sdioh_info_t *sd, int *is_dev_intr);
+extern bool spi_hw_attach(sdioh_info_t *sd);
+extern bool spi_hw_detach(sdioh_info_t *sd);
+extern void spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen);
+extern void spi_spinbits(sdioh_info_t *sd);
+extern void spi_waitbits(sdioh_info_t *sd, bool yield);
+
+#endif /* _BCM_SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmspibrcm.h b/drivers/net/wireless/bcmdhd/include/bcmspibrcm.h
new file mode 100644
index 000000000000..0975dc8acdad
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmspibrcm.h
@@ -0,0 +1,139 @@
+/*
+ * SD-SPI Protocol Conversion - BCMSDH->gSPI Translation Layer
+ *
+ * Copyright (C) 2012, Broadcom Corporation
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
+ * the contents of this file may not be disclosed to third parties, copied
+ * or duplicated in any form, in whole or in part, without the prior
+ * written permission of Broadcom Corporation.
+ *
+ * $Id: bcmspibrcm.h 241182 2011-02-17 21:50:03Z $
+ */
+#ifndef _BCM_SPI_BRCM_H
+#define _BCM_SPI_BRCM_H
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+ do { if (!(exp)) \
+ printf("!!!ASSERT fail: file %s, line %d", __FILE__, __LINE__); \
+ } while (0)
+
+#define BLOCK_SIZE_F1 64
+#define BLOCK_SIZE_F2 2048
+#define BLOCK_SIZE_F3 2048
+
+/* internal return code */
+#define SUCCESS 0
+#undef ERROR
+#define ERROR 1
+#define ERROR_UF 2
+#define ERROR_OF 3
+
+/* private bus modes */
+#define SDIOH_MODE_SPI 0
+
+#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */
+#define USE_MULTIBLOCK 0x4
+
+struct sdioh_info {
+ uint cfg_bar; /* pci cfg address for bar */
+ uint32 caps; /* cached value of capabilities reg */
+ void *bar0; /* BAR0 for PCI Device */
+ osl_t *osh; /* osh handler */
+ void *controller; /* Pointer to SPI Controller's private data struct */
+
+ uint lockcount; /* nest count of spi_lock() calls */
+ bool client_intr_enabled; /* interrupt connected flag */
+ bool intr_handler_valid; /* client driver interrupt handler valid */
+ sdioh_cb_fn_t intr_handler; /* registered interrupt handler */
+ void *intr_handler_arg; /* argument to call interrupt handler */
+ bool initialized; /* card initialized */
+ uint32 target_dev; /* Target device ID */
+ uint32 intmask; /* Current active interrupts */
+ void *sdos_info; /* Pointer to per-OS private data */
+
+ uint32 controller_type; /* Host controller type */
+ uint8 version; /* Host Controller Spec Compliance Version */
+ uint irq; /* Client irq */
+ uint32 intrcount; /* Client interrupts */
+ uint32 local_intrcount; /* Controller interrupts */
+ bool host_init_done; /* Controller initted */
+ bool card_init_done; /* Client SDIO interface initted */
+ bool polled_mode; /* polling for command completion */
+
+ bool sd_use_dma; /* DMA on CMD53 */
+ bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+ /* Must be on for sd_multiblock to be effective */
+ bool use_client_ints; /* If this is false, make sure to restore */
+ /* polling hack in wl_linux.c:wl_timer() */
+ int adapter_slot; /* Maybe dealing with multiple slots/controllers */
+ int sd_mode; /* SD1/SD4/SPI */
+ int client_block_size[SPI_MAX_IOFUNCS]; /* Blocksize */
+ uint32 data_xfer_count; /* Current transfer */
+ uint16 card_rca; /* Current Address */
+ uint8 num_funcs; /* Supported funcs on client */
+ uint32 card_dstatus; /* 32bit device status */
+ uint32 com_cis_ptr;
+ uint32 func_cis_ptr[SPI_MAX_IOFUNCS];
+ void *dma_buf;
+ ulong dma_phys;
+ int r_cnt; /* rx count */
+ int t_cnt; /* tx_count */
+ uint32 wordlen; /* host processor 16/32bits */
+ uint32 prev_fun;
+ uint32 chip;
+ uint32 chiprev;
+ bool resp_delay_all;
+ bool dwordmode;
+ bool resp_delay_new;
+
+ struct spierrstats_t spierrstats;
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmspibrcm.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/**************************************************************
+ * Internal interfaces: bcmspibrcm.c references to per-port code
+ */
+
+/* Interrupt (de)registration routines */
+extern int spi_register_irq(sdioh_info_t *sd, uint irq);
+extern void spi_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void spi_lock(sdioh_info_t *sd);
+extern void spi_unlock(sdioh_info_t *sd);
+
+/* Allocate/init/free per-OS private data */
+extern int spi_osinit(sdioh_info_t *sd);
+extern void spi_osfree(sdioh_info_t *sd);
+
+#define SPI_RW_FLAG_M BITFIELD_MASK(1) /* Bit [31] - R/W Command Bit */
+#define SPI_RW_FLAG_S 31
+#define SPI_ACCESS_M BITFIELD_MASK(1) /* Bit [30] - Fixed/Incr Access */
+#define SPI_ACCESS_S 30
+#define SPI_FUNCTION_M BITFIELD_MASK(2) /* Bit [29:28] - Function Number */
+#define SPI_FUNCTION_S 28
+#define SPI_REG_ADDR_M BITFIELD_MASK(17) /* Bit [27:11] - Address */
+#define SPI_REG_ADDR_S 11
+#define SPI_LEN_M BITFIELD_MASK(11) /* Bit [10:0] - Packet length */
+#define SPI_LEN_S 0
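+
+/* Illustrative sketch: packing a 32-bit gSPI command word from the fields
+ * above, using explicit shifts that assume the layout documented in the
+ * comments (R/W [31], access [30], function [29:28], address [27:11],
+ * length [10:0]); "rw", "incr", "fn", "addr" and "len" are example inputs.
+ *
+ *	uint32 cmd = ((uint32)rw << SPI_RW_FLAG_S) |
+ *	        ((uint32)incr << SPI_ACCESS_S) |
+ *	        ((uint32)(fn & 3) << SPI_FUNCTION_S) |
+ *	        ((addr & 0x1ffff) << SPI_REG_ADDR_S) |
+ *	        (len & 0x7ff);
+ */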
+
+#endif /* _BCM_SPI_BRCM_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsrom_fmt.h b/drivers/net/wireless/bcmdhd/include/bcmsrom_fmt.h
new file mode 100644
index 000000000000..f5246a56129a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsrom_fmt.h
@@ -0,0 +1,607 @@
+/*
+ * SROM format definition.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsrom_fmt.h 327439 2012-04-13 17:44:48Z $
+ */
+
+#ifndef _bcmsrom_fmt_h_
+#define _bcmsrom_fmt_h_
+
+#define SROM_MAXREV 11 /* max revision supported by driver */
+
+/* Maximum srom: 6 Kilobits == 768 bytes */
+#define SROM_MAX 768
+#define SROM_MAXW 384
+#define VARS_MAX 4096
+
+/* PCI fields */
+#define PCI_F0DEVID 48
+
+
+#define SROM_WORDS 64
+
+#define SROM3_SWRGN_OFF 28 /* s/w region offset in words */
+
+#define SROM_SSID 2
+
+#define SROM_WL1LHMAXP 29
+
+#define SROM_WL1LPAB0 30
+#define SROM_WL1LPAB1 31
+#define SROM_WL1LPAB2 32
+
+#define SROM_WL1HPAB0 33
+#define SROM_WL1HPAB1 34
+#define SROM_WL1HPAB2 35
+
+#define SROM_MACHI_IL0 36
+#define SROM_MACMID_IL0 37
+#define SROM_MACLO_IL0 38
+#define SROM_MACHI_ET0 39
+#define SROM_MACMID_ET0 40
+#define SROM_MACLO_ET0 41
+#define SROM_MACHI_ET1 42
+#define SROM_MACMID_ET1 43
+#define SROM_MACLO_ET1 44
+#define SROM3_MACHI 37
+#define SROM3_MACMID 38
+#define SROM3_MACLO 39
+
+#define SROM_BXARSSI2G 40
+#define SROM_BXARSSI5G 41
+
+#define SROM_TRI52G 42
+#define SROM_TRI5GHL 43
+
+#define SROM_RXPO52G 45
+
+#define SROM2_ENETPHY 45
+
+#define SROM_AABREV 46
+/* Fields in AABREV */
+#define SROM_BR_MASK 0x00ff
+#define SROM_CC_MASK 0x0f00
+#define SROM_CC_SHIFT 8
+#define SROM_AA0_MASK 0x3000
+#define SROM_AA0_SHIFT 12
+#define SROM_AA1_MASK 0xc000
+#define SROM_AA1_SHIFT 14
+
+#define SROM_WL0PAB0 47
+#define SROM_WL0PAB1 48
+#define SROM_WL0PAB2 49
+
+#define SROM_LEDBH10 50
+#define SROM_LEDBH32 51
+
+#define SROM_WL10MAXP 52
+
+#define SROM_WL1PAB0 53
+#define SROM_WL1PAB1 54
+#define SROM_WL1PAB2 55
+
+#define SROM_ITT 56
+
+#define SROM_BFL 57
+#define SROM_BFL2 28
+#define SROM3_BFL2 61
+
+#define SROM_AG10 58
+
+#define SROM_CCODE 59
+
+#define SROM_OPO 60
+
+#define SROM3_LEDDC 62
+
+#define SROM_CRCREV 63
+
+/* SROM Rev 4: Reallocate the software part of the srom to accommodate
+ * MIMO features. It assumes up to two PCIe functions and 440 bytes
+ * of usable srom, i.e. the usable storage in chips with OTP that
+ * implements hardware redundancy.
+ */
+
+#define SROM4_WORDS 220
+
+#define SROM4_SIGN 32
+#define SROM4_SIGNATURE 0x5372
+
+#define SROM4_BREV 33
+
+#define SROM4_BFL0 34
+#define SROM4_BFL1 35
+#define SROM4_BFL2 36
+#define SROM4_BFL3 37
+#define SROM5_BFL0 37
+#define SROM5_BFL1 38
+#define SROM5_BFL2 39
+#define SROM5_BFL3 40
+
+#define SROM4_MACHI 38
+#define SROM4_MACMID 39
+#define SROM4_MACLO 40
+#define SROM5_MACHI 41
+#define SROM5_MACMID 42
+#define SROM5_MACLO 43
+
+#define SROM4_CCODE 41
+#define SROM4_REGREV 42
+#define SROM5_CCODE 34
+#define SROM5_REGREV 35
+
+#define SROM4_LEDBH10 43
+#define SROM4_LEDBH32 44
+#define SROM5_LEDBH10 59
+#define SROM5_LEDBH32 60
+
+#define SROM4_LEDDC 45
+#define SROM5_LEDDC 45
+
+#define SROM4_AA 46
+#define SROM4_AA2G_MASK 0x00ff
+#define SROM4_AA2G_SHIFT 0
+#define SROM4_AA5G_MASK 0xff00
+#define SROM4_AA5G_SHIFT 8
+
+#define SROM4_AG10 47
+#define SROM4_AG32 48
+
+#define SROM4_TXPID2G 49
+#define SROM4_TXPID5G 51
+#define SROM4_TXPID5GL 53
+#define SROM4_TXPID5GH 55
+
+#define SROM4_TXRXC 61
+#define SROM4_TXCHAIN_MASK 0x000f
+#define SROM4_TXCHAIN_SHIFT 0
+#define SROM4_RXCHAIN_MASK 0x00f0
+#define SROM4_RXCHAIN_SHIFT 4
+#define SROM4_SWITCH_MASK 0xff00
+#define SROM4_SWITCH_SHIFT 8
+
+
+/* Per-path fields */
+#define MAX_PATH_SROM 4
+#define SROM4_PATH0 64
+#define SROM4_PATH1 87
+#define SROM4_PATH2 110
+#define SROM4_PATH3 133
+
+#define SROM4_2G_ITT_MAXP 0
+#define SROM4_2G_PA 1
+#define SROM4_5G_ITT_MAXP 5
+#define SROM4_5GLH_MAXP 6
+#define SROM4_5G_PA 7
+#define SROM4_5GL_PA 11
+#define SROM4_5GH_PA 15
+
+/* Fields in the ITT_MAXP and 5GLH_MAXP words */
+#define B2G_MAXP_MASK 0xff
+#define B2G_ITT_SHIFT 8
+#define B5G_MAXP_MASK 0xff
+#define B5G_ITT_SHIFT 8
+#define B5GH_MAXP_MASK 0xff
+#define B5GL_MAXP_SHIFT 8
+
+/* All the myriad power offsets */
+#define SROM4_2G_CCKPO 156
+#define SROM4_2G_OFDMPO 157
+#define SROM4_5G_OFDMPO 159
+#define SROM4_5GL_OFDMPO 161
+#define SROM4_5GH_OFDMPO 163
+#define SROM4_2G_MCSPO 165
+#define SROM4_5G_MCSPO 173
+#define SROM4_5GL_MCSPO 181
+#define SROM4_5GH_MCSPO 189
+#define SROM4_CDDPO 197
+#define SROM4_STBCPO 198
+#define SROM4_BW40PO 199
+#define SROM4_BWDUPPO 200
+
+#define SROM4_CRCREV 219
+
+
+/* SROM Rev 8: Make space for a 48-word hardware header for PCIe rev >= 6.
+ * This is a combined srom for both MIMO and SISO boards, usable in
+ * the .130 4-kilobit OTP with hardware redundancy.
+ */
+
+#define SROM8_SIGN 64
+
+#define SROM8_BREV 65
+
+#define SROM8_BFL0 66
+#define SROM8_BFL1 67
+#define SROM8_BFL2 68
+#define SROM8_BFL3 69
+
+#define SROM8_MACHI 70
+#define SROM8_MACMID 71
+#define SROM8_MACLO 72
+
+#define SROM8_CCODE 73
+#define SROM8_REGREV 74
+
+#define SROM8_LEDBH10 75
+#define SROM8_LEDBH32 76
+
+#define SROM8_LEDDC 77
+
+#define SROM8_AA 78
+
+#define SROM8_AG10 79
+#define SROM8_AG32 80
+
+#define SROM8_TXRXC 81
+
+#define SROM8_BXARSSI2G 82
+#define SROM8_BXARSSI5G 83
+#define SROM8_TRI52G 84
+#define SROM8_TRI5GHL 85
+#define SROM8_RXPO52G 86
+
+#define SROM8_FEM2G 87
+#define SROM8_FEM5G 88
+#define SROM8_FEM_ANTSWLUT_MASK 0xf800
+#define SROM8_FEM_ANTSWLUT_SHIFT 11
+#define SROM8_FEM_TR_ISO_MASK 0x0700
+#define SROM8_FEM_TR_ISO_SHIFT 8
+#define SROM8_FEM_PDET_RANGE_MASK 0x00f8
+#define SROM8_FEM_PDET_RANGE_SHIFT 3
+#define SROM8_FEM_EXTPA_GAIN_MASK 0x0006
+#define SROM8_FEM_EXTPA_GAIN_SHIFT 1
+#define SROM8_FEM_TSSIPOS_MASK 0x0001
+#define SROM8_FEM_TSSIPOS_SHIFT 0
+
+#define SROM8_THERMAL 89
+
+/* Temp sense related entries */
+#define SROM8_MPWR_RAWTS 90
+#define SROM8_TS_SLP_OPT_CORRX 91
+/* FOC: frequency offset correction, HWIQ: H/W IOCAL enable, IQSWP: IQ CAL swap disable */
+#define SROM8_FOC_HWIQ_IQSWP 92
+
+#define SROM8_EXTLNAGAIN 93
+
+/* Temperature delta for PHY calibration */
+#define SROM8_PHYCAL_TEMPDELTA 94
+
+/* Measured power 1 & 2 occupy bits 0-13 at offset 95; the 2 MSBs are unused for now. */
+#define SROM8_MPWR_1_AND_2 95
+
+
+/* Per-path offsets & fields */
+#define SROM8_PATH0 96
+#define SROM8_PATH1 112
+#define SROM8_PATH2 128
+#define SROM8_PATH3 144
+
+#define SROM8_2G_ITT_MAXP 0
+#define SROM8_2G_PA 1
+#define SROM8_5G_ITT_MAXP 4
+#define SROM8_5GLH_MAXP 5
+#define SROM8_5G_PA 6
+#define SROM8_5GL_PA 9
+#define SROM8_5GH_PA 12
+
+/* All the myriad power offsets */
+#define SROM8_2G_CCKPO 160
+
+#define SROM8_2G_OFDMPO 161
+#define SROM8_5G_OFDMPO 163
+#define SROM8_5GL_OFDMPO 165
+#define SROM8_5GH_OFDMPO 167
+
+#define SROM8_2G_MCSPO 169
+#define SROM8_5G_MCSPO 177
+#define SROM8_5GL_MCSPO 185
+#define SROM8_5GH_MCSPO 193
+
+#define SROM8_CDDPO 201
+#define SROM8_STBCPO 202
+#define SROM8_BW40PO 203
+#define SROM8_BWDUPPO 204
+
+/* SISO PA parameters are in the path0 spaces */
+#define SROM8_SISO 96
+
+/* Legacy names for SISO PA parameters */
+#define SROM8_W0_ITTMAXP (SROM8_SISO + SROM8_2G_ITT_MAXP)
+#define SROM8_W0_PAB0 (SROM8_SISO + SROM8_2G_PA)
+#define SROM8_W0_PAB1 (SROM8_SISO + SROM8_2G_PA + 1)
+#define SROM8_W0_PAB2 (SROM8_SISO + SROM8_2G_PA + 2)
+#define SROM8_W1_ITTMAXP (SROM8_SISO + SROM8_5G_ITT_MAXP)
+#define SROM8_W1_MAXP_LCHC (SROM8_SISO + SROM8_5GLH_MAXP)
+#define SROM8_W1_PAB0 (SROM8_SISO + SROM8_5G_PA)
+#define SROM8_W1_PAB1 (SROM8_SISO + SROM8_5G_PA + 1)
+#define SROM8_W1_PAB2 (SROM8_SISO + SROM8_5G_PA + 2)
+#define SROM8_W1_PAB0_LC (SROM8_SISO + SROM8_5GL_PA)
+#define SROM8_W1_PAB1_LC (SROM8_SISO + SROM8_5GL_PA + 1)
+#define SROM8_W1_PAB2_LC (SROM8_SISO + SROM8_5GL_PA + 2)
+#define SROM8_W1_PAB0_HC (SROM8_SISO + SROM8_5GH_PA)
+#define SROM8_W1_PAB1_HC (SROM8_SISO + SROM8_5GH_PA + 1)
+#define SROM8_W1_PAB2_HC (SROM8_SISO + SROM8_5GH_PA + 2)
+
+#define SROM8_CRCREV 219
+
+/* SROM REV 9 */
+#define SROM9_2GPO_CCKBW20 160
+#define SROM9_2GPO_CCKBW20UL 161
+#define SROM9_2GPO_LOFDMBW20 162
+#define SROM9_2GPO_LOFDMBW20UL 164
+
+#define SROM9_5GLPO_LOFDMBW20 166
+#define SROM9_5GLPO_LOFDMBW20UL 168
+#define SROM9_5GMPO_LOFDMBW20 170
+#define SROM9_5GMPO_LOFDMBW20UL 172
+#define SROM9_5GHPO_LOFDMBW20 174
+#define SROM9_5GHPO_LOFDMBW20UL 176
+
+#define SROM9_2GPO_MCSBW20 178
+#define SROM9_2GPO_MCSBW20UL 180
+#define SROM9_2GPO_MCSBW40 182
+
+#define SROM9_5GLPO_MCSBW20 184
+#define SROM9_5GLPO_MCSBW20UL 186
+#define SROM9_5GLPO_MCSBW40 188
+#define SROM9_5GMPO_MCSBW20 190
+#define SROM9_5GMPO_MCSBW20UL 192
+#define SROM9_5GMPO_MCSBW40 194
+#define SROM9_5GHPO_MCSBW20 196
+#define SROM9_5GHPO_MCSBW20UL 198
+#define SROM9_5GHPO_MCSBW40 200
+
+#define SROM9_PO_MCS32 202
+#define SROM9_PO_LOFDM40DUP 203
+#define SROM8_RXGAINERR_2G 205
+#define SROM8_RXGAINERR_5GL 206
+#define SROM8_RXGAINERR_5GM 207
+#define SROM8_RXGAINERR_5GH 208
+#define SROM8_RXGAINERR_5GU 209
+#define SROM8_SUBBAND_PPR 210
+#define SROM8_PCIEINGRESS_WAR 211
+#define SROM9_SAR 212
+
+#define SROM8_NOISELVL_2G 213
+#define SROM8_NOISELVL_5GL 214
+#define SROM8_NOISELVL_5GM 215
+#define SROM8_NOISELVL_5GH 216
+#define SROM8_NOISELVL_5GU 217
+
+#define SROM9_REV_CRC 219
+
+#define SROM10_CCKPWROFFSET 218
+#define SROM10_SIGN 219
+#define SROM10_SWCTRLMAP_2G 220
+#define SROM10_CRCREV 229
+
+#define SROM10_WORDS 230
+#define SROM10_SIGNATURE SROM4_SIGNATURE
+
+
+/* SROM REV 11 */
+#define SROM11_BREV 65
+
+#define SROM11_BFL0 66
+#define SROM11_BFL1 67
+#define SROM11_BFL2 68
+#define SROM11_BFL3 69
+#define SROM11_BFL4 70
+#define SROM11_BFL5 71
+
+#define SROM11_MACHI 72
+#define SROM11_MACMID 73
+#define SROM11_MACLO 74
+
+#define SROM11_CCODE 75
+#define SROM11_REGREV 76
+
+#define SROM11_LEDBH10 77
+#define SROM11_LEDBH32 78
+
+#define SROM11_LEDDC 79
+
+#define SROM11_AA 80
+
+#define SROM11_AGBG10 81
+#define SROM11_AGBG2A0 82
+#define SROM11_AGA21 83
+
+#define SROM11_TXRXC 84
+
+#define SROM11_FEM_CFG1 85
+#define SROM11_FEM_CFG2 86
+
+/* Masks and offsets for FEM_CFG */
+#define SROM11_FEMCTRL_MASK 0xf800
+#define SROM11_FEMCTRL_SHIFT 11
+#define SROM11_PAPDCAP_MASK 0x0400
+#define SROM11_PAPDCAP_SHIFT 10
+#define SROM11_TWORANGETSSI_MASK 0x0200
+#define SROM11_TWORANGETSSI_SHIFT 9
+#define SROM11_PDGAIN_MASK 0x01f0
+#define SROM11_PDGAIN_SHIFT 4
+#define SROM11_EPAGAIN_MASK 0x000e
+#define SROM11_EPAGAIN_SHIFT 1
+#define SROM11_TSSIPOSSLOPE_MASK 0x0001
+#define SROM11_TSSIPOSSLOPE_SHIFT 0
+#define SROM11_GAINCTRLSPH_MASK 0xf800
+#define SROM11_GAINCTRLSPH_SHIFT 11
+
+#define SROM11_THERMAL 87
+#define SROM11_MPWR_RAWTS 88
+#define SROM11_TS_SLP_OPT_CORRX 89
+#define SROM11_PHYCAL_TEMPDELTA 92
+#define SROM11_MPWR_1_AND_2 93
+
+/* Masks and offsets for Thermal parameters */
+#define SROM11_TEMPS_PERIOD_MASK 0xf0
+#define SROM11_TEMPS_PERIOD_SHIFT 4
+#define SROM11_TEMPS_HYSTERESIS_MASK 0x0f
+#define SROM11_TEMPS_HYSTERESIS_SHIFT 0
+#define SROM11_TEMPCORRX_MASK 0xfc
+#define SROM11_TEMPCORRX_SHIFT 2
+#define SROM11_TEMPSENSE_OPTION_MASK 0x3
+#define SROM11_TEMPSENSE_OPTION_SHIFT 0
+
+#define SROM11_PDOFF_40M_A0 101
+#define SROM11_PDOFF_40M_A1 102
+#define SROM11_PDOFF_40M_A2 103
+#define SROM11_PDOFF_80M_A0 104
+#define SROM11_PDOFF_80M_A1 105
+#define SROM11_PDOFF_80M_A2 106
+
+#define SROM11_SUBBAND5GVER 107
+
+/* Per-path fields and offset */
+#define MAX_PATH_SROM_11 3
+#define SROM11_PATH0 108
+#define SROM11_PATH1 128
+#define SROM11_PATH2 148
+
+#define SROM11_2G_MAXP 0
+#define SROM11_2G_PA 1
+#define SROM11_RXGAINS1 4
+#define SROM11_RXGAINS 5
+#define SROM11_5GB1B0_MAXP 6
+#define SROM11_5GB3B2_MAXP 7
+#define SROM11_5GB0_PA 8
+#define SROM11_5GB1_PA 11
+#define SROM11_5GB2_PA 14
+#define SROM11_5GB3_PA 17
+
+/* Masks and offsets for rxgains */
+#define SROM11_RXGAINS5GTRELNABYPA_MASK 0x8000
+#define SROM11_RXGAINS5GTRELNABYPA_SHIFT 15
+#define SROM11_RXGAINS5GTRISOA_MASK 0x7800
+#define SROM11_RXGAINS5GTRISOA_SHIFT 11
+#define SROM11_RXGAINS5GELNAGAINA_MASK 0x0700
+#define SROM11_RXGAINS5GELNAGAINA_SHIFT 8
+#define SROM11_RXGAINS2GTRELNABYPA_MASK 0x0080
+#define SROM11_RXGAINS2GTRELNABYPA_SHIFT 7
+#define SROM11_RXGAINS2GTRISOA_MASK 0x0078
+#define SROM11_RXGAINS2GTRISOA_SHIFT 3
+#define SROM11_RXGAINS2GELNAGAINA_MASK 0x0007
+#define SROM11_RXGAINS2GELNAGAINA_SHIFT 0
+#define SROM11_RXGAINS5GHTRELNABYPA_MASK 0x8000
+#define SROM11_RXGAINS5GHTRELNABYPA_SHIFT 15
+#define SROM11_RXGAINS5GHTRISOA_MASK 0x7800
+#define SROM11_RXGAINS5GHTRISOA_SHIFT 11
+#define SROM11_RXGAINS5GHELNAGAINA_MASK 0x0700
+#define SROM11_RXGAINS5GHELNAGAINA_SHIFT 8
+#define SROM11_RXGAINS5GMTRELNABYPA_MASK 0x0080
+#define SROM11_RXGAINS5GMTRELNABYPA_SHIFT 7
+#define SROM11_RXGAINS5GMTRISOA_MASK 0x0078
+#define SROM11_RXGAINS5GMTRISOA_SHIFT 3
+#define SROM11_RXGAINS5GMELNAGAINA_MASK 0x0007
+#define SROM11_RXGAINS5GMELNAGAINA_SHIFT 0
+
+/* Power per rate */
+#define SROM11_CCKBW202GPO 168
+#define SROM11_CCKBW20UL2GPO 169
+#define SROM11_MCSBW202GPO 170
+#define SROM11_MCSBW202GPO_1 171
+#define SROM11_MCSBW402GPO 172
+#define SROM11_MCSBW402GPO_1 173
+#define SROM11_DOT11AGOFDMHRBW202GPO 174
+#define SROM11_OFDMLRBW202GPO 175
+
+#define SROM11_MCSBW205GLPO 176
+#define SROM11_MCSBW205GLPO_1 177
+#define SROM11_MCSBW405GLPO 178
+#define SROM11_MCSBW405GLPO_1 179
+#define SROM11_MCSBW805GLPO 180
+#define SROM11_MCSBW805GLPO_1 181
+#define SROM11_MCSBW1605GLPO 182
+#define SROM11_MCSBW1605GLPO_1 183
+#define SROM11_MCSBW205GMPO 184
+#define SROM11_MCSBW205GMPO_1 185
+#define SROM11_MCSBW405GMPO 186
+#define SROM11_MCSBW405GMPO_1 187
+#define SROM11_MCSBW805GMPO 188
+#define SROM11_MCSBW805GMPO_1 189
+#define SROM11_MCSBW1605GMPO 190
+#define SROM11_MCSBW1605GMPO_1 191
+#define SROM11_MCSBW205GHPO 192
+#define SROM11_MCSBW205GHPO_1 193
+#define SROM11_MCSBW405GHPO 194
+#define SROM11_MCSBW405GHPO_1 195
+#define SROM11_MCSBW805GHPO 196
+#define SROM11_MCSBW805GHPO_1 197
+#define SROM11_MCSBW1605GHPO 198
+#define SROM11_MCSBW1605GHPO_1 199
+
+#define SROM11_MCSLR5GLPO 200
+#define SROM11_MCSLR5GMPO 201
+#define SROM11_MCSLR5GHPO 202
+
+#define SROM11_SB20IN40HRPO 203
+#define SROM11_SB20IN80AND160HR5GLPO 204
+#define SROM11_SB40AND80HR5GLPO 205
+#define SROM11_SB20IN80AND160HR5GMPO 206
+#define SROM11_SB40AND80HR5GMPO 207
+#define SROM11_SB20IN80AND160HR5GHPO 208
+#define SROM11_SB40AND80HR5GHPO 209
+#define SROM11_SB20IN40LRPO 210
+#define SROM11_SB20IN80AND160LR5GLPO 211
+#define SROM11_SB40AND80LR5GLPO 212
+#define SROM11_SB20IN80AND160LR5GMPO 213
+#define SROM11_SB40AND80LR5GMPO 214
+#define SROM11_SB20IN80AND160LR5GHPO 215
+#define SROM11_SB40AND80LR5GHPO 216
+
+#define SROM11_DOT11AGDUPHRPO 217
+#define SROM11_DOT11AGDUPLRPO 218
+
+/* MISC */
+#define SROM11_PCIEINGRESS_WAR 220
+#define SROM11_SAR 221
+
+#define SROM11_NOISELVL_2G 222
+#define SROM11_NOISELVL_5GL 223
+#define SROM11_NOISELVL_5GM 224
+#define SROM11_NOISELVL_5GH 225
+#define SROM11_NOISELVL_5GU 226
+
+#define SROM11_RXGAINERR_2G 227
+#define SROM11_RXGAINERR_5GL 228
+#define SROM11_RXGAINERR_5GM 229
+#define SROM11_RXGAINERR_5GH 230
+#define SROM11_RXGAINERR_5GU 231
+
+#define SROM11_SIGN 64
+#define SROM11_CRCREV 233
+
+#define SROM11_WORDS 234
+#define SROM11_SIGNATURE 0x0634
+
+typedef struct {
+ uint8 tssipos; /* TSSI positive slope, 1: positive, 0: negative */
+ uint8 extpagain; /* Ext PA gain-type: full-gain: 0, pa-lite: 1, no_pa: 2 */
+ uint8 pdetrange; /* support 32 combinations of different Pdet dynamic ranges */
+ uint8 triso; /* TR switch isolation */
+ uint8 antswctrllut; /* antswctrl lookup table configuration: 32 possible choices */
+} srom_fem_t;
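+
+/* Illustrative sketch (not part of the original header): unpacking one of the
+ * SROM8 FEM words (SROM8_FEM2G or SROM8_FEM5G) into srom_fem_t using the
+ * SROM8_FEM_* masks and shifts defined above.
+ */
+static inline void
+srom_fem_unpack(uint16 fem_word, srom_fem_t *fem)
+{
+	fem->tssipos      = (fem_word & SROM8_FEM_TSSIPOS_MASK) >> SROM8_FEM_TSSIPOS_SHIFT;
+	fem->extpagain    = (fem_word & SROM8_FEM_EXTPA_GAIN_MASK) >> SROM8_FEM_EXTPA_GAIN_SHIFT;
+	fem->pdetrange    = (fem_word & SROM8_FEM_PDET_RANGE_MASK) >> SROM8_FEM_PDET_RANGE_SHIFT;
+	fem->triso        = (fem_word & SROM8_FEM_TR_ISO_MASK) >> SROM8_FEM_TR_ISO_SHIFT;
+	fem->antswctrllut = (fem_word & SROM8_FEM_ANTSWLUT_MASK) >> SROM8_FEM_ANTSWLUT_SHIFT;
+}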
+
+#endif /* _bcmsrom_fmt_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsrom_tbl.h b/drivers/net/wireless/bcmdhd/include/bcmsrom_tbl.h
new file mode 100644
index 000000000000..040ae6a44b0a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsrom_tbl.h
@@ -0,0 +1,900 @@
+/*
+ * Table that encodes the srom formats for PCI/PCIe NICs.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsrom_tbl.h 327694 2012-04-16 13:22:24Z $
+ */
+
+#ifndef _bcmsrom_tbl_h_
+#define _bcmsrom_tbl_h_
+
+#include "sbpcmcia.h"
+#include "wlioctl.h"
+
+typedef struct {
+ const char *name;
+ uint32 revmask;
+ uint32 flags;
+ uint16 off;
+ uint16 mask;
+} sromvar_t;
+
+#define SRFL_MORE 1 /* value continues as described by the next entry */
+#define SRFL_NOFFS 2 /* value bits can't be all ones */
+#define SRFL_PRHEX 4 /* value is in hexadecimal format */
+#define SRFL_PRSIGN 8 /* value is in signed decimal format */
+#define SRFL_CCODE 0x10 /* value is in country code format */
+#define SRFL_ETHADDR 0x20 /* value is an Ethernet address */
+#define SRFL_LEDDC 0x40 /* value is an LED duty cycle */
+#define SRFL_NOVAR 0x80 /* do not generate an nvram param, entry is for mfgc */
+#define SRFL_ARRAY 0x100 /* value is in an array. All elements EXCEPT FOR THE LAST
+ * ONE in the array should have this flag set.
+ */
+
+
+/* Assumptions:
+ * - Ethernet address spans 3 consecutive words
+ *
+ * Table rules:
+ * - Add multiple entries next to each other if a value spans across multiple words
+ * (even multiple fields in the same word) with each entry except the last having
+ * its SRFL_MORE bit set.
+ * - Ethernet address entry does not follow above rule and must not have SRFL_MORE
+ * bit set. Its SRFL_ETHADDR bit implies it takes multiple words.
+ * - The last entry's name field must be NULL to indicate the end of the table. Other
+ * entries must have non-NULL name.
+ */
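+
+/* Illustrative sketch (not from the original Broadcom sources): counting the
+ * named variables a table below defines for a given SROM revision, following
+ * the conventions above.  'revbit' is assumed to be (1 << sromrev), matching
+ * the revmask encoding; continuation entries (SRFL_MORE / SRFL_ARRAY) carry an
+ * empty name and extend the preceding variable, and the table ends with a
+ * NULL name.
+ */
+static inline int
+srom_count_vars(const sromvar_t *tbl, uint32 revbit)
+{
+	const sromvar_t *v;
+	int n = 0;
+
+	for (v = tbl; v->name != NULL; v++)
+		if ((v->revmask & revbit) && v->name[0] != '\0')
+			n++;
+	return n;
+}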
+
+static const sromvar_t pci_sromvars[] = {
+ {"devid", 0xffffff00, SRFL_PRHEX|SRFL_NOVAR, PCI_F0DEVID, 0xffff},
+ {"boardrev", 0x0000000e, SRFL_PRHEX, SROM_AABREV, SROM_BR_MASK},
+ {"boardrev", 0x000000f0, SRFL_PRHEX, SROM4_BREV, 0xffff},
+ {"boardrev", 0xffffff00, SRFL_PRHEX, SROM8_BREV, 0xffff},
+ {"boardflags", 0x00000002, SRFL_PRHEX, SROM_BFL, 0xffff},
+ {"boardflags", 0x00000004, SRFL_PRHEX|SRFL_MORE, SROM_BFL, 0xffff},
+ {"", 0, 0, SROM_BFL2, 0xffff},
+ {"boardflags", 0x00000008, SRFL_PRHEX|SRFL_MORE, SROM_BFL, 0xffff},
+ {"", 0, 0, SROM3_BFL2, 0xffff},
+ {"boardflags", 0x00000010, SRFL_PRHEX|SRFL_MORE, SROM4_BFL0, 0xffff},
+ {"", 0, 0, SROM4_BFL1, 0xffff},
+ {"boardflags", 0x000000e0, SRFL_PRHEX|SRFL_MORE, SROM5_BFL0, 0xffff},
+ {"", 0, 0, SROM5_BFL1, 0xffff},
+ {"boardflags", 0xffffff00, SRFL_PRHEX|SRFL_MORE, SROM8_BFL0, 0xffff},
+ {"", 0, 0, SROM8_BFL1, 0xffff},
+ {"boardflags2", 0x00000010, SRFL_PRHEX|SRFL_MORE, SROM4_BFL2, 0xffff},
+ {"", 0, 0, SROM4_BFL3, 0xffff},
+ {"boardflags2", 0x000000e0, SRFL_PRHEX|SRFL_MORE, SROM5_BFL2, 0xffff},
+ {"", 0, 0, SROM5_BFL3, 0xffff},
+ {"boardflags2", 0xffffff00, SRFL_PRHEX|SRFL_MORE, SROM8_BFL2, 0xffff},
+ {"", 0, 0, SROM8_BFL3, 0xffff},
+ {"boardtype", 0xfffffffc, SRFL_PRHEX, SROM_SSID, 0xffff},
+
+ {"boardnum", 0x00000006, 0, SROM_MACLO_IL0, 0xffff},
+ {"boardnum", 0x00000008, 0, SROM3_MACLO, 0xffff},
+ {"boardnum", 0x00000010, 0, SROM4_MACLO, 0xffff},
+ {"boardnum", 0x000000e0, 0, SROM5_MACLO, 0xffff},
+ {"boardnum", 0x00000700, 0, SROM8_MACLO, 0xffff},
+ {"cc", 0x00000002, 0, SROM_AABREV, SROM_CC_MASK},
+ {"regrev", 0x00000008, 0, SROM_OPO, 0xff00},
+ {"regrev", 0x00000010, 0, SROM4_REGREV, 0x00ff},
+ {"regrev", 0x000000e0, 0, SROM5_REGREV, 0x00ff},
+ {"regrev", 0x00000700, 0, SROM8_REGREV, 0x00ff},
+ {"ledbh0", 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0x00ff},
+ {"ledbh1", 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0xff00},
+ {"ledbh2", 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0x00ff},
+ {"ledbh3", 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0xff00},
+ {"ledbh0", 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0x00ff},
+ {"ledbh1", 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0xff00},
+ {"ledbh2", 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0x00ff},
+ {"ledbh3", 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0xff00},
+ {"ledbh0", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0x00ff},
+ {"ledbh1", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0xff00},
+ {"ledbh2", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0x00ff},
+ {"ledbh3", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0xff00},
+ {"ledbh0", 0x00000700, SRFL_NOFFS, SROM8_LEDBH10, 0x00ff},
+ {"ledbh1", 0x00000700, SRFL_NOFFS, SROM8_LEDBH10, 0xff00},
+ {"ledbh2", 0x00000700, SRFL_NOFFS, SROM8_LEDBH32, 0x00ff},
+ {"ledbh3", 0x00000700, SRFL_NOFFS, SROM8_LEDBH32, 0xff00},
+ {"pa0b0", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB0, 0xffff},
+ {"pa0b1", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB1, 0xffff},
+ {"pa0b2", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB2, 0xffff},
+ {"pa0itssit", 0x0000000e, 0, SROM_ITT, 0x00ff},
+ {"pa0maxpwr", 0x0000000e, 0, SROM_WL10MAXP, 0x00ff},
+ {"pa0b0", 0x00000700, SRFL_PRHEX, SROM8_W0_PAB0, 0xffff},
+ {"pa0b1", 0x00000700, SRFL_PRHEX, SROM8_W0_PAB1, 0xffff},
+ {"pa0b2", 0x00000700, SRFL_PRHEX, SROM8_W0_PAB2, 0xffff},
+ {"pa0itssit", 0x00000700, 0, SROM8_W0_ITTMAXP, 0xff00},
+ {"pa0maxpwr", 0x00000700, 0, SROM8_W0_ITTMAXP, 0x00ff},
+ {"opo", 0x0000000c, 0, SROM_OPO, 0x00ff},
+ {"opo", 0x00000700, 0, SROM8_2G_OFDMPO, 0x00ff},
+ {"aa2g", 0x0000000e, 0, SROM_AABREV, SROM_AA0_MASK},
+ {"aa2g", 0x000000f0, 0, SROM4_AA, 0x00ff},
+ {"aa2g", 0x00000700, 0, SROM8_AA, 0x00ff},
+ {"aa5g", 0x0000000e, 0, SROM_AABREV, SROM_AA1_MASK},
+ {"aa5g", 0x000000f0, 0, SROM4_AA, 0xff00},
+ {"aa5g", 0x00000700, 0, SROM8_AA, 0xff00},
+ {"ag0", 0x0000000e, 0, SROM_AG10, 0x00ff},
+ {"ag1", 0x0000000e, 0, SROM_AG10, 0xff00},
+ {"ag0", 0x000000f0, 0, SROM4_AG10, 0x00ff},
+ {"ag1", 0x000000f0, 0, SROM4_AG10, 0xff00},
+ {"ag2", 0x000000f0, 0, SROM4_AG32, 0x00ff},
+ {"ag3", 0x000000f0, 0, SROM4_AG32, 0xff00},
+ {"ag0", 0x00000700, 0, SROM8_AG10, 0x00ff},
+ {"ag1", 0x00000700, 0, SROM8_AG10, 0xff00},
+ {"ag2", 0x00000700, 0, SROM8_AG32, 0x00ff},
+ {"ag3", 0x00000700, 0, SROM8_AG32, 0xff00},
+ {"pa1b0", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB0, 0xffff},
+ {"pa1b1", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB1, 0xffff},
+ {"pa1b2", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB2, 0xffff},
+ {"pa1lob0", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB0, 0xffff},
+ {"pa1lob1", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB1, 0xffff},
+ {"pa1lob2", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB2, 0xffff},
+ {"pa1hib0", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB0, 0xffff},
+ {"pa1hib1", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB1, 0xffff},
+ {"pa1hib2", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB2, 0xffff},
+ {"pa1itssit", 0x0000000e, 0, SROM_ITT, 0xff00},
+ {"pa1maxpwr", 0x0000000e, 0, SROM_WL10MAXP, 0xff00},
+ {"pa1lomaxpwr", 0x0000000c, 0, SROM_WL1LHMAXP, 0xff00},
+ {"pa1himaxpwr", 0x0000000c, 0, SROM_WL1LHMAXP, 0x00ff},
+ {"pa1b0", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB0, 0xffff},
+ {"pa1b1", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB1, 0xffff},
+ {"pa1b2", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB2, 0xffff},
+ {"pa1lob0", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB0_LC, 0xffff},
+ {"pa1lob1", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB1_LC, 0xffff},
+ {"pa1lob2", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB2_LC, 0xffff},
+ {"pa1hib0", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB0_HC, 0xffff},
+ {"pa1hib1", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB1_HC, 0xffff},
+ {"pa1hib2", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB2_HC, 0xffff},
+ {"pa1itssit", 0x00000700, 0, SROM8_W1_ITTMAXP, 0xff00},
+ {"pa1maxpwr", 0x00000700, 0, SROM8_W1_ITTMAXP, 0x00ff},
+ {"pa1lomaxpwr", 0x00000700, 0, SROM8_W1_MAXP_LCHC, 0xff00},
+ {"pa1himaxpwr", 0x00000700, 0, SROM8_W1_MAXP_LCHC, 0x00ff},
+ {"bxa2g", 0x00000008, 0, SROM_BXARSSI2G, 0x1800},
+ {"rssisav2g", 0x00000008, 0, SROM_BXARSSI2G, 0x0700},
+ {"rssismc2g", 0x00000008, 0, SROM_BXARSSI2G, 0x00f0},
+ {"rssismf2g", 0x00000008, 0, SROM_BXARSSI2G, 0x000f},
+ {"bxa2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x1800},
+ {"rssisav2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x0700},
+ {"rssismc2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x00f0},
+ {"rssismf2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x000f},
+ {"bxa5g", 0x00000008, 0, SROM_BXARSSI5G, 0x1800},
+ {"rssisav5g", 0x00000008, 0, SROM_BXARSSI5G, 0x0700},
+ {"rssismc5g", 0x00000008, 0, SROM_BXARSSI5G, 0x00f0},
+ {"rssismf5g", 0x00000008, 0, SROM_BXARSSI5G, 0x000f},
+ {"bxa5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x1800},
+ {"rssisav5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x0700},
+ {"rssismc5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x00f0},
+ {"rssismf5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x000f},
+ {"tri2g", 0x00000008, 0, SROM_TRI52G, 0x00ff},
+ {"tri5g", 0x00000008, 0, SROM_TRI52G, 0xff00},
+ {"tri5gl", 0x00000008, 0, SROM_TRI5GHL, 0x00ff},
+ {"tri5gh", 0x00000008, 0, SROM_TRI5GHL, 0xff00},
+ {"tri2g", 0x00000700, 0, SROM8_TRI52G, 0x00ff},
+ {"tri5g", 0x00000700, 0, SROM8_TRI52G, 0xff00},
+ {"tri5gl", 0x00000700, 0, SROM8_TRI5GHL, 0x00ff},
+ {"tri5gh", 0x00000700, 0, SROM8_TRI5GHL, 0xff00},
+ {"rxpo2g", 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0x00ff},
+ {"rxpo5g", 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0xff00},
+ {"rxpo2g", 0x00000700, SRFL_PRSIGN, SROM8_RXPO52G, 0x00ff},
+ {"rxpo5g", 0x00000700, SRFL_PRSIGN, SROM8_RXPO52G, 0xff00},
+ {"txchain", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_TXCHAIN_MASK},
+ {"rxchain", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_RXCHAIN_MASK},
+ {"antswitch", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_SWITCH_MASK},
+ {"txchain", 0x00000700, SRFL_NOFFS, SROM8_TXRXC, SROM4_TXCHAIN_MASK},
+ {"rxchain", 0x00000700, SRFL_NOFFS, SROM8_TXRXC, SROM4_RXCHAIN_MASK},
+ {"antswitch", 0x00000700, SRFL_NOFFS, SROM8_TXRXC, SROM4_SWITCH_MASK},
+ {"tssipos2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_TSSIPOS_MASK},
+ {"extpagain2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_EXTPA_GAIN_MASK},
+ {"pdetrange2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_PDET_RANGE_MASK},
+ {"triso2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_TR_ISO_MASK},
+ {"antswctl2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_ANTSWLUT_MASK},
+ {"tssipos5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_TSSIPOS_MASK},
+ {"extpagain5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_EXTPA_GAIN_MASK},
+ {"pdetrange5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_PDET_RANGE_MASK},
+ {"triso5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_TR_ISO_MASK},
+ {"antswctl5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_ANTSWLUT_MASK},
+ {"txpid2ga0", 0x000000f0, 0, SROM4_TXPID2G, 0x00ff},
+ {"txpid2ga1", 0x000000f0, 0, SROM4_TXPID2G, 0xff00},
+ {"txpid2ga2", 0x000000f0, 0, SROM4_TXPID2G + 1, 0x00ff},
+ {"txpid2ga3", 0x000000f0, 0, SROM4_TXPID2G + 1, 0xff00},
+ {"txpid5ga0", 0x000000f0, 0, SROM4_TXPID5G, 0x00ff},
+ {"txpid5ga1", 0x000000f0, 0, SROM4_TXPID5G, 0xff00},
+ {"txpid5ga2", 0x000000f0, 0, SROM4_TXPID5G + 1, 0x00ff},
+ {"txpid5ga3", 0x000000f0, 0, SROM4_TXPID5G + 1, 0xff00},
+ {"txpid5gla0", 0x000000f0, 0, SROM4_TXPID5GL, 0x00ff},
+ {"txpid5gla1", 0x000000f0, 0, SROM4_TXPID5GL, 0xff00},
+ {"txpid5gla2", 0x000000f0, 0, SROM4_TXPID5GL + 1, 0x00ff},
+ {"txpid5gla3", 0x000000f0, 0, SROM4_TXPID5GL + 1, 0xff00},
+ {"txpid5gha0", 0x000000f0, 0, SROM4_TXPID5GH, 0x00ff},
+ {"txpid5gha1", 0x000000f0, 0, SROM4_TXPID5GH, 0xff00},
+ {"txpid5gha2", 0x000000f0, 0, SROM4_TXPID5GH + 1, 0x00ff},
+ {"txpid5gha3", 0x000000f0, 0, SROM4_TXPID5GH + 1, 0xff00},
+
+ {"ccode", 0x0000000f, SRFL_CCODE, SROM_CCODE, 0xffff},
+ {"ccode", 0x00000010, SRFL_CCODE, SROM4_CCODE, 0xffff},
+ {"ccode", 0x000000e0, SRFL_CCODE, SROM5_CCODE, 0xffff},
+ {"ccode", 0x00000700, SRFL_CCODE, SROM8_CCODE, 0xffff},
+ {"macaddr", 0x00000700, SRFL_ETHADDR, SROM8_MACHI, 0xffff},
+ {"macaddr", 0x000000e0, SRFL_ETHADDR, SROM5_MACHI, 0xffff},
+ {"macaddr", 0x00000010, SRFL_ETHADDR, SROM4_MACHI, 0xffff},
+ {"macaddr", 0x00000008, SRFL_ETHADDR, SROM3_MACHI, 0xffff},
+ {"il0macaddr", 0x00000007, SRFL_ETHADDR, SROM_MACHI_IL0, 0xffff},
+ {"et1macaddr", 0x00000007, SRFL_ETHADDR, SROM_MACHI_ET1, 0xffff},
+ {"leddc", 0x00000700, SRFL_NOFFS|SRFL_LEDDC, SROM8_LEDDC, 0xffff},
+ {"leddc", 0x000000e0, SRFL_NOFFS|SRFL_LEDDC, SROM5_LEDDC, 0xffff},
+ {"leddc", 0x00000010, SRFL_NOFFS|SRFL_LEDDC, SROM4_LEDDC, 0xffff},
+ {"leddc", 0x00000008, SRFL_NOFFS|SRFL_LEDDC, SROM3_LEDDC, 0xffff},
+
+ {"tempthresh", 0x00000700, 0, SROM8_THERMAL, 0xff00},
+ {"tempoffset", 0x00000700, 0, SROM8_THERMAL, 0x00ff},
+ {"rawtempsense", 0x00000700, SRFL_PRHEX, SROM8_MPWR_RAWTS, 0x01ff},
+ {"measpower", 0x00000700, SRFL_PRHEX, SROM8_MPWR_RAWTS, 0xfe00},
+ {"tempsense_slope", 0x00000700, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0x00ff},
+ {"tempcorrx", 0x00000700, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0xfc00},
+ {"tempsense_option", 0x00000700, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0x0300},
+ {"freqoffset_corr", 0x00000700, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x000f},
+ {"iqcal_swp_dis", 0x00000700, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x0010},
+ {"hw_iqcal_en", 0x00000700, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x0020},
+ {"elna2g", 0x00000700, 0, SROM8_EXTLNAGAIN, 0x00ff},
+ {"elna5g", 0x00000700, 0, SROM8_EXTLNAGAIN, 0xff00},
+ {"phycal_tempdelta", 0x00000700, 0, SROM8_PHYCAL_TEMPDELTA, 0x00ff},
+ {"temps_period", 0x00000700, 0, SROM8_PHYCAL_TEMPDELTA, 0x0f00},
+ {"temps_hysteresis", 0x00000700, 0, SROM8_PHYCAL_TEMPDELTA, 0xf000},
+ {"measpower1", 0x00000700, SRFL_PRHEX, SROM8_MPWR_1_AND_2, 0x007f},
+ {"measpower2", 0x00000700, SRFL_PRHEX, SROM8_MPWR_1_AND_2, 0x3f80},
+
+ {"cck2gpo", 0x000000f0, 0, SROM4_2G_CCKPO, 0xffff},
+ {"cck2gpo", 0x00000100, 0, SROM8_2G_CCKPO, 0xffff},
+ {"ofdm2gpo", 0x000000f0, SRFL_MORE, SROM4_2G_OFDMPO, 0xffff},
+ {"", 0, 0, SROM4_2G_OFDMPO + 1, 0xffff},
+ {"ofdm5gpo", 0x000000f0, SRFL_MORE, SROM4_5G_OFDMPO, 0xffff},
+ {"", 0, 0, SROM4_5G_OFDMPO + 1, 0xffff},
+ {"ofdm5glpo", 0x000000f0, SRFL_MORE, SROM4_5GL_OFDMPO, 0xffff},
+ {"", 0, 0, SROM4_5GL_OFDMPO + 1, 0xffff},
+ {"ofdm5ghpo", 0x000000f0, SRFL_MORE, SROM4_5GH_OFDMPO, 0xffff},
+ {"", 0, 0, SROM4_5GH_OFDMPO + 1, 0xffff},
+ {"ofdm2gpo", 0x00000100, SRFL_MORE, SROM8_2G_OFDMPO, 0xffff},
+ {"", 0, 0, SROM8_2G_OFDMPO + 1, 0xffff},
+ {"ofdm5gpo", 0x00000100, SRFL_MORE, SROM8_5G_OFDMPO, 0xffff},
+ {"", 0, 0, SROM8_5G_OFDMPO + 1, 0xffff},
+ {"ofdm5glpo", 0x00000100, SRFL_MORE, SROM8_5GL_OFDMPO, 0xffff},
+ {"", 0, 0, SROM8_5GL_OFDMPO + 1, 0xffff},
+ {"ofdm5ghpo", 0x00000100, SRFL_MORE, SROM8_5GH_OFDMPO, 0xffff},
+ {"", 0, 0, SROM8_5GH_OFDMPO + 1, 0xffff},
+ {"mcs2gpo0", 0x000000f0, 0, SROM4_2G_MCSPO, 0xffff},
+ {"mcs2gpo1", 0x000000f0, 0, SROM4_2G_MCSPO + 1, 0xffff},
+ {"mcs2gpo2", 0x000000f0, 0, SROM4_2G_MCSPO + 2, 0xffff},
+ {"mcs2gpo3", 0x000000f0, 0, SROM4_2G_MCSPO + 3, 0xffff},
+ {"mcs2gpo4", 0x000000f0, 0, SROM4_2G_MCSPO + 4, 0xffff},
+ {"mcs2gpo5", 0x000000f0, 0, SROM4_2G_MCSPO + 5, 0xffff},
+ {"mcs2gpo6", 0x000000f0, 0, SROM4_2G_MCSPO + 6, 0xffff},
+ {"mcs2gpo7", 0x000000f0, 0, SROM4_2G_MCSPO + 7, 0xffff},
+ {"mcs5gpo0", 0x000000f0, 0, SROM4_5G_MCSPO, 0xffff},
+ {"mcs5gpo1", 0x000000f0, 0, SROM4_5G_MCSPO + 1, 0xffff},
+ {"mcs5gpo2", 0x000000f0, 0, SROM4_5G_MCSPO + 2, 0xffff},
+ {"mcs5gpo3", 0x000000f0, 0, SROM4_5G_MCSPO + 3, 0xffff},
+ {"mcs5gpo4", 0x000000f0, 0, SROM4_5G_MCSPO + 4, 0xffff},
+ {"mcs5gpo5", 0x000000f0, 0, SROM4_5G_MCSPO + 5, 0xffff},
+ {"mcs5gpo6", 0x000000f0, 0, SROM4_5G_MCSPO + 6, 0xffff},
+ {"mcs5gpo7", 0x000000f0, 0, SROM4_5G_MCSPO + 7, 0xffff},
+ {"mcs5glpo0", 0x000000f0, 0, SROM4_5GL_MCSPO, 0xffff},
+ {"mcs5glpo1", 0x000000f0, 0, SROM4_5GL_MCSPO + 1, 0xffff},
+ {"mcs5glpo2", 0x000000f0, 0, SROM4_5GL_MCSPO + 2, 0xffff},
+ {"mcs5glpo3", 0x000000f0, 0, SROM4_5GL_MCSPO + 3, 0xffff},
+ {"mcs5glpo4", 0x000000f0, 0, SROM4_5GL_MCSPO + 4, 0xffff},
+ {"mcs5glpo5", 0x000000f0, 0, SROM4_5GL_MCSPO + 5, 0xffff},
+ {"mcs5glpo6", 0x000000f0, 0, SROM4_5GL_MCSPO + 6, 0xffff},
+ {"mcs5glpo7", 0x000000f0, 0, SROM4_5GL_MCSPO + 7, 0xffff},
+ {"mcs5ghpo0", 0x000000f0, 0, SROM4_5GH_MCSPO, 0xffff},
+ {"mcs5ghpo1", 0x000000f0, 0, SROM4_5GH_MCSPO + 1, 0xffff},
+ {"mcs5ghpo2", 0x000000f0, 0, SROM4_5GH_MCSPO + 2, 0xffff},
+ {"mcs5ghpo3", 0x000000f0, 0, SROM4_5GH_MCSPO + 3, 0xffff},
+ {"mcs5ghpo4", 0x000000f0, 0, SROM4_5GH_MCSPO + 4, 0xffff},
+ {"mcs5ghpo5", 0x000000f0, 0, SROM4_5GH_MCSPO + 5, 0xffff},
+ {"mcs5ghpo6", 0x000000f0, 0, SROM4_5GH_MCSPO + 6, 0xffff},
+ {"mcs5ghpo7", 0x000000f0, 0, SROM4_5GH_MCSPO + 7, 0xffff},
+ {"mcs2gpo0", 0x00000100, 0, SROM8_2G_MCSPO, 0xffff},
+ {"mcs2gpo1", 0x00000100, 0, SROM8_2G_MCSPO + 1, 0xffff},
+ {"mcs2gpo2", 0x00000100, 0, SROM8_2G_MCSPO + 2, 0xffff},
+ {"mcs2gpo3", 0x00000100, 0, SROM8_2G_MCSPO + 3, 0xffff},
+ {"mcs2gpo4", 0x00000100, 0, SROM8_2G_MCSPO + 4, 0xffff},
+ {"mcs2gpo5", 0x00000100, 0, SROM8_2G_MCSPO + 5, 0xffff},
+ {"mcs2gpo6", 0x00000100, 0, SROM8_2G_MCSPO + 6, 0xffff},
+ {"mcs2gpo7", 0x00000100, 0, SROM8_2G_MCSPO + 7, 0xffff},
+ {"mcs5gpo0", 0x00000100, 0, SROM8_5G_MCSPO, 0xffff},
+ {"mcs5gpo1", 0x00000100, 0, SROM8_5G_MCSPO + 1, 0xffff},
+ {"mcs5gpo2", 0x00000100, 0, SROM8_5G_MCSPO + 2, 0xffff},
+ {"mcs5gpo3", 0x00000100, 0, SROM8_5G_MCSPO + 3, 0xffff},
+ {"mcs5gpo4", 0x00000100, 0, SROM8_5G_MCSPO + 4, 0xffff},
+ {"mcs5gpo5", 0x00000100, 0, SROM8_5G_MCSPO + 5, 0xffff},
+ {"mcs5gpo6", 0x00000100, 0, SROM8_5G_MCSPO + 6, 0xffff},
+ {"mcs5gpo7", 0x00000100, 0, SROM8_5G_MCSPO + 7, 0xffff},
+ {"mcs5glpo0", 0x00000100, 0, SROM8_5GL_MCSPO, 0xffff},
+ {"mcs5glpo1", 0x00000100, 0, SROM8_5GL_MCSPO + 1, 0xffff},
+ {"mcs5glpo2", 0x00000100, 0, SROM8_5GL_MCSPO + 2, 0xffff},
+ {"mcs5glpo3", 0x00000100, 0, SROM8_5GL_MCSPO + 3, 0xffff},
+ {"mcs5glpo4", 0x00000100, 0, SROM8_5GL_MCSPO + 4, 0xffff},
+ {"mcs5glpo5", 0x00000100, 0, SROM8_5GL_MCSPO + 5, 0xffff},
+ {"mcs5glpo6", 0x00000100, 0, SROM8_5GL_MCSPO + 6, 0xffff},
+ {"mcs5glpo7", 0x00000100, 0, SROM8_5GL_MCSPO + 7, 0xffff},
+ {"mcs5ghpo0", 0x00000100, 0, SROM8_5GH_MCSPO, 0xffff},
+ {"mcs5ghpo1", 0x00000100, 0, SROM8_5GH_MCSPO + 1, 0xffff},
+ {"mcs5ghpo2", 0x00000100, 0, SROM8_5GH_MCSPO + 2, 0xffff},
+ {"mcs5ghpo3", 0x00000100, 0, SROM8_5GH_MCSPO + 3, 0xffff},
+ {"mcs5ghpo4", 0x00000100, 0, SROM8_5GH_MCSPO + 4, 0xffff},
+ {"mcs5ghpo5", 0x00000100, 0, SROM8_5GH_MCSPO + 5, 0xffff},
+ {"mcs5ghpo6", 0x00000100, 0, SROM8_5GH_MCSPO + 6, 0xffff},
+ {"mcs5ghpo7", 0x00000100, 0, SROM8_5GH_MCSPO + 7, 0xffff},
+ {"cddpo", 0x000000f0, 0, SROM4_CDDPO, 0xffff},
+ {"stbcpo", 0x000000f0, 0, SROM4_STBCPO, 0xffff},
+ {"bw40po", 0x000000f0, 0, SROM4_BW40PO, 0xffff},
+ {"bwduppo", 0x000000f0, 0, SROM4_BWDUPPO, 0xffff},
+ {"cddpo", 0x00000100, 0, SROM8_CDDPO, 0xffff},
+ {"stbcpo", 0x00000100, 0, SROM8_STBCPO, 0xffff},
+ {"bw40po", 0x00000100, 0, SROM8_BW40PO, 0xffff},
+ {"bwduppo", 0x00000100, 0, SROM8_BWDUPPO, 0xffff},
+
+ /* power per rate from sromrev 9 */
+ {"cckbw202gpo", 0x00000600, 0, SROM9_2GPO_CCKBW20, 0xffff},
+ {"cckbw20ul2gpo", 0x00000600, 0, SROM9_2GPO_CCKBW20UL, 0xffff},
+ {"legofdmbw202gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_LOFDMBW20, 0xffff},
+ {"", 0, 0, SROM9_2GPO_LOFDMBW20 + 1, 0xffff},
+ {"legofdmbw20ul2gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_LOFDMBW20UL, 0xffff},
+ {"", 0, 0, SROM9_2GPO_LOFDMBW20UL + 1, 0xffff},
+ {"legofdmbw205glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_LOFDMBW20, 0xffff},
+ {"", 0, 0, SROM9_5GLPO_LOFDMBW20 + 1, 0xffff},
+ {"legofdmbw20ul5glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_LOFDMBW20UL, 0xffff},
+ {"", 0, 0, SROM9_5GLPO_LOFDMBW20UL + 1, 0xffff},
+ {"legofdmbw205gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_LOFDMBW20, 0xffff},
+ {"", 0, 0, SROM9_5GMPO_LOFDMBW20 + 1, 0xffff},
+ {"legofdmbw20ul5gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_LOFDMBW20UL, 0xffff},
+ {"", 0, 0, SROM9_5GMPO_LOFDMBW20UL + 1, 0xffff},
+ {"legofdmbw205ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_LOFDMBW20, 0xffff},
+ {"", 0, 0, SROM9_5GHPO_LOFDMBW20 + 1, 0xffff},
+ {"legofdmbw20ul5ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_LOFDMBW20UL, 0xffff},
+ {"", 0, 0, SROM9_5GHPO_LOFDMBW20UL + 1, 0xffff},
+ {"mcsbw202gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_MCSBW20, 0xffff},
+ {"", 0, 0, SROM9_2GPO_MCSBW20 + 1, 0xffff},
+ {"mcsbw20ul2gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_MCSBW20UL, 0xffff},
+ {"", 0, 0, SROM9_2GPO_MCSBW20UL + 1, 0xffff},
+ {"mcsbw402gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_MCSBW40, 0xffff},
+ {"", 0, 0, SROM9_2GPO_MCSBW40 + 1, 0xffff},
+ {"mcsbw205glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_MCSBW20, 0xffff},
+ {"", 0, 0, SROM9_5GLPO_MCSBW20 + 1, 0xffff},
+ {"mcsbw20ul5glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_MCSBW20UL, 0xffff},
+ {"", 0, 0, SROM9_5GLPO_MCSBW20UL + 1, 0xffff},
+ {"mcsbw405glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_MCSBW40, 0xffff},
+ {"", 0, 0, SROM9_5GLPO_MCSBW40 + 1, 0xffff},
+ {"mcsbw205gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_MCSBW20, 0xffff},
+ {"", 0, 0, SROM9_5GMPO_MCSBW20 + 1, 0xffff},
+ {"mcsbw20ul5gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_MCSBW20UL, 0xffff},
+ {"", 0, 0, SROM9_5GMPO_MCSBW20UL + 1, 0xffff},
+ {"mcsbw405gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_MCSBW40, 0xffff},
+ {"", 0, 0, SROM9_5GMPO_MCSBW40 + 1, 0xffff},
+ {"mcsbw205ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_MCSBW20, 0xffff},
+ {"", 0, 0, SROM9_5GHPO_MCSBW20 + 1, 0xffff},
+ {"mcsbw20ul5ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_MCSBW20UL, 0xffff},
+ {"", 0, 0, SROM9_5GHPO_MCSBW20UL + 1, 0xffff},
+ {"mcsbw405ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_MCSBW40, 0xffff},
+ {"", 0, 0, SROM9_5GHPO_MCSBW40 + 1, 0xffff},
+ {"mcs32po", 0x00000600, 0, SROM9_PO_MCS32, 0xffff},
+ {"legofdm40duppo", 0x00000600, 0, SROM9_PO_LOFDM40DUP, 0xffff},
+ {"pcieingress_war", 0x00000700, 0, SROM8_PCIEINGRESS_WAR, 0xf},
+ {"rxgainerr2ga0", 0x00000700, 0, SROM8_RXGAINERR_2G, 0x003f},
+ {"rxgainerr2ga1", 0x00000700, 0, SROM8_RXGAINERR_2G, 0x07c0},
+ {"rxgainerr2ga2", 0x00000700, 0, SROM8_RXGAINERR_2G, 0xf800},
+ {"rxgainerr5gla0", 0x00000700, 0, SROM8_RXGAINERR_5GL, 0x003f},
+ {"rxgainerr5gla1", 0x00000700, 0, SROM8_RXGAINERR_5GL, 0x07c0},
+ {"rxgainerr5gla2", 0x00000700, 0, SROM8_RXGAINERR_5GL, 0xf800},
+ {"rxgainerr5gma0", 0x00000700, 0, SROM8_RXGAINERR_5GM, 0x003f},
+ {"rxgainerr5gma1", 0x00000700, 0, SROM8_RXGAINERR_5GM, 0x07c0},
+ {"rxgainerr5gma2", 0x00000700, 0, SROM8_RXGAINERR_5GM, 0xf800},
+ {"rxgainerr5gha0", 0x00000700, 0, SROM8_RXGAINERR_5GH, 0x003f},
+ {"rxgainerr5gha1", 0x00000700, 0, SROM8_RXGAINERR_5GH, 0x07c0},
+ {"rxgainerr5gha2", 0x00000700, 0, SROM8_RXGAINERR_5GH, 0xf800},
+ {"rxgainerr5gua0", 0x00000700, 0, SROM8_RXGAINERR_5GU, 0x003f},
+ {"rxgainerr5gua1", 0x00000700, 0, SROM8_RXGAINERR_5GU, 0x07c0},
+ {"rxgainerr5gua2", 0x00000700, 0, SROM8_RXGAINERR_5GU, 0xf800},
+ {"sar2g", 0x00000600, 0, SROM9_SAR, 0x00ff},
+ {"sar5g", 0x00000600, 0, SROM9_SAR, 0xff00},
+ {"noiselvl2ga0", 0x00000700, 0, SROM8_NOISELVL_2G, 0x001f},
+ {"noiselvl2ga1", 0x00000700, 0, SROM8_NOISELVL_2G, 0x03e0},
+ {"noiselvl2ga2", 0x00000700, 0, SROM8_NOISELVL_2G, 0x7c00},
+ {"noiselvl5gla0", 0x00000700, 0, SROM8_NOISELVL_5GL, 0x001f},
+ {"noiselvl5gla1", 0x00000700, 0, SROM8_NOISELVL_5GL, 0x03e0},
+ {"noiselvl5gla2", 0x00000700, 0, SROM8_NOISELVL_5GL, 0x7c00},
+ {"noiselvl5gma0", 0x00000700, 0, SROM8_NOISELVL_5GM, 0x001f},
+ {"noiselvl5gma1", 0x00000700, 0, SROM8_NOISELVL_5GM, 0x03e0},
+ {"noiselvl5gma2", 0x00000700, 0, SROM8_NOISELVL_5GM, 0x7c00},
+ {"noiselvl5gha0", 0x00000700, 0, SROM8_NOISELVL_5GH, 0x001f},
+ {"noiselvl5gha1", 0x00000700, 0, SROM8_NOISELVL_5GH, 0x03e0},
+ {"noiselvl5gha2", 0x00000700, 0, SROM8_NOISELVL_5GH, 0x7c00},
+ {"noiselvl5gua0", 0x00000700, 0, SROM8_NOISELVL_5GU, 0x001f},
+ {"noiselvl5gua1", 0x00000700, 0, SROM8_NOISELVL_5GU, 0x03e0},
+ {"noiselvl5gua2", 0x00000700, 0, SROM8_NOISELVL_5GU, 0x7c00},
+ {"subband5gver", 0x00000700, 0, SROM8_SUBBAND_PPR, 0x7},
+
+ {"cckPwrOffset", 0x00000400, 0, SROM10_CCKPWROFFSET, 0xffff},
+ /* swctrlmap_2g array, note that the last element doesn't have the SRFL_ARRAY flag set */
+ {"swctrlmap_2g", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G, 0xffff},
+ {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 1, 0xffff},
+ {"", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 2, 0xffff},
+ {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 3, 0xffff},
+ {"", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 4, 0xffff},
+ {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 5, 0xffff},
+ {"", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 6, 0xffff},
+ {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 7, 0xffff},
+ {"", 0x00000400, SRFL_PRHEX, SROM10_SWCTRLMAP_2G + 8, 0xffff},
+
+ /* sromrev 11 */
+ {"boardflags3", 0xfffff800, SRFL_PRHEX|SRFL_MORE, SROM11_BFL3, 0xffff},
+ {"", 0, 0, SROM11_BFL3, 0xffff},
+ {"boardnum", 0xfffff800, 0, SROM11_MACLO, 0xffff},
+ {"macaddr", 0xfffff800, SRFL_ETHADDR, SROM11_MACHI, 0xffff},
+ {"ccode", 0xfffff800, SRFL_CCODE, SROM11_CCODE, 0xffff},
+ {"regrev", 0xfffff800, 0, SROM11_REGREV, 0x00ff},
+ {"ledbh0", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH10, 0x00ff},
+ {"ledbh1", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH10, 0xff00},
+ {"ledbh2", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH32, 0x00ff},
+ {"ledbh3", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH32, 0xff00},
+ {"leddc", 0xfffff800, SRFL_NOFFS|SRFL_LEDDC, SROM11_LEDDC, 0xffff},
+ {"aa2g", 0xfffff800, 0, SROM11_AA, 0x00ff},
+ {"aa5g", 0xfffff800, 0, SROM11_AA, 0xff00},
+ {"agbg0", 0xfffff800, 0, SROM11_AGBG10, 0x00ff},
+ {"agbg1", 0xfffff800, 0, SROM11_AGBG10, 0xff00},
+ {"agbg2", 0xfffff800, 0, SROM11_AGBG2A0, 0x00ff},
+ {"aga0", 0xfffff800, 0, SROM11_AGBG2A0, 0xff00},
+ {"aga1", 0xfffff800, 0, SROM11_AGA21, 0x00ff},
+ {"aga2", 0xfffff800, 0, SROM11_AGA21, 0xff00},
+ {"txchain", 0xfffff800, SRFL_NOFFS, SROM11_TXRXC, SROM4_TXCHAIN_MASK},
+ {"rxchain", 0xfffff800, SRFL_NOFFS, SROM11_TXRXC, SROM4_RXCHAIN_MASK},
+ {"antswitch", 0xfffff800, SRFL_NOFFS, SROM11_TXRXC, SROM4_SWITCH_MASK},
+
+ {"tssiposslope2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x0001},
+ {"epagain2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x000e},
+ {"pdgain2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x01f0},
+ {"tworangetssi2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x0200},
+ {"papdcap2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x0400},
+ {"femctrl", 0xfffff800, 0, SROM11_FEM_CFG1, 0xf800},
+
+ {"tssiposslope5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x0001},
+ {"epagain5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x000e},
+ {"pdgain5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x01f0},
+ {"tworangetssi5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x0200},
+ {"papdcap5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x0400},
+ {"gainctrlsph", 0xfffff800, 0, SROM11_FEM_CFG2, 0xf800},
+
+ {"tempthresh", 0xfffff800, 0, SROM11_THERMAL, 0xff00},
+ {"tempoffset", 0xfffff800, 0, SROM11_THERMAL, 0x00ff},
+ {"rawtempsense", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_RAWTS, 0x01ff},
+ {"measpower", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_RAWTS, 0xfe00},
+ {"tempsense_slope", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0x00ff},
+ {"tempcorrx", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0xfc00},
+ {"tempsense_option", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0x0300},
+ {"phycal_tempdelta", 0xfffff800, 0, SROM11_PHYCAL_TEMPDELTA, 0x00ff},
+ {"temps_period", 0xfffff800, 0, SROM11_PHYCAL_TEMPDELTA, 0x0f00},
+ {"temps_hysteresis", 0xfffff800, 0, SROM11_PHYCAL_TEMPDELTA, 0xf000},
+ {"measpower1", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_1_AND_2, 0x007f},
+ {"measpower2", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_1_AND_2, 0x3f80},
+ {"pdoffset40ma0", 0xfffff800, 0, SROM11_PDOFF_40M_A0, 0xffff},
+ {"pdoffset40ma1", 0xfffff800, 0, SROM11_PDOFF_40M_A1, 0xffff},
+ {"pdoffset40ma2", 0xfffff800, 0, SROM11_PDOFF_40M_A2, 0xffff},
+ {"pdoffset80ma0", 0xfffff800, 0, SROM11_PDOFF_80M_A0, 0xffff},
+ {"pdoffset80ma1", 0xfffff800, 0, SROM11_PDOFF_80M_A1, 0xffff},
+ {"pdoffset80ma2", 0xfffff800, 0, SROM11_PDOFF_80M_A2, 0xffff},
+
+ {"subband5gver", 0xfffff800, SRFL_PRHEX, SROM11_SUBBAND5GVER, 0xffff},
+
+ /* power per rate */
+ {"cckbw202gpo", 0xfffff800, 0, SROM11_CCKBW202GPO, 0xffff},
+ {"cckbw20ul2gpo", 0xfffff800, 0, SROM11_CCKBW20UL2GPO, 0xffff},
+ {"mcsbw202gpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW202GPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW202GPO_1, 0xffff},
+ {"mcsbw402gpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW402GPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW402GPO_1, 0xffff},
+ {"dot11agofdmhrbw202gpo", 0xfffff800, 0, SROM11_DOT11AGOFDMHRBW202GPO, 0xffff},
+ {"ofdmlrbw202gpo", 0xfffff800, 0, SROM11_OFDMLRBW202GPO, 0xffff},
+ {"mcsbw205glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW205GLPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW205GLPO_1, 0xffff},
+ {"mcsbw405glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW405GLPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW405GLPO_1, 0xffff},
+ {"mcsbw805glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW805GLPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW805GLPO_1, 0xffff},
+ {"mcsbw1605glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW1605GLPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW1605GLPO_1, 0xffff},
+ {"mcsbw205gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW205GMPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW205GMPO_1, 0xffff},
+ {"mcsbw405gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW405GMPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW405GMPO_1, 0xffff},
+ {"mcsbw805gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW805GMPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW805GMPO_1, 0xffff},
+ {"mcsbw1605gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW1605GMPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW1605GMPO_1, 0xffff},
+ {"mcsbw205ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW205GHPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW205GHPO_1, 0xffff},
+ {"mcsbw405ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW405GHPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW405GHPO_1, 0xffff},
+ {"mcsbw805ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW805GHPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW805GHPO_1, 0xffff},
+ {"mcsbw1605ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW1605GHPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW1605GHPO_1, 0xffff},
+ {"mcslr5glpo", 0xfffff800, 0, SROM11_MCSLR5GLPO, 0xffff},
+ {"mcslr5gmpo", 0xfffff800, 0, SROM11_MCSLR5GMPO, 0xffff},
+ {"mcslr5ghpo", 0xfffff800, 0, SROM11_MCSLR5GHPO, 0xffff},
+ {"sb20in40hrrpo", 0xfffff800, 0, SROM11_SB20IN40HRPO, 0xffff},
+ {"sb20in80and160hr5glpo", 0xfffff800, 0, SROM11_SB20IN80AND160HR5GLPO, 0xffff},
+ {"sb40and80hr5glpo", 0xfffff800, 0, SROM11_SB40AND80HR5GLPO, 0xffff},
+ {"sb20in80and160hr5gmpo", 0xfffff800, 0, SROM11_SB20IN80AND160HR5GMPO, 0xffff},
+ {"sb40and80hr5gmpo", 0xfffff800, 0, SROM11_SB40AND80HR5GMPO, 0xffff},
+ {"sb20in80and160hr5ghpo", 0xfffff800, 0, SROM11_SB20IN80AND160HR5GHPO, 0xffff},
+ {"sb40and80hr5ghpo", 0xfffff800, 0, SROM11_SB40AND80HR5GHPO, 0xffff},
+ {"sb20in40lrpo", 0xfffff800, 0, SROM11_SB20IN40LRPO, 0xffff},
+ {"sb20in80and160lr5glpo", 0xfffff800, 0, SROM11_SB20IN80AND160LR5GLPO, 0xffff},
+ {"sb40and80lr5glpo", 0xfffff800, 0, SROM11_SB40AND80LR5GLPO, 0xffff},
+ {"sb20in80and160lr5gmpo", 0xfffff800, 0, SROM11_SB20IN80AND160LR5GMPO, 0xffff},
+ {"sb40and80lr5gmpo", 0xfffff800, 0, SROM11_SB40AND80LR5GMPO, 0xffff},
+ {"sb20in80and160lr5ghpo", 0xfffff800, 0, SROM11_SB20IN80AND160LR5GHPO, 0xffff},
+ {"sb40and80lr5ghpo", 0xfffff800, 0, SROM11_SB40AND80LR5GHPO, 0xffff},
+ {"dot11agduphrpo", 0xfffff800, 0, SROM11_DOT11AGDUPHRPO, 0xffff},
+ {"dot11agduplrpo", 0xfffff800, 0, SROM11_DOT11AGDUPLRPO, 0xffff},
+
+ /* Misc */
+ {"pcieingress_war", 0xfffff800, 0, SROM11_PCIEINGRESS_WAR, 0xf},
+ {"sar2g", 0xfffff800, 0, SROM11_SAR, 0x00ff},
+ {"sar5g", 0xfffff800, 0, SROM11_SAR, 0xff00},
+
+ {"noiselvl2ga0", 0xfffff800, 0, SROM11_NOISELVL_2G, 0x001f},
+ {"noiselvl2ga1", 0xfffff800, 0, SROM11_NOISELVL_2G, 0x03e0},
+ {"noiselvl2ga2", 0xfffff800, 0, SROM11_NOISELVL_2G, 0x7c00},
+ {"noiselvl5ga0", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GL, 0x001f},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GM, 0x001f},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GH, 0x001f},
+ {"", 0xfffff800, 0, SROM11_NOISELVL_5GU, 0x001f},
+ {"noiselvl5ga1", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GL, 0x03e0},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GM, 0x03e0},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GH, 0x03e0},
+ {"", 0xfffff800, 0, SROM11_NOISELVL_5GU, 0x03e0},
+ {"noiselvl5ga2", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GL, 0x7c00},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GM, 0x7c00},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GH, 0x7c00},
+ {"", 0xfffff800, 0, SROM11_NOISELVL_5GU, 0x7c00},
+
+ {"rxgainerr2ga0", 0xfffff800, 0, SROM11_RXGAINERR_2G, 0x003f},
+ {"rxgainerr2ga1", 0xfffff800, 0, SROM11_RXGAINERR_2G, 0x07c0},
+ {"rxgainerr2ga2", 0xfffff800, 0, SROM11_RXGAINERR_2G, 0xf800},
+ {"rxgainerr5ga0", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GL, 0x003f},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GM, 0x003f},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GH, 0x003f},
+ {"", 0xfffff800, 0, SROM11_RXGAINERR_5GU, 0x003f},
+ {"rxgainerr5ga1", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GL, 0x07c0},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GM, 0x07c0},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GH, 0x07c0},
+ {"", 0xfffff800, 0, SROM11_RXGAINERR_5GU, 0x07c0},
+ {"rxgainerr5ga2", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GL, 0xf800},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GM, 0xf800},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GH, 0xf800},
+ {"", 0xfffff800, 0, SROM11_RXGAINERR_5GU, 0xf800},
+
+ {NULL, 0, 0, 0, 0}
+};
+
+static const sromvar_t perpath_pci_sromvars[] = {
+ {"maxp2ga", 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0x00ff},
+ {"itt2ga", 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0xff00},
+ {"itt5ga", 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0xff00},
+ {"pa2gw0a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA, 0xffff},
+ {"pa2gw1a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 1, 0xffff},
+ {"pa2gw2a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 2, 0xffff},
+ {"pa2gw3a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 3, 0xffff},
+ {"maxp5ga", 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0x00ff},
+ {"maxp5gha", 0x000000f0, 0, SROM4_5GLH_MAXP, 0x00ff},
+ {"maxp5gla", 0x000000f0, 0, SROM4_5GLH_MAXP, 0xff00},
+ {"pa5gw0a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA, 0xffff},
+ {"pa5gw1a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 1, 0xffff},
+ {"pa5gw2a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 2, 0xffff},
+ {"pa5gw3a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 3, 0xffff},
+ {"pa5glw0a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA, 0xffff},
+ {"pa5glw1a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 1, 0xffff},
+ {"pa5glw2a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 2, 0xffff},
+ {"pa5glw3a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 3, 0xffff},
+ {"pa5ghw0a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA, 0xffff},
+ {"pa5ghw1a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 1, 0xffff},
+ {"pa5ghw2a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 2, 0xffff},
+ {"pa5ghw3a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 3, 0xffff},
+ {"maxp2ga", 0x00000700, 0, SROM8_2G_ITT_MAXP, 0x00ff},
+ {"itt2ga", 0x00000700, 0, SROM8_2G_ITT_MAXP, 0xff00},
+ {"itt5ga", 0x00000700, 0, SROM8_5G_ITT_MAXP, 0xff00},
+ {"pa2gw0a", 0x00000700, SRFL_PRHEX, SROM8_2G_PA, 0xffff},
+ {"pa2gw1a", 0x00000700, SRFL_PRHEX, SROM8_2G_PA + 1, 0xffff},
+ {"pa2gw2a", 0x00000700, SRFL_PRHEX, SROM8_2G_PA + 2, 0xffff},
+ {"maxp5ga", 0x00000700, 0, SROM8_5G_ITT_MAXP, 0x00ff},
+ {"maxp5gha", 0x00000700, 0, SROM8_5GLH_MAXP, 0x00ff},
+ {"maxp5gla", 0x00000700, 0, SROM8_5GLH_MAXP, 0xff00},
+ {"pa5gw0a", 0x00000700, SRFL_PRHEX, SROM8_5G_PA, 0xffff},
+ {"pa5gw1a", 0x00000700, SRFL_PRHEX, SROM8_5G_PA + 1, 0xffff},
+ {"pa5gw2a", 0x00000700, SRFL_PRHEX, SROM8_5G_PA + 2, 0xffff},
+ {"pa5glw0a", 0x00000700, SRFL_PRHEX, SROM8_5GL_PA, 0xffff},
+ {"pa5glw1a", 0x00000700, SRFL_PRHEX, SROM8_5GL_PA + 1, 0xffff},
+ {"pa5glw2a", 0x00000700, SRFL_PRHEX, SROM8_5GL_PA + 2, 0xffff},
+ {"pa5ghw0a", 0x00000700, SRFL_PRHEX, SROM8_5GH_PA, 0xffff},
+ {"pa5ghw1a", 0x00000700, SRFL_PRHEX, SROM8_5GH_PA + 1, 0xffff},
+ {"pa5ghw2a", 0x00000700, SRFL_PRHEX, SROM8_5GH_PA + 2, 0xffff},
+
+ /* sromrev 11 */
+ {"maxp2ga", 0xfffff800, 0, SROM11_2G_MAXP, 0x00ff},
+ {"pa2ga", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_2G_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_2G_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX, SROM11_2G_PA + 2, 0xffff},
+ {"rxgains5gmelnagaina", 0xfffff800, 0, SROM11_RXGAINS1, 0x0007},
+ {"rxgains5gmtrisoa", 0xfffff800, 0, SROM11_RXGAINS1, 0x0078},
+ {"rxgains5gmtrelnabypa", 0xfffff800, 0, SROM11_RXGAINS1, 0x0080},
+ {"rxgains5ghelnagaina", 0xfffff800, 0, SROM11_RXGAINS1, 0x0700},
+ {"rxgains5ghtrisoa", 0xfffff800, 0, SROM11_RXGAINS1, 0x7800},
+ {"rxgains5ghtrelnabypa", 0xfffff800, 0, SROM11_RXGAINS1, 0x8000},
+ {"rxgains2gelnagaina", 0xfffff800, 0, SROM11_RXGAINS, 0x0007},
+ {"rxgains2gtrisoa", 0xfffff800, 0, SROM11_RXGAINS, 0x0078},
+ {"rxgains2gtrelnabypa", 0xfffff800, 0, SROM11_RXGAINS, 0x0080},
+ {"rxgains5gelnagaina", 0xfffff800, 0, SROM11_RXGAINS, 0x0700},
+ {"rxgains5gtrisoa", 0xfffff800, 0, SROM11_RXGAINS, 0x7800},
+ {"rxgains5gtrelnabypa", 0xfffff800, 0, SROM11_RXGAINS, 0x8000},
+ {"maxp5ga", 0xfffff800, SRFL_ARRAY, SROM11_5GB1B0_MAXP, 0x00ff},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_5GB1B0_MAXP, 0xff00},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_5GB3B2_MAXP, 0x00ff},
+ {"", 0xfffff800, 0, SROM11_5GB3B2_MAXP, 0xff00},
+ {"pa5ga", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB3_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB3_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX, SROM11_5GB3_PA + 2, 0xffff},
+
+ {NULL, 0, 0, 0, 0}
+};
+
+#if !(defined(PHY_TYPE_HT) && defined(PHY_TYPE_N) && defined(PHY_TYPE_LP))
+#define PHY_TYPE_HT 7 /* HT-Phy value */
+#define PHY_TYPE_N 4 /* N-Phy value */
+#define PHY_TYPE_LP 5 /* LP-Phy value */
+#endif /* !(defined(PHY_TYPE_HT) && defined(PHY_TYPE_N) && defined(PHY_TYPE_LP)) */
+#if !defined(PHY_TYPE_AC)
+#define PHY_TYPE_AC 11 /* AC-Phy value */
+#endif /* !defined(PHY_TYPE_AC) */
+#if !defined(PHY_TYPE_NULL)
+#define PHY_TYPE_NULL 0xf /* Invalid Phy value */
+#endif /* !defined(PHY_TYPE_NULL) */
+
+typedef struct {
+ uint16 phy_type;
+ uint16 bandrange;
+ uint16 chain;
+ const char *vars;
+} pavars_t;
+
+static const pavars_t pavars[] = {
+ /* HTPHY */
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G, 0, "pa2gw0a0 pa2gw1a0 pa2gw2a0"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G, 1, "pa2gw0a1 pa2gw1a1 pa2gw2a1"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G, 2, "pa2gw0a2 pa2gw1a2 pa2gw2a2"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND0, 0, "pa5glw0a0 pa5glw1a0 pa5glw2a0"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND0, 1, "pa5glw0a1 pa5glw1a1 pa5glw2a1"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND0, 2, "pa5glw0a2 pa5glw1a2 pa5glw2a2"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND1, 0, "pa5gw0a0 pa5gw1a0 pa5gw2a0"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND1, 1, "pa5gw0a1 pa5gw1a1 pa5gw2a1"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND1, 2, "pa5gw0a2 pa5gw1a2 pa5gw2a2"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND2, 0, "pa5ghw0a0 pa5ghw1a0 pa5ghw2a0"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND2, 1, "pa5ghw0a1 pa5ghw1a1 pa5ghw2a1"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND2, 2, "pa5ghw0a2 pa5ghw1a2 pa5ghw2a2"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND3, 0, "pa5gw0a3 pa5gw1a3 pa5gw2a3"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND3, 1, "pa5glw0a3 pa5glw1a3 pa5glw2a3"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND3, 2, "pa5ghw0a3 pa5ghw1a3 pa5ghw2a3"},
+ /* NPHY */
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G, 0, "pa2gw0a0 pa2gw1a0 pa2gw2a0"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G, 1, "pa2gw0a1 pa2gw1a1 pa2gw2a1"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND0, 0, "pa5glw0a0 pa5glw1a0 pa5glw2a0"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND0, 1, "pa5glw0a1 pa5glw1a1 pa5glw2a1"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND1, 0, "pa5gw0a0 pa5gw1a0 pa5gw2a0"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND1, 1, "pa5gw0a1 pa5gw1a1 pa5gw2a1"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND2, 0, "pa5ghw0a0 pa5ghw1a0 pa5ghw2a0"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND2, 1, "pa5ghw0a1 pa5ghw1a1 pa5ghw2a1"},
+ /* LPPHY */
+ {PHY_TYPE_LP, WL_CHAN_FREQ_RANGE_2G, 0, "pa0b0 pa0b1 pa0b2"},
+ {PHY_TYPE_LP, WL_CHAN_FREQ_RANGE_5GL, 0, "pa1lob0 pa1lob1 pa1lob2"},
+ {PHY_TYPE_LP, WL_CHAN_FREQ_RANGE_5GM, 0, "pa1b0 pa1b1 pa1b2"},
+ {PHY_TYPE_LP, WL_CHAN_FREQ_RANGE_5GH, 0, "pa1hib0 pa1hib1 pa1hib2"},
+ /* ACPHY */
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga1"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 2, "pa2ga2"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 0, "pa5ga0"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 1, "pa5ga1"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 2, "pa5ga2"},
+ {PHY_TYPE_NULL, 0, 0, ""}
+};
+
+typedef struct {
+ uint16 phy_type;
+ uint16 bandrange;
+ const char *vars;
+} povars_t;
+
+static const povars_t povars[] = {
+ /* NPHY */
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G, "mcs2gpo0 mcs2gpo1 mcs2gpo2 mcs2gpo3 "
+ "mcs2gpo4 mcs2gpo5 mcs2gpo6 mcs2gpo7"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GL, "mcs5glpo0 mcs5glpo1 mcs5glpo2 mcs5glpo3 "
+ "mcs5glpo4 mcs5glpo5 mcs5glpo6 mcs5glpo7"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GM, "mcs5gpo0 mcs5gpo1 mcs5gpo2 mcs5gpo3 "
+ "mcs5gpo4 mcs5gpo5 mcs5gpo6 mcs5gpo7"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GH, "mcs5ghpo0 mcs5ghpo1 mcs5ghpo2 mcs5ghpo3 "
+ "mcs5ghpo4 mcs5ghpo5 mcs5ghpo6 mcs5ghpo7"},
+ {PHY_TYPE_NULL, 0, ""}
+};
+
+typedef struct {
+ uint8 tag; /* Broadcom subtag name */
+ uint32 revmask; /* Supported cis_sromrev */
+ uint8 len; /* Length field of the tuple, note that it includes the
+ * subtag name (1 byte): 1 + tuple content length
+ */
+ const char *params;
+} cis_tuple_t;
+
+#define OTP_RAW (0xff - 1) /* Reserved tuple number for wrvar Raw input */
+#define OTP_VERS_1 (0xff - 2) /* CISTPL_VERS_1 */
+#define OTP_MANFID (0xff - 3) /* CISTPL_MANFID */
+#define OTP_RAW1 (0xff - 4) /* Like RAW, but comes first */
+
+static const cis_tuple_t cis_hnbuvars[] = {
+ {OTP_RAW1, 0xffffffff, 0, ""}, /* special case */
+ {OTP_VERS_1, 0xffffffff, 0, "smanf sproductname"}, /* special case (non BRCM tuple) */
+ {OTP_MANFID, 0xffffffff, 4, "2manfid 2prodid"}, /* special case (non BRCM tuple) */
+	/* Unified OTP: tuple to embed USB manfid inside SDIO CIS */
+ {HNBU_UMANFID, 0xffffffff, 8, "8usbmanfid"},
+ {HNBU_SROMREV, 0xffffffff, 2, "1sromrev"},
+ /* NOTE: subdevid is also written to boardtype.
+ * Need to write HNBU_BOARDTYPE to change it if it is different.
+ */
+ {HNBU_CHIPID, 0xffffffff, 11, "2vendid 2devid 2chiprev 2subvendid 2subdevid"},
+ {HNBU_BOARDREV, 0xffffffff, 3, "2boardrev"},
+ {HNBU_PAPARMS, 0xffffffff, 10, "2pa0b0 2pa0b1 2pa0b2 1pa0itssit 1pa0maxpwr 1opo"},
+ {HNBU_AA, 0xffffffff, 3, "1aa2g 1aa5g"},
+ {HNBU_AA, 0xffffffff, 3, "1aa0 1aa1"}, /* backward compatibility */
+ {HNBU_AG, 0xffffffff, 5, "1ag0 1ag1 1ag2 1ag3"},
+ {HNBU_BOARDFLAGS, 0xffffffff, 13, "4boardflags 4boardflags2 4boardflags3"},
+ {HNBU_LEDS, 0xffffffff, 5, "1ledbh0 1ledbh1 1ledbh2 1ledbh3"},
+ {HNBU_CCODE, 0xffffffff, 4, "2ccode 1cctl"},
+ {HNBU_CCKPO, 0xffffffff, 3, "2cckpo"},
+ {HNBU_OFDMPO, 0xffffffff, 5, "4ofdmpo"},
+ {HNBU_PAPARMS5G, 0xffffffff, 23, "2pa1b0 2pa1b1 2pa1b2 2pa1lob0 2pa1lob1 2pa1lob2 "
+ "2pa1hib0 2pa1hib1 2pa1hib2 1pa1itssit "
+ "1pa1maxpwr 1pa1lomaxpwr 1pa1himaxpwr"},
+ {HNBU_RDLID, 0xffffffff, 3, "2rdlid"},
+ {HNBU_RSSISMBXA2G, 0xffffffff, 3, "0rssismf2g 0rssismc2g "
+ "0rssisav2g 0bxa2g"}, /* special case */
+ {HNBU_RSSISMBXA5G, 0xffffffff, 3, "0rssismf5g 0rssismc5g "
+ "0rssisav5g 0bxa5g"}, /* special case */
+ {HNBU_XTALFREQ, 0xffffffff, 5, "4xtalfreq"},
+ {HNBU_TRI2G, 0xffffffff, 2, "1tri2g"},
+ {HNBU_TRI5G, 0xffffffff, 4, "1tri5gl 1tri5g 1tri5gh"},
+ {HNBU_RXPO2G, 0xffffffff, 2, "1rxpo2g"},
+ {HNBU_RXPO5G, 0xffffffff, 2, "1rxpo5g"},
+ {HNBU_BOARDNUM, 0xffffffff, 3, "2boardnum"},
+ {HNBU_MACADDR, 0xffffffff, 7, "6macaddr"}, /* special case */
+ {HNBU_RDLSN, 0xffffffff, 3, "2rdlsn"},
+ {HNBU_BOARDTYPE, 0xffffffff, 3, "2boardtype"},
+ {HNBU_LEDDC, 0xffffffff, 3, "2leddc"},
+ {HNBU_RDLRNDIS, 0xffffffff, 2, "1rdlndis"},
+ {HNBU_CHAINSWITCH, 0xffffffff, 5, "1txchain 1rxchain 2antswitch"},
+ {HNBU_REGREV, 0xffffffff, 2, "1regrev"},
+ {HNBU_FEM, 0x000007fe, 5, "0antswctl2g 0triso2g 0pdetrange2g 0extpagain2g "
+ "0tssipos2g 0antswctl5g 0triso5g 0pdetrange5g 0extpagain5g 0tssipos5g"}, /* special case */
+ {HNBU_PAPARMS_C0, 0x000007fe, 31, "1maxp2ga0 1itt2ga0 2pa2gw0a0 2pa2gw1a0 "
+ "2pa2gw2a0 1maxp5ga0 1itt5ga0 1maxp5gha0 1maxp5gla0 2pa5gw0a0 2pa5gw1a0 2pa5gw2a0 "
+ "2pa5glw0a0 2pa5glw1a0 2pa5glw2a0 2pa5ghw0a0 2pa5ghw1a0 2pa5ghw2a0"},
+ {HNBU_PAPARMS_C1, 0x000007fe, 31, "1maxp2ga1 1itt2ga1 2pa2gw0a1 2pa2gw1a1 "
+ "2pa2gw2a1 1maxp5ga1 1itt5ga1 1maxp5gha1 1maxp5gla1 2pa5gw0a1 2pa5gw1a1 2pa5gw2a1 "
+ "2pa5glw0a1 2pa5glw1a1 2pa5glw2a1 2pa5ghw0a1 2pa5ghw1a1 2pa5ghw2a1"},
+ {HNBU_PO_CCKOFDM, 0xffffffff, 19, "2cck2gpo 4ofdm2gpo 4ofdm5gpo 4ofdm5glpo "
+ "4ofdm5ghpo"},
+ {HNBU_PO_MCS2G, 0xffffffff, 17, "2mcs2gpo0 2mcs2gpo1 2mcs2gpo2 2mcs2gpo3 "
+ "2mcs2gpo4 2mcs2gpo5 2mcs2gpo6 2mcs2gpo7"},
+ {HNBU_PO_MCS5GM, 0xffffffff, 17, "2mcs5gpo0 2mcs5gpo1 2mcs5gpo2 2mcs5gpo3 "
+ "2mcs5gpo4 2mcs5gpo5 2mcs5gpo6 2mcs5gpo7"},
+ {HNBU_PO_MCS5GLH, 0xffffffff, 33, "2mcs5glpo0 2mcs5glpo1 2mcs5glpo2 2mcs5glpo3 "
+ "2mcs5glpo4 2mcs5glpo5 2mcs5glpo6 2mcs5glpo7 "
+ "2mcs5ghpo0 2mcs5ghpo1 2mcs5ghpo2 2mcs5ghpo3 "
+ "2mcs5ghpo4 2mcs5ghpo5 2mcs5ghpo6 2mcs5ghpo7"},
+ {HNBU_CCKFILTTYPE, 0xffffffff, 2, "1cckdigfilttype"},
+ {HNBU_PO_CDD, 0xffffffff, 3, "2cddpo"},
+ {HNBU_PO_STBC, 0xffffffff, 3, "2stbcpo"},
+ {HNBU_PO_40M, 0xffffffff, 3, "2bw40po"},
+ {HNBU_PO_40MDUP, 0xffffffff, 3, "2bwduppo"},
+ {HNBU_RDLRWU, 0xffffffff, 2, "1rdlrwu"},
+ {HNBU_WPS, 0xffffffff, 3, "1wpsgpio 1wpsled"},
+ {HNBU_USBFS, 0xffffffff, 2, "1usbfs"},
+ {HNBU_ELNA2G, 0xffffffff, 2, "1elna2g"},
+ {HNBU_ELNA5G, 0xffffffff, 2, "1elna5g"},
+ {HNBU_CUSTOM1, 0xffffffff, 5, "4customvar1"},
+ {OTP_RAW, 0xffffffff, 0, ""}, /* special case */
+ {HNBU_OFDMPO5G, 0xffffffff, 13, "4ofdm5gpo 4ofdm5glpo 4ofdm5ghpo"},
+ {HNBU_USBEPNUM, 0xffffffff, 3, "2usbepnum"},
+ {HNBU_CCKBW202GPO, 0xffffffff, 5, "2cckbw202gpo 2cckbw20ul2gpo"},
+ {HNBU_LEGOFDMBW202GPO, 0xffffffff, 9, "4legofdmbw202gpo 4legofdmbw20ul2gp"},
+ {HNBU_LEGOFDMBW205GPO, 0xffffffff, 25, "4legofdmbw205glpo 4legofdmbw20ul5glpo "
+ "4legofdmbw205gmpo 4legofdmbw20ul5gmpo 4legofdmbw205ghpo 4legofdmbw20ul5ghpo"},
+ {HNBU_MCS2GPO, 0xffffffff, 13, "4mcsbw202gpo 4mcsbw20ul2gpo 4mcsbw402gpo"},
+ {HNBU_MCS5GLPO, 0xffffffff, 13, "4mcsbw205glpo 4mcsbw20ul5glpo 4mcsbw405glpo"},
+ {HNBU_MCS5GMPO, 0xffffffff, 13, "4mcsbw205gmpo 4mcsbw20ul5gmpo 4mcsbw405gmpo"},
+ {HNBU_MCS5GHPO, 0xffffffff, 13, "4mcsbw205ghpo 4mcsbw20ul5ghpo 4mcsbw405ghpo"},
+ {HNBU_MCS32PO, 0xffffffff, 3, "2mcs32po"},
+ {HNBU_LEG40DUPPO, 0xffffffff, 3, "2legofdm40duppo"},
+ {HNBU_TEMPTHRESH, 0xffffffff, 7, "1tempthresh 0temps_period 0temps_hysteresis "
+ "1tempoffset 1tempsense_slope 0tempcorrx 0tempsense_option "
+ "1phycal_tempdelta"}, /* special case */
+ {HNBU_MUXENAB, 0xffffffff, 2, "1muxenab"},
+ {HNBU_FEM_CFG, 0xfffff800, 5, "0femctrl 0papdcap2g 0tworangetssi2g 0pdgain2g "
+ "0epagain2g 0tssiposslope2g 0gainctrlsph 0papdcap5g 0tworangetssi5g 0pdgain5g 0epagain5g "
+ "0tssiposslope5g"}, /* special case */
+ {HNBU_ACPA_C0, 0xfffff800, 39, "2subband5gver 2maxp2ga0 2*3pa2ga0 "
+ "1*4maxp5ga0 2*12pa5ga0"},
+ {HNBU_ACPA_C1, 0xfffff800, 37, "2maxp2ga1 2*3pa2ga1 1*4maxp5ga1 2*12pa5ga1"},
+ {HNBU_ACPA_C2, 0xfffff800, 37, "2maxp2ga2 2*3pa2ga2 1*4maxp5ga2 2*12pa5ga2"},
+ {HNBU_MEAS_PWR, 0xfffff800, 5, "1measpower 1measpower1 1measpower2 2rawtempsense"},
+ {HNBU_PDOFF, 0xfffff800, 13, "2pdoffset40ma0 2pdoffset40ma1 2pdoffset40ma2 "
+ "2pdoffset80ma0 2pdoffset80ma1 2pdoffset80ma2"},
+ {HNBU_ACPPR_2GPO, 0xfffff800, 5, "2dot11agofdmhrbw202gpo 2ofdmlrbw202gpo"},
+ {HNBU_ACPPR_5GPO, 0xfffff800, 31, "4mcsbw805glpo 4mcsbw1605glpo 4mcsbw805gmpo "
+ "4mcsbw1605gmpo 4mcsbw805ghpo 4mcsbw1605ghpo 2mcslr5rlpo 2mcslr5gmpo 2mcslr5ghpo"},
+ {HNBU_ACPPR_SBPO, 0xfffff800, 33, "2sb20in40hrrpo 2sb20in80and160hr5glpo "
+ "2sb40and80hr5glpo 2sb20in80and160hr5gmpo 2sb40and80hr5gmpo 2sb20in80and160hr5ghpo "
+ "2sb40and80hr5ghpo 2sb20in40lrpo 2sb20in80and160lr5glpo 2sb40and80lr5glpo "
+ "2sb20in80and160lr5gmpo 2sb40and80lr5gmpo 2sb20in80and160lr5ghpo 2sb40and80lr5ghpo "
+ "2dot11agduphrpo 2dot11agduplrpo"},
+ {HNBU_NOISELVL, 0xfffff800, 16, "1noiselvl2ga0 1noiselvl2ga1 1noiselvl2ga2 "
+ "1*4noiselvl5ga0 1*4noiselvl5ga1 1*4noiselvl5ga2"},
+ {HNBU_RXGAIN_ERR, 0xfffff800, 16, "1rxgainerr2ga0 1rxgainerr2ga1 1rxgainerr2ga2 "
+ "1*4rxgainerr5ga0 1*4rxgainerr5ga1 1*4rxgainerr5ga2"},
+ {HNBU_AGBGA, 0xfffff800, 7, "1agbg0 1agbg1 1agbg2 1aga0 1aga1 1aga2"},
+ {HNBU_UUID, 0xffffffff, 17, "16uuid"},
+ {HNBU_WOWLGPIO, 0xffffffff, 2, "1wowl_gpio"},
+ {HNBU_ACRXGAINS_C0, 0xfffff800, 5, "0rxgains5gtrelnabypa0 0rxgains5gtrisoa0 "
+ "0rxgains5gelnagaina0 0rxgains2gtrelnabypa0 0rxgains2gtrisoa0 0rxgains2gelnagaina0 "
+ "0rxgains5ghtrelnabypa0 0rxgains5ghtrisoa0 0rxgains5ghelnagaina0 0rxgains5gmtrelnabypa0 "
+ "0rxgains5gmtrisoa0 0rxgains5gmelnagaina0"}, /* special case */
+ {HNBU_ACRXGAINS_C1, 0xfffff800, 5, "0rxgains5gtrelnabypa1 0rxgains5gtrisoa1 "
+ "0rxgains5gelnagaina1 0rxgains2gtrelnabypa1 0rxgains2gtrisoa1 0rxgains2gelnagaina1 "
+ "0rxgains5ghtrelnabypa1 0rxgains5ghtrisoa1 0rxgains5ghelnagaina1 0rxgains5gmtrelnabypa1 "
+ "0rxgains5gmtrisoa1 0rxgains5gmelnagaina1"}, /* special case */
+ {HNBU_ACRXGAINS_C2, 0xfffff800, 5, "0rxgains5gtrelnabypa2 0rxgains5gtrisoa2 "
+ "0rxgains5gelnagaina2 0rxgains2gtrelnabypa2 0rxgains2gtrisoa2 0rxgains2gelnagaina2 "
+ "0rxgains5ghtrelnabypa2 0rxgains5ghtrisoa2 0rxgains5ghelnagaina2 0rxgains5gmtrelnabypa2 "
+ "0rxgains5gmtrisoa2 0rxgains5gmelnagaina2"}, /* special case */
+ {0xFF, 0xffffffff, 0, ""}
+};
+
+#endif /* _bcmsrom_tbl_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmutils.h b/drivers/net/wireless/bcmdhd/include/bcmutils.h
new file mode 100644
index 000000000000..29f8dd7f9bdb
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmutils.h
@@ -0,0 +1,775 @@
+/*
+ * Misc useful os-independent macros and functions.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmutils.h 328848 2012-04-21 00:43:57Z $
+ */
+
+#ifndef _bcmutils_h_
+#define _bcmutils_h_
+
+#define bcm_strcpy_s(dst, noOfElements, src) strcpy((dst), (src))
+#define bcm_strncpy_s(dst, noOfElements, src, count) strncpy((dst), (src), (count))
+#define bcm_strcat_s(dst, noOfElements, src) strcat((dst), (src))
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef PKTQ_LOG
+#include <wlioctl.h>
+#endif
+
+
+#define _BCM_U 0x01
+#define _BCM_L 0x02
+#define _BCM_D 0x04
+#define _BCM_C 0x08
+#define _BCM_P 0x10
+#define _BCM_S 0x20
+#define _BCM_X 0x40
+#define _BCM_SP 0x80
+
+extern const unsigned char bcm_ctype[];
+#define bcm_ismask(x) (bcm_ctype[(int)(unsigned char)(x)])
+
+#define bcm_isalnum(c) ((bcm_ismask(c)&(_BCM_U|_BCM_L|_BCM_D)) != 0)
+#define bcm_isalpha(c) ((bcm_ismask(c)&(_BCM_U|_BCM_L)) != 0)
+#define bcm_iscntrl(c) ((bcm_ismask(c)&(_BCM_C)) != 0)
+#define bcm_isdigit(c) ((bcm_ismask(c)&(_BCM_D)) != 0)
+#define bcm_isgraph(c) ((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D)) != 0)
+#define bcm_islower(c) ((bcm_ismask(c)&(_BCM_L)) != 0)
+#define bcm_isprint(c) ((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D|_BCM_SP)) != 0)
+#define bcm_ispunct(c) ((bcm_ismask(c)&(_BCM_P)) != 0)
+#define bcm_isspace(c) ((bcm_ismask(c)&(_BCM_S)) != 0)
+#define bcm_isupper(c) ((bcm_ismask(c)&(_BCM_U)) != 0)
+#define bcm_isxdigit(c) ((bcm_ismask(c)&(_BCM_D|_BCM_X)) != 0)
+#define bcm_tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#define bcm_toupper(c) (bcm_islower((c)) ? ((c) + 'A' - 'a') : (c))
+
+
+
+struct bcmstrbuf {
+ char *buf;
+ unsigned int size;
+ char *origbuf;
+ unsigned int origsize;
+};
+
+
+#ifdef BCMDRIVER
+#include <osl.h>
+
+#define GPIO_PIN_NOTDEFINED 0x20
+
+
+#define SPINWAIT(exp, us) { \
+ uint countdown = (us) + 9; \
+ while ((exp) && (countdown >= 10)) {\
+ OSL_DELAY(10); \
+ countdown -= 10; \
+ } \
+}
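+
+/*
+ * Usage sketch (register and bit names are illustrative only): poll a status
+ * register until a busy bit clears, waiting at most 100 microseconds in
+ * 10 us steps, then report a timeout.
+ *
+ *	SPINWAIT((R_REG(osh, &regs->intstatus) & I_BUSY), 100);
+ *	if (R_REG(osh, &regs->intstatus) & I_BUSY)
+ *		err = BCME_NOTREADY;
+ */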
+
+
+#ifndef PKTQ_LEN_DEFAULT
+#define PKTQ_LEN_DEFAULT 128
+#endif
+#ifndef PKTQ_MAX_PREC
+#define PKTQ_MAX_PREC 16
+#endif
+
+typedef struct pktq_prec {
+ void *head;
+ void *tail;
+ uint16 len;
+ uint16 max;
+} pktq_prec_t;
+
+#ifdef PKTQ_LOG
+typedef struct {
+ uint32 requested;
+ uint32 stored;
+ uint32 saved;
+ uint32 selfsaved;
+ uint32 full_dropped;
+ uint32 dropped;
+ uint32 sacrificed;
+ uint32 busy;
+ uint32 retry;
+ uint32 ps_retry;
+ uint32 retry_drop;
+ uint32 max_avail;
+ uint32 max_used;
+ uint32 queue_capacity;
+} pktq_counters_t;
+#endif
+
+
+#define PKTQ_COMMON \
+ uint16 num_prec; \
+ uint16 hi_prec; \
+ uint16 max; \
+ uint16 len;
+
+
+struct pktq {
+ PKTQ_COMMON
+
+ struct pktq_prec q[PKTQ_MAX_PREC];
+#ifdef PKTQ_LOG
+ pktq_counters_t _prec_cnt[PKTQ_MAX_PREC];
+#endif
+};
+
+
+struct spktq {
+ PKTQ_COMMON
+
+ struct pktq_prec q[1];
+};
+
+#define PKTQ_PREC_ITER(pq, prec) for (prec = (pq)->num_prec - 1; prec >= 0; prec--)
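+
+/*
+ * Iteration sketch: PKTQ_PREC_ITER walks precedences from highest to lowest;
+ * combined with pktq_pdeq() (declared below) it can drain an entire queue.
+ * handle_pkt() is a hypothetical consumer.
+ *
+ *	int prec;
+ *	void *p;
+ *	PKTQ_PREC_ITER(pq, prec) {
+ *		while ((p = pktq_pdeq(pq, prec)) != NULL)
+ *			handle_pkt(p);
+ *	}
+ */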
+
+
+typedef bool (*ifpkt_cb_t)(void*, int);
+
+#ifdef BCMPKTPOOL
+#define POOL_ENAB(pool) ((pool) && (pool)->inited)
+#if defined(BCM4329C0)
+#define SHARED_POOL (pktpool_shared_ptr)
+#else
+#define SHARED_POOL (pktpool_shared)
+#endif
+#else
+#define POOL_ENAB(bus) 0
+#define SHARED_POOL ((struct pktpool *)NULL)
+#endif
+
+#ifndef PKTPOOL_LEN_MAX
+#define PKTPOOL_LEN_MAX 40
+#endif
+#define PKTPOOL_CB_MAX 3
+
+struct pktpool;
+typedef void (*pktpool_cb_t)(struct pktpool *pool, void *arg);
+typedef struct {
+ pktpool_cb_t cb;
+ void *arg;
+} pktpool_cbinfo_t;
+
+#ifdef BCMDBG_POOL
+
+#define POOL_IDLE 0
+#define POOL_RXFILL 1
+#define POOL_RXDH 2
+#define POOL_RXD11 3
+#define POOL_TXDH 4
+#define POOL_TXD11 5
+#define POOL_AMPDU 6
+#define POOL_TXENQ 7
+
+typedef struct {
+ void *p;
+ uint32 cycles;
+ uint32 dur;
+} pktpool_dbg_t;
+
+typedef struct {
+ uint8 txdh;
+ uint8 txd11;
+ uint8 enq;
+ uint8 rxdh;
+ uint8 rxd11;
+ uint8 rxfill;
+ uint8 idle;
+} pktpool_stats_t;
+#endif
+
+typedef struct pktpool {
+ bool inited;
+ uint16 r;
+ uint16 w;
+ uint16 len;
+ uint16 maxlen;
+ uint16 plen;
+ bool istx;
+ bool empty;
+ uint8 cbtoggle;
+ uint8 cbcnt;
+ uint8 ecbcnt;
+ bool emptycb_disable;
+ pktpool_cbinfo_t *availcb_excl;
+ pktpool_cbinfo_t cbs[PKTPOOL_CB_MAX];
+ pktpool_cbinfo_t ecbs[PKTPOOL_CB_MAX];
+ void *q[PKTPOOL_LEN_MAX + 1];
+
+#ifdef BCMDBG_POOL
+ uint8 dbg_cbcnt;
+ pktpool_cbinfo_t dbg_cbs[PKTPOOL_CB_MAX];
+ uint16 dbg_qlen;
+ pktpool_dbg_t dbg_q[PKTPOOL_LEN_MAX + 1];
+#endif
+} pktpool_t;
+
+#if defined(BCM4329C0)
+extern pktpool_t *pktpool_shared_ptr;
+#else
+extern pktpool_t *pktpool_shared;
+#endif
+
+extern int pktpool_init(osl_t *osh, pktpool_t *pktp, int *pktplen, int plen, bool istx);
+extern int pktpool_deinit(osl_t *osh, pktpool_t *pktp);
+extern int pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal);
+extern void* pktpool_get(pktpool_t *pktp);
+extern void pktpool_free(pktpool_t *pktp, void *p);
+extern int pktpool_add(pktpool_t *pktp, void *p);
+extern uint16 pktpool_avail(pktpool_t *pktp);
+extern int pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp);
+extern int pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb);
+extern int pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_setmaxlen(pktpool_t *pktp, uint16 maxlen);
+extern int pktpool_setmaxlen_strict(osl_t *osh, pktpool_t *pktp, uint16 maxlen);
+extern void pktpool_emptycb_disable(pktpool_t *pktp, bool disable);
+extern bool pktpool_emptycb_disabled(pktpool_t *pktp);
+
+#define POOLPTR(pp) ((pktpool_t *)(pp))
+#define pktpool_len(pp) (POOLPTR(pp)->len - 1)
+#define pktpool_plen(pp) (POOLPTR(pp)->plen)
+#define pktpool_maxlen(pp) (POOLPTR(pp)->maxlen)
+
+#ifdef BCMDBG_POOL
+extern int pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_start_trigger(pktpool_t *pktp, void *p);
+extern int pktpool_dbg_dump(pktpool_t *pktp);
+extern int pktpool_dbg_notify(pktpool_t *pktp);
+extern int pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats);
+#endif
+
+
+
+struct ether_addr;
+
+extern int ether_isbcast(const void *ea);
+extern int ether_isnulladdr(const void *ea);
+
+
+
+#define pktq_psetmax(pq, prec, _max) ((pq)->q[prec].max = (_max))
+#define pktq_pmax(pq, prec) ((pq)->q[prec].max)
+#define pktq_plen(pq, prec) ((pq)->q[prec].len)
+#define pktq_pavail(pq, prec) ((pq)->q[prec].max - (pq)->q[prec].len)
+#define pktq_pfull(pq, prec) ((pq)->q[prec].len >= (pq)->q[prec].max)
+#define pktq_pempty(pq, prec) ((pq)->q[prec].len == 0)
+
+#define pktq_ppeek(pq, prec) ((pq)->q[prec].head)
+#define pktq_ppeek_tail(pq, prec) ((pq)->q[prec].tail)
+
+extern void *pktq_penq(struct pktq *pq, int prec, void *p);
+extern void *pktq_penq_head(struct pktq *pq, int prec, void *p);
+extern void *pktq_pdeq(struct pktq *pq, int prec);
+extern void *pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p);
+extern void *pktq_pdeq_tail(struct pktq *pq, int prec);
+
+extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir,
+ ifpkt_cb_t fn, int arg);
+
+extern bool pktq_pdel(struct pktq *pq, void *p, int prec);
+
+
+
+extern int pktq_mlen(struct pktq *pq, uint prec_bmp);
+extern void *pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
+extern void *pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out);
+
+
+
+#define pktq_len(pq) ((int)(pq)->len)
+#define pktq_max(pq) ((int)(pq)->max)
+#define pktq_avail(pq) ((int)((pq)->max - (pq)->len))
+#define pktq_full(pq) ((pq)->len >= (pq)->max)
+#define pktq_empty(pq) ((pq)->len == 0)
+
+
+#define pktenq(pq, p) pktq_penq(((struct pktq *)(void *)pq), 0, (p))
+#define pktenq_head(pq, p) pktq_penq_head(((struct pktq *)(void *)pq), 0, (p))
+#define pktdeq(pq) pktq_pdeq(((struct pktq *)(void *)pq), 0)
+#define pktdeq_tail(pq) pktq_pdeq_tail(((struct pktq *)(void *)pq), 0)
+#define pktqinit(pq, len) pktq_init(((struct pktq *)(void *)pq), 1, len)
+
+extern void pktq_init(struct pktq *pq, int num_prec, int max_len);
+extern void pktq_set_max_plen(struct pktq *pq, int prec, int max_len);
+
+
+extern void *pktq_deq(struct pktq *pq, int *prec_out);
+extern void *pktq_deq_tail(struct pktq *pq, int *prec_out);
+extern void *pktq_peek(struct pktq *pq, int *prec_out);
+extern void *pktq_peek_tail(struct pktq *pq, int *prec_out);
+extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg);
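+
+/*
+ * Queue usage sketch: initialise a multi-precedence queue, enqueue a packet
+ * at a chosen precedence, then dequeue from the highest non-empty
+ * precedence; the precedence count and values below are illustrative.
+ *
+ *	struct pktq q;
+ *	int prec_out;
+ *	pktq_init(&q, 4, PKTQ_LEN_DEFAULT);
+ *	pktq_penq(&q, 2, pkt);
+ *	p = pktq_deq(&q, &prec_out);
+ */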
+
+
+
+extern uint pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf);
+extern uint pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf);
+extern uint pkttotlen(osl_t *osh, void *p);
+extern void *pktlast(osl_t *osh, void *p);
+extern uint pktsegcnt(osl_t *osh, void *p);
+extern uint pktsegcnt_war(osl_t *osh, void *p);
+extern uint8 *pktoffset(osl_t *osh, void *p, uint offset);
+
+
+#define PKTPRIO_VDSCP 0x100
+#define PKTPRIO_VLAN 0x200
+#define PKTPRIO_UPD 0x400
+#define PKTPRIO_DSCP 0x800
+
+extern uint pktsetprio(void *pkt, bool update_vtag);
+
+
+extern int bcm_atoi(const char *s);
+extern ulong bcm_strtoul(const char *cp, char **endp, uint base);
+extern char *bcmstrstr(const char *haystack, const char *needle);
+extern char *bcmstrcat(char *dest, const char *src);
+extern char *bcmstrncat(char *dest, const char *src, uint size);
+extern ulong wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen);
+char* bcmstrtok(char **string, const char *delimiters, char *tokdelim);
+int bcmstricmp(const char *s1, const char *s2);
+int bcmstrnicmp(const char* s1, const char* s2, int cnt);
+
+
+
+extern char *bcm_ether_ntoa(const struct ether_addr *ea, char *buf);
+extern int bcm_ether_atoe(const char *p, struct ether_addr *ea);
+
+
+struct ipv4_addr;
+extern char *bcm_ip_ntoa(struct ipv4_addr *ia, char *buf);
+
+
+extern void bcm_mdelay(uint ms);
+
+#define NVRAM_RECLAIM_CHECK(name)
+
+extern char *getvar(char *vars, const char *name);
+extern int getintvar(char *vars, const char *name);
+extern int getintvararray(char *vars, const char *name, int index);
+extern int getintvararraysize(char *vars, const char *name);
+extern uint getgpiopin(char *vars, char *pin_name, uint def_pin);
+#define bcm_perf_enable()
+#define bcmstats(fmt)
+#define bcmlog(fmt, a1, a2)
+#define bcmdumplog(buf, size) *buf = '\0'
+#define bcmdumplogent(buf, idx) -1
+
+#define bcmtslog(tstamp, fmt, a1, a2)
+#define bcmprinttslogs()
+#define bcmprinttstamp(us)
+#define bcmdumptslog(buf, size)
+
+extern char *bcm_nvram_vars(uint *length);
+extern int bcm_nvram_cache(void *sih);
+
+
+
+
+typedef struct bcm_iovar {
+ const char *name;
+ uint16 varid;
+ uint16 flags;
+ uint16 type;
+ uint16 minlen;
+} bcm_iovar_t;
+
+
+
+
+#define IOV_GET 0
+#define IOV_SET 1
+
+
+#define IOV_GVAL(id) ((id) * 2)
+#define IOV_SVAL(id) ((id) * 2 + IOV_SET)
+#define IOV_ISSET(actionid) ((actionid & IOV_SET) == IOV_SET)
+#define IOV_ID(actionid) (actionid >> 1)
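+
+/*
+ * Action-id sketch: a table varid is encoded into distinct get/set action
+ * ids and can be decoded back; IOV_FOO stands for a hypothetical varid.
+ *
+ *	switch (actionid) {
+ *	case IOV_GVAL(IOV_FOO):
+ *		... handle get ...
+ *		break;
+ *	case IOV_SVAL(IOV_FOO):
+ *		... handle set ...
+ *		break;
+ *	}
+ *
+ * IOV_ISSET(actionid) distinguishes set from get; IOV_ID() recovers the varid.
+ */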
+
+
+
+extern const bcm_iovar_t *bcm_iovar_lookup(const bcm_iovar_t *table, const char *name);
+extern int bcm_iovar_lencheck(const bcm_iovar_t *table, void *arg, int len, bool set);
+#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \
+ defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
+extern int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len);
+#endif
+#endif
+
+
+#define IOVT_VOID 0
+#define IOVT_BOOL 1
+#define IOVT_INT8 2
+#define IOVT_UINT8 3
+#define IOVT_INT16 4
+#define IOVT_UINT16 5
+#define IOVT_INT32 6
+#define IOVT_UINT32 7
+#define IOVT_BUFFER 8
+#define BCM_IOVT_VALID(type) (((unsigned int)(type)) <= IOVT_BUFFER)
+
+
+#define BCM_IOV_TYPE_INIT { \
+ "void", \
+ "bool", \
+ "int8", \
+ "uint8", \
+ "int16", \
+ "uint16", \
+ "int32", \
+ "uint32", \
+ "buffer", \
+ "" }
+
+#define BCM_IOVT_IS_INT(type) (\
+ (type == IOVT_BOOL) || \
+ (type == IOVT_INT8) || \
+ (type == IOVT_UINT8) || \
+ (type == IOVT_INT16) || \
+ (type == IOVT_UINT16) || \
+ (type == IOVT_INT32) || \
+ (type == IOVT_UINT32))
+
+
+
+#define BCME_STRLEN 64
+#define VALID_BCMERROR(e) ((e <= 0) && (e >= BCME_LAST))
+
+
+
+
+#define BCME_OK 0
+#define BCME_ERROR -1
+#define BCME_BADARG -2
+#define BCME_BADOPTION -3
+#define BCME_NOTUP -4
+#define BCME_NOTDOWN -5
+#define BCME_NOTAP -6
+#define BCME_NOTSTA -7
+#define BCME_BADKEYIDX -8
+#define BCME_RADIOOFF -9
+#define BCME_NOTBANDLOCKED -10
+#define BCME_NOCLK -11
+#define BCME_BADRATESET -12
+#define BCME_BADBAND -13
+#define BCME_BUFTOOSHORT -14
+#define BCME_BUFTOOLONG -15
+#define BCME_BUSY -16
+#define BCME_NOTASSOCIATED -17
+#define BCME_BADSSIDLEN -18
+#define BCME_OUTOFRANGECHAN -19
+#define BCME_BADCHAN -20
+#define BCME_BADADDR -21
+#define BCME_NORESOURCE -22
+#define BCME_UNSUPPORTED -23
+#define BCME_BADLEN -24
+#define BCME_NOTREADY -25
+#define BCME_EPERM -26
+#define BCME_NOMEM -27
+#define BCME_ASSOCIATED -28
+#define BCME_RANGE -29
+#define BCME_NOTFOUND -30
+#define BCME_WME_NOT_ENABLED -31
+#define BCME_TSPEC_NOTFOUND -32
+#define BCME_ACM_NOTSUPPORTED -33
+#define BCME_NOT_WME_ASSOCIATION -34
+#define BCME_SDIO_ERROR -35
+#define BCME_DONGLE_DOWN -36
+#define BCME_VERSION -37
+#define BCME_TXFAIL -38
+#define BCME_RXFAIL -39
+#define BCME_NODEVICE -40
+#define BCME_NMODE_DISABLED -41
+#define BCME_NONRESIDENT -42
+#define BCME_LAST BCME_NONRESIDENT
+
+
+#define BCMERRSTRINGTABLE { \
+ "OK", \
+ "Undefined error", \
+ "Bad Argument", \
+ "Bad Option", \
+ "Not up", \
+ "Not down", \
+ "Not AP", \
+ "Not STA", \
+ "Bad Key Index", \
+ "Radio Off", \
+ "Not band locked", \
+ "No clock", \
+ "Bad Rate valueset", \
+ "Bad Band", \
+ "Buffer too short", \
+ "Buffer too long", \
+ "Busy", \
+ "Not Associated", \
+ "Bad SSID len", \
+ "Out of Range Channel", \
+ "Bad Channel", \
+ "Bad Address", \
+ "Not Enough Resources", \
+ "Unsupported", \
+ "Bad length", \
+ "Not Ready", \
+ "Not Permitted", \
+ "No Memory", \
+ "Associated", \
+ "Not In Range", \
+ "Not Found", \
+ "WME Not Enabled", \
+ "TSPEC Not Found", \
+ "ACM Not Supported", \
+ "Not WME Association", \
+ "SDIO Bus Error", \
+ "Dongle Not Accessible", \
+ "Incorrect version", \
+ "TX Failure", \
+ "RX Failure", \
+ "Device Not Present", \
+ "NMODE Disabled", \
+ "Nonresident overlay access", \
+}
+
+#ifndef ABS
+#define ABS(a) (((a) < 0) ? -(a) : (a))
+#endif
+
+#ifndef MIN
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#endif
+
+#ifndef MAX
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+#endif
+
+#define CEIL(x, y) (((x) + ((y) - 1)) / (y))
+#define ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
+#define ISALIGNED(a, x) (((uintptr)(a) & ((x) - 1)) == 0)
+#define ALIGN_ADDR(addr, boundary) (void *)(((uintptr)(addr) + (boundary) - 1) \
+ & ~((boundary) - 1))
+#define ALIGN_SIZE(size, boundary) (((size) + (boundary) - 1) \
+ & ~((boundary) - 1))
+#define ISPOWEROF2(x) ((((x) - 1) & (x)) == 0)
+#define VALID_MASK(mask) !((mask) & ((mask) + 1))
+
+#ifndef OFFSETOF
+#ifdef __ARMCC_VERSION
+
+#include <stddef.h>
+#define OFFSETOF(type, member) offsetof(type, member)
+#else
+#define OFFSETOF(type, member) ((uint)(uintptr)&((type *)0)->member)
+#endif
+#endif
+
+#ifndef ARRAYSIZE
+#define ARRAYSIZE(a) (sizeof(a) / sizeof(a[0]))
+#endif
+
+
+extern void *_bcmutils_dummy_fn;
+#define REFERENCE_FUNCTION(f) (_bcmutils_dummy_fn = (void *)(f))
+
+
+#ifndef setbit
+#ifndef NBBY
+#define NBBY 8
+#endif
+#define setbit(a, i) (((uint8 *)a)[(i) / NBBY] |= 1 << ((i) % NBBY))
+#define clrbit(a, i) (((uint8 *)a)[(i) / NBBY] &= ~(1 << ((i) % NBBY)))
+#define isset(a, i) (((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY)))
+#define isclr(a, i) ((((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY))) == 0)
+#endif
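+
+/*
+ * Bitmap sketch: the setbit/clrbit/isset macros operate on a plain byte
+ * array; the array size and bit index below are illustrative.
+ *
+ *	uint8 map[8];
+ *	memset(map, 0, sizeof(map));
+ *	setbit(map, 36);
+ *	if (isset(map, 36))
+ *		clrbit(map, 36);
+ */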
+
+#define NBITS(type) (sizeof(type) * 8)
+#define NBITVAL(nbits) (1 << (nbits))
+#define MAXBITVAL(nbits) ((1 << (nbits)) - 1)
+#define NBITMASK(nbits) MAXBITVAL(nbits)
+#define MAXNBVAL(nbyte) MAXBITVAL((nbyte) * 8)
+
+
+#define MUX(pred, true, false) ((pred) ? (true) : (false))
+
+
+#define MODDEC(x, bound) MUX((x) == 0, (bound) - 1, (x) - 1)
+#define MODINC(x, bound) MUX((x) == (bound) - 1, 0, (x) + 1)
+
+
+#define MODDEC_POW2(x, bound) (((x) - 1) & ((bound) - 1))
+#define MODINC_POW2(x, bound) (((x) + 1) & ((bound) - 1))
+
+
+#define MODADD(x, y, bound) \
+ MUX((x) + (y) >= (bound), (x) + (y) - (bound), (x) + (y))
+#define MODSUB(x, y, bound) \
+ MUX(((int)(x)) - ((int)(y)) < 0, (x) - (y) + (bound), (x) - (y))
+
+
+#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1))
+#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1))
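+
+/*
+ * Ring-index sketch: advance or retreat an index modulo a queue bound; the
+ * *_POW2 forms avoid the conditional but require a power-of-2 bound.
+ * The index and bound names below are illustrative.
+ *
+ *	r = MODINC(r, q->maxlen);	(works for any bound)
+ *	w = MODINC_POW2(w, 64);		(bound must be a power of 2)
+ */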
+
+
+#define CRC8_INIT_VALUE 0xff
+#define CRC8_GOOD_VALUE 0x9f
+#define CRC16_INIT_VALUE 0xffff
+#define CRC16_GOOD_VALUE 0xf0b8
+#define CRC32_INIT_VALUE 0xffffffff
+#define CRC32_GOOD_VALUE 0xdebb20e3
+
+
+#define MACF "%02x:%02x:%02x:%02x:%02x:%02x"
+#define ETHERP_TO_MACF(ea) ((struct ether_addr *) (ea))->octet[0], \
+ ((struct ether_addr *) (ea))->octet[1], \
+ ((struct ether_addr *) (ea))->octet[2], \
+ ((struct ether_addr *) (ea))->octet[3], \
+ ((struct ether_addr *) (ea))->octet[4], \
+ ((struct ether_addr *) (ea))->octet[5]
+
+#define ETHER_TO_MACF(ea) (ea).octet[0], \
+ (ea).octet[1], \
+ (ea).octet[2], \
+ (ea).octet[3], \
+ (ea).octet[4], \
+ (ea).octet[5]
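+
+/*
+ * Formatting sketch: print a MAC address with the MACF format string, using
+ * ETHER_TO_MACF for a struct value or ETHERP_TO_MACF for a pointer.
+ *
+ *	struct ether_addr ea;
+ *	printf("peer " MACF "\n", ETHER_TO_MACF(ea));
+ *	printf("peer " MACF "\n", ETHERP_TO_MACF(&ea));
+ */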
+
+
+typedef struct bcm_bit_desc {
+ uint32 bit;
+ const char* name;
+} bcm_bit_desc_t;
+
+
+typedef struct bcm_tlv {
+ uint8 id;
+ uint8 len;
+ uint8 data[1];
+} bcm_tlv_t;
+
+
+#define bcm_valid_tlv(elt, buflen) ((buflen) >= 2 && (int)(buflen) >= (int)(2 + (elt)->len))
+
+
+#define ETHER_ADDR_STR_LEN 18
+
+
+
+static INLINE void
+xor_128bit_block(const uint8 *src1, const uint8 *src2, uint8 *dst)
+{
+ if (
+#ifdef __i386__
+ 1 ||
+#endif
+ (((uintptr)src1 | (uintptr)src2 | (uintptr)dst) & 3) == 0) {
+
+
+ ((uint32 *)dst)[0] = ((const uint32 *)src1)[0] ^ ((const uint32 *)src2)[0];
+ ((uint32 *)dst)[1] = ((const uint32 *)src1)[1] ^ ((const uint32 *)src2)[1];
+ ((uint32 *)dst)[2] = ((const uint32 *)src1)[2] ^ ((const uint32 *)src2)[2];
+ ((uint32 *)dst)[3] = ((const uint32 *)src1)[3] ^ ((const uint32 *)src2)[3];
+ } else {
+
+ int k;
+ for (k = 0; k < 16; k++)
+ dst[k] = src1[k] ^ src2[k];
+ }
+}
+
+
+
+extern uint8 hndcrc8(uint8 *p, uint nbytes, uint8 crc);
+extern uint16 hndcrc16(uint8 *p, uint nbytes, uint16 crc);
+extern uint32 hndcrc32(uint8 *p, uint nbytes, uint32 crc);
+
+
+#if defined(DHD_DEBUG) || defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || \
+ defined(WLMSG_ASSOC)
+extern int bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len);
+#endif
+
+#if defined(DHD_DEBUG) || defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || \
+ defined(WLMSG_ASSOC) || defined(WLMEDIA_PEAKRATE)
+extern int bcm_format_hex(char *str, const void *bytes, int len);
+#endif
+
+extern const char *bcm_crypto_algo_name(uint algo);
+extern char *bcm_chipname(uint chipid, char *buf, uint len);
+extern char *bcm_brev_str(uint32 brev, char *buf);
+extern void printbig(char *buf);
+extern void prhex(const char *msg, uchar *buf, uint len);
+
+
+extern bcm_tlv_t *bcm_next_tlv(bcm_tlv_t *elt, int *buflen);
+extern bcm_tlv_t *bcm_parse_tlvs(void *buf, int buflen, uint key);
+extern bcm_tlv_t *bcm_parse_ordered_tlvs(void *buf, int buflen, uint key);
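+
+/*
+ * TLV walk sketch: bcm_parse_tlvs() finds the first element with a given id,
+ * while bcm_valid_tlv/bcm_next_tlv() allow a manual walk. The id value and
+ * consumer below are illustrative.
+ *
+ *	bcm_tlv_t *elt = bcm_parse_tlvs(buf, buflen, ie_id);
+ *	if (elt != NULL)
+ *		use_ie(elt->data, elt->len);
+ *
+ *	for (elt = (bcm_tlv_t *)buf; bcm_valid_tlv(elt, buflen);
+ *	     elt = bcm_next_tlv(elt, &buflen))
+ *		use_ie(elt->data, elt->len);
+ */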
+
+
+extern const char *bcmerrorstr(int bcmerror);
+extern bcm_tlv_t *bcm_parse_tlvs(void *buf, int buflen, uint key);
+
+
+typedef uint32 mbool;
+#define mboolset(mb, bit) ((mb) |= (bit))
+#define mboolclr(mb, bit) ((mb) &= ~(bit))
+#define mboolisset(mb, bit) (((mb) & (bit)) != 0)
+#define mboolmaskset(mb, mask, val) ((mb) = (((mb) & ~(mask)) | (val)))
+
+
+struct fielddesc {
+ const char *nameandfmt;
+ uint32 offset;
+ uint32 len;
+};
+
+extern void bcm_binit(struct bcmstrbuf *b, char *buf, uint size);
+extern void bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, uint8 *buf, int len);
+
+extern void bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount);
+extern int bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes);
+extern void bcm_print_bytes(const char *name, const uchar *cdata, int len);
+
+typedef uint32 (*bcmutl_rdreg_rtn)(void *arg0, uint arg1, uint32 offset);
+extern uint bcmdumpfields(bcmutl_rdreg_rtn func_ptr, void *arg0, uint arg1, struct fielddesc *str,
+ char *buf, uint32 bufsize);
+extern uint bcm_bitcount(uint8 *bitmap, uint bytelength);
+
+extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...);
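+
+/*
+ * Dump-buffer sketch: bcm_binit() binds a raw character buffer to a
+ * bcmstrbuf and bcm_bprintf() appends formatted text while tracking the
+ * remaining space; the buffer size and counter below are illustrative.
+ *
+ *	char dumpbuf[256];
+ *	struct bcmstrbuf b;
+ *	bcm_binit(&b, dumpbuf, sizeof(dumpbuf));
+ *	bcm_bprintf(&b, "rx_errors %u\n", stats.rx_errors);
+ */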
+
+
+extern uint16 bcm_qdbm_to_mw(uint8 qdbm);
+extern uint8 bcm_mw_to_qdbm(uint16 mw);
+extern uint bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint len);
+
+unsigned int process_nvram_vars(char *varbuf, unsigned int len);
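+
+/*
+ * iovar buffer sketch: bcm_mkiovar() packs a NUL-terminated iovar name
+ * followed by its payload into buf and returns the total length (0 if the
+ * buffer is too small); the result is typically handed to a WLC_SET_VAR or
+ * WLC_GET_VAR ioctl. The variable name and payload below are illustrative.
+ *
+ *	char iovbuf[32];
+ *	uint32 val = 1;
+ *	uint len = bcm_mkiovar("mpc", (char *)&val, sizeof(val), iovbuf, sizeof(iovbuf));
+ */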
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/dbus.h b/drivers/net/wireless/bcmdhd/include/dbus.h
new file mode 100644
index 000000000000..c5ea223ec8c8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/dbus.h
@@ -0,0 +1,571 @@
+/*
+ * Dongle BUS interface abstraction layer,
+ * targeting serial buses like USB, SDIO, SPI, etc.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dbus.h 323680 2012-03-26 17:52:31Z $
+ */
+
+#ifndef __DBUS_H__
+#define __DBUS_H__
+
+#include "typedefs.h"
+
+#define DBUSTRACE(args)
+#define DBUSERR(args)
+#define DBUSINFO(args)
+#define DBUSDBGLOCK(args)
+
+enum {
+ DBUS_OK = 0,
+ DBUS_ERR = -200,
+ DBUS_ERR_TIMEOUT,
+ DBUS_ERR_DISCONNECT,
+ DBUS_ERR_NODEVICE,
+ DBUS_ERR_UNSUPPORTED,
+ DBUS_ERR_PENDING,
+ DBUS_ERR_NOMEM,
+ DBUS_ERR_TXFAIL,
+ DBUS_ERR_TXTIMEOUT,
+ DBUS_ERR_TXDROP,
+ DBUS_ERR_RXFAIL,
+ DBUS_ERR_RXDROP,
+ DBUS_ERR_TXCTLFAIL,
+ DBUS_ERR_RXCTLFAIL,
+ DBUS_ERR_REG_PARAM,
+ DBUS_STATUS_CANCELLED,
+ DBUS_ERR_NVRAM,
+ DBUS_JUMBO_NOMATCH,
+ DBUS_JUMBO_BAD_FORMAT,
+ DBUS_NVRAM_NONTXT
+};
+
+#define BCM_OTP_SIZE_43236 84 /* number of 16 bit values */
+#define BCM_OTP_SW_RGN_43236 24 /* start offset of SW config region */
+#define BCM_OTP_ADDR_43236 0x18000800 /* address of otp base */
+
+#define ERR_CBMASK_TXFAIL 0x00000001
+#define ERR_CBMASK_RXFAIL 0x00000002
+#define ERR_CBMASK_ALL 0xFFFFFFFF
+
+#define DBUS_CBCTL_WRITE 0
+#define DBUS_CBCTL_READ 1
+#if defined(INTR_EP_ENABLE)
+#define DBUS_CBINTR_POLL 2
+#endif /* defined(INTR_EP_ENABLE) */
+
+#define DBUS_TX_RETRY_LIMIT 3 /* retries for failed txirb */
+#define DBUS_TX_TIMEOUT_INTERVAL 250 /* timeout for txirb complete, in ms */
+
+#define DBUS_BUFFER_SIZE_TX 16000
+#define DBUS_BUFFER_SIZE_RX 5000
+
+#define DBUS_BUFFER_SIZE_TX_NOAGG 2048
+#define DBUS_BUFFER_SIZE_RX_NOAGG 2048
+
+/* DBUS types */
+enum {
+ DBUS_USB,
+ DBUS_SDIO,
+ DBUS_SPI,
+ DBUS_UNKNOWN
+};
+
+enum dbus_state {
+ DBUS_STATE_DL_PENDING,
+ DBUS_STATE_DL_DONE,
+ DBUS_STATE_UP,
+ DBUS_STATE_DOWN,
+ DBUS_STATE_PNP_FWDL,
+ DBUS_STATE_DISCONNECT,
+ DBUS_STATE_SLEEP
+};
+
+enum dbus_pnp_state {
+ DBUS_PNP_DISCONNECT,
+ DBUS_PNP_SLEEP,
+ DBUS_PNP_RESUME
+};
+
+enum dbus_file {
+ DBUS_FIRMWARE,
+ DBUS_NVFILE
+};
+
+typedef enum _DEVICE_SPEED {
+ INVALID_SPEED = -1,
+ LOW_SPEED = 1, /* USB 1.1: 1.5 Mbps */
+ FULL_SPEED, /* USB 1.1: 12 Mbps */
+ HIGH_SPEED, /* USB 2.0: 480 Mbps */
+ SUPER_SPEED, /* USB 3.0: 4.8 Gbps */
+} DEVICE_SPEED;
+
+typedef struct {
+ int bustype;
+ int vid;
+ int pid;
+ int devid;
+	int chiprev; /* chip revision number */
+ int mtu;
+ int nchan; /* Data Channels */
+ int has_2nd_bulk_in_ep;
+} dbus_attrib_t;
+
+/* FIX: Account for errors related to DBUS;
+ * Let upper layer account for packets/bytes
+ */
+typedef struct {
+ uint32 rx_errors;
+ uint32 tx_errors;
+ uint32 rx_dropped;
+ uint32 tx_dropped;
+} dbus_stats_t;
+
+/*
+ * Configurable BUS parameters
+ */
+typedef struct {
+ bool rxctl_deferrespok;
+} dbus_config_t;
+
+/*
+ * External Download Info
+ */
+typedef struct dbus_extdl {
+ uint8 *fw;
+ int fwlen;
+ uint8 *vars;
+ int varslen;
+} dbus_extdl_t;
+
+struct dbus_callbacks;
+struct exec_parms;
+
+typedef void *(*probe_cb_t)(void *arg, const char *desc, uint32 bustype, uint32 hdrlen);
+typedef void (*disconnect_cb_t)(void *arg);
+typedef void *(*exec_cb_t)(struct exec_parms *args);
+
+/* Client callbacks registered during dbus_attach() */
+typedef struct dbus_callbacks {
+ void (*send_complete)(void *cbarg, void *info, int status);
+ void (*recv_buf)(void *cbarg, uint8 *buf, int len);
+ void (*recv_pkt)(void *cbarg, void *pkt);
+ void (*txflowcontrol)(void *cbarg, bool onoff);
+ void (*errhandler)(void *cbarg, int err);
+ void (*ctl_complete)(void *cbarg, int type, int status);
+ void (*state_change)(void *cbarg, int state);
+ void *(*pktget)(void *cbarg, uint len, bool send);
+ void (*pktfree)(void *cbarg, void *p, bool send);
+} dbus_callbacks_t;
+
+struct dbus_pub;
+struct bcmstrbuf;
+struct dbus_irb;
+struct dbus_irb_rx;
+struct dbus_irb_tx;
+struct dbus_intf_callbacks;
+
+typedef struct {
+ void* (*attach)(struct dbus_pub *pub, void *cbarg, struct dbus_intf_callbacks *cbs);
+ void (*detach)(struct dbus_pub *pub, void *bus);
+
+ int (*up)(void *bus);
+ int (*down)(void *bus);
+ int (*send_irb)(void *bus, struct dbus_irb_tx *txirb);
+ int (*recv_irb)(void *bus, struct dbus_irb_rx *rxirb);
+ int (*cancel_irb)(void *bus, struct dbus_irb_tx *txirb);
+ int (*send_ctl)(void *bus, uint8 *buf, int len);
+ int (*recv_ctl)(void *bus, uint8 *buf, int len);
+ int (*get_stats)(void *bus, dbus_stats_t *stats);
+ int (*get_attrib)(void *bus, dbus_attrib_t *attrib);
+
+ int (*pnp)(void *bus, int evnt);
+ int (*remove)(void *bus);
+ int (*resume)(void *bus);
+ int (*suspend)(void *bus);
+ int (*stop)(void *bus);
+ int (*reset)(void *bus);
+
+ /* Access to bus buffers directly */
+ void *(*pktget)(void *bus, int len);
+ void (*pktfree)(void *bus, void *pkt);
+
+ int (*iovar_op)(void *bus, const char *name, void *params, int plen, void *arg, int len,
+ bool set);
+ void (*dump)(void *bus, struct bcmstrbuf *strbuf);
+ int (*set_config)(void *bus, dbus_config_t *config);
+ int (*get_config)(void *bus, dbus_config_t *config);
+
+ bool (*device_exists)(void *bus);
+ bool (*dlneeded)(void *bus);
+ int (*dlstart)(void *bus, uint8 *fw, int len);
+ int (*dlrun)(void *bus);
+ bool (*recv_needed)(void *bus);
+
+ void *(*exec_rxlock)(void *bus, exec_cb_t func, struct exec_parms *args);
+ void *(*exec_txlock)(void *bus, exec_cb_t func, struct exec_parms *args);
+
+ int (*tx_timer_init)(void *bus);
+ int (*tx_timer_start)(void *bus, uint timeout);
+ int (*tx_timer_stop)(void *bus);
+
+ int (*sched_dpc)(void *bus);
+ int (*lock)(void *bus);
+ int (*unlock)(void *bus);
+ int (*sched_probe_cb)(void *bus);
+
+ int (*shutdown)(void *bus);
+
+ int (*recv_stop)(void *bus);
+ int (*recv_resume)(void *bus);
+
+ int (*recv_irb_from_ep)(void *bus, struct dbus_irb_rx *rxirb, uint ep_idx);
+
+ int (*readreg)(void *bus, uint32 regaddr, int datalen, uint32 *value);
+
+ /* Add from the bottom */
+} dbus_intf_t;
+
+typedef struct dbus_pub {
+ struct osl_info *osh;
+ dbus_stats_t stats;
+ dbus_attrib_t attrib;
+ enum dbus_state busstate;
+ DEVICE_SPEED device_speed;
+ int ntxq, nrxq, rxsize;
+ void *bus;
+ struct shared_info *sh;
+ void *dev_info;
+} dbus_pub_t;
+
+#define BUS_INFO(bus, type) (((type *) bus)->pub->bus)
+
+#define ALIGNED_LOCAL_VARIABLE(var, align) \
+ uint8 buffer[SDALIGN+64]; \
+ uint8 *var = (uint8 *)(((uintptr)&buffer[0]) & ~(align-1)) + align;
+
+/*
+ * Public Bus Function Interface
+ */
+
+/*
+ * FIX: Is there a better way to pass OS/host handles to DBUS while still
+ * maintaining a common interface for all OSes?
+ * Under NDIS, param1 needs to be MiniportHandle
+ * For NDIS60, param2 is WdfDevice
+ * Under Linux, param1 and param2 are NULL;
+ */
+extern int dbus_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, void *prarg,
+ void *param1, void *param2);
+extern int dbus_deregister(void);
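+
+/*
+ * Registration sketch (Linux): the host driver registers probe/disconnect
+ * callbacks for a dongle VID/PID, then attaches once the probe callback
+ * fires; per the note above, param1/param2 are NULL on Linux. Callback and
+ * argument names are illustrative.
+ *
+ *	dbus_register(vid, pid, my_probe_cb, my_disconnect_cb, my_arg, NULL, NULL);
+ *	...
+ *	pub = dbus_attach(osh, rxsize, nrxq, ntxq, cbarg, &my_callbacks, NULL, sh);
+ */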
+
+extern dbus_pub_t *dbus_attach(struct osl_info *osh, int rxsize, int nrxq, int ntxq,
+ void *cbarg, dbus_callbacks_t *cbs, dbus_extdl_t *extdl, struct shared_info *sh);
+extern void dbus_detach(dbus_pub_t *pub);
+
+extern int dbus_up(dbus_pub_t *pub);
+extern int dbus_down(dbus_pub_t *pub);
+extern int dbus_stop(dbus_pub_t *pub);
+extern int dbus_shutdown(dbus_pub_t *pub);
+extern void dbus_flowctrl_rx(dbus_pub_t *pub, bool on);
+
+extern int dbus_send_txdata(dbus_pub_t *dbus, void *pktbuf);
+extern int dbus_send_buf(dbus_pub_t *pub, uint8 *buf, int len, void *info);
+extern int dbus_send_pkt(dbus_pub_t *pub, void *pkt, void *info);
+extern int dbus_send_ctl(dbus_pub_t *pub, uint8 *buf, int len);
+extern int dbus_recv_ctl(dbus_pub_t *pub, uint8 *buf, int len);
+extern int dbus_recv_bulk(dbus_pub_t *pub, uint32 ep_idx);
+extern int dbus_poll_intr(dbus_pub_t *pub);
+
+extern int dbus_get_stats(dbus_pub_t *pub, dbus_stats_t *stats);
+extern int dbus_get_attrib(dbus_pub_t *pub, dbus_attrib_t *attrib);
+extern int dbus_get_device_speed(dbus_pub_t *pub);
+extern int dbus_set_config(dbus_pub_t *pub, dbus_config_t *config);
+extern int dbus_get_config(dbus_pub_t *pub, dbus_config_t *config);
+extern void * dbus_get_devinfo(dbus_pub_t *pub);
+
+extern void *dbus_pktget(dbus_pub_t *pub, int len);
+extern void dbus_pktfree(dbus_pub_t *pub, void* pkt);
+
+extern int dbus_set_errmask(dbus_pub_t *pub, uint32 mask);
+extern int dbus_pnp_sleep(dbus_pub_t *pub);
+extern int dbus_pnp_resume(dbus_pub_t *pub, int *fw_reload);
+extern int dbus_pnp_disconnect(dbus_pub_t *pub);
+
+extern int dbus_iovar_op(dbus_pub_t *pub, const char *name,
+ void *params, int plen, void *arg, int len, bool set);
+
+extern void *dhd_dbus_txq(const dbus_pub_t *pub);
+extern uint dhd_dbus_hdrlen(const dbus_pub_t *pub);
+
+/*
+ * Private Common Bus Interface
+ */
+
+/* IO Request Block (IRB) */
+typedef struct dbus_irb {
+	struct dbus_irb *next; /* it's cast from a dbus_irb_tx or dbus_irb_rx struct */
+} dbus_irb_t;
+
+typedef struct dbus_irb_rx {
+ struct dbus_irb irb; /* Must be first */
+ uint8 *buf;
+ int buf_len;
+ int actual_len;
+ void *pkt;
+ void *info;
+ void *arg;
+} dbus_irb_rx_t;
+
+typedef struct dbus_irb_tx {
+ struct dbus_irb irb; /* Must be first */
+ uint8 *buf;
+ int len;
+ void *pkt;
+ int retry_count;
+ void *info;
+ void *arg;
+	void *send_buf; /* linear buffer for Linux when aggregation is enabled */
+} dbus_irb_tx_t;
+
+/* DBUS interface callbacks are different from user callbacks
+ * so, internally, different info can be passed to upper layer
+ */
+typedef struct dbus_intf_callbacks {
+ void (*send_irb_timeout)(void *cbarg, dbus_irb_tx_t *txirb);
+ void (*send_irb_complete)(void *cbarg, dbus_irb_tx_t *txirb, int status);
+ void (*recv_irb_complete)(void *cbarg, dbus_irb_rx_t *rxirb, int status);
+ void (*errhandler)(void *cbarg, int err);
+ void (*ctl_complete)(void *cbarg, int type, int status);
+ void (*state_change)(void *cbarg, int state);
+ bool (*isr)(void *cbarg, bool *wantdpc);
+ bool (*dpc)(void *cbarg, bool bounded);
+ void (*watchdog)(void *cbarg);
+ void *(*pktget)(void *cbarg, uint len, bool send);
+ void (*pktfree)(void *cbarg, void *p, bool send);
+ struct dbus_irb* (*getirb)(void *cbarg, bool send);
+ void (*rxerr_indicate)(void *cbarg, bool on);
+} dbus_intf_callbacks_t;
+
+/*
+ * Porting: To support new bus, port these functions below
+ */
+
+/*
+ * Bus specific Interface
+ * Implemented by dbus_usb.c/dbus_sdio.c
+ */
+extern int dbus_bus_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, void *prarg,
+ dbus_intf_t **intf, void *param1, void *param2);
+extern int dbus_bus_deregister(void);
+extern void dbus_bus_fw_get(void *bus, uint8 **fw, int *fwlen, int *decomp);
+
+/*
+ * Bus-specific and OS-specific Interface
+ * Implemented by dbus_usb_[linux/ndis].c/dbus_sdio_[linux/ndis].c
+ */
+extern int dbus_bus_osl_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb,
+ void *prarg, dbus_intf_t **intf, void *param1, void *param2);
+extern int dbus_bus_osl_deregister(void);
+
+/*
+ * Bus-specific, OS-specific, HW-specific Interface
+ * Mainly for SDIO Host HW controller
+ */
+extern int dbus_bus_osl_hw_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb,
+ void *prarg, dbus_intf_t **intf);
+extern int dbus_bus_osl_hw_deregister(void);
+
+extern uint usbdev_bulkin_eps(void);
+#if defined(BCM_REQUEST_FW)
+extern void *dbus_get_fw_nvfile(int devid, uint8 **fw, int *fwlen, int type,
+ uint16 boardtype, uint16 boardrev);
+extern void dbus_release_fw_nvfile(void *firmware);
+#endif /* #if defined(BCM_REQUEST_FW) */
+
+
+#if defined(EHCI_FASTPATH_TX) || defined(EHCI_FASTPATH_RX)
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+ /* Backward compatibility */
+ typedef unsigned int gfp_t;
+
+ #define dma_pool pci_pool
+ #define dma_pool_create(name, dev, size, align, alloc) \
+ pci_pool_create(name, dev, size, align, alloc, GFP_DMA | GFP_ATOMIC)
+ #define dma_pool_destroy(pool) pci_pool_destroy(pool)
+ #define dma_pool_alloc(pool, flags, handle) pci_pool_alloc(pool, flags, handle)
+ #define dma_pool_free(pool, vaddr, addr) pci_pool_free(pool, vaddr, addr)
+
+ #define dma_map_single(dev, addr, size, dir) pci_map_single(dev, addr, size, dir)
+ #define dma_unmap_single(dev, hnd, size, dir) pci_unmap_single(dev, hnd, size, dir)
+ #define DMA_FROM_DEVICE PCI_DMA_FROMDEVICE
+ #define DMA_TO_DEVICE PCI_DMA_TODEVICE
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+
+/* Availability of these functions varies (when present, they have two arguments) */
+#ifndef hc32_to_cpu
+ #define hc32_to_cpu(x) le32_to_cpu(x)
+ #define cpu_to_hc32(x) cpu_to_le32(x)
+ typedef unsigned int __hc32;
+#else
+ #error Two-argument functions needed
+#endif
+
+/* Private USB opcode base */
+#define EHCI_FASTPATH 0x31
+#define EHCI_SET_EP_BYPASS EHCI_FASTPATH
+#define EHCI_SET_BYPASS_CB (EHCI_FASTPATH + 1)
+#define EHCI_SET_BYPASS_DEV (EHCI_FASTPATH + 2)
+#define EHCI_DUMP_STATE (EHCI_FASTPATH + 3)
+#define EHCI_SET_BYPASS_POOL (EHCI_FASTPATH + 4)
+#define EHCI_CLR_EP_BYPASS (EHCI_FASTPATH + 5)
+
+/*
+ * EHCI QTD structure (hardware and extension)
+ * NOTE that it does not need to (and does not) match its kernel counterpart
+ */
+#define EHCI_QTD_NBUFFERS 5
+#define EHCI_QTD_ALIGN 32
+#define EHCI_BULK_PACKET_SIZE 512
+#define EHCI_QTD_XACTERR_MAX 32
+
+struct ehci_qtd {
+ /* Hardware map */
+ volatile uint32_t qtd_next;
+ volatile uint32_t qtd_altnext;
+ volatile uint32_t qtd_status;
+#define EHCI_QTD_GET_BYTES(x) (((x)>>16) & 0x7fff)
+#define EHCI_QTD_IOC 0x00008000
+#define EHCI_QTD_GET_CERR(x) (((x)>>10) & 0x3)
+#define EHCI_QTD_SET_CERR(x) ((x) << 10)
+#define EHCI_QTD_GET_PID(x) (((x)>>8) & 0x3)
+#define EHCI_QTD_SET_PID(x) ((x) << 8)
+#define EHCI_QTD_ACTIVE 0x80
+#define EHCI_QTD_HALTED 0x40
+#define EHCI_QTD_BUFERR 0x20
+#define EHCI_QTD_BABBLE 0x10
+#define EHCI_QTD_XACTERR 0x08
+#define EHCI_QTD_MISSEDMICRO 0x04
+ volatile uint32_t qtd_buffer[EHCI_QTD_NBUFFERS];
+ volatile uint32_t qtd_buffer_hi[EHCI_QTD_NBUFFERS];
+
+ /* Implementation extension */
+ dma_addr_t qtd_self; /* own hardware address */
+ struct ehci_qtd *obj_next; /* software link to the next QTD */
+ void *rpc; /* pointer to the rpc buffer */
+ size_t length; /* length of the data in the buffer */
+ void *buff; /* pointer to the reassembly buffer */
+ int xacterrs; /* retry counter for qtd xact error */
+} __attribute__ ((aligned(EHCI_QTD_ALIGN)));
+
+#define EHCI_NULL __constant_cpu_to_le32(1) /* HW null pointer shall be odd */
+
+#define SHORT_READ_Q(token) (EHCI_QTD_GET_BYTES(token) != 0 && EHCI_QTD_GET_PID(token) == 1)
+
+/* Queue Head */
+/* NOTE: this structure is slightly different from the one in the kernel, but it needs
+ * to stay compatible
+ */
+struct ehci_qh {
+ /* Hardware map */
+ volatile uint32_t qh_link;
+ volatile uint32_t qh_endp;
+ volatile uint32_t qh_endphub;
+ volatile uint32_t qh_curqtd;
+
+ /* QTD overlay */
+ volatile uint32_t ow_next;
+ volatile uint32_t ow_altnext;
+ volatile uint32_t ow_status;
+ volatile uint32_t ow_buffer [EHCI_QTD_NBUFFERS];
+ volatile uint32_t ow_buffer_hi [EHCI_QTD_NBUFFERS];
+
+ /* Extension (should match the kernel layout) */
+ dma_addr_t unused0;
+ void *unused1;
+ struct list_head unused2;
+ struct ehci_qtd *dummy;
+ struct ehci_qh *unused3;
+
+ struct ehci_hcd *unused4;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+ struct kref unused5;
+ unsigned unused6;
+
+ uint8_t unused7;
+
+ /* periodic schedule info */
+ uint8_t unused8;
+ uint8_t unused9;
+ uint8_t unused10;
+ uint16_t unused11;
+ uint16_t unused12;
+ uint16_t unused13;
+ struct usb_device *unused14;
+#else
+ unsigned unused5;
+
+ u8 unused6;
+
+ /* periodic schedule info */
+ u8 unused7;
+ u8 unused8;
+ u8 unused9;
+ unsigned short unused10;
+ unsigned short unused11;
+#define NO_FRAME ((unsigned short)~0)
+#ifdef EHCI_QUIRK_FIX
+ struct usb_device *unused12;
+#endif /* EHCI_QUIRK_FIX */
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
+ struct ehci_qtd *first_qtd;
+ /* Link to the first QTD; this is an optimized equivalent of the qtd_list field */
+ /* NOTE that ehci_qh in ehci.h shall reserve this word */
+} __attribute__ ((aligned(EHCI_QTD_ALIGN)));
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+/* The corresponding structure in the kernel is used to get the QH */
+struct hcd_dev { /* usb_device.hcpriv points to this */
+ struct list_head unused0;
+ struct list_head unused1;
+
+ /* array of QH pointers */
+ void *ep[32];
+};
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+
+int optimize_qtd_fill_with_rpc(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd, void *rpc,
+ int token, int len);
+int optimize_qtd_fill_with_data(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd, void *data,
+ int token, int len);
+int optimize_submit_async(struct ehci_qtd *qtd, int epn);
+void inline optimize_ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma);
+struct ehci_qtd *optimize_ehci_qtd_alloc(gfp_t flags);
+void optimize_ehci_qtd_free(struct ehci_qtd *qtd);
+void optimize_submit_rx_request(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd_in, void *buf);
+#endif /* EHCI_FASTPATH_TX || EHCI_FASTPATH_RX */
+
+void dbus_flowctrl_tx(void *dbi, bool on);
+#endif /* __DBUS_H__ */
diff --git a/drivers/net/wireless/bcmdhd/include/dhdioctl.h b/drivers/net/wireless/bcmdhd/include/dhdioctl.h
new file mode 100644
index 000000000000..03c44ad867d6
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/dhdioctl.h
@@ -0,0 +1,135 @@
+/*
+ * Definitions for ioctls to access DHD iovars.
+ * Based on wlioctl.h (for Broadcom 802.11abg driver).
+ * (Moves towards generic ioctls for BCM drivers/iovars.)
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhdioctl.h 327460 2012-04-13 18:38:41Z $
+ */
+
+#ifndef _dhdioctl_h_
+#define _dhdioctl_h_
+
+#include <typedefs.h>
+
+
+/* require default structure packing */
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+
+/* Linux network driver ioctl encoding */
+typedef struct dhd_ioctl {
+ uint cmd; /* common ioctl definition */
+ void *buf; /* pointer to user buffer */
+ uint len; /* length of user buffer */
+ bool set; /* get or set request (optional) */
+ uint used; /* bytes read or written (optional) */
+ uint needed; /* bytes needed (optional) */
+ uint driver; /* to identify target driver */
+} dhd_ioctl_t;
+
+/* Underlying BUS definition */
+enum {
+ BUS_TYPE_USB = 0, /* for USB dongles */
+ BUS_TYPE_SDIO /* for SDIO dongles */
+};
+
+/* per-driver magic numbers */
+#define DHD_IOCTL_MAGIC 0x00444944
+
+/* bump this number if you change the ioctl interface */
+#define DHD_IOCTL_VERSION 1
+
+#define DHD_IOCTL_MAXLEN 8192 /* max length ioctl buffer required */
+#define DHD_IOCTL_SMLEN 256 /* "small" length ioctl buffer required */
+
+/* common ioctl definitions */
+#define DHD_GET_MAGIC 0
+#define DHD_GET_VERSION 1
+#define DHD_GET_VAR 2
+#define DHD_SET_VAR 3
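+
+/*
+ * Get-var sketch: fill a dhd_ioctl_t to read a named variable through the
+ * driver's private ioctl path; the variable name and buffer size below are
+ * illustrative.
+ *
+ *	char buf[DHD_IOCTL_SMLEN] = "version";
+ *	dhd_ioctl_t ioc;
+ *	memset(&ioc, 0, sizeof(ioc));
+ *	ioc.cmd = DHD_GET_VAR;
+ *	ioc.buf = buf;
+ *	ioc.len = sizeof(buf);
+ *	ioc.set = FALSE;
+ *	ioc.driver = DHD_IOCTL_MAGIC;
+ */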
+
+/* message levels */
+#define DHD_ERROR_VAL 0x0001
+#define DHD_TRACE_VAL 0x0002
+#define DHD_INFO_VAL 0x0004
+#define DHD_DATA_VAL 0x0008
+#define DHD_CTL_VAL 0x0010
+#define DHD_TIMER_VAL 0x0020
+#define DHD_HDRS_VAL 0x0040
+#define DHD_BYTES_VAL 0x0080
+#define DHD_INTR_VAL 0x0100
+#define DHD_LOG_VAL 0x0200
+#define DHD_GLOM_VAL 0x0400
+#define DHD_EVENT_VAL 0x0800
+#define DHD_BTA_VAL 0x1000
+#if 0 && (NDISVER >= 0x0630) && 1
+#define DHD_SCAN_VAL 0x2000
+#else
+#define DHD_ISCAN_VAL 0x2000
+#endif
+#define DHD_ARPOE_VAL 0x4000
+#define DHD_REORDER_VAL 0x8000
+#define DHD_WL_VAL 0x10000
+
+#ifdef SDTEST
+/* For pktgen iovar */
+typedef struct dhd_pktgen {
+ uint version; /* To allow structure change tracking */
+ uint freq; /* Max ticks between tx/rx attempts */
+ uint count; /* Test packets to send/rcv each attempt */
+ uint print; /* Print counts every <print> attempts */
+ uint total; /* Total packets (or bursts) */
+ uint minlen; /* Minimum length of packets to send */
+ uint maxlen; /* Maximum length of packets to send */
+ uint numsent; /* Count of test packets sent */
+ uint numrcvd; /* Count of test packets received */
+ uint numfail; /* Count of test send failures */
+ uint mode; /* Test mode (type of test packets) */
+ uint stop; /* Stop after this many tx failures */
+} dhd_pktgen_t;
+
+/* Version in case structure changes */
+#define DHD_PKTGEN_VERSION 2
+
+/* Type of test packets to use */
+#define DHD_PKTGEN_ECHO 1 /* Send echo requests */
+#define DHD_PKTGEN_SEND 2 /* Send discard packets */
+#define DHD_PKTGEN_RXBURST 3 /* Request dongle send N packets */
+#define DHD_PKTGEN_RECV 4 /* Continuous rx from continuous tx dongle */
+#endif /* SDTEST */
+
+/* Enter idle immediately (no timeout) */
+#define DHD_IDLE_IMMEDIATE (-1)
+
+/* Values for idleclock iovar: other values are the sd_divisor to use when idle */
+#define DHD_IDLE_ACTIVE 0 /* Do not request any SD clock change when idle */
+#define DHD_IDLE_STOP (-1) /* Request SD clock be stopped (and use SD1 mode) */
+
+
+/* require default structure packing */
+#include <packed_section_end.h>
+
+#endif /* _dhdioctl_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/epivers.h b/drivers/net/wireless/bcmdhd/include/epivers.h
new file mode 100644
index 000000000000..8fcf87cb9e0e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/epivers.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: epivers.h.in,v 13.33 2010-09-08 22:08:53 $
+ *
+*/
+
+#ifndef _epivers_h_
+#define _epivers_h_
+
+#define EPI_MAJOR_VERSION 1
+
+#define EPI_MINOR_VERSION 27
+
+#define EPI_RC_NUMBER 0
+
+#define EPI_INCREMENTAL_NUMBER 0
+
+#define EPI_BUILD_NUMBER 0
+
+#define EPI_VERSION 1, 27, 0, 0
+
+#define EPI_VERSION_NUM 0x011b0000
+
+#define EPI_VERSION_DEV 1.27.0
+
+
+#define EPI_VERSION_STR "1.27 (r329705)"
+
+#endif
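For reference, EPI_VERSION_NUM appears to pack the four version components one per byte; a sketch of the apparent encoding, inferred from the values above rather than stated in the header:

/* apparent encoding, inferred from the values above */
#define EPI_VERSION_NUM_CHECK \
	((EPI_MAJOR_VERSION << 24) | (EPI_MINOR_VERSION << 16) | \
	 (EPI_RC_NUMBER << 8) | EPI_INCREMENTAL_NUMBER)	/* == 0x011b0000 == EPI_VERSION_NUM */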
diff --git a/drivers/net/wireless/bcmdhd/include/hndpmu.h b/drivers/net/wireless/bcmdhd/include/hndpmu.h
new file mode 100644
index 000000000000..c41def6c7d8a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hndpmu.h
@@ -0,0 +1,36 @@
+/*
+ * HND SiliconBackplane PMU support.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndpmu.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _hndpmu_h_
+#define _hndpmu_h_
+
+
+extern void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on);
+extern void si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength);
+
+extern void si_pmu_minresmask_htavail_set(si_t *sih, osl_t *osh, bool set_clear);
+
+#endif /* _hndpmu_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/hndrte_armtrap.h b/drivers/net/wireless/bcmdhd/include/hndrte_armtrap.h
new file mode 100644
index 000000000000..90d979929381
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hndrte_armtrap.h
@@ -0,0 +1,88 @@
+/*
+ * HNDRTE arm trap handling.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndrte_armtrap.h 261365 2011-05-24 20:42:23Z $
+ */
+
+#ifndef _hndrte_armtrap_h
+#define _hndrte_armtrap_h
+
+
+/* ARM trap handling */
+
+/* Trap types defined by ARM (see arminc.h) */
+
+/* Trap locations in lo memory */
+#define TRAP_STRIDE 4
+#define FIRST_TRAP TR_RST
+#define LAST_TRAP (TR_FIQ * TRAP_STRIDE)
+
+#if defined(__ARM_ARCH_4T__)
+#define MAX_TRAP_TYPE (TR_FIQ + 1)
+#elif defined(__ARM_ARCH_7M__)
+#define MAX_TRAP_TYPE (TR_ISR + ARMCM3_NUMINTS)
+#endif /* __ARM_ARCH_7M__ */
+
+/* The trap structure is defined here as offsets for assembly */
+#define TR_TYPE 0x00
+#define TR_EPC 0x04
+#define TR_CPSR 0x08
+#define TR_SPSR 0x0c
+#define TR_REGS 0x10
+#define TR_REG(n) (TR_REGS + (n) * 4)
+#define TR_SP TR_REG(13)
+#define TR_LR TR_REG(14)
+#define TR_PC TR_REG(15)
+
+#define TRAP_T_SIZE 80
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+#include <typedefs.h>
+
+typedef struct _trap_struct {
+ uint32 type;
+ uint32 epc;
+ uint32 cpsr;
+ uint32 spsr;
+ uint32 r0; /* a1 */
+ uint32 r1; /* a2 */
+ uint32 r2; /* a3 */
+ uint32 r3; /* a4 */
+ uint32 r4; /* v1 */
+ uint32 r5; /* v2 */
+ uint32 r6; /* v3 */
+ uint32 r7; /* v4 */
+ uint32 r8; /* v5 */
+ uint32 r9; /* sb/v6 */
+ uint32 r10; /* sl/v7 */
+ uint32 r11; /* fp/v8 */
+ uint32 r12; /* ip */
+ uint32 r13; /* sp */
+ uint32 r14; /* lr */
+ uint32 pc; /* r15 */
+} trap_t;
+
+#endif /* !_LANGUAGE_ASSEMBLY */
+
+#endif /* _hndrte_armtrap_h */
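The assembly offsets above are intended to mirror trap_t; a minimal compile-time sanity check one could add in a debug build (sketch only, assuming <stddef.h> provides offsetof):

#include <stddef.h>

typedef char trap_size_check[(sizeof(trap_t) == TRAP_T_SIZE) ? 1 : -1];
typedef char trap_sp_check[(offsetof(trap_t, r13) == TR_SP) ? 1 : -1];
typedef char trap_pc_check[(offsetof(trap_t, pc) == TR_PC) ? 1 : -1];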
diff --git a/drivers/net/wireless/bcmdhd/include/hndrte_cons.h b/drivers/net/wireless/bcmdhd/include/hndrte_cons.h
new file mode 100644
index 000000000000..57abbbd5b67b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hndrte_cons.h
@@ -0,0 +1,67 @@
+/*
+ * Console support for hndrte.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndrte_cons.h 300516 2011-12-04 17:39:44Z $
+ */
+#ifndef _HNDRTE_CONS_H
+#define _HNDRTE_CONS_H
+
+#include <typedefs.h>
+
+#define CBUF_LEN (128)
+
+#define LOG_BUF_LEN 1024
+
+typedef struct {
+ uint32 buf; /* Can't be pointer on (64-bit) hosts */
+ uint buf_size;
+ uint idx;
+ char *_buf_compat; /* redundant pointer for backward compat. */
+} hndrte_log_t;
+
+typedef struct {
+ /* Virtual UART
+ * When there is no UART (e.g. Quickturn), the host should write a complete
+ * input line directly into cbuf and then write the length into vcons_in.
+ * This may also be used when there is a real UART (at risk of conflicting with
+ * the real UART). vcons_out is currently unused.
+ */
+ volatile uint vcons_in;
+ volatile uint vcons_out;
+
+ /* Output (logging) buffer
+ * Console output is written to a ring buffer log_buf at index log_idx.
+ * The host may read the output when it sees log_idx advance.
+ * Output will be lost if the output wraps around faster than the host polls.
+ */
+ hndrte_log_t log;
+
+ /* Console input line buffer
+ * Characters are read one at a time into cbuf until <CR> is received, then
+ * the buffer is processed as a command line. Also used for virtual UART.
+ */
+ uint cbuf_idx;
+ char cbuf[CBUF_LEN];
+} hndrte_cons_t;
+
+#endif /* _HNDRTE_CONS_H */
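A sketch of the host-side polling loop the log comment describes; locating the console structure in dongle memory, the read_dongle_mem() helper and the use of putchar() are assumptions here, not part of this header:

static void poll_dongle_console(uint32 cons_addr)
{
	static uint last_idx;
	hndrte_cons_t c;
	char ch;

	read_dongle_mem(cons_addr, &c, sizeof(c));	/* hypothetical dongle-memory read helper */
	while (last_idx != c.log.idx) {
		read_dongle_mem(c.log.buf + last_idx, &ch, 1);	/* log.buf is a dongle address */
		putchar(ch);
		last_idx = (last_idx + 1) % c.log.buf_size;
	}
}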
diff --git a/drivers/net/wireless/bcmdhd/include/hndsoc.h b/drivers/net/wireless/bcmdhd/include/hndsoc.h
new file mode 100644
index 000000000000..66640c3b0cbd
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hndsoc.h
@@ -0,0 +1,235 @@
+/*
+ * Broadcom HND chip & on-chip-interconnect-related definitions.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndsoc.h 309193 2012-01-19 00:03:57Z $
+ */
+
+#ifndef _HNDSOC_H
+#define _HNDSOC_H
+
+/* Include the soci specific files */
+#include <sbconfig.h>
+#include <aidmp.h>
+
+/*
+ * SOC Interconnect Address Map.
+ * Not all regions exist on all chips.
+ */
+#define SI_SDRAM_BASE 0x00000000 /* Physical SDRAM */
+#define SI_PCI_MEM 0x08000000 /* Host Mode sb2pcitranslation0 (64 MB) */
+#define SI_PCI_MEM_SZ (64 * 1024 * 1024)
+#define SI_PCI_CFG 0x0c000000 /* Host Mode sb2pcitranslation1 (64 MB) */
+#define SI_SDRAM_SWAPPED 0x10000000 /* Byteswapped Physical SDRAM */
+#define SI_SDRAM_R2 0x80000000 /* Region 2 for sdram (512 MB) */
+
+#define SI_ENUM_BASE 0x18000000 /* Enumeration space base */
+
+#define SI_WRAP_BASE 0x18100000 /* Wrapper space base */
+#define SI_CORE_SIZE 0x1000 /* each core gets 4Kbytes for registers */
+#define SI_MAXCORES 16 /* Max cores (this is arbitrary, for software
+ * convenience and could be changed if we
+ * make any larger chips)
+ */
+
+#define SI_FASTRAM 0x19000000 /* On-chip RAM on chips that also have DDR */
+#define SI_FASTRAM_SWAPPED 0x19800000
+
+#define SI_FLASH2 0x1c000000 /* Flash Region 2 (region 1 shadowed here) */
+#define SI_FLASH2_SZ 0x02000000 /* Size of Flash Region 2 */
+#define SI_ARMCM3_ROM 0x1e000000 /* ARM Cortex-M3 ROM */
+#define SI_FLASH1 0x1fc00000 /* MIPS Flash Region 1 */
+#define SI_FLASH1_SZ 0x00400000 /* MIPS Size of Flash Region 1 */
+#define SI_ARM7S_ROM 0x20000000 /* ARM7TDMI-S ROM */
+#define SI_ARMCR4_ROM 0x000f0000 /* ARM Cortex-R4 ROM */
+#define SI_ARMCM3_SRAM2 0x60000000 /* ARM Cortex-M3 SRAM Region 2 */
+#define SI_ARM7S_SRAM2 0x80000000 /* ARM7TDMI-S SRAM Region 2 */
+#define SI_ARM_FLASH1 0xffff0000 /* ARM Flash Region 1 */
+#define SI_ARM_FLASH1_SZ 0x00010000 /* ARM Size of Flash Region 1 */
+
+#define SI_PCI_DMA 0x40000000 /* Client Mode sb2pcitranslation2 (1 GB) */
+#define SI_PCI_DMA2 0x80000000 /* Client Mode sb2pcitranslation2 (1 GB) */
+#define SI_PCI_DMA_SZ 0x40000000 /* Client Mode sb2pcitranslation2 size in bytes */
+#define SI_PCIE_DMA_L32 0x00000000 /* PCIE Client Mode sb2pcitranslation2
+ * (2 ZettaBytes), low 32 bits
+ */
+#define SI_PCIE_DMA_H32 0x80000000 /* PCIE Client Mode sb2pcitranslation2
+ * (2 ZettaBytes), high 32 bits
+ */
+
+/* core codes */
+#define NODEV_CORE_ID 0x700 /* Invalid coreid */
+#define CC_CORE_ID 0x800 /* chipcommon core */
+#define ILINE20_CORE_ID 0x801 /* iline20 core */
+#define SRAM_CORE_ID 0x802 /* sram core */
+#define SDRAM_CORE_ID 0x803 /* sdram core */
+#define PCI_CORE_ID 0x804 /* pci core */
+#define MIPS_CORE_ID 0x805 /* mips core */
+#define ENET_CORE_ID 0x806 /* enet mac core */
+#define CODEC_CORE_ID 0x807 /* v90 codec core */
+#define USB_CORE_ID 0x808 /* usb 1.1 host/device core */
+#define ADSL_CORE_ID 0x809 /* ADSL core */
+#define ILINE100_CORE_ID 0x80a /* iline100 core */
+#define IPSEC_CORE_ID 0x80b /* ipsec core */
+#define UTOPIA_CORE_ID 0x80c /* utopia core */
+#define PCMCIA_CORE_ID 0x80d /* pcmcia core */
+#define SOCRAM_CORE_ID 0x80e /* internal memory core */
+#define MEMC_CORE_ID 0x80f /* memc sdram core */
+#define OFDM_CORE_ID 0x810 /* OFDM phy core */
+#define EXTIF_CORE_ID 0x811 /* external interface core */
+#define D11_CORE_ID 0x812 /* 802.11 MAC core */
+#define APHY_CORE_ID 0x813 /* 802.11a phy core */
+#define BPHY_CORE_ID 0x814 /* 802.11b phy core */
+#define GPHY_CORE_ID 0x815 /* 802.11g phy core */
+#define MIPS33_CORE_ID 0x816 /* mips3302 core */
+#define USB11H_CORE_ID 0x817 /* usb 1.1 host core */
+#define USB11D_CORE_ID 0x818 /* usb 1.1 device core */
+#define USB20H_CORE_ID 0x819 /* usb 2.0 host core */
+#define USB20D_CORE_ID 0x81a /* usb 2.0 device core */
+#define SDIOH_CORE_ID 0x81b /* sdio host core */
+#define ROBO_CORE_ID 0x81c /* roboswitch core */
+#define ATA100_CORE_ID 0x81d /* parallel ATA core */
+#define SATAXOR_CORE_ID 0x81e /* serial ATA & XOR DMA core */
+#define GIGETH_CORE_ID 0x81f /* gigabit ethernet core */
+#define PCIE_CORE_ID 0x820 /* pci express core */
+#define NPHY_CORE_ID 0x821 /* 802.11n 2x2 phy core */
+#define SRAMC_CORE_ID 0x822 /* SRAM controller core */
+#define MINIMAC_CORE_ID 0x823 /* MINI MAC/phy core */
+#define ARM11_CORE_ID 0x824 /* ARM 1176 core */
+#define ARM7S_CORE_ID 0x825 /* ARM7tdmi-s core */
+#define LPPHY_CORE_ID 0x826 /* 802.11a/b/g phy core */
+#define PMU_CORE_ID 0x827 /* PMU core */
+#define SSNPHY_CORE_ID 0x828 /* 802.11n single-stream phy core */
+#define SDIOD_CORE_ID 0x829 /* SDIO device core */
+#define ARMCM3_CORE_ID 0x82a /* ARM Cortex M3 core */
+#define HTPHY_CORE_ID 0x82b /* 802.11n 4x4 phy core */
+#define MIPS74K_CORE_ID 0x82c /* mips 74k core */
+#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */
+#define DMEMC_CORE_ID 0x82e /* DDR1/2 memory controller core */
+#define PCIERC_CORE_ID 0x82f /* PCIE Root Complex core */
+#define OCP_CORE_ID 0x830 /* OCP2OCP bridge core */
+#define SC_CORE_ID 0x831 /* shared common core */
+#define AHB_CORE_ID 0x832 /* OCP2AHB bridge core */
+#define SPIH_CORE_ID 0x833 /* SPI host core */
+#define I2S_CORE_ID 0x834 /* I2S core */
+#define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */
+#define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */
+
+#define ACPHY_CORE_ID 0x83b /* Dot11 ACPHY */
+#define PCIE2_CORE_ID 0x83c /* pci express Gen2 core */
+#define USB30D_CORE_ID 0x83d /* usb 3.0 device core */
+#define ARMCR4_CORE_ID 0x83e /* ARM CR4 CPU */
+#define APB_BRIDGE_CORE_ID 0x135 /* APB bridge core ID */
+#define AXI_CORE_ID 0x301 /* AXI/GPV core ID */
+#define EROM_CORE_ID 0x366 /* EROM core ID */
+#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */
+#define DEF_AI_COMP 0xfff /* Default component, in ai chips it maps all
+ * unused address ranges
+ */
+
+#define CC_4706_CORE_ID 0x500 /* chipcommon core */
+#define SOCRAM_4706_CORE_ID 0x50e /* internal memory core */
+#define GMAC_COMMON_4706_CORE_ID 0x5dc /* Gigabit MAC core */
+#define GMAC_4706_CORE_ID 0x52d /* Gigabit MAC core */
+#define AMEMC_CORE_ID 0x52e /* DDR1/2 memory controller core */
+#define ALTA_CORE_ID 0x534 /* I2S core */
+#define DDR23_PHY_CORE_ID 0x5dd
+
+#define SI_PCI1_MEM 0x40000000 /* Host Mode sb2pcitranslation0 (64 MB) */
+#define SI_PCI1_CFG 0x44000000 /* Host Mode sb2pcitranslation1 (64 MB) */
+#define SI_PCIE1_DMA_H32 0xc0000000 /* PCIE Client Mode sb2pcitranslation2
+ * (2 ZettaBytes), high 32 bits
+ */
+#define CC_4706B0_CORE_REV 0x8000001f /* chipcommon core */
+#define SOCRAM_4706B0_CORE_REV 0x80000005 /* internal memory core */
+#define GMAC_4706B0_CORE_REV 0x80000000 /* Gigabit MAC core */
+
+/* There are TWO constants on all HND chips: SI_ENUM_BASE above,
+ * and chipcommon being the first core:
+ */
+#define SI_CC_IDX 0
+
+/* SOC Interconnect types (aka chip types) */
+#define SOCI_SB 0
+#define SOCI_AI 1
+#define SOCI_UBUS 2
+
+/* Common core control flags */
+#define SICF_BIST_EN 0x8000
+#define SICF_PME_EN 0x4000
+#define SICF_CORE_BITS 0x3ffc
+#define SICF_FGC 0x0002
+#define SICF_CLOCK_EN 0x0001
+
+/* Common core status flags */
+#define SISF_BIST_DONE 0x8000
+#define SISF_BIST_ERROR 0x4000
+#define SISF_GATED_CLK 0x2000
+#define SISF_DMA64 0x1000
+#define SISF_CORE_BITS 0x0fff
+
+/* A register that is common to all cores to
+ * communicate w/PMU regarding clock control.
+ */
+#define SI_CLK_CTL_ST 0x1e0 /* clock control and status */
+
+/* clk_ctl_st register */
+#define CCS_FORCEALP 0x00000001 /* force ALP request */
+#define CCS_FORCEHT 0x00000002 /* force HT request */
+#define CCS_FORCEILP 0x00000004 /* force ILP request */
+#define CCS_ALPAREQ 0x00000008 /* ALP Avail Request */
+#define CCS_HTAREQ 0x00000010 /* HT Avail Request */
+#define CCS_FORCEHWREQOFF 0x00000020 /* Force HW Clock Request Off */
+#define CCS_HQCLKREQ 0x00000040 /* HQ Clock Required */
+#define CCS_USBCLKREQ 0x00000100 /* USB Clock Req */
+#define CCS_ERSRC_REQ_MASK 0x00000700 /* external resource requests */
+#define CCS_ERSRC_REQ_SHIFT 8
+#define CCS_ALPAVAIL 0x00010000 /* ALP is available */
+#define CCS_HTAVAIL 0x00020000 /* HT is available */
+#define CCS_BP_ON_APL 0x00040000 /* RO: Backplane is running on ALP clock */
+#define CCS_BP_ON_HT 0x00080000 /* RO: Backplane is running on HT clock */
+#define CCS_ERSRC_STS_MASK 0x07000000 /* external resource status */
+#define CCS_ERSRC_STS_SHIFT 24
+
+#define CCS0_HTAVAIL 0x00010000 /* HT avail in chipc and pcmcia on 4328a0 */
+#define CCS0_ALPAVAIL 0x00020000 /* ALP avail in chipc and pcmcia on 4328a0 */
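As an illustration of how these bits are typically used (a sketch only; osh, regs and the OSL register accessors from linux_osl.h are assumed), a driver requests the HT clock and then polls for it to become available:

static void request_ht_clock(osl_t *osh, volatile void *regs)
{
	volatile uint32 *ccs = (volatile uint32 *)((uintptr)regs + SI_CLK_CTL_ST);

	OR_REG(osh, ccs, CCS_HTAREQ);			/* request the HT clock */
	while (!(R_REG(osh, ccs) & CCS_HTAVAIL))	/* poll until it is available */
		OSL_DELAY(10);
}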
+
+/* Not really related to SOC Interconnect, but a couple of software
+ * conventions for the use of the flash space:
+ */
+
+/* Minimum amount of flash we support */
+#define FLASH_MIN 0x00020000 /* Minimum flash size */
+
+/* A boot/binary may have an embedded block that describes its size */
+#define BISZ_OFFSET 0x3e0 /* At this offset into the binary */
+#define BISZ_MAGIC 0x4249535a /* Marked with this value: 'BISZ' */
+#define BISZ_MAGIC_IDX 0 /* Word 0: magic */
+#define BISZ_TXTST_IDX 1 /* 1: text start */
+#define BISZ_TXTEND_IDX 2 /* 2: text end */
+#define BISZ_DATAST_IDX 3 /* 3: data start */
+#define BISZ_DATAEND_IDX 4 /* 4: data end */
+#define BISZ_BSSST_IDX 5 /* 5: bss start */
+#define BISZ_BSSEND_IDX 6 /* 6: bss end */
+#define BISZ_SIZE 7 /* descriptor size in 32-bit integers */
+
+#endif /* _HNDSOC_H */
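A sketch of how the embedded BISZ descriptor can be located in a firmware image; img and img_len are assumptions introduced for the example:

static void read_bisz(const uint8 *img, uint32 img_len)
{
	const uint32 *bisz = (const uint32 *)(img + BISZ_OFFSET);

	if (img_len < BISZ_OFFSET + BISZ_SIZE * sizeof(uint32))
		return;
	if (bisz[BISZ_MAGIC_IDX] != BISZ_MAGIC)
		return;					/* no embedded size block */

	/* section extents, as addresses in the image's address space:
	 * text: bisz[BISZ_TXTST_IDX]  .. bisz[BISZ_TXTEND_IDX]
	 * data: bisz[BISZ_DATAST_IDX] .. bisz[BISZ_DATAEND_IDX]
	 * bss:  bisz[BISZ_BSSST_IDX]  .. bisz[BISZ_BSSEND_IDX]
	 */
}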
diff --git a/drivers/net/wireless/bcmdhd/include/linux_osl.h b/drivers/net/wireless/bcmdhd/include/linux_osl.h
new file mode 100644
index 000000000000..257aaf642f8a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/linux_osl.h
@@ -0,0 +1,422 @@
+/*
+ * Linux OS Independent Layer
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: linux_osl.h 326751 2012-04-10 20:13:19Z $
+ */
+
+#ifndef _linux_osl_h_
+#define _linux_osl_h_
+
+#include <typedefs.h>
+
+
+extern void * osl_os_open_image(char * filename);
+extern int osl_os_get_image_block(char * buf, int len, void * image);
+extern void osl_os_close_image(void * image);
+extern int osl_os_image_size(void *image);
+
+
+#ifdef BCMDRIVER
+
+
+extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag);
+extern void osl_detach(osl_t *osh);
+
+
+extern uint32 g_assert_type;
+
+
+#if defined(BCMASSERT_LOG)
+ #define ASSERT(exp) \
+ do { if (!(exp)) osl_assert(#exp, __FILE__, __LINE__); } while (0)
+extern void osl_assert(const char *exp, const char *file, int line);
+#else
+ #ifdef __GNUC__
+ #define GCC_VERSION \
+ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+ #if GCC_VERSION > 30100
+ #define ASSERT(exp) do {} while (0)
+ #else
+
+ #define ASSERT(exp)
+ #endif
+ #endif
+#endif
+
+
+#define OSL_DELAY(usec) osl_delay(usec)
+extern void osl_delay(uint usec);
+
+#define OSL_PCMCIA_READ_ATTR(osh, offset, buf, size) \
+ osl_pcmcia_read_attr((osh), (offset), (buf), (size))
+#define OSL_PCMCIA_WRITE_ATTR(osh, offset, buf, size) \
+ osl_pcmcia_write_attr((osh), (offset), (buf), (size))
+extern void osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size);
+extern void osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size);
+
+
+#define OSL_PCI_READ_CONFIG(osh, offset, size) \
+ osl_pci_read_config((osh), (offset), (size))
+#define OSL_PCI_WRITE_CONFIG(osh, offset, size, val) \
+ osl_pci_write_config((osh), (offset), (size), (val))
+extern uint32 osl_pci_read_config(osl_t *osh, uint offset, uint size);
+extern void osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val);
+
+
+#define OSL_PCI_BUS(osh) osl_pci_bus(osh)
+#define OSL_PCI_SLOT(osh) osl_pci_slot(osh)
+extern uint osl_pci_bus(osl_t *osh);
+extern uint osl_pci_slot(osl_t *osh);
+extern struct pci_dev *osl_pci_device(osl_t *osh);
+
+
+typedef struct {
+ bool pkttag;
+ uint pktalloced;
+ bool mmbus;
+ pktfree_cb_fn_t tx_fn;
+ void *tx_ctx;
+ void *unused[3];
+} osl_pubinfo_t;
+
+#define PKTFREESETCB(osh, _tx_fn, _tx_ctx) \
+ do { \
+ ((osl_pubinfo_t*)osh)->tx_fn = _tx_fn; \
+ ((osl_pubinfo_t*)osh)->tx_ctx = _tx_ctx; \
+ } while (0)
+
+
+
+#define BUS_SWAP32(v) (v)
+
+ #define MALLOC(osh, size) osl_malloc((osh), (size))
+ #define MFREE(osh, addr, size) osl_mfree((osh), (addr), (size))
+ #define MALLOCED(osh) osl_malloced((osh))
+ extern void *osl_malloc(osl_t *osh, uint size);
+ extern void osl_mfree(osl_t *osh, void *addr, uint size);
+ extern uint osl_malloced(osl_t *osh);
+
+#define NATIVE_MALLOC(osh, size) kmalloc(size, GFP_ATOMIC)
+#define NATIVE_MFREE(osh, addr, size) kfree(addr)
+
+#define MALLOC_FAILED(osh) osl_malloc_failed((osh))
+extern uint osl_malloc_failed(osl_t *osh);
+
+
+#define DMA_CONSISTENT_ALIGN osl_dma_consistent_align()
+#define DMA_ALLOC_CONSISTENT(osh, size, align, tot, pap, dmah) \
+ osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap))
+#define DMA_FREE_CONSISTENT(osh, va, size, pa, dmah) \
+ osl_dma_free_consistent((osh), (void*)(va), (size), (pa))
+extern uint osl_dma_consistent_align(void);
+extern void *osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align, uint *tot, ulong *pap);
+extern void osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa);
+
+
+#define DMA_TX 1
+#define DMA_RX 2
+
+
+#define DMA_UNMAP(osh, pa, size, direction, p, dmah) \
+ osl_dma_unmap((osh), (pa), (size), (direction))
+extern uint osl_dma_map(osl_t *osh, void *va, uint size, int direction);
+extern void osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction);
+
+
+#define OSL_DMADDRWIDTH(osh, addrwidth) do {} while (0)
+
+
+ #include <bcmsdh.h>
+ #define OSL_WRITE_REG(osh, r, v) (bcmsdh_reg_write(NULL, (uintptr)(r), sizeof(*(r)), (v)))
+ #define OSL_READ_REG(osh, r) (bcmsdh_reg_read(NULL, (uintptr)(r), sizeof(*(r))))
+
+ #define SELECT_BUS_WRITE(osh, mmap_op, bus_op) if (((osl_pubinfo_t*)(osh))->mmbus) \
+ mmap_op else bus_op
+ #define SELECT_BUS_READ(osh, mmap_op, bus_op) (((osl_pubinfo_t*)(osh))->mmbus) ? \
+ mmap_op : bus_op
+
+#define OSL_ERROR(bcmerror) osl_error(bcmerror)
+extern int osl_error(int bcmerror);
+
+
+#define PKTBUFSZ 2048
+
+
+#include <linuxver.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#define OSL_SYSUPTIME() ((uint32)jiffies * (1000 / HZ))
+#define printf(fmt, args...) printk(fmt , ## args)
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#define bcopy(src, dst, len) memcpy((dst), (src), (len))
+#define bcmp(b1, b2, len) memcmp((b1), (b2), (len))
+#define bzero(b, len) memset((b), '\0', (len))
+
+
+
+#define R_REG(osh, r) (\
+ SELECT_BUS_READ(osh, \
+ ({ \
+ __typeof(*(r)) __osl_v; \
+ BCM_REFERENCE(osh); \
+ switch (sizeof(*(r))) { \
+ case sizeof(uint8): __osl_v = \
+ readb((volatile uint8*)(r)); break; \
+ case sizeof(uint16): __osl_v = \
+ readw((volatile uint16*)(r)); break; \
+ case sizeof(uint32): __osl_v = \
+ readl((volatile uint32*)(r)); break; \
+ } \
+ __osl_v; \
+ }), \
+ OSL_READ_REG(osh, r)) \
+)
+
+#define W_REG(osh, r, v) do { \
+ BCM_REFERENCE(osh); \
+ SELECT_BUS_WRITE(osh, \
+ switch (sizeof(*(r))) { \
+ case sizeof(uint8): writeb((uint8)(v), (volatile uint8*)(r)); break; \
+ case sizeof(uint16): writew((uint16)(v), (volatile uint16*)(r)); break; \
+ case sizeof(uint32): writel((uint32)(v), (volatile uint32*)(r)); break; \
+ }, \
+ (OSL_WRITE_REG(osh, r, v))); \
+ } while (0)
+
+#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v))
+#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v))
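Minimal usage sketch for the register accessors above; the register offset is hypothetical, and on an SDIO bus (mmbus false) the OSL_READ_REG/OSL_WRITE_REG path is taken instead of the memory-mapped one:

static void toggle_bits(osl_t *osh, volatile uint8 *regbase)
{
	/* 0x20 is a hypothetical register offset within the mapped core */
	volatile uint32 *reg = (volatile uint32 *)(regbase + 0x20);
	uint32 v;

	v = R_REG(osh, reg);		/* sized by sizeof(*r): a readl() on a memory-mapped bus */
	W_REG(osh, reg, v & ~0x1);	/* clear bit 0 */
	OR_REG(osh, reg, 0x2);		/* read-modify-write helper built on the two above */
}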
+
+
+#define bcopy(src, dst, len) memcpy((dst), (src), (len))
+#define bcmp(b1, b2, len) memcmp((b1), (b2), (len))
+#define bzero(b, len) memset((b), '\0', (len))
+
+
+#define OSL_UNCACHED(va) ((void *)va)
+#define OSL_CACHED(va) ((void *)va)
+
+#define OSL_PREF_RANGE_LD(va, sz)
+#define OSL_PREF_RANGE_ST(va, sz)
+
+
+#if defined(__i386__)
+#define OSL_GETCYCLES(x) rdtscl((x))
+#else
+#define OSL_GETCYCLES(x) ((x) = 0)
+#endif
+
+
+#define BUSPROBE(val, addr) ({ (val) = R_REG(NULL, (addr)); 0; })
+
+
+#if !defined(CONFIG_MMC_MSM7X00A)
+#define REG_MAP(pa, size) ioremap_nocache((unsigned long)(pa), (unsigned long)(size))
+#else
+#define REG_MAP(pa, size) (void *)(0)
+#endif
+#define REG_UNMAP(va) iounmap((va))
+
+
+#define R_SM(r) *(r)
+#define W_SM(r, v) (*(r) = (v))
+#define BZERO_SM(r, len) memset((r), '\0', (len))
+
+
+#include <linuxver.h>
+
+
+#define PKTGET(osh, len, send) osl_pktget((osh), (len))
+#define PKTDUP(osh, skb) osl_pktdup((osh), (skb))
+#define PKTLIST_DUMP(osh, buf)
+#define PKTDBG_TRACE(osh, pkt, bit)
+#define PKTFREE(osh, skb, send) osl_pktfree((osh), (skb), (send))
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+#define PKTGET_STATIC(osh, len, send) osl_pktget_static((osh), (len))
+#define PKTFREE_STATIC(osh, skb, send) osl_pktfree_static((osh), (skb), (send))
+#endif
+#define PKTDATA(osh, skb) (((struct sk_buff*)(skb))->data)
+#define PKTLEN(osh, skb) (((struct sk_buff*)(skb))->len)
+#define PKTHEADROOM(osh, skb) (PKTDATA(osh, skb)-(((struct sk_buff*)(skb))->head))
+#define PKTTAILROOM(osh, skb) ((((struct sk_buff*)(skb))->end)-(((struct sk_buff*)(skb))->tail))
+#define PKTNEXT(osh, skb) (((struct sk_buff*)(skb))->next)
+#define PKTSETNEXT(osh, skb, x) (((struct sk_buff*)(skb))->next = (struct sk_buff*)(x))
+#define PKTSETLEN(osh, skb, len) __skb_trim((struct sk_buff*)(skb), (len))
+#define PKTPUSH(osh, skb, bytes) skb_push((struct sk_buff*)(skb), (bytes))
+#define PKTPULL(osh, skb, bytes) skb_pull((struct sk_buff*)(skb), (bytes))
+#define PKTTAG(skb) ((void*)(((struct sk_buff*)(skb))->cb))
+#define PKTALLOCED(osh) ((osl_pubinfo_t *)(osh))->pktalloced
+#define PKTSETPOOL(osh, skb, x, y) do {} while (0)
+#define PKTPOOL(osh, skb) FALSE
+#define PKTSHRINK(osh, m) (m)
+
+#ifdef CTFPOOL
+#define CTFPOOL_REFILL_THRESH 3
+typedef struct ctfpool {
+ void *head;
+ spinlock_t lock;
+ uint max_obj;
+ uint curr_obj;
+ uint obj_size;
+ uint refills;
+ uint fast_allocs;
+ uint fast_frees;
+ uint slow_allocs;
+} ctfpool_t;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+#define FASTBUF (1 << 16)
+#define CTFBUF (1 << 17)
+#define PKTSETFAST(osh, skb) ((((struct sk_buff*)(skb))->mac_len) |= FASTBUF)
+#define PKTCLRFAST(osh, skb) ((((struct sk_buff*)(skb))->mac_len) &= (~FASTBUF))
+#define PKTSETCTF(osh, skb) ((((struct sk_buff*)(skb))->mac_len) |= CTFBUF)
+#define PKTCLRCTF(osh, skb) ((((struct sk_buff*)(skb))->mac_len) &= (~CTFBUF))
+#define PKTISFAST(osh, skb) ((((struct sk_buff*)(skb))->mac_len) & FASTBUF)
+#define PKTISCTF(osh, skb) ((((struct sk_buff*)(skb))->mac_len) & CTFBUF)
+#define PKTFAST(osh, skb) (((struct sk_buff*)(skb))->mac_len)
+#else
+#define FASTBUF (1 << 0)
+#define CTFBUF (1 << 1)
+#define PKTSETFAST(osh, skb) ((((struct sk_buff*)(skb))->__unused) |= FASTBUF)
+#define PKTCLRFAST(osh, skb) ((((struct sk_buff*)(skb))->__unused) &= (~FASTBUF))
+#define PKTSETCTF(osh, skb) ((((struct sk_buff*)(skb))->__unused) |= CTFBUF)
+#define PKTCLRCTF(osh, skb) ((((struct sk_buff*)(skb))->__unused) &= (~CTFBUF))
+#define PKTISFAST(osh, skb) ((((struct sk_buff*)(skb))->__unused) & FASTBUF)
+#define PKTISCTF(osh, skb) ((((struct sk_buff*)(skb))->__unused) & CTFBUF)
+#define PKTFAST(osh, skb) (((struct sk_buff*)(skb))->__unused)
+#endif
+
+#define CTFPOOLPTR(osh, skb) (((struct sk_buff*)(skb))->sk)
+#define CTFPOOLHEAD(osh, skb) (((ctfpool_t *)((struct sk_buff*)(skb))->sk)->head)
+
+extern void *osl_ctfpool_add(osl_t *osh);
+extern void osl_ctfpool_replenish(osl_t *osh, uint thresh);
+extern int32 osl_ctfpool_init(osl_t *osh, uint numobj, uint size);
+extern void osl_ctfpool_cleanup(osl_t *osh);
+extern void osl_ctfpool_stats(osl_t *osh, void *b);
+#endif
+
+
+#ifdef HNDCTF
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+#define SKIPCT (1 << 18)
+#define PKTSETSKIPCT(osh, skb) (((struct sk_buff*)(skb))->mac_len |= SKIPCT)
+#define PKTCLRSKIPCT(osh, skb) (((struct sk_buff*)(skb))->mac_len &= (~SKIPCT))
+#define PKTSKIPCT(osh, skb) (((struct sk_buff*)(skb))->mac_len & SKIPCT)
+#else
+#define SKIPCT (1 << 2)
+#define PKTSETSKIPCT(osh, skb) (((struct sk_buff*)(skb))->__unused |= SKIPCT)
+#define PKTCLRSKIPCT(osh, skb) (((struct sk_buff*)(skb))->__unused &= (~SKIPCT))
+#define PKTSKIPCT(osh, skb) (((struct sk_buff*)(skb))->__unused & SKIPCT)
+#endif
+#else
+#define PKTSETSKIPCT(osh, skb)
+#define PKTCLRSKIPCT(osh, skb)
+#define PKTSKIPCT(osh, skb)
+#endif
+
+extern void osl_pktfree(osl_t *osh, void *skb, bool send);
+extern void *osl_pktget_static(osl_t *osh, uint len);
+extern void osl_pktfree_static(osl_t *osh, void *skb, bool send);
+
+extern void *osl_pkt_frmnative(osl_t *osh, void *skb);
+extern void *osl_pktget(osl_t *osh, uint len);
+extern void *osl_pktdup(osl_t *osh, void *skb);
+extern struct sk_buff *osl_pkt_tonative(osl_t *osh, void *pkt);
+#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative(((osl_t *)osh), (struct sk_buff*)(skb))
+#define PKTTONATIVE(osh, pkt) osl_pkt_tonative((osl_t *)(osh), (pkt))
+
+#define PKTLINK(skb) (((struct sk_buff*)(skb))->prev)
+#define PKTSETLINK(skb, x) (((struct sk_buff*)(skb))->prev = (struct sk_buff*)(x))
+#define PKTPRIO(skb) (((struct sk_buff*)(skb))->priority)
+#define PKTSETPRIO(skb, x) (((struct sk_buff*)(skb))->priority = (x))
+#define PKTSUMNEEDED(skb) (((struct sk_buff*)(skb))->ip_summed == CHECKSUM_HW)
+#define PKTSETSUMGOOD(skb, x) (((struct sk_buff*)(skb))->ip_summed = \
+ ((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE))
+
+#define PKTSHARED(skb) (((struct sk_buff*)(skb))->cloned)
+
+#define DMA_MAP(osh, va, size, direction, p, dmah) \
+ osl_dma_map((osh), (va), (size), (direction))
+
+#ifdef PKTC
+
+struct chain_node {
+ struct sk_buff *link;
+ unsigned int flags:3, pkts:9, bytes:20;
+};
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+#define CHAIN_NODE(skb) ((struct chain_node*)&(((struct sk_buff*)skb)->tstamp))
+#else
+#define CHAIN_NODE(skb) ((struct chain_node*)&(((struct sk_buff*)skb)->stamp))
+#endif
+
+#define PKTCCNT(skb) (CHAIN_NODE(skb)->pkts)
+#define PKTCLEN(skb) (CHAIN_NODE(skb)->bytes)
+#define PKTCFLAGS(skb) (CHAIN_NODE(skb)->flags)
+#define PKTCSETCNT(skb, c) (CHAIN_NODE(skb)->pkts = (c) & ((1 << 9) - 1))
+#define PKTCSETLEN(skb, l) (CHAIN_NODE(skb)->bytes = (l) & ((1 << 20) - 1))
+#define PKTCSETFLAG(skb, fb) (CHAIN_NODE(skb)->flags |= (fb))
+#define PKTCCLRFLAG(skb, fb) (CHAIN_NODE(skb)->flags &= ~(fb))
+#define PKTCLINK(skb) (CHAIN_NODE(skb)->link)
+#define PKTSETCLINK(skb, x) (CHAIN_NODE(skb)->link = (struct sk_buff*)(x))
+#define PKTISCHAINED(skb) (PKTCLINK(skb) != NULL)
+#define FOREACH_CHAINED_PKT(skb, nskb) \
+ for (; (skb) != NULL; (skb) = (nskb)) \
+ if ((nskb) = PKTCLINK(skb), PKTSETCLINK((skb), NULL), 1)
+#define PKTCFREE(osh, skb, send) \
+do { \
+ void *nskb; \
+ ASSERT((skb) != NULL); \
+ FOREACH_CHAINED_PKT((skb), nskb) { \
+ PKTFREE((osh), (skb), (send)); \
+ } \
+} while (0)
+#endif
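Sketch of walking a chain with the iterator above; note that FOREACH_CHAINED_PKT unlinks each buffer as it advances, so the chain is consumed by the walk unless it is re-linked in the body:

static uint drain_chain(osl_t *osh, void *skb)
{
	void *nskb;
	uint bytes = 0;

	FOREACH_CHAINED_PKT(skb, nskb) {
		bytes += PKTLEN(osh, skb);
		PKTFREE(osh, skb, FALSE);	/* or PKTSETCLINK(skb, nskb) to preserve the chain */
	}
	return bytes;
}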
+
+#else
+
+
+
+ #define ASSERT(exp) do {} while (0)
+
+
+#define MALLOC(o, l) malloc(l)
+#define MFREE(o, p, l) free(p)
+#include <stdlib.h>
+
+
+#include <string.h>
+
+
+#include <stdio.h>
+
+
+extern void bcopy(const void *src, void *dst, size_t len);
+extern int bcmp(const void *b1, const void *b2, size_t len);
+extern void bzero(void *b, size_t len);
+#endif
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/linuxver.h b/drivers/net/wireless/bcmdhd/include/linuxver.h
new file mode 100644
index 000000000000..c77ce558f5ca
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/linuxver.h
@@ -0,0 +1,608 @@
+/*
+ * Linux-specific abstractions to gain some independence from linux kernel versions.
+ * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: linuxver.h 315203 2012-02-16 00:58:00Z $
+ */
+
+#ifndef _linuxver_h_
+#define _linuxver_h_
+
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#include <linux/config.h>
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33))
+#include <generated/autoconf.h>
+#else
+#include <linux/autoconf.h>
+#endif
+#endif
+#include <linux/module.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))
+
+#ifdef __UNDEF_NO_VERSION__
+#undef __NO_VERSION__
+#else
+#define __NO_VERSION__
+#endif
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
+#define module_param(_name_, _type_, _perm_) MODULE_PARM(_name_, "i")
+#define module_param_string(_name_, _string_, _size_, _perm_) \
+ MODULE_PARM(_string_, "c" __MODULE_STRING(_size_))
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9))
+#include <linux/malloc.h>
+#else
+#include <linux/slab.h>
+#endif
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+#include <linux/semaphore.h>
+#else
+#include <asm/semaphore.h>
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
+#undef IP_TOS
+#endif
+#include <asm/io.h>
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
+#include <linux/workqueue.h>
+#else
+#include <linux/tqueue.h>
+#ifndef work_struct
+#define work_struct tq_struct
+#endif
+#ifndef INIT_WORK
+#define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data))
+#endif
+#ifndef schedule_work
+#define schedule_work(_work) schedule_task((_work))
+#endif
+#ifndef flush_scheduled_work
+#define flush_scheduled_work() flush_scheduled_tasks()
+#endif
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define DAEMONIZE(a) daemonize(a); \
+ allow_signal(SIGKILL); \
+ allow_signal(SIGTERM);
+#else /* Linux 2.4 (w/o preemption patch) */
+#define DAEMONIZE(a) daemonize(); \
+ do { if (a) \
+ strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a) + 1))); \
+ } while (0);
+#endif /* LINUX_VERSION_CODE */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func)
+#else
+#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func, _work)
+#if !(LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18) && defined(RHEL_MAJOR) && \
+ (RHEL_MAJOR == 5))
+
+typedef void (*work_func_t)(void *work);
+#endif
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+
+#ifndef IRQ_NONE
+typedef void irqreturn_t;
+#define IRQ_NONE
+#define IRQ_HANDLED
+#define IRQ_RETVAL(x)
+#endif
+#else
+typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, struct pt_regs *ptregs);
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
+#define IRQF_SHARED SA_SHIRQ
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
+#ifdef CONFIG_NET_RADIO
+#define CONFIG_WIRELESS_EXT
+#endif
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67)
+#define MOD_INC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
+#include <linux/sched.h>
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
+#include <net/lib80211.h>
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
+#include <linux/ieee80211.h>
+#else
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+#include <net/ieee80211.h>
+#endif
+#endif
+
+
+#ifndef __exit
+#define __exit
+#endif
+#ifndef __devinitdata
+#define __devinitdata
+#endif
+#ifndef __devexit_p
+#define __devexit_p(x) x
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0))
+
+#define pci_get_drvdata(dev) (dev)->sysdata
+#define pci_set_drvdata(dev, value) (dev)->sysdata = (value)
+
+
+
+struct pci_device_id {
+ unsigned int vendor, device;
+ unsigned int subvendor, subdevice;
+ unsigned int class, class_mask;
+ unsigned long driver_data;
+};
+
+struct pci_driver {
+ struct list_head node;
+ char *name;
+ const struct pci_device_id *id_table;
+ int (*probe)(struct pci_dev *dev,
+ const struct pci_device_id *id);
+ void (*remove)(struct pci_dev *dev);
+ void (*suspend)(struct pci_dev *dev);
+ void (*resume)(struct pci_dev *dev);
+};
+
+#define MODULE_DEVICE_TABLE(type, name)
+#define PCI_ANY_ID (~0)
+
+
+#define pci_module_init pci_register_driver
+extern int pci_register_driver(struct pci_driver *drv);
+extern void pci_unregister_driver(struct pci_driver *drv);
+
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18))
+#define pci_module_init pci_register_driver
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18))
+#ifdef MODULE
+#define module_init(x) int init_module(void) { return x(); }
+#define module_exit(x) void cleanup_module(void) { x(); }
+#else
+#define module_init(x) __initcall(x);
+#define module_exit(x) __exitcall(x);
+#endif
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
+#define WL_USE_NETDEV_OPS
+#else
+#undef WL_USE_NETDEV_OPS
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL)
+#define WL_CONFIG_RFKILL
+#else
+#undef WL_CONFIG_RFKILL
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48))
+#define list_for_each(pos, head) \
+ for (pos = (head)->next; pos != (head); pos = pos->next)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13))
+#define pci_resource_start(dev, bar) ((dev)->base_address[(bar)])
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44))
+#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23))
+#define pci_enable_device(dev) do { } while (0)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14))
+#define net_device device
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42))
+
+
+
+#ifndef PCI_DMA_TODEVICE
+#define PCI_DMA_TODEVICE 1
+#define PCI_DMA_FROMDEVICE 2
+#endif
+
+typedef u32 dma_addr_t;
+
+
+static inline int get_order(unsigned long size)
+{
+ int order;
+
+ size = (size-1) >> (PAGE_SHIFT-1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+}
+
+static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+ dma_addr_t *dma_handle)
+{
+ void *ret;
+ int gfp = GFP_ATOMIC | GFP_DMA;
+
+ ret = (void *)__get_free_pages(gfp, get_order(size));
+
+ if (ret != NULL) {
+ memset(ret, 0, size);
+ *dma_handle = virt_to_bus(ret);
+ }
+ return ret;
+}
+static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ free_pages((unsigned long)vaddr, get_order(size));
+}
+#define pci_map_single(cookie, address, size, dir) virt_to_bus(address)
+#define pci_unmap_single(cookie, address, size, dir)
+
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43))
+
+#define dev_kfree_skb_any(a) dev_kfree_skb(a)
+#define netif_down(dev) do { (dev)->start = 0; } while (0)
+
+
+#ifndef _COMPAT_NETDEVICE_H
+
+
+
+#define dev_kfree_skb_irq(a) dev_kfree_skb(a)
+#define netif_wake_queue(dev) \
+ do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0)
+#define netif_stop_queue(dev) set_bit(0, &(dev)->tbusy)
+
+static inline void netif_start_queue(struct net_device *dev)
+{
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+}
+
+#define netif_queue_stopped(dev) (dev)->tbusy
+#define netif_running(dev) (dev)->start
+
+#endif
+
+#define netif_device_attach(dev) netif_start_queue(dev)
+#define netif_device_detach(dev) netif_stop_queue(dev)
+
+
+#define tasklet_struct tq_struct
+static inline void tasklet_schedule(struct tasklet_struct *tasklet)
+{
+ queue_task(tasklet, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+}
+
+static inline void tasklet_init(struct tasklet_struct *tasklet,
+ void (*func)(unsigned long),
+ unsigned long data)
+{
+ tasklet->next = NULL;
+ tasklet->sync = 0;
+ tasklet->routine = (void (*)(void *))func;
+ tasklet->data = (void *)data;
+}
+#define tasklet_kill(tasklet) { do {} while (0); }
+
+
+#define del_timer_sync(timer) del_timer(timer)
+
+#else
+
+#define netif_down(dev)
+
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3))
+
+
+#define PREPARE_TQUEUE(_tq, _routine, _data) \
+ do { \
+ (_tq)->routine = _routine; \
+ (_tq)->data = _data; \
+ } while (0)
+
+
+#define INIT_TQUEUE(_tq, _routine, _data) \
+ do { \
+ INIT_LIST_HEAD(&(_tq)->list); \
+ (_tq)->sync = 0; \
+ PREPARE_TQUEUE((_tq), (_routine), (_data)); \
+ } while (0)
+
+#endif
+
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9)
+#define PCI_SAVE_STATE(a, b) pci_save_state(a)
+#define PCI_RESTORE_STATE(a, b) pci_restore_state(a)
+#else
+#define PCI_SAVE_STATE(a, b) pci_save_state(a, b)
+#define PCI_RESTORE_STATE(a, b) pci_restore_state(a, b)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6))
+static inline int
+pci_save_state(struct pci_dev *dev, u32 *buffer)
+{
+ int i;
+ if (buffer) {
+ for (i = 0; i < 16; i++)
+ pci_read_config_dword(dev, i * 4, &buffer[i]);
+ }
+ return 0;
+}
+
+static inline int
+pci_restore_state(struct pci_dev *dev, u32 *buffer)
+{
+ int i;
+
+ if (buffer) {
+ for (i = 0; i < 16; i++)
+ pci_write_config_dword(dev, i * 4, buffer[i]);
+ }
+
+ else {
+ for (i = 0; i < 6; i ++)
+ pci_write_config_dword(dev,
+ PCI_BASE_ADDRESS_0 + (i * 4),
+ pci_resource_start(dev, i));
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+ }
+ return 0;
+}
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19))
+#define read_c0_count() read_32bit_cp0_register(CP0_COUNT)
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#ifndef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev) do {} while (0)
+#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
+#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
+#else
+#define OLD_MOD_INC_USE_COUNT do {} while (0)
+#define OLD_MOD_DEC_USE_COUNT do {} while (0)
+#endif
+#else
+#ifndef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev) do {} while (0)
+#endif
+#ifndef MOD_INC_USE_COUNT
+#define MOD_INC_USE_COUNT do {} while (0)
+#endif
+#ifndef MOD_DEC_USE_COUNT
+#define MOD_DEC_USE_COUNT do {} while (0)
+#endif
+#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
+#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
+#endif
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev) do {} while (0)
+#endif
+
+#ifndef HAVE_FREE_NETDEV
+#define free_netdev(dev) kfree(dev)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+
+#define af_packet_priv data
+#endif
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
+#define DRV_SUSPEND_STATE_TYPE pm_message_t
+#else
+#define DRV_SUSPEND_STATE_TYPE uint32
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+#define CHECKSUM_HW CHECKSUM_PARTIAL
+#endif
+
+typedef struct {
+ void *parent;
+ struct task_struct *p_task;
+ long thr_pid;
+ int prio;
+ struct semaphore sema;
+ int terminated;
+ struct completion completed;
+} tsk_ctl_t;
+
+
+
+
+#ifdef DHD_DEBUG
+#define DBG_THR(x) printk x
+#else
+#define DBG_THR(x)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define SMP_RD_BARRIER_DEPENDS(x) smp_read_barrier_depends(x)
+#else
+#define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x)
+#endif
+
+
+#define PROC_START(thread_func, owner, tsk_ctl, flags) \
+{ \
+ sema_init(&((tsk_ctl)->sema), 0); \
+ init_completion(&((tsk_ctl)->completed)); \
+ (tsk_ctl)->parent = owner; \
+ (tsk_ctl)->terminated = FALSE; \
+ (tsk_ctl)->thr_pid = kernel_thread(thread_func, tsk_ctl, flags); \
+ if ((tsk_ctl)->thr_pid > 0) \
+ wait_for_completion(&((tsk_ctl)->completed)); \
+ DBG_THR(("%s thr:%lx started\n", __FUNCTION__, (tsk_ctl)->thr_pid)); \
+}
+
+#define PROC_STOP(tsk_ctl) \
+{ \
+ (tsk_ctl)->terminated = TRUE; \
+ smp_wmb(); \
+ up(&((tsk_ctl)->sema)); \
+ wait_for_completion(&((tsk_ctl)->completed)); \
+ DBG_THR(("%s thr:%lx terminated OK\n", __FUNCTION__, (tsk_ctl)->thr_pid)); \
+ (tsk_ctl)->thr_pid = -1; \
+}
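The thread function these macros drive is expected to follow roughly this shape (a sketch based on how DHD uses tsk_ctl_t, not spelled out in this header): signal the completion once at start-up and once on exit, and sleep on the semaphore between work items.

static int example_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;

	DAEMONIZE("example_thread");
	complete(&tsk->completed);		/* unblocks PROC_START */

	while (down_interruptible(&tsk->sema) == 0) {
		if (tsk->terminated)
			break;
		/* ... handle one unit of work ... */
	}

	complete_and_exit(&tsk->completed, 0);	/* unblocks PROC_STOP */
	return 0;
}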
+
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+#define KILL_PROC(nr, sig) \
+{ \
+struct task_struct *tsk; \
+struct pid *pid; \
+pid = find_get_pid((pid_t)nr); \
+tsk = pid_task(pid, PIDTYPE_PID); \
+if (tsk) send_sig(sig, tsk, 1); \
+}
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
+ KERNEL_VERSION(2, 6, 30))
+#define KILL_PROC(pid, sig) \
+{ \
+ struct task_struct *tsk; \
+ tsk = find_task_by_vpid(pid); \
+ if (tsk) send_sig(sig, tsk, 1); \
+}
+#else
+#define KILL_PROC(pid, sig) \
+{ \
+ kill_proc(pid, sig, 1); \
+}
+#endif
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#include <linux/time.h>
+#include <linux/wait.h>
+#else
+#include <linux/sched.h>
+
+#define __wait_event_interruptible_timeout(wq, condition, ret) \
+do { \
+ wait_queue_t __wait; \
+ init_waitqueue_entry(&__wait, current); \
+ \
+ add_wait_queue(&wq, &__wait); \
+ for (;;) { \
+ set_current_state(TASK_INTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ if (!signal_pending(current)) { \
+ ret = schedule_timeout(ret); \
+ if (!ret) \
+ break; \
+ continue; \
+ } \
+ ret = -ERESTARTSYS; \
+ break; \
+ } \
+ current->state = TASK_RUNNING; \
+ remove_wait_queue(&wq, &__wait); \
+} while (0)
+
+#define wait_event_interruptible_timeout(wq, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ if (!(condition)) \
+ __wait_event_interruptible_timeout(wq, condition, __ret); \
+ __ret; \
+})
+
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#define DEV_PRIV(dev) (dev->priv)
+#else
+#define DEV_PRIV(dev) netdev_priv(dev)
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
+#define WL_ISR(i, d, p) wl_isr((i), (d))
+#else
+#define WL_ISR(i, d, p) wl_isr((i), (d), (p))
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#define netdev_priv(dev) dev->priv
+#endif
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/miniopt.h b/drivers/net/wireless/bcmdhd/include/miniopt.h
new file mode 100644
index 000000000000..c1eca68a602d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/miniopt.h
@@ -0,0 +1,77 @@
+/*
+ * Command line options parser.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: miniopt.h 241182 2011-02-17 21:50:03Z $
+ */
+
+
+#ifndef MINI_OPT_H
+#define MINI_OPT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ---- Include Files ---------------------------------------------------- */
+/* ---- Constants and Types ---------------------------------------------- */
+
+#define MINIOPT_MAXKEY 128 /* Max length of an option key */
+typedef struct miniopt {
+
+ /* These are persistent after miniopt_init() */
+ const char* name; /* name for prompt in error strings */
+ const char* flags; /* option chars that take no args */
+ bool longflags; /* long options may be flags */
+ bool opt_end; /* at end of options (passed a "--") */
+
+ /* These are per-call to miniopt() */
+
+ int consumed; /* number of argv entries consumed in
+ * the most recent call to miniopt()
+ */
+ bool positional;
+ bool good_int; /* 'val' member is the result of a successful
+ * strtol conversion of the option value
+ */
+ char opt;
+ char key[MINIOPT_MAXKEY];
+ char* valstr; /* positional param, or value for the option,
+ * or null if the option had
+ * no accompanying value
+ */
+ uint uval; /* strtol translation of valstr */
+ int val; /* strtol translation of valstr */
+} miniopt_t;
+
+void miniopt_init(miniopt_t *t, const char* name, const char* flags, bool longflags);
+int miniopt(miniopt_t *t, char **argv);
+
+
+/* ---- Variable Externs ------------------------------------------------- */
+/* ---- Function Prototypes ---------------------------------------------- */
+
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* MINI_OPT_H */
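A sketch of the calling convention suggested by the structure fields; the exact return-value semantics live in miniopt.c, and a non-zero return is assumed here to end parsing:

#include <stdio.h>

int main(int argc, char **argv)
{
	miniopt_t mo;

	(void)argc;
	miniopt_init(&mo, "mytool", "hv", FALSE);	/* -h and -v take no value */
	argv++;						/* skip the program name */
	while (*argv != NULL) {
		if (miniopt(&mo, argv) != 0)
			break;				/* assumed: non-zero ends parsing */
		argv += mo.consumed;
		if (mo.positional)
			printf("positional: %s\n", mo.valstr);
		else if (mo.good_int)
			printf("-%c = %d\n", mo.opt, mo.val);
	}
	return 0;
}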
diff --git a/drivers/net/wireless/bcmdhd/include/msgtrace.h b/drivers/net/wireless/bcmdhd/include/msgtrace.h
new file mode 100644
index 000000000000..7c5fd8106f3f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/msgtrace.h
@@ -0,0 +1,74 @@
+/*
+ * Trace messages sent over HBUS
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: msgtrace.h 281527 2011-09-02 17:12:53Z $
+ */
+
+#ifndef _MSGTRACE_H
+#define _MSGTRACE_H
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define MSGTRACE_VERSION 1
+
+/* Message trace header */
+typedef BWL_PRE_PACKED_STRUCT struct msgtrace_hdr {
+ uint8 version;
+ uint8 spare;
+ uint16 len; /* Len of the trace */
+ uint32 seqnum; /* Sequence number of message. Useful if the message has been lost
+ * because of DMA error or a bus reset (ex: SDIO Func2)
+ */
+ uint32 discarded_bytes; /* Number of discarded bytes because of trace overflow */
+ uint32 discarded_printf; /* Number of discarded printf because of trace overflow */
+} BWL_POST_PACKED_STRUCT msgtrace_hdr_t;
+
+#define MSGTRACE_HDRLEN sizeof(msgtrace_hdr_t)
+
+/* The hbus driver itself generates traces when sending a trace message, which would cause
+ * endless traces. This flag must be set to TRUE in any hbus trace path; it is reset in
+ * msgtrace_put(). This prevents endless traces, at the cost of occasionally losing traces,
+ * but only in bus device code. It is recommended to set this flag in the SD_TRACE macro but
+ * not in SD_ERROR, to avoid missing hbus error traces; hbus error traces should not generate
+ * endless traces.
+ */
+extern bool msgtrace_hbus_trace;
+
+typedef void (*msgtrace_func_send_t)(void *hdl1, void *hdl2, uint8 *hdr,
+ uint16 hdrlen, uint8 *buf, uint16 buflen);
+extern void msgtrace_start(void);
+extern void msgtrace_stop(void);
+extern void msgtrace_sent(void);
+extern void msgtrace_put(char *buf, int count);
+extern void msgtrace_init(void *hdl1, void *hdl2, msgtrace_func_send_t func_send);
+extern bool msgtrace_event_enabled(void);
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _MSGTRACE_H */
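A sketch of the intended wiring: hdl1/hdl2 are opaque handles passed back to the send callback, and the names used below are assumptions introduced for the example.

static void msgtrace_send_cb(void *hdl1, void *hdl2, uint8 *hdr,
	uint16 hdrlen, uint8 *buf, uint16 buflen)
{
	/* forward hdr + buf to the host, e.g. as a trace event payload */
}

static void msgtrace_setup(void *bus_hdl, void *dev_hdl)
{
	msgtrace_init(bus_hdl, dev_hdl, msgtrace_send_cb);
	msgtrace_start();
	/* On the host side, a gap in msgtrace_hdr_t.seqnum, together with
	 * discarded_bytes/discarded_printf, indicates lost trace data.
	 */
}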
diff --git a/drivers/net/wireless/bcmdhd/include/osl.h b/drivers/net/wireless/bcmdhd/include/osl.h
new file mode 100644
index 000000000000..ca171d8acd69
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/osl.h
@@ -0,0 +1,88 @@
+/*
+ * OS Abstraction Layer
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: osl.h 320905 2012-03-13 15:33:25Z $
+ */
+
+#ifndef _osl_h_
+#define _osl_h_
+
+
+typedef struct osl_info osl_t;
+typedef struct osl_dmainfo osldma_t;
+
+#define OSL_PKTTAG_SZ 32
+
+
+typedef void (*pktfree_cb_fn_t)(void *ctx, void *pkt, unsigned int status);
+
+
+typedef unsigned int (*osl_rreg_fn_t)(void *ctx, volatile void *reg, unsigned int size);
+typedef void (*osl_wreg_fn_t)(void *ctx, volatile void *reg, unsigned int val, unsigned int size);
+
+
+#include <linux_osl.h>
+
+#ifndef PKTDBG_TRACE
+#define PKTDBG_TRACE(osh, pkt, bit)
+#endif
+
+#define PKTCTFMAP(osh, p)
+
+
+
+#define SET_REG(osh, r, mask, val) W_REG((osh), (r), ((R_REG((osh), r) & ~(mask)) | (val)))
+
+#ifndef AND_REG
+#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v))
+#endif
+
+#ifndef OR_REG
+#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v))
+#endif
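
The register helpers above are thin read-modify-write wrappers around R_REG/W_REG from linux_osl.h. A minimal user-space sketch, with a plain variable standing in for a hardware register and trivial stand-ins for R_REG/W_REG, shows what SET_REG actually does; AND_REG and OR_REG follow the same pattern for clearing or setting bits in place.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define R_REG(osh, r)    (*(r))
#define W_REG(osh, r, v) (*(r) = (v))
#define SET_REG(osh, r, mask, val) \
	W_REG((osh), (r), ((R_REG((osh), r) & ~(mask)) | (val)))

int main(void)
{
	uint32_t fake_reg = 0xffff0000;

	/* Replace the low byte, leaving the remaining bits untouched. */
	SET_REG(NULL, &fake_reg, 0x000000ffu, 0x0000005au);
	printf("0x%08x\n", (unsigned)fake_reg);	/* prints 0xffff005a */
	return 0;
}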
+
+#if !defined(OSL_SYSUPTIME)
+#define OSL_SYSUPTIME() (0)
+#define OSL_SYSUPTIME_SUPPORT FALSE
+#else
+#define OSL_SYSUPTIME_SUPPORT TRUE
+#endif
+
+#if !defined(PKTC)
+#define PKTCCNT(skb) (0)
+#define PKTCLEN(skb) (0)
+#define PKTCFLAGS(skb) (0)
+#define PKTCSETCNT(skb, c)
+#define PKTCSETLEN(skb, l)
+#define PKTCSETFLAG(skb, fb)
+#define PKTCCLRFLAG(skb, fb)
+#define PKTCLINK(skb) PKTLINK(skb)
+#define PKTSETCLINK(skb, x) PKTSETLINK((skb), (x))
+#define PKTISCHAINED(skb) FALSE
+#define FOREACH_CHAINED_PKT(skb, nskb) \
+ for ((nskb) = NULL; (skb) != NULL; (skb) = (nskb))
+#define PKTCFREE PKTFREE
+#endif
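
When packet chaining (PKTC) is not compiled in, the fallback macros above reduce a chain to a single packet. A tiny stand-alone sketch (hypothetical pkt type) shows that FOREACH_CHAINED_PKT then visits its argument exactly once, because nskb stays NULL unless the loop body assigns it.

#include <stdio.h>
#include <stddef.h>

struct pkt { int id; };

#define FOREACH_CHAINED_PKT(skb, nskb) \
	for ((nskb) = NULL; (skb) != NULL; (skb) = (nskb))

int main(void)
{
	struct pkt one = { 42 };
	struct pkt *skb = &one, *nskb;

	FOREACH_CHAINED_PKT(skb, nskb) {
		/* The body never assigns nskb, so skb becomes NULL after this
		 * pass and the "chain" is exactly one packet long. */
		printf("processing pkt %d\n", skb->id);
	}
	return 0;
}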
+
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/packed_section_end.h b/drivers/net/wireless/bcmdhd/include/packed_section_end.h
new file mode 100644
index 000000000000..24ff4672734f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/packed_section_end.h
@@ -0,0 +1,53 @@
+/*
+ * Declare directives for structure packing. No padding will be provided
+ * between the members of packed structures, and therefore, there is no
+ * guarantee that structure members will be aligned.
+ *
+ * Declaring packed structures is compiler specific. In order to handle all
+ * cases, packed structures should be declared as:
+ *
+ * #include <packed_section_start.h>
+ *
+ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t {
+ * some_struct_members;
+ * } BWL_POST_PACKED_STRUCT foobar_t;
+ *
+ * #include <packed_section_end.h>
+ *
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: packed_section_end.h 241182 2011-02-17 21:50:03Z $
+ */
+
+
+
+#ifdef BWL_PACKED_SECTION
+ #undef BWL_PACKED_SECTION
+#else
+ #error "BWL_PACKED_SECTION is NOT defined!"
+#endif
+
+
+
+
+
+#undef BWL_PRE_PACKED_STRUCT
+#undef BWL_POST_PACKED_STRUCT
diff --git a/drivers/net/wireless/bcmdhd/include/packed_section_start.h b/drivers/net/wireless/bcmdhd/include/packed_section_start.h
new file mode 100644
index 000000000000..7fce0ddfd56e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/packed_section_start.h
@@ -0,0 +1,60 @@
+/*
+ * Declare directives for structure packing. No padding will be provided
+ * between the members of packed structures, and therefore, there is no
+ * guarantee that structure members will be aligned.
+ *
+ * Declaring packed structures is compiler specific. In order to handle all
+ * cases, packed structures should be declared as:
+ *
+ * #include <packed_section_start.h>
+ *
+ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t {
+ * some_struct_members;
+ * } BWL_POST_PACKED_STRUCT foobar_t;
+ *
+ * #include <packed_section_end.h>
+ *
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: packed_section_start.h 286783 2011-09-29 06:18:57Z $
+ */
+
+
+
+#ifdef BWL_PACKED_SECTION
+ #error "BWL_PACKED_SECTION is already defined!"
+#else
+ #define BWL_PACKED_SECTION
+#endif
+
+
+
+
+
+#if defined(__GNUC__) || defined(__lint)
+ #define BWL_PRE_PACKED_STRUCT
+ #define BWL_POST_PACKED_STRUCT __attribute__ ((packed))
+#elif defined(__CC_ARM)
+ #define BWL_PRE_PACKED_STRUCT __packed
+ #define BWL_POST_PACKED_STRUCT
+#else
+ #error "Unknown compiler!"
+#endif
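
A minimal sketch of what the GCC branch above buys you: with BWL_POST_PACKED_STRUCT expanding to __attribute__((packed)), a uint8 followed by a uint16 occupies 3 bytes instead of the 4 an unpacked layout would typically use (the exact unpacked size is ABI-dependent).

#include <stdio.h>
#include <stdint.h>

#define BWL_PRE_PACKED_STRUCT
#define BWL_POST_PACKED_STRUCT __attribute__ ((packed))

typedef BWL_PRE_PACKED_STRUCT struct demo_packed {
	uint8_t  tag;
	uint16_t value;
} BWL_POST_PACKED_STRUCT demo_packed_t;

struct demo_unpacked {
	uint8_t  tag;
	uint16_t value;
};

int main(void)
{
	/* Typically prints: packed: 3 bytes, unpacked: 4 bytes */
	printf("packed: %zu bytes, unpacked: %zu bytes\n",
	       sizeof(demo_packed_t), sizeof(struct demo_unpacked));
	return 0;
}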
diff --git a/drivers/net/wireless/bcmdhd/include/pcicfg.h b/drivers/net/wireless/bcmdhd/include/pcicfg.h
new file mode 100644
index 000000000000..5f7df6a7a7e6
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/pcicfg.h
@@ -0,0 +1,90 @@
+/*
+ * pcicfg.h: PCI configuration constants and structures.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: pcicfg.h 309193 2012-01-19 00:03:57Z $
+ */
+
+#ifndef _h_pcicfg_
+#define _h_pcicfg_
+
+
+#define PCI_CFG_VID 0
+#define PCI_CFG_DID 2
+#define PCI_CFG_CMD 4
+#define PCI_CFG_STAT 6
+#define PCI_CFG_REV 8
+#define PCI_CFG_PROGIF 9
+#define PCI_CFG_SUBCL 0xa
+#define PCI_CFG_BASECL 0xb
+#define PCI_CFG_CLSZ 0xc
+#define PCI_CFG_LATTIM 0xd
+#define PCI_CFG_HDR 0xe
+#define PCI_CFG_BIST 0xf
+#define PCI_CFG_BAR0 0x10
+#define PCI_CFG_BAR1 0x14
+#define PCI_CFG_BAR2 0x18
+#define PCI_CFG_BAR3 0x1c
+#define PCI_CFG_BAR4 0x20
+#define PCI_CFG_BAR5 0x24
+#define PCI_CFG_CIS 0x28
+#define PCI_CFG_SVID 0x2c
+#define PCI_CFG_SSID 0x2e
+#define PCI_CFG_ROMBAR 0x30
+#define PCI_CFG_CAPPTR 0x34
+#define PCI_CFG_INT 0x3c
+#define PCI_CFG_PIN 0x3d
+#define PCI_CFG_MINGNT 0x3e
+#define PCI_CFG_MAXLAT 0x3f
+#define PCI_BAR0_WIN 0x80
+#define PCI_BAR1_WIN 0x84
+#define PCI_SPROM_CONTROL 0x88
+#define PCI_BAR1_CONTROL 0x8c
+#define PCI_INT_STATUS 0x90
+#define PCI_INT_MASK 0x94
+#define PCI_TO_SB_MB 0x98
+#define PCI_BACKPLANE_ADDR 0xa0
+#define PCI_BACKPLANE_DATA 0xa4
+#define PCI_CLK_CTL_ST 0xa8
+#define PCI_BAR0_WIN2 0xac
+#define PCI_GPIO_IN 0xb0
+#define PCI_GPIO_OUT 0xb4
+#define PCI_GPIO_OUTEN 0xb8
+
+#define PCI_BAR0_SHADOW_OFFSET (2 * 1024)
+#define PCI_BAR0_SPROM_OFFSET (4 * 1024)
+#define PCI_BAR0_PCIREGS_OFFSET (6 * 1024)
+#define PCI_BAR0_PCISBR_OFFSET (4 * 1024)
+
+#define PCIE2_BAR0_WIN2 0x70
+#define PCIE2_BAR0_CORE2_WIN 0x74
+#define PCIE2_BAR0_CORE2_WIN2 0x78
+
+#define PCI_BAR0_WINSZ (16 * 1024)
+
+#define PCI_16KB0_PCIREGS_OFFSET (8 * 1024)
+#define PCI_16KB0_CCREGS_OFFSET (12 * 1024)
+#define PCI_16KBB0_WINSZ (16 * 1024)
+
+
+#define PCI_CONFIG_SPACE_SIZE 256
+#endif
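
As a hedged illustration only (not part of this driver), the offsets above line up with the standard PCI configuration header, so a kernel-side sketch can read them with the generic pci_read_config_word() helper. It assumes a struct pci_dev *pdev obtained from a probe callback and that pcicfg.h is on the include path.

#include <linux/pci.h>

/* Read the vendor/device IDs from the standard config header. PCI_CFG_VID
 * (offset 0) and PCI_CFG_DID (offset 2) are the offsets defined above. */
static void demo_read_ids(struct pci_dev *pdev)
{
	u16 vendor, device;

	pci_read_config_word(pdev, PCI_CFG_VID, &vendor);
	pci_read_config_word(pdev, PCI_CFG_DID, &device);
	dev_info(&pdev->dev, "vendor 0x%04x device 0x%04x\n", vendor, device);
}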
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11.h b/drivers/net/wireless/bcmdhd/include/proto/802.11.h
new file mode 100644
index 000000000000..bd0942eb9648
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.11.h
@@ -0,0 +1,2253 @@
+/*
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental types and constants relating to 802.11
+ *
+ * $Id: 802.11.h 328824 2012-04-20 22:51:46Z $
+ */
+
+#ifndef _802_11_H_
+#define _802_11_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+#ifndef _NET_ETHERNET_H_
+#include <proto/ethernet.h>
+#endif
+
+#include <proto/wpa.h>
+
+
+#include <packed_section_start.h>
+
+
+#define DOT11_TU_TO_US 1024
+
+
+#define DOT11_A3_HDR_LEN 24
+#define DOT11_A4_HDR_LEN 30
+#define DOT11_MAC_HDR_LEN DOT11_A3_HDR_LEN
+#define DOT11_FCS_LEN 4
+#define DOT11_ICV_LEN 4
+#define DOT11_ICV_AES_LEN 8
+#define DOT11_QOS_LEN 2
+#define DOT11_HTC_LEN 4
+
+#define DOT11_KEY_INDEX_SHIFT 6
+#define DOT11_IV_LEN 4
+#define DOT11_IV_TKIP_LEN 8
+#define DOT11_IV_AES_OCB_LEN 4
+#define DOT11_IV_AES_CCM_LEN 8
+#define DOT11_IV_MAX_LEN 8
+
+
+#define DOT11_MAX_MPDU_BODY_LEN 2304
+
+#define DOT11_MAX_MPDU_LEN (DOT11_A4_HDR_LEN + \
+ DOT11_QOS_LEN + \
+ DOT11_IV_AES_CCM_LEN + \
+ DOT11_MAX_MPDU_BODY_LEN + \
+ DOT11_ICV_LEN + \
+ DOT11_FCS_LEN)
+
+#define DOT11_MAX_SSID_LEN 32
+
+
+#define DOT11_DEFAULT_RTS_LEN 2347
+#define DOT11_MAX_RTS_LEN 2347
+
+
+#define DOT11_MIN_FRAG_LEN 256
+#define DOT11_MAX_FRAG_LEN 2346
+#define DOT11_DEFAULT_FRAG_LEN 2346
+
+
+#define DOT11_MIN_BEACON_PERIOD 1
+#define DOT11_MAX_BEACON_PERIOD 0xFFFF
+
+
+#define DOT11_MIN_DTIM_PERIOD 1
+#define DOT11_MAX_DTIM_PERIOD 0xFF
+
+
+#define DOT11_LLC_SNAP_HDR_LEN 8
+#define DOT11_OUI_LEN 3
+BWL_PRE_PACKED_STRUCT struct dot11_llc_snap_header {
+ uint8 dsap;
+ uint8 ssap;
+ uint8 ctl;
+ uint8 oui[DOT11_OUI_LEN];
+ uint16 type;
+} BWL_POST_PACKED_STRUCT;
+
+
+#define RFC1042_HDR_LEN (ETHER_HDR_LEN + DOT11_LLC_SNAP_HDR_LEN)
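
The 8-byte LLC/SNAP header above is the usual RFC 1042 encapsulation. A stand-alone sketch, using a local packed stand-in for the structure, fills it in for an IPv4 payload.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htons */

struct __attribute__((packed)) llc_snap {
	uint8_t  dsap, ssap, ctl;
	uint8_t  oui[3];
	uint16_t type;
};

int main(void)
{
	struct llc_snap h = {
		.dsap = 0xaa, .ssap = 0xaa, .ctl = 0x03,
		.oui  = { 0x00, 0x00, 0x00 },	/* RFC 1042 encapsulation */
		.type = htons(0x0800),		/* EtherType: IPv4 */
	};

	/* 8 bytes, i.e. DOT11_LLC_SNAP_HDR_LEN */
	printf("LLC/SNAP header is %zu bytes\n", sizeof(h));
	return 0;
}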
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_header {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr a1;
+ struct ether_addr a2;
+ struct ether_addr a3;
+ uint16 seq;
+ struct ether_addr a4;
+} BWL_POST_PACKED_STRUCT;
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_rts_frame {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr ra;
+ struct ether_addr ta;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_RTS_LEN 16
+
+BWL_PRE_PACKED_STRUCT struct dot11_cts_frame {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr ra;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_CTS_LEN 10
+
+BWL_PRE_PACKED_STRUCT struct dot11_ack_frame {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr ra;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ACK_LEN 10
+
+BWL_PRE_PACKED_STRUCT struct dot11_ps_poll_frame {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr bssid;
+ struct ether_addr ta;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_PS_POLL_LEN 16
+
+BWL_PRE_PACKED_STRUCT struct dot11_cf_end_frame {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr ra;
+ struct ether_addr bssid;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_CS_END_LEN 16
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_wifi_vendor_specific {
+ uint8 category;
+ uint8 OUI[3];
+ uint8 type;
+ uint8 subtype;
+ uint8 data[1040];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_action_wifi_vendor_specific dot11_action_wifi_vendor_specific_t;
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_vs_frmhdr {
+ uint8 category;
+ uint8 OUI[3];
+ uint8 type;
+ uint8 subtype;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_action_vs_frmhdr dot11_action_vs_frmhdr_t;
+#define DOT11_ACTION_VS_HDR_LEN 6
+
+#define BCM_ACTION_OUI_BYTE0 0x00
+#define BCM_ACTION_OUI_BYTE1 0x90
+#define BCM_ACTION_OUI_BYTE2 0x4c
+
+
+#define DOT11_BA_CTL_POLICY_NORMAL 0x0000
+#define DOT11_BA_CTL_POLICY_NOACK 0x0001
+#define DOT11_BA_CTL_POLICY_MASK 0x0001
+
+#define DOT11_BA_CTL_MTID 0x0002
+#define DOT11_BA_CTL_COMPRESSED 0x0004
+
+#define DOT11_BA_CTL_NUMMSDU_MASK 0x0FC0
+#define DOT11_BA_CTL_NUMMSDU_SHIFT 6
+
+#define DOT11_BA_CTL_TID_MASK 0xF000
+#define DOT11_BA_CTL_TID_SHIFT 12
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_ctl_header {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr ra;
+ struct ether_addr ta;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_CTL_HDR_LEN 16
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_bar {
+ uint16 bar_control;
+ uint16 seqnum;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BAR_LEN 4
+
+#define DOT11_BA_BITMAP_LEN 128
+#define DOT11_BA_CMP_BITMAP_LEN 8
+
+BWL_PRE_PACKED_STRUCT struct dot11_ba {
+ uint16 ba_control;
+ uint16 seqnum;
+ uint8 bitmap[DOT11_BA_BITMAP_LEN];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BA_LEN 4
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_management_header {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr da;
+ struct ether_addr sa;
+ struct ether_addr bssid;
+ uint16 seq;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_MGMT_HDR_LEN 24
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_bcn_prb {
+ uint32 timestamp[2];
+ uint16 beacon_interval;
+ uint16 capability;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BCN_PRB_LEN 12
+#define DOT11_BCN_PRB_FIXED_LEN 12
+
+BWL_PRE_PACKED_STRUCT struct dot11_auth {
+ uint16 alg;
+ uint16 seq;
+ uint16 status;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_AUTH_FIXED_LEN 6
+
+BWL_PRE_PACKED_STRUCT struct dot11_assoc_req {
+ uint16 capability;
+ uint16 listen;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ASSOC_REQ_FIXED_LEN 4
+
+BWL_PRE_PACKED_STRUCT struct dot11_reassoc_req {
+ uint16 capability;
+ uint16 listen;
+ struct ether_addr ap;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_REASSOC_REQ_FIXED_LEN 10
+
+BWL_PRE_PACKED_STRUCT struct dot11_assoc_resp {
+ uint16 capability;
+ uint16 status;
+ uint16 aid;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ASSOC_RESP_FIXED_LEN 6
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_measure {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ACTION_MEASURE_LEN 3
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ht_ch_width {
+ uint8 category;
+ uint8 action;
+ uint8 ch_width;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ht_mimops {
+ uint8 category;
+ uint8 action;
+ uint8 control;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_sa_query {
+ uint8 category;
+ uint8 action;
+ uint16 id;
+} BWL_POST_PACKED_STRUCT;
+
+#define SM_PWRSAVE_ENABLE 1
+#define SM_PWRSAVE_MODE 2
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_power_cnst {
+ uint8 id;
+ uint8 len;
+ uint8 power;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_power_cnst dot11_power_cnst_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_power_cap {
+ uint8 min;
+ uint8 max;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_power_cap dot11_power_cap_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_tpc_rep {
+ uint8 id;
+ uint8 len;
+ uint8 tx_pwr;
+ uint8 margin;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tpc_rep dot11_tpc_rep_t;
+#define DOT11_MNG_IE_TPC_REPORT_LEN 2
+
+BWL_PRE_PACKED_STRUCT struct dot11_supp_channels {
+ uint8 id;
+ uint8 len;
+ uint8 first_channel;
+ uint8 num_channels;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_supp_channels dot11_supp_channels_t;
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_extch {
+ uint8 id;
+ uint8 len;
+ uint8 extch;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extch dot11_extch_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_brcm_extch {
+ uint8 id;
+ uint8 len;
+ uint8 oui[3];
+ uint8 type;
+ uint8 extch;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_brcm_extch dot11_brcm_extch_ie_t;
+
+#define BRCM_EXTCH_IE_LEN 5
+#define BRCM_EXTCH_IE_TYPE 53
+#define DOT11_EXTCH_IE_LEN 1
+#define DOT11_EXT_CH_MASK 0x03
+#define DOT11_EXT_CH_UPPER 0x01
+#define DOT11_EXT_CH_LOWER 0x03
+#define DOT11_EXT_CH_NONE 0x00
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_frmhdr {
+ uint8 category;
+ uint8 action;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ACTION_FRMHDR_LEN 2
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_channel_switch {
+ uint8 id;
+ uint8 len;
+ uint8 mode;
+ uint8 channel;
+ uint8 count;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_channel_switch dot11_chan_switch_ie_t;
+
+#define DOT11_SWITCH_IE_LEN 3
+
+#define DOT11_CSA_MODE_ADVISORY 0
+#define DOT11_CSA_MODE_NO_TX 1
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_switch_channel {
+ uint8 category;
+ uint8 action;
+ dot11_chan_switch_ie_t chan_switch_ie;
+ dot11_brcm_extch_ie_t extch_ie;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_csa_body {
+ uint8 mode;
+ uint8 reg;
+ uint8 channel;
+ uint8 count;
+} BWL_POST_PACKED_STRUCT;
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_ext_csa {
+ uint8 id;
+ uint8 len;
+ struct dot11_csa_body b;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ext_csa dot11_ext_csa_ie_t;
+#define DOT11_EXT_CSA_IE_LEN 4
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ext_csa {
+ uint8 category;
+ uint8 action;
+ dot11_ext_csa_ie_t chan_switch_ie;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11y_action_ext_csa {
+ uint8 category;
+ uint8 action;
+ struct dot11_csa_body b;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_coex {
+ uint8 id;
+ uint8 len;
+ uint8 info;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_coex dot11_obss_coex_t;
+#define DOT11_OBSS_COEXINFO_LEN 1
+
+#define DOT11_OBSS_COEX_INFO_REQ 0x01
+#define DOT11_OBSS_COEX_40MHZ_INTOLERANT 0x02
+#define DOT11_OBSS_COEX_20MHZ_WIDTH_REQ 0x04
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_chanlist {
+ uint8 id;
+ uint8 len;
+ uint8 regclass;
+ uint8 chanlist[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_chanlist dot11_obss_chanlist_t;
+#define DOT11_OBSS_CHANLIST_FIXED_LEN 1
+
+BWL_PRE_PACKED_STRUCT struct dot11_extcap_ie {
+ uint8 id;
+ uint8 len;
+ uint8 cap[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extcap_ie dot11_extcap_ie_t;
+
+#define DOT11_EXTCAP_LEN_MAX 7
+#define DOT11_EXTCAP_LEN_COEX 1
+#define DOT11_EXTCAP_LEN_BT 3
+#define DOT11_EXTCAP_LEN_IW 4
+#define DOT11_EXTCAP_LEN_SI 6
+
+#define DOT11_EXTCAP_LEN_TDLS 5
+BWL_PRE_PACKED_STRUCT struct dot11_extcap {
+ uint8 extcap[DOT11_EXTCAP_LEN_TDLS];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extcap dot11_extcap_t;
+
+
+#define TDLS_CAP_TDLS 37
+#define TDLS_CAP_PU_BUFFER_STA 28
+#define TDLS_CAP_PEER_PSM 20
+#define TDLS_CAP_CH_SW 30
+#define TDLS_CAP_PROH 38
+#define TDLS_CAP_CH_SW_PROH 39
+
+#define TDLS_CAP_MAX_BIT 39
+
+
+
+#define DOT11_MEASURE_TYPE_BASIC 0
+#define DOT11_MEASURE_TYPE_CCA 1
+#define DOT11_MEASURE_TYPE_RPI 2
+#define DOT11_MEASURE_TYPE_CHLOAD 3
+#define DOT11_MEASURE_TYPE_NOISE 4
+#define DOT11_MEASURE_TYPE_BEACON 5
+#define DOT11_MEASURE_TYPE_FRAME 6
+#define DOT11_MEASURE_TYPE_STATS 7
+#define DOT11_MEASURE_TYPE_LCI 8
+#define DOT11_MEASURE_TYPE_TXSTREAM 9
+#define DOT11_MEASURE_TYPE_PAUSE 255
+
+
+#define DOT11_MEASURE_MODE_PARALLEL (1<<0)
+#define DOT11_MEASURE_MODE_ENABLE (1<<1)
+#define DOT11_MEASURE_MODE_REQUEST (1<<2)
+#define DOT11_MEASURE_MODE_REPORT (1<<3)
+#define DOT11_MEASURE_MODE_DUR (1<<4)
+
+#define DOT11_MEASURE_MODE_LATE (1<<0)
+#define DOT11_MEASURE_MODE_INCAPABLE (1<<1)
+#define DOT11_MEASURE_MODE_REFUSED (1<<2)
+
+#define DOT11_MEASURE_BASIC_MAP_BSS ((uint8)(1<<0))
+#define DOT11_MEASURE_BASIC_MAP_OFDM ((uint8)(1<<1))
+#define DOT11_MEASURE_BASIC_MAP_UKNOWN ((uint8)(1<<2))
+#define DOT11_MEASURE_BASIC_MAP_RADAR ((uint8)(1<<3))
+#define DOT11_MEASURE_BASIC_MAP_UNMEAS ((uint8)(1<<4))
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_req {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint8 channel;
+ uint8 start_time[8];
+ uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_req dot11_meas_req_t;
+#define DOT11_MNG_IE_MREQ_LEN 14
+
+#define DOT11_MNG_IE_MREQ_FIXED_LEN 3
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_rep {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ BWL_PRE_PACKED_STRUCT union
+ {
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 channel;
+ uint8 start_time[8];
+ uint16 duration;
+ uint8 map;
+ } BWL_POST_PACKED_STRUCT basic;
+ uint8 data[1];
+ } BWL_POST_PACKED_STRUCT rep;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_rep dot11_meas_rep_t;
+
+
+#define DOT11_MNG_IE_MREP_FIXED_LEN 3
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_rep_basic {
+ uint8 channel;
+ uint8 start_time[8];
+ uint16 duration;
+ uint8 map;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_rep_basic dot11_meas_rep_basic_t;
+#define DOT11_MEASURE_BASIC_REP_LEN 12
+
+BWL_PRE_PACKED_STRUCT struct dot11_quiet {
+ uint8 id;
+ uint8 len;
+ uint8 count;
+ uint8 period;
+ uint16 duration;
+ uint16 offset;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_quiet dot11_quiet_t;
+
+BWL_PRE_PACKED_STRUCT struct chan_map_tuple {
+ uint8 channel;
+ uint8 map;
+} BWL_POST_PACKED_STRUCT;
+typedef struct chan_map_tuple chan_map_tuple_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_ibss_dfs {
+ uint8 id;
+ uint8 len;
+ uint8 eaddr[ETHER_ADDR_LEN];
+ uint8 interval;
+ chan_map_tuple_t map[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ibss_dfs dot11_ibss_dfs_t;
+
+
+#define WME_OUI "\x00\x50\xf2"
+#define WME_OUI_LEN 3
+#define WME_OUI_TYPE 2
+#define WME_TYPE 2
+#define WME_SUBTYPE_IE 0
+#define WME_SUBTYPE_PARAM_IE 1
+#define WME_SUBTYPE_TSPEC 2
+#define WME_VER 1
+
+
+#define AC_BE 0
+#define AC_BK 1
+#define AC_VI 2
+#define AC_VO 3
+#define AC_COUNT 4
+
+typedef uint8 ac_bitmap_t;
+
+#define AC_BITMAP_NONE 0x0
+#define AC_BITMAP_ALL 0xf
+#define AC_BITMAP_TST(ab, ac) (((ab) & (1 << (ac))) != 0)
+#define AC_BITMAP_SET(ab, ac) (((ab) |= (1 << (ac))))
+#define AC_BITMAP_RESET(ab, ac) (((ab) &= ~(1 << (ac))))
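
A tiny stand-alone sketch of the access-category bitmap helpers above (values copied verbatim), e.g. for building a per-AC delivery-enabled mask.

#include <stdio.h>
#include <stdint.h>

typedef uint8_t ac_bitmap_t;

#define AC_BE 0
#define AC_VO 3
#define AC_BITMAP_NONE        0x0
#define AC_BITMAP_TST(ab, ac) (((ab) & (1 << (ac))) != 0)
#define AC_BITMAP_SET(ab, ac) (((ab) |= (1 << (ac))))

int main(void)
{
	ac_bitmap_t apsd = AC_BITMAP_NONE;

	AC_BITMAP_SET(apsd, AC_VO);
	/* Prints: BE=0 VO=1 */
	printf("BE=%d VO=%d\n",
	       AC_BITMAP_TST(apsd, AC_BE), AC_BITMAP_TST(apsd, AC_VO));
	return 0;
}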
+
+
+BWL_PRE_PACKED_STRUCT struct wme_ie {
+ uint8 oui[3];
+ uint8 type;
+ uint8 subtype;
+ uint8 version;
+ uint8 qosinfo;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wme_ie wme_ie_t;
+#define WME_IE_LEN 7
+
+BWL_PRE_PACKED_STRUCT struct edcf_acparam {
+ uint8 ACI;
+ uint8 ECW;
+ uint16 TXOP;
+} BWL_POST_PACKED_STRUCT;
+typedef struct edcf_acparam edcf_acparam_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wme_param_ie {
+ uint8 oui[3];
+ uint8 type;
+ uint8 subtype;
+ uint8 version;
+ uint8 qosinfo;
+ uint8 rsvd;
+ edcf_acparam_t acparam[AC_COUNT];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wme_param_ie wme_param_ie_t;
+#define WME_PARAM_IE_LEN 24
+
+
+#define WME_QI_AP_APSD_MASK 0x80
+#define WME_QI_AP_APSD_SHIFT 7
+#define WME_QI_AP_COUNT_MASK 0x0f
+#define WME_QI_AP_COUNT_SHIFT 0
+
+
+#define WME_QI_STA_MAXSPLEN_MASK 0x60
+#define WME_QI_STA_MAXSPLEN_SHIFT 5
+#define WME_QI_STA_APSD_ALL_MASK 0xf
+#define WME_QI_STA_APSD_ALL_SHIFT 0
+#define WME_QI_STA_APSD_BE_MASK 0x8
+#define WME_QI_STA_APSD_BE_SHIFT 3
+#define WME_QI_STA_APSD_BK_MASK 0x4
+#define WME_QI_STA_APSD_BK_SHIFT 2
+#define WME_QI_STA_APSD_VI_MASK 0x2
+#define WME_QI_STA_APSD_VI_SHIFT 1
+#define WME_QI_STA_APSD_VO_MASK 0x1
+#define WME_QI_STA_APSD_VO_SHIFT 0
+
+
+#define EDCF_AIFSN_MIN 1
+#define EDCF_AIFSN_MAX 15
+#define EDCF_AIFSN_MASK 0x0f
+#define EDCF_ACM_MASK 0x10
+#define EDCF_ACI_MASK 0x60
+#define EDCF_ACI_SHIFT 5
+#define EDCF_AIFSN_SHIFT 12
+
+
+#define EDCF_ECW_MIN 0
+#define EDCF_ECW_MAX 15
+#define EDCF_ECW2CW(exp) ((1 << (exp)) - 1)
+#define EDCF_ECWMIN_MASK 0x0f
+#define EDCF_ECWMAX_MASK 0xf0
+#define EDCF_ECWMAX_SHIFT 4
+
+
+#define EDCF_TXOP_MIN 0
+#define EDCF_TXOP_MAX 65535
+#define EDCF_TXOP2USEC(txop) ((txop) << 5)
+
+
+#define NON_EDCF_AC_BE_ACI_STA 0x02
+
+
+#define EDCF_AC_BE_ACI_STA 0x03
+#define EDCF_AC_BE_ECW_STA 0xA4
+#define EDCF_AC_BE_TXOP_STA 0x0000
+#define EDCF_AC_BK_ACI_STA 0x27
+#define EDCF_AC_BK_ECW_STA 0xA4
+#define EDCF_AC_BK_TXOP_STA 0x0000
+#define EDCF_AC_VI_ACI_STA 0x42
+#define EDCF_AC_VI_ECW_STA 0x43
+#define EDCF_AC_VI_TXOP_STA 0x005e
+#define EDCF_AC_VO_ACI_STA 0x62
+#define EDCF_AC_VO_ECW_STA 0x32
+#define EDCF_AC_VO_TXOP_STA 0x002f
+
+
+#define EDCF_AC_BE_ACI_AP 0x03
+#define EDCF_AC_BE_ECW_AP 0x64
+#define EDCF_AC_BE_TXOP_AP 0x0000
+#define EDCF_AC_BK_ACI_AP 0x27
+#define EDCF_AC_BK_ECW_AP 0xA4
+#define EDCF_AC_BK_TXOP_AP 0x0000
+#define EDCF_AC_VI_ACI_AP 0x41
+#define EDCF_AC_VI_ECW_AP 0x43
+#define EDCF_AC_VI_TXOP_AP 0x005e
+#define EDCF_AC_VO_ACI_AP 0x61
+#define EDCF_AC_VO_ECW_AP 0x32
+#define EDCF_AC_VO_TXOP_AP 0x002f
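
The ECW and TXOP fields are encoded, not literal values. A short worked example using the STA defaults above shows the decoding: EDCF_ECW2CW turns the exponent nibbles into contention-window sizes, and EDCF_TXOP2USEC scales the TXOP limit to microseconds.

#include <stdio.h>

#define EDCF_ECWMIN_MASK     0x0f
#define EDCF_ECWMAX_MASK     0xf0
#define EDCF_ECWMAX_SHIFT    4
#define EDCF_ECW2CW(exp)     ((1 << (exp)) - 1)
#define EDCF_TXOP2USEC(txop) ((txop) << 5)

#define EDCF_AC_BE_ECW_STA   0xA4
#define EDCF_AC_VI_TXOP_STA  0x005e

int main(void)
{
	int ecwmin = EDCF_AC_BE_ECW_STA & EDCF_ECWMIN_MASK;				/* 4  */
	int ecwmax = (EDCF_AC_BE_ECW_STA & EDCF_ECWMAX_MASK) >> EDCF_ECWMAX_SHIFT;	/* 10 */

	/* Prints: AC_BE: CWmin=15 CWmax=1023 */
	printf("AC_BE: CWmin=%d CWmax=%d\n",
	       EDCF_ECW2CW(ecwmin), EDCF_ECW2CW(ecwmax));
	/* Prints: AC_VI TXOP: 3008 usec (94 slots of 32 us) */
	printf("AC_VI TXOP: %d usec\n", EDCF_TXOP2USEC(EDCF_AC_VI_TXOP_STA));
	return 0;
}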
+
+
+BWL_PRE_PACKED_STRUCT struct edca_param_ie {
+ uint8 qosinfo;
+ uint8 rsvd;
+ edcf_acparam_t acparam[AC_COUNT];
+} BWL_POST_PACKED_STRUCT;
+typedef struct edca_param_ie edca_param_ie_t;
+#define EDCA_PARAM_IE_LEN 18
+
+
+BWL_PRE_PACKED_STRUCT struct qos_cap_ie {
+ uint8 qosinfo;
+} BWL_POST_PACKED_STRUCT;
+typedef struct qos_cap_ie qos_cap_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_qbss_load_ie {
+ uint8 id;
+ uint8 length;
+ uint16 station_count;
+ uint8 channel_utilization;
+ uint16 aac;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_qbss_load_ie dot11_qbss_load_ie_t;
+#define BSS_LOAD_IE_SIZE 7
+
+
+#define FIXED_MSDU_SIZE 0x8000
+#define MSDU_SIZE_MASK 0x7fff
+
+
+
+#define INTEGER_SHIFT 13
+#define FRACTION_MASK 0x1FFF
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_management_notification {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint8 status;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_MGMT_NOTIFICATION_LEN 4
+
+
+BWL_PRE_PACKED_STRUCT struct ti_ie {
+ uint8 ti_type;
+ uint32 ti_val;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ti_ie ti_ie_t;
+#define TI_TYPE_REASSOC_DEADLINE 1
+#define TI_TYPE_KEY_LIFETIME 2
+
+
+#define WME_ADDTS_REQUEST 0
+#define WME_ADDTS_RESPONSE 1
+#define WME_DELTS_REQUEST 2
+
+
+#define WME_ADMISSION_ACCEPTED 0
+#define WME_INVALID_PARAMETERS 1
+#define WME_ADMISSION_REFUSED 3
+
+
+#define BCN_PRB_SSID(body) ((char*)(body) + DOT11_BCN_PRB_LEN)
+
+
+#define DOT11_OPEN_SYSTEM 0
+#define DOT11_SHARED_KEY 1
+#define DOT11_FAST_BSS 2
+#define DOT11_CHALLENGE_LEN 128
+
+
+#define FC_PVER_MASK 0x3
+#define FC_PVER_SHIFT 0
+#define FC_TYPE_MASK 0xC
+#define FC_TYPE_SHIFT 2
+#define FC_SUBTYPE_MASK 0xF0
+#define FC_SUBTYPE_SHIFT 4
+#define FC_TODS 0x100
+#define FC_TODS_SHIFT 8
+#define FC_FROMDS 0x200
+#define FC_FROMDS_SHIFT 9
+#define FC_MOREFRAG 0x400
+#define FC_MOREFRAG_SHIFT 10
+#define FC_RETRY 0x800
+#define FC_RETRY_SHIFT 11
+#define FC_PM 0x1000
+#define FC_PM_SHIFT 12
+#define FC_MOREDATA 0x2000
+#define FC_MOREDATA_SHIFT 13
+#define FC_WEP 0x4000
+#define FC_WEP_SHIFT 14
+#define FC_ORDER 0x8000
+#define FC_ORDER_SHIFT 15
+
+
+#define SEQNUM_SHIFT 4
+#define SEQNUM_MAX 0x1000
+#define FRAGNUM_MASK 0xF
+
+
+
+
+#define FC_TYPE_MNG 0
+#define FC_TYPE_CTL 1
+#define FC_TYPE_DATA 2
+
+
+#define FC_SUBTYPE_ASSOC_REQ 0
+#define FC_SUBTYPE_ASSOC_RESP 1
+#define FC_SUBTYPE_REASSOC_REQ 2
+#define FC_SUBTYPE_REASSOC_RESP 3
+#define FC_SUBTYPE_PROBE_REQ 4
+#define FC_SUBTYPE_PROBE_RESP 5
+#define FC_SUBTYPE_BEACON 8
+#define FC_SUBTYPE_ATIM 9
+#define FC_SUBTYPE_DISASSOC 10
+#define FC_SUBTYPE_AUTH 11
+#define FC_SUBTYPE_DEAUTH 12
+#define FC_SUBTYPE_ACTION 13
+#define FC_SUBTYPE_ACTION_NOACK 14
+
+
+#define FC_SUBTYPE_CTL_WRAPPER 7
+#define FC_SUBTYPE_BLOCKACK_REQ 8
+#define FC_SUBTYPE_BLOCKACK 9
+#define FC_SUBTYPE_PS_POLL 10
+#define FC_SUBTYPE_RTS 11
+#define FC_SUBTYPE_CTS 12
+#define FC_SUBTYPE_ACK 13
+#define FC_SUBTYPE_CF_END 14
+#define FC_SUBTYPE_CF_END_ACK 15
+
+
+#define FC_SUBTYPE_DATA 0
+#define FC_SUBTYPE_DATA_CF_ACK 1
+#define FC_SUBTYPE_DATA_CF_POLL 2
+#define FC_SUBTYPE_DATA_CF_ACK_POLL 3
+#define FC_SUBTYPE_NULL 4
+#define FC_SUBTYPE_CF_ACK 5
+#define FC_SUBTYPE_CF_POLL 6
+#define FC_SUBTYPE_CF_ACK_POLL 7
+#define FC_SUBTYPE_QOS_DATA 8
+#define FC_SUBTYPE_QOS_DATA_CF_ACK 9
+#define FC_SUBTYPE_QOS_DATA_CF_POLL 10
+#define FC_SUBTYPE_QOS_DATA_CF_ACK_POLL 11
+#define FC_SUBTYPE_QOS_NULL 12
+#define FC_SUBTYPE_QOS_CF_POLL 14
+#define FC_SUBTYPE_QOS_CF_ACK_POLL 15
+
+
+#define FC_SUBTYPE_ANY_QOS(s) (((s) & 8) != 0)
+#define FC_SUBTYPE_ANY_NULL(s) (((s) & 4) != 0)
+#define FC_SUBTYPE_ANY_CF_POLL(s) (((s) & 2) != 0)
+#define FC_SUBTYPE_ANY_CF_ACK(s) (((s) & 1) != 0)
+
+
+#define FC_KIND_MASK (FC_TYPE_MASK | FC_SUBTYPE_MASK)
+
+#define FC_KIND(t, s) (((t) << FC_TYPE_SHIFT) | ((s) << FC_SUBTYPE_SHIFT))
+
+#define FC_SUBTYPE(fc) (((fc) & FC_SUBTYPE_MASK) >> FC_SUBTYPE_SHIFT)
+#define FC_TYPE(fc) (((fc) & FC_TYPE_MASK) >> FC_TYPE_SHIFT)
+
+#define FC_ASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_REQ)
+#define FC_ASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_RESP)
+#define FC_REASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_REQ)
+#define FC_REASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_RESP)
+#define FC_PROBE_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_REQ)
+#define FC_PROBE_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_RESP)
+#define FC_BEACON FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_BEACON)
+#define FC_DISASSOC FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DISASSOC)
+#define FC_AUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_AUTH)
+#define FC_DEAUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DEAUTH)
+#define FC_ACTION FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION)
+#define FC_ACTION_NOACK FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION_NOACK)
+
+#define FC_CTL_WRAPPER FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTL_WRAPPER)
+#define FC_BLOCKACK_REQ FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK_REQ)
+#define FC_BLOCKACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK)
+#define FC_PS_POLL FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_PS_POLL)
+#define FC_RTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_RTS)
+#define FC_CTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTS)
+#define FC_ACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_ACK)
+#define FC_CF_END FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END)
+#define FC_CF_END_ACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END_ACK)
+
+#define FC_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA)
+#define FC_NULL_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_NULL)
+#define FC_DATA_CF_ACK FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA_CF_ACK)
+#define FC_QOS_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_DATA)
+#define FC_QOS_NULL FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_NULL)
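
A stand-alone sketch (constants copied verbatim from above) of how a 16-bit frame-control word is composed with FC_KIND and decomposed with FC_TYPE/FC_SUBTYPE.

#include <stdio.h>
#include <stdint.h>

#define FC_TYPE_MASK      0xC
#define FC_TYPE_SHIFT     2
#define FC_SUBTYPE_MASK   0xF0
#define FC_SUBTYPE_SHIFT  4
#define FC_KIND(t, s)     (((t) << FC_TYPE_SHIFT) | ((s) << FC_SUBTYPE_SHIFT))
#define FC_TYPE(fc)       (((fc) & FC_TYPE_MASK) >> FC_TYPE_SHIFT)
#define FC_SUBTYPE(fc)    (((fc) & FC_SUBTYPE_MASK) >> FC_SUBTYPE_SHIFT)
#define FC_TYPE_MNG       0
#define FC_SUBTYPE_BEACON 8

int main(void)
{
	uint16_t fc = FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_BEACON);

	/* Prints: fc=0x0080 type=0 subtype=8 (a beacon) */
	printf("fc=0x%04x type=%d subtype=%d\n",
	       (unsigned)fc, FC_TYPE(fc), FC_SUBTYPE(fc));
	return 0;
}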
+
+
+
+
+#define QOS_PRIO_SHIFT 0
+#define QOS_PRIO_MASK 0x0007
+#define QOS_PRIO(qos) (((qos) & QOS_PRIO_MASK) >> QOS_PRIO_SHIFT)
+
+
+#define QOS_TID_SHIFT 0
+#define QOS_TID_MASK 0x000f
+#define QOS_TID(qos) (((qos) & QOS_TID_MASK) >> QOS_TID_SHIFT)
+
+
+#define QOS_EOSP_SHIFT 4
+#define QOS_EOSP_MASK 0x0010
+#define QOS_EOSP(qos) (((qos) & QOS_EOSP_MASK) >> QOS_EOSP_SHIFT)
+
+
+#define QOS_ACK_NORMAL_ACK 0
+#define QOS_ACK_NO_ACK 1
+#define QOS_ACK_NO_EXP_ACK 2
+#define QOS_ACK_BLOCK_ACK 3
+#define QOS_ACK_SHIFT 5
+#define QOS_ACK_MASK 0x0060
+#define QOS_ACK(qos) (((qos) & QOS_ACK_MASK) >> QOS_ACK_SHIFT)
+
+
+#define QOS_AMSDU_SHIFT 7
+#define QOS_AMSDU_MASK 0x0080
+
+
+
+
+
+
+#define DOT11_MNG_AUTH_ALGO_LEN 2
+#define DOT11_MNG_AUTH_SEQ_LEN 2
+#define DOT11_MNG_BEACON_INT_LEN 2
+#define DOT11_MNG_CAP_LEN 2
+#define DOT11_MNG_AP_ADDR_LEN 6
+#define DOT11_MNG_LISTEN_INT_LEN 2
+#define DOT11_MNG_REASON_LEN 2
+#define DOT11_MNG_AID_LEN 2
+#define DOT11_MNG_STATUS_LEN 2
+#define DOT11_MNG_TIMESTAMP_LEN 8
+
+
+#define DOT11_AID_MASK 0x3fff
+
+
+#define DOT11_RC_RESERVED 0
+#define DOT11_RC_UNSPECIFIED 1
+#define DOT11_RC_AUTH_INVAL 2
+#define DOT11_RC_DEAUTH_LEAVING 3
+#define DOT11_RC_INACTIVITY 4
+#define DOT11_RC_BUSY 5
+#define DOT11_RC_INVAL_CLASS_2 6
+#define DOT11_RC_INVAL_CLASS_3 7
+#define DOT11_RC_DISASSOC_LEAVING 8
+#define DOT11_RC_NOT_AUTH 9
+#define DOT11_RC_BAD_PC 10
+#define DOT11_RC_BAD_CHANNELS 11
+
+
+
+#define DOT11_RC_UNSPECIFIED_QOS 32
+#define DOT11_RC_INSUFFCIENT_BW 33
+#define DOT11_RC_EXCESSIVE_FRAMES 34
+#define DOT11_RC_TX_OUTSIDE_TXOP 35
+#define DOT11_RC_LEAVING_QBSS 36
+#define DOT11_RC_BAD_MECHANISM 37
+#define DOT11_RC_SETUP_NEEDED 38
+#define DOT11_RC_TIMEOUT 39
+
+#define DOT11_RC_MAX 23
+
+#define DOT11_RC_TDLS_PEER_UNREACH 25
+#define DOT11_RC_TDLS_DOWN_UNSPECIFIED 26
+
+
+#define DOT11_SC_SUCCESS 0
+#define DOT11_SC_FAILURE 1
+#define DOT11_SC_TDLS_WAKEUP_SCH_ALT 2
+
+#define DOT11_SC_TDLS_WAKEUP_SCH_REJ 3
+#define DOT11_SC_TDLS_SEC_DISABLED 5
+#define DOT11_SC_LIFETIME_REJ 6
+#define DOT11_SC_NOT_SAME_BSS 7
+#define DOT11_SC_CAP_MISMATCH 10
+#define DOT11_SC_REASSOC_FAIL 11
+#define DOT11_SC_ASSOC_FAIL 12
+#define DOT11_SC_AUTH_MISMATCH 13
+#define DOT11_SC_AUTH_SEQ 14
+#define DOT11_SC_AUTH_CHALLENGE_FAIL 15
+#define DOT11_SC_AUTH_TIMEOUT 16
+#define DOT11_SC_ASSOC_BUSY_FAIL 17
+#define DOT11_SC_ASSOC_RATE_MISMATCH 18
+#define DOT11_SC_ASSOC_SHORT_REQUIRED 19
+#define DOT11_SC_ASSOC_PBCC_REQUIRED 20
+#define DOT11_SC_ASSOC_AGILITY_REQUIRED 21
+#define DOT11_SC_ASSOC_SPECTRUM_REQUIRED 22
+#define DOT11_SC_ASSOC_BAD_POWER_CAP 23
+#define DOT11_SC_ASSOC_BAD_SUP_CHANNELS 24
+#define DOT11_SC_ASSOC_SHORTSLOT_REQUIRED 25
+#define DOT11_SC_ASSOC_ERPBCC_REQUIRED 26
+#define DOT11_SC_ASSOC_DSSOFDM_REQUIRED 27
+#define DOT11_SC_ASSOC_R0KH_UNREACHABLE 28
+#define DOT11_SC_ASSOC_TRY_LATER 30
+#define DOT11_SC_ASSOC_MFP_VIOLATION 31
+
+#define DOT11_SC_DECLINED 37
+#define DOT11_SC_INVALID_PARAMS 38
+#define DOT11_SC_INVALID_PAIRWISE_CIPHER 42
+#define DOT11_SC_INVALID_AKMP 43
+#define DOT11_SC_INVALID_RSNIE_CAP 45
+#define DOT11_SC_DLS_NOT_ALLOWED 48
+#define DOT11_SC_INVALID_PMKID 53
+#define DOT11_SC_INVALID_MDID 54
+#define DOT11_SC_INVALID_FTIE 55
+
+#define DOT11_SC_UNEXP_MSG 70
+#define DOT11_SC_INVALID_SNONCE 71
+#define DOT11_SC_INVALID_RSNIE 72
+
+
+#define DOT11_MNG_DS_PARAM_LEN 1
+#define DOT11_MNG_IBSS_PARAM_LEN 2
+
+
+#define DOT11_MNG_TIM_FIXED_LEN 3
+#define DOT11_MNG_TIM_DTIM_COUNT 0
+#define DOT11_MNG_TIM_DTIM_PERIOD 1
+#define DOT11_MNG_TIM_BITMAP_CTL 2
+#define DOT11_MNG_TIM_PVB 3
+
+
+#define TLV_TAG_OFF 0
+#define TLV_LEN_OFF 1
+#define TLV_HDR_LEN 2
+#define TLV_BODY_OFF 2
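
The management IEs that follow the fixed beacon/probe-response fields are TLV encoded, which is what the offsets above describe. A stand-alone sketch (hypothetical buffer) walks such a list.

#include <stdio.h>
#include <stdint.h>

#define TLV_TAG_OFF  0
#define TLV_LEN_OFF  1
#define TLV_HDR_LEN  2

static void walk_ies(const uint8_t *ies, int len)
{
	while (len >= TLV_HDR_LEN) {
		uint8_t tag  = ies[TLV_TAG_OFF];
		uint8_t blen = ies[TLV_LEN_OFF];

		if (TLV_HDR_LEN + blen > len)
			break;	/* truncated IE */
		printf("IE id=%u len=%u\n", tag, blen);
		ies += TLV_HDR_LEN + blen;
		len -= TLV_HDR_LEN + blen;
	}
}

int main(void)
{
	/* SSID "ab" (id 0) followed by a 1-byte DS params IE (id 3). */
	const uint8_t ies[] = { 0, 2, 'a', 'b', 3, 1, 6 };

	walk_ies(ies, (int)sizeof(ies));
	return 0;
}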
+
+
+#define DOT11_MNG_SSID_ID 0
+#define DOT11_MNG_RATES_ID 1
+#define DOT11_MNG_FH_PARMS_ID 2
+#define DOT11_MNG_DS_PARMS_ID 3
+#define DOT11_MNG_CF_PARMS_ID 4
+#define DOT11_MNG_TIM_ID 5
+#define DOT11_MNG_IBSS_PARMS_ID 6
+#define DOT11_MNG_COUNTRY_ID 7
+#define DOT11_MNG_HOPPING_PARMS_ID 8
+#define DOT11_MNG_HOPPING_TABLE_ID 9
+#define DOT11_MNG_REQUEST_ID 10
+#define DOT11_MNG_QBSS_LOAD_ID 11
+#define DOT11_MNG_EDCA_PARAM_ID 12
+#define DOT11_MNG_CHALLENGE_ID 16
+#define DOT11_MNG_PWR_CONSTRAINT_ID 32
+#define DOT11_MNG_PWR_CAP_ID 33
+#define DOT11_MNG_TPC_REQUEST_ID 34
+#define DOT11_MNG_TPC_REPORT_ID 35
+#define DOT11_MNG_SUPP_CHANNELS_ID 36
+#define DOT11_MNG_CHANNEL_SWITCH_ID 37
+#define DOT11_MNG_MEASURE_REQUEST_ID 38
+#define DOT11_MNG_MEASURE_REPORT_ID 39
+#define DOT11_MNG_QUIET_ID 40
+#define DOT11_MNG_IBSS_DFS_ID 41
+#define DOT11_MNG_ERP_ID 42
+#define DOT11_MNG_TS_DELAY_ID 43
+#define DOT11_MNG_HT_CAP 45
+#define DOT11_MNG_QOS_CAP_ID 46
+#define DOT11_MNG_NONERP_ID 47
+#define DOT11_MNG_RSN_ID 48
+#define DOT11_MNG_EXT_RATES_ID 50
+#define DOT11_MNG_AP_CHREP_ID 51
+#define DOT11_MNG_NBR_REP_ID 52
+#define DOT11_MNG_MDIE_ID 54
+#define DOT11_MNG_FTIE_ID 55
+#define DOT11_MNG_FT_TI_ID 56
+#define DOT11_MNG_REGCLASS_ID 59
+#define DOT11_MNG_EXT_CSA_ID 60
+#define DOT11_MNG_HT_ADD 61
+#define DOT11_MNG_EXT_CHANNEL_OFFSET 62
+#define DOT11_MNG_WAPI_ID 68
+#define DOT11_MNG_TIME_ADVERTISE_ID 69
+#define DOT11_MNG_RRM_CAP_ID 70
+#define DOT11_MNG_HT_BSS_COEXINFO_ID 72
+#define DOT11_MNG_HT_BSS_CHANNEL_REPORT_ID 73
+#define DOT11_MNG_HT_OBSS_ID 74
+#define DOT11_MNG_CHANNEL_USAGE 97
+#define DOT11_MNG_TIME_ZONE_ID 98
+#define DOT11_MNG_LINK_IDENTIFIER_ID 101
+#define DOT11_MNG_WAKEUP_SCHEDULE_ID 102
+#define DOT11_MNG_CHANNEL_SWITCH_TIMING_ID 104
+#define DOT11_MNG_PTI_CONTROL_ID 105
+#define DOT11_MNG_PU_BUFFER_STATUS_ID 106
+#define DOT11_MNG_INTERWORKING_ID 107
+#define DOT11_MNG_ADVERTISEMENT_ID 108
+#define DOT11_MNG_EXP_BW_REQ_ID 109
+#define DOT11_MNG_QOS_MAP_ID 110
+#define DOT11_MNG_ROAM_CONSORT_ID 111
+#define DOT11_MNG_EMERGCY_ALERT_ID 112
+#define DOT11_MNG_EXT_CAP_ID 127
+#define DOT11_MNG_VHT_CAP_ID 191
+#define DOT11_MNG_VHT_OPERATION_ID 192
+
+#define DOT11_MNG_WPA_ID 221
+#define DOT11_MNG_PROPR_ID 221
+
+#define DOT11_MNG_VS_ID 221
+
+
+#define DOT11_RATE_BASIC 0x80
+#define DOT11_RATE_MASK 0x7F
+
+
+#define DOT11_MNG_ERP_LEN 1
+#define DOT11_MNG_NONERP_PRESENT 0x01
+#define DOT11_MNG_USE_PROTECTION 0x02
+#define DOT11_MNG_BARKER_PREAMBLE 0x04
+
+#define DOT11_MGN_TS_DELAY_LEN 4
+#define TS_DELAY_FIELD_SIZE 4
+
+
+#define DOT11_CAP_ESS 0x0001
+#define DOT11_CAP_IBSS 0x0002
+#define DOT11_CAP_POLLABLE 0x0004
+#define DOT11_CAP_POLL_RQ 0x0008
+#define DOT11_CAP_PRIVACY 0x0010
+#define DOT11_CAP_SHORT 0x0020
+#define DOT11_CAP_PBCC 0x0040
+#define DOT11_CAP_AGILITY 0x0080
+#define DOT11_CAP_SPECTRUM 0x0100
+#define DOT11_CAP_SHORTSLOT 0x0400
+#define DOT11_CAP_RRM 0x1000
+#define DOT11_CAP_CCK_OFDM 0x2000
+
+
+
+#define DOT11_EXT_CAP_OBSS_COEX_MGMT 0
+
+#define DOT11_EXT_CAP_SPSMP 6
+
+#define DOT11_EXT_CAP_BSS_TRANSITION_MGMT 19
+
+#define DOT11_EXT_CAP_IW 31
+
+#define DOT11_EXT_CAP_SI 41
+#define DOT11_EXT_CAP_SI_MASK 0x0E
+
+
+#define DOT11_ACTION_HDR_LEN 2
+#define DOT11_ACTION_CAT_OFF 0
+#define DOT11_ACTION_ACT_OFF 1
+
+
+#define DOT11_ACTION_CAT_ERR_MASK 0x80
+#define DOT11_ACTION_CAT_MASK 0x7F
+#define DOT11_ACTION_CAT_SPECT_MNG 0
+#define DOT11_ACTION_CAT_QOS 1
+#define DOT11_ACTION_CAT_DLS 2
+#define DOT11_ACTION_CAT_BLOCKACK 3
+#define DOT11_ACTION_CAT_PUBLIC 4
+#define DOT11_ACTION_CAT_RRM 5
+#define DOT11_ACTION_CAT_FBT 6
+#define DOT11_ACTION_CAT_HT 7
+#define DOT11_ACTION_CAT_SA_QUERY 8
+#define DOT11_ACTION_CAT_PDPA 9
+#define DOT11_ACTION_CAT_BSSMGMT 10
+#define DOT11_ACTION_NOTIFICATION 17
+#define DOT11_ACTION_CAT_VSP 126
+#define DOT11_ACTION_CAT_VS 127
+
+
+#define DOT11_SM_ACTION_M_REQ 0
+#define DOT11_SM_ACTION_M_REP 1
+#define DOT11_SM_ACTION_TPC_REQ 2
+#define DOT11_SM_ACTION_TPC_REP 3
+#define DOT11_SM_ACTION_CHANNEL_SWITCH 4
+#define DOT11_SM_ACTION_EXT_CSA 5
+
+
+#define DOT11_ACTION_ID_HT_CH_WIDTH 0
+#define DOT11_ACTION_ID_HT_MIMO_PS 1
+
+
+#define DOT11_PUB_ACTION_BSS_COEX_MNG 0
+#define DOT11_PUB_ACTION_CHANNEL_SWITCH 4
+
+
+#define DOT11_BA_ACTION_ADDBA_REQ 0
+#define DOT11_BA_ACTION_ADDBA_RESP 1
+#define DOT11_BA_ACTION_DELBA 2
+
+
+#define DOT11_ADDBA_PARAM_AMSDU_SUP 0x0001
+#define DOT11_ADDBA_PARAM_POLICY_MASK 0x0002
+#define DOT11_ADDBA_PARAM_POLICY_SHIFT 1
+#define DOT11_ADDBA_PARAM_TID_MASK 0x003c
+#define DOT11_ADDBA_PARAM_TID_SHIFT 2
+#define DOT11_ADDBA_PARAM_BSIZE_MASK 0xffc0
+#define DOT11_ADDBA_PARAM_BSIZE_SHIFT 6
+
+#define DOT11_ADDBA_POLICY_DELAYED 0
+#define DOT11_ADDBA_POLICY_IMMEDIATE 1
+
+
+#define DOT11_FT_ACTION_FT_RESERVED 0
+#define DOT11_FT_ACTION_FT_REQ 1
+#define DOT11_FT_ACTION_FT_RES 2
+#define DOT11_FT_ACTION_FT_CON 3
+#define DOT11_FT_ACTION_FT_ACK 4
+
+
+#define DOT11_DLS_ACTION_REQ 0
+#define DOT11_DLS_ACTION_RESP 1
+#define DOT11_DLS_ACTION_TD 2
+
+
+#define DOT11_WNM_ACTION_EVENT_REQ 0
+#define DOT11_WNM_ACTION_EVENT_REP 1
+#define DOT11_WNM_ACTION_DIAG_REQ 2
+#define DOT11_WNM_ACTION_DIAG_REP 3
+#define DOT11_WNM_ACTION_LOC_CFG_REQ 4
+#define DOT11_WNM_ACTION_LOC_RFG_RESP 5
+#define DOT11_WNM_ACTION_BSS_TRANS_QURY 6
+#define DOT11_WNM_ACTION_BSS_TRANS_REQ 7
+#define DOT11_WNM_ACTION_BSS_TRANS_RESP 8
+#define DOT11_WNM_ACTION_FMS_REQ 9
+#define DOT11_WNM_ACTION_FMS_RESP 10
+#define DOT11_WNM_ACTION_COL_INTRFRNCE_REQ 11
+#define DOT11_WNM_ACTION_COL_INTRFRNCE_REP 12
+#define DOT11_WNM_ACTION_TFS_REQ 13
+#define DOT11_WNM_ACTION_TFS_RESP 14
+#define DOT11_WNM_ACTION_TFS_NOTIFY 15
+#define DOT11_WNM_ACTION_WNM_SLEEP_REQ 16
+#define DOT11_WNM_ACTION_WNM_SLEEP_RESP 17
+#define DOT11_WNM_ACTION_TIM_BCAST_REQ 18
+#define DOT11_WNM_ACTION_TIM_BCAST_RESP 19
+#define DOT11_WNM_ACTION_QOS_TRFC_CAP_UPD 20
+#define DOT11_WNM_ACTION_CHAN_USAGE_REQ 21
+#define DOT11_WNM_ACTION_CHAN_USAGE_RESP 22
+#define DOT11_WNM_ACTION_DMS_REQ 23
+#define DOT11_WNM_ACTION_DMS_RESP 24
+#define DOT11_WNM_ACTION_TMNG_MEASUR_REQ 25
+#define DOT11_WNM_ACTION_NOTFCTN_REQ 26
+#define DOT11_WNM_ACTION_NOTFCTN_RES 27
+
+#define DOT11_MNG_COUNTRY_ID_LEN 3
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_dls_req {
+ uint8 category;
+ uint8 action;
+ struct ether_addr da;
+ struct ether_addr sa;
+ uint16 cap;
+ uint16 timeout;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dls_req dot11_dls_req_t;
+#define DOT11_DLS_REQ_LEN 18
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_dls_resp {
+ uint8 category;
+ uint8 action;
+ uint16 status;
+ struct ether_addr da;
+ struct ether_addr sa;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dls_resp dot11_dls_resp_t;
+#define DOT11_DLS_RESP_LEN 16
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_bss_trans_query {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint8 reason;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bss_trans_query dot11_bss_trans_query_t;
+#define DOT11_BSS_TRANS_QUERY_LEN 4
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_bss_trans_req {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint8 reqmode;
+ uint16 disassoc_tmr;
+ uint8 validity_intrvl;
+ uint8 data[1];
+
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bss_trans_req dot11_bss_trans_req_t;
+#define DOT11_BSS_TRANS_REQ_LEN 7
+
+#define DOT11_BSS_TERM_DUR_LEN 12
+
+
+
+#define DOT11_BSS_TRNS_REQMODE_PREF_LIST_INCL 0x01
+#define DOT11_BSS_TRNS_REQMODE_ABRIDGED 0x02
+#define DOT11_BSS_TRNS_REQMODE_DISASSOC_IMMINENT 0x04
+#define DOT11_BSS_TRNS_REQMODE_BSS_TERM_INCL 0x08
+#define DOT11_BSS_TRNS_REQMODE_ESS_DISASSOC_IMNT 0x10
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_bss_trans_res {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint8 status;
+ uint8 term_delay;
+ uint8 data[1];
+
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bss_trans_res dot11_bss_trans_res_t;
+#define DOT11_BSS_TRANS_RES_LEN 5
+
+
+#define DOT11_BSS_TRNS_RES_STATUS_ACCEPT 0
+#define DOT11_BSS_TRNS_RES_STATUS_REJECT 1
+#define DOT11_BSS_TRNS_RES_STATUS_REJ_INSUFF_BCN 2
+#define DOT11_BSS_TRNS_RES_STATUS_REJ_INSUFF_CAP 3
+#define DOT11_BSS_TRNS_RES_STATUS_REJ_TERM_UNDESIRED 4
+#define DOT11_BSS_TRNS_RES_STATUS_REJ_TERM_DELAY_REQ 5
+#define DOT11_BSS_TRNS_RES_STATUS_REJ_BSS_LIST_PROVIDED 6
+#define DOT11_BSS_TRNS_RES_STATUS_REJ_NO_SUITABLE_BSS 7
+#define DOT11_BSS_TRNS_RES_STATUS_REJ_LEAVING_ESS 8
+
+
+
+#define DOT11_NBR_RPRT_BSSID_INFO_REACHABILTY 0x0003
+#define DOT11_NBR_RPRT_BSSID_INFO_SEC 0x0004
+#define DOT11_NBR_RPRT_BSSID_INFO_KEY_SCOPE 0x0008
+#define DOT11_NBR_RPRT_BSSID_INFO_CAP 0x03f0
+
+#define DOT11_NBR_RPRT_BSSID_INFO_CAP_SPEC_MGMT 0x0010
+#define DOT11_NBR_RPRT_BSSID_INFO_CAP_QOS 0x0020
+#define DOT11_NBR_RPRT_BSSID_INFO_CAP_APSD 0x0040
+#define DOT11_NBR_RPRT_BSSID_INFO_CAP_RDIO_MSMT 0x0080
+#define DOT11_NBR_RPRT_BSSID_INFO_CAP_DEL_BA 0x0100
+#define DOT11_NBR_RPRT_BSSID_INFO_CAP_IMM_BA 0x0200
+
+
+#define DOT11_NBR_RPRT_SUBELEM_BSS_CANDDT_PREF_ID 3
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_req {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint16 addba_param_set;
+ uint16 timeout;
+ uint16 start_seqnum;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_req dot11_addba_req_t;
+#define DOT11_ADDBA_REQ_LEN 9
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_resp {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint16 status;
+ uint16 addba_param_set;
+ uint16 timeout;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_resp dot11_addba_resp_t;
+#define DOT11_ADDBA_RESP_LEN 9
+
+
+#define DOT11_DELBA_PARAM_INIT_MASK 0x0800
+#define DOT11_DELBA_PARAM_INIT_SHIFT 11
+#define DOT11_DELBA_PARAM_TID_MASK 0xf000
+#define DOT11_DELBA_PARAM_TID_SHIFT 12
+
+BWL_PRE_PACKED_STRUCT struct dot11_delba {
+ uint8 category;
+ uint8 action;
+ uint16 delba_param_set;
+ uint16 reason;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_delba dot11_delba_t;
+#define DOT11_DELBA_LEN 6
+
+
+#define SA_QUERY_REQUEST 0
+#define SA_QUERY_RESPONSE 1
+
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_ft_req {
+ uint8 category;
+ uint8 action;
+ uint8 sta_addr[ETHER_ADDR_LEN];
+ uint8 tgt_ap_addr[ETHER_ADDR_LEN];
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_req dot11_ft_req_t;
+#define DOT11_FT_REQ_FIXED_LEN 14
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_ft_res {
+ uint8 category;
+ uint8 action;
+ uint8 sta_addr[ETHER_ADDR_LEN];
+ uint8 tgt_ap_addr[ETHER_ADDR_LEN];
+ uint16 status;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_res dot11_ft_res_t;
+#define DOT11_FT_RES_FIXED_LEN 16
+
+
+
+
+
+
+#define DOT11_RRM_CAP_LEN 5
+BWL_PRE_PACKED_STRUCT struct dot11_rrm_cap_ie {
+ uint8 cap[DOT11_RRM_CAP_LEN];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rrm_cap_ie dot11_rrm_cap_ie_t;
+
+
+#define DOT11_RRM_CAP_LINK 0
+#define DOT11_RRM_CAP_NEIGHBOR_REPORT 1
+#define DOT11_RRM_CAP_PARALLEL 2
+#define DOT11_RRM_CAP_REPEATED 3
+#define DOT11_RRM_CAP_BCN_PASSIVE 4
+#define DOT11_RRM_CAP_BCN_ACTIVE 5
+#define DOT11_RRM_CAP_BCN_TABLE 6
+#define DOT11_RRM_CAP_BCN_REP_COND 7
+#define DOT11_RRM_CAP_AP_CHANREP 16
+
+
+
+#define DOT11_OP_CLASS_NONE 255
+
+
+
+#define DOT11_RM_ACTION_RM_REQ 0
+#define DOT11_RM_ACTION_RM_REP 1
+#define DOT11_RM_ACTION_LM_REQ 2
+#define DOT11_RM_ACTION_LM_REP 3
+#define DOT11_RM_ACTION_NR_REQ 4
+#define DOT11_RM_ACTION_NR_REP 5
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_rm_action {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rm_action dot11_rm_action_t;
+#define DOT11_RM_ACTION_LEN 3
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint16 reps;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq dot11_rmreq_t;
+#define DOT11_RMREQ_LEN 5
+
+BWL_PRE_PACKED_STRUCT struct dot11_rm_ie {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rm_ie dot11_rm_ie_t;
+#define DOT11_RM_IE_LEN 5
+
+
+#define DOT11_RMREQ_MODE_PARALLEL 1
+#define DOT11_RMREQ_MODE_ENABLE 2
+#define DOT11_RMREQ_MODE_REQUEST 4
+#define DOT11_RMREQ_MODE_REPORT 8
+#define DOT11_RMREQ_MODE_DURMAND 0x10
+
+
+#define DOT11_RMREP_MODE_LATE 1
+#define DOT11_RMREP_MODE_INCAPABLE 2
+#define DOT11_RMREP_MODE_REFUSED 4
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_bcn {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint8 reg;
+ uint8 channel;
+ uint16 interval;
+ uint16 duration;
+ uint8 bcn_mode;
+ struct ether_addr bssid;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_bcn dot11_rmreq_bcn_t;
+#define DOT11_RMREQ_BCN_LEN 18
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn {
+ uint8 reg;
+ uint8 channel;
+ uint32 starttime[2];
+ uint16 duration;
+ uint8 frame_info;
+ uint8 rcpi;
+ uint8 rsni;
+ struct ether_addr bssid;
+ uint8 antenna_id;
+ uint32 parent_tsf;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_bcn dot11_rmrep_bcn_t;
+#define DOT11_RMREP_BCN_LEN 26
+
+
+#define DOT11_RMREQ_BCN_PASSIVE 0
+#define DOT11_RMREQ_BCN_ACTIVE 1
+#define DOT11_RMREQ_BCN_TABLE 2
+
+
+#define DOT11_RMREQ_BCN_SSID_ID 0
+#define DOT11_RMREQ_BCN_REPINFO_ID 1
+#define DOT11_RMREQ_BCN_REPDET_ID 2
+#define DOT11_RMREQ_BCN_REQUEST_ID 10
+#define DOT11_RMREQ_BCN_APCHREP_ID 51
+
+
+#define DOT11_RMREQ_BCN_REPDET_FIXED 0
+#define DOT11_RMREQ_BCN_REPDET_REQUEST 1
+#define DOT11_RMREQ_BCN_REPDET_ALL 2
+
+
+#define DOT11_RMREP_BCN_FRM_BODY 1
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_nbr {
+ struct ether_addr bssid;
+ uint32 bssid_info;
+ uint8 reg;
+ uint8 channel;
+ uint8 phytype;
+ uchar sub_elements[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_nbr dot11_rmrep_nbr_t;
+#define DOT11_RMREP_NBR_LEN 13
+
+
+#define DOT11_BSSTYPE_INFRASTRUCTURE 0
+#define DOT11_BSSTYPE_INDEPENDENT 1
+#define DOT11_BSSTYPE_ANY 2
+#define DOT11_SCANTYPE_ACTIVE 0
+#define DOT11_SCANTYPE_PASSIVE 1
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_lmreq {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint8 txpwr;
+ uint8 maxtxpwr;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_lmreq dot11_lmreq_t;
+#define DOT11_LMREQ_LEN 5
+
+BWL_PRE_PACKED_STRUCT struct dot11_lmrep {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ dot11_tpc_rep_t tpc;
+ uint8 rxant;
+ uint8 txant;
+ uint8 rcpi;
+ uint8 rsni;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_lmrep dot11_lmrep_t;
+#define DOT11_LMREP_LEN 11
+
+
+#define PREN_PREAMBLE 24
+#define PREN_MM_EXT 12
+#define PREN_PREAMBLE_EXT 4
+
+
+#define RIFS_11N_TIME 2
+
+
+
+#define HT_SIG1_MCS_MASK 0x00007F
+#define HT_SIG1_CBW 0x000080
+#define HT_SIG1_HT_LENGTH 0xFFFF00
+
+
+#define HT_SIG2_SMOOTHING 0x000001
+#define HT_SIG2_NOT_SOUNDING 0x000002
+#define HT_SIG2_RESERVED 0x000004
+#define HT_SIG2_AGGREGATION 0x000008
+#define HT_SIG2_STBC_MASK 0x000030
+#define HT_SIG2_STBC_SHIFT 4
+#define HT_SIG2_FEC_CODING 0x000040
+#define HT_SIG2_SHORT_GI 0x000080
+#define HT_SIG2_ESS_MASK 0x000300
+#define HT_SIG2_ESS_SHIFT 8
+#define HT_SIG2_CRC 0x03FC00
+#define HT_SIG2_TAIL 0x1C0000
+
+
+#define APHY_SLOT_TIME 9
+#define APHY_SIFS_TIME 16
+#define APHY_DIFS_TIME (APHY_SIFS_TIME + (2 * APHY_SLOT_TIME))
+#define APHY_PREAMBLE_TIME 16
+#define APHY_SIGNAL_TIME 4
+#define APHY_SYMBOL_TIME 4
+#define APHY_SERVICE_NBITS 16
+#define APHY_TAIL_NBITS 6
+#define APHY_CWMIN 15
+
+
+#define BPHY_SLOT_TIME 20
+#define BPHY_SIFS_TIME 10
+#define BPHY_DIFS_TIME 50
+#define BPHY_PLCP_TIME 192
+#define BPHY_PLCP_SHORT_TIME 96
+#define BPHY_CWMIN 31
+
+
+#define DOT11_OFDM_SIGNAL_EXTENSION 6
+
+#define PHY_CWMAX 1023
+
+#define DOT11_MAXNUMFRAGS 16
+
+
+
+typedef int vht_group_id_t;
+
+
+
+#define VHT_SIGA1_CONST_MASK 0x800004
+
+#define VHT_SIGA1_20MHZ_VAL 0x000000
+#define VHT_SIGA1_40MHZ_VAL 0x000001
+#define VHT_SIGA1_80MHZ_VAL 0x000002
+#define VHT_SIGA1_160MHZ_VAL 0x000003
+
+#define VHT_SIGA1_STBC 0x000008
+
+#define VHT_SIGA1_GID_MAX_GID 0x3f
+#define VHT_SIGA1_GID_SHIFT 4
+#define VHT_SIGA1_GID_TO_AP 0x00
+#define VHT_SIGA1_GID_NOT_TO_AP 0x3f
+
+#define VHT_SIGA1_NSTS_SHIFT 10
+#define VHT_SIGA1_NSTS_SHIFT_MASK_USER0 0x001C00
+
+#define VHT_SIGA1_PARTIAL_AID_SHIFT 13
+
+
+#define VHT_SIGA2_GI_NONE 0x000000
+#define VHT_SIGA2_GI_SHORT 0x000001
+#define VHT_SIGA2_GI_W_MOD10 0x000002
+#define VHT_SIGA2_CODING_LDPC 0x000004
+#define VHT_SIGA2_BEAMFORM_ENABLE 0x000100
+#define VHT_SIGA2_MCS_SHIFT 4
+
+#define VHT_SIGA2_B9_RESERVED 0x000200
+#define VHT_SIGA2_TAIL_MASK 0xfc0000
+#define VHT_SIGA2_TAIL_VALUE 0x000000
+
+#define VHT_SIGA2_SVC_BITS 16
+#define VHT_SIGA2_TAIL_BITS 6
+
+
+
+typedef struct d11cnt {
+ uint32 txfrag;
+ uint32 txmulti;
+ uint32 txfail;
+ uint32 txretry;
+ uint32 txretrie;
+ uint32 rxdup;
+ uint32 txrts;
+ uint32 txnocts;
+ uint32 txnoack;
+ uint32 rxfrag;
+ uint32 rxmulti;
+ uint32 rxcrc;
+ uint32 txfrmsnt;
+ uint32 rxundec;
+} d11cnt_t;
+
+
+#define BRCM_PROP_OUI "\x00\x90\x4C"
+
+
+
+#define BRCM_OUI "\x00\x10\x18"
+
+
+BWL_PRE_PACKED_STRUCT struct brcm_ie {
+ uint8 id;
+ uint8 len;
+ uint8 oui[3];
+ uint8 ver;
+ uint8 assoc;
+ uint8 flags;
+ uint8 flags1;
+ uint16 amsdu_mtu_pref;
+} BWL_POST_PACKED_STRUCT;
+typedef struct brcm_ie brcm_ie_t;
+#define BRCM_IE_LEN 11
+#define BRCM_IE_VER 2
+#define BRCM_IE_LEGACY_AES_VER 1
+
+
+#define BRF_LZWDS 0x4
+#define BRF_BLOCKACK 0x8
+
+
+#define BRF1_AMSDU 0x1
+#define BRF1_WMEPS 0x4
+#define BRF1_PSOFIX 0x8
+#define BRF1_RX_LARGE_AGG 0x10
+#define BRF1_RFAWARE_DCS 0x20
+#define BRF1_SOFTAP 0x40
+
+
+BWL_PRE_PACKED_STRUCT struct vndr_ie {
+ uchar id;
+ uchar len;
+ uchar oui [3];
+ uchar data [1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct vndr_ie vndr_ie_t;
+
+#define VNDR_IE_HDR_LEN 2
+#define VNDR_IE_MIN_LEN 3
+#define VNDR_IE_MAX_LEN 256
+
+
+#define MCSSET_LEN 16
+#define MAX_MCS_NUM (128)
+
+BWL_PRE_PACKED_STRUCT struct ht_cap_ie {
+ uint16 cap;
+ uint8 params;
+ uint8 supp_mcs[MCSSET_LEN];
+ uint16 ext_htcap;
+ uint32 txbf_cap;
+ uint8 as_cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_cap_ie ht_cap_ie_t;
+
+
+
+BWL_PRE_PACKED_STRUCT struct ht_prop_cap_ie {
+ uint8 id;
+ uint8 len;
+ uint8 oui[3];
+ uint8 type;
+ ht_cap_ie_t cap_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_prop_cap_ie ht_prop_cap_ie_t;
+
+#define HT_PROP_IE_OVERHEAD 4
+#define HT_CAP_IE_LEN 26
+#define HT_CAP_IE_TYPE 51
+
+#define HT_CAP_LDPC_CODING 0x0001
+#define HT_CAP_40MHZ 0x0002
+#define HT_CAP_MIMO_PS_MASK 0x000C
+#define HT_CAP_MIMO_PS_SHIFT 0x0002
+#define HT_CAP_MIMO_PS_OFF 0x0003
+#define HT_CAP_MIMO_PS_RTS 0x0001
+#define HT_CAP_MIMO_PS_ON 0x0000
+#define HT_CAP_GF 0x0010
+#define HT_CAP_SHORT_GI_20 0x0020
+#define HT_CAP_SHORT_GI_40 0x0040
+#define HT_CAP_TX_STBC 0x0080
+#define HT_CAP_RX_STBC_MASK 0x0300
+#define HT_CAP_RX_STBC_SHIFT 8
+#define HT_CAP_DELAYED_BA 0x0400
+#define HT_CAP_MAX_AMSDU 0x0800
+
+#define HT_CAP_DSSS_CCK 0x1000
+#define HT_CAP_PSMP 0x2000
+#define HT_CAP_40MHZ_INTOLERANT 0x4000
+#define HT_CAP_LSIG_TXOP 0x8000
+
+#define HT_CAP_RX_STBC_NO 0x0
+#define HT_CAP_RX_STBC_ONE_STREAM 0x1
+#define HT_CAP_RX_STBC_TWO_STREAM 0x2
+#define HT_CAP_RX_STBC_THREE_STREAM 0x3
+
+#define VHT_MAX_MPDU 11454
+#define VHT_MPDU_MSDU_DELTA 56
+
+#define VHT_MAX_AMSDU (VHT_MAX_MPDU - VHT_MPDU_MSDU_DELTA)
+
+#define HT_MAX_AMSDU 7935
+#define HT_MIN_AMSDU 3835
+
+#define HT_PARAMS_RX_FACTOR_MASK 0x03
+#define HT_PARAMS_DENSITY_MASK 0x1C
+#define HT_PARAMS_DENSITY_SHIFT 2
+
+
+#define AMPDU_MAX_MPDU_DENSITY 7
+#define AMPDU_DENSITY_NONE 0
+#define AMPDU_DENSITY_1over4_US 1
+#define AMPDU_DENSITY_1over2_US 2
+#define AMPDU_DENSITY_1_US 3
+#define AMPDU_DENSITY_2_US 4
+#define AMPDU_DENSITY_4_US 5
+#define AMPDU_DENSITY_8_US 6
+#define AMPDU_DENSITY_16_US 7
+#define AMPDU_RX_FACTOR_8K 0
+#define AMPDU_RX_FACTOR_16K 1
+#define AMPDU_RX_FACTOR_32K 2
+#define AMPDU_RX_FACTOR_64K 3
+#define AMPDU_RX_FACTOR_BASE 8*1024
+
+#define AMPDU_DELIMITER_LEN 4
+#define AMPDU_DELIMITER_LEN_MAX 63
+
+#define HT_CAP_EXT_PCO 0x0001
+#define HT_CAP_EXT_PCO_TTIME_MASK 0x0006
+#define HT_CAP_EXT_PCO_TTIME_SHIFT 1
+#define HT_CAP_EXT_MCS_FEEDBACK_MASK 0x0300
+#define HT_CAP_EXT_MCS_FEEDBACK_SHIFT 8
+#define HT_CAP_EXT_HTC 0x0400
+#define HT_CAP_EXT_RD_RESP 0x0800
+
+BWL_PRE_PACKED_STRUCT struct ht_add_ie {
+ uint8 ctl_ch;
+ uint8 byte1;
+ uint16 opmode;
+ uint16 misc_bits;
+ uint8 basic_mcs[MCSSET_LEN];
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_add_ie ht_add_ie_t;
+
+
+
+BWL_PRE_PACKED_STRUCT struct ht_prop_add_ie {
+ uint8 id;
+ uint8 len;
+ uint8 oui[3];
+ uint8 type;
+ ht_add_ie_t add_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_prop_add_ie ht_prop_add_ie_t;
+
+#define HT_ADD_IE_LEN 22
+#define HT_ADD_IE_TYPE 52
+
+
+#define HT_BW_ANY 0x04
+#define HT_RIFS_PERMITTED 0x08
+
+
+#define HT_OPMODE_MASK 0x0003
+#define HT_OPMODE_SHIFT 0
+#define HT_OPMODE_PURE 0x0000
+#define HT_OPMODE_OPTIONAL 0x0001
+#define HT_OPMODE_HT20IN40 0x0002
+#define HT_OPMODE_MIXED 0x0003
+#define HT_OPMODE_NONGF 0x0004
+#define DOT11N_TXBURST 0x0008
+#define DOT11N_OBSS_NONHT 0x0010
+
+
+#define HT_BASIC_STBC_MCS 0x007f
+#define HT_DUAL_STBC_PROT 0x0080
+#define HT_SECOND_BCN 0x0100
+#define HT_LSIG_TXOP 0x0200
+#define HT_PCO_ACTIVE 0x0400
+#define HT_PCO_PHASE 0x0800
+#define HT_DUALCTS_PROTECTION 0x0080
+
+
+#define DOT11N_2G_TXBURST_LIMIT 6160
+#define DOT11N_5G_TXBURST_LIMIT 3080
+
+
+#define GET_HT_OPMODE(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+ >> HT_OPMODE_SHIFT)
+#define HT_MIXEDMODE_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+ == HT_OPMODE_MIXED)
+#define HT_HT20_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+ == HT_OPMODE_HT20IN40)
+#define HT_OPTIONAL_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+ == HT_OPMODE_OPTIONAL)
+#define HT_USE_PROTECTION(add_ie) (HT_HT20_PRESENT((add_ie)) || \
+ HT_MIXEDMODE_PRESENT((add_ie)))
+#define HT_NONGF_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_NONGF) \
+ == HT_OPMODE_NONGF)
+#define DOT11N_TXBURST_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & DOT11N_TXBURST) \
+ == DOT11N_TXBURST)
+#define DOT11N_OBSS_NONHT_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & DOT11N_OBSS_NONHT) \
+ == DOT11N_OBSS_NONHT)
+
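A minimal sketch (not part of the patch) of the HT_USE_PROTECTION() condition above; the real macros go through Broadcom's ltoh16_ua() unaligned-load helper, so the plain little-endian load and the function name here are assumptions.

#include <stdint.h>
#include <stdbool.h>

#define HT_OPMODE_MASK		0x0003	/* mirrors the definitions above */
#define HT_OPMODE_HT20IN40	0x0002
#define HT_OPMODE_MIXED		0x0003

static bool ht_use_protection(const uint8_t opmode_le[2])
{
	uint16_t mode = (uint16_t)(opmode_le[0] | (opmode_le[1] << 8)) & HT_OPMODE_MASK;

	/* Protect when HT20 stations sit in a 40 MHz BSS or non-HT stations are present. */
	return mode == HT_OPMODE_HT20IN40 || mode == HT_OPMODE_MIXED;
}
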
+BWL_PRE_PACKED_STRUCT struct obss_params {
+ uint16 passive_dwell;
+ uint16 active_dwell;
+ uint16 bss_widthscan_interval;
+ uint16 passive_total;
+ uint16 active_total;
+ uint16 chanwidth_transition_dly;
+ uint16 activity_threshold;
+} BWL_POST_PACKED_STRUCT;
+typedef struct obss_params obss_params_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_ie {
+ uint8 id;
+ uint8 len;
+ obss_params_t obss_params;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_ie dot11_obss_ie_t;
+#define DOT11_OBSS_SCAN_IE_LEN sizeof(obss_params_t)
+
+
+#define HT_CTRL_LA_TRQ 0x00000002
+#define HT_CTRL_LA_MAI 0x0000003C
+#define HT_CTRL_LA_MAI_SHIFT 2
+#define HT_CTRL_LA_MAI_MRQ 0x00000004
+#define HT_CTRL_LA_MAI_MSI 0x00000038
+#define HT_CTRL_LA_MFSI 0x000001C0
+#define HT_CTRL_LA_MFSI_SHIFT 6
+#define HT_CTRL_LA_MFB_ASELC 0x0000FE00
+#define HT_CTRL_LA_MFB_ASELC_SH 9
+#define HT_CTRL_LA_ASELC_CMD 0x00000C00
+#define HT_CTRL_LA_ASELC_DATA 0x0000F000
+#define HT_CTRL_CAL_POS 0x00030000
+#define HT_CTRL_CAL_SEQ 0x000C0000
+#define HT_CTRL_CSI_STEERING 0x00C00000
+#define HT_CTRL_CSI_STEER_SHIFT 22
+#define HT_CTRL_CSI_STEER_NFB 0
+#define HT_CTRL_CSI_STEER_CSI 1
+#define HT_CTRL_CSI_STEER_NCOM 2
+#define HT_CTRL_CSI_STEER_COM 3
+#define HT_CTRL_NDP_ANNOUNCE 0x01000000
+#define HT_CTRL_AC_CONSTRAINT 0x40000000
+#define HT_CTRL_RDG_MOREPPDU 0x80000000
+
+
+
+
+BWL_PRE_PACKED_STRUCT struct vht_cap_ie {
+ uint32 vht_cap_info;
+
+ uint16 rx_mcs_map;
+ uint16 rx_max_rate;
+ uint16 tx_mcs_map;
+ uint16 tx_max_rate;
+} BWL_POST_PACKED_STRUCT;
+typedef struct vht_cap_ie vht_cap_ie_t;
+
+#define VHT_CAP_IE_LEN 12
+
+#define VHT_CAP_INFO_MAX_MPDU_LEN_MASK 0x00000003
+#define VHT_CAP_INFO_SUPP_CHAN_WIDTH_MASK 0x0000000c
+#define VHT_CAP_INFO_LDPC 0x00000010
+#define VHT_CAP_INFO_SGI_80MHZ 0x00000020
+
+#define VHT_CAP_INFO_SGI_160MHZ 0x00000040
+#define VHT_CAP_INFO_TX_STBC 0x00000080
+
+#define VHT_CAP_INFO_RX_STBC_MASK 0x00000700
+#define VHT_CAP_INFO_RX_STBC_SHIFT 8
+#define VHT_CAP_INFO_SU_BEAMFMR 0x00000800
+#define VHT_CAP_INFO_SU_BEAMFMEE 0x00001000
+#define VHT_CAP_INFO_NUM_BMFMR_ANT_MASK 0x0000e000
+#define VHT_CAP_INFO_NUM_BMFMR_ANT_SHIFT 13
+
+#define VHT_CAP_INFO_NUM_SOUNDING_DIM_MASK 0x00070000
+#define VHT_CAP_INFO_NUM_SOUNDING_DIM_SHIFT 16
+#define VHT_CAP_INFO_MU_BEAMFMR 0x00080000
+#define VHT_CAP_INFO_MU_BEAMFMEE 0x00100000
+#define VHT_CAP_INFO_TXOPPS 0x00200000
+#define VHT_CAP_INFO_HTCVHT 0x00400000
+#define VHT_CAP_INFO_AMPDU_MAXLEN_EXP_MASK 0x03800000
+#define VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT 23
+
+#define VHT_CAP_INFO_LINK_ADAPT_CAP_MASK 0x0c000000
+#define VHT_CAP_INFO_LINK_ADAPT_CAP_SHIFT 26
+
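A minimal sketch (not part of the patch) of extracting two sub-fields from the 32-bit vht_cap_info word with the masks above, assuming it is already in host byte order; the helper names are hypothetical.

#include <stdint.h>

#define VHT_CAP_INFO_MAX_MPDU_LEN_MASK	0x00000003	/* mirrors the definitions above */
#define VHT_CAP_INFO_RX_STBC_MASK	0x00000700
#define VHT_CAP_INFO_RX_STBC_SHIFT	8

/* Returns the VHT_CAP_MPDU_MAX_* code: 0 = 4K, 1 = 8K, 2 = 11K. */
static unsigned int vht_cap_max_mpdu_code(uint32_t cap_info)
{
	return cap_info & VHT_CAP_INFO_MAX_MPDU_LEN_MASK;
}

/* Returns the advertised RX STBC stream field (3-bit value). */
static unsigned int vht_cap_rx_stbc(uint32_t cap_info)
{
	return (cap_info & VHT_CAP_INFO_RX_STBC_MASK) >> VHT_CAP_INFO_RX_STBC_SHIFT;
}
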
+
+#define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_MASK 0x1fff
+#define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_SHIFT 0
+
+#define VHT_CAP_SUPP_MCS_TX_HIGHEST_RATE_MASK 0x1fff
+#define VHT_CAP_SUPP_MCS_TX_HIGHEST_RATE_SHIFT 0
+
+#define VHT_CAP_MCS_MAP_0_7 0
+#define VHT_CAP_MCS_MAP_0_8 1
+#define VHT_CAP_MCS_MAP_0_9 2
+#define VHT_CAP_MCS_MAP_NONE 3
+
+#define VHT_CAP_MCS_MAP_NSS_MAX 8
+
+
+typedef enum vht_cap_chan_width {
+ VHT_CAP_CHAN_WIDTH_20_40 = 0x00,
+ VHT_CAP_CHAN_WIDTH_80 = 0x04,
+ VHT_CAP_CHAN_WIDTH_160 = 0x08
+} vht_cap_chan_width_t;
+
+
+typedef enum vht_cap_max_mpdu_len {
+ VHT_CAP_MPDU_MAX_4K = 0x00,
+ VHT_CAP_MPDU_MAX_8K = 0x01,
+ VHT_CAP_MPDU_MAX_11K = 0x02
+} vht_cap_max_mpdu_len_t;
+
+
+BWL_PRE_PACKED_STRUCT struct vht_op_ie {
+ uint8 chan_width;
+ uint8 chan1;
+ uint8 chan2;
+ uint16 supp_mcs;
+} BWL_POST_PACKED_STRUCT;
+typedef struct vht_op_ie vht_op_ie_t;
+
+#define VHT_OP_IE_LEN 5
+
+typedef enum vht_op_chan_width {
+ VHT_OP_CHAN_WIDTH_20_40 = 0,
+ VHT_OP_CHAN_WIDTH_80 = 1,
+ VHT_OP_CHAN_WIDTH_160 = 2,
+ VHT_OP_CHAN_WIDTH_80_80 = 3
+} vht_op_chan_width_t;
+
+
+#define VHT_MCS_MAP_GET_SS_IDX(numSpatialStreams) (((numSpatialStreams) - 1) * 2)
+#define VHT_MCS_MAP_GET_MCS_PER_SS(numSpatialStreams, mcsMap) \
+	(((mcsMap) >> VHT_MCS_MAP_GET_SS_IDX(numSpatialStreams)) & 0x3)
+#define VHT_MCS_MAP_SET_MCS_PER_SS(numSpatialStreams, numMcs, mcsMap) \
+	((mcsMap) |= (((numMcs) & 0x3) << VHT_MCS_MAP_GET_SS_IDX(numSpatialStreams)))
+
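A minimal sketch (not part of the patch) of working with the MCS-map helpers above. Note the set helper here clears the 2-bit field before OR-ing the new code in, which the macro above leaves to the caller; the function names are hypothetical.

#include <stdint.h>

#define VHT_MCS_MAP_GET_SS_IDX(nss)	(((nss) - 1) * 2)	/* mirrors the macro above */
#define VHT_CAP_MCS_MAP_0_9		2

static unsigned int vht_mcs_map_get(uint16_t map, unsigned int nss)
{
	return (map >> VHT_MCS_MAP_GET_SS_IDX(nss)) & 0x3;
}

/* e.g. vht_mcs_map_set(0xffff, 1, VHT_CAP_MCS_MAP_0_9) enables MCS 0-9 on stream 1. */
static uint16_t vht_mcs_map_set(uint16_t map, unsigned int nss, unsigned int code)
{
	unsigned int idx = VHT_MCS_MAP_GET_SS_IDX(nss);

	map &= (uint16_t)~(0x3u << idx);		/* clear the old 2-bit code */
	return (uint16_t)(map | ((code & 0x3) << idx));
}
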
+
+#define WPA_OUI "\x00\x50\xF2"
+#define WPA_OUI_LEN 3
+#define WPA_OUI_TYPE 1
+#define WPA_VERSION 1
+#define WPA2_OUI "\x00\x0F\xAC"
+#define WPA2_OUI_LEN 3
+#define WPA2_VERSION 1
+#define WPA2_VERSION_LEN 2
+
+
+#define WPS_OUI "\x00\x50\xF2"
+#define WPS_OUI_LEN 3
+#define WPS_OUI_TYPE 4
+
+
+
+#ifdef P2P_IE_OVRD
+#define WFA_OUI MAC_OUI
+#else
+#define WFA_OUI "\x50\x6F\x9A"
+#endif
+#define WFA_OUI_LEN 3
+#ifdef P2P_IE_OVRD
+#define WFA_OUI_TYPE_P2P MAC_OUI_TYPE_P2P
+#else
+#define WFA_OUI_TYPE_P2P 9
+#endif
+
+#define WFA_OUI_TYPE_TPC 8
+#ifdef WLTDLS
+#define WFA_OUI_TYPE_WFD 10
+#endif
+
+
+#define RSN_AKM_NONE 0
+#define RSN_AKM_UNSPECIFIED 1
+#define RSN_AKM_PSK 2
+#define RSN_AKM_FBT_1X 3
+#define RSN_AKM_FBT_PSK 4
+#define RSN_AKM_MFP_1X 5
+#define RSN_AKM_MFP_PSK 6
+#define RSN_AKM_TPK 7
+
+
+#define DOT11_MAX_DEFAULT_KEYS 4
+#define DOT11_MAX_KEY_SIZE 32
+#define DOT11_MAX_IV_SIZE 16
+#define DOT11_EXT_IV_FLAG (1<<5)
+#define DOT11_WPA_KEY_RSC_LEN 8
+
+#define WEP1_KEY_SIZE 5
+#define WEP1_KEY_HEX_SIZE 10
+#define WEP128_KEY_SIZE 13
+#define WEP128_KEY_HEX_SIZE 26
+#define TKIP_MIC_SIZE 8
+#define TKIP_EOM_SIZE 7
+#define TKIP_EOM_FLAG 0x5a
+#define TKIP_KEY_SIZE 32
+#define TKIP_MIC_AUTH_TX 16
+#define TKIP_MIC_AUTH_RX 24
+#define TKIP_MIC_SUP_RX TKIP_MIC_AUTH_TX
+#define TKIP_MIC_SUP_TX TKIP_MIC_AUTH_RX
+#define AES_KEY_SIZE 16
+#define AES_MIC_SIZE 8
+#define BIP_KEY_SIZE 16
+
+
+#define WCN_OUI "\x00\x50\xf2"
+#define WCN_TYPE 4
+
+#ifdef BCMWAPI_WPI
+#define SMS4_KEY_LEN 16
+#define SMS4_WPI_CBC_MAC_LEN 16
+#endif
+
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_mdid_ie {
+ uint8 id;
+ uint8 len;
+ uint16 mdid;
+ uint8 cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_mdid_ie dot11_mdid_ie_t;
+
+#define FBT_MDID_CAP_OVERDS 0x01
+#define FBT_MDID_CAP_RRP 0x02
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_ft_ie {
+ uint8 id;
+ uint8 len;
+ uint16 mic_control;
+ uint8 mic[16];
+ uint8 anonce[32];
+ uint8 snonce[32];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_ie dot11_ft_ie_t;
+
+#define TIE_TYPE_RESERVED 0
+#define TIE_TYPE_REASSOC_DEADLINE 1
+#define TIE_TYPE_KEY_LIEFTIME 2
+#define TIE_TYPE_ASSOC_COMEBACK 3
+BWL_PRE_PACKED_STRUCT struct dot11_timeout_ie {
+ uint8 id;
+ uint8 len;
+ uint8 type;
+ uint32 value;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timeout_ie dot11_timeout_ie_t;
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_gtk_ie {
+ uint8 id;
+ uint8 len;
+ uint16 key_info;
+ uint8 key_len;
+ uint8 rsc[8];
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_gtk_ie dot11_gtk_ie_t;
+
+#define BSSID_INVALID "\x00\x00\x00\x00\x00\x00"
+#define BSSID_BROADCAST "\xFF\xFF\xFF\xFF\xFF\xFF"
+
+#ifdef BCMWAPI_WAI
+#define WAPI_IE_MIN_LEN 20
+#define WAPI_VERSION 1
+#define WAPI_VERSION_LEN 2
+#define WAPI_OUI "\x00\x14\x72"
+#define WAPI_OUI_LEN DOT11_OUI_LEN
+#endif
+
+
+#define WMM_OUI "\x00\x50\xF2"
+#define WMM_OUI_LEN 3
+#define WMM_OUI_TYPE 2
+#define WMM_VERSION 1
+#define WMM_VERSION_LEN 1
+
+
+#define WMM_OUI_SUBTYPE_PARAMETER 1
+#define WMM_PARAMETER_IE_LEN 24
+
+
+BWL_PRE_PACKED_STRUCT struct link_id_ie {
+ uint8 id;
+ uint8 len;
+ struct ether_addr bssid;
+ struct ether_addr tdls_init_mac;
+ struct ether_addr tdls_resp_mac;
+} BWL_POST_PACKED_STRUCT;
+typedef struct link_id_ie link_id_ie_t;
+#define TDLS_LINK_ID_IE_LEN 18
+
+
+BWL_PRE_PACKED_STRUCT struct wakeup_sch_ie {
+ uint8 id;
+ uint8 len;
+ uint32 offset;
+ uint32 interval;
+ uint32 awake_win_slots;
+ uint32 max_wake_win;
+ uint16 idle_cnt;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wakeup_sch_ie wakeup_sch_ie_t;
+#define TDLS_WAKEUP_SCH_IE_LEN 18
+
+
+BWL_PRE_PACKED_STRUCT struct channel_switch_timing_ie {
+ uint8 id;
+ uint8 len;
+ uint16 switch_time;
+ uint16 switch_timeout;
+} BWL_POST_PACKED_STRUCT;
+typedef struct channel_switch_timing_ie channel_switch_timing_ie_t;
+#define TDLS_CHANNEL_SWITCH_TIMING_IE_LEN 4
+
+
+BWL_PRE_PACKED_STRUCT struct pti_control_ie {
+ uint8 id;
+ uint8 len;
+ uint8 tid;
+ uint16 seq_control;
+} BWL_POST_PACKED_STRUCT;
+typedef struct pti_control_ie pti_control_ie_t;
+#define TDLS_PTI_CONTROL_IE_LEN 3
+
+
+BWL_PRE_PACKED_STRUCT struct pu_buffer_status_ie {
+ uint8 id;
+ uint8 len;
+ uint8 status;
+} BWL_POST_PACKED_STRUCT;
+typedef struct pu_buffer_status_ie pu_buffer_status_ie_t;
+#define TDLS_PU_BUFFER_STATUS_IE_LEN 1
+#define TDLS_PU_BUFFER_STATUS_AC_BK 1
+#define TDLS_PU_BUFFER_STATUS_AC_BE 2
+#define TDLS_PU_BUFFER_STATUS_AC_VI 4
+#define TDLS_PU_BUFFER_STATUS_AC_VO 8
+
+
+#include <packed_section_end.h>
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h b/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h
new file mode 100644
index 000000000000..3ee5a748695a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h
@@ -0,0 +1,45 @@
+/*
+ * BT-AMP (Bluetooth Alternate MAC and PHY) 802.11 PAL (Protocol Adaptation Layer)
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: 802.11_bta.h 294267 2011-11-04 23:41:52Z $
+*/
+
+#ifndef _802_11_BTA_H_
+#define _802_11_BTA_H_
+
+#define BT_SIG_SNAP_MPROT "\xAA\xAA\x03\x00\x19\x58"
+
+/* BT-AMP 802.11 PAL Protocols */
+#define BTA_PROT_L2CAP 1
+#define BTA_PROT_ACTIVITY_REPORT 2
+#define BTA_PROT_SECURITY 3
+#define BTA_PROT_LINK_SUPERVISION_REQUEST 4
+#define BTA_PROT_LINK_SUPERVISION_REPLY 5
+
+/* BT-AMP 802.11 PAL AMP_ASSOC Type IDs */
+#define BTA_TYPE_ID_MAC_ADDRESS 1
+#define BTA_TYPE_ID_PREFERRED_CHANNELS 2
+#define BTA_TYPE_ID_CONNECTED_CHANNELS 3
+#define BTA_TYPE_ID_CAPABILITIES 4
+#define BTA_TYPE_ID_VERSION 5
+#endif /* _802_11_BTA_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11e.h b/drivers/net/wireless/bcmdhd/include/proto/802.11e.h
new file mode 100644
index 000000000000..f391e68c1104
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.11e.h
@@ -0,0 +1,131 @@
+/*
+ * 802.11e protocol header file
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: 802.11e.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _802_11e_H_
+#define _802_11e_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* WME Traffic Specification (TSPEC) element */
+#define WME_TSPEC_HDR_LEN 2 /* WME TSPEC header length */
+#define WME_TSPEC_BODY_OFF 2 /* WME TSPEC body offset */
+
+#define WME_CATEGORY_CODE_OFFSET 0 /* WME Category code offset */
+#define WME_ACTION_CODE_OFFSET 1 /* WME Action code offset */
+#define WME_TOKEN_CODE_OFFSET 2 /* WME Token code offset */
+#define WME_STATUS_CODE_OFFSET 3 /* WME Status code offset */
+
+BWL_PRE_PACKED_STRUCT struct tsinfo {
+ uint8 octets[3];
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct tsinfo tsinfo_t;
+
+/* 802.11e TSPEC IE */
+typedef BWL_PRE_PACKED_STRUCT struct tspec {
+ uint8 oui[DOT11_OUI_LEN]; /* WME_OUI */
+ uint8 type; /* WME_TYPE */
+ uint8 subtype; /* WME_SUBTYPE_TSPEC */
+ uint8 version; /* WME_VERSION */
+ tsinfo_t tsinfo; /* TS Info bit field */
+ uint16 nom_msdu_size; /* (Nominal or fixed) MSDU Size (bytes) */
+ uint16 max_msdu_size; /* Maximum MSDU Size (bytes) */
+ uint32 min_srv_interval; /* Minimum Service Interval (us) */
+ uint32 max_srv_interval; /* Maximum Service Interval (us) */
+ uint32 inactivity_interval; /* Inactivity Interval (us) */
+ uint32 suspension_interval; /* Suspension Interval (us) */
+ uint32 srv_start_time; /* Service Start Time (us) */
+ uint32 min_data_rate; /* Minimum Data Rate (bps) */
+ uint32 mean_data_rate; /* Mean Data Rate (bps) */
+ uint32 peak_data_rate; /* Peak Data Rate (bps) */
+ uint32 max_burst_size; /* Maximum Burst Size (bytes) */
+ uint32 delay_bound; /* Delay Bound (us) */
+ uint32 min_phy_rate; /* Minimum PHY Rate (bps) */
+ uint16 surplus_bw; /* Surplus Bandwidth Allowance (range 1.0-8.0) */
+ uint16 medium_time; /* Medium Time (32 us/s periods) */
+} BWL_POST_PACKED_STRUCT tspec_t;
+
+#define WME_TSPEC_LEN	(sizeof(tspec_t))	/* not including the 2-byte header */
+
+/* ts_info */
+/* 802.1D priority is duplicated - bits 13-11 AND bits 3-1 */
+#define TS_INFO_TID_SHIFT 1 /* TS info. TID shift */
+#define TS_INFO_TID_MASK (0xf << TS_INFO_TID_SHIFT) /* TS info. TID mask */
+#define TS_INFO_CONTENTION_SHIFT 7 /* TS info. contention shift */
+#define TS_INFO_CONTENTION_MASK (0x1 << TS_INFO_CONTENTION_SHIFT) /* TS info. contention mask */
+#define TS_INFO_DIRECTION_SHIFT 5 /* TS info. direction shift */
+#define TS_INFO_DIRECTION_MASK (0x3 << TS_INFO_DIRECTION_SHIFT) /* TS info. direction mask */
+#define TS_INFO_PSB_SHIFT 2 /* TS info. PSB bit Shift */
+#define TS_INFO_PSB_MASK (1 << TS_INFO_PSB_SHIFT) /* TS info. PSB mask */
+#define TS_INFO_UPLINK (0 << TS_INFO_DIRECTION_SHIFT) /* TS info. uplink */
+#define TS_INFO_DOWNLINK (1 << TS_INFO_DIRECTION_SHIFT) /* TS info. downlink */
+#define TS_INFO_BIDIRECTIONAL (3 << TS_INFO_DIRECTION_SHIFT) /* TS info. bidirectional */
+#define TS_INFO_USER_PRIO_SHIFT 3 /* TS info. user priority shift */
+/* TS info. user priority mask */
+#define TS_INFO_USER_PRIO_MASK (0x7 << TS_INFO_USER_PRIO_SHIFT)
+
+/* Macro to get/set bit(s) field in TSINFO */
+#define WLC_CAC_GET_TID(pt) ((((pt).octets[0]) & TS_INFO_TID_MASK) >> TS_INFO_TID_SHIFT)
+#define WLC_CAC_GET_DIR(pt) ((((pt).octets[0]) & \
+ TS_INFO_DIRECTION_MASK) >> TS_INFO_DIRECTION_SHIFT)
+#define WLC_CAC_GET_PSB(pt) ((((pt).octets[1]) & TS_INFO_PSB_MASK) >> TS_INFO_PSB_SHIFT)
+#define WLC_CAC_GET_USER_PRIO(pt) ((((pt).octets[1]) & \
+ TS_INFO_USER_PRIO_MASK) >> TS_INFO_USER_PRIO_SHIFT)
+
+#define WLC_CAC_SET_TID(pt, id) ((((pt).octets[0]) & (~TS_INFO_TID_MASK)) | \
+ ((id) << TS_INFO_TID_SHIFT))
+#define WLC_CAC_SET_USER_PRIO(pt, prio) ((((pt).octets[0]) & (~TS_INFO_USER_PRIO_MASK)) | \
+ ((prio) << TS_INFO_USER_PRIO_SHIFT))
+
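A minimal sketch (not part of the patch) of what WLC_CAC_GET_TID() and WLC_CAC_GET_DIR() above compute from the first TS Info octet; the struct and function names are hypothetical.

#include <stdint.h>

#define TS_INFO_TID_SHIFT	1			/* mirrors the definitions above */
#define TS_INFO_TID_MASK	(0xf << TS_INFO_TID_SHIFT)
#define TS_INFO_DIRECTION_SHIFT	5
#define TS_INFO_DIRECTION_MASK	(0x3 << TS_INFO_DIRECTION_SHIFT)

struct tsinfo_octets { uint8_t octets[3]; };

static unsigned int tsinfo_tid(const struct tsinfo_octets *ti)
{
	return (ti->octets[0] & TS_INFO_TID_MASK) >> TS_INFO_TID_SHIFT;
}

/* 0 = uplink, 1 = downlink, 3 = bidirectional (see TS_INFO_*). */
static unsigned int tsinfo_direction(const struct tsinfo_octets *ti)
{
	return (ti->octets[0] & TS_INFO_DIRECTION_MASK) >> TS_INFO_DIRECTION_SHIFT;
}
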
+/* 802.11e QBSS Load IE */
+#define QBSS_LOAD_IE_LEN 5 /* QBSS Load IE length */
+#define QBSS_LOAD_AAC_OFF 3 /* AAC offset in IE */
+
+#define CAC_ADDTS_RESP_TIMEOUT 300 /* default ADDTS response timeout in ms */
+
+/* 802.11e ADDTS status code */
+#define DOT11E_STATUS_ADMISSION_ACCEPTED 0 /* TSPEC Admission accepted status */
+#define DOT11E_STATUS_ADDTS_INVALID_PARAM 1 /* TSPEC invalid parameter status */
+#define DOT11E_STATUS_ADDTS_REFUSED_NSBW 3 /* ADDTS refused (non-sufficient BW) */
+#define DOT11E_STATUS_ADDTS_REFUSED_AWHILE 47 /* ADDTS refused but could retry later */
+
+/* 802.11e DELTS status code */
+#define DOT11E_STATUS_QSTA_LEAVE_QBSS 36 /* STA leave QBSS */
+#define DOT11E_STATUS_END_TS 37 /* END TS */
+#define DOT11E_STATUS_UNKNOWN_TS 38 /* UNKNOWN TS */
+#define DOT11E_STATUS_QSTA_REQ_TIMEOUT 39 /* STA ADDTS request timeout */
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _802_11e_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.1d.h b/drivers/net/wireless/bcmdhd/include/proto/802.1d.h
new file mode 100644
index 000000000000..116a226b1b42
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.1d.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental types and constants relating to 802.1D
+ *
+ * $Id: 802.1d.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _802_1_D_
+#define _802_1_D_
+
+
+#define PRIO_8021D_NONE 2
+#define PRIO_8021D_BK 1
+#define PRIO_8021D_BE 0
+#define PRIO_8021D_EE 3
+#define PRIO_8021D_CL 4
+#define PRIO_8021D_VI 5
+#define PRIO_8021D_VO 6
+#define PRIO_8021D_NC 7
+#define MAXPRIO 7
+#define NUMPRIO (MAXPRIO + 1)
+
+#define ALLPRIO -1
+
+
+#define PRIO2PREC(prio) \
+	(((prio) == PRIO_8021D_NONE || (prio) == PRIO_8021D_BE) ? ((prio) ^ 2) : (prio))
+
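A small self-check (not part of the patch) of the PRIO2PREC() mapping above: it swaps BE (0) and NONE (2) and passes every other 802.1D priority through unchanged.

#include <assert.h>

#define PRIO_8021D_NONE	2	/* mirrors the definitions above */
#define PRIO_8021D_BE	0
#define PRIO_8021D_VI	5
#define PRIO2PREC(prio) \
	(((prio) == PRIO_8021D_NONE || (prio) == PRIO_8021D_BE) ? ((prio) ^ 2) : (prio))

int main(void)
{
	assert(PRIO2PREC(PRIO_8021D_BE) == 2);		/* 0 -> 2 */
	assert(PRIO2PREC(PRIO_8021D_NONE) == 0);	/* 2 -> 0 */
	assert(PRIO2PREC(PRIO_8021D_VI) == 5);		/* others unchanged */
	return 0;
}
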
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h b/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h
new file mode 100644
index 000000000000..e54b2e381872
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h
@@ -0,0 +1,82 @@
+/*
+ * Broadcom Ethernettype protocol definitions
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmeth.h 294352 2011-11-06 19:23:00Z $
+ */
+
+
+
+#ifndef _BCMETH_H_
+#define _BCMETH_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+#include <packed_section_start.h>
+
+
+
+
+
+
+
+#define BCMILCP_SUBTYPE_RATE 1
+#define BCMILCP_SUBTYPE_LINK 2
+#define BCMILCP_SUBTYPE_CSA 3
+#define BCMILCP_SUBTYPE_LARQ 4
+#define BCMILCP_SUBTYPE_VENDOR 5
+#define BCMILCP_SUBTYPE_FLH 17
+
+#define BCMILCP_SUBTYPE_VENDOR_LONG 32769
+#define BCMILCP_SUBTYPE_CERT 32770
+#define BCMILCP_SUBTYPE_SES 32771
+
+
+#define BCMILCP_BCM_SUBTYPE_RESERVED 0
+#define BCMILCP_BCM_SUBTYPE_EVENT 1
+#define BCMILCP_BCM_SUBTYPE_SES 2
+
+
+#define BCMILCP_BCM_SUBTYPE_DPT 4
+
+#define BCMILCP_BCM_SUBTYPEHDR_MINLENGTH 8
+#define BCMILCP_BCM_SUBTYPEHDR_VERSION 0
+
+
+typedef BWL_PRE_PACKED_STRUCT struct bcmeth_hdr
+{
+ uint16 subtype;
+ uint16 length;
+ uint8 version;
+ uint8 oui[3];
+
+ uint16 usr_subtype;
+} BWL_POST_PACKED_STRUCT bcmeth_hdr_t;
+
+
+
+#include <packed_section_end.h>
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h b/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h
new file mode 100644
index 000000000000..0a337f6420c2
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h
@@ -0,0 +1,329 @@
+/*
+ * Broadcom Event protocol definitions
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Dependencies: proto/bcmeth.h
+ *
+ * $Id: bcmevent.h 326276 2012-04-06 23:16:42Z $
+ *
+ */
+
+
+
+#ifndef _BCMEVENT_H_
+#define _BCMEVENT_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+#include <packed_section_start.h>
+
+#define BCM_EVENT_MSG_VERSION 2
+#define BCM_MSG_IFNAME_MAX 16
+
+
+#define WLC_EVENT_MSG_LINK 0x01
+#define WLC_EVENT_MSG_FLUSHTXQ 0x02
+#define WLC_EVENT_MSG_GROUP 0x04
+#define WLC_EVENT_MSG_UNKBSS 0x08
+#define WLC_EVENT_MSG_UNKIF 0x10
+
+
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ uint16 version;
+ uint16 flags;
+ uint32 event_type;
+ uint32 status;
+ uint32 reason;
+ uint32 auth_type;
+ uint32 datalen;
+ struct ether_addr addr;
+ char ifname[BCM_MSG_IFNAME_MAX];
+} BWL_POST_PACKED_STRUCT wl_event_msg_v1_t;
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ uint16 version;
+ uint16 flags;
+ uint32 event_type;
+ uint32 status;
+ uint32 reason;
+ uint32 auth_type;
+ uint32 datalen;
+ struct ether_addr addr;
+ char ifname[BCM_MSG_IFNAME_MAX];
+ uint8 ifidx;
+ uint8 bsscfgidx;
+} BWL_POST_PACKED_STRUCT wl_event_msg_t;
+
+
+typedef BWL_PRE_PACKED_STRUCT struct bcm_event {
+ struct ether_header eth;
+ bcmeth_hdr_t bcm_hdr;
+ wl_event_msg_t event;
+
+} BWL_POST_PACKED_STRUCT bcm_event_t;
+
+#define BCM_MSG_LEN (sizeof(bcm_event_t) - sizeof(bcmeth_hdr_t) - sizeof(struct ether_header))
+
+
+#define WLC_E_SET_SSID 0
+#define WLC_E_JOIN 1
+#define WLC_E_START 2
+#define WLC_E_AUTH 3
+#define WLC_E_AUTH_IND 4
+#define WLC_E_DEAUTH 5
+#define WLC_E_DEAUTH_IND 6
+#define WLC_E_ASSOC 7
+#define WLC_E_ASSOC_IND 8
+#define WLC_E_REASSOC 9
+#define WLC_E_REASSOC_IND 10
+#define WLC_E_DISASSOC 11
+#define WLC_E_DISASSOC_IND 12
+#define WLC_E_QUIET_START 13
+#define WLC_E_QUIET_END 14
+#define WLC_E_BEACON_RX 15
+#define WLC_E_LINK 16
+#define WLC_E_MIC_ERROR 17
+#define WLC_E_NDIS_LINK 18
+#define WLC_E_ROAM 19
+#define WLC_E_TXFAIL 20
+#define WLC_E_PMKID_CACHE 21
+#define WLC_E_RETROGRADE_TSF 22
+#define WLC_E_PRUNE 23
+#define WLC_E_AUTOAUTH 24
+#define WLC_E_EAPOL_MSG 25
+#define WLC_E_SCAN_COMPLETE 26
+#define WLC_E_ADDTS_IND 27
+#define WLC_E_DELTS_IND 28
+#define WLC_E_BCNSENT_IND 29
+#define WLC_E_BCNRX_MSG 30
+#define WLC_E_BCNLOST_MSG 31
+#define WLC_E_ROAM_PREP 32
+#define WLC_E_PFN_NET_FOUND 33
+#define WLC_E_PFN_NET_LOST 34
+#define WLC_E_RESET_COMPLETE 35
+#define WLC_E_JOIN_START 36
+#define WLC_E_ROAM_START 37
+#define WLC_E_ASSOC_START 38
+#define WLC_E_IBSS_ASSOC 39
+#define WLC_E_RADIO 40
+#define WLC_E_PSM_WATCHDOG 41
+#define WLC_E_PROBREQ_MSG 44
+#define WLC_E_SCAN_CONFIRM_IND 45
+#define WLC_E_PSK_SUP 46
+#define WLC_E_COUNTRY_CODE_CHANGED 47
+#define WLC_E_EXCEEDED_MEDIUM_TIME 48
+#define WLC_E_ICV_ERROR 49
+#define WLC_E_UNICAST_DECODE_ERROR 50
+#define WLC_E_MULTICAST_DECODE_ERROR 51
+#define WLC_E_TRACE 52
+#ifdef WLBTAMP
+#define WLC_E_BTA_HCI_EVENT 53
+#endif
+#define WLC_E_IF 54
+#define WLC_E_P2P_DISC_LISTEN_COMPLETE 55
+#define WLC_E_RSSI 56
+#define WLC_E_PFN_SCAN_COMPLETE 57
+#define WLC_E_EXTLOG_MSG 58
+#define WLC_E_ACTION_FRAME 59
+#define WLC_E_ACTION_FRAME_COMPLETE 60
+#define WLC_E_PRE_ASSOC_IND 61
+#define WLC_E_PRE_REASSOC_IND 62
+#define WLC_E_CHANNEL_ADOPTED 63
+#define WLC_E_AP_STARTED 64
+#define WLC_E_DFS_AP_STOP 65
+#define WLC_E_DFS_AP_RESUME 66
+#define WLC_E_WAI_STA_EVENT 67
+#define WLC_E_WAI_MSG 68
+#define WLC_E_ESCAN_RESULT 69
+#define WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE 70
+#define WLC_E_PROBRESP_MSG 71
+#define WLC_E_P2P_PROBREQ_MSG 72
+#define WLC_E_DCS_REQUEST 73
+
+#define WLC_E_FIFO_CREDIT_MAP 74
+
+#define WLC_E_ACTION_FRAME_RX 75
+#define WLC_E_WAKE_EVENT 76
+#define WLC_E_RM_COMPLETE 77
+#define WLC_E_HTSFSYNC 78
+#define WLC_E_OVERLAY_REQ 79
+#define WLC_E_CSA_COMPLETE_IND 80
+#define WLC_E_EXCESS_PM_WAKE_EVENT 81
+#define WLC_E_PFN_SCAN_NONE 82
+#define WLC_E_PFN_SCAN_ALLGONE 83
+#define WLC_E_GTK_PLUMBED 84
+#define WLC_E_ASSOC_IND_NDIS 85
+#define WLC_E_REASSOC_IND_NDIS 86
+#define WLC_E_ASSOC_REQ_IE 87
+#define WLC_E_ASSOC_RESP_IE 88
+#define WLC_E_ASSOC_RECREATED 89
+#define WLC_E_ACTION_FRAME_RX_NDIS 90
+#define WLC_E_AUTH_REQ 91
+#define WLC_E_TDLS_PEER_EVENT 92
+#define WLC_E_SPEEDY_RECREATE_FAIL 93
+#define WLC_E_LAST 94
+
+
+
+typedef struct {
+ uint event;
+ const char *name;
+} bcmevent_name_t;
+
+extern const bcmevent_name_t bcmevent_names[];
+extern const int bcmevent_names_size;
+
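A minimal sketch (not part of the patch) of looking an event code up in the bcmevent_names[] table declared above; it assumes this header and its typedefs are included, the table itself is defined elsewhere in the driver, and the function name is hypothetical.

/* Returns a printable name for a WLC_E_* event code, or "unknown". */
static const char *bcmevent_name(unsigned int event_type)
{
	int i;

	for (i = 0; i < bcmevent_names_size; i++)
		if (bcmevent_names[i].event == event_type)
			return bcmevent_names[i].name;
	return "unknown";
}
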
+
+#define WLC_E_STATUS_SUCCESS 0
+#define WLC_E_STATUS_FAIL 1
+#define WLC_E_STATUS_TIMEOUT 2
+#define WLC_E_STATUS_NO_NETWORKS 3
+#define WLC_E_STATUS_ABORT 4
+#define WLC_E_STATUS_NO_ACK 5
+#define WLC_E_STATUS_UNSOLICITED 6
+#define WLC_E_STATUS_ATTEMPT 7
+#define WLC_E_STATUS_PARTIAL 8
+#define WLC_E_STATUS_NEWSCAN 9
+#define WLC_E_STATUS_NEWASSOC 10
+#define WLC_E_STATUS_11HQUIET 11
+#define WLC_E_STATUS_SUPPRESS 12
+#define WLC_E_STATUS_NOCHANS 13
+#define WLC_E_STATUS_CS_ABORT 15
+#define WLC_E_STATUS_ERROR 16
+
+
+#define WLC_E_REASON_INITIAL_ASSOC 0
+#define WLC_E_REASON_LOW_RSSI 1
+#define WLC_E_REASON_DEAUTH 2
+#define WLC_E_REASON_DISASSOC 3
+#define WLC_E_REASON_BCNS_LOST 4
+#define WLC_E_REASON_MINTXRATE 9
+#define WLC_E_REASON_TXFAIL 10
+
+
+#define WLC_E_REASON_FAST_ROAM_FAILED 5
+#define WLC_E_REASON_DIRECTED_ROAM 6
+#define WLC_E_REASON_TSPEC_REJECTED 7
+#define WLC_E_REASON_BETTER_AP 8
+
+
+#define WLC_E_REASON_REQUESTED_ROAM 11
+
+
+#define WLC_E_PRUNE_ENCR_MISMATCH 1
+#define WLC_E_PRUNE_BCAST_BSSID 2
+#define WLC_E_PRUNE_MAC_DENY 3
+#define WLC_E_PRUNE_MAC_NA 4
+#define WLC_E_PRUNE_REG_PASSV 5
+#define WLC_E_PRUNE_SPCT_MGMT 6
+#define WLC_E_PRUNE_RADAR 7
+#define WLC_E_RSN_MISMATCH 8
+#define WLC_E_PRUNE_NO_COMMON_RATES 9
+#define WLC_E_PRUNE_BASIC_RATES 10
+#define WLC_E_PRUNE_CIPHER_NA 12
+#define WLC_E_PRUNE_KNOWN_STA 13
+#define WLC_E_PRUNE_WDS_PEER 15
+#define WLC_E_PRUNE_QBSS_LOAD 16
+#define WLC_E_PRUNE_HOME_AP 17
+
+
+#define WLC_E_SUP_OTHER 0
+#define WLC_E_SUP_DECRYPT_KEY_DATA 1
+#define WLC_E_SUP_BAD_UCAST_WEP128 2
+#define WLC_E_SUP_BAD_UCAST_WEP40 3
+#define WLC_E_SUP_UNSUP_KEY_LEN 4
+#define WLC_E_SUP_PW_KEY_CIPHER 5
+#define WLC_E_SUP_MSG3_TOO_MANY_IE 6
+#define WLC_E_SUP_MSG3_IE_MISMATCH 7
+#define WLC_E_SUP_NO_INSTALL_FLAG 8
+#define WLC_E_SUP_MSG3_NO_GTK 9
+#define WLC_E_SUP_GRP_KEY_CIPHER 10
+#define WLC_E_SUP_GRP_MSG1_NO_GTK 11
+#define WLC_E_SUP_GTK_DECRYPT_FAIL 12
+#define WLC_E_SUP_SEND_FAIL 13
+#define WLC_E_SUP_DEAUTH 14
+#define WLC_E_SUP_WPA_PSK_TMO 15
+
+
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_event_rx_frame_data {
+ uint16 version;
+ uint16 channel;
+ int32 rssi;
+ uint32 mactime;
+ uint32 rate;
+} BWL_POST_PACKED_STRUCT wl_event_rx_frame_data_t;
+
+#define BCM_RX_FRAME_DATA_VERSION 1
+
+
+typedef struct wl_event_data_if {
+ uint8 ifidx;
+ uint8 opcode;
+ uint8 reserved;
+ uint8 bssidx;
+ uint8 role;
+} wl_event_data_if_t;
+
+
+#define WLC_E_IF_ADD 1
+#define WLC_E_IF_DEL 2
+#define WLC_E_IF_CHANGE 3
+
+
+#define WLC_E_IF_ROLE_STA 0
+#define WLC_E_IF_ROLE_AP 1
+#define WLC_E_IF_ROLE_WDS 2
+#define WLC_E_IF_ROLE_P2P_GO 3
+#define WLC_E_IF_ROLE_P2P_CLIENT 4
+#ifdef WLBTAMP
+#define WLC_E_IF_ROLE_BTA_CREATOR 5
+#define WLC_E_IF_ROLE_BTA_ACCEPTOR 6
+#endif
+
+
+#define WLC_E_LINK_BCN_LOSS 1
+#define WLC_E_LINK_DISASSOC 2
+#define WLC_E_LINK_ASSOC_REC 3
+#define WLC_E_LINK_BSSCFG_DIS 4
+
+
+#define WLC_E_OVL_DOWNLOAD 0
+#define WLC_E_OVL_UPDATE_IND 1
+
+
+#define WLC_E_TDLS_PEER_DISCOVERED 0
+#define WLC_E_TDLS_PEER_CONNECTED 1
+#define WLC_E_TDLS_PEER_DISCONNECTED 2
+
+
+#include <packed_section_end.h>
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmip.h b/drivers/net/wireless/bcmdhd/include/proto/bcmip.h
new file mode 100644
index 000000000000..d5c3b76da5a8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmip.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental constants relating to IP Protocol
+ *
+ * $Id: bcmip.h 290206 2011-10-17 19:13:51Z $
+ */
+
+#ifndef _bcmip_h_
+#define _bcmip_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+#include <packed_section_start.h>
+
+
+
+#define IP_VER_OFFSET 0x0
+#define IP_VER_MASK 0xf0
+#define IP_VER_SHIFT 4
+#define IP_VER_4 4
+#define IP_VER_6 6
+
+#define IP_VER(ip_body) \
+ ((((uint8 *)(ip_body))[IP_VER_OFFSET] & IP_VER_MASK) >> IP_VER_SHIFT)
+
+#define IP_PROT_ICMP 0x1
+#define IP_PROT_IGMP 0x2
+#define IP_PROT_TCP 0x6
+#define IP_PROT_UDP 0x11
+#define IP_PROT_ICMP6 0x3a
+
+
+#define IPV4_VER_HL_OFFSET 0
+#define IPV4_TOS_OFFSET 1
+#define IPV4_PKTLEN_OFFSET 2
+#define IPV4_PKTFLAG_OFFSET 6
+#define IPV4_PROT_OFFSET 9
+#define IPV4_CHKSUM_OFFSET 10
+#define IPV4_SRC_IP_OFFSET 12
+#define IPV4_DEST_IP_OFFSET 16
+#define IPV4_OPTIONS_OFFSET 20
+
+
+#define IPV4_VER_MASK 0xf0
+#define IPV4_VER_SHIFT 4
+
+#define IPV4_HLEN_MASK 0x0f
+#define IPV4_HLEN(ipv4_body) (4 * (((uint8 *)(ipv4_body))[IPV4_VER_HL_OFFSET] & IPV4_HLEN_MASK))
+
+#define IPV4_ADDR_LEN 4
+
+#define IPV4_ADDR_NULL(a) ((((uint8 *)(a))[0] | ((uint8 *)(a))[1] | \
+ ((uint8 *)(a))[2] | ((uint8 *)(a))[3]) == 0)
+
+#define IPV4_ADDR_BCAST(a) ((((uint8 *)(a))[0] & ((uint8 *)(a))[1] & \
+ ((uint8 *)(a))[2] & ((uint8 *)(a))[3]) == 0xff)
+
+#define IPV4_TOS_DSCP_MASK 0xfc
+#define IPV4_TOS_DSCP_SHIFT 2
+
+#define IPV4_TOS(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_TOS_OFFSET])
+
+#define IPV4_TOS_PREC_MASK 0xe0
+#define IPV4_TOS_PREC_SHIFT 5
+
+#define IPV4_TOS_LOWDELAY 0x10
+#define IPV4_TOS_THROUGHPUT 0x8
+#define IPV4_TOS_RELIABILITY 0x4
+
+#define IPV4_PROT(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_PROT_OFFSET])
+
+#define IPV4_FRAG_RESV 0x8000
+#define IPV4_FRAG_DONT 0x4000
+#define IPV4_FRAG_MORE 0x2000
+#define IPV4_FRAG_OFFSET_MASK 0x1fff
+
+#define IPV4_ADDR_STR_LEN 16
+
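A minimal sketch (not part of the patch) of reading the version, header length and DSCP straight from a raw IPv4 header with the byte-level macros above; the function names are hypothetical.

#include <stdint.h>

#define IP_VER_MASK		0xf0	/* mirrors the definitions above */
#define IP_VER_SHIFT		4
#define IPV4_HLEN_MASK		0x0f
#define IPV4_TOS_OFFSET		1
#define IPV4_TOS_DSCP_MASK	0xfc
#define IPV4_TOS_DSCP_SHIFT	2

static unsigned int ip_version(const uint8_t *hdr)
{
	return (hdr[0] & IP_VER_MASK) >> IP_VER_SHIFT;	/* 4 or 6 */
}

static unsigned int ipv4_header_len(const uint8_t *hdr)
{
	return 4 * (hdr[0] & IPV4_HLEN_MASK);		/* IHL in bytes */
}

static unsigned int ipv4_dscp(const uint8_t *hdr)
{
	return (hdr[IPV4_TOS_OFFSET] & IPV4_TOS_DSCP_MASK) >> IPV4_TOS_DSCP_SHIFT;
}
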
+
+BWL_PRE_PACKED_STRUCT struct ipv4_addr {
+ uint8 addr[IPV4_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct ipv4_hdr {
+ uint8 version_ihl;
+ uint8 tos;
+ uint16 tot_len;
+ uint16 id;
+ uint16 frag;
+ uint8 ttl;
+ uint8 prot;
+ uint16 hdr_chksum;
+ uint8 src_ip[IPV4_ADDR_LEN];
+ uint8 dst_ip[IPV4_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+
+
+#define IPV6_PAYLOAD_LEN_OFFSET 4
+#define IPV6_NEXT_HDR_OFFSET 6
+#define IPV6_HOP_LIMIT_OFFSET 7
+#define IPV6_SRC_IP_OFFSET 8
+#define IPV6_DEST_IP_OFFSET 24
+
+
+#define IPV6_TRAFFIC_CLASS(ipv6_body) \
+ (((((uint8 *)(ipv6_body))[0] & 0x0f) << 4) | \
+ ((((uint8 *)(ipv6_body))[1] & 0xf0) >> 4))
+
+#define IPV6_FLOW_LABEL(ipv6_body) \
+ (((((uint8 *)(ipv6_body))[1] & 0x0f) << 16) | \
+ (((uint8 *)(ipv6_body))[2] << 8) | \
+ (((uint8 *)(ipv6_body))[3]))
+
+#define IPV6_PAYLOAD_LEN(ipv6_body) \
+ ((((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 0] << 8) | \
+ ((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 1])
+
+#define IPV6_NEXT_HDR(ipv6_body) \
+ (((uint8 *)(ipv6_body))[IPV6_NEXT_HDR_OFFSET])
+
+#define IPV6_PROT(ipv6_body) IPV6_NEXT_HDR(ipv6_body)
+
+#define IPV6_ADDR_LEN 16
+
+
+#define IP_TOS46(ip_body) \
+ (IP_VER(ip_body) == IP_VER_4 ? IPV4_TOS(ip_body) : \
+ IP_VER(ip_body) == IP_VER_6 ? IPV6_TRAFFIC_CLASS(ip_body) : 0)
+
+
+#define IPV6_EXTHDR_HOP 0
+#define IPV6_EXTHDR_ROUTING 43
+#define IPV6_EXTHDR_FRAGMENT 44
+#define IPV6_EXTHDR_AUTH 51
+#define IPV6_EXTHDR_NONE 59
+#define IPV6_EXTHDR_DEST 60
+
+#define IPV6_EXTHDR(prot) (((prot) == IPV6_EXTHDR_HOP) || \
+ ((prot) == IPV6_EXTHDR_ROUTING) || \
+ ((prot) == IPV6_EXTHDR_FRAGMENT) || \
+ ((prot) == IPV6_EXTHDR_AUTH) || \
+ ((prot) == IPV6_EXTHDR_NONE) || \
+ ((prot) == IPV6_EXTHDR_DEST))
+
+#define IPV6_MIN_HLEN 40
+
+#define IPV6_EXTHDR_LEN(eh) ((((struct ipv6_exthdr *)(eh))->hdrlen + 1) << 3)
+
+BWL_PRE_PACKED_STRUCT struct ipv6_exthdr {
+ uint8 nexthdr;
+ uint8 hdrlen;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct ipv6_exthdr_frag {
+ uint8 nexthdr;
+ uint8 rsvd;
+ uint16 frag_off;
+ uint32 ident;
+} BWL_POST_PACKED_STRUCT;
+
+static INLINE int32
+ipv6_exthdr_len(uint8 *h, uint8 *proto)
+{
+ uint16 len = 0, hlen;
+ struct ipv6_exthdr *eh = (struct ipv6_exthdr *)h;
+
+ while (IPV6_EXTHDR(eh->nexthdr)) {
+ if (eh->nexthdr == IPV6_EXTHDR_NONE)
+ return -1;
+ else if (eh->nexthdr == IPV6_EXTHDR_FRAGMENT)
+ hlen = 8;
+ else if (eh->nexthdr == IPV6_EXTHDR_AUTH)
+ hlen = (eh->hdrlen + 2) << 2;
+ else
+ hlen = IPV6_EXTHDR_LEN(eh);
+
+ len += hlen;
+ eh = (struct ipv6_exthdr *)(h + len);
+ }
+
+ *proto = eh->nexthdr;
+ return len;
+}
+
+
+#include <packed_section_end.h>
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmipv6.h b/drivers/net/wireless/bcmdhd/include/proto/bcmipv6.h
new file mode 100644
index 000000000000..953339182741
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmipv6.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental constants relating to Neighbor Discovery Protocol
+ *
+ * $Id: bcmipv6.h 309193 2012-01-19 00:03:57Z $
+ */
+
+#ifndef _bcmipv6_h_
+#define _bcmipv6_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define ICMPV6_HEADER_TYPE 0x3A
+#define ICMPV6_PKT_TYPE_NS 135
+#define ICMPV6_PKT_TYPE_NA 136
+
+#define ICMPV6_ND_OPT_TYPE_TARGET_MAC 2
+#define ICMPV6_ND_OPT_TYPE_SRC_MAC 1
+
+#define IPV6_VERSION 6
+#define IPV6_HOP_LIMIT 255
+
+#define IPV6_ADDR_NULL(a) ((a[0] | a[1] | a[2] | a[3] | a[4] | \
+ a[5] | a[6] | a[7] | a[8] | a[9] | \
+ a[10] | a[11] | a[12] | a[13] | \
+ a[14] | a[15]) == 0)
+
+/* IPV6 address */
+BWL_PRE_PACKED_STRUCT struct ipv6_addr {
+ uint8 addr[16];
+} BWL_POST_PACKED_STRUCT;
+
+
+/* ICMPV6 Header */
+BWL_PRE_PACKED_STRUCT struct icmp6_hdr {
+ uint8 icmp6_type;
+ uint8 icmp6_code;
+ uint16 icmp6_cksum;
+ BWL_PRE_PACKED_STRUCT union {
+ uint32 reserved;
+ BWL_PRE_PACKED_STRUCT struct nd_advt {
+ uint32 reserved1:5,
+ override:1,
+ solicited:1,
+ router:1,
+ reserved2:24;
+ } BWL_POST_PACKED_STRUCT nd_advt;
+ } BWL_POST_PACKED_STRUCT opt;
+} BWL_POST_PACKED_STRUCT;
+
+/* Ipv6 Header Format */
+BWL_PRE_PACKED_STRUCT struct ipv6_hdr {
+ uint8 priority:4,
+ version:4;
+ uint8 flow_lbl[3];
+ uint16 payload_len;
+ uint8 nexthdr;
+ uint8 hop_limit;
+ struct ipv6_addr saddr;
+ struct ipv6_addr daddr;
+} BWL_POST_PACKED_STRUCT;
+
+/* Neighbor Advertisement/Solicitation Packet Structure */
+BWL_PRE_PACKED_STRUCT struct nd_msg {
+ struct icmp6_hdr icmph;
+ struct ipv6_addr target;
+} BWL_POST_PACKED_STRUCT;
+
+
+/* Neighbor Solicitation/Advertisement Option Structure */
+BWL_PRE_PACKED_STRUCT struct nd_msg_opt {
+ uint8 type;
+ uint8 len;
+ uint8 mac_addr[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* !defined(_bcmipv6_h_) */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h b/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h
new file mode 100644
index 000000000000..8617985dd36d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h
@@ -0,0 +1,441 @@
+/*
+ * BT-AMP (Bluetooth Alternate MAC and PHY) HCI (Host/Controller Interface)
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bt_amp_hci.h 294267 2011-11-04 23:41:52Z $
+*/
+
+#ifndef _bt_amp_hci_h
+#define _bt_amp_hci_h
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* AMP HCI CMD packet format */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_cmd {
+ uint16 opcode;
+ uint8 plen;
+ uint8 parms[1];
+} BWL_POST_PACKED_STRUCT amp_hci_cmd_t;
+
+#define HCI_CMD_PREAMBLE_SIZE OFFSETOF(amp_hci_cmd_t, parms)
+#define HCI_CMD_DATA_SIZE 255
+
+/* AMP HCI CMD opcode layout */
+#define HCI_CMD_OPCODE(ogf, ocf) ((((ogf) & 0x3F) << 10) | ((ocf) & 0x03FF))
+#define HCI_CMD_OGF(opcode) ((uint8)(((opcode) >> 10) & 0x3F))
+#define HCI_CMD_OCF(opcode) ((opcode) & 0x03FF)
+
+/* AMP HCI command opcodes */
+#define HCI_Read_Failed_Contact_Counter HCI_CMD_OPCODE(0x05, 0x0001)
+#define HCI_Reset_Failed_Contact_Counter HCI_CMD_OPCODE(0x05, 0x0002)
+#define HCI_Read_Link_Quality HCI_CMD_OPCODE(0x05, 0x0003)
+#define HCI_Read_Local_AMP_Info HCI_CMD_OPCODE(0x05, 0x0009)
+#define HCI_Read_Local_AMP_ASSOC HCI_CMD_OPCODE(0x05, 0x000A)
+#define HCI_Write_Remote_AMP_ASSOC HCI_CMD_OPCODE(0x05, 0x000B)
+#define HCI_Create_Physical_Link HCI_CMD_OPCODE(0x01, 0x0035)
+#define HCI_Accept_Physical_Link_Request HCI_CMD_OPCODE(0x01, 0x0036)
+#define HCI_Disconnect_Physical_Link HCI_CMD_OPCODE(0x01, 0x0037)
+#define HCI_Create_Logical_Link HCI_CMD_OPCODE(0x01, 0x0038)
+#define HCI_Accept_Logical_Link HCI_CMD_OPCODE(0x01, 0x0039)
+#define HCI_Disconnect_Logical_Link HCI_CMD_OPCODE(0x01, 0x003A)
+#define HCI_Logical_Link_Cancel HCI_CMD_OPCODE(0x01, 0x003B)
+#define HCI_Flow_Spec_Modify HCI_CMD_OPCODE(0x01, 0x003C)
+#define HCI_Write_Flow_Control_Mode HCI_CMD_OPCODE(0x01, 0x0067)
+#define HCI_Read_Best_Effort_Flush_Timeout HCI_CMD_OPCODE(0x01, 0x0069)
+#define HCI_Write_Best_Effort_Flush_Timeout HCI_CMD_OPCODE(0x01, 0x006A)
+#define HCI_Short_Range_Mode HCI_CMD_OPCODE(0x01, 0x006B)
+#define HCI_Reset HCI_CMD_OPCODE(0x03, 0x0003)
+#define HCI_Read_Connection_Accept_Timeout HCI_CMD_OPCODE(0x03, 0x0015)
+#define HCI_Write_Connection_Accept_Timeout HCI_CMD_OPCODE(0x03, 0x0016)
+#define HCI_Read_Link_Supervision_Timeout HCI_CMD_OPCODE(0x03, 0x0036)
+#define HCI_Write_Link_Supervision_Timeout HCI_CMD_OPCODE(0x03, 0x0037)
+#define HCI_Enhanced_Flush HCI_CMD_OPCODE(0x03, 0x005F)
+#define HCI_Read_Logical_Link_Accept_Timeout HCI_CMD_OPCODE(0x03, 0x0061)
+#define HCI_Write_Logical_Link_Accept_Timeout HCI_CMD_OPCODE(0x03, 0x0062)
+#define HCI_Set_Event_Mask_Page_2 HCI_CMD_OPCODE(0x03, 0x0063)
+#define HCI_Read_Location_Data_Command HCI_CMD_OPCODE(0x03, 0x0064)
+#define HCI_Write_Location_Data_Command HCI_CMD_OPCODE(0x03, 0x0065)
+#define HCI_Read_Local_Version_Info HCI_CMD_OPCODE(0x04, 0x0001)
+#define HCI_Read_Local_Supported_Commands HCI_CMD_OPCODE(0x04, 0x0002)
+#define HCI_Read_Buffer_Size HCI_CMD_OPCODE(0x04, 0x0005)
+#define HCI_Read_Data_Block_Size HCI_CMD_OPCODE(0x04, 0x000A)
+
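A small self-check (not part of the patch) of the OGF/OCF packing performed by HCI_CMD_OPCODE() above, using HCI_Read_Local_AMP_Info (OGF 0x05, OCF 0x0009) as the example.

#include <assert.h>
#include <stdint.h>

#define HCI_CMD_OPCODE(ogf, ocf)	((((ogf) & 0x3F) << 10) | ((ocf) & 0x03FF))
#define HCI_CMD_OGF(opcode)		((uint8_t)(((opcode) >> 10) & 0x3F))
#define HCI_CMD_OCF(opcode)		((opcode) & 0x03FF)

int main(void)
{
	uint16_t op = HCI_CMD_OPCODE(0x05, 0x0009);	/* HCI_Read_Local_AMP_Info */

	assert(op == 0x1409);
	assert(HCI_CMD_OGF(op) == 0x05);
	assert(HCI_CMD_OCF(op) == 0x0009);
	return 0;
}
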
+/* AMP HCI command parameters */
+typedef BWL_PRE_PACKED_STRUCT struct read_local_cmd_parms {
+ uint8 plh;
+ uint8 offset[2]; /* length so far */
+ uint8 max_remote[2];
+} BWL_POST_PACKED_STRUCT read_local_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct write_remote_cmd_parms {
+ uint8 plh;
+ uint8 offset[2];
+ uint8 len[2];
+ uint8 frag[1];
+} BWL_POST_PACKED_STRUCT write_remote_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct phy_link_cmd_parms {
+ uint8 plh;
+ uint8 key_length;
+ uint8 key_type;
+ uint8 key[1];
+} BWL_POST_PACKED_STRUCT phy_link_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct dis_phy_link_cmd_parms {
+ uint8 plh;
+ uint8 reason;
+} BWL_POST_PACKED_STRUCT dis_phy_link_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_cmd_parms {
+ uint8 plh;
+ uint8 txflow[16];
+ uint8 rxflow[16];
+} BWL_POST_PACKED_STRUCT log_link_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ext_flow_spec {
+ uint8 id;
+ uint8 service_type;
+ uint8 max_sdu[2];
+ uint8 sdu_ia_time[4];
+ uint8 access_latency[4];
+ uint8 flush_timeout[4];
+} BWL_POST_PACKED_STRUCT ext_flow_spec_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_cancel_cmd_parms {
+ uint8 plh;
+ uint8 tx_fs_ID;
+} BWL_POST_PACKED_STRUCT log_link_cancel_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct flow_spec_mod_cmd_parms {
+ uint8 llh[2];
+ uint8 txflow[16];
+ uint8 rxflow[16];
+} BWL_POST_PACKED_STRUCT flow_spec_mod_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct plh_pad {
+ uint8 plh;
+ uint8 pad;
+} BWL_POST_PACKED_STRUCT plh_pad_t;
+
+typedef BWL_PRE_PACKED_STRUCT union hci_handle {
+ uint16 bredr;
+ plh_pad_t amp;
+} BWL_POST_PACKED_STRUCT hci_handle_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ls_to_cmd_parms {
+ hci_handle_t handle;
+ uint8 timeout[2];
+} BWL_POST_PACKED_STRUCT ls_to_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct befto_cmd_parms {
+ uint8 llh[2];
+ uint8 befto[4];
+} BWL_POST_PACKED_STRUCT befto_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct srm_cmd_parms {
+ uint8 plh;
+ uint8 srm;
+} BWL_POST_PACKED_STRUCT srm_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ld_cmd_parms {
+ uint8 ld_aware;
+ uint8 ld[2];
+ uint8 ld_opts;
+ uint8 l_opts;
+} BWL_POST_PACKED_STRUCT ld_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct eflush_cmd_parms {
+ uint8 llh[2];
+ uint8 packet_type;
+} BWL_POST_PACKED_STRUCT eflush_cmd_parms_t;
+
+/* Generic AMP extended flow spec service types */
+#define EFS_SVCTYPE_NO_TRAFFIC 0
+#define EFS_SVCTYPE_BEST_EFFORT 1
+#define EFS_SVCTYPE_GUARANTEED 2
+
+/* AMP HCI event packet format */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_event {
+ uint8 ecode;
+ uint8 plen;
+ uint8 parms[1];
+} BWL_POST_PACKED_STRUCT amp_hci_event_t;
+
+#define HCI_EVT_PREAMBLE_SIZE OFFSETOF(amp_hci_event_t, parms)
+
+/* AMP HCI event codes */
+#define HCI_Command_Complete 0x0E
+#define HCI_Command_Status 0x0F
+#define HCI_Flush_Occurred 0x11
+#define HCI_Enhanced_Flush_Complete 0x39
+#define HCI_Physical_Link_Complete 0x40
+#define HCI_Channel_Select 0x41
+#define HCI_Disconnect_Physical_Link_Complete 0x42
+#define HCI_Logical_Link_Complete 0x45
+#define HCI_Disconnect_Logical_Link_Complete 0x46
+#define HCI_Flow_Spec_Modify_Complete 0x47
+#define HCI_Number_of_Completed_Data_Blocks 0x48
+#define HCI_Short_Range_Mode_Change_Complete 0x4C
+#define HCI_Status_Change_Event 0x4D
+#define HCI_Vendor_Specific 0xFF
+
+/* AMP HCI event mask bit positions */
+#define HCI_Physical_Link_Complete_Event_Mask 0x0001
+#define HCI_Channel_Select_Event_Mask 0x0002
+#define HCI_Disconnect_Physical_Link_Complete_Event_Mask 0x0004
+#define HCI_Logical_Link_Complete_Event_Mask 0x0020
+#define HCI_Disconnect_Logical_Link_Complete_Event_Mask 0x0040
+#define HCI_Flow_Spec_Modify_Complete_Event_Mask 0x0080
+#define HCI_Number_of_Completed_Data_Blocks_Event_Mask 0x0100
+#define HCI_Short_Range_Mode_Change_Complete_Event_Mask 0x1000
+#define HCI_Status_Change_Event_Mask 0x2000
+#define HCI_All_Event_Mask 0x31e7
+/* AMP HCI event parameters */
+typedef BWL_PRE_PACKED_STRUCT struct cmd_status_parms {
+ uint8 status;
+ uint8 cmdpkts;
+ uint16 opcode;
+} BWL_POST_PACKED_STRUCT cmd_status_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct cmd_complete_parms {
+ uint8 cmdpkts;
+ uint16 opcode;
+ uint8 parms[1];
+} BWL_POST_PACKED_STRUCT cmd_complete_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct flush_occurred_evt_parms {
+ uint16 handle;
+} BWL_POST_PACKED_STRUCT flush_occurred_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct write_remote_evt_parms {
+ uint8 status;
+ uint8 plh;
+} BWL_POST_PACKED_STRUCT write_remote_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_local_evt_parms {
+ uint8 status;
+ uint8 plh;
+ uint16 len;
+ uint8 frag[1];
+} BWL_POST_PACKED_STRUCT read_local_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_local_info_evt_parms {
+ uint8 status;
+ uint8 AMP_status;
+ uint32 bandwidth;
+ uint32 gbandwidth;
+ uint32 latency;
+ uint32 PDU_size;
+ uint8 ctrl_type;
+ uint16 PAL_cap;
+ uint16 AMP_ASSOC_len;
+ uint32 max_flush_timeout;
+ uint32 be_flush_timeout;
+} BWL_POST_PACKED_STRUCT read_local_info_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_evt_parms {
+ uint8 status;
+ uint16 llh;
+ uint8 plh;
+ uint8 tx_fs_ID;
+} BWL_POST_PACKED_STRUCT log_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct disc_log_link_evt_parms {
+ uint8 status;
+ uint16 llh;
+ uint8 reason;
+} BWL_POST_PACKED_STRUCT disc_log_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_cancel_evt_parms {
+ uint8 status;
+ uint8 plh;
+ uint8 tx_fs_ID;
+} BWL_POST_PACKED_STRUCT log_link_cancel_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct flow_spec_mod_evt_parms {
+ uint8 status;
+ uint16 llh;
+} BWL_POST_PACKED_STRUCT flow_spec_mod_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct phy_link_evt_parms {
+ uint8 status;
+ uint8 plh;
+} BWL_POST_PACKED_STRUCT phy_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct dis_phy_link_evt_parms {
+ uint8 status;
+ uint8 plh;
+ uint8 reason;
+} BWL_POST_PACKED_STRUCT dis_phy_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_ls_to_evt_parms {
+ uint8 status;
+ hci_handle_t handle;
+ uint16 timeout;
+} BWL_POST_PACKED_STRUCT read_ls_to_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_lla_ca_to_evt_parms {
+ uint8 status;
+ uint16 timeout;
+} BWL_POST_PACKED_STRUCT read_lla_ca_to_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_data_block_size_evt_parms {
+ uint8 status;
+ uint16 ACL_pkt_len;
+ uint16 data_block_len;
+ uint16 data_block_num;
+} BWL_POST_PACKED_STRUCT read_data_block_size_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct data_blocks {
+ uint16 handle;
+ uint16 pkts;
+ uint16 blocks;
+} BWL_POST_PACKED_STRUCT data_blocks_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct num_completed_data_blocks_evt_parms {
+ uint16 num_blocks;
+ uint8 num_handles;
+ data_blocks_t completed[1];
+} BWL_POST_PACKED_STRUCT num_completed_data_blocks_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct befto_evt_parms {
+ uint8 status;
+ uint32 befto;
+} BWL_POST_PACKED_STRUCT befto_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct srm_evt_parms {
+ uint8 status;
+ uint8 plh;
+ uint8 srm;
+} BWL_POST_PACKED_STRUCT srm_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct contact_counter_evt_parms {
+ uint8 status;
+ uint8 llh[2];
+ uint16 counter;
+} BWL_POST_PACKED_STRUCT contact_counter_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct contact_counter_reset_evt_parms {
+ uint8 status;
+ uint8 llh[2];
+} BWL_POST_PACKED_STRUCT contact_counter_reset_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_linkq_evt_parms {
+ uint8 status;
+ hci_handle_t handle;
+ uint8 link_quality;
+} BWL_POST_PACKED_STRUCT read_linkq_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ld_evt_parms {
+ uint8 status;
+ uint8 ld_aware;
+ uint8 ld[2];
+ uint8 ld_opts;
+ uint8 l_opts;
+} BWL_POST_PACKED_STRUCT ld_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct eflush_complete_evt_parms {
+ uint16 handle;
+} BWL_POST_PACKED_STRUCT eflush_complete_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct vendor_specific_evt_parms {
+ uint8 len;
+ uint8 parms[1];
+} BWL_POST_PACKED_STRUCT vendor_specific_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct local_version_info_evt_parms {
+ uint8 status;
+ uint8 hci_version;
+ uint16 hci_revision;
+ uint8 pal_version;
+ uint16 mfg_name;
+ uint16 pal_subversion;
+} BWL_POST_PACKED_STRUCT local_version_info_evt_parms_t;
+
+#define MAX_SUPPORTED_CMD_BYTE 64
+typedef BWL_PRE_PACKED_STRUCT struct local_supported_cmd_evt_parms {
+ uint8 status;
+ uint8 cmd[MAX_SUPPORTED_CMD_BYTE];
+} BWL_POST_PACKED_STRUCT local_supported_cmd_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct status_change_evt_parms {
+ uint8 status;
+ uint8 amp_status;
+} BWL_POST_PACKED_STRUCT status_change_evt_parms_t;
+
+/* AMP HCI error codes */
+#define HCI_SUCCESS 0x00
+#define HCI_ERR_ILLEGAL_COMMAND 0x01
+#define HCI_ERR_NO_CONNECTION 0x02
+#define HCI_ERR_MEMORY_FULL 0x07
+#define HCI_ERR_CONNECTION_TIMEOUT 0x08
+#define HCI_ERR_MAX_NUM_OF_CONNECTIONS 0x09
+#define HCI_ERR_CONNECTION_EXISTS 0x0B
+#define HCI_ERR_CONNECTION_DISALLOWED 0x0C
+#define HCI_ERR_CONNECTION_ACCEPT_TIMEOUT 0x10
+#define HCI_ERR_UNSUPPORTED_VALUE 0x11
+#define HCI_ERR_ILLEGAL_PARAMETER_FMT 0x12
+#define HCI_ERR_CONN_TERM_BY_LOCAL_HOST 0x16
+#define HCI_ERR_UNSPECIFIED 0x1F
+#define HCI_ERR_UNIT_KEY_USED 0x26
+#define HCI_ERR_QOS_REJECTED 0x2D
+#define HCI_ERR_PARAM_OUT_OF_RANGE 0x30
+#define HCI_ERR_NO_SUITABLE_CHANNEL 0x39
+#define HCI_ERR_CHANNEL_MOVE 0xFF
+
+/* AMP HCI ACL Data packet format */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_ACL_data {
+ uint16 handle; /* 12-bit connection handle + 2-bit PB and 2-bit BC flags */
+ uint16 dlen; /* data total length */
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT amp_hci_ACL_data_t;
+
+#define HCI_ACL_DATA_PREAMBLE_SIZE OFFSETOF(amp_hci_ACL_data_t, data)
+
+#define HCI_ACL_DATA_BC_FLAGS (0x0 << 14)
+#define HCI_ACL_DATA_PB_FLAGS (0x3 << 12)
+
+#define HCI_ACL_DATA_HANDLE(handle) ((handle) & 0x0fff)
+#define HCI_ACL_DATA_FLAGS(handle) ((handle) >> 12)
+
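A minimal sketch (not part of the patch) of splitting the first 16-bit word of an ACL data packet into the 12-bit connection handle and the PB/BC flag bits, as HCI_ACL_DATA_HANDLE() and HCI_ACL_DATA_FLAGS() above do; the function name is hypothetical.

#include <stdint.h>

#define HCI_ACL_DATA_HANDLE(h)	((h) & 0x0fff)	/* mirrors the macros above */
#define HCI_ACL_DATA_FLAGS(h)	((h) >> 12)

static void acl_split(uint16_t handle_word, uint16_t *conn_handle, uint16_t *pb_bc_flags)
{
	*conn_handle = HCI_ACL_DATA_HANDLE(handle_word);	/* low 12 bits */
	*pb_bc_flags = HCI_ACL_DATA_FLAGS(handle_word);		/* 2-bit PB + 2-bit BC */
}
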
+/* AMP Activity Report packet formats */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_activity_report {
+ uint8 ScheduleKnown;
+ uint8 NumReports;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT amp_hci_activity_report_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_activity_report_triple {
+ uint32 StartTime;
+ uint32 Duration;
+ uint32 Periodicity;
+} BWL_POST_PACKED_STRUCT amp_hci_activity_report_triple_t;
+
+#define HCI_AR_SCHEDULE_KNOWN 0x01
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _bt_amp_hci_h */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/eapol.h b/drivers/net/wireless/bcmdhd/include/proto/eapol.h
new file mode 100644
index 000000000000..8936d1641a3d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/eapol.h
@@ -0,0 +1,193 @@
+/*
+ * 802.1x EAPOL definitions
+ *
+ * See
+ * IEEE Std 802.1X-2001
+ * IEEE 802.1X RADIUS Usage Guidelines
+ *
+ * Copyright (C) 2002 Broadcom Corporation
+ *
+ * $Id: eapol.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _eapol_h_
+#define _eapol_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#include <bcmcrypto/aeskeywrap.h>
+
+/* EAPOL for 802.3/Ethernet */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ struct ether_header eth; /* 802.3/Ethernet header */
+ unsigned char version; /* EAPOL protocol version */
+ unsigned char type; /* EAPOL type */
+ unsigned short length; /* Length of body */
+ unsigned char body[1]; /* Body (optional) */
+} BWL_POST_PACKED_STRUCT eapol_header_t;
+
+#define EAPOL_HEADER_LEN 18
+
+typedef struct {
+ unsigned char version; /* EAPOL protocol version */
+ unsigned char type; /* EAPOL type */
+ unsigned short length; /* Length of body */
+} eapol_hdr_t;
+
+#define EAPOL_HDR_LEN 4
+
+/* EAPOL version */
+#define WPA2_EAPOL_VERSION 2
+#define WPA_EAPOL_VERSION 1
+#define LEAP_EAPOL_VERSION 1
+#define SES_EAPOL_VERSION 1
+
+/* EAPOL types */
+#define EAP_PACKET 0
+#define EAPOL_START 1
+#define EAPOL_LOGOFF 2
+#define EAPOL_KEY 3
+#define EAPOL_ASF 4
+
+/* EAPOL-Key types */
+#define EAPOL_RC4_KEY 1
+#define EAPOL_WPA2_KEY 2 /* 802.11i/WPA2 */
+#define EAPOL_WPA_KEY 254 /* WPA */
+
+/* RC4 EAPOL-Key header field sizes */
+#define EAPOL_KEY_REPLAY_LEN 8
+#define EAPOL_KEY_IV_LEN 16
+#define EAPOL_KEY_SIG_LEN 16
+
+/* RC4 EAPOL-Key */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ unsigned char type; /* Key Descriptor Type */
+ unsigned short length; /* Key Length (unaligned) */
+ unsigned char replay[EAPOL_KEY_REPLAY_LEN]; /* Replay Counter */
+ unsigned char iv[EAPOL_KEY_IV_LEN]; /* Key IV */
+ unsigned char index; /* Key Flags & Index */
+ unsigned char signature[EAPOL_KEY_SIG_LEN]; /* Key Signature */
+ unsigned char key[1]; /* Key (optional) */
+} BWL_POST_PACKED_STRUCT eapol_key_header_t;
+
+#define EAPOL_KEY_HEADER_LEN 44
+
+/* RC4 EAPOL-Key flags */
+#define EAPOL_KEY_FLAGS_MASK 0x80
+#define EAPOL_KEY_BROADCAST 0
+#define EAPOL_KEY_UNICAST 0x80
+
+/* RC4 EAPOL-Key index */
+#define EAPOL_KEY_INDEX_MASK 0x7f
+
+/* WPA/802.11i/WPA2 EAPOL-Key header field sizes */
+#define EAPOL_WPA_KEY_REPLAY_LEN 8
+#define EAPOL_WPA_KEY_NONCE_LEN 32
+#define EAPOL_WPA_KEY_IV_LEN 16
+#define EAPOL_WPA_KEY_RSC_LEN 8
+#define EAPOL_WPA_KEY_ID_LEN 8
+#define EAPOL_WPA_KEY_MIC_LEN 16
+#define EAPOL_WPA_KEY_DATA_LEN (EAPOL_WPA_MAX_KEY_SIZE + AKW_BLOCK_LEN)
+#define EAPOL_WPA_MAX_KEY_SIZE 32
+
+/* WPA EAPOL-Key */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ unsigned char type; /* Key Descriptor Type */
+ unsigned short key_info; /* Key Information (unaligned) */
+ unsigned short key_len; /* Key Length (unaligned) */
+ unsigned char replay[EAPOL_WPA_KEY_REPLAY_LEN]; /* Replay Counter */
+ unsigned char nonce[EAPOL_WPA_KEY_NONCE_LEN]; /* Nonce */
+ unsigned char iv[EAPOL_WPA_KEY_IV_LEN]; /* Key IV */
+ unsigned char rsc[EAPOL_WPA_KEY_RSC_LEN]; /* Key RSC */
+ unsigned char id[EAPOL_WPA_KEY_ID_LEN]; /* WPA:Key ID, 802.11i/WPA2: Reserved */
+ unsigned char mic[EAPOL_WPA_KEY_MIC_LEN]; /* Key MIC */
+ unsigned short data_len; /* Key Data Length */
+ unsigned char data[EAPOL_WPA_KEY_DATA_LEN]; /* Key data */
+} BWL_POST_PACKED_STRUCT eapol_wpa_key_header_t;
+
+#define EAPOL_WPA_KEY_LEN 95
+
+/* WPA/802.11i/WPA2 KEY KEY_INFO bits */
+#define WPA_KEY_DESC_V1 0x01
+#define WPA_KEY_DESC_V2 0x02
+#define WPA_KEY_DESC_V3 0x03
+#define WPA_KEY_PAIRWISE 0x08
+#define WPA_KEY_INSTALL 0x40
+#define WPA_KEY_ACK 0x80
+#define WPA_KEY_MIC 0x100
+#define WPA_KEY_SECURE 0x200
+#define WPA_KEY_ERROR 0x400
+#define WPA_KEY_REQ 0x800
+
+#define WPA_KEY_DESC_V2_OR_V3 WPA_KEY_DESC_V2
+
+/* WPA-only KEY KEY_INFO bits */
+#define WPA_KEY_INDEX_0 0x00
+#define WPA_KEY_INDEX_1 0x10
+#define WPA_KEY_INDEX_2 0x20
+#define WPA_KEY_INDEX_3 0x30
+#define WPA_KEY_INDEX_MASK 0x30
+#define WPA_KEY_INDEX_SHIFT 0x04
+
+/* 802.11i/WPA2-only KEY KEY_INFO bits */
+#define WPA_KEY_ENCRYPTED_DATA 0x1000
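To illustrate how the key_info bits above are typically read when classifying an incoming EAPOL-Key frame, here is a minimal standalone sketch (not part of the patch); the bit values are restated locally and the classification is deliberately simplified, since a real 4-way handshake check also inspects other bits and the replay counter:

#include <stdint.h>
#include <stdio.h>

/* Bit values restated from the definitions above so this compiles on its own. */
#define WPA_KEY_PAIRWISE 0x08
#define WPA_KEY_ACK      0x80
#define WPA_KEY_MIC      0x100

int main(void)
{
        uint16_t key_info = 0x008a;     /* example: descriptor v2, pairwise, ack, no MIC */

        if ((key_info & WPA_KEY_PAIRWISE) && (key_info & WPA_KEY_ACK) &&
            !(key_info & WPA_KEY_MIC))
                printf("pairwise EAPOL-Key from authenticator without MIC (e.g. 4-way msg 1)\n");
        return 0;
}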
+
+/* Key Data encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 type;
+ uint8 length;
+ uint8 oui[3];
+ uint8 subtype;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_encap_data_t;
+
+#define EAPOL_WPA2_ENCAP_DATA_HDR_LEN 6
+
+#define WPA2_KEY_DATA_SUBTYPE_GTK 1
+#define WPA2_KEY_DATA_SUBTYPE_STAKEY 2
+#define WPA2_KEY_DATA_SUBTYPE_MAC 3
+#define WPA2_KEY_DATA_SUBTYPE_PMKID 4
+#define WPA2_KEY_DATA_SUBTYPE_IGTK 9
+
+/* GTK encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 flags;
+ uint8 reserved;
+ uint8 gtk[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_gtk_encap_t;
+
+#define EAPOL_WPA2_KEY_GTK_ENCAP_HDR_LEN 2
+
+#define WPA2_GTK_INDEX_MASK 0x03
+#define WPA2_GTK_INDEX_SHIFT 0x00
+
+#define WPA2_GTK_TRANSMIT 0x04
+
+/* IGTK encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint16 key_id;
+ uint8 ipn[6];
+ uint8 key[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_igtk_encap_t;
+
+#define EAPOL_WPA2_KEY_IGTK_ENCAP_HDR_LEN 8
+
+/* STAKey encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 reserved[2];
+ uint8 mac[ETHER_ADDR_LEN];
+ uint8 stakey[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_stakey_encap_t;
+
+#define WPA2_KEY_DATA_PAD 0xdd
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _eapol_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/ethernet.h b/drivers/net/wireless/bcmdhd/include/proto/ethernet.h
new file mode 100644
index 000000000000..e45518564c17
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/ethernet.h
@@ -0,0 +1,162 @@
+/*
+ * From FreeBSD 2.2.7: Fundamental constants relating to ethernet.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: ethernet.h 309193 2012-01-19 00:03:57Z $
+ */
+
+#ifndef _NET_ETHERNET_H_
+#define _NET_ETHERNET_H_
+
+#ifndef _TYPEDEFS_H_
+#include "typedefs.h"
+#endif
+
+
+#include <packed_section_start.h>
+
+
+
+#define ETHER_ADDR_LEN 6
+
+
+#define ETHER_TYPE_LEN 2
+
+
+#define ETHER_CRC_LEN 4
+
+
+#define ETHER_HDR_LEN (ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN)
+
+
+#define ETHER_MIN_LEN 64
+
+
+#define ETHER_MIN_DATA 46
+
+
+#define ETHER_MAX_LEN 1518
+
+
+#define ETHER_MAX_DATA 1500
+
+
+#define ETHER_TYPE_MIN 0x0600
+#define ETHER_TYPE_IP 0x0800
+#define ETHER_TYPE_ARP 0x0806
+#define ETHER_TYPE_8021Q 0x8100
+#define ETHER_TYPE_IPV6 0x86dd
+#define ETHER_TYPE_BRCM 0x886c
+#define ETHER_TYPE_802_1X 0x888e
+#define ETHER_TYPE_802_1X_PREAUTH 0x88c7
+#define ETHER_TYPE_WAI 0x88b4
+#define ETHER_TYPE_89_0D 0x890d
+
+#define ETHER_TYPE_IPV6 0x86dd
+
+
+#define ETHER_BRCM_SUBTYPE_LEN 4
+
+
+#define ETHER_DEST_OFFSET (0 * ETHER_ADDR_LEN)
+#define ETHER_SRC_OFFSET (1 * ETHER_ADDR_LEN)
+#define ETHER_TYPE_OFFSET (2 * ETHER_ADDR_LEN)
+
+
+#define ETHER_IS_VALID_LEN(foo) \
+ ((foo) >= ETHER_MIN_LEN && (foo) <= ETHER_MAX_LEN)
+
+#define ETHER_FILL_MCAST_ADDR_FROM_IP(ea, mgrp_ip) { \
+ ((uint8 *)ea)[0] = 0x01; \
+ ((uint8 *)ea)[1] = 0x00; \
+ ((uint8 *)ea)[2] = 0x5e; \
+ ((uint8 *)ea)[3] = ((mgrp_ip) >> 16) & 0x7f; \
+ ((uint8 *)ea)[4] = ((mgrp_ip) >> 8) & 0xff; \
+ ((uint8 *)ea)[5] = ((mgrp_ip) >> 0) & 0xff; \
+}
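ETHER_FILL_MCAST_ADDR_FROM_IP above maps an IPv4 multicast group address to its 01:00:5e:xx:xx:xx Ethernet address by copying the low 23 bits of the group. A standalone sketch of the same mapping (a plain function rather than the macro; the example group is chosen arbitrarily and is not from the patch):

#include <stdint.h>
#include <stdio.h>

/* Same mapping as ETHER_FILL_MCAST_ADDR_FROM_IP: 01:00:5e plus the low 23 bits of the group. */
static void ip_mcast_to_mac(uint32_t mgrp_ip, uint8_t ea[6])
{
        ea[0] = 0x01;
        ea[1] = 0x00;
        ea[2] = 0x5e;
        ea[3] = (mgrp_ip >> 16) & 0x7f;
        ea[4] = (mgrp_ip >> 8) & 0xff;
        ea[5] = mgrp_ip & 0xff;
}

int main(void)
{
        uint8_t mac[6];

        ip_mcast_to_mac(0xe00000fbu, mac);      /* 224.0.0.251 (mDNS), as an example */
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;                               /* prints: 01:00:5e:00:00:fb */
}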
+
+#ifndef __INCif_etherh
+
+BWL_PRE_PACKED_STRUCT struct ether_header {
+ uint8 ether_dhost[ETHER_ADDR_LEN];
+ uint8 ether_shost[ETHER_ADDR_LEN];
+ uint16 ether_type;
+} BWL_POST_PACKED_STRUCT;
+
+
+BWL_PRE_PACKED_STRUCT struct ether_addr {
+ uint8 octet[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+#endif
+
+
+#define ETHER_SET_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] | 2))
+#define ETHER_IS_LOCALADDR(ea) (((uint8 *)(ea))[0] & 2)
+#define ETHER_CLR_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & 0xfd))
+#define ETHER_TOGGLE_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] ^ 2))
+
+
+#define ETHER_SET_UNICAST(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & ~1))
+
+
+#define ETHER_ISMULTI(ea) (((const uint8 *)(ea))[0] & 1)
+
+
+
+#define ether_cmp(a, b) (!(((short*)(a))[0] == ((short*)(b))[0]) | \
+ !(((short*)(a))[1] == ((short*)(b))[1]) | \
+ !(((short*)(a))[2] == ((short*)(b))[2]))
+
+
+#define ether_copy(s, d) { \
+ ((short*)(d))[0] = ((const short*)(s))[0]; \
+ ((short*)(d))[1] = ((const short*)(s))[1]; \
+ ((short*)(d))[2] = ((const short*)(s))[2]; }
+
+
+static const struct ether_addr ether_bcast = {{255, 255, 255, 255, 255, 255}};
+static const struct ether_addr ether_null = {{0, 0, 0, 0, 0, 0}};
+
+#define ETHER_ISBCAST(ea) ((((uint8 *)(ea))[0] & \
+ ((uint8 *)(ea))[1] & \
+ ((uint8 *)(ea))[2] & \
+ ((uint8 *)(ea))[3] & \
+ ((uint8 *)(ea))[4] & \
+ ((uint8 *)(ea))[5]) == 0xff)
+#define ETHER_ISNULLADDR(ea) ((((uint8 *)(ea))[0] | \
+ ((uint8 *)(ea))[1] | \
+ ((uint8 *)(ea))[2] | \
+ ((uint8 *)(ea))[3] | \
+ ((uint8 *)(ea))[4] | \
+ ((uint8 *)(ea))[5]) == 0)
+
+#define ETHER_MOVE_HDR(d, s) \
+do { \
+ struct ether_header t; \
+ t = *(struct ether_header *)(s); \
+ *(struct ether_header *)(d) = t; \
+} while (0)
+
+
+#include <packed_section_end.h>
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/proto/p2p.h b/drivers/net/wireless/bcmdhd/include/proto/p2p.h
new file mode 100644
index 000000000000..19493eb18b1c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/p2p.h
@@ -0,0 +1,564 @@
+/*
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental types and constants relating to WFA P2P (aka WiFi Direct)
+ *
+ * $Id: p2p.h 326276 2012-04-06 23:16:42Z $
+ */
+
+#ifndef _P2P_H_
+#define _P2P_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+#include <wlioctl.h>
+#include <proto/802.11.h>
+
+
+#include <packed_section_start.h>
+
+
+
+#define P2P_OUI WFA_OUI
+#define P2P_VER WFA_OUI_TYPE_P2P
+
+#define P2P_IE_ID 0xdd
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_ie {
+ uint8 id;
+ uint8 len;
+ uint8 OUI[3];
+ uint8 oui_type;
+ uint8 subelts[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_ie wifi_p2p_ie_t;
+
+#define P2P_IE_FIXED_LEN 6
+
+#define P2P_ATTR_ID_OFF 0
+#define P2P_ATTR_LEN_OFF 1
+#define P2P_ATTR_DATA_OFF 3
+
+#define P2P_ATTR_ID_LEN 1
+#define P2P_ATTR_LEN_LEN 2
+#define P2P_ATTR_HDR_LEN 3
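The offsets above describe the P2P attribute (subelement) encoding inside the IE body: a 1-byte attribute ID (the P2P_SEID_* values listed just below), a 2-byte length in little-endian order, then the attribute data. A minimal standalone sketch of walking such a list (not part of the patch; offsets restated locally and the buffer hand-built for illustration):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Attribute layout restated from the offsets above: 1-byte ID, 2-byte LE length, data. */
#define P2P_ATTR_ID_OFF   0
#define P2P_ATTR_LEN_OFF  1
#define P2P_ATTR_DATA_OFF 3

static void dump_p2p_attrs(const uint8_t *buf, size_t len)
{
        size_t off = 0;

        while (off + P2P_ATTR_DATA_OFF <= len) {
                uint8_t id = buf[off + P2P_ATTR_ID_OFF];
                uint16_t alen = buf[off + P2P_ATTR_LEN_OFF] |
                                (buf[off + P2P_ATTR_LEN_OFF + 1] << 8);

                if (off + P2P_ATTR_DATA_OFF + alen > len)
                        break;          /* truncated attribute */
                printf("attr id=%u len=%u\n", (unsigned)id, (unsigned)alen);
                off += P2P_ATTR_DATA_OFF + alen;
        }
}

int main(void)
{
        /* Status attribute (ID 0, len 1, value 0) followed by GO intent (ID 4, len 1, value 7). */
        const uint8_t attrs[] = { 0x00, 0x01, 0x00, 0x00, 0x04, 0x01, 0x00, 0x07 };

        dump_p2p_attrs(attrs, sizeof(attrs));
        return 0;
}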
+
+
+#define P2P_SEID_STATUS 0
+#define P2P_SEID_MINOR_RC 1
+#define P2P_SEID_P2P_INFO 2
+#define P2P_SEID_DEV_ID 3
+#define P2P_SEID_INTENT 4
+#define P2P_SEID_CFG_TIMEOUT 5
+#define P2P_SEID_CHANNEL 6
+#define P2P_SEID_GRP_BSSID 7
+#define P2P_SEID_XT_TIMING 8
+#define P2P_SEID_INTINTADDR 9
+#define P2P_SEID_P2P_MGBTY 10
+#define P2P_SEID_CHAN_LIST 11
+#define P2P_SEID_ABSENCE 12
+#define P2P_SEID_DEV_INFO 13
+#define P2P_SEID_GROUP_INFO 14
+#define P2P_SEID_GROUP_ID 15
+#define P2P_SEID_P2P_IF 16
+#define P2P_SEID_OP_CHANNEL 17
+#define P2P_SEID_INVITE_FLAGS 18
+#define P2P_SEID_VNDR 221
+
+#define P2P_SE_VS_ID_SERVICES 0x1b
+
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_info_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 dev;
+ uint8 group;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_info_se_s wifi_p2p_info_se_t;
+
+
+#define P2P_CAPSE_DEV_SERVICE_DIS 0x1
+#define P2P_CAPSE_DEV_CLIENT_DIS 0x2
+#define P2P_CAPSE_DEV_CONCURRENT 0x4
+#define P2P_CAPSE_DEV_INFRA_MAN 0x8
+#define P2P_CAPSE_DEV_LIMIT 0x10
+#define P2P_CAPSE_INVITE_PROC 0x20
+
+
+#define P2P_CAPSE_GRP_OWNER 0x1
+#define P2P_CAPSE_PERSIST_GRP 0x2
+#define P2P_CAPSE_GRP_LIMIT 0x4
+#define P2P_CAPSE_GRP_INTRA_BSS 0x8
+#define P2P_CAPSE_GRP_X_CONNECT 0x10
+#define P2P_CAPSE_GRP_PERSISTENT 0x20
+#define P2P_CAPSE_GRP_FORMATION 0x40
+
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intent_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 intent;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intent_se_s wifi_p2p_intent_se_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_cfg_tmo_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 go_tmo;
+ uint8 client_tmo;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_cfg_tmo_se_s wifi_p2p_cfg_tmo_se_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_listen_channel_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 country[3];
+ uint8 op_class;
+ uint8 channel;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_listen_channel_se_s wifi_p2p_listen_channel_se_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_grp_bssid_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 mac[6];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_grp_bssid_se_s wifi_p2p_grp_bssid_se_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_grp_id_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 mac[6];
+ uint8 ssid[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_grp_id_se_s wifi_p2p_grp_id_se_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intf_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 mac[6];
+ uint8 ifaddrs;
+ uint8 ifaddr[1][6];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intf_se_s wifi_p2p_intf_se_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_status_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 status;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_status_se_s wifi_p2p_status_se_t;
+
+
+#define P2P_STATSE_SUCCESS 0
+
+#define P2P_STATSE_FAIL_INFO_CURR_UNAVAIL 1
+
+#define P2P_STATSE_PASSED_UP P2P_STATSE_FAIL_INFO_CURR_UNAVAIL
+
+#define P2P_STATSE_FAIL_INCOMPAT_PARAMS 2
+
+#define P2P_STATSE_FAIL_LIMIT_REACHED 3
+
+#define P2P_STATSE_FAIL_INVALID_PARAMS 4
+
+#define P2P_STATSE_FAIL_UNABLE_TO_ACCOM 5
+
+#define P2P_STATSE_FAIL_PROTO_ERROR 6
+
+#define P2P_STATSE_FAIL_NO_COMMON_CHAN 7
+
+#define P2P_STATSE_FAIL_UNKNOWN_GROUP 8
+
+#define P2P_STATSE_FAIL_INTENT 9
+
+#define P2P_STATSE_FAIL_INCOMPAT_PROVIS 10
+
+#define P2P_STATSE_FAIL_USER_REJECT 11
+
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_ext_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 avail[2];
+ uint8 interval[2];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_ext_se_s wifi_p2p_ext_se_t;
+
+#define P2P_EXT_MIN 10
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intintad_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 mac[6];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intintad_se_s wifi_p2p_intintad_se_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_channel_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 band;
+ uint8 channel;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_channel_se_s wifi_p2p_channel_se_t;
+
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_entry_s {
+ uint8 band;
+ uint8 num_channels;
+ uint8 channels[WL_NUMCHANNELS];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_chanlist_entry_s wifi_p2p_chanlist_entry_t;
+#define WIFI_P2P_CHANLIST_SE_MAX_ENTRIES 2
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 country[3];
+ uint8 num_entries;
+ wifi_p2p_chanlist_entry_t entries[WIFI_P2P_CHANLIST_SE_MAX_ENTRIES];
+
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_chanlist_se_s wifi_p2p_chanlist_se_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_pri_devtype_s {
+ uint16 cat_id;
+ uint8 OUI[3];
+ uint8 oui_type;
+ uint16 sub_cat_id;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_pri_devtype_s wifi_p2p_pri_devtype_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_devinfo_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 mac[6];
+ uint16 wps_cfg_meths;
+ uint8 pri_devtype[8];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_devinfo_se_s wifi_p2p_devinfo_se_t;
+
+#define P2P_DEV_TYPE_LEN 8
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_cid_fixed_s {
+ uint8 len;
+ uint8 devaddr[ETHER_ADDR_LEN];
+ uint8 ifaddr[ETHER_ADDR_LEN];
+ uint8 devcap;
+ uint8 cfg_meths[2];
+ uint8 pridt[P2P_DEV_TYPE_LEN];
+ uint8 secdts;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_cid_fixed_s wifi_p2p_cid_fixed_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_devid_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ struct ether_addr addr;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_devid_se_s wifi_p2p_devid_se_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_mgbt_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 mg_bitmap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_mgbt_se_s wifi_p2p_mgbt_se_t;
+
+#define P2P_MGBTSE_P2PDEVMGMT_FLAG 0x1
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_grpinfo_se_s {
+ uint8 eltId;
+ uint8 len[2];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_grpinfo_se_s wifi_p2p_grpinfo_se_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_op_channel_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 country[3];
+ uint8 op_class;
+ uint8 channel;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_op_channel_se_s wifi_p2p_op_channel_se_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_invite_flags_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 flags;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_invite_flags_se_s wifi_p2p_invite_flags_se_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_action_frame {
+ uint8 category;
+ uint8 OUI[3];
+ uint8 type;
+ uint8 subtype;
+ uint8 dialog_token;
+ uint8 elts[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_action_frame wifi_p2p_action_frame_t;
+#define P2P_AF_CATEGORY 0x7f
+
+#define P2P_AF_FIXED_LEN 7
+
+
+#define P2P_AF_NOTICE_OF_ABSENCE 0
+#define P2P_AF_PRESENCE_REQ 1
+#define P2P_AF_PRESENCE_RSP 2
+#define P2P_AF_GO_DISC_REQ 3
+
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_pub_act_frame {
+ uint8 category;
+ uint8 action;
+ uint8 oui[3];
+ uint8 oui_type;
+ uint8 subtype;
+ uint8 dialog_token;
+ uint8 elts[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_pub_act_frame wifi_p2p_pub_act_frame_t;
+#define P2P_PUB_AF_FIXED_LEN 8
+#define P2P_PUB_AF_CATEGORY 0x04
+#define P2P_PUB_AF_ACTION 0x09
+
+
+#define P2P_PAF_GON_REQ 0
+#define P2P_PAF_GON_RSP 1
+#define P2P_PAF_GON_CONF 2
+#define P2P_PAF_INVITE_REQ 3
+#define P2P_PAF_INVITE_RSP 4
+#define P2P_PAF_DEVDIS_REQ 5
+#define P2P_PAF_DEVDIS_RSP 6
+#define P2P_PAF_PROVDIS_REQ 7
+#define P2P_PAF_PROVDIS_RSP 8
+
+
+#define P2P_TYPE_MNREQ P2P_PAF_GON_REQ
+#define P2P_TYPE_MNRSP P2P_PAF_GON_RSP
+#define P2P_TYPE_MNCONF P2P_PAF_GON_CONF
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_desc {
+ uint8 cnt_type;
+ uint32 duration;
+ uint32 interval;
+ uint32 start;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_noa_desc wifi_p2p_noa_desc_t;
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_se {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 index;
+ uint8 ops_ctw_parms;
+ wifi_p2p_noa_desc_t desc[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_noa_se wifi_p2p_noa_se_t;
+
+#define P2P_NOA_SE_FIXED_LEN 5
+
+
+#define P2P_NOA_DESC_CNT_RESERVED 0
+#define P2P_NOA_DESC_CNT_REPEAT 255
+#define P2P_NOA_DESC_TYPE_PREFERRED 1
+#define P2P_NOA_DESC_TYPE_ACCEPTABLE 2
+
+
+#define P2P_NOA_CTW_MASK 0x7f
+#define P2P_NOA_OPS_MASK 0x80
+#define P2P_NOA_OPS_SHIFT 7
+
+#define P2P_CTW_MIN 10
+
+
+#define P2PSD_ACTION_CATEGORY 0x04
+
+#define P2PSD_ACTION_ID_GAS_IREQ 0x0a
+
+#define P2PSD_ACTION_ID_GAS_IRESP 0x0b
+
+#define P2PSD_ACTION_ID_GAS_CREQ 0x0c
+
+#define P2PSD_ACTION_ID_GAS_CRESP 0x0d
+
+#define P2PSD_AD_EID 0x6c
+
+#define P2PSD_ADP_TUPLE_QLMT_PAMEBI 0x00
+
+#define P2PSD_ADP_PROTO_ID 0x00
+
+#define P2PSD_GAS_OUI P2P_OUI
+
+#define P2PSD_GAS_OUI_SUBTYPE P2P_VER
+
+#define P2PSD_GAS_NQP_INFOID 0xDDDD
+
+#define P2PSD_GAS_COMEBACKDEALY 0x00
+
+
+
+typedef enum p2psd_svc_protype {
+ SVC_RPOTYPE_ALL = 0,
+ SVC_RPOTYPE_BONJOUR = 1,
+ SVC_RPOTYPE_UPNP = 2,
+ SVC_RPOTYPE_WSD = 3,
+ SVC_RPOTYPE_VENDOR = 255
+} p2psd_svc_protype_t;
+
+
+typedef enum {
+ P2PSD_RESP_STATUS_SUCCESS = 0,
+ P2PSD_RESP_STATUS_PROTYPE_NA = 1,
+ P2PSD_RESP_STATUS_DATA_NA = 2,
+ P2PSD_RESP_STATUS_BAD_REQUEST = 3
+} p2psd_resp_status_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_tpl {
+ uint8 llm_pamebi;
+ uint8 adp_id;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_adp_tpl wifi_p2psd_adp_tpl_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_ie {
+ uint8 id;
+ uint8 len;
+ wifi_p2psd_adp_tpl_t adp_tpl;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_adp_ie wifi_p2psd_adp_ie_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_nqp_query_vsc {
+ uint8 oui_subtype;
+ uint16 svc_updi;
+ uint8 svc_tlvs[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_nqp_query_vsc wifi_p2psd_nqp_query_vsc_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_tlv {
+ uint16 len;
+ uint8 svc_prot;
+ uint8 svc_tscid;
+ uint8 query_data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qreq_tlv wifi_p2psd_qreq_tlv_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_frame {
+ uint16 info_id;
+ uint16 len;
+ uint8 oui[3];
+ uint8 qreq_vsc[1];
+
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qreq_frame wifi_p2psd_qreq_frame_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_ireq_frame {
+ wifi_p2psd_adp_ie_t adp_ie;
+ uint16 qreq_len;
+ uint8 qreq_frm[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_ireq_frame wifi_p2psd_gas_ireq_frame_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_tlv {
+ uint16 len;
+ uint8 svc_prot;
+ uint8 svc_tscid;
+ uint8 status;
+ uint8 query_data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qresp_tlv wifi_p2psd_qresp_tlv_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_frame {
+ uint16 info_id;
+ uint16 len;
+ uint8 oui[3];
+ uint8 qresp_vsc[1];
+
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qresp_frame wifi_p2psd_qresp_frame_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_iresp_frame {
+ uint16 status;
+ uint16 cb_delay;
+ wifi_p2psd_adp_ie_t adp_ie;
+ uint16 qresp_len;
+ uint8 qresp_frm[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_iresp_frame wifi_p2psd_gas_iresp_frame_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_cresp_frame {
+ uint16 status;
+ uint8 fragment_id;
+ uint16 cb_delay;
+ wifi_p2psd_adp_ie_t adp_ie;
+ uint16 qresp_len;
+ uint8 qresp_frm[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_cresp_frame wifi_p2psd_gas_cresp_frame_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_pub_act_frame {
+ uint8 category;
+ uint8 action;
+ uint8 dialog_token;
+ uint8 query_data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_pub_act_frame wifi_p2psd_gas_pub_act_frame_t;
+
+
+#include <packed_section_end.h>
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/proto/sdspi.h b/drivers/net/wireless/bcmdhd/include/proto/sdspi.h
new file mode 100644
index 000000000000..a4900edd4ac6
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/sdspi.h
@@ -0,0 +1,75 @@
+/*
+ * SD-SPI Protocol Standard
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdspi.h 241182 2011-02-17 21:50:03Z $
+ */
+#ifndef _SD_SPI_H
+#define _SD_SPI_H
+
+#define SPI_START_M BITFIELD_MASK(1) /* Bit [31] - Start Bit */
+#define SPI_START_S 31
+#define SPI_DIR_M BITFIELD_MASK(1) /* Bit [30] - Direction */
+#define SPI_DIR_S 30
+#define SPI_CMD_INDEX_M BITFIELD_MASK(6) /* Bits [29:24] - Command number */
+#define SPI_CMD_INDEX_S 24
+#define SPI_RW_M BITFIELD_MASK(1) /* Bit [23] - Read=0, Write=1 */
+#define SPI_RW_S 23
+#define SPI_FUNC_M BITFIELD_MASK(3) /* Bits [22:20] - Function Number */
+#define SPI_FUNC_S 20
+#define SPI_RAW_M BITFIELD_MASK(1) /* Bit [19] - Read After Wr */
+#define SPI_RAW_S 19
+#define SPI_STUFF_M BITFIELD_MASK(1) /* Bit [18] - Stuff bit */
+#define SPI_STUFF_S 18
+#define SPI_BLKMODE_M BITFIELD_MASK(1) /* Bit [19] - Blockmode 1=blk */
+#define SPI_BLKMODE_S 19
+#define SPI_OPCODE_M BITFIELD_MASK(1) /* Bit [18] - OP Code */
+#define SPI_OPCODE_S 18
+#define SPI_ADDR_M BITFIELD_MASK(17) /* Bits [17:1] - Address */
+#define SPI_ADDR_S 1
+#define SPI_STUFF0_M BITFIELD_MASK(1) /* Bit [0] - Stuff bit */
+#define SPI_STUFF0_S 0
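The *_M / *_S pairs above follow the driver's usual mask/shift convention. In the sketch below, BITFIELD_MASK(n) is assumed to expand to an n-bit mask of ones (it is defined elsewhere in these headers, not in this file), and the GFIELD helper is a local stand-in for that convention rather than something this patch provides. A standalone example of extracting two of the command fields:

#include <stdint.h>
#include <stdio.h>

/* Assumed expansion of BITFIELD_MASK (defined elsewhere in these headers, not in this file). */
#define BITFIELD_MASK(width)   ((1u << (width)) - 1)
/* Local stand-in for a mask/shift accessor built on the *_M / *_S pairs. */
#define GFIELD(val, field)     (((val) >> field##_S) & field##_M)

#define SPI_CMD_INDEX_M BITFIELD_MASK(6)        /* Bits [29:24] - Command number */
#define SPI_CMD_INDEX_S 24
#define SPI_FUNC_M      BITFIELD_MASK(3)        /* Bits [22:20] - Function Number */
#define SPI_FUNC_S      20

int main(void)
{
        uint32_t cmd = 0x74100000u;     /* hypothetical command word: CMD52, function 1 */

        printf("cmd index=%u func=%u\n", GFIELD(cmd, SPI_CMD_INDEX), GFIELD(cmd, SPI_FUNC));
        return 0;                       /* prints: cmd index=52 func=1 */
}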
+
+#define SPI_RSP_START_M BITFIELD_MASK(1) /* Bit [7] - Start Bit (always 0) */
+#define SPI_RSP_START_S 7
+#define SPI_RSP_PARAM_ERR_M BITFIELD_MASK(1) /* Bit [6] - Parameter Error */
+#define SPI_RSP_PARAM_ERR_S 6
+#define SPI_RSP_RFU5_M BITFIELD_MASK(1) /* Bit [5] - RFU (Always 0) */
+#define SPI_RSP_RFU5_S 5
+#define SPI_RSP_FUNC_ERR_M BITFIELD_MASK(1) /* Bit [4] - Function number error */
+#define SPI_RSP_FUNC_ERR_S 4
+#define SPI_RSP_CRC_ERR_M BITFIELD_MASK(1) /* Bit [3] - COM CRC Error */
+#define SPI_RSP_CRC_ERR_S 3
+#define SPI_RSP_ILL_CMD_M BITFIELD_MASK(1) /* Bit [2] - Illegal Command error */
+#define SPI_RSP_ILL_CMD_S 2
+#define SPI_RSP_RFU1_M BITFIELD_MASK(1) /* Bit [1] - RFU (Always 0) */
+#define SPI_RSP_RFU1_S 1
+#define SPI_RSP_IDLE_M BITFIELD_MASK(1) /* Bit [0] - In idle state */
+#define SPI_RSP_IDLE_S 0
+
+/* SD-SPI Protocol Definitions */
+#define SDSPI_COMMAND_LEN 6 /* Number of bytes in an SD command */
+#define SDSPI_START_BLOCK 0xFE /* SD Start Block Token */
+#define SDSPI_IDLE_PAD 0xFF /* SD-SPI idle value for MOSI */
+#define SDSPI_START_BIT_MASK 0x80
+
+#endif /* _SD_SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/vlan.h b/drivers/net/wireless/bcmdhd/include/proto/vlan.h
new file mode 100644
index 000000000000..9c94985cf952
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/vlan.h
@@ -0,0 +1,69 @@
+/*
+ * 802.1Q VLAN protocol definitions
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: vlan.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _vlan_h_
+#define _vlan_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+#include <packed_section_start.h>
+
+#define VLAN_VID_MASK 0xfff
+#define VLAN_CFI_SHIFT 12
+#define VLAN_PRI_SHIFT 13
+
+#define VLAN_PRI_MASK 7
+
+#define VLAN_TAG_LEN 4
+#define VLAN_TAG_OFFSET (2 * ETHER_ADDR_LEN)
+
+#define VLAN_TPID 0x8100
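The fields above partition the 16-bit tag control information word: priority in bits 15:13, CFI in bit 12, VLAN ID in bits 11:0 (in the frame itself, vlan_tag is carried in network byte order; the sketch below works on a host-order value and is not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* TCI layout restated from the definitions above (host byte order for this sketch). */
#define VLAN_VID_MASK  0xfff
#define VLAN_CFI_SHIFT 12
#define VLAN_PRI_SHIFT 13
#define VLAN_PRI_MASK  7

int main(void)
{
        uint16_t tci = (5 << VLAN_PRI_SHIFT) | 100;     /* priority 5, CFI 0, VID 100 */

        printf("pri=%u cfi=%u vid=%u\n",
               (unsigned)((tci >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK),
               (unsigned)((tci >> VLAN_CFI_SHIFT) & 1),
               (unsigned)(tci & VLAN_VID_MASK));
        return 0;                                       /* prints: pri=5 cfi=0 vid=100 */
}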
+
+struct ethervlan_header {
+ uint8 ether_dhost[ETHER_ADDR_LEN];
+ uint8 ether_shost[ETHER_ADDR_LEN];
+ uint16 vlan_type;
+ uint16 vlan_tag;
+ uint16 ether_type;
+};
+
+#define ETHERVLAN_HDR_LEN (ETHER_HDR_LEN + VLAN_TAG_LEN)
+
+
+
+#include <packed_section_end.h>
+
+#define ETHERVLAN_MOVE_HDR(d, s) \
+do { \
+ struct ethervlan_header t; \
+ t = *(struct ethervlan_header *)(s); \
+ *(struct ethervlan_header *)(d) = t; \
+} while (0)
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/proto/wpa.h b/drivers/net/wireless/bcmdhd/include/proto/wpa.h
new file mode 100644
index 000000000000..cc2ff5b315d7
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/wpa.h
@@ -0,0 +1,203 @@
+/*
+ * Fundamental types and constants relating to WPA
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wpa.h 261155 2011-05-23 23:51:32Z $
+ */
+
+#ifndef _proto_wpa_h_
+#define _proto_wpa_h_
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+
+
+
+#include <packed_section_start.h>
+
+
+
+
+#define DOT11_RC_INVALID_WPA_IE 13
+#define DOT11_RC_MIC_FAILURE 14
+#define DOT11_RC_4WH_TIMEOUT 15
+#define DOT11_RC_GTK_UPDATE_TIMEOUT 16
+#define DOT11_RC_WPA_IE_MISMATCH 17
+#define DOT11_RC_INVALID_MC_CIPHER 18
+#define DOT11_RC_INVALID_UC_CIPHER 19
+#define DOT11_RC_INVALID_AKMP 20
+#define DOT11_RC_BAD_WPA_VERSION 21
+#define DOT11_RC_INVALID_WPA_CAP 22
+#define DOT11_RC_8021X_AUTH_FAIL 23
+
+#define WPA2_PMKID_LEN 16
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ uint8 tag;
+ uint8 length;
+ uint8 oui[3];
+ uint8 oui_type;
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 low;
+ uint8 high;
+ } BWL_POST_PACKED_STRUCT version;
+} BWL_POST_PACKED_STRUCT wpa_ie_fixed_t;
+#define WPA_IE_OUITYPE_LEN 4
+#define WPA_IE_FIXED_LEN 8
+#define WPA_IE_TAG_FIXED_LEN 6
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 tag;
+ uint8 length;
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 low;
+ uint8 high;
+ } BWL_POST_PACKED_STRUCT version;
+} BWL_POST_PACKED_STRUCT wpa_rsn_ie_fixed_t;
+#define WPA_RSN_IE_FIXED_LEN 4
+#define WPA_RSN_IE_TAG_FIXED_LEN 2
+typedef uint8 wpa_pmkid_t[WPA2_PMKID_LEN];
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ uint8 oui[3];
+ uint8 type;
+} BWL_POST_PACKED_STRUCT wpa_suite_t, wpa_suite_mcast_t;
+#define WPA_SUITE_LEN 4
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 low;
+ uint8 high;
+ } BWL_POST_PACKED_STRUCT count;
+ wpa_suite_t list[1];
+} BWL_POST_PACKED_STRUCT wpa_suite_ucast_t, wpa_suite_auth_key_mgmt_t;
+#define WPA_IE_SUITE_COUNT_LEN 2
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 low;
+ uint8 high;
+ } BWL_POST_PACKED_STRUCT count;
+ wpa_pmkid_t list[1];
+} BWL_POST_PACKED_STRUCT wpa_pmkid_list_t;
+
+
+#define WPA_CIPHER_NONE 0
+#define WPA_CIPHER_WEP_40 1
+#define WPA_CIPHER_TKIP 2
+#define WPA_CIPHER_AES_OCB 3
+#define WPA_CIPHER_AES_CCM 4
+#define WPA_CIPHER_WEP_104 5
+#define WPA_CIPHER_BIP 6
+#define WPA_CIPHER_TPK 7
+
+#ifdef BCMWAPI_WAI
+#define WAPI_CIPHER_NONE WPA_CIPHER_NONE
+#define WAPI_CIPHER_SMS4 11
+
+#define WAPI_CSE_WPI_SMS4 1
+#endif
+
+#define IS_WPA_CIPHER(cipher) ((cipher) == WPA_CIPHER_NONE || \
+ (cipher) == WPA_CIPHER_WEP_40 || \
+ (cipher) == WPA_CIPHER_WEP_104 || \
+ (cipher) == WPA_CIPHER_TKIP || \
+ (cipher) == WPA_CIPHER_AES_OCB || \
+ (cipher) == WPA_CIPHER_AES_CCM || \
+ (cipher) == WPA_CIPHER_TPK)
+
+#ifdef BCMWAPI_WAI
+#define IS_WAPI_CIPHER(cipher) ((cipher) == WAPI_CIPHER_NONE || \
+ (cipher) == WAPI_CSE_WPI_SMS4)
+
+
+#define WAPI_CSE_WPI_2_CIPHER(cse) ((cse) == WAPI_CSE_WPI_SMS4 ? \
+ WAPI_CIPHER_SMS4 : WAPI_CIPHER_NONE)
+
+#define WAPI_CIPHER_2_CSE_WPI(cipher) ((cipher) == WAPI_CIPHER_SMS4 ? \
+ WAPI_CSE_WPI_SMS4 : WAPI_CIPHER_NONE)
+#endif
+
+
+#define WPA_TKIP_CM_DETECT 60
+#define WPA_TKIP_CM_BLOCK 60
+
+
+#define RSN_CAP_LEN 2
+
+
+#define RSN_CAP_PREAUTH 0x0001
+#define RSN_CAP_NOPAIRWISE 0x0002
+#define RSN_CAP_PTK_REPLAY_CNTR_MASK 0x000C
+#define RSN_CAP_PTK_REPLAY_CNTR_SHIFT 2
+#define RSN_CAP_GTK_REPLAY_CNTR_MASK 0x0030
+#define RSN_CAP_GTK_REPLAY_CNTR_SHIFT 4
+#define RSN_CAP_1_REPLAY_CNTR 0
+#define RSN_CAP_2_REPLAY_CNTRS 1
+#define RSN_CAP_4_REPLAY_CNTRS 2
+#define RSN_CAP_16_REPLAY_CNTRS 3
+#ifdef MFP
+#define RSN_CAP_MFPR 0x0040
+#define RSN_CAP_MFPC 0x0080
+#endif
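The PTK/GTK replay-counter fields above are 2-bit encodings in the RSN capabilities word, where the values 0, 1, 2, and 3 stand for 1, 2, 4, and 16 counters respectively. A small standalone sketch of decoding the PTK field (not part of the patch; masks restated locally, example value arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Restated locally so the sketch stands alone. */
#define RSN_CAP_PTK_REPLAY_CNTR_MASK  0x000C
#define RSN_CAP_PTK_REPLAY_CNTR_SHIFT 2

int main(void)
{
        static const unsigned counters[4] = { 1, 2, 4, 16 };   /* encodings 0..3 */
        uint16_t rsn_cap = 0x000c;                              /* arbitrary example value */
        unsigned enc = (rsn_cap & RSN_CAP_PTK_REPLAY_CNTR_MASK) >>
                       RSN_CAP_PTK_REPLAY_CNTR_SHIFT;

        printf("PTK replay counters supported: %u\n", counters[enc]);   /* prints: 16 */
        return 0;
}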
+
+
+#define WPA_CAP_4_REPLAY_CNTRS RSN_CAP_4_REPLAY_CNTRS
+#define WPA_CAP_16_REPLAY_CNTRS RSN_CAP_16_REPLAY_CNTRS
+#define WPA_CAP_REPLAY_CNTR_SHIFT RSN_CAP_PTK_REPLAY_CNTR_SHIFT
+#define WPA_CAP_REPLAY_CNTR_MASK RSN_CAP_PTK_REPLAY_CNTR_MASK
+
+
+#define WPA_CAP_PEER_KEY_ENABLE (0x1 << 1)
+
+
+#define WPA_CAP_LEN RSN_CAP_LEN
+#define WPA_PMKID_CNT_LEN 2
+
+#define WPA_CAP_WPA2_PREAUTH RSN_CAP_PREAUTH
+
+#define WPA2_PMKID_COUNT_LEN 2
+
+#ifdef BCMWAPI_WAI
+#define WAPI_CAP_PREAUTH RSN_CAP_PREAUTH
+
+
+#define WAPI_WAI_REQUEST 0x00F1
+#define WAPI_UNICAST_REKEY 0x00F2
+#define WAPI_STA_AGING 0x00F3
+#define WAPI_MUTIL_REKEY 0x00F4
+#define WAPI_STA_STATS 0x00F5
+
+#define WAPI_USK_REKEY_COUNT 0x4000000
+#define WAPI_MSK_REKEY_COUNT 0x4000000
+#endif
+
+
+#include <packed_section_end.h>
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/proto/wps.h b/drivers/net/wireless/bcmdhd/include/proto/wps.h
new file mode 100644
index 000000000000..cccbfff4cfbb
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/wps.h
@@ -0,0 +1,379 @@
+/*
+ * WPS IE definitions
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id$
+ */
+
+#ifndef _WPS_
+#define _WPS_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Data Element Definitions */
+#define WPS_ID_AP_CHANNEL 0x1001
+#define WPS_ID_ASSOC_STATE 0x1002
+#define WPS_ID_AUTH_TYPE 0x1003
+#define WPS_ID_AUTH_TYPE_FLAGS 0x1004
+#define WPS_ID_AUTHENTICATOR 0x1005
+#define WPS_ID_CONFIG_METHODS 0x1008
+#define WPS_ID_CONFIG_ERROR 0x1009
+#define WPS_ID_CONF_URL4 0x100A
+#define WPS_ID_CONF_URL6 0x100B
+#define WPS_ID_CONN_TYPE 0x100C
+#define WPS_ID_CONN_TYPE_FLAGS 0x100D
+#define WPS_ID_CREDENTIAL 0x100E
+#define WPS_ID_DEVICE_NAME 0x1011
+#define WPS_ID_DEVICE_PWD_ID 0x1012
+#define WPS_ID_E_HASH1 0x1014
+#define WPS_ID_E_HASH2 0x1015
+#define WPS_ID_E_SNONCE1 0x1016
+#define WPS_ID_E_SNONCE2 0x1017
+#define WPS_ID_ENCR_SETTINGS 0x1018
+#define WPS_ID_ENCR_TYPE 0x100F
+#define WPS_ID_ENCR_TYPE_FLAGS 0x1010
+#define WPS_ID_ENROLLEE_NONCE 0x101A
+#define WPS_ID_FEATURE_ID 0x101B
+#define WPS_ID_IDENTITY 0x101C
+#define WPS_ID_IDENTITY_PROOF 0x101D
+#define WPS_ID_KEY_WRAP_AUTH 0x101E
+#define WPS_ID_KEY_IDENTIFIER 0x101F
+#define WPS_ID_MAC_ADDR 0x1020
+#define WPS_ID_MANUFACTURER 0x1021
+#define WPS_ID_MSG_TYPE 0x1022
+#define WPS_ID_MODEL_NAME 0x1023
+#define WPS_ID_MODEL_NUMBER 0x1024
+#define WPS_ID_NW_INDEX 0x1026
+#define WPS_ID_NW_KEY 0x1027
+#define WPS_ID_NW_KEY_INDEX 0x1028
+#define WPS_ID_NEW_DEVICE_NAME 0x1029
+#define WPS_ID_NEW_PWD 0x102A
+#define WPS_ID_OOB_DEV_PWD 0x102C
+#define WPS_ID_OS_VERSION 0x102D
+#define WPS_ID_POWER_LEVEL 0x102F
+#define WPS_ID_PSK_CURRENT 0x1030
+#define WPS_ID_PSK_MAX 0x1031
+#define WPS_ID_PUBLIC_KEY 0x1032
+#define WPS_ID_RADIO_ENABLED 0x1033
+#define WPS_ID_REBOOT 0x1034
+#define WPS_ID_REGISTRAR_CURRENT 0x1035
+#define WPS_ID_REGISTRAR_ESTBLSHD 0x1036
+#define WPS_ID_REGISTRAR_LIST 0x1037
+#define WPS_ID_REGISTRAR_MAX 0x1038
+#define WPS_ID_REGISTRAR_NONCE 0x1039
+#define WPS_ID_REQ_TYPE 0x103A
+#define WPS_ID_RESP_TYPE 0x103B
+#define WPS_ID_RF_BAND 0x103C
+#define WPS_ID_R_HASH1 0x103D
+#define WPS_ID_R_HASH2 0x103E
+#define WPS_ID_R_SNONCE1 0x103F
+#define WPS_ID_R_SNONCE2 0x1040
+#define WPS_ID_SEL_REGISTRAR 0x1041
+#define WPS_ID_SERIAL_NUM 0x1042
+#define WPS_ID_SC_STATE 0x1044
+#define WPS_ID_SSID 0x1045
+#define WPS_ID_TOT_NETWORKS 0x1046
+#define WPS_ID_UUID_E 0x1047
+#define WPS_ID_UUID_R 0x1048
+#define WPS_ID_VENDOR_EXT 0x1049
+#define WPS_ID_VERSION 0x104A
+#define WPS_ID_X509_CERT_REQ 0x104B
+#define WPS_ID_X509_CERT 0x104C
+#define WPS_ID_EAP_IDENTITY 0x104D
+#define WPS_ID_MSG_COUNTER 0x104E
+#define WPS_ID_PUBKEY_HASH 0x104F
+#define WPS_ID_REKEY_KEY 0x1050
+#define WPS_ID_KEY_LIFETIME 0x1051
+#define WPS_ID_PERM_CFG_METHODS 0x1052
+#define WPS_ID_SEL_REG_CFG_METHODS 0x1053
+#define WPS_ID_PRIM_DEV_TYPE 0x1054
+#define WPS_ID_SEC_DEV_TYPE_LIST 0x1055
+#define WPS_ID_PORTABLE_DEVICE 0x1056
+#define WPS_ID_AP_SETUP_LOCKED 0x1057
+#define WPS_ID_APP_LIST 0x1058
+#define WPS_ID_EAP_TYPE 0x1059
+#define WPS_ID_INIT_VECTOR 0x1060
+#define WPS_ID_KEY_PROVIDED_AUTO 0x1061
+#define WPS_ID_8021X_ENABLED 0x1062
+#define WPS_ID_WEP_TRANSMIT_KEY 0x1064
+#define WPS_ID_REQ_DEV_TYPE 0x106A
+
+/* WSC 2.0, WFA Vendor Extension Subelements */
+#define WFA_VENDOR_EXT_ID "\x00\x37\x2A"
+#define WPS_WFA_SUBID_VERSION2 0x00
+#define WPS_WFA_SUBID_AUTHORIZED_MACS 0x01
+#define WPS_WFA_SUBID_NW_KEY_SHAREABLE 0x02
+#define WPS_WFA_SUBID_REQ_TO_ENROLL 0x03
+#define WPS_WFA_SUBID_SETTINGS_DELAY_TIME 0x04
+
+
+/* WCN-NET Windows Rally Vertical Pairing Vendor Extensions */
+#define MS_VENDOR_EXT_ID "\x00\x01\x37"
+#define WPS_MS_ID_VPI 0x1001 /* Vertical Pairing Identifier TLV */
+#define WPS_MS_ID_TRANSPORT_UUID 0x1002 /* Transport UUID TLV */
+
+/* Vertical Pairing Identifier TLV Definitions */
+#define WPS_MS_VPI_TRANSPORT_NONE 0x00 /* None */
+#define WPS_MS_VPI_TRANSPORT_DPWS 0x01 /* Devices Profile for Web Services */
+#define WPS_MS_VPI_TRANSPORT_UPNP 0x02 /* uPnP */
+#define WPS_MS_VPI_TRANSPORT_SDNWS 0x03 /* Secure Devices Profile for Web Services */
+#define WPS_MS_VPI_NO_PROFILE_REQ 0x00 /* Wi-Fi profile not requested.
+ * Not supported in Windows 7
+ */
+#define WPS_MS_VPI_PROFILE_REQ 0x01 /* Wi-Fi profile requested. */
+
+/* sizes of the fixed size elements */
+#define WPS_ID_AP_CHANNEL_S 2
+#define WPS_ID_ASSOC_STATE_S 2
+#define WPS_ID_AUTH_TYPE_S 2
+#define WPS_ID_AUTH_TYPE_FLAGS_S 2
+#define WPS_ID_AUTHENTICATOR_S 8
+#define WPS_ID_CONFIG_METHODS_S 2
+#define WPS_ID_CONFIG_ERROR_S 2
+#define WPS_ID_CONN_TYPE_S 1
+#define WPS_ID_CONN_TYPE_FLAGS_S 1
+#define WPS_ID_DEVICE_PWD_ID_S 2
+#define WPS_ID_ENCR_TYPE_S 2
+#define WPS_ID_ENCR_TYPE_FLAGS_S 2
+#define WPS_ID_FEATURE_ID_S 4
+#define WPS_ID_MAC_ADDR_S 6
+#define WPS_ID_MSG_TYPE_S 1
+#define WPS_ID_SC_STATE_S 1
+#define WPS_ID_RF_BAND_S 1
+#define WPS_ID_OS_VERSION_S 4
+#define WPS_ID_VERSION_S 1
+#define WPS_ID_SEL_REGISTRAR_S 1
+#define WPS_ID_SEL_REG_CFG_METHODS_S 2
+#define WPS_ID_REQ_TYPE_S 1
+#define WPS_ID_RESP_TYPE_S 1
+#define WPS_ID_AP_SETUP_LOCKED_S 1
+
+/* WSC 2.0, WFA Vendor Extension Subelements */
+#define WPS_WFA_SUBID_VERSION2_S 1
+#define WPS_WFA_SUBID_NW_KEY_SHAREABLE_S 1
+#define WPS_WFA_SUBID_REQ_TO_ENROLL_S 1
+#define WPS_WFA_SUBID_SETTINGS_DELAY_TIME_S 1
+
+/* Association states */
+#define WPS_ASSOC_NOT_ASSOCIATED 0
+#define WPS_ASSOC_CONN_SUCCESS 1
+#define WPS_ASSOC_CONFIG_FAIL 2
+#define WPS_ASSOC_ASSOC_FAIL 3
+#define WPS_ASSOC_IP_FAIL 4
+
+/* Authentication types */
+#define WPS_AUTHTYPE_OPEN 0x0001
+#define WPS_AUTHTYPE_WPAPSK 0x0002 /* Deprecated in WSC 2.0 */
+#define WPS_AUTHTYPE_SHARED 0x0004 /* Deprecated in WSC 2.0 */
+#define WPS_AUTHTYPE_WPA 0x0008 /* Deprecated in WSC 2.0 */
+#define WPS_AUTHTYPE_WPA2 0x0010
+#define WPS_AUTHTYPE_WPA2PSK 0x0020
+
+/* Config methods */
+#define WPS_CONFMET_USBA 0x0001 /* Deprecated in WSC 2.0 */
+#define WPS_CONFMET_ETHERNET 0x0002 /* Deprecated in WSC 2.0 */
+#define WPS_CONFMET_LABEL 0x0004
+#define WPS_CONFMET_DISPLAY 0x0008
+#define WPS_CONFMET_EXT_NFC_TOK 0x0010
+#define WPS_CONFMET_INT_NFC_TOK 0x0020
+#define WPS_CONFMET_NFC_INTF 0x0040
+#define WPS_CONFMET_PBC 0x0080
+#define WPS_CONFMET_KEYPAD 0x0100
+/* WSC 2.0 */
+#define WPS_CONFMET_VIRT_PBC 0x0280
+#define WPS_CONFMET_PHY_PBC 0x0480
+#define WPS_CONFMET_VIRT_DISPLAY 0x2008
+#define WPS_CONFMET_PHY_DISPLAY 0x4008
+
+/* WPS error messages */
+#define WPS_ERROR_NO_ERROR 0
+#define WPS_ERROR_OOB_INT_READ_ERR 1
+#define WPS_ERROR_DECRYPT_CRC_FAIL 2
+#define WPS_ERROR_CHAN24_NOT_SUPP 3
+#define WPS_ERROR_CHAN50_NOT_SUPP 4
+#define WPS_ERROR_SIGNAL_WEAK 5 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_NW_AUTH_FAIL 6 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_NW_ASSOC_FAIL 7 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_NO_DHCP_RESP 8 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_FAILED_DHCP_CONF 9 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_IP_ADDR_CONFLICT 10 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_FAIL_CONN_REGISTRAR 11
+#define WPS_ERROR_MULTI_PBC_DETECTED 12
+#define WPS_ERROR_ROGUE_SUSPECTED 13
+#define WPS_ERROR_DEVICE_BUSY 14
+#define WPS_ERROR_SETUP_LOCKED 15
+#define WPS_ERROR_MSG_TIMEOUT 16 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_REG_SESSION_TIMEOUT 17 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_DEV_PWD_AUTH_FAIL 18
+
+/* Connection types */
+#define WPS_CONNTYPE_ESS 0x01
+#define WPS_CONNTYPE_IBSS 0x02
+
+/* Device password ID */
+#define WPS_DEVICEPWDID_DEFAULT 0x0000
+#define WPS_DEVICEPWDID_USER_SPEC 0x0001
+#define WPS_DEVICEPWDID_MACHINE_SPEC 0x0002
+#define WPS_DEVICEPWDID_REKEY 0x0003
+#define WPS_DEVICEPWDID_PUSH_BTN 0x0004
+#define WPS_DEVICEPWDID_REG_SPEC 0x0005
+
+/* Encryption type */
+#define WPS_ENCRTYPE_NONE 0x0001
+#define WPS_ENCRTYPE_WEP 0x0002 /* Deprecated in WSC 2.0 */
+#define WPS_ENCRTYPE_TKIP 0x0004 /* Deprecated in version 2.0. TKIP can only
+ * be advertised on the AP when Mixed Mode
+ * is enabled (Encryption Type is 0x000c).
+ */
+#define WPS_ENCRTYPE_AES 0x0008
+
+
+/* WPS Message Types */
+#define WPS_ID_BEACON 0x01
+#define WPS_ID_PROBE_REQ 0x02
+#define WPS_ID_PROBE_RESP 0x03
+#define WPS_ID_MESSAGE_M1 0x04
+#define WPS_ID_MESSAGE_M2 0x05
+#define WPS_ID_MESSAGE_M2D 0x06
+#define WPS_ID_MESSAGE_M3 0x07
+#define WPS_ID_MESSAGE_M4 0x08
+#define WPS_ID_MESSAGE_M5 0x09
+#define WPS_ID_MESSAGE_M6 0x0A
+#define WPS_ID_MESSAGE_M7 0x0B
+#define WPS_ID_MESSAGE_M8 0x0C
+#define WPS_ID_MESSAGE_ACK 0x0D
+#define WPS_ID_MESSAGE_NACK 0x0E
+#define WPS_ID_MESSAGE_DONE 0x0F
+
+/* WPS private ID for local use */
+#define WPS_PRIVATE_ID_IDENTITY (WPS_ID_MESSAGE_DONE + 1)
+#define WPS_PRIVATE_ID_WPS_START (WPS_ID_MESSAGE_DONE + 2)
+#define WPS_PRIVATE_ID_FAILURE (WPS_ID_MESSAGE_DONE + 3)
+#define WPS_PRIVATE_ID_FRAG (WPS_ID_MESSAGE_DONE + 4)
+#define WPS_PRIVATE_ID_FRAG_ACK (WPS_ID_MESSAGE_DONE + 5)
+#define WPS_PRIVATE_ID_EAPOL_START (WPS_ID_MESSAGE_DONE + 6)
+
+
+/* Device Type categories for primary and secondary device types */
+#define WPS_DEVICE_TYPE_CAT_COMPUTER 1
+#define WPS_DEVICE_TYPE_CAT_INPUT_DEVICE 2
+#define WPS_DEVICE_TYPE_CAT_PRINTER 3
+#define WPS_DEVICE_TYPE_CAT_CAMERA 4
+#define WPS_DEVICE_TYPE_CAT_STORAGE 5
+#define WPS_DEVICE_TYPE_CAT_NW_INFRA 6
+#define WPS_DEVICE_TYPE_CAT_DISPLAYS 7
+#define WPS_DEVICE_TYPE_CAT_MM_DEVICES 8
+#define WPS_DEVICE_TYPE_CAT_GAME_DEVICES 9
+#define WPS_DEVICE_TYPE_CAT_TELEPHONE 10
+#define WPS_DEVICE_TYPE_CAT_AUDIO_DEVICES 11 /* WSC 2.0 */
+
+/* Device Type sub categories for primary and secondary device types */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_PC 1
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_SERVER 2
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_MEDIA_CTR 3
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_UM_PC 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_NOTEBOOK 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_DESKTOP 6 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_MID 7 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_NETBOOK 8 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_Keyboard 1 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_MOUSE 2 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_JOYSTICK 3 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_TRACKBALL 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_GAM_CTRL 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_REMOTE 6 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_TOUCHSCREEN 7 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_BIO_READER 8 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_BAR_READER 9 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_PRINTER 1
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_SCANNER 2
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_FAX 3 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_COPIER 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_ALLINONE 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_CAM_DGTL_STILL 1
+#define WPS_DEVICE_TYPE_SUB_CAT_CAM_VIDEO_CAM 2 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_CAM_WEB_CAM 3 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_CAM_SECU_CAM 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_STOR_NAS 1
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_AP 1
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_ROUTER 2
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_SWITCH 3
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_GATEWAY 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_BRIDGE 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_DISP_TV 1
+#define WPS_DEVICE_TYPE_SUB_CAT_DISP_PIC_FRAME 2
+#define WPS_DEVICE_TYPE_SUB_CAT_DISP_PROJECTOR 3
+#define WPS_DEVICE_TYPE_SUB_CAT_DISP_MONITOR 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_DAR 1
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_PVR 2
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_MCX 3
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_STB 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_MS_ME 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_PVP 6 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_XBOX 1
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_XBOX_360 2
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_PS 3
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_GC 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_PGD 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_WM 1
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_PSM 2 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_PDM 3 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_SSM 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_SDM 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_TUNER 1 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_SPEAKERS 2 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_PMP 3 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HEADSET 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HPHONE 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_MPHONE 6 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HTS 7 /* WSC 2.0 */
+
+
+/* Device request/response type */
+#define WPS_MSGTYPE_ENROLLEE_INFO_ONLY 0x00
+#define WPS_MSGTYPE_ENROLLEE_OPEN_8021X 0x01
+#define WPS_MSGTYPE_REGISTRAR 0x02
+#define WPS_MSGTYPE_AP_WLAN_MGR 0x03
+
+/* RF Band */
+#define WPS_RFBAND_24GHZ 0x01
+#define WPS_RFBAND_50GHZ 0x02
+
+/* Simple Config state */
+#define WPS_SCSTATE_UNCONFIGURED 0x01
+#define WPS_SCSTATE_CONFIGURED 0x02
+#define WPS_SCSTATE_OFF 11
+
+/* WPS Vendor extension key */
+#define WPS_OUI_HEADER_LEN 2
+#define WPS_OUI_HEADER_SIZE 4
+#define WPS_OUI_FIXED_HEADER_OFF 16
+#define WPS_WFA_SUBID_V2_OFF 3
+#define WPS_WFA_V2_OFF 5
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _WPS_ */
diff --git a/drivers/net/wireless/bcmdhd/include/rwl_wifi.h b/drivers/net/wireless/bcmdhd/include/rwl_wifi.h
new file mode 100644
index 000000000000..187c2bc6337d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/rwl_wifi.h
@@ -0,0 +1,96 @@
+/*
+ * RWL definitions of
+ * Broadcom 802.11bang Networking Device Driver
+ *
+ * Copyright (C) 2012, Broadcom Corporation
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
+ * the contents of this file may not be disclosed to third parties, copied
+ * or duplicated in any form, in whole or in part, without the prior
+ * written permission of Broadcom Corporation.
+ *
+ * $Id: rwl_wifi.h 281527 2011-09-02 17:12:53Z $
+ *
+ */
+
+#ifndef _rwl_wifi_h_
+#define _rwl_wifi_h_
+
+#if defined(RWL_WIFI) || defined(WIFI_REFLECTOR) || defined(RFAWARE)
+
+#define RWL_ACTION_WIFI_CATEGORY 127 /* Vendor-specific category value for WiFi */
+#define RWL_WIFI_OUI_BYTE0 0x00 /* BRCM-specific public OUI */
+#define RWL_WIFI_OUI_BYTE1 0x90
+#define RWL_WIFI_OUI_BYTE2 0x4c
+#define RWL_WIFI_ACTION_FRAME_SIZE sizeof(struct dot11_action_wifi_vendor_specific)
+#define RWL_WIFI_FIND_MY_PEER 0x09 /* Used while finding server */
+#define RWL_WIFI_FOUND_PEER 0x0A /* Server response to the client */
+#define RWL_WIFI_DEFAULT 0x00
+#define RWL_ACTION_WIFI_FRAG_TYPE 0x55 /* Fragment indicator for receiver */
+
+/*
+ * Information about the action frame data fields in the dot11_action_wifi_vendor_specific
+ * cdc structure (1 to 16). This does not include the status flag. Since this
+ * is not directly visible to the driver code, we can't use sizeof(struct cdc_ioctl).
+ * Hence Ref MAC address offset starts from byte 17.
+ * REF MAC ADDR (6 bytes (MAC Address len) from byte 17 to 22)
+ * DUT MAC ADDR (6 bytes after the REF MAC Address byte 23 to 28)
+ * unused (byte 29 to 49)
+ * REF/Client Channel offset (50)
+ * DUT/Server channel offset (51)
+ * ---------------------------------------------------------------------------------------
+ * cdc struct|REF MAC ADDR|DUT_MAC_ADDR|unused|REF Channel|DUT channel|Action frame Data|
+ * 1---------17-----------23-------------------50----------51----------52----------------1040
+ * REF MAC addr after CDC struct without status flag (status flag not used by wifi)
+ */
+
+#define RWL_REF_MAC_ADDRESS_OFFSET 17
+#define RWL_DUT_MAC_ADDRESS_OFFSET 23
+#define RWL_WIFI_CLIENT_CHANNEL_OFFSET 50
+#define RWL_WIFI_SERVER_CHANNEL_OFFSET 51
+
+#ifdef WIFI_REFLECTOR
+#include <bcmcdc.h>
+#define REMOTE_FINDSERVER_CMD 16
+#define RWL_WIFI_ACTION_CMD "wifiaction"
+#define RWL_WIFI_ACTION_CMD_LEN 11 /* With the NULL terminator */
+#define REMOTE_SET_CMD 1
+#define REMOTE_GET_CMD 2
+#define REMOTE_REPLY 4
+#define RWL_WIFI_DEFAULT_TYPE 0x00
+#define RWL_WIFI_DEFAULT_SUBTYPE 0x00
+#define RWL_ACTION_FRAME_DATA_SIZE 1024 /* fixed size for the wifi frame data */
+#define RWL_WIFI_CDC_HEADER_OFFSET 0
+#define RWL_WIFI_FRAG_DATA_SIZE 960 /* max size of the frag data */
+#define RWL_DEFAULT_WIFI_FRAG_COUNT 127 /* maximum fragment count */
+#define RWL_WIFI_RETRY 5 /* CMD retry count for wifi */
+#define RWL_WIFI_SEND 5 /* WIFI frame sent count */
+#define RWL_WIFI_SEND_DELAY 100 /* delay between two frames */
+#define MICROSEC_CONVERTOR_VAL 1000
+#ifndef IFNAMSIZ
+#define IFNAMSIZ 16
+#endif
+
+typedef struct rem_packet {
+ rem_ioctl_t rem_cdc;
+ uchar message [RWL_ACTION_FRAME_DATA_SIZE];
+} rem_packet_t;
+
+struct send_packet {
+ char command [RWL_WIFI_ACTION_CMD_LEN];
+ dot11_action_wifi_vendor_specific_t response;
+} PACKED;
+typedef struct send_packet send_packet_t;
+
+#define REMOTE_SIZE sizeof(rem_ioctl_t)
+#endif /* WIFI_REFLECTOR */
+
+typedef struct rwl_request {
+ struct rwl_request* next_request;
+ struct dot11_action_wifi_vendor_specific action_frame;
+} rwl_request_t;
+
+
+#endif /* defined(RWL_WIFI) || defined(WIFI_REFLECTOR) || defined(RFAWARE) */
+#endif /* _rwl_wifi_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/sbchipc.h b/drivers/net/wireless/bcmdhd/include/sbchipc.h
new file mode 100644
index 000000000000..761bc887c94a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbchipc.h
@@ -0,0 +1,2233 @@
+/*
+ * SiliconBackplane Chipcommon core hardware definitions.
+ *
+ * The chipcommon core provides chip identification, SB control,
+ * JTAG, 0/1/2 UARTs, clock frequency control, a watchdog interrupt timer,
+ * GPIO interface, extbus, and support for serial and parallel flashes.
+ *
+ * $Id: sbchipc.h 328358 2012-04-18 23:14:31Z $
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ */
+
+#ifndef _SBCHIPC_H
+#define _SBCHIPC_H
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif
+
+typedef struct eci_prerev35 {
+ uint32 eci_output;
+ uint32 eci_control;
+ uint32 eci_inputlo;
+ uint32 eci_inputmi;
+ uint32 eci_inputhi;
+ uint32 eci_inputintpolaritylo;
+ uint32 eci_inputintpolaritymi;
+ uint32 eci_inputintpolarityhi;
+ uint32 eci_intmasklo;
+ uint32 eci_intmaskmi;
+ uint32 eci_intmaskhi;
+ uint32 eci_eventlo;
+ uint32 eci_eventmi;
+ uint32 eci_eventhi;
+ uint32 eci_eventmasklo;
+ uint32 eci_eventmaskmi;
+ uint32 eci_eventmaskhi;
+ uint32 PAD[3];
+} eci_prerev35_t;
+
+typedef struct eci_rev35 {
+ uint32 eci_outputlo;
+ uint32 eci_outputhi;
+ uint32 eci_controllo;
+ uint32 eci_controlhi;
+ uint32 eci_inputlo;
+ uint32 eci_inputhi;
+ uint32 eci_inputintpolaritylo;
+ uint32 eci_inputintpolarityhi;
+ uint32 eci_intmasklo;
+ uint32 eci_intmaskhi;
+ uint32 eci_eventlo;
+ uint32 eci_eventhi;
+ uint32 eci_eventmasklo;
+ uint32 eci_eventmaskhi;
+ uint32 eci_auxtx;
+ uint32 eci_auxrx;
+ uint32 eci_datatag;
+ uint32 eci_uartescvalue;
+ uint32 eci_autobaudctr;
+ uint32 eci_uartfifolevel;
+} eci_rev35_t;
+
+typedef struct flash_config {
+ uint32 PAD[19];
+
+ uint32 flashstrconfig;
+} flash_config_t;
+
+typedef volatile struct {
+ uint32 chipid;
+ uint32 capabilities;
+ uint32 corecontrol;
+ uint32 bist;
+
+
+ uint32 otpstatus;
+ uint32 otpcontrol;
+ uint32 otpprog;
+ uint32 otplayout;
+
+
+ uint32 intstatus;
+ uint32 intmask;
+
+
+ uint32 chipcontrol;
+ uint32 chipstatus;
+
+
+ uint32 jtagcmd;
+ uint32 jtagir;
+ uint32 jtagdr;
+ uint32 jtagctrl;
+
+
+ uint32 flashcontrol;
+ uint32 flashaddress;
+ uint32 flashdata;
+ uint32 otplayoutextension;
+
+
+ uint32 broadcastaddress;
+ uint32 broadcastdata;
+
+
+ uint32 gpiopullup;
+ uint32 gpiopulldown;
+ uint32 gpioin;
+ uint32 gpioout;
+ uint32 gpioouten;
+ uint32 gpiocontrol;
+ uint32 gpiointpolarity;
+ uint32 gpiointmask;
+
+
+ uint32 gpioevent;
+ uint32 gpioeventintmask;
+
+
+ uint32 watchdog;
+
+
+ uint32 gpioeventintpolarity;
+
+
+ uint32 gpiotimerval;
+ uint32 gpiotimeroutmask;
+
+
+ uint32 clockcontrol_n;
+ uint32 clockcontrol_sb;
+ uint32 clockcontrol_pci;
+ uint32 clockcontrol_m2;
+ uint32 clockcontrol_m3;
+ uint32 clkdiv;
+ uint32 gpiodebugsel;
+ uint32 capabilities_ext;
+
+
+ uint32 pll_on_delay;
+ uint32 fref_sel_delay;
+ uint32 slow_clk_ctl;
+ uint32 PAD;
+
+
+ uint32 system_clk_ctl;
+ uint32 clkstatestretch;
+ uint32 PAD[2];
+
+
+ uint32 bp_addrlow;
+ uint32 bp_addrhigh;
+ uint32 bp_data;
+ uint32 PAD;
+ uint32 bp_indaccess;
+
+ uint32 gsioctrl;
+ uint32 gsioaddress;
+ uint32 gsiodata;
+
+
+ uint32 clkdiv2;
+
+ uint32 otpcontrol1;
+ uint32 fabid;
+
+
+ uint32 eromptr;
+
+
+ uint32 pcmcia_config;
+ uint32 pcmcia_memwait;
+ uint32 pcmcia_attrwait;
+ uint32 pcmcia_iowait;
+ uint32 ide_config;
+ uint32 ide_memwait;
+ uint32 ide_attrwait;
+ uint32 ide_iowait;
+ uint32 prog_config;
+ uint32 prog_waitcount;
+ uint32 flash_config;
+ uint32 flash_waitcount;
+ uint32 SECI_config;
+ uint32 SECI_status;
+ uint32 SECI_statusmask;
+ uint32 SECI_rxnibchanged;
+
+ uint32 PAD[20];
+
+
+ uint32 sromcontrol;
+ uint32 sromaddress;
+ uint32 sromdata;
+ uint32 PAD[1];
+
+ uint32 nflashctrl;
+ uint32 nflashconf;
+ uint32 nflashcoladdr;
+ uint32 nflashrowaddr;
+ uint32 nflashdata;
+ uint32 nflashwaitcnt0;
+ uint32 PAD[2];
+
+ uint32 seci_uart_data;
+ uint32 seci_uart_bauddiv;
+ uint32 seci_uart_fcr;
+ uint32 seci_uart_lcr;
+ uint32 seci_uart_mcr;
+ uint32 seci_uart_lsr;
+ uint32 seci_uart_msr;
+ uint32 seci_uart_baudadj;
+
+ uint32 clk_ctl_st;
+ uint32 hw_war;
+ uint32 PAD[70];
+
+
+ uint8 uart0data;
+ uint8 uart0imr;
+ uint8 uart0fcr;
+ uint8 uart0lcr;
+ uint8 uart0mcr;
+ uint8 uart0lsr;
+ uint8 uart0msr;
+ uint8 uart0scratch;
+ uint8 PAD[248];
+
+ uint8 uart1data;
+ uint8 uart1imr;
+ uint8 uart1fcr;
+ uint8 uart1lcr;
+ uint8 uart1mcr;
+ uint8 uart1lsr;
+ uint8 uart1msr;
+ uint8 uart1scratch;
+ uint32 PAD[126];
+
+
+
+ uint32 pmucontrol;
+ uint32 pmucapabilities;
+ uint32 pmustatus;
+ uint32 res_state;
+ uint32 res_pending;
+ uint32 pmutimer;
+ uint32 min_res_mask;
+ uint32 max_res_mask;
+ uint32 res_table_sel;
+ uint32 res_dep_mask;
+ uint32 res_updn_timer;
+ uint32 res_timer;
+ uint32 clkstretch;
+ uint32 pmuwatchdog;
+ uint32 gpiosel;
+ uint32 gpioenable;
+ uint32 res_req_timer_sel;
+ uint32 res_req_timer;
+ uint32 res_req_mask;
+ uint32 PAD;
+ uint32 chipcontrol_addr;
+ uint32 chipcontrol_data;
+ uint32 regcontrol_addr;
+ uint32 regcontrol_data;
+ uint32 pllcontrol_addr;
+ uint32 pllcontrol_data;
+ uint32 pmustrapopt;
+ uint32 pmu_xtalfreq;
+ uint32 PAD[100];
+ uint16 sromotp[512];
+
+ uint32 nand_revision;
+ uint32 nand_cmd_start;
+ uint32 nand_cmd_addr_x;
+ uint32 nand_cmd_addr;
+ uint32 nand_cmd_end_addr;
+ uint32 nand_cs_nand_select;
+ uint32 nand_cs_nand_xor;
+ uint32 PAD;
+ uint32 nand_spare_rd0;
+ uint32 nand_spare_rd4;
+ uint32 nand_spare_rd8;
+ uint32 nand_spare_rd12;
+ uint32 nand_spare_wr0;
+ uint32 nand_spare_wr4;
+ uint32 nand_spare_wr8;
+ uint32 nand_spare_wr12;
+ uint32 nand_acc_control;
+ uint32 PAD;
+ uint32 nand_config;
+ uint32 PAD;
+ uint32 nand_timing_1;
+ uint32 nand_timing_2;
+ uint32 nand_semaphore;
+ uint32 PAD;
+ uint32 nand_devid;
+ uint32 nand_devid_x;
+ uint32 nand_block_lock_status;
+ uint32 nand_intfc_status;
+ uint32 nand_ecc_corr_addr_x;
+ uint32 nand_ecc_corr_addr;
+ uint32 nand_ecc_unc_addr_x;
+ uint32 nand_ecc_unc_addr;
+ uint32 nand_read_error_count;
+ uint32 nand_corr_stat_threshold;
+ uint32 PAD[2];
+ uint32 nand_read_addr_x;
+ uint32 nand_read_addr;
+ uint32 nand_page_program_addr_x;
+ uint32 nand_page_program_addr;
+ uint32 nand_copy_back_addr_x;
+ uint32 nand_copy_back_addr;
+ uint32 nand_block_erase_addr_x;
+ uint32 nand_block_erase_addr;
+ uint32 nand_inv_read_addr_x;
+ uint32 nand_inv_read_addr;
+ uint32 PAD[2];
+ uint32 nand_blk_wr_protect;
+ uint32 PAD[3];
+ uint32 nand_acc_control_cs1;
+ uint32 nand_config_cs1;
+ uint32 nand_timing_1_cs1;
+ uint32 nand_timing_2_cs1;
+ uint32 PAD[20];
+ uint32 nand_spare_rd16;
+ uint32 nand_spare_rd20;
+ uint32 nand_spare_rd24;
+ uint32 nand_spare_rd28;
+ uint32 nand_cache_addr;
+ uint32 nand_cache_data;
+ uint32 nand_ctrl_config;
+ uint32 nand_ctrl_status;
+} chipcregs_t;
+
+#endif
+
+
+#define CC_CHIPID 0
+#define CC_CAPABILITIES 4
+#define CC_CHIPST 0x2c
+#define CC_EROMPTR 0xfc
+
+#define CC_OTPST 0x10
+#define CC_JTAGCMD 0x30
+#define CC_JTAGIR 0x34
+#define CC_JTAGDR 0x38
+#define CC_JTAGCTRL 0x3c
+#define CC_GPIOPU 0x58
+#define CC_GPIOPD 0x5c
+#define CC_GPIOIN 0x60
+#define CC_GPIOOUT 0x64
+#define CC_GPIOOUTEN 0x68
+#define CC_GPIOCTRL 0x6c
+#define CC_GPIOPOL 0x70
+#define CC_GPIOINTM 0x74
+#define CC_WATCHDOG 0x80
+#define CC_CLKC_N 0x90
+#define CC_CLKC_M0 0x94
+#define CC_CLKC_M1 0x98
+#define CC_CLKC_M2 0x9c
+#define CC_CLKC_M3 0xa0
+#define CC_CLKDIV 0xa4
+#define CC_SYS_CLK_CTL 0xc0
+#define CC_CLK_CTL_ST SI_CLK_CTL_ST
+#define PMU_CTL 0x600
+#define PMU_CAP 0x604
+#define PMU_ST 0x608
+#define PMU_RES_STATE 0x60c
+#define PMU_TIMER 0x614
+#define PMU_MIN_RES_MASK 0x618
+#define PMU_MAX_RES_MASK 0x61c
+#define CC_CHIPCTL_ADDR 0x650
+#define CC_CHIPCTL_DATA 0x654
+#define PMU_REG_CONTROL_ADDR 0x658
+#define PMU_REG_CONTROL_DATA 0x65C
+#define PMU_PLL_CONTROL_ADDR 0x660
+#define PMU_PLL_CONTROL_DATA 0x664
+#define CC_SROM_OTP 0x800
+
+#ifdef NFLASH_SUPPORT
+
+#define CC_NAND_REVISION 0xC00
+#define CC_NAND_CMD_START 0xC04
+#define CC_NAND_CMD_ADDR 0xC0C
+#define CC_NAND_SPARE_RD_0 0xC20
+#define CC_NAND_SPARE_RD_4 0xC24
+#define CC_NAND_SPARE_RD_8 0xC28
+#define CC_NAND_SPARE_RD_C 0xC2C
+#define CC_NAND_CONFIG 0xC48
+#define CC_NAND_DEVID 0xC60
+#define CC_NAND_DEVID_EXT 0xC64
+#define CC_NAND_INTFC_STATUS 0xC6C
+#endif
+
+
+#define CID_ID_MASK 0x0000ffff
+#define CID_REV_MASK 0x000f0000
+#define CID_REV_SHIFT 16
+#define CID_PKG_MASK 0x00f00000
+#define CID_PKG_SHIFT 20
+#define CID_CC_MASK 0x0f000000
+#define CID_CC_SHIFT 24
+#define CID_TYPE_MASK 0xf0000000
+#define CID_TYPE_SHIFT 28
+
+
+#define CC_CAP_UARTS_MASK 0x00000003
+#define CC_CAP_MIPSEB 0x00000004
+#define CC_CAP_UCLKSEL 0x00000018
+#define CC_CAP_UINTCLK 0x00000008
+#define CC_CAP_UARTGPIO 0x00000020
+#define CC_CAP_EXTBUS_MASK 0x000000c0
+#define CC_CAP_EXTBUS_NONE 0x00000000
+#define CC_CAP_EXTBUS_FULL 0x00000040
+#define CC_CAP_EXTBUS_PROG 0x00000080
+#define CC_CAP_FLASH_MASK 0x00000700
+#define CC_CAP_PLL_MASK 0x00038000
+#define CC_CAP_PWR_CTL 0x00040000
+#define CC_CAP_OTPSIZE 0x00380000
+#define CC_CAP_OTPSIZE_SHIFT 19
+#define CC_CAP_OTPSIZE_BASE 5
+#define CC_CAP_JTAGP 0x00400000
+#define CC_CAP_ROM 0x00800000
+#define CC_CAP_BKPLN64 0x08000000
+#define CC_CAP_PMU 0x10000000
+#define CC_CAP_ECI 0x20000000
+#define CC_CAP_SROM 0x40000000
+#define CC_CAP_NFLASH 0x80000000
+
+#define CC_CAP2_SECI 0x00000001
+#define CC_CAP2_GSIO 0x00000002
+
+
+#define CC_CAP_EXT_SECI_PRESENT 0x00000001
+
+
+#define PLL_NONE 0x00000000
+#define PLL_TYPE1 0x00010000
+#define PLL_TYPE2 0x00020000
+#define PLL_TYPE3 0x00030000
+#define PLL_TYPE4 0x00008000
+#define PLL_TYPE5 0x00018000
+#define PLL_TYPE6 0x00028000
+#define PLL_TYPE7 0x00038000
+
+
+#define ILP_CLOCK 32000
+
+
+#define ALP_CLOCK 20000000
+
+
+#define HT_CLOCK 80000000
+
+
+#define CC_UARTCLKO 0x00000001
+#define CC_SE 0x00000002
+#define CC_ASYNCGPIO 0x00000004
+#define CC_UARTCLKEN 0x00000008
+
+
+#define CHIPCTRL_4321A0_DEFAULT 0x3a4
+#define CHIPCTRL_4321A1_DEFAULT 0x0a4
+#define CHIPCTRL_4321_PLL_DOWN 0x800000
+
+
+#define OTPS_OL_MASK 0x000000ff
+#define OTPS_OL_MFG 0x00000001
+#define OTPS_OL_OR1 0x00000002
+#define OTPS_OL_OR2 0x00000004
+#define OTPS_OL_GU 0x00000008
+#define OTPS_GUP_MASK 0x00000f00
+#define OTPS_GUP_SHIFT 8
+#define OTPS_GUP_HW 0x00000100
+#define OTPS_GUP_SW 0x00000200
+#define OTPS_GUP_CI 0x00000400
+#define OTPS_GUP_FUSE 0x00000800
+#define OTPS_READY 0x00001000
+#define OTPS_RV(x) (1 << (16 + (x)))
+#define OTPS_RV_MASK 0x0fff0000
+#define OTPS_PROGOK 0x40000000
+
+
+#define OTPC_PROGSEL 0x00000001
+#define OTPC_PCOUNT_MASK 0x0000000e
+#define OTPC_PCOUNT_SHIFT 1
+#define OTPC_VSEL_MASK 0x000000f0
+#define OTPC_VSEL_SHIFT 4
+#define OTPC_TMM_MASK 0x00000700
+#define OTPC_TMM_SHIFT 8
+#define OTPC_ODM 0x00000800
+#define OTPC_PROGEN 0x80000000
+
+
+#define OTPC_40NM_PROGSEL_SHIFT 0
+#define OTPC_40NM_PCOUNT_SHIFT 1
+#define OTPC_40NM_PCOUNT_WR 0xA
+#define OTPC_40NM_PCOUNT_V1X 0xB
+#define OTPC_40NM_REGCSEL_SHIFT 5
+#define OTPC_40NM_REGCSEL_DEF 0x4
+#define OTPC_40NM_PROGIN_SHIFT 8
+#define OTPC_40NM_R2X_SHIFT 10
+#define OTPC_40NM_ODM_SHIFT 11
+#define OTPC_40NM_DF_SHIFT 15
+#define OTPC_40NM_VSEL_SHIFT 16
+#define OTPC_40NM_VSEL_WR 0xA
+#define OTPC_40NM_VSEL_V1X 0xA
+#define OTPC_40NM_VSEL_R1X 0x5
+#define OTPC_40NM_COFAIL_SHIFT 30
+
+#define OTPC1_CPCSEL_SHIFT 0
+#define OTPC1_CPCSEL_DEF 6
+#define OTPC1_TM_SHIFT 8
+#define OTPC1_TM_WR 0x84
+#define OTPC1_TM_V1X 0x84
+#define OTPC1_TM_R1X 0x4
+
+
+#define OTPP_COL_MASK 0x000000ff
+#define OTPP_COL_SHIFT 0
+#define OTPP_ROW_MASK 0x0000ff00
+#define OTPP_ROW_SHIFT 8
+#define OTPP_OC_MASK 0x0f000000
+#define OTPP_OC_SHIFT 24
+#define OTPP_READERR 0x10000000
+#define OTPP_VALUE_MASK 0x20000000
+#define OTPP_VALUE_SHIFT 29
+#define OTPP_START_BUSY 0x80000000
+#define OTPP_READ 0x40000000
+
+
+#define OTPL_HWRGN_OFF_MASK 0x00000FFF
+#define OTPL_HWRGN_OFF_SHIFT 0
+#define OTPL_WRAP_REVID_MASK 0x00F80000
+#define OTPL_WRAP_REVID_SHIFT 19
+#define OTPL_WRAP_TYPE_MASK 0x00070000
+#define OTPL_WRAP_TYPE_SHIFT 16
+#define OTPL_WRAP_TYPE_65NM 0
+#define OTPL_WRAP_TYPE_40NM 1
+
+
+#define OTP_CISFORMAT_NEW 0x80000000
+
+
+#define OTPPOC_READ 0
+#define OTPPOC_BIT_PROG 1
+#define OTPPOC_VERIFY 3
+#define OTPPOC_INIT 4
+#define OTPPOC_SET 5
+#define OTPPOC_RESET 6
+#define OTPPOC_OCST 7
+#define OTPPOC_ROW_LOCK 8
+#define OTPPOC_PRESCN_TEST 9
+
+
+#define OTPPOC_READ_40NM 0
+#define OTPPOC_PROG_ENABLE_40NM 1
+#define OTPPOC_PROG_DISABLE_40NM 2
+#define OTPPOC_VERIFY_40NM 3
+#define OTPPOC_WORD_VERIFY_1_40NM 4
+#define OTPPOC_ROW_LOCK_40NM 5
+#define OTPPOC_STBY_40NM 6
+#define OTPPOC_WAKEUP_40NM 7
+#define OTPPOC_WORD_VERIFY_0_40NM 8
+#define OTPPOC_PRESCN_TEST_40NM 9
+#define OTPPOC_BIT_PROG_40NM 10
+#define OTPPOC_WORDPROG_40NM 11
+#define OTPPOC_BURNIN_40NM 12
+#define OTPPOC_AUTORELOAD_40NM 13
+#define OTPPOC_OVST_READ_40NM 14
+#define OTPPOC_OVST_PROG_40NM 15
+
+
+#define OTPLAYOUTEXT_FUSE_MASK 0x3FF
+
+
+
+#define JTAGM_CREV_OLD 10
+#define JTAGM_CREV_IRP 22
+#define JTAGM_CREV_RTI 28
+
+
+#define JCMD_START 0x80000000
+#define JCMD_BUSY 0x80000000
+#define JCMD_STATE_MASK 0x60000000
+#define JCMD_STATE_TLR 0x00000000
+#define JCMD_STATE_PIR 0x20000000
+#define JCMD_STATE_PDR 0x40000000
+#define JCMD_STATE_RTI 0x60000000
+#define JCMD0_ACC_MASK 0x0000f000
+#define JCMD0_ACC_IRDR 0x00000000
+#define JCMD0_ACC_DR 0x00001000
+#define JCMD0_ACC_IR 0x00002000
+#define JCMD0_ACC_RESET 0x00003000
+#define JCMD0_ACC_IRPDR 0x00004000
+#define JCMD0_ACC_PDR 0x00005000
+#define JCMD0_IRW_MASK 0x00000f00
+#define JCMD_ACC_MASK 0x000f0000
+#define JCMD_ACC_IRDR 0x00000000
+#define JCMD_ACC_DR 0x00010000
+#define JCMD_ACC_IR 0x00020000
+#define JCMD_ACC_RESET 0x00030000
+#define JCMD_ACC_IRPDR 0x00040000
+#define JCMD_ACC_PDR 0x00050000
+#define JCMD_ACC_PIR 0x00060000
+#define JCMD_ACC_IRDR_I 0x00070000
+#define JCMD_ACC_DR_I 0x00080000
+#define JCMD_IRW_MASK 0x00001f00
+#define JCMD_IRW_SHIFT 8
+#define JCMD_DRW_MASK 0x0000003f
+
+
+#define JCTRL_FORCE_CLK 4
+#define JCTRL_EXT_EN 2
+#define JCTRL_EN 1
+
+
+#define CLKD_SFLASH 0x0f000000
+#define CLKD_SFLASH_SHIFT 24
+#define CLKD_OTP 0x000f0000
+#define CLKD_OTP_SHIFT 16
+#define CLKD_JTAG 0x00000f00
+#define CLKD_JTAG_SHIFT 8
+#define CLKD_UART 0x000000ff
+
+#define CLKD2_SROM 0x00000003
+
+
+#define CI_GPIO 0x00000001
+#define CI_EI 0x00000002
+#define CI_TEMP 0x00000004
+#define CI_SIRQ 0x00000008
+#define CI_ECI 0x00000010
+#define CI_PMU 0x00000020
+#define CI_UART 0x00000040
+#define CI_WDRESET 0x80000000
+
+
+#define SCC_SS_MASK 0x00000007
+#define SCC_SS_LPO 0x00000000
+#define SCC_SS_XTAL 0x00000001
+#define SCC_SS_PCI 0x00000002
+#define SCC_LF 0x00000200
+#define SCC_LP 0x00000400
+#define SCC_FS 0x00000800
+#define SCC_IP 0x00001000
+#define SCC_XC 0x00002000
+#define SCC_XP 0x00004000
+#define SCC_CD_MASK 0xffff0000
+#define SCC_CD_SHIFT 16
+
+
+#define SYCC_IE 0x00000001
+#define SYCC_AE 0x00000002
+#define SYCC_FP 0x00000004
+#define SYCC_AR 0x00000008
+#define SYCC_HR 0x00000010
+#define SYCC_CD_MASK 0xffff0000
+#define SYCC_CD_SHIFT 16
+
+
+#define BPIA_BYTEEN 0x0000000f
+#define BPIA_SZ1 0x00000001
+#define BPIA_SZ2 0x00000003
+#define BPIA_SZ4 0x00000007
+#define BPIA_SZ8 0x0000000f
+#define BPIA_WRITE 0x00000100
+#define BPIA_START 0x00000200
+#define BPIA_BUSY 0x00000200
+#define BPIA_ERROR 0x00000400
+
+
+#define CF_EN 0x00000001
+#define CF_EM_MASK 0x0000000e
+#define CF_EM_SHIFT 1
+#define CF_EM_FLASH 0
+#define CF_EM_SYNC 2
+#define CF_EM_PCMCIA 4
+#define CF_DS 0x00000010
+#define CF_BS 0x00000020
+#define CF_CD_MASK 0x000000c0
+#define CF_CD_SHIFT 6
+#define CF_CD_DIV2 0x00000000
+#define CF_CD_DIV3 0x00000040
+#define CF_CD_DIV4 0x00000080
+#define CF_CE 0x00000100
+#define CF_SB 0x00000200
+
+
+#define PM_W0_MASK 0x0000003f
+#define PM_W1_MASK 0x00001f00
+#define PM_W1_SHIFT 8
+#define PM_W2_MASK 0x001f0000
+#define PM_W2_SHIFT 16
+#define PM_W3_MASK 0x1f000000
+#define PM_W3_SHIFT 24
+
+
+#define PA_W0_MASK 0x0000003f
+#define PA_W1_MASK 0x00001f00
+#define PA_W1_SHIFT 8
+#define PA_W2_MASK 0x001f0000
+#define PA_W2_SHIFT 16
+#define PA_W3_MASK 0x1f000000
+#define PA_W3_SHIFT 24
+
+
+#define PI_W0_MASK 0x0000003f
+#define PI_W1_MASK 0x00001f00
+#define PI_W1_SHIFT 8
+#define PI_W2_MASK 0x001f0000
+#define PI_W2_SHIFT 16
+#define PI_W3_MASK 0x1f000000
+#define PI_W3_SHIFT 24
+
+
+#define PW_W0_MASK 0x0000001f
+#define PW_W1_MASK 0x00001f00
+#define PW_W1_SHIFT 8
+#define PW_W2_MASK 0x001f0000
+#define PW_W2_SHIFT 16
+#define PW_W3_MASK 0x1f000000
+#define PW_W3_SHIFT 24
+
+#define PW_W0 0x0000000c
+#define PW_W1 0x00000a00
+#define PW_W2 0x00020000
+#define PW_W3 0x01000000
+
+
+#define FW_W0_MASK 0x0000003f
+#define FW_W1_MASK 0x00001f00
+#define FW_W1_SHIFT 8
+#define FW_W2_MASK 0x001f0000
+#define FW_W2_SHIFT 16
+#define FW_W3_MASK 0x1f000000
+#define FW_W3_SHIFT 24
+
+
+#define SRC_START 0x80000000
+#define SRC_BUSY 0x80000000
+#define SRC_OPCODE 0x60000000
+#define SRC_OP_READ 0x00000000
+#define SRC_OP_WRITE 0x20000000
+#define SRC_OP_WRDIS 0x40000000
+#define SRC_OP_WREN 0x60000000
+#define SRC_OTPSEL 0x00000010
+#define SRC_LOCK 0x00000008
+#define SRC_SIZE_MASK 0x00000006
+#define SRC_SIZE_1K 0x00000000
+#define SRC_SIZE_4K 0x00000002
+#define SRC_SIZE_16K 0x00000004
+#define SRC_SIZE_SHIFT 1
+#define SRC_PRESENT 0x00000001
+
+
+#define PCTL_ILP_DIV_MASK 0xffff0000
+#define PCTL_ILP_DIV_SHIFT 16
+#define PCTL_PLL_PLLCTL_UPD 0x00000400
+#define PCTL_NOILP_ON_WAIT 0x00000200
+#define PCTL_HT_REQ_EN 0x00000100
+#define PCTL_ALP_REQ_EN 0x00000080
+#define PCTL_XTALFREQ_MASK 0x0000007c
+#define PCTL_XTALFREQ_SHIFT 2
+#define PCTL_ILP_DIV_EN 0x00000002
+#define PCTL_LPO_SEL 0x00000001
+
+
+#define CSTRETCH_HT 0xffff0000
+#define CSTRETCH_ALP 0x0000ffff
+
+
+#define GPIO_ONTIME_SHIFT 16
+
+
+#define CN_N1_MASK 0x3f
+#define CN_N2_MASK 0x3f00
+#define CN_N2_SHIFT 8
+#define CN_PLLC_MASK 0xf0000
+#define CN_PLLC_SHIFT 16
+
+
+#define CC_M1_MASK 0x3f
+#define CC_M2_MASK 0x3f00
+#define CC_M2_SHIFT 8
+#define CC_M3_MASK 0x3f0000
+#define CC_M3_SHIFT 16
+#define CC_MC_MASK 0x1f000000
+#define CC_MC_SHIFT 24
+
+
+#define CC_F6_2 0x02
+#define CC_F6_3 0x03
+#define CC_F6_4 0x05
+#define CC_F6_5 0x09
+#define CC_F6_6 0x11
+#define CC_F6_7 0x21
+
+#define CC_F5_BIAS 5
+
+#define CC_MC_BYPASS 0x08
+#define CC_MC_M1 0x04
+#define CC_MC_M1M2 0x02
+#define CC_MC_M1M2M3 0x01
+#define CC_MC_M1M3 0x11
+
+
+#define CC_T2_BIAS 2
+#define CC_T2M2_BIAS 3
+
+#define CC_T2MC_M1BYP 1
+#define CC_T2MC_M2BYP 2
+#define CC_T2MC_M3BYP 4
+
+
+#define CC_T6_MMASK 1
+#define CC_T6_M0 120000000
+#define CC_T6_M1 100000000
+#define SB2MIPS_T6(sb) (2 * (sb))
+
+
+#define CC_CLOCK_BASE1 24000000
+#define CC_CLOCK_BASE2 12500000
+
+
+#define CLKC_5350_N 0x0311
+#define CLKC_5350_M 0x04020009
+
+
+#define FLASH_NONE 0x000
+#define SFLASH_ST 0x100
+#define SFLASH_AT 0x200
+#define NFLASH 0x300
+#define PFLASH 0x700
+
+
+#define CC_CFG_EN 0x0001
+#define CC_CFG_EM_MASK 0x000e
+#define CC_CFG_EM_ASYNC 0x0000
+#define CC_CFG_EM_SYNC 0x0002
+#define CC_CFG_EM_PCMCIA 0x0004
+#define CC_CFG_EM_IDE 0x0006
+#define CC_CFG_DS 0x0010
+#define CC_CFG_CD_MASK 0x00e0
+#define CC_CFG_CE 0x0100
+#define CC_CFG_SB 0x0200
+#define CC_CFG_IS 0x0400
+
+
+#define CC_EB_BASE 0x1a000000
+#define CC_EB_PCMCIA_MEM 0x1a000000
+#define CC_EB_PCMCIA_IO 0x1a200000
+#define CC_EB_PCMCIA_CFG 0x1a400000
+#define CC_EB_IDE 0x1a800000
+#define CC_EB_PCMCIA1_MEM 0x1a800000
+#define CC_EB_PCMCIA1_IO 0x1aa00000
+#define CC_EB_PCMCIA1_CFG 0x1ac00000
+#define CC_EB_PROGIF 0x1b000000
+
+
+
+#define SFLASH_OPCODE 0x000000ff
+#define SFLASH_ACTION 0x00000700
+#define SFLASH_CS_ACTIVE 0x00001000
+#define SFLASH_START 0x80000000
+#define SFLASH_BUSY SFLASH_START
+
+
+#define SFLASH_ACT_OPONLY 0x0000
+#define SFLASH_ACT_OP1D 0x0100
+#define SFLASH_ACT_OP3A 0x0200
+#define SFLASH_ACT_OP3A1D 0x0300
+#define SFLASH_ACT_OP3A4D 0x0400
+#define SFLASH_ACT_OP3A4X4D 0x0500
+#define SFLASH_ACT_OP3A1X4D 0x0700
+
+
+#define SFLASH_ST_WREN 0x0006
+#define SFLASH_ST_WRDIS 0x0004
+#define SFLASH_ST_RDSR 0x0105
+#define SFLASH_ST_WRSR 0x0101
+#define SFLASH_ST_READ 0x0303
+#define SFLASH_ST_PP 0x0302
+#define SFLASH_ST_SE 0x02d8
+#define SFLASH_ST_BE 0x00c7
+#define SFLASH_ST_DP 0x00b9
+#define SFLASH_ST_RES 0x03ab
+#define SFLASH_ST_CSA 0x1000
+#define SFLASH_ST_SSE 0x0220
+
+#define SFLASH_MXIC_RDID 0x0390
+#define SFLASH_MXIC_MFID 0xc2
+
+
+#define SFLASH_ST_WIP 0x01
+#define SFLASH_ST_WEL 0x02
+#define SFLASH_ST_BP_MASK 0x1c
+#define SFLASH_ST_BP_SHIFT 2
+#define SFLASH_ST_SRWD 0x80
+
+
+#define SFLASH_AT_READ 0x07e8
+#define SFLASH_AT_PAGE_READ 0x07d2
+#define SFLASH_AT_BUF1_READ
+#define SFLASH_AT_BUF2_READ
+#define SFLASH_AT_STATUS 0x01d7
+#define SFLASH_AT_BUF1_WRITE 0x0384
+#define SFLASH_AT_BUF2_WRITE 0x0387
+#define SFLASH_AT_BUF1_ERASE_PROGRAM 0x0283
+#define SFLASH_AT_BUF2_ERASE_PROGRAM 0x0286
+#define SFLASH_AT_BUF1_PROGRAM 0x0288
+#define SFLASH_AT_BUF2_PROGRAM 0x0289
+#define SFLASH_AT_PAGE_ERASE 0x0281
+#define SFLASH_AT_BLOCK_ERASE 0x0250
+#define SFLASH_AT_BUF1_WRITE_ERASE_PROGRAM 0x0382
+#define SFLASH_AT_BUF2_WRITE_ERASE_PROGRAM 0x0385
+#define SFLASH_AT_BUF1_LOAD 0x0253
+#define SFLASH_AT_BUF2_LOAD 0x0255
+#define SFLASH_AT_BUF1_COMPARE 0x0260
+#define SFLASH_AT_BUF2_COMPARE 0x0261
+#define SFLASH_AT_BUF1_REPROGRAM 0x0258
+#define SFLASH_AT_BUF2_REPROGRAM 0x0259
+
+
+#define SFLASH_AT_READY 0x80
+#define SFLASH_AT_MISMATCH 0x40
+#define SFLASH_AT_ID_MASK 0x38
+#define SFLASH_AT_ID_SHIFT 3
+
+
+#define GSIO_START 0x80000000
+#define GSIO_BUSY GSIO_START
+
+
+
+#define UART_RX 0
+#define UART_TX 0
+#define UART_DLL 0
+#define UART_IER 1
+#define UART_DLM 1
+#define UART_IIR 2
+#define UART_FCR 2
+#define UART_LCR 3
+#define UART_MCR 4
+#define UART_LSR 5
+#define UART_MSR 6
+#define UART_SCR 7
+#define UART_LCR_DLAB 0x80
+#define UART_LCR_WLEN8 0x03
+#define UART_MCR_OUT2 0x08
+#define UART_MCR_LOOP 0x10
+#define UART_LSR_RX_FIFO 0x80
+#define UART_LSR_TDHR 0x40
+#define UART_LSR_THRE 0x20
+#define UART_LSR_BREAK 0x10
+#define UART_LSR_FRAMING 0x08
+#define UART_LSR_PARITY 0x04
+#define UART_LSR_OVERRUN 0x02
+#define UART_LSR_RXRDY 0x01
+#define UART_FCR_FIFO_ENABLE 1
+
+
+#define UART_IIR_FIFO_MASK 0xc0
+#define UART_IIR_INT_MASK 0xf
+#define UART_IIR_MDM_CHG 0x0
+#define UART_IIR_NOINT 0x1
+#define UART_IIR_THRE 0x2
+#define UART_IIR_RCVD_DATA 0x4
+#define UART_IIR_RCVR_STATUS 0x6
+#define UART_IIR_CHAR_TIME 0xc
+
+
+#define UART_IER_EDSSI 8
+#define UART_IER_ELSI 4
+#define UART_IER_ETBEI 2
+#define UART_IER_ERBFI 1
+
+
+#define PST_EXTLPOAVAIL 0x0100
+#define PST_WDRESET 0x0080
+#define PST_INTPEND 0x0040
+#define PST_SBCLKST 0x0030
+#define PST_SBCLKST_ILP 0x0010
+#define PST_SBCLKST_ALP 0x0020
+#define PST_SBCLKST_HT 0x0030
+#define PST_ALPAVAIL 0x0008
+#define PST_HTAVAIL 0x0004
+#define PST_RESINIT 0x0003
+
+
+#define PCAP_REV_MASK 0x000000ff
+#define PCAP_RC_MASK 0x00001f00
+#define PCAP_RC_SHIFT 8
+#define PCAP_TC_MASK 0x0001e000
+#define PCAP_TC_SHIFT 13
+#define PCAP_PC_MASK 0x001e0000
+#define PCAP_PC_SHIFT 17
+#define PCAP_VC_MASK 0x01e00000
+#define PCAP_VC_SHIFT 21
+#define PCAP_CC_MASK 0x1e000000
+#define PCAP_CC_SHIFT 25
+#define PCAP5_PC_MASK 0x003e0000
+#define PCAP5_PC_SHIFT 17
+#define PCAP5_VC_MASK 0x07c00000
+#define PCAP5_VC_SHIFT 22
+#define PCAP5_CC_MASK 0xf8000000
+#define PCAP5_CC_SHIFT 27
+
+
+
+#define PRRT_TIME_MASK 0x03ff
+#define PRRT_INTEN 0x0400
+#define PRRT_REQ_ACTIVE 0x0800
+#define PRRT_ALP_REQ 0x1000
+#define PRRT_HT_REQ 0x2000
+#define PRRT_HQ_REQ 0x4000
+
+
+#define PMURES_BIT(bit) (1 << (bit))
+
+
+#define PMURES_MAX_RESNUM 30
+
+
+#define PMU_CHIPCTL0 0
+
+
+#define PMU_CC1_CLKREQ_TYPE_SHIFT 19
+#define PMU_CC1_CLKREQ_TYPE_MASK (1 << PMU_CC1_CLKREQ_TYPE_SHIFT)
+
+#define CLKREQ_TYPE_CONFIG_OPENDRAIN 0
+#define CLKREQ_TYPE_CONFIG_PUSHPULL 1
+
+
+#define PMU_CHIPCTL1 1
+#define PMU_CC1_RXC_DLL_BYPASS 0x00010000
+
+#define PMU_CC1_IF_TYPE_MASK 0x00000030
+#define PMU_CC1_IF_TYPE_RMII 0x00000000
+#define PMU_CC1_IF_TYPE_MII 0x00000010
+#define PMU_CC1_IF_TYPE_RGMII 0x00000020
+
+#define PMU_CC1_SW_TYPE_MASK 0x000000c0
+#define PMU_CC1_SW_TYPE_EPHY 0x00000000
+#define PMU_CC1_SW_TYPE_EPHYMII 0x00000040
+#define PMU_CC1_SW_TYPE_EPHYRMII 0x00000080
+#define PMU_CC1_SW_TYPE_RGMII 0x000000c0
+
+
+#define PMU_CHIPCTL2 2
+
+
+#define PMU_CHIPCTL3 3
+
+#define PMU_CC3_ENABLE_SDIO_WAKEUP_SHIFT 19
+#define PMU_CC3_ENABLE_RF_SHIFT 22
+#define PMU_CC3_RF_DISABLE_IVALUE_SHIFT 23
+
+
+
+
+
+#define PMU0_PLL0_PLLCTL0 0
+#define PMU0_PLL0_PC0_PDIV_MASK 1
+#define PMU0_PLL0_PC0_PDIV_FREQ 25000
+#define PMU0_PLL0_PC0_DIV_ARM_MASK 0x00000038
+#define PMU0_PLL0_PC0_DIV_ARM_SHIFT 3
+#define PMU0_PLL0_PC0_DIV_ARM_BASE 8
+
+
+#define PMU0_PLL0_PC0_DIV_ARM_110MHZ 0
+#define PMU0_PLL0_PC0_DIV_ARM_97_7MHZ 1
+#define PMU0_PLL0_PC0_DIV_ARM_88MHZ 2
+#define PMU0_PLL0_PC0_DIV_ARM_80MHZ 3
+#define PMU0_PLL0_PC0_DIV_ARM_73_3MHZ 4
+#define PMU0_PLL0_PC0_DIV_ARM_67_7MHZ 5
+#define PMU0_PLL0_PC0_DIV_ARM_62_9MHZ 6
+#define PMU0_PLL0_PC0_DIV_ARM_58_6MHZ 7
+
+
+#define PMU0_PLL0_PLLCTL1 1
+#define PMU0_PLL0_PC1_WILD_INT_MASK 0xf0000000
+#define PMU0_PLL0_PC1_WILD_INT_SHIFT 28
+#define PMU0_PLL0_PC1_WILD_FRAC_MASK 0x0fffff00
+#define PMU0_PLL0_PC1_WILD_FRAC_SHIFT 8
+#define PMU0_PLL0_PC1_STOP_MOD 0x00000040
+
+
+#define PMU0_PLL0_PLLCTL2 2
+#define PMU0_PLL0_PC2_WILD_INT_MASK 0xf
+#define PMU0_PLL0_PC2_WILD_INT_SHIFT 4
+
+
+
+#define PMU1_PLL0_PLLCTL0 0
+#define PMU1_PLL0_PC0_P1DIV_MASK 0x00f00000
+#define PMU1_PLL0_PC0_P1DIV_SHIFT 20
+#define PMU1_PLL0_PC0_P2DIV_MASK 0x0f000000
+#define PMU1_PLL0_PC0_P2DIV_SHIFT 24
+
+
+#define PMU1_PLL0_PLLCTL1 1
+#define PMU1_PLL0_PC1_M1DIV_MASK 0x000000ff
+#define PMU1_PLL0_PC1_M1DIV_SHIFT 0
+#define PMU1_PLL0_PC1_M2DIV_MASK 0x0000ff00
+#define PMU1_PLL0_PC1_M2DIV_SHIFT 8
+#define PMU1_PLL0_PC1_M3DIV_MASK 0x00ff0000
+#define PMU1_PLL0_PC1_M3DIV_SHIFT 16
+#define PMU1_PLL0_PC1_M4DIV_MASK 0xff000000
+#define PMU1_PLL0_PC1_M4DIV_SHIFT 24
+#define PMU1_PLL0_PC1_M4DIV_BY_9 9
+#define PMU1_PLL0_PC1_M4DIV_BY_18 0x12
+#define PMU1_PLL0_PC1_M4DIV_BY_36 0x24
+
+#define DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT 8
+#define DOT11MAC_880MHZ_CLK_DIVISOR_MASK (0xFF << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
+#define DOT11MAC_880MHZ_CLK_DIVISOR_VAL (0xE << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
+
+
+#define PMU1_PLL0_PLLCTL2 2
+#define PMU1_PLL0_PC2_M5DIV_MASK 0x000000ff
+#define PMU1_PLL0_PC2_M5DIV_SHIFT 0
+#define PMU1_PLL0_PC2_M5DIV_BY_12 0xc
+#define PMU1_PLL0_PC2_M5DIV_BY_18 0x12
+#define PMU1_PLL0_PC2_M5DIV_BY_36 0x24
+#define PMU1_PLL0_PC2_M6DIV_MASK 0x0000ff00
+#define PMU1_PLL0_PC2_M6DIV_SHIFT 8
+#define PMU1_PLL0_PC2_M6DIV_BY_18 0x12
+#define PMU1_PLL0_PC2_M6DIV_BY_36 0x24
+#define PMU1_PLL0_PC2_NDIV_MODE_MASK 0x000e0000
+#define PMU1_PLL0_PC2_NDIV_MODE_SHIFT 17
+#define PMU1_PLL0_PC2_NDIV_MODE_MASH 1
+#define PMU1_PLL0_PC2_NDIV_MODE_MFB 2
+#define PMU1_PLL0_PC2_NDIV_INT_MASK 0x1ff00000
+#define PMU1_PLL0_PC2_NDIV_INT_SHIFT 20
+
+
+#define PMU1_PLL0_PLLCTL3 3
+#define PMU1_PLL0_PC3_NDIV_FRAC_MASK 0x00ffffff
+#define PMU1_PLL0_PC3_NDIV_FRAC_SHIFT 0
+
+
+#define PMU1_PLL0_PLLCTL4 4
+
+
+#define PMU1_PLL0_PLLCTL5 5
+#define PMU1_PLL0_PC5_CLK_DRV_MASK 0xffffff00
+#define PMU1_PLL0_PC5_CLK_DRV_SHIFT 8
+
+
+#define PMU2_PHY_PLL_PLLCTL 4
+#define PMU2_SI_PLL_PLLCTL 10
+
+
+
+
+#define PMU2_PLL_PLLCTL0 0
+#define PMU2_PLL_PC0_P1DIV_MASK 0x00f00000
+#define PMU2_PLL_PC0_P1DIV_SHIFT 20
+#define PMU2_PLL_PC0_P2DIV_MASK 0x0f000000
+#define PMU2_PLL_PC0_P2DIV_SHIFT 24
+
+
+#define PMU2_PLL_PLLCTL1 1
+#define PMU2_PLL_PC1_M1DIV_MASK 0x000000ff
+#define PMU2_PLL_PC1_M1DIV_SHIFT 0
+#define PMU2_PLL_PC1_M2DIV_MASK 0x0000ff00
+#define PMU2_PLL_PC1_M2DIV_SHIFT 8
+#define PMU2_PLL_PC1_M3DIV_MASK 0x00ff0000
+#define PMU2_PLL_PC1_M3DIV_SHIFT 16
+#define PMU2_PLL_PC1_M4DIV_MASK 0xff000000
+#define PMU2_PLL_PC1_M4DIV_SHIFT 24
+
+
+#define PMU2_PLL_PLLCTL2 2
+#define PMU2_PLL_PC2_M5DIV_MASK 0x000000ff
+#define PMU2_PLL_PC2_M5DIV_SHIFT 0
+#define PMU2_PLL_PC2_M6DIV_MASK 0x0000ff00
+#define PMU2_PLL_PC2_M6DIV_SHIFT 8
+#define PMU2_PLL_PC2_NDIV_MODE_MASK 0x000e0000
+#define PMU2_PLL_PC2_NDIV_MODE_SHIFT 17
+#define PMU2_PLL_PC2_NDIV_INT_MASK 0x1ff00000
+#define PMU2_PLL_PC2_NDIV_INT_SHIFT 20
+
+
+#define PMU2_PLL_PLLCTL3 3
+#define PMU2_PLL_PC3_NDIV_FRAC_MASK 0x00ffffff
+#define PMU2_PLL_PC3_NDIV_FRAC_SHIFT 0
+
+
+#define PMU2_PLL_PLLCTL4 4
+
+
+#define PMU2_PLL_PLLCTL5 5
+#define PMU2_PLL_PC5_CLKDRIVE_CH1_MASK 0x00000f00
+#define PMU2_PLL_PC5_CLKDRIVE_CH1_SHIFT 8
+#define PMU2_PLL_PC5_CLKDRIVE_CH2_MASK 0x0000f000
+#define PMU2_PLL_PC5_CLKDRIVE_CH2_SHIFT 12
+#define PMU2_PLL_PC5_CLKDRIVE_CH3_MASK 0x000f0000
+#define PMU2_PLL_PC5_CLKDRIVE_CH3_SHIFT 16
+#define PMU2_PLL_PC5_CLKDRIVE_CH4_MASK 0x00f00000
+#define PMU2_PLL_PC5_CLKDRIVE_CH4_SHIFT 20
+#define PMU2_PLL_PC5_CLKDRIVE_CH5_MASK 0x0f000000
+#define PMU2_PLL_PC5_CLKDRIVE_CH5_SHIFT 24
+#define PMU2_PLL_PC5_CLKDRIVE_CH6_MASK 0xf0000000
+#define PMU2_PLL_PC5_CLKDRIVE_CH6_SHIFT 28
+
+
+#define PMU5_PLL_P1P2_OFF 0
+#define PMU5_PLL_P1_MASK 0x0f000000
+#define PMU5_PLL_P1_SHIFT 24
+#define PMU5_PLL_P2_MASK 0x00f00000
+#define PMU5_PLL_P2_SHIFT 20
+#define PMU5_PLL_M14_OFF 1
+#define PMU5_PLL_MDIV_MASK 0x000000ff
+#define PMU5_PLL_MDIV_WIDTH 8
+#define PMU5_PLL_NM5_OFF 2
+#define PMU5_PLL_NDIV_MASK 0xfff00000
+#define PMU5_PLL_NDIV_SHIFT 20
+#define PMU5_PLL_NDIV_MODE_MASK 0x000e0000
+#define PMU5_PLL_NDIV_MODE_SHIFT 17
+#define PMU5_PLL_FMAB_OFF 3
+#define PMU5_PLL_MRAT_MASK 0xf0000000
+#define PMU5_PLL_MRAT_SHIFT 28
+#define PMU5_PLL_ABRAT_MASK 0x08000000
+#define PMU5_PLL_ABRAT_SHIFT 27
+#define PMU5_PLL_FDIV_MASK 0x07ffffff
+#define PMU5_PLL_PLLCTL_OFF 4
+#define PMU5_PLL_PCHI_OFF 5
+#define PMU5_PLL_PCHI_MASK 0x0000003f
+
+
+#define PMU_XTALFREQ_REG_ILPCTR_MASK 0x00001FFF
+#define PMU_XTALFREQ_REG_MEASURE_MASK 0x80000000
+#define PMU_XTALFREQ_REG_MEASURE_SHIFT 31
+
+
+#define PMU5_MAINPLL_CPU 1
+#define PMU5_MAINPLL_MEM 2
+#define PMU5_MAINPLL_SI 3
+
+
+#define PMU4706_MAINPLL_PLL0 0
+#define PMU6_4706_PROCPLL_OFF 4
+#define PMU6_4706_PROC_P2DIV_MASK 0x000f0000
+#define PMU6_4706_PROC_P2DIV_SHIFT 16
+#define PMU6_4706_PROC_P1DIV_MASK 0x0000f000
+#define PMU6_4706_PROC_P1DIV_SHIFT 12
+#define PMU6_4706_PROC_NDIV_INT_MASK 0x00000ff8
+#define PMU6_4706_PROC_NDIV_INT_SHIFT 3
+#define PMU6_4706_PROC_NDIV_MODE_MASK 0x00000007
+#define PMU6_4706_PROC_NDIV_MODE_SHIFT 0
+
+#define PMU7_PLL_PLLCTL7 7
+#define PMU7_PLL_CTL7_M4DIV_MASK 0xff000000
+#define PMU7_PLL_CTL7_M4DIV_SHIFT 24
+#define PMU7_PLL_CTL7_M4DIV_BY_6 6
+#define PMU7_PLL_CTL7_M4DIV_BY_12 0xc
+#define PMU7_PLL_CTL7_M4DIV_BY_24 0x18
+#define PMU7_PLL_PLLCTL8 8
+#define PMU7_PLL_CTL8_M5DIV_MASK 0x000000ff
+#define PMU7_PLL_CTL8_M5DIV_SHIFT 0
+#define PMU7_PLL_CTL8_M5DIV_BY_8 8
+#define PMU7_PLL_CTL8_M5DIV_BY_12 0xc
+#define PMU7_PLL_CTL8_M5DIV_BY_24 0x18
+#define PMU7_PLL_CTL8_M6DIV_MASK 0x0000ff00
+#define PMU7_PLL_CTL8_M6DIV_SHIFT 8
+#define PMU7_PLL_CTL8_M6DIV_BY_12 0xc
+#define PMU7_PLL_CTL8_M6DIV_BY_24 0x18
+#define PMU7_PLL_PLLCTL11 11
+#define PMU7_PLL_PLLCTL11_MASK 0xffffff00
+#define PMU7_PLL_PLLCTL11_VAL 0x22222200
+
+
+#define PMU15_PLL_PLLCTL0 0
+#define PMU15_PLL_PC0_CLKSEL_MASK 0x00000003
+#define PMU15_PLL_PC0_CLKSEL_SHIFT 0
+#define PMU15_PLL_PC0_FREQTGT_MASK 0x003FFFFC
+#define PMU15_PLL_PC0_FREQTGT_SHIFT 2
+#define PMU15_PLL_PC0_PRESCALE_MASK 0x00C00000
+#define PMU15_PLL_PC0_PRESCALE_SHIFT 22
+#define PMU15_PLL_PC0_KPCTRL_MASK 0x07000000
+#define PMU15_PLL_PC0_KPCTRL_SHIFT 24
+#define PMU15_PLL_PC0_FCNTCTRL_MASK 0x38000000
+#define PMU15_PLL_PC0_FCNTCTRL_SHIFT 27
+#define PMU15_PLL_PC0_FDCMODE_MASK 0x40000000
+#define PMU15_PLL_PC0_FDCMODE_SHIFT 30
+#define PMU15_PLL_PC0_CTRLBIAS_MASK 0x80000000
+#define PMU15_PLL_PC0_CTRLBIAS_SHIFT 31
+
+#define PMU15_PLL_PLLCTL1 1
+#define PMU15_PLL_PC1_BIAS_CTLM_MASK 0x00000060
+#define PMU15_PLL_PC1_BIAS_CTLM_SHIFT 5
+#define PMU15_PLL_PC1_BIAS_CTLM_RST_MASK 0x00000040
+#define PMU15_PLL_PC1_BIAS_CTLM_RST_SHIFT 6
+#define PMU15_PLL_PC1_BIAS_SS_DIVR_MASK 0x0001FF80
+#define PMU15_PLL_PC1_BIAS_SS_DIVR_SHIFT 7
+#define PMU15_PLL_PC1_BIAS_SS_RSTVAL_MASK 0x03FE0000
+#define PMU15_PLL_PC1_BIAS_SS_RSTVAL_SHIFT 17
+#define PMU15_PLL_PC1_BIAS_INTG_BW_MASK 0x0C000000
+#define PMU15_PLL_PC1_BIAS_INTG_BW_SHIFT 26
+#define PMU15_PLL_PC1_BIAS_INTG_BYP_MASK 0x10000000
+#define PMU15_PLL_PC1_BIAS_INTG_BYP_SHIFT 28
+#define PMU15_PLL_PC1_OPENLP_EN_MASK 0x40000000
+#define PMU15_PLL_PC1_OPENLP_EN_SHIFT 30
+
+#define PMU15_PLL_PLLCTL2 2
+#define PMU15_PLL_PC2_CTEN_MASK 0x00000001
+#define PMU15_PLL_PC2_CTEN_SHIFT 0
+
+#define PMU15_PLL_PLLCTL3 3
+#define PMU15_PLL_PC3_DITHER_EN_MASK 0x00000001
+#define PMU15_PLL_PC3_DITHER_EN_SHIFT 0
+#define PMU15_PLL_PC3_DCOCTLSP_MASK 0xFE000000
+#define PMU15_PLL_PC3_DCOCTLSP_SHIFT 25
+#define PMU15_PLL_PC3_DCOCTLSP_DIV2EN_MASK 0x01
+#define PMU15_PLL_PC3_DCOCTLSP_DIV2EN_SHIFT 0
+#define PMU15_PLL_PC3_DCOCTLSP_CH0EN_MASK 0x02
+#define PMU15_PLL_PC3_DCOCTLSP_CH0EN_SHIFT 1
+#define PMU15_PLL_PC3_DCOCTLSP_CH1EN_MASK 0x04
+#define PMU15_PLL_PC3_DCOCTLSP_CH1EN_SHIFT 2
+#define PMU15_PLL_PC3_DCOCTLSP_CH0SEL_MASK 0x18
+#define PMU15_PLL_PC3_DCOCTLSP_CH0SEL_SHIFT 3
+#define PMU15_PLL_PC3_DCOCTLSP_CH1SEL_MASK 0x60
+#define PMU15_PLL_PC3_DCOCTLSP_CH1SEL_SHIFT 5
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV1 0
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV2 1
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV3 2
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV5 3
+
+#define PMU15_PLL_PLLCTL4 4
+#define PMU15_PLL_PC4_FLLCLK1_DIV_MASK 0x00000007
+#define PMU15_PLL_PC4_FLLCLK1_DIV_SHIFT 0
+#define PMU15_PLL_PC4_FLLCLK2_DIV_MASK 0x00000038
+#define PMU15_PLL_PC4_FLLCLK2_DIV_SHIFT 3
+#define PMU15_PLL_PC4_FLLCLK3_DIV_MASK 0x000001C0
+#define PMU15_PLL_PC4_FLLCLK3_DIV_SHIFT 6
+#define PMU15_PLL_PC4_DBGMODE_MASK 0x00000E00
+#define PMU15_PLL_PC4_DBGMODE_SHIFT 9
+#define PMU15_PLL_PC4_FLL480_CTLSP_LK_MASK 0x00001000
+#define PMU15_PLL_PC4_FLL480_CTLSP_LK_SHIFT 12
+#define PMU15_PLL_PC4_FLL480_CTLSP_MASK 0x000FE000
+#define PMU15_PLL_PC4_FLL480_CTLSP_SHIFT 13
+#define PMU15_PLL_PC4_DINPOL_MASK 0x00100000
+#define PMU15_PLL_PC4_DINPOL_SHIFT 20
+#define PMU15_PLL_PC4_CLKOUT_PD_MASK 0x00200000
+#define PMU15_PLL_PC4_CLKOUT_PD_SHIFT 21
+#define PMU15_PLL_PC4_CLKDIV2_PD_MASK 0x00400000
+#define PMU15_PLL_PC4_CLKDIV2_PD_SHIFT 22
+#define PMU15_PLL_PC4_CLKDIV4_PD_MASK 0x00800000
+#define PMU15_PLL_PC4_CLKDIV4_PD_SHIFT 23
+#define PMU15_PLL_PC4_CLKDIV8_PD_MASK 0x01000000
+#define PMU15_PLL_PC4_CLKDIV8_PD_SHIFT 24
+#define PMU15_PLL_PC4_CLKDIV16_PD_MASK 0x02000000
+#define PMU15_PLL_PC4_CLKDIV16_PD_SHIFT 25
+#define PMU15_PLL_PC4_TEST_EN_MASK 0x04000000
+#define PMU15_PLL_PC4_TEST_EN_SHIFT 26
+
+#define PMU15_PLL_PLLCTL5 5
+#define PMU15_PLL_PC5_FREQTGT_MASK 0x000FFFFF
+#define PMU15_PLL_PC5_FREQTGT_SHIFT 0
+#define PMU15_PLL_PC5_DCOCTLSP_MASK 0x07F00000
+#define PMU15_PLL_PC5_DCOCTLSP_SHIFT 20
+#define PMU15_PLL_PC5_PRESCALE_MASK 0x18000000
+#define PMU15_PLL_PC5_PRESCALE_SHIFT 27
+
+#define PMU15_PLL_PLLCTL6 6
+#define PMU15_PLL_PC6_FREQTGT_MASK 0x000FFFFF
+#define PMU15_PLL_PC6_FREQTGT_SHIFT 0
+#define PMU15_PLL_PC6_DCOCTLSP_MASK 0x07F00000
+#define PMU15_PLL_PC6_DCOCTLSP_SHIFT 20
+#define PMU15_PLL_PC6_PRESCALE_MASK 0x18000000
+#define PMU15_PLL_PC6_PRESCALE_SHIFT 27
+
+#define PMU15_FREQTGT_480_DEFAULT 0x19AB1
+#define PMU15_FREQTGT_492_DEFAULT 0x1A4F5
+#define PMU15_ARM_96MHZ 96000000
+#define PMU15_ARM_98MHZ 98400000
+#define PMU15_ARM_97MHZ 97000000
+
+
+#define PMU17_PLLCTL2_NDIVTYPE_MASK 0x00000070
+#define PMU17_PLLCTL2_NDIVTYPE_SHIFT 4
+
+#define PMU17_PLLCTL2_NDIV_MODE_INT 0
+#define PMU17_PLLCTL2_NDIV_MODE_INT1B8 1
+#define PMU17_PLLCTL2_NDIV_MODE_MASH111 2
+#define PMU17_PLLCTL2_NDIV_MODE_MASH111B8 3
+
+#define PMU17_PLLCTL0_BBPLL_PWRDWN 0
+#define PMU17_PLLCTL0_BBPLL_DRST 3
+#define PMU17_PLLCTL0_BBPLL_DISBL_CLK 8
+
+
+#define PMU4716_MAINPLL_PLL0 12
+
+
+#define PMU5356_MAINPLL_PLL0 0
+#define PMU5357_MAINPLL_PLL0 0
+
+
+#define RES4716_PROC_PLL_ON 0x00000040
+#define RES4716_PROC_HT_AVAIL 0x00000080
+
+
+#define CCTRL_471X_I2S_PINS_ENABLE 0x0080
+
+
+
+#define CCTRL_5357_I2S_PINS_ENABLE 0x00040000
+#define CCTRL_5357_I2CSPI_PINS_ENABLE 0x00080000
+
+
+#define RES5354_EXT_SWITCHER_PWM 0
+#define RES5354_BB_SWITCHER_PWM 1
+#define RES5354_BB_SWITCHER_BURST 2
+#define RES5354_BB_EXT_SWITCHER_BURST 3
+#define RES5354_ILP_REQUEST 4
+#define RES5354_RADIO_SWITCHER_PWM 5
+#define RES5354_RADIO_SWITCHER_BURST 6
+#define RES5354_ROM_SWITCH 7
+#define RES5354_PA_REF_LDO 8
+#define RES5354_RADIO_LDO 9
+#define RES5354_AFE_LDO 10
+#define RES5354_PLL_LDO 11
+#define RES5354_BG_FILTBYP 12
+#define RES5354_TX_FILTBYP 13
+#define RES5354_RX_FILTBYP 14
+#define RES5354_XTAL_PU 15
+#define RES5354_XTAL_EN 16
+#define RES5354_BB_PLL_FILTBYP 17
+#define RES5354_RF_PLL_FILTBYP 18
+#define RES5354_BB_PLL_PU 19
+
+
+#define CCTRL5357_EXTPA (1<<14)
+#define CCTRL5357_ANT_MUX_2o3 (1<<15)
+#define CCTRL5357_NFLASH (1<<16)
+
+
+#define CCTRL43217_EXTPA_C0 (1<<13)
+#define CCTRL43217_EXTPA_C1 (1<<8)
+
+
+#define RES4328_EXT_SWITCHER_PWM 0
+#define RES4328_BB_SWITCHER_PWM 1
+#define RES4328_BB_SWITCHER_BURST 2
+#define RES4328_BB_EXT_SWITCHER_BURST 3
+#define RES4328_ILP_REQUEST 4
+#define RES4328_RADIO_SWITCHER_PWM 5
+#define RES4328_RADIO_SWITCHER_BURST 6
+#define RES4328_ROM_SWITCH 7
+#define RES4328_PA_REF_LDO 8
+#define RES4328_RADIO_LDO 9
+#define RES4328_AFE_LDO 10
+#define RES4328_PLL_LDO 11
+#define RES4328_BG_FILTBYP 12
+#define RES4328_TX_FILTBYP 13
+#define RES4328_RX_FILTBYP 14
+#define RES4328_XTAL_PU 15
+#define RES4328_XTAL_EN 16
+#define RES4328_BB_PLL_FILTBYP 17
+#define RES4328_RF_PLL_FILTBYP 18
+#define RES4328_BB_PLL_PU 19
+
+
+#define RES4325_BUCK_BOOST_BURST 0
+#define RES4325_CBUCK_BURST 1
+#define RES4325_CBUCK_PWM 2
+#define RES4325_CLDO_CBUCK_BURST 3
+#define RES4325_CLDO_CBUCK_PWM 4
+#define RES4325_BUCK_BOOST_PWM 5
+#define RES4325_ILP_REQUEST 6
+#define RES4325_ABUCK_BURST 7
+#define RES4325_ABUCK_PWM 8
+#define RES4325_LNLDO1_PU 9
+#define RES4325_OTP_PU 10
+#define RES4325_LNLDO3_PU 11
+#define RES4325_LNLDO4_PU 12
+#define RES4325_XTAL_PU 13
+#define RES4325_ALP_AVAIL 14
+#define RES4325_RX_PWRSW_PU 15
+#define RES4325_TX_PWRSW_PU 16
+#define RES4325_RFPLL_PWRSW_PU 17
+#define RES4325_LOGEN_PWRSW_PU 18
+#define RES4325_AFE_PWRSW_PU 19
+#define RES4325_BBPLL_PWRSW_PU 20
+#define RES4325_HT_AVAIL 21
+
+
+#define RES4325B0_CBUCK_LPOM 1
+#define RES4325B0_CBUCK_BURST 2
+#define RES4325B0_CBUCK_PWM 3
+#define RES4325B0_CLDO_PU 4
+
+
+#define RES4325C1_LNLDO2_PU 12
+
+
+#define CST4325_SPROM_OTP_SEL_MASK 0x00000003
+#define CST4325_DEFCIS_SEL 0
+#define CST4325_SPROM_SEL 1
+#define CST4325_OTP_SEL 2
+#define CST4325_OTP_PWRDN 3
+#define CST4325_SDIO_USB_MODE_MASK 0x00000004
+#define CST4325_SDIO_USB_MODE_SHIFT 2
+#define CST4325_RCAL_VALID_MASK 0x00000008
+#define CST4325_RCAL_VALID_SHIFT 3
+#define CST4325_RCAL_VALUE_MASK 0x000001f0
+#define CST4325_RCAL_VALUE_SHIFT 4
+#define CST4325_PMUTOP_2B_MASK 0x00000200
+#define CST4325_PMUTOP_2B_SHIFT 9
+
+#define RES4329_RESERVED0 0
+#define RES4329_CBUCK_LPOM 1
+#define RES4329_CBUCK_BURST 2
+#define RES4329_CBUCK_PWM 3
+#define RES4329_CLDO_PU 4
+#define RES4329_PALDO_PU 5
+#define RES4329_ILP_REQUEST 6
+#define RES4329_RESERVED7 7
+#define RES4329_RESERVED8 8
+#define RES4329_LNLDO1_PU 9
+#define RES4329_OTP_PU 10
+#define RES4329_RESERVED11 11
+#define RES4329_LNLDO2_PU 12
+#define RES4329_XTAL_PU 13
+#define RES4329_ALP_AVAIL 14
+#define RES4329_RX_PWRSW_PU 15
+#define RES4329_TX_PWRSW_PU 16
+#define RES4329_RFPLL_PWRSW_PU 17
+#define RES4329_LOGEN_PWRSW_PU 18
+#define RES4329_AFE_PWRSW_PU 19
+#define RES4329_BBPLL_PWRSW_PU 20
+#define RES4329_HT_AVAIL 21
+
+#define CST4329_SPROM_OTP_SEL_MASK 0x00000003
+#define CST4329_DEFCIS_SEL 0
+#define CST4329_SPROM_SEL 1
+#define CST4329_OTP_SEL 2
+#define CST4329_OTP_PWRDN 3
+#define CST4329_SPI_SDIO_MODE_MASK 0x00000004
+#define CST4329_SPI_SDIO_MODE_SHIFT 2
+
+
+#define CST4312_SPROM_OTP_SEL_MASK 0x00000003
+#define CST4312_DEFCIS_SEL 0
+#define CST4312_SPROM_SEL 1
+#define CST4312_OTP_SEL 2
+#define CST4312_OTP_BAD 3
+
+
+#define RES4312_SWITCHER_BURST 0
+#define RES4312_SWITCHER_PWM 1
+#define RES4312_PA_REF_LDO 2
+#define RES4312_CORE_LDO_BURST 3
+#define RES4312_CORE_LDO_PWM 4
+#define RES4312_RADIO_LDO 5
+#define RES4312_ILP_REQUEST 6
+#define RES4312_BG_FILTBYP 7
+#define RES4312_TX_FILTBYP 8
+#define RES4312_RX_FILTBYP 9
+#define RES4312_XTAL_PU 10
+#define RES4312_ALP_AVAIL 11
+#define RES4312_BB_PLL_FILTBYP 12
+#define RES4312_RF_PLL_FILTBYP 13
+#define RES4312_HT_AVAIL 14
+
+
+#define RES4322_RF_LDO 0
+#define RES4322_ILP_REQUEST 1
+#define RES4322_XTAL_PU 2
+#define RES4322_ALP_AVAIL 3
+#define RES4322_SI_PLL_ON 4
+#define RES4322_HT_SI_AVAIL 5
+#define RES4322_PHY_PLL_ON 6
+#define RES4322_HT_PHY_AVAIL 7
+#define RES4322_OTP_PU 8
+
+
+#define CST4322_XTAL_FREQ_20_40MHZ 0x00000020
+#define CST4322_SPROM_OTP_SEL_MASK 0x000000c0
+#define CST4322_SPROM_OTP_SEL_SHIFT 6
+#define CST4322_NO_SPROM_OTP 0
+#define CST4322_SPROM_PRESENT 1
+#define CST4322_OTP_PRESENT 2
+#define CST4322_PCI_OR_USB 0x00000100
+#define CST4322_BOOT_MASK 0x00000600
+#define CST4322_BOOT_SHIFT 9
+#define CST4322_BOOT_FROM_SRAM 0
+#define CST4322_BOOT_FROM_ROM 1
+#define CST4322_BOOT_FROM_FLASH 2
+#define CST4322_BOOT_FROM_INVALID 3
+#define CST4322_ILP_DIV_EN 0x00000800
+#define CST4322_FLASH_TYPE_MASK 0x00001000
+#define CST4322_FLASH_TYPE_SHIFT 12
+#define CST4322_FLASH_TYPE_SHIFT_ST 0
+#define CST4322_FLASH_TYPE_SHIFT_ATMEL 1
+#define CST4322_ARM_TAP_SEL 0x00002000
+#define CST4322_RES_INIT_MODE_MASK 0x0000c000
+#define CST4322_RES_INIT_MODE_SHIFT 14
+#define CST4322_RES_INIT_MODE_ILPAVAIL 0
+#define CST4322_RES_INIT_MODE_ILPREQ 1
+#define CST4322_RES_INIT_MODE_ALPAVAIL 2
+#define CST4322_RES_INIT_MODE_HTAVAIL 3
+#define CST4322_PCIPLLCLK_GATING 0x00010000
+#define CST4322_CLK_SWITCH_PCI_TO_ALP 0x00020000
+#define CST4322_PCI_CARDBUS_MODE 0x00040000
+
+
+#define CCTRL43224_GPIO_TOGGLE 0x8000
+#define CCTRL_43224A0_12MA_LED_DRIVE 0x00F000F0
+#define CCTRL_43224B0_12MA_LED_DRIVE 0xF0
+
+
+#define RES43236_REGULATOR 0
+#define RES43236_ILP_REQUEST 1
+#define RES43236_XTAL_PU 2
+#define RES43236_ALP_AVAIL 3
+#define RES43236_SI_PLL_ON 4
+#define RES43236_HT_SI_AVAIL 5
+
+
+#define CCTRL43236_BT_COEXIST (1<<0)
+#define CCTRL43236_SECI (1<<1)
+#define CCTRL43236_EXT_LNA (1<<2)
+#define CCTRL43236_ANT_MUX_2o3 (1<<3)
+#define CCTRL43236_GSIO (1<<4)
+
+
+#define CST43236_SFLASH_MASK 0x00000040
+#define CST43236_OTP_SEL_MASK 0x00000080
+#define CST43236_OTP_SEL_SHIFT 7
+#define CST43236_HSIC_MASK 0x00000100
+#define CST43236_BP_CLK 0x00000200
+#define CST43236_BOOT_MASK 0x00001800
+#define CST43236_BOOT_SHIFT 11
+#define CST43236_BOOT_FROM_SRAM 0
+#define CST43236_BOOT_FROM_ROM 1
+#define CST43236_BOOT_FROM_FLASH 2
+#define CST43236_BOOT_FROM_INVALID 3
+
+
+#define RES43237_REGULATOR 0
+#define RES43237_ILP_REQUEST 1
+#define RES43237_XTAL_PU 2
+#define RES43237_ALP_AVAIL 3
+#define RES43237_SI_PLL_ON 4
+#define RES43237_HT_SI_AVAIL 5
+
+
+#define CCTRL43237_BT_COEXIST (1<<0)
+#define CCTRL43237_SECI (1<<1)
+#define CCTRL43237_EXT_LNA (1<<2)
+#define CCTRL43237_ANT_MUX_2o3 (1<<3)
+#define CCTRL43237_GSIO (1<<4)
+
+
+#define CST43237_SFLASH_MASK 0x00000040
+#define CST43237_OTP_SEL_MASK 0x00000080
+#define CST43237_OTP_SEL_SHIFT 7
+#define CST43237_HSIC_MASK 0x00000100
+#define CST43237_BP_CLK 0x00000200
+#define CST43237_BOOT_MASK 0x00001800
+#define CST43237_BOOT_SHIFT 11
+#define CST43237_BOOT_FROM_SRAM 0
+#define CST43237_BOOT_FROM_ROM 1
+#define CST43237_BOOT_FROM_FLASH 2
+#define CST43237_BOOT_FROM_INVALID 3
+
+
+#define RES43239_OTP_PU 9
+#define RES43239_MACPHY_CLKAVAIL 23
+#define RES43239_HT_AVAIL 24
+
+
+#define CST43239_SPROM_MASK 0x00000002
+#define CST43239_SFLASH_MASK 0x00000004
+#define CST43239_RES_INIT_MODE_SHIFT 7
+#define CST43239_RES_INIT_MODE_MASK 0x000001f0
+#define CST43239_CHIPMODE_SDIOD(cs) ((cs) & (1 << 15))
+#define CST43239_CHIPMODE_USB20D(cs) (~(cs) & (1 << 15))
+#define CST43239_CHIPMODE_SDIO(cs) (((cs) & (1 << 0)) == 0)
+#define CST43239_CHIPMODE_GSPI(cs) (((cs) & (1 << 0)) == (1 << 0))
+
+
+#define RES4324_OTP_PU 10
+#define RES4324_HT_AVAIL 29
+#define RES4324_MACPHY_CLKAVAIL 30
+
+
+#define CST4324_SPROM_MASK 0x00000080
+#define CST4324_SFLASH_MASK 0x00400000
+#define CST4324_RES_INIT_MODE_SHIFT 10
+#define CST4324_RES_INIT_MODE_MASK 0x00000c00
+#define CST4324_CHIPMODE_MASK 0x7
+#define CST4324_CHIPMODE_SDIOD(cs) ((~(cs)) & (1 << 2))
+#define CST4324_CHIPMODE_USB20D(cs) (((cs) & CST4324_CHIPMODE_MASK) == 0x6)
+
+
+#define RES4331_REGULATOR 0
+#define RES4331_ILP_REQUEST 1
+#define RES4331_XTAL_PU 2
+#define RES4331_ALP_AVAIL 3
+#define RES4331_SI_PLL_ON 4
+#define RES4331_HT_SI_AVAIL 5
+
+
+#define CCTRL4331_BT_COEXIST (1<<0)
+#define CCTRL4331_SECI (1<<1)
+#define CCTRL4331_EXT_LNA_G (1<<2)
+#define CCTRL4331_SPROM_GPIO13_15 (1<<3)
+#define CCTRL4331_EXTPA_EN (1<<4)
+#define CCTRL4331_GPIOCLK_ON_SPROMCS (1<<5)
+#define CCTRL4331_PCIE_MDIO_ON_SPROMCS (1<<6)
+#define CCTRL4331_EXTPA_ON_GPIO2_5 (1<<7)
+#define CCTRL4331_OVR_PIPEAUXCLKEN (1<<8)
+#define CCTRL4331_OVR_PIPEAUXPWRDOWN (1<<9)
+#define CCTRL4331_PCIE_AUXCLKEN (1<<10)
+#define CCTRL4331_PCIE_PIPE_PLLDOWN (1<<11)
+#define CCTRL4331_EXTPA_EN2 (1<<12)
+#define CCTRL4331_EXT_LNA_A (1<<13)
+#define CCTRL4331_BT_SHD0_ON_GPIO4 (1<<16)
+#define CCTRL4331_BT_SHD1_ON_GPIO5 (1<<17)
+#define CCTRL4331_EXTPA_ANA_EN (1<<24)
+
+
+#define CST4331_XTAL_FREQ 0x00000001
+#define CST4331_SPROM_OTP_SEL_MASK 0x00000006
+#define CST4331_SPROM_OTP_SEL_SHIFT 1
+#define CST4331_SPROM_PRESENT 0x00000002
+#define CST4331_OTP_PRESENT 0x00000004
+#define CST4331_LDO_RF 0x00000008
+#define CST4331_LDO_PAR 0x00000010
+
+
+#define RES4315_CBUCK_LPOM 1
+#define RES4315_CBUCK_BURST 2
+#define RES4315_CBUCK_PWM 3
+#define RES4315_CLDO_PU 4
+#define RES4315_PALDO_PU 5
+#define RES4315_ILP_REQUEST 6
+#define RES4315_LNLDO1_PU 9
+#define RES4315_OTP_PU 10
+#define RES4315_LNLDO2_PU 12
+#define RES4315_XTAL_PU 13
+#define RES4315_ALP_AVAIL 14
+#define RES4315_RX_PWRSW_PU 15
+#define RES4315_TX_PWRSW_PU 16
+#define RES4315_RFPLL_PWRSW_PU 17
+#define RES4315_LOGEN_PWRSW_PU 18
+#define RES4315_AFE_PWRSW_PU 19
+#define RES4315_BBPLL_PWRSW_PU 20
+#define RES4315_HT_AVAIL 21
+
+
+#define CST4315_SPROM_OTP_SEL_MASK 0x00000003
+#define CST4315_DEFCIS_SEL 0x00000000
+#define CST4315_SPROM_SEL 0x00000001
+#define CST4315_OTP_SEL 0x00000002
+#define CST4315_OTP_PWRDN 0x00000003
+#define CST4315_SDIO_MODE 0x00000004
+#define CST4315_RCAL_VALID 0x00000008
+#define CST4315_RCAL_VALUE_MASK 0x000001f0
+#define CST4315_RCAL_VALUE_SHIFT 4
+#define CST4315_PALDO_EXTPNP 0x00000200
+#define CST4315_CBUCK_MODE_MASK 0x00000c00
+#define CST4315_CBUCK_MODE_BURST 0x00000400
+#define CST4315_CBUCK_MODE_LPBURST 0x00000c00
+
+
+#define RES4319_CBUCK_LPOM 1
+#define RES4319_CBUCK_BURST 2
+#define RES4319_CBUCK_PWM 3
+#define RES4319_CLDO_PU 4
+#define RES4319_PALDO_PU 5
+#define RES4319_ILP_REQUEST 6
+#define RES4319_LNLDO1_PU 9
+#define RES4319_OTP_PU 10
+#define RES4319_LNLDO2_PU 12
+#define RES4319_XTAL_PU 13
+#define RES4319_ALP_AVAIL 14
+#define RES4319_RX_PWRSW_PU 15
+#define RES4319_TX_PWRSW_PU 16
+#define RES4319_RFPLL_PWRSW_PU 17
+#define RES4319_LOGEN_PWRSW_PU 18
+#define RES4319_AFE_PWRSW_PU 19
+#define RES4319_BBPLL_PWRSW_PU 20
+#define RES4319_HT_AVAIL 21
+
+
+#define CST4319_SPI_CPULESSUSB 0x00000001
+#define CST4319_SPI_CLK_POL 0x00000002
+#define CST4319_SPI_CLK_PH 0x00000008
+#define CST4319_SPROM_OTP_SEL_MASK 0x000000c0
+#define CST4319_SPROM_OTP_SEL_SHIFT 6
+#define CST4319_DEFCIS_SEL 0x00000000
+#define CST4319_SPROM_SEL 0x00000040
+#define CST4319_OTP_SEL 0x00000080
+#define CST4319_OTP_PWRDN 0x000000c0
+#define CST4319_SDIO_USB_MODE 0x00000100
+#define CST4319_REMAP_SEL_MASK 0x00000600
+#define CST4319_ILPDIV_EN 0x00000800
+#define CST4319_XTAL_PD_POL 0x00001000
+#define CST4319_LPO_SEL 0x00002000
+#define CST4319_RES_INIT_MODE 0x0000c000
+#define CST4319_PALDO_EXTPNP 0x00010000
+#define CST4319_CBUCK_MODE_MASK 0x00060000
+#define CST4319_CBUCK_MODE_BURST 0x00020000
+#define CST4319_CBUCK_MODE_LPBURST 0x00060000
+#define CST4319_RCAL_VALID 0x01000000
+#define CST4319_RCAL_VALUE_MASK 0x3e000000
+#define CST4319_RCAL_VALUE_SHIFT 25
+
+#define PMU1_PLL0_CHIPCTL0 0
+#define PMU1_PLL0_CHIPCTL1 1
+#define PMU1_PLL0_CHIPCTL2 2
+#define CCTL_4319USB_XTAL_SEL_MASK 0x00180000
+#define CCTL_4319USB_XTAL_SEL_SHIFT 19
+#define CCTL_4319USB_48MHZ_PLL_SEL 1
+#define CCTL_4319USB_24MHZ_PLL_SEL 2
+
+
+#define RES4336_CBUCK_LPOM 0
+#define RES4336_CBUCK_BURST 1
+#define RES4336_CBUCK_LP_PWM 2
+#define RES4336_CBUCK_PWM 3
+#define RES4336_CLDO_PU 4
+#define RES4336_DIS_INT_RESET_PD 5
+#define RES4336_ILP_REQUEST 6
+#define RES4336_LNLDO_PU 7
+#define RES4336_LDO3P3_PU 8
+#define RES4336_OTP_PU 9
+#define RES4336_XTAL_PU 10
+#define RES4336_ALP_AVAIL 11
+#define RES4336_RADIO_PU 12
+#define RES4336_BG_PU 13
+#define RES4336_VREG1p4_PU_PU 14
+#define RES4336_AFE_PWRSW_PU 15
+#define RES4336_RX_PWRSW_PU 16
+#define RES4336_TX_PWRSW_PU 17
+#define RES4336_BB_PWRSW_PU 18
+#define RES4336_SYNTH_PWRSW_PU 19
+#define RES4336_MISC_PWRSW_PU 20
+#define RES4336_LOGEN_PWRSW_PU 21
+#define RES4336_BBPLL_PWRSW_PU 22
+#define RES4336_MACPHY_CLKAVAIL 23
+#define RES4336_HT_AVAIL 24
+#define RES4336_RSVD 25
+
+
+#define CST4336_SPI_MODE_MASK 0x00000001
+#define CST4336_SPROM_PRESENT 0x00000002
+#define CST4336_OTP_PRESENT 0x00000004
+#define CST4336_ARMREMAP_0 0x00000008
+#define CST4336_ILPDIV_EN_MASK 0x00000010
+#define CST4336_ILPDIV_EN_SHIFT 4
+#define CST4336_XTAL_PD_POL_MASK 0x00000020
+#define CST4336_XTAL_PD_POL_SHIFT 5
+#define CST4336_LPO_SEL_MASK 0x00000040
+#define CST4336_LPO_SEL_SHIFT 6
+#define CST4336_RES_INIT_MODE_MASK 0x00000180
+#define CST4336_RES_INIT_MODE_SHIFT 7
+#define CST4336_CBUCK_MODE_MASK 0x00000600
+#define CST4336_CBUCK_MODE_SHIFT 9
+
+
+#define PCTL_4336_SERIAL_ENAB (1 << 24)
+
+
+#define RES4330_CBUCK_LPOM 0
+#define RES4330_CBUCK_BURST 1
+#define RES4330_CBUCK_LP_PWM 2
+#define RES4330_CBUCK_PWM 3
+#define RES4330_CLDO_PU 4
+#define RES4330_DIS_INT_RESET_PD 5
+#define RES4330_ILP_REQUEST 6
+#define RES4330_LNLDO_PU 7
+#define RES4330_LDO3P3_PU 8
+#define RES4330_OTP_PU 9
+#define RES4330_XTAL_PU 10
+#define RES4330_ALP_AVAIL 11
+#define RES4330_RADIO_PU 12
+#define RES4330_BG_PU 13
+#define RES4330_VREG1p4_PU_PU 14
+#define RES4330_AFE_PWRSW_PU 15
+#define RES4330_RX_PWRSW_PU 16
+#define RES4330_TX_PWRSW_PU 17
+#define RES4330_BB_PWRSW_PU 18
+#define RES4330_SYNTH_PWRSW_PU 19
+#define RES4330_MISC_PWRSW_PU 20
+#define RES4330_LOGEN_PWRSW_PU 21
+#define RES4330_BBPLL_PWRSW_PU 22
+#define RES4330_MACPHY_CLKAVAIL 23
+#define RES4330_HT_AVAIL 24
+#define RES4330_5gRX_PWRSW_PU 25
+#define RES4330_5gTX_PWRSW_PU 26
+#define RES4330_5g_LOGEN_PWRSW_PU 27
+
+
+#define CST4330_CHIPMODE_SDIOD(cs) (((cs) & 0x7) < 6)
+#define CST4330_CHIPMODE_USB20D(cs) (((cs) & 0x7) >= 6)
+#define CST4330_CHIPMODE_SDIO(cs) (((cs) & 0x4) == 0)
+#define CST4330_CHIPMODE_GSPI(cs) (((cs) & 0x6) == 4)
+#define CST4330_CHIPMODE_USB(cs) (((cs) & 0x7) == 6)
+#define CST4330_CHIPMODE_USBDA(cs) (((cs) & 0x7) == 7)
+#define CST4330_OTP_PRESENT 0x00000010
+#define CST4330_LPO_AUTODET_EN 0x00000020
+#define CST4330_ARMREMAP_0 0x00000040
+#define CST4330_SPROM_PRESENT 0x00000080
+#define CST4330_ILPDIV_EN 0x00000100
+#define CST4330_LPO_SEL 0x00000200
+#define CST4330_RES_INIT_MODE_SHIFT 10
+#define CST4330_RES_INIT_MODE_MASK 0x00000c00
+#define CST4330_CBUCK_MODE_SHIFT 12
+#define CST4330_CBUCK_MODE_MASK 0x00003000
+#define CST4330_CBUCK_POWER_OK 0x00004000
+#define CST4330_BB_PLL_LOCKED 0x00008000
+#define SOCDEVRAM_BP_ADDR 0x1E000000
+#define SOCDEVRAM_ARM_ADDR 0x00800000
+
+
+#define PCTL_4330_SERIAL_ENAB (1 << 24)
+
+
+#define CCTRL_4330_GPIO_SEL 0x00000001
+#define CCTRL_4330_ERCX_SEL 0x00000002
+#define CCTRL_4330_SDIO_HOST_WAKE 0x00000004
+#define CCTRL_4330_JTAG_DISABLE 0x00000008
+
+#define PMU_VREG0_ADDR 0
+#define PMU_VREG0_DISABLE_PULLD_BT_SHIFT 2
+#define PMU_VREG0_DISABLE_PULLD_WL_SHIFT 3
+
+
+#define RES4334_LPLDO_PU 0
+#define RES4334_RESET_PULLDN_DIS 1
+#define RES4334_PMU_BG_PU 2
+#define RES4334_HSIC_LDO_PU 3
+#define RES4334_CBUCK_LPOM_PU 4
+#define RES4334_CBUCK_PFM_PU 5
+#define RES4334_CLDO_PU 6
+#define RES4334_LPLDO2_LVM 7
+#define RES4334_LNLDO_PU 8
+#define RES4334_LDO3P3_PU 9
+#define RES4334_OTP_PU 10
+#define RES4334_XTAL_PU 11
+#define RES4334_WL_PWRSW_PU 12
+#define RES4334_LQ_AVAIL 13
+#define RES4334_LOGIC_RET 14
+#define RES4334_MEM_SLEEP 15
+#define RES4334_MACPHY_RET 16
+#define RES4334_WL_CORE_READY 17
+#define RES4334_ILP_REQ 18
+#define RES4334_ALP_AVAIL 19
+#define RES4334_MISC_PWRSW_PU 20
+#define RES4334_SYNTH_PWRSW_PU 21
+#define RES4334_RX_PWRSW_PU 22
+#define RES4334_RADIO_PU 23
+#define RES4334_WL_PMU_PU 24
+#define RES4334_VCO_LDO_PU 25
+#define RES4334_AFE_LDO_PU 26
+#define RES4334_RX_LDO_PU 27
+#define RES4334_TX_LDO_PU 28
+#define RES4334_HT_AVAIL 29
+#define RES4334_MACPHY_CLK_AVAIL 30
+
+
+#define CST4334_CHIPMODE_MASK 7
+#define CST4334_SDIO_MODE 0x00000000
+#define CST4334_SPI_MODE 0x00000004
+#define CST4334_HSIC_MODE 0x00000006
+#define CST4334_BLUSB_MODE 0x00000007
+#define CST4334_CHIPMODE_HSIC(cs) (((cs) & CST4334_CHIPMODE_MASK) == CST4334_HSIC_MODE)
+#define CST4334_OTP_PRESENT 0x00000010
+#define CST4334_LPO_AUTODET_EN 0x00000020
+#define CST4334_ARMREMAP_0 0x00000040
+#define CST4334_SPROM_PRESENT 0x00000080
+#define CST4334_ILPDIV_EN_MASK 0x00000100
+#define CST4334_ILPDIV_EN_SHIFT 8
+#define CST4334_LPO_SEL_MASK 0x00000200
+#define CST4334_LPO_SEL_SHIFT 9
+#define CST4334_RES_INIT_MODE_MASK 0x00000C00
+#define CST4334_RES_INIT_MODE_SHIFT 10
+
+
+#define PCTL_4334_GPIO3_ENAB (1 << 3)
+
+
+#define CCTRL4334_HSIC_LDO_PU (1 << 23)
+
+
+#define CCTRL1_4324_GPIO_SEL (1 << 0)
+#define CCTRL1_4324_SDIO_HOST_WAKE (1 << 2)
+
+
+
+#define RES4313_BB_PU_RSRC 0
+#define RES4313_ILP_REQ_RSRC 1
+#define RES4313_XTAL_PU_RSRC 2
+#define RES4313_ALP_AVAIL_RSRC 3
+#define RES4313_RADIO_PU_RSRC 4
+#define RES4313_BG_PU_RSRC 5
+#define RES4313_VREG1P4_PU_RSRC 6
+#define RES4313_AFE_PWRSW_RSRC 7
+#define RES4313_RX_PWRSW_RSRC 8
+#define RES4313_TX_PWRSW_RSRC 9
+#define RES4313_BB_PWRSW_RSRC 10
+#define RES4313_SYNTH_PWRSW_RSRC 11
+#define RES4313_MISC_PWRSW_RSRC 12
+#define RES4313_BB_PLL_PWRSW_RSRC 13
+#define RES4313_HT_AVAIL_RSRC 14
+#define RES4313_MACPHY_CLK_AVAIL_RSRC 15
+
+
+#define CST4313_SPROM_PRESENT 1
+#define CST4313_OTP_PRESENT 2
+#define CST4313_SPROM_OTP_SEL_MASK 0x00000002
+#define CST4313_SPROM_OTP_SEL_SHIFT 0
+
+
+#define CCTRL_4313_12MA_LED_DRIVE 0x00000007
+
+
+#define RES4314_LPLDO_PU 0
+#define RES4314_PMU_SLEEP_DIS 1
+#define RES4314_PMU_BG_PU 2
+#define RES4314_CBUCK_LPOM_PU 3
+#define RES4314_CBUCK_PFM_PU 4
+#define RES4314_CLDO_PU 5
+#define RES4314_LPLDO2_LVM 6
+#define RES4314_WL_PMU_PU 7
+#define RES4314_LNLDO_PU 8
+#define RES4314_LDO3P3_PU 9
+#define RES4314_OTP_PU 10
+#define RES4314_XTAL_PU 11
+#define RES4314_WL_PWRSW_PU 12
+#define RES4314_LQ_AVAIL 13
+#define RES4314_LOGIC_RET 14
+#define RES4314_MEM_SLEEP 15
+#define RES4314_MACPHY_RET 16
+#define RES4314_WL_CORE_READY 17
+#define RES4314_ILP_REQ 18
+#define RES4314_ALP_AVAIL 19
+#define RES4314_MISC_PWRSW_PU 20
+#define RES4314_SYNTH_PWRSW_PU 21
+#define RES4314_RX_PWRSW_PU 22
+#define RES4314_RADIO_PU 23
+#define RES4314_VCO_LDO_PU 24
+#define RES4314_AFE_LDO_PU 25
+#define RES4314_RX_LDO_PU 26
+#define RES4314_TX_LDO_PU 27
+#define RES4314_HT_AVAIL 28
+#define RES4314_MACPHY_CLK_AVAIL 29
+
+
+#define CST4314_OTP_ENABLED 0x00200000
+
+
+#define RES43228_NOT_USED 0
+#define RES43228_ILP_REQUEST 1
+#define RES43228_XTAL_PU 2
+#define RES43228_ALP_AVAIL 3
+#define RES43228_PLL_EN 4
+#define RES43228_HT_PHY_AVAIL 5
+
+
+#define CST43228_ILP_DIV_EN 0x1
+#define CST43228_OTP_PRESENT 0x2
+#define CST43228_SERDES_REFCLK_PADSEL 0x4
+#define CST43228_SDIO_MODE 0x8
+#define CST43228_SDIO_OTP_PRESENT 0x10
+#define CST43228_SDIO_RESET 0x20
+
+
+#define CST4706_PKG_OPTION (1<<0)
+#define CST4706_SFLASH_PRESENT (1<<1)
+#define CST4706_SFLASH_TYPE (1<<2)
+#define CST4706_MIPS_BENDIAN (1<<3)
+#define CST4706_PCIE1_DISABLE (1<<5)
+
+
+#define FLSTRCF4706_MASK 0x000000ff
+#define FLSTRCF4706_SF1 0x00000001
+#define FLSTRCF4706_PF1 0x00000002
+#define FLSTRCF4706_SF1_TYPE 0x00000004
+#define FLSTRCF4706_NF1 0x00000008
+#define FLSTRCF4706_1ST_MADDR_SEG_MASK 0x000000f0
+#define FLSTRCF4706_1ST_MADDR_SEG_4MB 0x00000010
+#define FLSTRCF4706_1ST_MADDR_SEG_8MB 0x00000020
+#define FLSTRCF4706_1ST_MADDR_SEG_16MB 0x00000030
+#define FLSTRCF4706_1ST_MADDR_SEG_32MB 0x00000040
+#define FLSTRCF4706_1ST_MADDR_SEG_64MB 0x00000050
+#define FLSTRCF4706_1ST_MADDR_SEG_128MB 0x00000060
+#define FLSTRCF4706_1ST_MADDR_SEG_256MB 0x00000070
+
+
+#define CCTRL4360_SECI_MODE (1 << 2)
+#define CCTRL4360_BTSWCTRL_MODE (1 << 3)
+#define CCTRL4360_EXTRA_FEMCTRL_MODE (1 << 8)
+#define CCTRL4360_BT_LGCY_MODE (1 << 9)
+#define CCTRL4360_CORE2FEMCTRL4_ON (1 << 21)
+
+
+#define RES4360_REGULATOR 0
+#define RES4360_ILP_AVAIL 1
+#define RES4360_ILP_REQ 2
+#define RES4360_XTAL_LDO_PU 3
+#define RES4360_XTAL_PU 4
+#define RES4360_ALP_AVAIL 5
+#define RES4360_BBPLLPWRSW_PU 6
+#define RES4360_HT_AVAIL 7
+#define RES4360_OTP_PU 8
+
+#define CST4360_XTAL_40MZ 0x00000001
+#define CST4360_SFLASH 0x00000002
+#define CST4360_SPROM_PRESENT 0x00000004
+#define CST4360_SFLASH_TYPE 0x00000004
+#define CST4360_OTP_ENABLED 0x00000008
+#define CST4360_REMAP_ROM 0x00000010
+#define CST4360_RSRC_INIT_MODE_MASK 0x00000060
+#define CST4360_RSRC_INIT_MODE_SHIFT 5
+#define CST4360_ILP_DIVEN 0x00000080
+#define CST4360_MODE_USB 0x00000100
+#define CST4360_SPROM_SIZE_MASK 0x00000600
+#define CST4360_SPROM_SIZE_SHIFT 9
+#define CST4360_BBPLL_LOCK 0x00000800
+#define CST4360_AVBBPLL_LOCK 0x00001000
+#define CST4360_USBBBPLL_LOCK 0x00002000
+
+#define CCTRL_4360_UART_SEL 0x2
+
+
+#define CHIP_HOSTIF_USB(sih) (si_chip_hostif(sih) & CST4360_MODE_USB)
+
+
+#define PMU_MAX_TRANSITION_DLY 15000
+
+
+#define PMURES_UP_TRANSITION 2
+
+
+
+#define SECI_MODE_UART 0x0
+#define SECI_MODE_SECI 0x1
+#define SECI_MODE_LEGACY_3WIRE_BT 0x2
+#define SECI_MODE_LEGACY_3WIRE_WLAN 0x3
+#define SECI_MODE_HALF_SECI 0x4
+
+#define SECI_RESET (1 << 0)
+#define SECI_RESET_BAR_UART (1 << 1)
+#define SECI_ENAB_SECI_ECI (1 << 2)
+#define SECI_ENAB_SECIOUT_DIS (1 << 3)
+#define SECI_MODE_MASK 0x7
+#define SECI_MODE_SHIFT 4
+#define SECI_UPD_SECI (1 << 7)
+
+#define SECI_SIGNOFF_0 0xDB
+#define SECI_SIGNOFF_1 0
+
+
+#define CLKCTL_STS_SECI_CLK_REQ (1 << 8)
+#define CLKCTL_STS_SECI_CLK_AVAIL (1 << 24)
+
+#define SECI_UART_MSR_CTS_STATE (1 << 0)
+#define SECI_UART_MSR_RTS_STATE (1 << 1)
+#define SECI_UART_SECI_IN_STATE (1 << 2)
+#define SECI_UART_SECI_IN2_STATE (1 << 3)
+
+
+#define SECI_UART_LCR_STOP_BITS (1 << 0)
+#define SECI_UART_LCR_PARITY_EN (1 << 1)
+#define SECI_UART_LCR_PARITY (1 << 2)
+#define SECI_UART_LCR_RX_EN (1 << 3)
+#define SECI_UART_LCR_LBRK_CTRL (1 << 4)
+#define SECI_UART_LCR_TXO_EN (1 << 5)
+#define SECI_UART_LCR_RTSO_EN (1 << 6)
+#define SECI_UART_LCR_SLIPMODE_EN (1 << 7)
+#define SECI_UART_LCR_RXCRC_CHK (1 << 8)
+#define SECI_UART_LCR_TXCRC_INV (1 << 9)
+#define SECI_UART_LCR_TXCRC_LSBF (1 << 10)
+#define SECI_UART_LCR_TXCRC_EN (1 << 11)
+
+#define SECI_UART_MCR_TX_EN (1 << 0)
+#define SECI_UART_MCR_PRTS (1 << 1)
+#define SECI_UART_MCR_SWFLCTRL_EN (1 << 2)
+#define SECI_UART_MCR_HIGHRATE_EN (1 << 3)
+#define SECI_UART_MCR_LOOPBK_EN (1 << 4)
+#define SECI_UART_MCR_AUTO_RTS (1 << 5)
+#define SECI_UART_MCR_AUTO_TX_DIS (1 << 6)
+#define SECI_UART_MCR_BAUD_ADJ_EN (1 << 7)
+#define SECI_UART_MCR_XONOFF_RPT (1 << 9)
+
+
+
+
+#define ECI_BW_20 0x0
+#define ECI_BW_25 0x1
+#define ECI_BW_30 0x2
+#define ECI_BW_35 0x3
+#define ECI_BW_40 0x4
+#define ECI_BW_45 0x5
+#define ECI_BW_50 0x6
+#define ECI_BW_ALL 0x7
+
+
+#define WLAN_NUM_ANT1 TXANT_0
+#define WLAN_NUM_ANT2 TXANT_1
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/sbconfig.h b/drivers/net/wireless/bcmdhd/include/sbconfig.h
new file mode 100644
index 000000000000..44d68329e660
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbconfig.h
@@ -0,0 +1,275 @@
+/*
+ * Broadcom SiliconBackplane hardware register definitions.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbconfig.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _SBCONFIG_H
+#define _SBCONFIG_H
+
+
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif
+
+
+#define SB_BUS_SIZE 0x10000
+#define SB_BUS_BASE(b) (SI_ENUM_BASE + (b) * SB_BUS_SIZE)
+#define SB_BUS_MAXCORES (SB_BUS_SIZE / SI_CORE_SIZE)
+
+
+#define SBCONFIGOFF 0xf00
+#define SBCONFIGSIZE 256
+
+#define SBIPSFLAG 0x08
+#define SBTPSFLAG 0x18
+#define SBTMERRLOGA 0x48
+#define SBTMERRLOG 0x50
+#define SBADMATCH3 0x60
+#define SBADMATCH2 0x68
+#define SBADMATCH1 0x70
+#define SBIMSTATE 0x90
+#define SBINTVEC 0x94
+#define SBTMSTATELOW 0x98
+#define SBTMSTATEHIGH 0x9c
+#define SBBWA0 0xa0
+#define SBIMCONFIGLOW 0xa8
+#define SBIMCONFIGHIGH 0xac
+#define SBADMATCH0 0xb0
+#define SBTMCONFIGLOW 0xb8
+#define SBTMCONFIGHIGH 0xbc
+#define SBBCONFIG 0xc0
+#define SBBSTATE 0xc8
+#define SBACTCNFG 0xd8
+#define SBFLAGST 0xe8
+#define SBIDLOW 0xf8
+#define SBIDHIGH 0xfc
+
+
+
+#define SBIMERRLOGA 0xea8
+#define SBIMERRLOG 0xeb0
+#define SBTMPORTCONNID0 0xed8
+#define SBTMPORTLOCK0 0xef8
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+typedef volatile struct _sbconfig {
+ uint32 PAD[2];
+ uint32 sbipsflag;
+ uint32 PAD[3];
+ uint32 sbtpsflag;
+ uint32 PAD[11];
+ uint32 sbtmerrloga;
+ uint32 PAD;
+ uint32 sbtmerrlog;
+ uint32 PAD[3];
+ uint32 sbadmatch3;
+ uint32 PAD;
+ uint32 sbadmatch2;
+ uint32 PAD;
+ uint32 sbadmatch1;
+ uint32 PAD[7];
+ uint32 sbimstate;
+ uint32 sbintvec;
+ uint32 sbtmstatelow;
+ uint32 sbtmstatehigh;
+ uint32 sbbwa0;
+ uint32 PAD;
+ uint32 sbimconfiglow;
+ uint32 sbimconfighigh;
+ uint32 sbadmatch0;
+ uint32 PAD;
+ uint32 sbtmconfiglow;
+ uint32 sbtmconfighigh;
+ uint32 sbbconfig;
+ uint32 PAD;
+ uint32 sbbstate;
+ uint32 PAD[3];
+ uint32 sbactcnfg;
+ uint32 PAD[3];
+ uint32 sbflagst;
+ uint32 PAD[3];
+ uint32 sbidlow;
+ uint32 sbidhigh;
+} sbconfig_t;
+
+#endif
+
+
+#define SBIPS_INT1_MASK 0x3f
+#define SBIPS_INT1_SHIFT 0
+#define SBIPS_INT2_MASK 0x3f00
+#define SBIPS_INT2_SHIFT 8
+#define SBIPS_INT3_MASK 0x3f0000
+#define SBIPS_INT3_SHIFT 16
+#define SBIPS_INT4_MASK 0x3f000000
+#define SBIPS_INT4_SHIFT 24
+
+
+#define SBTPS_NUM0_MASK 0x3f
+#define SBTPS_F0EN0 0x40
+
+
+#define SBTMEL_CM 0x00000007
+#define SBTMEL_CI 0x0000ff00
+#define SBTMEL_EC 0x0f000000
+#define SBTMEL_ME 0x80000000
+
+
+#define SBIM_PC 0xf
+#define SBIM_AP_MASK 0x30
+#define SBIM_AP_BOTH 0x00
+#define SBIM_AP_TS 0x10
+#define SBIM_AP_TK 0x20
+#define SBIM_AP_RSV 0x30
+#define SBIM_IBE 0x20000
+#define SBIM_TO 0x40000
+#define SBIM_BY 0x01800000
+#define SBIM_RJ 0x02000000
+
+
+#define SBTML_RESET 0x0001
+#define SBTML_REJ_MASK 0x0006
+#define SBTML_REJ 0x0002
+#define SBTML_TMPREJ 0x0004
+
+#define SBTML_SICF_SHIFT 16
+
+
+#define SBTMH_SERR 0x0001
+#define SBTMH_INT 0x0002
+#define SBTMH_BUSY 0x0004
+#define SBTMH_TO 0x0020
+
+#define SBTMH_SISF_SHIFT 16
+
+
+#define SBBWA_TAB0_MASK 0xffff
+#define SBBWA_TAB1_MASK 0xffff
+#define SBBWA_TAB1_SHIFT 16
+
+
+#define SBIMCL_STO_MASK 0x7
+#define SBIMCL_RTO_MASK 0x70
+#define SBIMCL_RTO_SHIFT 4
+#define SBIMCL_CID_MASK 0xff0000
+#define SBIMCL_CID_SHIFT 16
+
+
+#define SBIMCH_IEM_MASK 0xc
+#define SBIMCH_TEM_MASK 0x30
+#define SBIMCH_TEM_SHIFT 4
+#define SBIMCH_BEM_MASK 0xc0
+#define SBIMCH_BEM_SHIFT 6
+
+
+#define SBAM_TYPE_MASK 0x3
+#define SBAM_AD64 0x4
+#define SBAM_ADINT0_MASK 0xf8
+#define SBAM_ADINT0_SHIFT 3
+#define SBAM_ADINT1_MASK 0x1f8
+#define SBAM_ADINT1_SHIFT 3
+#define SBAM_ADINT2_MASK 0x1f8
+#define SBAM_ADINT2_SHIFT 3
+#define SBAM_ADEN 0x400
+#define SBAM_ADNEG 0x800
+#define SBAM_BASE0_MASK 0xffffff00
+#define SBAM_BASE0_SHIFT 8
+#define SBAM_BASE1_MASK 0xfffff000
+#define SBAM_BASE1_SHIFT 12
+#define SBAM_BASE2_MASK 0xffff0000
+#define SBAM_BASE2_SHIFT 16
+
+
+#define SBTMCL_CD_MASK 0xff
+#define SBTMCL_CO_MASK 0xf800
+#define SBTMCL_CO_SHIFT 11
+#define SBTMCL_IF_MASK 0xfc0000
+#define SBTMCL_IF_SHIFT 18
+#define SBTMCL_IM_MASK 0x3000000
+#define SBTMCL_IM_SHIFT 24
+
+
+#define SBTMCH_BM_MASK 0x3
+#define SBTMCH_RM_MASK 0x3
+#define SBTMCH_RM_SHIFT 2
+#define SBTMCH_SM_MASK 0x30
+#define SBTMCH_SM_SHIFT 4
+#define SBTMCH_EM_MASK 0x300
+#define SBTMCH_EM_SHIFT 8
+#define SBTMCH_IM_MASK 0xc00
+#define SBTMCH_IM_SHIFT 10
+
+
+#define SBBC_LAT_MASK 0x3
+#define SBBC_MAX0_MASK 0xf0000
+#define SBBC_MAX0_SHIFT 16
+#define SBBC_MAX1_MASK 0xf00000
+#define SBBC_MAX1_SHIFT 20
+
+
+#define SBBS_SRD 0x1
+#define SBBS_HRD 0x2
+
+
+#define SBIDL_CS_MASK 0x3
+#define SBIDL_AR_MASK 0x38
+#define SBIDL_AR_SHIFT 3
+#define SBIDL_SYNCH 0x40
+#define SBIDL_INIT 0x80
+#define SBIDL_MINLAT_MASK 0xf00
+#define SBIDL_MINLAT_SHIFT 8
+#define SBIDL_MAXLAT 0xf000
+#define SBIDL_MAXLAT_SHIFT 12
+#define SBIDL_FIRST 0x10000
+#define SBIDL_CW_MASK 0xc0000
+#define SBIDL_CW_SHIFT 18
+#define SBIDL_TP_MASK 0xf00000
+#define SBIDL_TP_SHIFT 20
+#define SBIDL_IP_MASK 0xf000000
+#define SBIDL_IP_SHIFT 24
+#define SBIDL_RV_MASK 0xf0000000
+#define SBIDL_RV_SHIFT 28
+#define SBIDL_RV_2_2 0x00000000
+#define SBIDL_RV_2_3 0x10000000
+
+
+#define SBIDH_RC_MASK 0x000f
+#define SBIDH_RCE_MASK 0x7000
+#define SBIDH_RCE_SHIFT 8
+#define SBCOREREV(sbidh) \
+ ((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK))
+#define SBIDH_CC_MASK 0x8ff0
+#define SBIDH_CC_SHIFT 4
+#define SBIDH_VC_MASK 0xffff0000
+#define SBIDH_VC_SHIFT 16
+
+#define SB_COMMIT 0xfd8
+
+
+#define SB_VEND_BCM 0x4243
+
+#endif
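The SBCOREREV() macro above merges the extended-revision field of sbidhigh (SBIDH_RCE_MASK, bits 14:12) with the base revision field (SBIDH_RC_MASK, bits 3:0). A minimal usage sketch, assuming the driver's uint32 typedef (an unsigned 32-bit integer) and a caller that has already read the core's sbidhigh register:

static uint32 sb_core_rev(uint32 sbidhigh_val)
{
	/* Example: sbidhigh 0x4243101f has vendor 0x4243 (SB_VEND_BCM) and
	 * SBCOREREV() yields ((0x1000 >> 8) | 0xf) == 0x1f, i.e. revision 31. */
	return SBCOREREV(sbidhigh_val);
}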
diff --git a/drivers/net/wireless/bcmdhd/include/sbhnddma.h b/drivers/net/wireless/bcmdhd/include/sbhnddma.h
new file mode 100644
index 000000000000..da1f1a10a653
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbhnddma.h
@@ -0,0 +1,370 @@
+/*
+ * Generic Broadcom Home Networking Division (HND) DMA engine HW interface
+ * This supports the following chips: BCM42xx, 44xx, 47xx.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbhnddma.h 309193 2012-01-19 00:03:57Z $
+ */
+
+#ifndef _sbhnddma_h_
+#define _sbhnddma_h_
+
+
+
+
+
+
+
+typedef volatile struct {
+ uint32 control;
+ uint32 addr;
+ uint32 ptr;
+ uint32 status;
+} dma32regs_t;
+
+typedef volatile struct {
+ dma32regs_t xmt;
+ dma32regs_t rcv;
+} dma32regp_t;
+
+typedef volatile struct {
+ uint32 fifoaddr;
+ uint32 fifodatalow;
+ uint32 fifodatahigh;
+ uint32 pad;
+} dma32diag_t;
+
+
+typedef volatile struct {
+ uint32 ctrl;
+ uint32 addr;
+} dma32dd_t;
+
+
+#define D32RINGALIGN_BITS 12
+#define D32MAXRINGSZ (1 << D32RINGALIGN_BITS)
+#define D32RINGALIGN (1 << D32RINGALIGN_BITS)
+
+#define D32MAXDD (D32MAXRINGSZ / sizeof (dma32dd_t))
+
+
+#define XC_XE ((uint32)1 << 0)
+#define XC_SE ((uint32)1 << 1)
+#define XC_LE ((uint32)1 << 2)
+#define XC_FL ((uint32)1 << 4)
+#define XC_MR_MASK 0x000000C0
+#define XC_MR_SHIFT 6
+#define XC_PD ((uint32)1 << 11)
+#define XC_AE ((uint32)3 << 16)
+#define XC_AE_SHIFT 16
+#define XC_BL_MASK 0x001C0000
+#define XC_BL_SHIFT 18
+#define XC_PC_MASK 0x00E00000
+#define XC_PC_SHIFT 21
+#define XC_PT_MASK 0x03000000
+#define XC_PT_SHIFT 24
+
+
+#define DMA_MR_1 0
+#define DMA_MR_2 1
+
+
+
+#define DMA_BL_16 0
+#define DMA_BL_32 1
+#define DMA_BL_64 2
+#define DMA_BL_128 3
+#define DMA_BL_256 4
+#define DMA_BL_512 5
+#define DMA_BL_1024 6
+
+
+#define DMA_PC_0 0
+#define DMA_PC_4 1
+#define DMA_PC_8 2
+#define DMA_PC_16 3
+
+
+
+#define DMA_PT_1 0
+#define DMA_PT_2 1
+#define DMA_PT_4 2
+#define DMA_PT_8 3
+
+
+#define XP_LD_MASK 0xfff
+
+
+#define XS_CD_MASK 0x0fff
+#define XS_XS_MASK 0xf000
+#define XS_XS_SHIFT 12
+#define XS_XS_DISABLED 0x0000
+#define XS_XS_ACTIVE 0x1000
+#define XS_XS_IDLE 0x2000
+#define XS_XS_STOPPED 0x3000
+#define XS_XS_SUSP 0x4000
+#define XS_XE_MASK 0xf0000
+#define XS_XE_SHIFT 16
+#define XS_XE_NOERR 0x00000
+#define XS_XE_DPE 0x10000
+#define XS_XE_DFU 0x20000
+#define XS_XE_BEBR 0x30000
+#define XS_XE_BEDA 0x40000
+#define XS_AD_MASK 0xfff00000
+#define XS_AD_SHIFT 20
+
+
+#define RC_RE ((uint32)1 << 0)
+#define RC_RO_MASK 0xfe
+#define RC_RO_SHIFT 1
+#define RC_FM ((uint32)1 << 8)
+#define RC_SH ((uint32)1 << 9)
+#define RC_OC ((uint32)1 << 10)
+#define RC_PD ((uint32)1 << 11)
+#define RC_AE ((uint32)3 << 16)
+#define RC_AE_SHIFT 16
+#define RC_BL_MASK 0x001C0000
+#define RC_BL_SHIFT 18
+#define RC_PC_MASK 0x00E00000
+#define RC_PC_SHIFT 21
+#define RC_PT_MASK 0x03000000
+#define RC_PT_SHIFT 24
+
+
+#define RP_LD_MASK 0xfff
+
+
+#define RS_CD_MASK 0x0fff
+#define RS_RS_MASK 0xf000
+#define RS_RS_SHIFT 12
+#define RS_RS_DISABLED 0x0000
+#define RS_RS_ACTIVE 0x1000
+#define RS_RS_IDLE 0x2000
+#define RS_RS_STOPPED 0x3000
+#define RS_RE_MASK 0xf0000
+#define RS_RE_SHIFT 16
+#define RS_RE_NOERR 0x00000
+#define RS_RE_DPE 0x10000
+#define RS_RE_DFO 0x20000
+#define RS_RE_BEBW 0x30000
+#define RS_RE_BEDA 0x40000
+#define RS_AD_MASK 0xfff00000
+#define RS_AD_SHIFT 20
+
+
+#define FA_OFF_MASK 0xffff
+#define FA_SEL_MASK 0xf0000
+#define FA_SEL_SHIFT 16
+#define FA_SEL_XDD 0x00000
+#define FA_SEL_XDP 0x10000
+#define FA_SEL_RDD 0x40000
+#define FA_SEL_RDP 0x50000
+#define FA_SEL_XFD 0x80000
+#define FA_SEL_XFP 0x90000
+#define FA_SEL_RFD 0xc0000
+#define FA_SEL_RFP 0xd0000
+#define FA_SEL_RSD 0xe0000
+#define FA_SEL_RSP 0xf0000
+
+
+#define CTRL_BC_MASK 0x00001fff
+#define CTRL_AE ((uint32)3 << 16)
+#define CTRL_AE_SHIFT 16
+#define CTRL_PARITY ((uint32)3 << 18)
+#define CTRL_EOT ((uint32)1 << 28)
+#define CTRL_IOC ((uint32)1 << 29)
+#define CTRL_EOF ((uint32)1 << 30)
+#define CTRL_SOF ((uint32)1 << 31)
+
+
+#define CTRL_CORE_MASK 0x0ff00000
+
+
+
+
+typedef volatile struct {
+ uint32 control;
+ uint32 ptr;
+ uint32 addrlow;
+ uint32 addrhigh;
+ uint32 status0;
+ uint32 status1;
+} dma64regs_t;
+
+typedef volatile struct {
+ dma64regs_t tx;
+ dma64regs_t rx;
+} dma64regp_t;
+
+typedef volatile struct {
+ uint32 fifoaddr;
+ uint32 fifodatalow;
+ uint32 fifodatahigh;
+ uint32 pad;
+} dma64diag_t;
+
+
+typedef volatile struct {
+ uint32 ctrl1;
+ uint32 ctrl2;
+ uint32 addrlow;
+ uint32 addrhigh;
+} dma64dd_t;
+
+
+#define D64RINGALIGN_BITS 13
+#define D64MAXRINGSZ (1 << D64RINGALIGN_BITS)
+#define D64RINGALIGN (1 << D64RINGALIGN_BITS)
+
+#define D64MAXDD (D64MAXRINGSZ / sizeof (dma64dd_t))
+
+
+#define D64_XC_XE 0x00000001
+#define D64_XC_SE 0x00000002
+#define D64_XC_LE 0x00000004
+#define D64_XC_FL 0x00000010
+#define D64_XC_MR_MASK 0x000000C0
+#define D64_XC_MR_SHIFT 6
+#define D64_XC_PD 0x00000800
+#define D64_XC_AE 0x00030000
+#define D64_XC_AE_SHIFT 16
+#define D64_XC_BL_MASK 0x001C0000
+#define D64_XC_BL_SHIFT 18
+#define D64_XC_PC_MASK 0x00E00000
+#define D64_XC_PC_SHIFT 21
+#define D64_XC_PT_MASK 0x03000000
+#define D64_XC_PT_SHIFT 24
+
+
+#define D64_XP_LD_MASK 0x00001fff
+
+
+#define D64_XS0_CD_MASK 0x00001fff
+#define D64_XS0_XS_MASK 0xf0000000
+#define D64_XS0_XS_SHIFT 28
+#define D64_XS0_XS_DISABLED 0x00000000
+#define D64_XS0_XS_ACTIVE 0x10000000
+#define D64_XS0_XS_IDLE 0x20000000
+#define D64_XS0_XS_STOPPED 0x30000000
+#define D64_XS0_XS_SUSP 0x40000000
+
+#define D64_XS1_AD_MASK 0x00001fff
+#define D64_XS1_XE_MASK 0xf0000000
+#define D64_XS1_XE_SHIFT 28
+#define D64_XS1_XE_NOERR 0x00000000
+#define D64_XS1_XE_DPE 0x10000000
+#define D64_XS1_XE_DFU 0x20000000
+#define D64_XS1_XE_DTE 0x30000000
+#define D64_XS1_XE_DESRE 0x40000000
+#define D64_XS1_XE_COREE 0x50000000
+
+
+#define D64_RC_RE 0x00000001
+#define D64_RC_RO_MASK 0x000000fe
+#define D64_RC_RO_SHIFT 1
+#define D64_RC_FM 0x00000100
+#define D64_RC_SH 0x00000200
+#define D64_RC_OC 0x00000400
+#define D64_RC_PD 0x00000800
+#define D64_RC_AE 0x00030000
+#define D64_RC_AE_SHIFT 16
+#define D64_RC_BL_MASK 0x001C0000
+#define D64_RC_BL_SHIFT 18
+#define D64_RC_PC_MASK 0x00E00000
+#define D64_RC_PC_SHIFT 21
+#define D64_RC_PT_MASK 0x03000000
+#define D64_RC_PT_SHIFT 24
+
+
+#define DMA_CTRL_PEN (1 << 0)
+#define DMA_CTRL_ROC (1 << 1)
+#define DMA_CTRL_RXMULTI (1 << 2)
+#define DMA_CTRL_UNFRAMED (1 << 3)
+#define DMA_CTRL_USB_BOUNDRY4KB_WAR (1 << 4)
+#define DMA_CTRL_DMA_AVOIDANCE_WAR (1 << 5)
+
+
+#define D64_RP_LD_MASK 0x00001fff
+
+
+#define D64_RS0_CD_MASK 0x00001fff
+#define D64_RS0_RS_MASK 0xf0000000
+#define D64_RS0_RS_SHIFT 28
+#define D64_RS0_RS_DISABLED 0x00000000
+#define D64_RS0_RS_ACTIVE 0x10000000
+#define D64_RS0_RS_IDLE 0x20000000
+#define D64_RS0_RS_STOPPED 0x30000000
+#define D64_RS0_RS_SUSP 0x40000000
+
+#define D64_RS1_AD_MASK 0x0001ffff
+#define D64_RS1_RE_MASK 0xf0000000
+#define D64_RS1_RE_SHIFT 28
+#define D64_RS1_RE_NOERR 0x00000000
+#define D64_RS1_RE_DPO 0x10000000
+#define D64_RS1_RE_DFU 0x20000000
+#define D64_RS1_RE_DTE 0x30000000
+#define D64_RS1_RE_DESRE 0x40000000
+#define D64_RS1_RE_COREE 0x50000000
+
+
+#define D64_FA_OFF_MASK 0xffff
+#define D64_FA_SEL_MASK 0xf0000
+#define D64_FA_SEL_SHIFT 16
+#define D64_FA_SEL_XDD 0x00000
+#define D64_FA_SEL_XDP 0x10000
+#define D64_FA_SEL_RDD 0x40000
+#define D64_FA_SEL_RDP 0x50000
+#define D64_FA_SEL_XFD 0x80000
+#define D64_FA_SEL_XFP 0x90000
+#define D64_FA_SEL_RFD 0xc0000
+#define D64_FA_SEL_RFP 0xd0000
+#define D64_FA_SEL_RSD 0xe0000
+#define D64_FA_SEL_RSP 0xf0000
+
+
+#define D64_CTRL_COREFLAGS 0x0ff00000
+#define D64_CTRL1_EOT ((uint32)1 << 28)
+#define D64_CTRL1_IOC ((uint32)1 << 29)
+#define D64_CTRL1_EOF ((uint32)1 << 30)
+#define D64_CTRL1_SOF ((uint32)1 << 31)
+
+
+#define D64_CTRL2_BC_MASK 0x00007fff
+#define D64_CTRL2_AE 0x00030000
+#define D64_CTRL2_AE_SHIFT 16
+#define D64_CTRL2_PARITY 0x00040000
+
+
+#define D64_CTRL_CORE_MASK 0x0ff00000
+
+#define D64_RX_FRM_STS_LEN 0x0000ffff
+#define D64_RX_FRM_STS_OVFL 0x00800000
+#define D64_RX_FRM_STS_DSCRCNT 0x0f000000
+#define D64_RX_FRM_STS_DATATYPE 0xf0000000
+
+
+typedef volatile struct {
+ uint16 len;
+ uint16 flags;
+} dma_rxh_t;
+
+#endif
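In the 64-bit descriptor (dma64dd_t) above, ctrl1 carries the frame-boundary and completion flags (D64_CTRL1_SOF/EOF/IOC/EOT) and ctrl2 carries the buffer byte count in D64_CTRL2_BC_MASK. A minimal sketch of filling one transmit descriptor for a single-fragment frame, assuming the driver's uint32 typedef and caller-supplied DMA address halves:

static void d64_fill_txdesc(dma64dd_t *dd, uint32 pa_low, uint32 pa_high, uint32 len)
{
	dd->addrlow  = pa_low;
	dd->addrhigh = pa_high;
	/* byte count lives in the low bits of ctrl2 */
	dd->ctrl2 = len & D64_CTRL2_BC_MASK;
	/* single fragment: mark both start and end of frame, interrupt on completion */
	dd->ctrl1 = D64_CTRL1_SOF | D64_CTRL1_EOF | D64_CTRL1_IOC;
}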
diff --git a/drivers/net/wireless/bcmdhd/include/sbpcmcia.h b/drivers/net/wireless/bcmdhd/include/sbpcmcia.h
new file mode 100644
index 000000000000..6ad98b521e76
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbpcmcia.h
@@ -0,0 +1,108 @@
+/*
+ * BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbpcmcia.h 326494 2012-04-09 13:29:57Z $
+ */
+
+#ifndef _SBPCMCIA_H
+#define _SBPCMCIA_H
+
+
+
+
+#define PCMCIA_FCR (0x700 / 2)
+
+#define FCR0_OFF 0
+#define FCR1_OFF (0x40 / 2)
+#define FCR2_OFF (0x80 / 2)
+#define FCR3_OFF (0xc0 / 2)
+
+#define PCMCIA_FCR0 (0x700 / 2)
+#define PCMCIA_FCR1 (0x740 / 2)
+#define PCMCIA_FCR2 (0x780 / 2)
+#define PCMCIA_FCR3 (0x7c0 / 2)
+
+
+
+#define PCMCIA_COR 0
+
+#define COR_RST 0x80
+#define COR_LEV 0x40
+#define COR_IRQEN 0x04
+#define COR_BLREN 0x01
+#define COR_FUNEN 0x01
+
+
+#define PCICIA_FCSR (2 / 2)
+#define PCICIA_PRR (4 / 2)
+#define PCICIA_SCR (6 / 2)
+#define PCICIA_ESR (8 / 2)
+
+
+#define PCM_MEMOFF 0x0000
+#define F0_MEMOFF 0x1000
+#define F1_MEMOFF 0x2000
+#define F2_MEMOFF 0x3000
+#define F3_MEMOFF 0x4000
+
+
+#define MEM_ADDR0 (0x728 / 2)
+#define MEM_ADDR1 (0x72a / 2)
+#define MEM_ADDR2 (0x72c / 2)
+
+
+#define PCMCIA_ADDR0 (0x072e / 2)
+#define PCMCIA_ADDR1 (0x0730 / 2)
+#define PCMCIA_ADDR2 (0x0732 / 2)
+
+#define MEM_SEG (0x0734 / 2)
+#define SROM_CS (0x0736 / 2)
+#define SROM_DATAL (0x0738 / 2)
+#define SROM_DATAH (0x073a / 2)
+#define SROM_ADDRL (0x073c / 2)
+#define SROM_ADDRH (0x073e / 2)
+#define SROM_INFO2 (0x0772 / 2)
+#define SROM_INFO (0x07be / 2)
+
+
+#define SROM_IDLE 0
+#define SROM_WRITE 1
+#define SROM_READ 2
+#define SROM_WEN 4
+#define SROM_WDS 7
+#define SROM_DONE 8
+
+
+#define SRI_SZ_MASK 0x03
+#define SRI_BLANK 0x04
+#define SRI_OTP 0x80
+
+
+
+#define SBTML_INT_ACK 0x40000
+#define SBTML_INT_EN 0x20000
+
+
+#define SBTMH_INT_STATUS 0x40000
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/sbsdio.h b/drivers/net/wireless/bcmdhd/include/sbsdio.h
new file mode 100644
index 000000000000..4b5a1cf465fc
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbsdio.h
@@ -0,0 +1,187 @@
+/*
+ * SDIO device core hardware definitions.
+ * SDIO is a portion of the PCMCIA core in core rev 3 - rev 8.
+ *
+ * The SDIO core supports 1-bit and 4-bit SDIO modes as well as SPI mode.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbsdio.h 308945 2012-01-18 02:15:27Z $
+ */
+
+#ifndef _SBSDIO_H
+#define _SBSDIO_H
+
+#define SBSDIO_NUM_FUNCTION 3 /* as of sdiod rev 0, supports 3 functions */
+
+/* function 1 miscellaneous registers */
+#define SBSDIO_SPROM_CS 0x10000 /* sprom command and status */
+#define SBSDIO_SPROM_INFO 0x10001 /* sprom info register */
+#define SBSDIO_SPROM_DATA_LOW 0x10002 /* sprom indirect access data byte 0 */
+#define SBSDIO_SPROM_DATA_HIGH 0x10003 /* sprom indirect access data byte 1 */
+#define SBSDIO_SPROM_ADDR_LOW 0x10004 /* sprom indirect access addr byte 0 */
+#define SBSDIO_SPROM_ADDR_HIGH 0x10005 /* sprom indirect access addr byte 1 */
+#define SBSDIO_CHIP_CTRL_DATA 0x10006 /* xtal_pu (gpio) output */
+#define SBSDIO_CHIP_CTRL_EN 0x10007 /* xtal_pu (gpio) enable */
+#define SBSDIO_WATERMARK 0x10008 /* rev < 7, watermark for sdio device */
+#define SBSDIO_DEVICE_CTL 0x10009 /* control busy signal generation */
+
+/* registers introduced in rev 8, some content (mask/bits) defs in sbsdpcmdev.h */
+#define SBSDIO_FUNC1_SBADDRLOW 0x1000A /* SB Address Window Low (b15) */
+#define SBSDIO_FUNC1_SBADDRMID 0x1000B /* SB Address Window Mid (b23:b16) */
+#define SBSDIO_FUNC1_SBADDRHIGH 0x1000C /* SB Address Window High (b31:b24) */
+#define SBSDIO_FUNC1_FRAMECTRL 0x1000D /* Frame Control (frame term/abort) */
+#define SBSDIO_FUNC1_CHIPCLKCSR 0x1000E /* ChipClockCSR (ALP/HT ctl/status) */
+#define SBSDIO_FUNC1_SDIOPULLUP 0x1000F /* SdioPullUp (on cmd, d0-d2) */
+#define SBSDIO_FUNC1_WFRAMEBCLO 0x10019 /* Write Frame Byte Count Low */
+#define SBSDIO_FUNC1_WFRAMEBCHI 0x1001A /* Write Frame Byte Count High */
+#define SBSDIO_FUNC1_RFRAMEBCLO 0x1001B /* Read Frame Byte Count Low */
+#define SBSDIO_FUNC1_RFRAMEBCHI 0x1001C /* Read Frame Byte Count High */
+#define SBSDIO_FUNC1_MESBUSYCTRL 0x1001D /* MesBusyCtl at 0x1001D (rev 11) */
+
+#define SBSDIO_FUNC1_MISC_REG_START 0x10000 /* f1 misc register start */
+#define SBSDIO_FUNC1_MISC_REG_LIMIT 0x1001C /* f1 misc register end */
+
+/* Sdio Core Rev 12 */
+#define SBSDIO_FUNC1_WAKEUPCTRL 0x1001E
+#define SBSDIO_FUNC1_WCTRL_ALPWAIT_MASK 0x1
+#define SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT 0
+#define SBSDIO_FUNC1_WCTRL_HTWAIT_MASK 0x2
+#define SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT 1
+#define SBSDIO_FUNC1_SLEEPCSR 0x1001F
+#define SBSDIO_FUNC1_SLEEPCSR_KSO_MASK 0x1
+#define SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT 0
+#define SBSDIO_FUNC1_SLEEPCSR_KSO_EN 1
+#define SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK 0x2
+#define SBSDIO_FUNC1_SLEEPCSR_DEVON_SHIFT 1
+
+/* SBSDIO_SPROM_CS */
+#define SBSDIO_SPROM_IDLE 0
+#define SBSDIO_SPROM_WRITE 1
+#define SBSDIO_SPROM_READ 2
+#define SBSDIO_SPROM_WEN 4
+#define SBSDIO_SPROM_WDS 7
+#define SBSDIO_SPROM_DONE 8
+
+/* SBSDIO_SPROM_INFO */
+#define SROM_SZ_MASK 0x03 /* SROM size, 1: 4k, 2: 16k */
+#define SROM_BLANK 0x04 /* deprecated in corerev 6 */
+#define SROM_OTP 0x80 /* OTP present */
+
+/* SBSDIO_CHIP_CTRL */
+#define SBSDIO_CHIP_CTRL_XTAL 0x01 /* or'd with onchip xtal_pu,
+ * 1: power on oscillator
+ * (for 4318 only)
+ */
+/* SBSDIO_WATERMARK */
+#define SBSDIO_WATERMARK_MASK 0x7f /* number of words - 1 for sd device
+ * to wait before sending data to host
+ */
+
+/* SBSDIO_MESBUSYCTRL */
+/* When the RX FIFO has fewer entries than this and MBE is set,
+ * the busy signal is asserted between data blocks.
+ */
+#define SBSDIO_MESBUSYCTRL_MASK 0x7f
+
+/* SBSDIO_DEVICE_CTL */
+#define SBSDIO_DEVCTL_SETBUSY 0x01 /* 1: device will assert busy signal when
+ * receiving CMD53
+ */
+#define SBSDIO_DEVCTL_SPI_INTR_SYNC 0x02 /* 1: assertion of sdio interrupt is
+ * synchronous to the sdio clock
+ */
+#define SBSDIO_DEVCTL_CA_INT_ONLY 0x04 /* 1: mask all interrupts to host
+ * except the chipActive (rev 8)
+ */
+#define SBSDIO_DEVCTL_PADS_ISO 0x08 /* 1: isolate internal sdio signals, put
+ * external pads in tri-state; requires
+ * sdio bus power cycle to clear (rev 9)
+ */
+#define SBSDIO_DEVCTL_SB_RST_CTL 0x30 /* Force SD->SB reset mapping (rev 11) */
+#define SBSDIO_DEVCTL_RST_CORECTL 0x00 /* Determined by CoreControl bit */
+#define SBSDIO_DEVCTL_RST_BPRESET 0x10 /* Force backplane reset */
+#define SBSDIO_DEVCTL_RST_NOBPRESET 0x20 /* Force no backplane reset */
+
+
+/* SBSDIO_FUNC1_CHIPCLKCSR */
+#define SBSDIO_FORCE_ALP 0x01 /* Force ALP request to backplane */
+#define SBSDIO_FORCE_HT 0x02 /* Force HT request to backplane */
+#define SBSDIO_FORCE_ILP 0x04 /* Force ILP request to backplane */
+#define SBSDIO_ALP_AVAIL_REQ 0x08 /* Make ALP ready (power up xtal) */
+#define SBSDIO_HT_AVAIL_REQ 0x10 /* Make HT ready (power up PLL) */
+#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20 /* Squelch clock requests from HW */
+#define SBSDIO_ALP_AVAIL 0x40 /* Status: ALP is ready */
+#define SBSDIO_HT_AVAIL 0x80 /* Status: HT is ready */
+/* In rev8, actual avail bits followed original docs */
+#define SBSDIO_Rev8_HT_AVAIL 0x40
+#define SBSDIO_Rev8_ALP_AVAIL 0x80
+#define SBSDIO_CSR_MASK 0x1F
+
+#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
+#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+#define SBSDIO_CLKAV(regval, alponly) (SBSDIO_ALPAV(regval) && \
+ (alponly ? 1 : SBSDIO_HTAV(regval)))
+
+/* SBSDIO_FUNC1_SDIOPULLUP */
+#define SBSDIO_PULLUP_D0 0x01 /* Enable D0/MISO pullup */
+#define SBSDIO_PULLUP_D1 0x02 /* Enable D1/INT# pullup */
+#define SBSDIO_PULLUP_D2 0x04 /* Enable D2 pullup */
+#define SBSDIO_PULLUP_CMD 0x08 /* Enable CMD/MOSI pullup */
+#define SBSDIO_PULLUP_ALL 0x0f /* All valid bits */
+
+/* function 1 OCP space */
+#define SBSDIO_SB_OFT_ADDR_MASK 0x07FFF /* sb offset addr is <= 15 bits, 32k */
+#define SBSDIO_SB_OFT_ADDR_LIMIT 0x08000
+#define SBSDIO_SB_ACCESS_2_4B_FLAG 0x08000 /* with b15, maps to 32-bit SB access */
+
+/* some duplication with sbsdpcmdev.h here */
+/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */
+#define SBSDIO_SBADDRLOW_MASK 0x80 /* Valid bits in SBADDRLOW */
+#define SBSDIO_SBADDRMID_MASK 0xff /* Valid bits in SBADDRMID */
+#define SBSDIO_SBADDRHIGH_MASK 0xffU /* Valid bits in SBADDRHIGH */
+#define SBSDIO_SBWINDOW_MASK 0xffff8000 /* Address bits from SBADDR regs */
+
+/* direct(mapped) cis space */
+#define SBSDIO_CIS_BASE_COMMON 0x1000 /* MAPPED common CIS address */
+#define SBSDIO_CIS_SIZE_LIMIT 0x200 /* maximum bytes in one CIS */
+#define SBSDIO_OTP_CIS_SIZE_LIMIT 0x078 /* maximum bytes OTP CIS */
+
+#define SBSDIO_CIS_OFT_ADDR_MASK 0x1FFFF /* cis offset addr is < 17 bits */
+
+#define SBSDIO_CIS_MANFID_TUPLE_LEN 6 /* manfid tuple length, include tuple,
+ * link bytes
+ */
+
+/* indirect cis access (in sprom) */
+#define SBSDIO_SPROM_CIS_OFFSET 0x8 /* 8 control bytes first, CIS starts from
+ * 8th byte
+ */
+
+#define SBSDIO_BYTEMODE_DATALEN_MAX 64 /* sdio byte mode: maximum length of one
+ * data command
+ */
+
+#define SBSDIO_CORE_ADDR_MASK 0x1FFFF /* sdio core function one address mask */
+
+#endif /* _SBSDIO_H */
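The ChipClockCSR handshake implied by the SBSDIO_FUNC1_CHIPCLKCSR bits is: the host writes an ALP or HT request bit, then polls the same register until the matching avail bit is set, which SBSDIO_CLKAV() tests. A hedged sketch, assuming hypothetical f1_read8()/f1_write8() accessors for SDIO function 1 register space and the driver's uint8 typedef:

static int request_ht_clock(void)
{
	int spins = 1000;	/* arbitrary retry bound for this sketch */
	uint8 csr;

	f1_write8(SBSDIO_FUNC1_CHIPCLKCSR, SBSDIO_HT_AVAIL_REQ);
	while (spins--) {
		csr = f1_read8(SBSDIO_FUNC1_CHIPCLKCSR);
		if (SBSDIO_CLKAV(csr, 0))	/* 0: require HT, not just ALP */
			return 0;
	}
	return -1;	/* clock did not become available */
}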
diff --git a/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h b/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h
new file mode 100644
index 000000000000..2c5953568051
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h
@@ -0,0 +1,293 @@
+/*
+ * Broadcom SiliconBackplane SDIO/PCMCIA hardware-specific
+ * device core support
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbsdpcmdev.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _sbsdpcmdev_h_
+#define _sbsdpcmdev_h_
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif /* PAD */
+
+
+typedef volatile struct {
+ dma64regs_t xmt; /* dma tx */
+ uint32 PAD[2];
+ dma64regs_t rcv; /* dma rx */
+ uint32 PAD[2];
+} dma64p_t;
+
+/* dma64 sdiod corerev >= 1 */
+typedef volatile struct {
+ dma64p_t dma64regs[2];
+ dma64diag_t dmafifo; /* DMA Diagnostic Regs, 0x280-0x28c */
+ uint32 PAD[92];
+} sdiodma64_t;
+
+/* dma32 sdiod corerev == 0 */
+typedef volatile struct {
+ dma32regp_t dma32regs[2]; /* dma tx & rx, 0x200-0x23c */
+ dma32diag_t dmafifo; /* DMA Diagnostic Regs, 0x240-0x24c */
+ uint32 PAD[108];
+} sdiodma32_t;
+
+/* dma32 regs for pcmcia core */
+typedef volatile struct {
+ dma32regp_t dmaregs; /* DMA Regs, 0x200-0x21c, rev8 */
+ dma32diag_t dmafifo; /* DMA Diagnostic Regs, 0x220-0x22c */
+ uint32 PAD[116];
+} pcmdma32_t;
+
+/* core registers */
+typedef volatile struct {
+ uint32 corecontrol; /* CoreControl, 0x000, rev8 */
+ uint32 corestatus; /* CoreStatus, 0x004, rev8 */
+ uint32 PAD[1];
+ uint32 biststatus; /* BistStatus, 0x00c, rev8 */
+
+ /* PCMCIA access */
+ uint16 pcmciamesportaladdr; /* PcmciaMesPortalAddr, 0x010, rev8 */
+ uint16 PAD[1];
+ uint16 pcmciamesportalmask; /* PcmciaMesPortalMask, 0x014, rev8 */
+ uint16 PAD[1];
+ uint16 pcmciawrframebc; /* PcmciaWrFrameBC, 0x018, rev8 */
+ uint16 PAD[1];
+ uint16 pcmciaunderflowtimer; /* PcmciaUnderflowTimer, 0x01c, rev8 */
+ uint16 PAD[1];
+
+ /* interrupt */
+ uint32 intstatus; /* IntStatus, 0x020, rev8 */
+ uint32 hostintmask; /* IntHostMask, 0x024, rev8 */
+ uint32 intmask; /* IntSbMask, 0x028, rev8 */
+ uint32 sbintstatus; /* SBIntStatus, 0x02c, rev8 */
+ uint32 sbintmask; /* SBIntMask, 0x030, rev8 */
+ uint32 funcintmask; /* SDIO Function Interrupt Mask, SDIO rev4 */
+ uint32 PAD[2];
+ uint32 tosbmailbox; /* ToSBMailbox, 0x040, rev8 */
+ uint32 tohostmailbox; /* ToHostMailbox, 0x044, rev8 */
+ uint32 tosbmailboxdata; /* ToSbMailboxData, 0x048, rev8 */
+ uint32 tohostmailboxdata; /* ToHostMailboxData, 0x04c, rev8 */
+
+ /* synchronized access to registers in SDIO clock domain */
+ uint32 sdioaccess; /* SdioAccess, 0x050, rev8 */
+ uint32 PAD[3];
+
+ /* PCMCIA frame control */
+ uint8 pcmciaframectrl; /* pcmciaFrameCtrl, 0x060, rev8 */
+ uint8 PAD[3];
+ uint8 pcmciawatermark; /* pcmciaWaterMark, 0x064, rev8 */
+ uint8 PAD[155];
+
+ /* interrupt batching control */
+ uint32 intrcvlazy; /* IntRcvLazy, 0x100, rev8 */
+ uint32 PAD[3];
+
+ /* counters */
+ uint32 cmd52rd; /* Cmd52RdCount, 0x110, rev8, SDIO: cmd52 reads */
+ uint32 cmd52wr; /* Cmd52WrCount, 0x114, rev8, SDIO: cmd52 writes */
+ uint32 cmd53rd; /* Cmd53RdCount, 0x118, rev8, SDIO: cmd53 reads */
+ uint32 cmd53wr; /* Cmd53WrCount, 0x11c, rev8, SDIO: cmd53 writes */
+ uint32 abort; /* AbortCount, 0x120, rev8, SDIO: aborts */
+ uint32 datacrcerror; /* DataCrcErrorCount, 0x124, rev8, SDIO: frames w/bad CRC */
+ uint32 rdoutofsync; /* RdOutOfSyncCount, 0x128, rev8, SDIO/PCMCIA: Rd Frm OOS */
+ uint32 wroutofsync; /* WrOutOfSyncCount, 0x12c, rev8, SDIO/PCMCIA: Wr Frm OOS */
+ uint32 writebusy; /* WriteBusyCount, 0x130, rev8, SDIO: dev asserted "busy" */
+ uint32 readwait; /* ReadWaitCount, 0x134, rev8, SDIO: read: no data avail */
+ uint32 readterm; /* ReadTermCount, 0x138, rev8, SDIO: rd frm terminates */
+ uint32 writeterm; /* WriteTermCount, 0x13c, rev8, SDIO: wr frm terminates */
+ uint32 PAD[40];
+ uint32 clockctlstatus; /* ClockCtlStatus, 0x1e0, rev8 */
+ uint32 PAD[7];
+
+ /* DMA engines */
+ volatile union {
+ pcmdma32_t pcm32;
+ sdiodma32_t sdiod32;
+ sdiodma64_t sdiod64;
+ } dma;
+
+ /* SDIO/PCMCIA CIS region */
+ char cis[512]; /* 512 byte CIS, 0x400-0x5ff, rev6 */
+
+ /* PCMCIA function control registers */
+ char pcmciafcr[256]; /* PCMCIA FCR, 0x600-6ff, rev6 */
+ uint16 PAD[55];
+
+ /* PCMCIA backplane access */
+ uint16 backplanecsr; /* BackplaneCSR, 0x76E, rev6 */
+ uint16 backplaneaddr0; /* BackplaneAddr0, 0x770, rev6 */
+ uint16 backplaneaddr1; /* BackplaneAddr1, 0x772, rev6 */
+ uint16 backplaneaddr2; /* BackplaneAddr2, 0x774, rev6 */
+ uint16 backplaneaddr3; /* BackplaneAddr3, 0x776, rev6 */
+ uint16 backplanedata0; /* BackplaneData0, 0x778, rev6 */
+ uint16 backplanedata1; /* BackplaneData1, 0x77a, rev6 */
+ uint16 backplanedata2; /* BackplaneData2, 0x77c, rev6 */
+ uint16 backplanedata3; /* BackplaneData3, 0x77e, rev6 */
+ uint16 PAD[31];
+
+ /* sprom "size" & "blank" info */
+ uint16 spromstatus; /* SPROMStatus, 0x7BE, rev2 */
+ uint32 PAD[464];
+
+ /* Sonics SiliconBackplane registers */
+ sbconfig_t sbconfig; /* SbConfig Regs, 0xf00-0xfff, rev8 */
+} sdpcmd_regs_t;
+
+/* corecontrol */
+#define CC_CISRDY (1 << 0) /* CIS Ready */
+#define CC_BPRESEN (1 << 1) /* CCCR RES signal causes backplane reset */
+#define CC_F2RDY (1 << 2) /* set CCCR IOR2 bit */
+#define CC_CLRPADSISO (1 << 3) /* clear SDIO pads isolation bit (rev 11) */
+#define CC_XMTDATAAVAIL_MODE (1 << 4) /* data avail generates an interrupt */
+#define CC_XMTDATAAVAIL_CTRL (1 << 5) /* data avail interrupt ctrl */
+
+/* corestatus */
+#define CS_PCMCIAMODE (1 << 0) /* Device Mode; 0=SDIO, 1=PCMCIA */
+#define CS_SMARTDEV (1 << 1) /* 1=smartDev enabled */
+#define CS_F2ENABLED (1 << 2) /* 1=host has enabled the device */
+
+#define PCMCIA_MES_PA_MASK 0x7fff /* PCMCIA Message Portal Address Mask */
+#define PCMCIA_MES_PM_MASK 0x7fff /* PCMCIA Message Portal Mask Mask */
+#define PCMCIA_WFBC_MASK 0xffff /* PCMCIA Write Frame Byte Count Mask */
+#define PCMCIA_UT_MASK 0x07ff /* PCMCIA Underflow Timer Mask */
+
+/* intstatus */
+#define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */
+#define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */
+#define I_SMB_SW2 (1 << 2) /* To SB Mail S/W interrupt 2 */
+#define I_SMB_SW3 (1 << 3) /* To SB Mail S/W interrupt 3 */
+#define I_SMB_SW_MASK 0x0000000f /* To SB Mail S/W interrupts mask */
+#define I_SMB_SW_SHIFT 0 /* To SB Mail S/W interrupts shift */
+#define I_HMB_SW0 (1 << 4) /* To Host Mail S/W interrupt 0 */
+#define I_HMB_SW1 (1 << 5) /* To Host Mail S/W interrupt 1 */
+#define I_HMB_SW2 (1 << 6) /* To Host Mail S/W interrupt 2 */
+#define I_HMB_SW3 (1 << 7) /* To Host Mail S/W interrupt 3 */
+#define I_HMB_SW_MASK 0x000000f0 /* To Host Mail S/W interrupts mask */
+#define I_HMB_SW_SHIFT 4 /* To Host Mail S/W interrupts shift */
+#define I_WR_OOSYNC (1 << 8) /* Write Frame Out Of Sync */
+#define I_RD_OOSYNC (1 << 9) /* Read Frame Out Of Sync */
+#define I_PC (1 << 10) /* descriptor error */
+#define I_PD (1 << 11) /* data error */
+#define I_DE (1 << 12) /* Descriptor protocol Error */
+#define I_RU (1 << 13) /* Receive descriptor Underflow */
+#define I_RO (1 << 14) /* Receive fifo Overflow */
+#define I_XU (1 << 15) /* Transmit fifo Underflow */
+#define I_RI (1 << 16) /* Receive Interrupt */
+#define I_BUSPWR (1 << 17) /* SDIO Bus Power Change (rev 9) */
+#define I_XMTDATA_AVAIL (1 << 23) /* bits in fifo */
+#define I_XI (1 << 24) /* Transmit Interrupt */
+#define I_RF_TERM (1 << 25) /* Read Frame Terminate */
+#define I_WF_TERM (1 << 26) /* Write Frame Terminate */
+#define I_PCMCIA_XU (1 << 27) /* PCMCIA Transmit FIFO Underflow */
+#define I_SBINT (1 << 28) /* sbintstatus Interrupt */
+#define I_CHIPACTIVE (1 << 29) /* chip transitioned from doze to active state */
+#define I_SRESET (1 << 30) /* CCCR RES interrupt */
+#define I_IOE2 (1U << 31) /* CCCR IOE2 Bit Changed */
+#define I_ERRORS (I_PC | I_PD | I_DE | I_RU | I_RO | I_XU) /* DMA Errors */
+#define I_DMA (I_RI | I_XI | I_ERRORS)
+
+/* sbintstatus */
+#define I_SB_SERR (1 << 8) /* Backplane SError (write) */
+#define I_SB_RESPERR (1 << 9) /* Backplane Response Error (read) */
+#define I_SB_SPROMERR (1 << 10) /* Error accessing the sprom */
+
+/* sdioaccess */
+#define SDA_DATA_MASK 0x000000ff /* Read/Write Data Mask */
+#define SDA_ADDR_MASK 0x000fff00 /* Read/Write Address Mask */
+#define SDA_ADDR_SHIFT 8 /* Read/Write Address Shift */
+#define SDA_WRITE 0x01000000 /* Write bit */
+#define SDA_READ 0x00000000 /* Write bit cleared for Read */
+#define SDA_BUSY 0x80000000 /* Busy bit */
+
+/* sdioaccess-accessible register address spaces */
+#define SDA_CCCR_SPACE 0x000 /* sdioAccess CCCR register space */
+#define SDA_F1_FBR_SPACE 0x100 /* sdioAccess F1 FBR register space */
+#define SDA_F2_FBR_SPACE 0x200 /* sdioAccess F2 FBR register space */
+#define SDA_F1_REG_SPACE 0x300 /* sdioAccess F1 core-specific register space */
+
+/* SDA_F1_REG_SPACE sdioaccess-accessible F1 reg space register offsets */
+#define SDA_CHIPCONTROLDATA 0x006 /* ChipControlData */
+#define SDA_CHIPCONTROLENAB 0x007 /* ChipControlEnable */
+#define SDA_F2WATERMARK 0x008 /* Function 2 Watermark */
+#define SDA_DEVICECONTROL 0x009 /* DeviceControl */
+#define SDA_SBADDRLOW 0x00a /* SbAddrLow */
+#define SDA_SBADDRMID 0x00b /* SbAddrMid */
+#define SDA_SBADDRHIGH 0x00c /* SbAddrHigh */
+#define SDA_FRAMECTRL 0x00d /* FrameCtrl */
+#define SDA_CHIPCLOCKCSR 0x00e /* ChipClockCSR */
+#define SDA_SDIOPULLUP 0x00f /* SdioPullUp */
+#define SDA_SDIOWRFRAMEBCLOW 0x019 /* SdioWrFrameBCLow */
+#define SDA_SDIOWRFRAMEBCHIGH 0x01a /* SdioWrFrameBCHigh */
+#define SDA_SDIORDFRAMEBCLOW 0x01b /* SdioRdFrameBCLow */
+#define SDA_SDIORDFRAMEBCHIGH 0x01c /* SdioRdFrameBCHigh */
+
+/* SDA_F2WATERMARK */
+#define SDA_F2WATERMARK_MASK 0x7f /* F2Watermark Mask */
+
+/* SDA_SBADDRLOW */
+#define SDA_SBADDRLOW_MASK 0x80 /* SbAddrLow Mask */
+
+/* SDA_SBADDRMID */
+#define SDA_SBADDRMID_MASK 0xff /* SbAddrMid Mask */
+
+/* SDA_SBADDRHIGH */
+#define SDA_SBADDRHIGH_MASK 0xff /* SbAddrHigh Mask */
+
+/* SDA_FRAMECTRL */
+#define SFC_RF_TERM (1 << 0) /* Read Frame Terminate */
+#define SFC_WF_TERM (1 << 1) /* Write Frame Terminate */
+#define SFC_CRC4WOOS (1 << 2) /* HW reports CRC error for write out of sync */
+#define SFC_ABORTALL (1 << 3) /* Abort cancels all in-progress frames */
+
+/* pcmciaframectrl */
+#define PFC_RF_TERM (1 << 0) /* Read Frame Terminate */
+#define PFC_WF_TERM (1 << 1) /* Write Frame Terminate */
+
+/* intrcvlazy */
+#define IRL_TO_MASK 0x00ffffff /* timeout */
+#define IRL_FC_MASK 0xff000000 /* frame count */
+#define IRL_FC_SHIFT 24 /* frame count */
+
+/* rx header */
+typedef volatile struct {
+ uint16 len;
+ uint16 flags;
+} sdpcmd_rxh_t;
+
+/* rx header flags */
+#define RXF_CRC 0x0001 /* CRC error detected */
+#define RXF_WOOS 0x0002 /* write frame out of sync */
+#define RXF_WF_TERM 0x0004 /* write frame terminated */
+#define RXF_ABORT 0x0008 /* write frame aborted */
+#define RXF_DISCARD (RXF_CRC | RXF_WOOS | RXF_WF_TERM | RXF_ABORT) /* bad frame */
+
+/* HW frame tag */
+#define SDPCM_FRAMETAG_LEN 4 /* HW frametag: 2 bytes len, 2 bytes check val */
+
+#endif /* _sbsdpcmdev_h_ */
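The intstatus groupings above (I_ERRORS, I_DMA) are what an interrupt handler would typically test first. Two small helpers, assuming the driver's uint32 typedef, sketch how a raw intstatus value splits into error and completion events:

static int sdpcm_has_dma_error(uint32 intstatus)
{
	/* I_ERRORS groups I_PC, I_PD, I_DE, I_RU, I_RO and I_XU */
	return (intstatus & I_ERRORS) != 0;
}

static int sdpcm_has_dma_done(uint32 intstatus)
{
	/* receive or transmit DMA completion */
	return (intstatus & (I_RI | I_XI)) != 0;
}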
diff --git a/drivers/net/wireless/bcmdhd/include/sbsocram.h b/drivers/net/wireless/bcmdhd/include/sbsocram.h
new file mode 100644
index 000000000000..852d1151bc5a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbsocram.h
@@ -0,0 +1,193 @@
+/*
+ * BCM47XX Sonics SiliconBackplane embedded ram core
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbsocram.h 271781 2011-07-13 20:00:06Z $
+ */
+
+#ifndef _SBSOCRAM_H
+#define _SBSOCRAM_H
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif
+
+
+typedef volatile struct sbsocramregs {
+ uint32 coreinfo;
+ uint32 bwalloc;
+ uint32 extracoreinfo;
+ uint32 biststat;
+ uint32 bankidx;
+ uint32 standbyctrl;
+
+ uint32 errlogstatus;
+ uint32 errlogaddr;
+
+ uint32 cambankidx;
+ uint32 cambankstandbyctrl;
+ uint32 cambankpatchctrl;
+ uint32 cambankpatchtblbaseaddr;
+ uint32 cambankcmdreg;
+ uint32 cambankdatareg;
+ uint32 cambankmaskreg;
+ uint32 PAD[1];
+ uint32 bankinfo;
+ uint32 PAD[15];
+ uint32 extmemconfig;
+ uint32 extmemparitycsr;
+ uint32 extmemparityerrdata;
+ uint32 extmemparityerrcnt;
+ uint32 extmemwrctrlandsize;
+ uint32 PAD[84];
+ uint32 workaround;
+ uint32 pwrctl;
+ uint32 PAD[133];
+ uint32 sr_control;
+ uint32 sr_status;
+ uint32 sr_address;
+ uint32 sr_data;
+} sbsocramregs_t;
+
+#endif
+
+
+#define SR_COREINFO 0x00
+#define SR_BWALLOC 0x04
+#define SR_BISTSTAT 0x0c
+#define SR_BANKINDEX 0x10
+#define SR_BANKSTBYCTL 0x14
+#define SR_PWRCTL 0x1e8
+
+
+#define SRCI_PT_MASK 0x00070000
+#define SRCI_PT_SHIFT 16
+
+#define SRCI_PT_OCP_OCP 0
+#define SRCI_PT_AXI_OCP 1
+#define SRCI_PT_ARM7AHB_OCP 2
+#define SRCI_PT_CM3AHB_OCP 3
+#define SRCI_PT_AXI_AXI 4
+#define SRCI_PT_AHB_AXI 5
+
+#define SRCI_LSS_MASK 0x00f00000
+#define SRCI_LSS_SHIFT 20
+#define SRCI_LRS_MASK 0x0f000000
+#define SRCI_LRS_SHIFT 24
+
+
+#define SRCI_MS0_MASK 0xf
+#define SR_MS0_BASE 16
+
+
+#define SRCI_ROMNB_MASK 0xf000
+#define SRCI_ROMNB_SHIFT 12
+#define SRCI_ROMBSZ_MASK 0xf00
+#define SRCI_ROMBSZ_SHIFT 8
+#define SRCI_SRNB_MASK 0xf0
+#define SRCI_SRNB_SHIFT 4
+#define SRCI_SRBSZ_MASK 0xf
+#define SRCI_SRBSZ_SHIFT 0
+
+#define SR_BSZ_BASE 14
+
+
+#define SRSC_SBYOVR_MASK 0x80000000
+#define SRSC_SBYOVR_SHIFT 31
+#define SRSC_SBYOVRVAL_MASK 0x60000000
+#define SRSC_SBYOVRVAL_SHIFT 29
+#define SRSC_SBYEN_MASK 0x01000000
+#define SRSC_SBYEN_SHIFT 24
+
+
+#define SRPC_PMU_STBYDIS_MASK 0x00000010
+#define SRPC_PMU_STBYDIS_SHIFT 4
+#define SRPC_STBYOVRVAL_MASK 0x00000008
+#define SRPC_STBYOVRVAL_SHIFT 3
+#define SRPC_STBYOVR_MASK 0x00000007
+#define SRPC_STBYOVR_SHIFT 0
+
+
+#define SRECC_NUM_BANKS_MASK 0x000000F0
+#define SRECC_NUM_BANKS_SHIFT 4
+#define SRECC_BANKSIZE_MASK 0x0000000F
+#define SRECC_BANKSIZE_SHIFT 0
+
+#define SRECC_BANKSIZE(value) (1 << (value))
+
+
+#define SRCBPC_PATCHENABLE 0x80000000
+
+#define SRP_ADDRESS 0x0001FFFC
+#define SRP_VALID 0x8000
+
+
+#define SRCMD_WRITE 0x00020000
+#define SRCMD_READ 0x00010000
+#define SRCMD_DONE 0x80000000
+
+#define SRCMD_DONE_DLY 1000
+
+
+#define SOCRAM_BANKINFO_SZMASK 0x7f
+#define SOCRAM_BANKIDX_ROM_MASK 0x100
+
+#define SOCRAM_BANKIDX_MEMTYPE_SHIFT 8
+
+#define SOCRAM_MEMTYPE_RAM 0
+#define SOCRAM_MEMTYPE_R0M 1
+#define SOCRAM_MEMTYPE_DEVRAM 2
+
+#define SOCRAM_BANKINFO_REG 0x40
+#define SOCRAM_BANKIDX_REG 0x10
+#define SOCRAM_BANKINFO_STDBY_MASK 0x400
+#define SOCRAM_BANKINFO_STDBY_TIMER 0x800
+
+
+#define SOCRAM_BANKINFO_DEVRAMSEL_SHIFT 13
+#define SOCRAM_BANKINFO_DEVRAMSEL_MASK 0x2000
+#define SOCRAM_BANKINFO_DEVRAMPRO_SHIFT 14
+#define SOCRAM_BANKINFO_DEVRAMPRO_MASK 0x4000
+#define SOCRAM_BANKINFO_SLPSUPP_SHIFT 15
+#define SOCRAM_BANKINFO_SLPSUPP_MASK 0x8000
+#define SOCRAM_BANKINFO_RETNTRAM_SHIFT 16
+#define SOCRAM_BANKINFO_RETNTRAM_MASK 0x00010000
+#define SOCRAM_BANKINFO_PDASZ_SHIFT 17
+#define SOCRAM_BANKINFO_PDASZ_MASK 0x003E0000
+#define SOCRAM_BANKINFO_DEVRAMREMAP_SHIFT 24
+#define SOCRAM_BANKINFO_DEVRAMREMAP_MASK 0x01000000
+
+
+#define SOCRAM_DEVRAMBANK_MASK 0xF000
+#define SOCRAM_DEVRAMBANK_SHIFT 12
+
+
+#define SOCRAM_BANKINFO_SZBASE 8192
+#define SOCRAM_BANKSIZE_SHIFT 13
+
+
+#endif
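A plausible reading of the bankinfo size field above is the bank size in SOCRAM_BANKINFO_SZBASE (8 KB) units, minus one. A hedged sketch of converting it back to bytes under that assumption, using the driver's uint32 typedef:

static uint32 socram_bank_bytes(uint32 bankinfo)
{
	/* assumption: size field == (bank size / 8 KB) - 1 */
	return ((bankinfo & SOCRAM_BANKINFO_SZMASK) + 1) * SOCRAM_BANKINFO_SZBASE;
}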
diff --git a/drivers/net/wireless/bcmdhd/include/sdio.h b/drivers/net/wireless/bcmdhd/include/sdio.h
new file mode 100644
index 000000000000..b8eee1ffb405
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sdio.h
@@ -0,0 +1,617 @@
+/*
+ * SDIO spec header file
+ * Protocol and standard (common) device definitions
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdio.h 308973 2012-01-18 04:19:34Z $
+ */
+
+#ifndef _SDIO_H
+#define _SDIO_H
+
+
+/* CCCR structure for function 0 */
+typedef volatile struct {
+ uint8 cccr_sdio_rev; /* RO, cccr and sdio revision */
+ uint8 sd_rev; /* RO, sd spec revision */
+ uint8 io_en; /* I/O enable */
+ uint8 io_rdy; /* I/O ready reg */
+ uint8 intr_ctl; /* Master and per function interrupt enable control */
+ uint8 intr_status; /* RO, interrupt pending status */
+ uint8 io_abort; /* read/write abort or reset all functions */
+ uint8 bus_inter; /* bus interface control */
+ uint8 capability; /* RO, card capability */
+
+ uint8 cis_base_low; /* 0x9 RO, common CIS base address, LSB */
+ uint8 cis_base_mid;
+ uint8 cis_base_high; /* 0xB RO, common CIS base address, MSB */
+
+ /* suspend/resume registers */
+ uint8 bus_suspend; /* 0xC */
+ uint8 func_select; /* 0xD */
+ uint8 exec_flag; /* 0xE */
+ uint8 ready_flag; /* 0xF */
+
+ uint8 fn0_blk_size[2]; /* 0x10(LSB), 0x11(MSB) */
+
+ uint8 power_control; /* 0x12 (SDIO version 1.10) */
+
+ uint8 speed_control; /* 0x13 */
+} sdio_regs_t;
+
+/* SDIO Device CCCR offsets */
+#define SDIOD_CCCR_REV 0x00
+#define SDIOD_CCCR_SDREV 0x01
+#define SDIOD_CCCR_IOEN 0x02
+#define SDIOD_CCCR_IORDY 0x03
+#define SDIOD_CCCR_INTEN 0x04
+#define SDIOD_CCCR_INTPEND 0x05
+#define SDIOD_CCCR_IOABORT 0x06
+#define SDIOD_CCCR_BICTRL 0x07
+#define SDIOD_CCCR_CAPABLITIES 0x08
+#define SDIOD_CCCR_CISPTR_0 0x09
+#define SDIOD_CCCR_CISPTR_1 0x0A
+#define SDIOD_CCCR_CISPTR_2 0x0B
+#define SDIOD_CCCR_BUSSUSP 0x0C
+#define SDIOD_CCCR_FUNCSEL 0x0D
+#define SDIOD_CCCR_EXECFLAGS 0x0E
+#define SDIOD_CCCR_RDYFLAGS 0x0F
+#define SDIOD_CCCR_BLKSIZE_0 0x10
+#define SDIOD_CCCR_BLKSIZE_1 0x11
+#define SDIOD_CCCR_POWER_CONTROL 0x12
+#define SDIOD_CCCR_SPEED_CONTROL 0x13
+#define SDIOD_CCCR_UHSI_SUPPORT 0x14
+#define SDIOD_CCCR_DRIVER_STRENGTH 0x15
+#define SDIOD_CCCR_INTR_EXTN 0x16
+
+/* Broadcom extensions (corerev >= 1) */
+#define SDIOD_CCCR_BRCM_CARDCAP 0xf0
+#define SDIOD_CCCR_BRCM_CARDCAP_CMD14_SUPPORT 0x02
+#define SDIOD_CCCR_BRCM_CARDCAP_CMD14_EXT 0x04
+#define SDIOD_CCCR_BRCM_CARDCAP_CMD_NODEC 0x08
+#define SDIOD_CCCR_BRCM_CARDCTL 0xf1
+#define SDIOD_CCCR_BRCM_SEPINT 0xf2
+
+/* cccr_sdio_rev */
+#define SDIO_REV_SDIOID_MASK 0xf0 /* SDIO spec revision number */
+#define SDIO_REV_CCCRID_MASK 0x0f /* CCCR format version number */
+
+/* sd_rev */
+#define SD_REV_PHY_MASK 0x0f /* SD format version number */
+
+/* io_en */
+#define SDIO_FUNC_ENABLE_1 0x02 /* function 1 I/O enable */
+#define SDIO_FUNC_ENABLE_2 0x04 /* function 2 I/O enable */
+
+/* io_rdys */
+#define SDIO_FUNC_READY_1 0x02 /* function 1 I/O ready */
+#define SDIO_FUNC_READY_2 0x04 /* function 2 I/O ready */
+
+/* intr_ctl */
+#define INTR_CTL_MASTER_EN 0x1 /* interrupt enable master */
+#define INTR_CTL_FUNC1_EN 0x2 /* interrupt enable for function 1 */
+#define INTR_CTL_FUNC2_EN 0x4 /* interrupt enable for function 2 */
+
+/* intr_status */
+#define INTR_STATUS_FUNC1 0x2 /* interrupt pending for function 1 */
+#define INTR_STATUS_FUNC2 0x4 /* interrupt pending for function 2 */
+
+/* io_abort */
+#define IO_ABORT_RESET_ALL 0x08 /* I/O card reset */
+#define IO_ABORT_FUNC_MASK 0x07 /* abort selection: function x */
+
+/* bus_inter */
+#define BUS_CARD_DETECT_DIS 0x80 /* Card Detect disable */
+#define BUS_SPI_CONT_INTR_CAP 0x40 /* support continuous SPI interrupt */
+#define BUS_SPI_CONT_INTR_EN 0x20 /* continuous SPI interrupt enable */
+#define BUS_SD_DATA_WIDTH_MASK 0x03 /* bus width mask */
+#define BUS_SD_DATA_WIDTH_4BIT 0x02 /* bus width 4-bit mode */
+#define BUS_SD_DATA_WIDTH_1BIT 0x00 /* bus width 1-bit mode */
+
+/* capability */
+#define SDIO_CAP_4BLS 0x80 /* 4-bit support for low speed card */
+#define SDIO_CAP_LSC 0x40 /* low speed card */
+#define SDIO_CAP_E4MI 0x20 /* enable interrupt between blocks of data in 4-bit mode */
+#define SDIO_CAP_S4MI 0x10 /* support interrupt between blocks of data in 4-bit mode */
+#define SDIO_CAP_SBS 0x08 /* support suspend/resume */
+#define SDIO_CAP_SRW 0x04 /* support read wait */
+#define SDIO_CAP_SMB 0x02 /* support multi-block transfer */
+#define SDIO_CAP_SDC 0x01 /* Support Direct commands during multi-byte transfer */
+
+/* power_control */
+#define SDIO_POWER_SMPC 0x01 /* supports master power control (RO) */
+#define SDIO_POWER_EMPC 0x02 /* enable master power control (allow > 200mA) (RW) */
+
+/* speed_control (control device entry into high-speed clocking mode) */
+#define SDIO_SPEED_SHS 0x01 /* supports high-speed [clocking] mode (RO) */
+#define SDIO_SPEED_EHS 0x02 /* enable high-speed [clocking] mode (RW) */
+
+/* for setting bus speed in card: 0x13h */
+#define SDIO_BUS_SPEED_UHSISEL_M BITFIELD_MASK(3)
+#define SDIO_BUS_SPEED_UHSISEL_S 1
+
+/* for getting bus speed cap in card: 0x14h */
+#define SDIO_BUS_SPEED_UHSICAP_M BITFIELD_MASK(3)
+#define SDIO_BUS_SPEED_UHSICAP_S 0
+
+/* for getting driver type CAP in card: 0x15h */
+#define SDIO_BUS_DRVR_TYPE_CAP_M BITFIELD_MASK(3)
+#define SDIO_BUS_DRVR_TYPE_CAP_S 0
+
+/* for setting driver type selection in card: 0x15h */
+#define SDIO_BUS_DRVR_TYPE_SEL_M BITFIELD_MASK(2)
+#define SDIO_BUS_DRVR_TYPE_SEL_S 4
+
+/* for getting async int support in card: 0x16h */
+#define SDIO_BUS_ASYNCINT_CAP_M BITFIELD_MASK(1)
+#define SDIO_BUS_ASYNCINT_CAP_S 0
+
+/* for setting async int selection in card: 0x16h */
+#define SDIO_BUS_ASYNCINT_SEL_M BITFIELD_MASK(1)
+#define SDIO_BUS_ASYNCINT_SEL_S 1
+
+/* brcm sepint */
+#define SDIO_SEPINT_MASK 0x01 /* route sdpcmdev intr onto separate pad (chip-specific) */
+#define SDIO_SEPINT_OE 0x02 /* 1 asserts output enable for above pad */
+#define SDIO_SEPINT_ACT_HI 0x04 /* use active high interrupt level instead of active low */
+
+/* FBR structure for function 1-7, FBR addresses and register offsets */
+typedef volatile struct {
+ uint8 devctr; /* device interface, CSA control */
+ uint8 ext_dev; /* extended standard I/O device type code */
+ uint8 pwr_sel; /* power selection support */
+ uint8 PAD[6]; /* reserved */
+
+ uint8 cis_low; /* CIS LSB */
+ uint8 cis_mid;
+ uint8 cis_high; /* CIS MSB */
+ uint8 csa_low; /* code storage area, LSB */
+ uint8 csa_mid;
+ uint8 csa_high; /* code storage area, MSB */
+ uint8 csa_dat_win; /* data access window to function */
+
+ uint8 fnx_blk_size[2]; /* block size, little endian */
+} sdio_fbr_t;
+
+/* Maximum number of I/O funcs */
+#define SDIOD_MAX_FUNCS 8
+#define SDIOD_MAX_IOFUNCS 7
+
+/* SDIO Device FBR Start Address */
+#define SDIOD_FBR_STARTADDR 0x100
+
+/* SDIO Device FBR Size */
+#define SDIOD_FBR_SIZE 0x100
+
+/* Macro to calculate FBR register base */
+#define SDIOD_FBR_BASE(n) ((n) * 0x100)
+
+/* Function register offsets */
+#define SDIOD_FBR_DEVCTR 0x00 /* basic info for function */
+#define SDIOD_FBR_EXT_DEV 0x01 /* extended I/O device code */
+#define SDIOD_FBR_PWR_SEL 0x02 /* power selection bits */
+
+/* SDIO Function CIS ptr offset */
+#define SDIOD_FBR_CISPTR_0 0x09
+#define SDIOD_FBR_CISPTR_1 0x0A
+#define SDIOD_FBR_CISPTR_2 0x0B
+
+/* Code Storage Area pointer */
+#define SDIOD_FBR_CSA_ADDR_0 0x0C
+#define SDIOD_FBR_CSA_ADDR_1 0x0D
+#define SDIOD_FBR_CSA_ADDR_2 0x0E
+#define SDIOD_FBR_CSA_DATA 0x0F
+
+/* SDIO Function I/O Block Size */
+#define SDIOD_FBR_BLKSIZE_0 0x10
+#define SDIOD_FBR_BLKSIZE_1 0x11
+
+/* devctr */
+#define SDIOD_FBR_DEVCTR_DIC 0x0f /* device interface code */
+#define SDIOD_FBR_DECVTR_CSA 0x40 /* CSA support flag */
+#define SDIOD_FBR_DEVCTR_CSA_EN 0x80 /* CSA enabled */
+/* interface codes */
+#define SDIOD_DIC_NONE 0 /* SDIO standard interface is not supported */
+#define SDIOD_DIC_UART 1
+#define SDIOD_DIC_BLUETOOTH_A 2
+#define SDIOD_DIC_BLUETOOTH_B 3
+#define SDIOD_DIC_GPS 4
+#define SDIOD_DIC_CAMERA 5
+#define SDIOD_DIC_PHS 6
+#define SDIOD_DIC_WLAN 7
+#define SDIOD_DIC_EXT 0xf /* extended device interface, read ext_dev register */
+
+/* pwr_sel */
+#define SDIOD_PWR_SEL_SPS 0x01 /* supports power selection */
+#define SDIOD_PWR_SEL_EPS 0x02 /* enable power selection (low-current mode) */
+
+/* misc defines */
+#define SDIO_FUNC_0 0
+#define SDIO_FUNC_1 1
+#define SDIO_FUNC_2 2
+#define SDIO_FUNC_3 3
+#define SDIO_FUNC_4 4
+#define SDIO_FUNC_5 5
+#define SDIO_FUNC_6 6
+#define SDIO_FUNC_7 7
+
+#define SD_CARD_TYPE_UNKNOWN 0 /* bad type or unrecognized */
+#define SD_CARD_TYPE_IO 1 /* IO only card */
+#define SD_CARD_TYPE_MEMORY 2 /* memory only card */
+#define SD_CARD_TYPE_COMBO 3 /* IO and memory combo card */
+
+#define SDIO_MAX_BLOCK_SIZE 2048 /* maximum block size for block mode operation */
+#define SDIO_MIN_BLOCK_SIZE 1 /* minimum block size for block mode operation */
+
+/* Card registers: status bit position */
+#define CARDREG_STATUS_BIT_OUTOFRANGE 31
+#define CARDREG_STATUS_BIT_COMCRCERROR 23
+#define CARDREG_STATUS_BIT_ILLEGALCOMMAND 22
+#define CARDREG_STATUS_BIT_ERROR 19
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE3 12
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE2 11
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE1 10
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE0 9
+#define CARDREG_STATUS_BIT_FUN_NUM_ERROR 4
+
+
+
+#define SD_CMD_GO_IDLE_STATE 0 /* mandatory for SDIO */
+#define SD_CMD_SEND_OPCOND 1
+#define SD_CMD_MMC_SET_RCA 3
+#define SD_CMD_IO_SEND_OP_COND 5 /* mandatory for SDIO */
+#define SD_CMD_SELECT_DESELECT_CARD 7
+#define SD_CMD_SEND_CSD 9
+#define SD_CMD_SEND_CID 10
+#define SD_CMD_STOP_TRANSMISSION 12
+#define SD_CMD_SEND_STATUS 13
+#define SD_CMD_GO_INACTIVE_STATE 15
+#define SD_CMD_SET_BLOCKLEN 16
+#define SD_CMD_READ_SINGLE_BLOCK 17
+#define SD_CMD_READ_MULTIPLE_BLOCK 18
+#define SD_CMD_WRITE_BLOCK 24
+#define SD_CMD_WRITE_MULTIPLE_BLOCK 25
+#define SD_CMD_PROGRAM_CSD 27
+#define SD_CMD_SET_WRITE_PROT 28
+#define SD_CMD_CLR_WRITE_PROT 29
+#define SD_CMD_SEND_WRITE_PROT 30
+#define SD_CMD_ERASE_WR_BLK_START 32
+#define SD_CMD_ERASE_WR_BLK_END 33
+#define SD_CMD_ERASE 38
+#define SD_CMD_LOCK_UNLOCK 42
+#define SD_CMD_IO_RW_DIRECT 52 /* mandatory for SDIO */
+#define SD_CMD_IO_RW_EXTENDED 53 /* mandatory for SDIO */
+#define SD_CMD_APP_CMD 55
+#define SD_CMD_GEN_CMD 56
+#define SD_CMD_READ_OCR 58
+#define SD_CMD_CRC_ON_OFF 59 /* mandatory for SDIO */
+#define SD_ACMD_SD_STATUS 13
+#define SD_ACMD_SEND_NUM_WR_BLOCKS 22
+#define SD_ACMD_SET_WR_BLOCK_ERASE_CNT 23
+#define SD_ACMD_SD_SEND_OP_COND 41
+#define SD_ACMD_SET_CLR_CARD_DETECT 42
+#define SD_ACMD_SEND_SCR 51
+
+/* argument for SD_CMD_IO_RW_DIRECT and SD_CMD_IO_RW_EXTENDED */
+#define SD_IO_OP_READ 0 /* Read_Write: Read */
+#define SD_IO_OP_WRITE 1 /* Read_Write: Write */
+#define SD_IO_RW_NORMAL 0 /* no RAW */
+#define SD_IO_RW_RAW 1 /* RAW */
+#define SD_IO_BYTE_MODE 0 /* Byte Mode */
+#define SD_IO_BLOCK_MODE 1 /* BlockMode */
+#define SD_IO_FIXED_ADDRESS 0 /* fix Address */
+#define SD_IO_INCREMENT_ADDRESS 1 /* IncrementAddress */
+
+/* build SD_CMD_IO_RW_DIRECT Argument */
+#define SDIO_IO_RW_DIRECT_ARG(rw, raw, func, addr, data) \
+ ((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((raw) & 1) << 27) | \
+ (((addr) & 0x1FFFF) << 9) | ((data) & 0xFF))
+
+/* build SD_CMD_IO_RW_EXTENDED Argument */
+#define SDIO_IO_RW_EXTENDED_ARG(rw, blk, func, addr, inc_addr, count) \
+ ((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((blk) & 1) << 27) | \
+ (((inc_addr) & 1) << 26) | (((addr) & 0x1FFFF) << 9) | ((count) & 0x1FF))
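+
+/*
+ * Example (illustrative sketch, not part of the original header): building a
+ * CMD52 argument that writes the value 0x02 to register 0x02 of function 0
+ * with no read-after-write:
+ *
+ *   uint32 arg = SDIO_IO_RW_DIRECT_ARG(SD_IO_OP_WRITE, SD_IO_RW_NORMAL,
+ *                                      SDIO_FUNC_0, 0x02, 0x02);
+ *
+ * and a CMD53 argument for a 64-byte incrementing-address read from
+ * function 1 at address 0x8000 in byte mode:
+ *
+ *   uint32 arg = SDIO_IO_RW_EXTENDED_ARG(SD_IO_OP_READ, SD_IO_BYTE_MODE,
+ *                                        SDIO_FUNC_1, 0x8000,
+ *                                        SD_IO_INCREMENT_ADDRESS, 64);
+ */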
+
+/* SDIO response parameters */
+#define SD_RSP_NO_NONE 0
+#define SD_RSP_NO_1 1
+#define SD_RSP_NO_2 2
+#define SD_RSP_NO_3 3
+#define SD_RSP_NO_4 4
+#define SD_RSP_NO_5 5
+#define SD_RSP_NO_6 6
+
+ /* Modified R6 response (to CMD3) */
+#define SD_RSP_MR6_COM_CRC_ERROR 0x8000
+#define SD_RSP_MR6_ILLEGAL_COMMAND 0x4000
+#define SD_RSP_MR6_ERROR 0x2000
+
+ /* Modified R1 in R4 Response (to CMD5) */
+#define SD_RSP_MR1_SBIT 0x80
+#define SD_RSP_MR1_PARAMETER_ERROR 0x40
+#define SD_RSP_MR1_RFU5 0x20
+#define SD_RSP_MR1_FUNC_NUM_ERROR 0x10
+#define SD_RSP_MR1_COM_CRC_ERROR 0x08
+#define SD_RSP_MR1_ILLEGAL_COMMAND 0x04
+#define SD_RSP_MR1_RFU1 0x02
+#define SD_RSP_MR1_IDLE_STATE 0x01
+
+ /* R5 response (to CMD52 and CMD53) */
+#define SD_RSP_R5_COM_CRC_ERROR 0x80
+#define SD_RSP_R5_ILLEGAL_COMMAND 0x40
+#define SD_RSP_R5_IO_CURRENTSTATE1 0x20
+#define SD_RSP_R5_IO_CURRENTSTATE0 0x10
+#define SD_RSP_R5_ERROR 0x08
+#define SD_RSP_R5_RFU 0x04
+#define SD_RSP_R5_FUNC_NUM_ERROR 0x02
+#define SD_RSP_R5_OUT_OF_RANGE 0x01
+
+#define SD_RSP_R5_ERRBITS 0xCB
+
+
+/* ------------------------------------------------
+ * SDIO Commands and responses
+ *
+ * I/O only commands are:
+ * CMD0, CMD3, CMD5, CMD7, CMD14, CMD15, CMD52, CMD53
+ * ------------------------------------------------
+ */
+
+/* SDIO Commands */
+#define SDIOH_CMD_0 0
+#define SDIOH_CMD_3 3
+#define SDIOH_CMD_5 5
+#define SDIOH_CMD_7 7
+#define SDIOH_CMD_11 11
+#define SDIOH_CMD_14 14
+#define SDIOH_CMD_15 15
+#define SDIOH_CMD_19 19
+#define SDIOH_CMD_52 52
+#define SDIOH_CMD_53 53
+#define SDIOH_CMD_59 59
+
+/* SDIO Command Responses */
+#define SDIOH_RSP_NONE 0
+#define SDIOH_RSP_R1 1
+#define SDIOH_RSP_R2 2
+#define SDIOH_RSP_R3 3
+#define SDIOH_RSP_R4 4
+#define SDIOH_RSP_R5 5
+#define SDIOH_RSP_R6 6
+
+/*
+ * SDIO Response Error flags
+ */
+#define SDIOH_RSP5_ERROR_FLAGS 0xCB
+
+/* ------------------------------------------------
+ * SDIO Command structures. I/O only commands are:
+ *
+ * CMD0, CMD3, CMD5, CMD7, CMD15, CMD52, CMD53
+ * ------------------------------------------------
+ */
+
+#define CMD5_OCR_M BITFIELD_MASK(24)
+#define CMD5_OCR_S 0
+
+#define CMD5_S18R_M BITFIELD_MASK(1)
+#define CMD5_S18R_S 24
+
+#define CMD7_RCA_M BITFIELD_MASK(16)
+#define CMD7_RCA_S 16
+
+#define CMD14_RCA_M BITFIELD_MASK(16)
+#define CMD14_RCA_S 16
+#define CMD14_SLEEP_M BITFIELD_MASK(1)
+#define CMD14_SLEEP_S 15
+
+#define CMD_15_RCA_M BITFIELD_MASK(16)
+#define CMD_15_RCA_S 16
+
+#define CMD52_DATA_M BITFIELD_MASK(8) /* Bits [7:0] - Write Data/Stuff bits of CMD52
+ */
+#define CMD52_DATA_S 0
+#define CMD52_REG_ADDR_M BITFIELD_MASK(17) /* Bits [25:9] - register address */
+#define CMD52_REG_ADDR_S 9
+#define CMD52_RAW_M BITFIELD_MASK(1) /* Bit 27 - Read after Write flag */
+#define CMD52_RAW_S 27
+#define CMD52_FUNCTION_M BITFIELD_MASK(3) /* Bits [30:28] - Function number */
+#define CMD52_FUNCTION_S 28
+#define CMD52_RW_FLAG_M BITFIELD_MASK(1) /* Bit 31 - R/W flag */
+#define CMD52_RW_FLAG_S 31
+
+
+#define CMD53_BYTE_BLK_CNT_M BITFIELD_MASK(9) /* Bits [8:0] - Byte/Block Count of CMD53 */
+#define CMD53_BYTE_BLK_CNT_S 0
+#define CMD53_REG_ADDR_M BITFIELD_MASK(17) /* Bits [25:9] - register address */
+#define CMD53_REG_ADDR_S 9
+#define CMD53_OP_CODE_M BITFIELD_MASK(1) /* Bit 26 - R/W Operation Code */
+#define CMD53_OP_CODE_S 26
+#define CMD53_BLK_MODE_M BITFIELD_MASK(1) /* Bit 27 - Block Mode */
+#define CMD53_BLK_MODE_S 27
+#define CMD53_FUNCTION_M BITFIELD_MASK(3) /* Bits [30:28] - Function number */
+#define CMD53_FUNCTION_S 28
+#define CMD53_RW_FLAG_M BITFIELD_MASK(1) /* Bit 31 - R/W flag */
+#define CMD53_RW_FLAG_S 31
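+
+/*
+ * Usage note (illustrative, not part of the original header): the *_M/*_S
+ * pairs above are mask/shift values meant for generic field accessors.  A
+ * typical helper pair (names assumed here, not defined in this file) is:
+ *
+ *   #define GFIELD(val, field)        (((val) >> field ## _S) & field ## _M)
+ *   #define SFIELD(val, field, bits)  (((val) & ~(field ## _M << field ## _S)) | \
+ *                                      ((uint32)(bits) << field ## _S))
+ *
+ * so that GFIELD(cmd53_arg, CMD53_FUNCTION) extracts the function number
+ * from a CMD53 argument word.
+ */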
+
+/* ------------------------------------------------------
+ * SDIO Command Response structures for SD1 and SD4 modes
+ * -----------------------------------------------------
+ */
+#define RSP4_IO_OCR_M BITFIELD_MASK(24) /* Bits [23:0] - Card's OCR Bits [23:0] */
+#define RSP4_IO_OCR_S 0
+
+#define RSP4_S18A_M BITFIELD_MASK(1) /* Bit 24 - S18A: switching to 1.8V accepted */
+#define RSP4_S18A_S 24
+
+#define RSP4_STUFF_M BITFIELD_MASK(3) /* Bits [26:24] - Stuff bits */
+#define RSP4_STUFF_S 24
+#define RSP4_MEM_PRESENT_M BITFIELD_MASK(1) /* Bit 27 - Memory present */
+#define RSP4_MEM_PRESENT_S 27
+#define RSP4_NUM_FUNCS_M BITFIELD_MASK(3) /* Bits [30:28] - Number of I/O funcs */
+#define RSP4_NUM_FUNCS_S 28
+#define RSP4_CARD_READY_M BITFIELD_MASK(1) /* Bit 31 - SDIO card ready */
+#define RSP4_CARD_READY_S 31
+
+#define RSP6_STATUS_M BITFIELD_MASK(16) /* Bits [15:0] - Card status bits [19,22,23,12:0]
+ */
+#define RSP6_STATUS_S 0
+#define RSP6_IO_RCA_M BITFIELD_MASK(16) /* Bits [31:16] - RCA bits[31-16] */
+#define RSP6_IO_RCA_S 16
+
+#define RSP1_AKE_SEQ_ERROR_M BITFIELD_MASK(1) /* Bit 3 - Authentication seq error */
+#define RSP1_AKE_SEQ_ERROR_S 3
+#define RSP1_APP_CMD_M BITFIELD_MASK(1) /* Bit 5 - Card expects ACMD */
+#define RSP1_APP_CMD_S 5
+#define RSP1_READY_FOR_DATA_M BITFIELD_MASK(1) /* Bit 8 - Ready for data (buff empty) */
+#define RSP1_READY_FOR_DATA_S 8
+#define RSP1_CURR_STATE_M BITFIELD_MASK(4) /* Bits [12:9] - State of card
+ * when Cmd was received
+ */
+#define RSP1_CURR_STATE_S 9
+#define RSP1_EARSE_RESET_M BITFIELD_MASK(1) /* Bit 13 - Erase seq cleared */
+#define RSP1_EARSE_RESET_S 13
+#define RSP1_CARD_ECC_DISABLE_M BITFIELD_MASK(1) /* Bit 14 - Card ECC disabled */
+#define RSP1_CARD_ECC_DISABLE_S 14
+#define RSP1_WP_ERASE_SKIP_M BITFIELD_MASK(1) /* Bit 15 - Partial blocks erased due to W/P */
+#define RSP1_WP_ERASE_SKIP_S 15
+#define RSP1_CID_CSD_OVERW_M BITFIELD_MASK(1) /* Bit 16 - Illegal write to CID or R/O bits
+ * of CSD
+ */
+#define RSP1_CID_CSD_OVERW_S 16
+#define RSP1_ERROR_M BITFIELD_MASK(1) /* Bit 19 - General/Unknown error */
+#define RSP1_ERROR_S 19
+#define RSP1_CC_ERROR_M BITFIELD_MASK(1) /* Bit 20 - Internal Card Control error */
+#define RSP1_CC_ERROR_S 20
+#define RSP1_CARD_ECC_FAILED_M BITFIELD_MASK(1) /* Bit 21 - Card internal ECC failed
+ * to correct data
+ */
+#define RSP1_CARD_ECC_FAILED_S 21
+#define RSP1_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 22 - Cmd not legal for the card state */
+#define RSP1_ILLEGAL_CMD_S 22
+#define RSP1_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 23 - CRC check of previous command failed
+ */
+#define RSP1_COM_CRC_ERROR_S 23
+#define RSP1_LOCK_UNLOCK_FAIL_M BITFIELD_MASK(1) /* Bit 24 - Card lock-unlock Cmd Seq error */
+#define RSP1_LOCK_UNLOCK_FAIL_S 24
+#define RSP1_CARD_LOCKED_M BITFIELD_MASK(1) /* Bit 25 - Card locked by the host */
+#define RSP1_CARD_LOCKED_S 25
+#define RSP1_WP_VIOLATION_M BITFIELD_MASK(1) /* Bit 26 - Attempt to program
+ * write-protected blocks
+ */
+#define RSP1_WP_VIOLATION_S 26
+#define RSP1_ERASE_PARAM_M BITFIELD_MASK(1) /* Bit 27 - Invalid erase blocks */
+#define RSP1_ERASE_PARAM_S 27
+#define RSP1_ERASE_SEQ_ERR_M BITFIELD_MASK(1) /* Bit 28 - Erase Cmd seq error */
+#define RSP1_ERASE_SEQ_ERR_S 28
+#define RSP1_BLK_LEN_ERR_M BITFIELD_MASK(1) /* Bit 29 - Block length error */
+#define RSP1_BLK_LEN_ERR_S 29
+#define RSP1_ADDR_ERR_M BITFIELD_MASK(1) /* Bit 30 - Misaligned address */
+#define RSP1_ADDR_ERR_S 30
+#define RSP1_OUT_OF_RANGE_M BITFIELD_MASK(1) /* Bit 31 - Cmd arg was out of range */
+#define RSP1_OUT_OF_RANGE_S 31
+
+
+#define RSP5_DATA_M BITFIELD_MASK(8) /* Bits [0:7] - data */
+#define RSP5_DATA_S 0
+#define RSP5_FLAGS_M BITFIELD_MASK(8) /* Bit [15:8] - Rsp flags */
+#define RSP5_FLAGS_S 8
+#define RSP5_STUFF_M BITFIELD_MASK(16) /* Bits [31:16] - Stuff bits */
+#define RSP5_STUFF_S 16
+
+/* ----------------------------------------------
+ * SDIO Command Response structures for SPI mode
+ * ----------------------------------------------
+ */
+#define SPIRSP4_IO_OCR_M BITFIELD_MASK(16) /* Bits [15:0] - Card's OCR Bits [23:8] */
+#define SPIRSP4_IO_OCR_S 0
+#define SPIRSP4_STUFF_M BITFIELD_MASK(3) /* Bits [18:16] - Stuff bits */
+#define SPIRSP4_STUFF_S 16
+#define SPIRSP4_MEM_PRESENT_M BITFIELD_MASK(1) /* Bit 19 - Memory present */
+#define SPIRSP4_MEM_PRESENT_S 19
+#define SPIRSP4_NUM_FUNCS_M BITFIELD_MASK(3) /* Bits [22:20] - Number of I/O funcs */
+#define SPIRSP4_NUM_FUNCS_S 20
+#define SPIRSP4_CARD_READY_M BITFIELD_MASK(1) /* Bit 23 - SDIO card ready */
+#define SPIRSP4_CARD_READY_S 23
+#define SPIRSP4_IDLE_STATE_M BITFIELD_MASK(1) /* Bit 24 - idle state */
+#define SPIRSP4_IDLE_STATE_S 24
+#define SPIRSP4_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 26 - Illegal Cmd error */
+#define SPIRSP4_ILLEGAL_CMD_S 26
+#define SPIRSP4_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 27 - COM CRC error */
+#define SPIRSP4_COM_CRC_ERROR_S 27
+#define SPIRSP4_FUNC_NUM_ERROR_M BITFIELD_MASK(1) /* Bit 28 - Function number error
+ */
+#define SPIRSP4_FUNC_NUM_ERROR_S 28
+#define SPIRSP4_PARAM_ERROR_M BITFIELD_MASK(1) /* Bit 30 - Parameter Error Bit */
+#define SPIRSP4_PARAM_ERROR_S 30
+#define SPIRSP4_START_BIT_M BITFIELD_MASK(1) /* Bit 31 - Start Bit */
+#define SPIRSP4_START_BIT_S 31
+
+#define SPIRSP5_DATA_M BITFIELD_MASK(8) /* Bits [23:16] - R/W Data */
+#define SPIRSP5_DATA_S 16
+#define SPIRSP5_IDLE_STATE_M BITFIELD_MASK(1) /* Bit 24 - Idle state */
+#define SPIRSP5_IDLE_STATE_S 24
+#define SPIRSP5_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 26 - Illegal Cmd error */
+#define SPIRSP5_ILLEGAL_CMD_S 26
+#define SPIRSP5_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 27 - COM CRC error */
+#define SPIRSP5_COM_CRC_ERROR_S 27
+#define SPIRSP5_FUNC_NUM_ERROR_M BITFIELD_MASK(1) /* Bit 28 - Function number error
+ */
+#define SPIRSP5_FUNC_NUM_ERROR_S 28
+#define SPIRSP5_PARAM_ERROR_M BITFIELD_MASK(1) /* Bit 30 - Parameter Error Bit */
+#define SPIRSP5_PARAM_ERROR_S 30
+#define SPIRSP5_START_BIT_M BITFIELD_MASK(1) /* Bit 31 - Start Bit */
+#define SPIRSP5_START_BIT_S 31
+
+/* RSP6 card status format; Pg 68 Physical Layer spec v 1.10 */
+#define RSP6STAT_AKE_SEQ_ERROR_M BITFIELD_MASK(1) /* Bit 3 - Authentication seq error
+ */
+#define RSP6STAT_AKE_SEQ_ERROR_S 3
+#define RSP6STAT_APP_CMD_M BITFIELD_MASK(1) /* Bit 5 - Card expects ACMD */
+#define RSP6STAT_APP_CMD_S 5
+#define RSP6STAT_READY_FOR_DATA_M BITFIELD_MASK(1) /* Bit 8 - Ready for data
+ * (buff empty)
+ */
+#define RSP6STAT_READY_FOR_DATA_S 8
+#define RSP6STAT_CURR_STATE_M BITFIELD_MASK(4) /* Bits [12:9] - Card state at
+ * Cmd reception
+ */
+#define RSP6STAT_CURR_STATE_S 9
+#define RSP6STAT_ERROR_M BITFIELD_MASK(1) /* Bit 13 - General/Unknown error Bit 19
+ */
+#define RSP6STAT_ERROR_S 13
+#define RSP6STAT_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 14 - Illegal cmd for
+ * card state Bit 22
+ */
+#define RSP6STAT_ILLEGAL_CMD_S 14
+#define RSP6STAT_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 15 - CRC previous command
+ * failed Bit 23
+ */
+#define RSP6STAT_COM_CRC_ERROR_S 15
+
+#define SDIOH_XFER_TYPE_READ SD_IO_OP_READ
+#define SDIOH_XFER_TYPE_WRITE SD_IO_OP_WRITE
+
+/* command issue options */
+#define CMD_OPTION_DEFAULT 0
+#define CMD_OPTION_TUNING 1
+#endif /* _SDIO_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sdioh.h b/drivers/net/wireless/bcmdhd/include/sdioh.h
new file mode 100644
index 000000000000..e847a52babed
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sdioh.h
@@ -0,0 +1,441 @@
+/*
+ * SDIO Host Controller Spec header file
+ * Register map and definitions for the Standard Host Controller
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdioh.h 299859 2011-12-01 03:53:27Z $
+ */
+
+#ifndef _SDIOH_H
+#define _SDIOH_H
+
+#define SD_SysAddr 0x000
+#define SD_BlockSize 0x004
+#define SD_BlockCount 0x006
+#define SD_Arg0 0x008
+#define SD_Arg1 0x00A
+#define SD_TransferMode 0x00C
+#define SD_Command 0x00E
+#define SD_Response0 0x010
+#define SD_Response1 0x012
+#define SD_Response2 0x014
+#define SD_Response3 0x016
+#define SD_Response4 0x018
+#define SD_Response5 0x01A
+#define SD_Response6 0x01C
+#define SD_Response7 0x01E
+#define SD_BufferDataPort0 0x020
+#define SD_BufferDataPort1 0x022
+#define SD_PresentState 0x024
+#define SD_HostCntrl 0x028
+#define SD_PwrCntrl 0x029
+#define SD_BlockGapCntrl 0x02A
+#define SD_WakeupCntrl 0x02B
+#define SD_ClockCntrl 0x02C
+#define SD_TimeoutCntrl 0x02E
+#define SD_SoftwareReset 0x02F
+#define SD_IntrStatus 0x030
+#define SD_ErrorIntrStatus 0x032
+#define SD_IntrStatusEnable 0x034
+#define SD_ErrorIntrStatusEnable 0x036
+#define SD_IntrSignalEnable 0x038
+#define SD_ErrorIntrSignalEnable 0x03A
+#define SD_CMD12ErrorStatus 0x03C
+#define SD_Capabilities 0x040
+#define SD_Capabilities3 0x044
+#define SD_MaxCurCap 0x048
+#define SD_MaxCurCap_Reserved 0x04C
+#define SD_ADMA_ErrStatus 0x054
+#define SD_ADMA_SysAddr 0x058
+#define SD_SlotInterruptStatus 0x0FC
+#define SD_HostControllerVersion 0x0FE
+#define SD_GPIO_Reg 0x100
+#define SD_GPIO_OE 0x104
+#define SD_GPIO_Enable 0x108
+
+/* SD specific registers in PCI config space */
+#define SD_SlotInfo 0x40
+
+/* HC 3.0 specific registers and offsets */
+#define SD3_HostCntrl2 0x03E
+/* preset regs start and count */
+#define SD3_PresetValStart 0x060
+#define SD3_PresetValCount 8
+/* preset-indiv regs */
+#define SD3_PresetVal_init 0x060
+#define SD3_PresetVal_default 0x062
+#define SD3_PresetVal_HS 0x064
+#define SD3_PresetVal_SDR12 0x066
+#define SD3_PresetVal_SDR25 0x068
+#define SD3_PresetVal_SDR50 0x06a
+#define SD3_PresetVal_SDR104 0x06c
+#define SD3_PresetVal_DDR50 0x06e
+
+/* preset value indices */
+#define SD3_PRESETVAL_INITIAL_IX 0
+#define SD3_PRESETVAL_DESPEED_IX 1
+#define SD3_PRESETVAL_HISPEED_IX 2
+#define SD3_PRESETVAL_SDR12_IX 3
+#define SD3_PRESETVAL_SDR25_IX 4
+#define SD3_PRESETVAL_SDR50_IX 5
+#define SD3_PRESETVAL_SDR104_IX 6
+#define SD3_PRESETVAL_DDR50_IX 7
+
+/* SD_Capabilities reg (0x040) */
+#define CAP_TO_CLKFREQ_M BITFIELD_MASK(6)
+#define CAP_TO_CLKFREQ_S 0
+#define CAP_TO_CLKUNIT_M BITFIELD_MASK(1)
+#define CAP_TO_CLKUNIT_S 7
+/* Note: for the SDIO 2.0 case this mask should be 6 bits (the MSB 2 bits
+ * are reserved), but 8 bits are used here since that is required for 3.0.
+ */
+#define CAP_BASECLK_M BITFIELD_MASK(8)
+#define CAP_BASECLK_S 8
+#define CAP_MAXBLOCK_M BITFIELD_MASK(2)
+#define CAP_MAXBLOCK_S 16
+#define CAP_ADMA2_M BITFIELD_MASK(1)
+#define CAP_ADMA2_S 19
+#define CAP_ADMA1_M BITFIELD_MASK(1)
+#define CAP_ADMA1_S 20
+#define CAP_HIGHSPEED_M BITFIELD_MASK(1)
+#define CAP_HIGHSPEED_S 21
+#define CAP_DMA_M BITFIELD_MASK(1)
+#define CAP_DMA_S 22
+#define CAP_SUSPEND_M BITFIELD_MASK(1)
+#define CAP_SUSPEND_S 23
+#define CAP_VOLT_3_3_M BITFIELD_MASK(1)
+#define CAP_VOLT_3_3_S 24
+#define CAP_VOLT_3_0_M BITFIELD_MASK(1)
+#define CAP_VOLT_3_0_S 25
+#define CAP_VOLT_1_8_M BITFIELD_MASK(1)
+#define CAP_VOLT_1_8_S 26
+#define CAP_64BIT_HOST_M BITFIELD_MASK(1)
+#define CAP_64BIT_HOST_S 28
+
+#define SDIO_OCR_READ_FAIL (2)
+
+
+#define CAP_ASYNCINT_SUP_M BITFIELD_MASK(1)
+#define CAP_ASYNCINT_SUP_S 29
+
+#define CAP_SLOTTYPE_M BITFIELD_MASK(2)
+#define CAP_SLOTTYPE_S 30
+
+#define CAP3_MSBits_OFFSET (32)
+/* Note: the following fields live in the upper 32 bits of the capabilities
+ * register, so their shift values are expressed relative to that upper word;
+ * CAP3_MSBits_OFFSET is subtracted so that, e.g., spec bit 33 becomes
+ * shift 1 within the MSB32 word.
+ */
+#define CAP3_SDR50_SUP_M BITFIELD_MASK(1)
+#define CAP3_SDR50_SUP_S (32 - CAP3_MSBits_OFFSET)
+
+#define CAP3_SDR104_SUP_M BITFIELD_MASK(1)
+#define CAP3_SDR104_SUP_S (33 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DDR50_SUP_M BITFIELD_MASK(1)
+#define CAP3_DDR50_SUP_S (34 - CAP3_MSBits_OFFSET)
+
+/* for knowing the clk caps in a single read */
+#define CAP3_30CLKCAP_M BITFIELD_MASK(3)
+#define CAP3_30CLKCAP_S (32 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_A_M BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_A_S (36 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_C_M BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_C_S (37 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_D_M BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_D_S (38 - CAP3_MSBits_OFFSET)
+
+#define CAP3_RETUNING_TC_M BITFIELD_MASK(4)
+#define CAP3_RETUNING_TC_S (40 - CAP3_MSBits_OFFSET)
+
+#define CAP3_TUNING_SDR50_M BITFIELD_MASK(1)
+#define CAP3_TUNING_SDR50_S (45 - CAP3_MSBits_OFFSET)
+
+#define CAP3_RETUNING_MODES_M BITFIELD_MASK(2)
+#define CAP3_RETUNING_MODES_S (46 - CAP3_MSBits_OFFSET)
+
+#define CAP3_CLK_MULT_M BITFIELD_MASK(8)
+#define CAP3_CLK_MULT_S (48 - CAP3_MSBits_OFFSET)
+
+#define PRESET_DRIVR_SELECT_M BITFIELD_MASK(2)
+#define PRESET_DRIVR_SELECT_S 14
+
+#define PRESET_CLK_DIV_M BITFIELD_MASK(10)
+#define PRESET_CLK_DIV_S 0
+
+/* SD_MaxCurCap reg (0x048) */
+#define CAP_CURR_3_3_M BITFIELD_MASK(8)
+#define CAP_CURR_3_3_S 0
+#define CAP_CURR_3_0_M BITFIELD_MASK(8)
+#define CAP_CURR_3_0_S 8
+#define CAP_CURR_1_8_M BITFIELD_MASK(8)
+#define CAP_CURR_1_8_S 16
+
+/* SD_SysAddr: Offset 0x0000, Size 4 bytes */
+
+/* SD_BlockSize: Offset 0x004, Size 2 bytes */
+#define BLKSZ_BLKSZ_M BITFIELD_MASK(12)
+#define BLKSZ_BLKSZ_S 0
+#define BLKSZ_BNDRY_M BITFIELD_MASK(3)
+#define BLKSZ_BNDRY_S 12
+
+/* SD_BlockCount: Offset 0x006, size 2 bytes */
+
+/* SD_Arg0: Offset 0x008, size = 4 bytes */
+/* SD_TransferMode Offset 0x00C, size = 2 bytes */
+#define XFER_DMA_ENABLE_M BITFIELD_MASK(1)
+#define XFER_DMA_ENABLE_S 0
+#define XFER_BLK_COUNT_EN_M BITFIELD_MASK(1)
+#define XFER_BLK_COUNT_EN_S 1
+#define XFER_CMD_12_EN_M BITFIELD_MASK(1)
+#define XFER_CMD_12_EN_S 2
+#define XFER_DATA_DIRECTION_M BITFIELD_MASK(1)
+#define XFER_DATA_DIRECTION_S 4
+#define XFER_MULTI_BLOCK_M BITFIELD_MASK(1)
+#define XFER_MULTI_BLOCK_S 5
+
+/* SD_Command: Offset 0x00E, size = 2 bytes */
+/* resp_type field */
+#define RESP_TYPE_NONE 0
+#define RESP_TYPE_136 1
+#define RESP_TYPE_48 2
+#define RESP_TYPE_48_BUSY 3
+/* type field */
+#define CMD_TYPE_NORMAL 0
+#define CMD_TYPE_SUSPEND 1
+#define CMD_TYPE_RESUME 2
+#define CMD_TYPE_ABORT 3
+
+#define CMD_RESP_TYPE_M BITFIELD_MASK(2) /* Bits [0-1] - Response type */
+#define CMD_RESP_TYPE_S 0
+#define CMD_CRC_EN_M BITFIELD_MASK(1) /* Bit 3 - CRC enable */
+#define CMD_CRC_EN_S 3
+#define CMD_INDEX_EN_M BITFIELD_MASK(1) /* Bit 4 - Enable index checking */
+#define CMD_INDEX_EN_S 4
+#define CMD_DATA_EN_M BITFIELD_MASK(1) /* Bit 5 - Using DAT line */
+#define CMD_DATA_EN_S 5
+#define CMD_TYPE_M BITFIELD_MASK(2) /* Bit [6-7] - Normal, abort, resume, etc
+ */
+#define CMD_TYPE_S 6
+#define CMD_INDEX_M BITFIELD_MASK(6) /* Bits [8-13] - Command number */
+#define CMD_INDEX_S 8
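+
+/*
+ * Example (illustrative, not part of the original header): the 16-bit value
+ * written to SD_Command for a CMD53 data transfer with a 48-bit (R5)
+ * response and CRC/index checking enabled would be composed as:
+ *
+ *   uint16 cmd = (SDIOH_CMD_53 << CMD_INDEX_S) |
+ *                (CMD_TYPE_NORMAL << CMD_TYPE_S) |
+ *                (1 << CMD_DATA_EN_S) | (1 << CMD_INDEX_EN_S) |
+ *                (1 << CMD_CRC_EN_S) |
+ *                (RESP_TYPE_48 << CMD_RESP_TYPE_S);
+ */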
+
+/* SD_BufferDataPort0 : Offset 0x020, size = 2 or 4 bytes */
+/* SD_BufferDataPort1 : Offset 0x022, size = 2 bytes */
+/* SD_PresentState : Offset 0x024, size = 4 bytes */
+#define PRES_CMD_INHIBIT_M BITFIELD_MASK(1) /* Bit 0 May use CMD */
+#define PRES_CMD_INHIBIT_S 0
+#define PRES_DAT_INHIBIT_M BITFIELD_MASK(1) /* Bit 1 May use DAT */
+#define PRES_DAT_INHIBIT_S 1
+#define PRES_DAT_BUSY_M BITFIELD_MASK(1) /* Bit 2 DAT is busy */
+#define PRES_DAT_BUSY_S 2
+#define PRES_PRESENT_RSVD_M BITFIELD_MASK(5) /* Bit [3-7] rsvd */
+#define PRES_PRESENT_RSVD_S 3
+#define PRES_WRITE_ACTIVE_M BITFIELD_MASK(1) /* Bit 8 Write is active */
+#define PRES_WRITE_ACTIVE_S 8
+#define PRES_READ_ACTIVE_M BITFIELD_MASK(1) /* Bit 9 Read is active */
+#define PRES_READ_ACTIVE_S 9
+#define PRES_WRITE_DATA_RDY_M BITFIELD_MASK(1) /* Bit 10 Write buf is avail */
+#define PRES_WRITE_DATA_RDY_S 10
+#define PRES_READ_DATA_RDY_M BITFIELD_MASK(1) /* Bit 11 Read buf data avail */
+#define PRES_READ_DATA_RDY_S 11
+#define PRES_CARD_PRESENT_M BITFIELD_MASK(1) /* Bit 16 Card present - debounced */
+#define PRES_CARD_PRESENT_S 16
+#define PRES_CARD_STABLE_M BITFIELD_MASK(1) /* Bit 17 Debugging */
+#define PRES_CARD_STABLE_S 17
+#define PRES_CARD_PRESENT_RAW_M BITFIELD_MASK(1) /* Bit 18 Not debounced */
+#define PRES_CARD_PRESENT_RAW_S 18
+#define PRES_WRITE_ENABLED_M BITFIELD_MASK(1) /* Bit 19 Write protected? */
+#define PRES_WRITE_ENABLED_S 19
+#define PRES_DAT_SIGNAL_M BITFIELD_MASK(4) /* Bit [20-23] Debugging */
+#define PRES_DAT_SIGNAL_S 20
+#define PRES_CMD_SIGNAL_M BITFIELD_MASK(1) /* Bit 24 Debugging */
+#define PRES_CMD_SIGNAL_S 24
+
+/* SD_HostCntrl: Offset 0x028, size = 1 bytes */
+#define HOST_LED_M BITFIELD_MASK(1) /* Bit 0 LED On/Off */
+#define HOST_LED_S 0
+#define HOST_DATA_WIDTH_M BITFIELD_MASK(1) /* Bit 1 4 bit enable */
+#define HOST_DATA_WIDTH_S 1
+#define HOST_HI_SPEED_EN_M BITFIELD_MASK(1) /* Bit 2 High speed vs low speed */
+#define HOST_HI_SPEED_EN_S 2
+#define HOST_DMA_SEL_M BITFIELD_MASK(2) /* Bits [4:3] DMA Select */
+#define HOST_DMA_SEL_S 3
+
+/* Host Control2: */
+#define HOSTCtrl2_PRESVAL_EN_M BITFIELD_MASK(1) /* 1 bit */
+#define HOSTCtrl2_PRESVAL_EN_S 15 /* bit# */
+
+#define HOSTCtrl2_ASYINT_EN_M BITFIELD_MASK(1) /* 1 bit */
+#define HOSTCtrl2_ASYINT_EN_S 14 /* bit# */
+
+#define HOSTCtrl2_SAMPCLK_SEL_M BITFIELD_MASK(1) /* 1 bit */
+#define HOSTCtrl2_SAMPCLK_SEL_S 7 /* bit# */
+
+#define HOSTCtrl2_EXEC_TUNING_M BITFIELD_MASK(1) /* 1 bit */
+#define HOSTCtrl2_EXEC_TUNING_S 6 /* bit# */
+
+#define HOSTCtrl2_DRIVSTRENGTH_SEL_M BITFIELD_MASK(2) /* 2 bit */
+#define HOSTCtrl2_DRIVSTRENGTH_SEL_S 4 /* bit# */
+
+#define HOSTCtrl2_1_8SIG_EN_M BITFIELD_MASK(1) /* 1 bit */
+#define HOSTCtrl2_1_8SIG_EN_S 3 /* bit# */
+
+#define HOSTCtrl2_UHSMODE_SEL_M BITFIELD_MASK(3) /* 3 bit */
+#define HOSTCtrl2_UHSMODE_SEL_S 0 /* bit# */
+
+#define HOST_CONTR_VER_2 (1)
+#define HOST_CONTR_VER_3 (2)
+
+/* misc defines */
+#define SD1_MODE 0x1 /* SD Host Cntrlr Spec: 1-bit data bus mode */
+#define SD4_MODE 0x2 /* SD Host Cntrlr Spec: 4-bit data bus mode */
+
+/* SD_PwrCntrl: Offset 0x029, size = 1 bytes */
+#define PWR_BUS_EN_M BITFIELD_MASK(1) /* Bit 0 Power the bus */
+#define PWR_BUS_EN_S 0
+#define PWR_VOLTS_M BITFIELD_MASK(3) /* Bit [1-3] Voltage Select */
+#define PWR_VOLTS_S 1
+
+/* SD_SoftwareReset: Offset 0x02F, size = 1 byte */
+#define SW_RESET_ALL_M BITFIELD_MASK(1) /* Bit 0 Reset All */
+#define SW_RESET_ALL_S 0
+#define SW_RESET_CMD_M BITFIELD_MASK(1) /* Bit 1 CMD Line Reset */
+#define SW_RESET_CMD_S 1
+#define SW_RESET_DAT_M BITFIELD_MASK(1) /* Bit 2 DAT Line Reset */
+#define SW_RESET_DAT_S 2
+
+/* SD_IntrStatus: Offset 0x030, size = 2 bytes */
+/* Defs also serve SD_IntrStatusEnable and SD_IntrSignalEnable */
+#define INTSTAT_CMD_COMPLETE_M BITFIELD_MASK(1) /* Bit 0 */
+#define INTSTAT_CMD_COMPLETE_S 0
+#define INTSTAT_XFER_COMPLETE_M BITFIELD_MASK(1)
+#define INTSTAT_XFER_COMPLETE_S 1
+#define INTSTAT_BLOCK_GAP_EVENT_M BITFIELD_MASK(1)
+#define INTSTAT_BLOCK_GAP_EVENT_S 2
+#define INTSTAT_DMA_INT_M BITFIELD_MASK(1)
+#define INTSTAT_DMA_INT_S 3
+#define INTSTAT_BUF_WRITE_READY_M BITFIELD_MASK(1)
+#define INTSTAT_BUF_WRITE_READY_S 4
+#define INTSTAT_BUF_READ_READY_M BITFIELD_MASK(1)
+#define INTSTAT_BUF_READ_READY_S 5
+#define INTSTAT_CARD_INSERTION_M BITFIELD_MASK(1)
+#define INTSTAT_CARD_INSERTION_S 6
+#define INTSTAT_CARD_REMOVAL_M BITFIELD_MASK(1)
+#define INTSTAT_CARD_REMOVAL_S 7
+#define INTSTAT_CARD_INT_M BITFIELD_MASK(1)
+#define INTSTAT_CARD_INT_S 8
+#define INTSTAT_RETUNING_INT_M BITFIELD_MASK(1) /* Bit 12 */
+#define INTSTAT_RETUNING_INT_S 12
+#define INTSTAT_ERROR_INT_M BITFIELD_MASK(1) /* Bit 15 */
+#define INTSTAT_ERROR_INT_S 15
+
+/* SD_ErrorIntrStatus: Offset 0x032, size = 2 bytes */
+/* Defs also serve SD_ErrorIntrStatusEnable and SD_ErrorIntrSignalEnable */
+#define ERRINT_CMD_TIMEOUT_M BITFIELD_MASK(1)
+#define ERRINT_CMD_TIMEOUT_S 0
+#define ERRINT_CMD_CRC_M BITFIELD_MASK(1)
+#define ERRINT_CMD_CRC_S 1
+#define ERRINT_CMD_ENDBIT_M BITFIELD_MASK(1)
+#define ERRINT_CMD_ENDBIT_S 2
+#define ERRINT_CMD_INDEX_M BITFIELD_MASK(1)
+#define ERRINT_CMD_INDEX_S 3
+#define ERRINT_DATA_TIMEOUT_M BITFIELD_MASK(1)
+#define ERRINT_DATA_TIMEOUT_S 4
+#define ERRINT_DATA_CRC_M BITFIELD_MASK(1)
+#define ERRINT_DATA_CRC_S 5
+#define ERRINT_DATA_ENDBIT_M BITFIELD_MASK(1)
+#define ERRINT_DATA_ENDBIT_S 6
+#define ERRINT_CURRENT_LIMIT_M BITFIELD_MASK(1)
+#define ERRINT_CURRENT_LIMIT_S 7
+#define ERRINT_AUTO_CMD12_M BITFIELD_MASK(1)
+#define ERRINT_AUTO_CMD12_S 8
+#define ERRINT_VENDOR_M BITFIELD_MASK(4)
+#define ERRINT_VENDOR_S 12
+#define ERRINT_ADMA_M BITFIELD_MASK(1)
+#define ERRINT_ADMA_S 9
+
+/* Also provide definitions in "normal" form to allow combined masks */
+#define ERRINT_CMD_TIMEOUT_BIT 0x0001
+#define ERRINT_CMD_CRC_BIT 0x0002
+#define ERRINT_CMD_ENDBIT_BIT 0x0004
+#define ERRINT_CMD_INDEX_BIT 0x0008
+#define ERRINT_DATA_TIMEOUT_BIT 0x0010
+#define ERRINT_DATA_CRC_BIT 0x0020
+#define ERRINT_DATA_ENDBIT_BIT 0x0040
+#define ERRINT_CURRENT_LIMIT_BIT 0x0080
+#define ERRINT_AUTO_CMD12_BIT 0x0100
+#define ERRINT_ADMA_BIT 0x0200
+
+/* Masks to select CMD vs. DATA errors */
+#define ERRINT_CMD_ERRS (ERRINT_CMD_TIMEOUT_BIT | ERRINT_CMD_CRC_BIT |\
+ ERRINT_CMD_ENDBIT_BIT | ERRINT_CMD_INDEX_BIT)
+#define ERRINT_DATA_ERRS (ERRINT_DATA_TIMEOUT_BIT | ERRINT_DATA_CRC_BIT |\
+ ERRINT_DATA_ENDBIT_BIT | ERRINT_ADMA_BIT)
+#define ERRINT_TRANSFER_ERRS (ERRINT_CMD_ERRS | ERRINT_DATA_ERRS)
+
+/* SD_WakeupCntr_BlockGapCntrl : Offset 0x02A , size = bytes */
+/* SD_ClockCntrl : Offset 0x02C , size = bytes */
+/* SD_SoftwareReset_TimeoutCntrl : Offset 0x02E , size = bytes */
+/* SD_IntrStatus : Offset 0x030 , size = bytes */
+/* SD_ErrorIntrStatus : Offset 0x032 , size = bytes */
+/* SD_IntrStatusEnable : Offset 0x034 , size = bytes */
+/* SD_ErrorIntrStatusEnable : Offset 0x036 , size = bytes */
+/* SD_IntrSignalEnable : Offset 0x038 , size = bytes */
+/* SD_ErrorIntrSignalEnable : Offset 0x03A , size = bytes */
+/* SD_CMD12ErrorStatus : Offset 0x03C , size = bytes */
+/* SD_Capabilities : Offset 0x040 , size = bytes */
+/* SD_MaxCurCap : Offset 0x048 , size = bytes */
+/* SD_MaxCurCap_Reserved: Offset 0x04C , size = bytes */
+/* SD_SlotInterruptStatus: Offset 0x0FC , size = bytes */
+/* SD_HostControllerVersion : Offset 0x0FE , size = bytes */
+
+/* SDIO Host Control Register DMA Mode Definitions */
+#define SDIOH_SDMA_MODE 0
+#define SDIOH_ADMA1_MODE 1
+#define SDIOH_ADMA2_MODE 2
+#define SDIOH_ADMA2_64_MODE 3
+
+#define ADMA2_ATTRIBUTE_VALID (1 << 0) /* ADMA Descriptor line valid */
+#define ADMA2_ATTRIBUTE_END (1 << 1) /* End of Descriptor */
+#define ADMA2_ATTRIBUTE_INT (1 << 2) /* Interrupt when line is done */
+#define ADMA2_ATTRIBUTE_ACT_NOP (0 << 4) /* Skip current line, go to next. */
+#define ADMA2_ATTRIBUTE_ACT_RSV (1 << 4) /* Same as NOP */
+#define ADMA1_ATTRIBUTE_ACT_SET (1 << 4) /* ADMA1 Only - set transfer length */
+#define ADMA2_ATTRIBUTE_ACT_TRAN (2 << 4) /* Transfer Data of one descriptor line. */
+#define ADMA2_ATTRIBUTE_ACT_LINK (3 << 4) /* Link Descriptor */
+
+/* ADMA2 Descriptor Table Entry for 32-bit Address */
+typedef struct adma2_dscr_32b {
+ uint32 len_attr;
+ uint32 phys_addr;
+} adma2_dscr_32b_t;
+
+/* ADMA1 Descriptor Table Entry */
+typedef struct adma1_dscr {
+ uint32 phys_addr_attr;
+} adma1_dscr_t;
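+
+/*
+ * Example (illustrative sketch, not part of the original header): filling one
+ * 32-bit ADMA2 descriptor that transfers 512 bytes from DMA address 'pa' and
+ * ends the chain.  Per the standard ADMA2 32-bit descriptor layout the length
+ * occupies the upper 16 bits of len_attr and the attribute bits the lower 16:
+ *
+ *   adma2_dscr_32b_t d;
+ *   d.len_attr  = (512 << 16) | ADMA2_ATTRIBUTE_ACT_TRAN |
+ *                 ADMA2_ATTRIBUTE_END | ADMA2_ATTRIBUTE_VALID;
+ *   d.phys_addr = (uint32)pa;
+ */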
+
+#endif /* _SDIOH_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sdiovar.h b/drivers/net/wireless/bcmdhd/include/sdiovar.h
new file mode 100644
index 000000000000..83f82de26497
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sdiovar.h
@@ -0,0 +1,58 @@
+/*
+ * Structure used by apps whose drivers access SDIO drivers.
+ * Pulled out separately so dhdu and wlu can both use it.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdiovar.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _sdiovar_h_
+#define _sdiovar_h_
+
+#include <typedefs.h>
+
+/* require default structure packing */
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+typedef struct sdreg {
+ int func;
+ int offset;
+ int value;
+} sdreg_t;
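+
+/*
+ * Example (illustrative, not part of the original header): a debug utility
+ * reading a register at backplane offset 0x18 through SDIO function 1 might
+ * fill the structure as
+ *
+ *   sdreg_t sdreg;
+ *   sdreg.func   = 1;
+ *   sdreg.offset = 0x18;
+ *   sdreg.value  = 0;      (filled in by the driver on a read)
+ *
+ * before handing it to the driver iovar/ioctl that consumes it.
+ */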
+
+/* Common msglevel constants */
+#define SDH_ERROR_VAL 0x0001 /* Error */
+#define SDH_TRACE_VAL 0x0002 /* Trace */
+#define SDH_INFO_VAL 0x0004 /* Info */
+#define SDH_DEBUG_VAL 0x0008 /* Debug */
+#define SDH_DATA_VAL 0x0010 /* Data */
+#define SDH_CTRL_VAL 0x0020 /* Control Regs */
+#define SDH_LOG_VAL 0x0040 /* Enable bcmlog */
+#define SDH_DMA_VAL 0x0080 /* DMA */
+
+#define NUM_PREV_TRANSACTIONS 16
+
+
+#include <packed_section_end.h>
+
+#endif /* _sdiovar_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/siutils.h b/drivers/net/wireless/bcmdhd/include/siutils.h
new file mode 100644
index 000000000000..ac52bc1b84e3
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/siutils.h
@@ -0,0 +1,313 @@
+/*
+ * Misc utility routines for accessing the SOC Interconnects
+ * of Broadcom HNBU chips.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: siutils.h 321982 2012-03-19 06:58:08Z $
+ */
+
+#ifndef _siutils_h_
+#define _siutils_h_
+
+
+struct si_pub {
+ uint socitype;
+
+ uint bustype;
+ uint buscoretype;
+ uint buscorerev;
+ uint buscoreidx;
+ int ccrev;
+ uint32 cccaps;
+ uint32 cccaps_ext;
+ int pmurev;
+ uint32 pmucaps;
+ uint boardtype;
+ uint boardrev;
+ uint boardvendor;
+ uint boardflags;
+ uint boardflags2;
+ uint chip;
+ uint chiprev;
+ uint chippkg;
+ uint32 chipst;
+ bool issim;
+ uint socirev;
+ bool pci_pr32414;
+
+};
+
+
+typedef const struct si_pub si_t;
+
+
+
+#define SI_OSH NULL
+
+#define BADIDX (SI_MAXCORES + 1)
+
+
+#define XTAL 0x1
+#define PLL 0x2
+
+
+#define CLK_FAST 0
+#define CLK_DYNAMIC 2
+
+
+#define GPIO_DRV_PRIORITY 0
+#define GPIO_APP_PRIORITY 1
+#define GPIO_HI_PRIORITY 2
+
+
+#define GPIO_PULLUP 0
+#define GPIO_PULLDN 1
+
+
+#define GPIO_REGEVT 0
+#define GPIO_REGEVT_INTMSK 1
+#define GPIO_REGEVT_INTPOL 2
+
+
+#define SI_DEVPATH_BUFSZ 16
+
+
+#define SI_DOATTACH 1
+#define SI_PCIDOWN 2
+#define SI_PCIUP 3
+
+#define ISSIM_ENAB(sih) 0
+
+
+#if defined(BCMPMUCTL)
+#define PMUCTL_ENAB(sih) (BCMPMUCTL)
+#else
+#define PMUCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PMU)
+#endif
+
+
+#if defined(BCMPMUCTL) && BCMPMUCTL
+#define CCCTL_ENAB(sih) (0)
+#define CCPLL_ENAB(sih) (0)
+#else
+#define CCCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PWR_CTL)
+#define CCPLL_ENAB(sih) ((sih)->cccaps & CC_CAP_PLL_MASK)
+#endif
+
+typedef void (*gpio_handler_t)(uint32 stat, void *arg);
+
+#define CC_BTCOEX_EN_MASK 0x01
+
+#define GPIO_CTRL_EPA_EN_MASK 0x40
+
+#define GPIO_CTRL_5_6_EN_MASK 0x60
+#define GPIO_CTRL_7_6_EN_MASK 0xC0
+#define GPIO_OUT_7_EN_MASK 0x80
+
+
+
+extern si_t *si_attach(uint pcidev, osl_t *osh, void *regs, uint bustype,
+ void *sdh, char **vars, uint *varsz);
+extern si_t *si_kattach(osl_t *osh);
+extern void si_detach(si_t *sih);
+extern bool si_pci_war16165(si_t *sih);
+
+extern uint si_corelist(si_t *sih, uint coreid[]);
+extern uint si_coreid(si_t *sih);
+extern uint si_flag(si_t *sih);
+extern uint si_intflag(si_t *sih);
+extern uint si_coreidx(si_t *sih);
+extern uint si_coreunit(si_t *sih);
+extern uint si_corevendor(si_t *sih);
+extern uint si_corerev(si_t *sih);
+extern void *si_osh(si_t *sih);
+extern void si_setosh(si_t *sih, osl_t *osh);
+extern uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern void *si_coreregs(si_t *sih);
+extern uint si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
+extern uint32 si_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern bool si_iscoreup(si_t *sih);
+extern uint si_findcoreidx(si_t *sih, uint coreid, uint coreunit);
+extern void *si_setcoreidx(si_t *sih, uint coreidx);
+extern void *si_setcore(si_t *sih, uint coreid, uint coreunit);
+extern void *si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val);
+extern void si_restore_core(si_t *sih, uint coreid, uint intr_val);
+extern int si_numaddrspaces(si_t *sih);
+extern uint32 si_addrspace(si_t *sih, uint asidx);
+extern uint32 si_addrspacesize(si_t *sih, uint asidx);
+extern void si_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size);
+extern int si_corebist(si_t *sih);
+extern void si_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void si_core_disable(si_t *sih, uint32 bits);
+extern uint32 si_clock_rate(uint32 pll_type, uint32 n, uint32 m);
+extern bool si_read_pmu_autopll(si_t *sih);
+extern uint32 si_clock(si_t *sih);
+extern uint32 si_alp_clock(si_t *sih);
+extern uint32 si_ilp_clock(si_t *sih);
+extern void si_pci_setup(si_t *sih, uint coremask);
+extern void si_pcmcia_init(si_t *sih);
+extern void si_setint(si_t *sih, int siflag);
+extern bool si_backplane64(si_t *sih);
+extern void si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
+ void *intrsenabled_fn, void *intr_arg);
+extern void si_deregister_intr_callback(si_t *sih);
+extern void si_clkctl_init(si_t *sih);
+extern uint16 si_clkctl_fast_pwrup_delay(si_t *sih);
+extern bool si_clkctl_cc(si_t *sih, uint mode);
+extern int si_clkctl_xtal(si_t *sih, uint what, bool on);
+extern uint32 si_gpiotimerval(si_t *sih, uint32 mask, uint32 val);
+extern void si_btcgpiowar(si_t *sih);
+extern bool si_deviceremoved(si_t *sih);
+extern uint32 si_socram_size(si_t *sih);
+extern uint32 si_socdevram_size(si_t *sih);
+extern uint32 si_socram_srmem_size(si_t *sih);
+extern void si_socdevram(si_t *sih, bool set, uint8 *enable, uint8 *protect, uint8 *remap);
+extern bool si_socdevram_pkg(si_t *sih);
+extern bool si_socdevram_remap_isenb(si_t *sih);
+extern uint32 si_socdevram_remap_size(si_t *sih);
+
+extern void si_watchdog(si_t *sih, uint ticks);
+extern void si_watchdog_ms(si_t *sih, uint32 ms);
+extern uint32 si_watchdog_msticks(void);
+extern void *si_gpiosetcore(si_t *sih);
+extern uint32 si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioin(si_t *sih);
+extern uint32 si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioled(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_gpioreserve(si_t *sih, uint32 gpio_num, uint8 priority);
+extern uint32 si_gpiorelease(si_t *sih, uint32 gpio_num, uint8 priority);
+extern uint32 si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val);
+extern uint32 si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val);
+extern uint32 si_gpio_int_enable(si_t *sih, bool enable);
+
+
+extern void *si_gpio_handler_register(si_t *sih, uint32 e, bool lev, gpio_handler_t cb, void *arg);
+extern void si_gpio_handler_unregister(si_t *sih, void* gpioh);
+extern void si_gpio_handler_process(si_t *sih);
+
+
+extern bool si_pci_pmecap(si_t *sih);
+struct osl_info;
+extern bool si_pci_fastpmecap(struct osl_info *osh);
+extern bool si_pci_pmestat(si_t *sih);
+extern void si_pci_pmeclr(si_t *sih);
+extern void si_pci_pmeen(si_t *sih);
+extern void si_pci_pmestatclr(si_t *sih);
+extern uint si_pcie_readreg(void *sih, uint addrtype, uint offset);
+
+extern void si_sdio_init(si_t *sih);
+
+extern uint16 si_d11_devid(si_t *sih);
+extern int si_corepciid(si_t *sih, uint func, uint16 *pcivendor, uint16 *pcidevice,
+ uint8 *pciclass, uint8 *pcisubclass, uint8 *pciprogif, uint8 *pciheader);
+
+#define si_eci(sih) 0
+static INLINE void * si_eci_init(si_t *sih) {return NULL;}
+#define si_eci_notify_bt(sih, type, val) (0)
+#define si_seci(sih) 0
+#define si_seci_upd(sih, a) do {} while (0)
+static INLINE void * si_seci_init(si_t *sih, uint8 use_seci) {return NULL;}
+#define si_seci_down(sih) do {} while (0)
+
+
+extern bool si_is_otp_disabled(si_t *sih);
+extern bool si_is_otp_powered(si_t *sih);
+extern void si_otp_power(si_t *sih, bool on);
+
+
+extern bool si_is_sprom_available(si_t *sih);
+extern bool si_is_sprom_enabled(si_t *sih);
+extern void si_sprom_enable(si_t *sih, bool enable);
+
+
+extern int si_cis_source(si_t *sih);
+#define CIS_DEFAULT 0
+#define CIS_SROM 1
+#define CIS_OTP 2
+
+
+#define DEFAULT_FAB 0x0
+#define CSM_FAB7 0x1
+#define TSMC_FAB12 0x2
+#define SMIC_FAB4 0x3
+extern int si_otp_fabid(si_t *sih, uint16 *fabid, bool rw);
+extern uint16 si_fabid(si_t *sih);
+
+
+extern int si_devpath(si_t *sih, char *path, int size);
+
+extern char *si_getdevpathvar(si_t *sih, const char *name);
+extern int si_getdevpathintvar(si_t *sih, const char *name);
+extern char *si_coded_devpathvar(si_t *sih, char *varname, int var_len, const char *name);
+
+
+extern uint8 si_pcieclkreq(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_pcielcreg(si_t *sih, uint32 mask, uint32 val);
+extern void si_war42780_clkreq(si_t *sih, bool clkreq);
+extern void si_pci_down(si_t *sih);
+extern void si_pci_up(si_t *sih);
+extern void si_pci_sleep(si_t *sih);
+extern void si_pcie_war_ovr_update(si_t *sih, uint8 aspm);
+extern void si_pcie_power_save_enable(si_t *sih, bool enable);
+extern void si_pcie_extendL1timer(si_t *sih, bool extend);
+extern int si_pci_fixcfg(si_t *sih);
+extern void si_chippkg_set(si_t *sih, uint);
+
+extern void si_chipcontrl_btshd0_4331(si_t *sih, bool on);
+extern void si_chipcontrl_restore(si_t *sih, uint32 val);
+extern uint32 si_chipcontrl_read(si_t *sih);
+extern void si_chipcontrl_epa4331(si_t *sih, bool on);
+extern void si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl);
+extern void si_chipcontrl_srom4360(si_t *sih, bool on);
+
+extern void si_epa_4313war(si_t *sih);
+extern void si_btc_enable_chipcontrol(si_t *sih);
+
+extern void si_btcombo_p250_4313_war(si_t *sih);
+extern void si_btcombo_43228_war(si_t *sih);
+extern void si_clk_pmu_htavail_set(si_t *sih, bool set_clear);
+extern uint si_pll_reset(si_t *sih);
+
+
+extern bool si_taclear(si_t *sih, bool details);
+
+
+
+extern uint32 si_pciereg(si_t *sih, uint32 offset, uint32 mask, uint32 val, uint type);
+extern uint32 si_pcieserdesreg(si_t *sih, uint32 mdioslave, uint32 offset, uint32 mask, uint32 val);
+extern void si_pcie_set_request_size(si_t *sih, uint16 size);
+extern uint16 si_pcie_get_request_size(si_t *sih);
+extern uint16 si_pcie_get_ssid(si_t *sih);
+extern uint32 si_pcie_get_bar0(si_t *sih);
+extern int si_pcie_configspace_cache(si_t *sih);
+extern int si_pcie_configspace_restore(si_t *sih);
+extern int si_pcie_configspace_get(si_t *sih, uint8 *buf, uint size);
+
+char *si_getnvramflvar(si_t *sih, const char *name);
+
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/spid.h b/drivers/net/wireless/bcmdhd/include/spid.h
new file mode 100644
index 000000000000..ccae9779cdae
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/spid.h
@@ -0,0 +1,153 @@
+/*
+ * SPI device spec header file
+ *
+ * Copyright (C) 2012, Broadcom Corporation
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
+ * the contents of this file may not be disclosed to third parties, copied
+ * or duplicated in any form, in whole or in part, without the prior
+ * written permission of Broadcom Corporation.
+ *
+ * $Id: spid.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _SPI_H
+#define _SPI_H
+
+/*
+ * Brcm SPI Device Register Map.
+ *
+ */
+
+typedef volatile struct {
+ uint8 config; /* 0x00, len, endian, clock, speed, polarity, wakeup */
+ uint8 response_delay; /* 0x01, read response delay in bytes (corerev < 3) */
+ uint8 status_enable; /* 0x02, status-enable, intr with status, response_delay
+ * function selection, command/data error check
+ */
+ uint8 reset_bp; /* 0x03, reset on wlan/bt backplane reset (corerev >= 1) */
+ uint16 intr_reg; /* 0x04, Intr status register */
+ uint16 intr_en_reg; /* 0x06, Intr mask register */
+ uint32 status_reg; /* 0x08, RO, Status bits of last spi transfer */
+ uint16 f1_info_reg; /* 0x0c, RO, enabled, ready for data transfer, blocksize */
+ uint16 f2_info_reg; /* 0x0e, RO, enabled, ready for data transfer, blocksize */
+ uint16 f3_info_reg; /* 0x10, RO, enabled, ready for data transfer, blocksize */
+ uint32 test_read; /* 0x14, RO 0xfeedbead signature */
+ uint32 test_rw; /* 0x18, RW */
+ uint8 resp_delay_f0; /* 0x1c, read resp delay bytes for F0 (corerev >= 3) */
+ uint8 resp_delay_f1; /* 0x1d, read resp delay bytes for F1 (corerev >= 3) */
+ uint8 resp_delay_f2; /* 0x1e, read resp delay bytes for F2 (corerev >= 3) */
+ uint8 resp_delay_f3; /* 0x1f, read resp delay bytes for F3 (corerev >= 3) */
+} spi_regs_t;
+
+/* SPI device register offsets */
+#define SPID_CONFIG 0x00
+#define SPID_RESPONSE_DELAY 0x01
+#define SPID_STATUS_ENABLE 0x02
+#define SPID_RESET_BP 0x03 /* (corerev >= 1) */
+#define SPID_INTR_REG 0x04 /* 16 bits - Interrupt status */
+#define SPID_INTR_EN_REG 0x06 /* 16 bits - Interrupt mask */
+#define SPID_STATUS_REG 0x08 /* 32 bits */
+#define SPID_F1_INFO_REG 0x0C /* 16 bits */
+#define SPID_F2_INFO_REG 0x0E /* 16 bits */
+#define SPID_F3_INFO_REG 0x10 /* 16 bits */
+#define SPID_TEST_READ 0x14 /* 32 bits */
+#define SPID_TEST_RW 0x18 /* 32 bits */
+#define SPID_RESP_DELAY_F0 0x1c /* 8 bits (corerev >= 3) */
+#define SPID_RESP_DELAY_F1 0x1d /* 8 bits (corerev >= 3) */
+#define SPID_RESP_DELAY_F2 0x1e /* 8 bits (corerev >= 3) */
+#define SPID_RESP_DELAY_F3 0x1f /* 8 bits (corerev >= 3) */
+
+/* Bit masks for SPID_CONFIG device register */
+#define WORD_LENGTH_32 0x1 /* 0/1 16/32 bit word length */
+#define ENDIAN_BIG 0x2 /* 0/1 Little/Big Endian */
+#define CLOCK_PHASE 0x4 /* 0/1 clock phase delay */
+#define CLOCK_POLARITY 0x8 /* 0/1 Idle state clock polarity is low/high */
+#define HIGH_SPEED_MODE 0x10 /* 1/0 High Speed mode / Normal mode */
+#define INTR_POLARITY 0x20 /* 1/0 Interrupt active polarity is high/low */
+#define WAKE_UP 0x80 /* 0/1 Wake-up command from Host to WLAN */
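+
+/*
+ * Example (illustrative, not part of the original header): configuring the
+ * device interface for 32-bit words, little-endian framing and high-speed
+ * mode would write
+ *
+ *   uint8 cfg = WORD_LENGTH_32 | HIGH_SPEED_MODE;
+ *
+ * to SPID_CONFIG (leaving ENDIAN_BIG clear selects little endian).
+ */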
+
+/* Bit mask for SPID_RESPONSE_DELAY device register */
+#define RESPONSE_DELAY_MASK 0xFF /* Configurable rd response delay in multiples of 8 bits */
+
+/* Bit mask for SPID_STATUS_ENABLE device register */
+#define STATUS_ENABLE 0x1 /* 1/0 Status sent/not sent to host after read/write */
+#define INTR_WITH_STATUS 0x2 /* 0/1 Do-not / do-interrupt if status is sent */
+#define RESP_DELAY_ALL 0x4 /* Applicability of resp delay to F1 or all func's read */
+#define DWORD_PKT_LEN_EN 0x8 /* Packet len denoted in dwords instead of bytes */
+#define CMD_ERR_CHK_EN 0x20 /* Command error check enable */
+#define DATA_ERR_CHK_EN 0x40 /* Data error check enable */
+
+/* Bit mask for SPID_RESET_BP device register */
+#define RESET_ON_WLAN_BP_RESET 0x4 /* enable reset for WLAN backplane */
+#define RESET_ON_BT_BP_RESET 0x8 /* enable reset for BT backplane */
+#define RESET_SPI 0x80 /* reset the above enabled logic */
+
+/* Bit mask for SPID_INTR_REG device register */
+#define DATA_UNAVAILABLE 0x0001 /* Requested data not available; Clear by writing a "1" */
+#define F2_F3_FIFO_RD_UNDERFLOW 0x0002
+#define F2_F3_FIFO_WR_OVERFLOW 0x0004
+#define COMMAND_ERROR 0x0008 /* Cleared by writing 1 */
+#define DATA_ERROR 0x0010 /* Cleared by writing 1 */
+#define F2_PACKET_AVAILABLE 0x0020
+#define F3_PACKET_AVAILABLE 0x0040
+#define F1_OVERFLOW 0x0080 /* Due to last write. Bkplane has pending write requests */
+#define MISC_INTR0 0x0100
+#define MISC_INTR1 0x0200
+#define MISC_INTR2 0x0400
+#define MISC_INTR3 0x0800
+#define MISC_INTR4 0x1000
+#define F1_INTR 0x2000
+#define F2_INTR 0x4000
+#define F3_INTR 0x8000
+
+/* Bit mask for 32bit SPID_STATUS_REG device register */
+#define STATUS_DATA_NOT_AVAILABLE 0x00000001
+#define STATUS_UNDERFLOW 0x00000002
+#define STATUS_OVERFLOW 0x00000004
+#define STATUS_F2_INTR 0x00000008
+#define STATUS_F3_INTR 0x00000010
+#define STATUS_F2_RX_READY 0x00000020
+#define STATUS_F3_RX_READY 0x00000040
+#define STATUS_HOST_CMD_DATA_ERR 0x00000080
+#define STATUS_F2_PKT_AVAILABLE 0x00000100
+#define STATUS_F2_PKT_LEN_MASK 0x000FFE00
+#define STATUS_F2_PKT_LEN_SHIFT 9
+#define STATUS_F3_PKT_AVAILABLE 0x00100000
+#define STATUS_F3_PKT_LEN_MASK 0xFFE00000
+#define STATUS_F3_PKT_LEN_SHIFT 21
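+
+/*
+ * Example (illustrative, not part of the original header): extracting the F2
+ * packet availability and length from a 32-bit status word returned by the
+ * device:
+ *
+ *   if (status & STATUS_F2_PKT_AVAILABLE) {
+ *           uint32 len = (status & STATUS_F2_PKT_LEN_MASK) >>
+ *                        STATUS_F2_PKT_LEN_SHIFT;
+ *           ...
+ *   }
+ */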
+
+/* Bit mask for 16 bits SPID_F1_INFO_REG device register */
+#define F1_ENABLED 0x0001
+#define F1_RDY_FOR_DATA_TRANSFER 0x0002
+#define F1_MAX_PKT_SIZE 0x01FC
+
+/* Bit mask for 16 bits SPID_F2_INFO_REG device register */
+#define F2_ENABLED 0x0001
+#define F2_RDY_FOR_DATA_TRANSFER 0x0002
+#define F2_MAX_PKT_SIZE 0x3FFC
+
+/* Bit mask for 16 bits SPID_F3_INFO_REG device register */
+#define F3_ENABLED 0x0001
+#define F3_RDY_FOR_DATA_TRANSFER 0x0002
+#define F3_MAX_PKT_SIZE 0x3FFC
+
+/* Bit mask for 32 bits SPID_TEST_READ device register read in 16bit LE mode */
+#define TEST_RO_DATA_32BIT_LE 0xFEEDBEAD
+
+/* Maximum number of I/O funcs */
+#define SPI_MAX_IOFUNCS 4
+
+#define SPI_MAX_PKT_LEN (2048*4)
+
+/* Misc defines */
+#define SPI_FUNC_0 0
+#define SPI_FUNC_1 1
+#define SPI_FUNC_2 2
+#define SPI_FUNC_3 3
+
+#define WAIT_F2RXFIFORDY 100
+#define WAIT_F2RXFIFORDY_DELAY 20
+
+#endif /* _SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/trxhdr.h b/drivers/net/wireless/bcmdhd/include/trxhdr.h
new file mode 100644
index 000000000000..bf92a5651af0
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/trxhdr.h
@@ -0,0 +1,53 @@
+/*
+ * TRX image file header format.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: trxhdr.h 260898 2011-05-20 23:11:12Z $
+ */
+
+#ifndef _TRX_HDR_H
+#define _TRX_HDR_H
+
+#include <typedefs.h>
+
+#define TRX_MAGIC 0x30524448 /* "HDR0" */
+#define TRX_VERSION 1 /* Version 1 */
+#define TRX_MAX_LEN 0x3B0000 /* Max length */
+#define TRX_NO_HEADER 1 /* Do not write TRX header */
+#define TRX_GZ_FILES 0x2 /* Contains up to TRX_MAX_OFFSET individual gzip files */
+#define TRX_EMBED_UCODE 0x8 /* Trx contains embedded ucode image */
+#define TRX_ROMSIM_IMAGE 0x10 /* Trx contains ROM simulation image */
+#define TRX_UNCOMP_IMAGE 0x20 /* Trx contains uncompressed rtecdc.bin image */
+#define TRX_MAX_OFFSET 3 /* Max number of individual files */
+
+struct trx_header {
+ uint32 magic; /* "HDR0" */
+ uint32 len; /* Length of file including header */
+ uint32 crc32; /* 32-bit CRC from flag_version to end of file */
+ uint32 flag_version; /* 0:15 flags, 16:31 version */
+ uint32 offsets[TRX_MAX_OFFSET]; /* Offsets of partitions from start of header */
+};
+
+/* Compatibility */
+typedef struct trx_header TRXHDR, *PTRXHDR;
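+
+/*
+ * Example (illustrative sketch, not part of the original header): a minimal
+ * sanity check of a TRX image, assuming 'buf' points at a little-endian
+ * header already read into memory:
+ *
+ *   struct trx_header *hdr = (struct trx_header *)buf;
+ *   if (hdr->magic != TRX_MAGIC ||
+ *       (hdr->flag_version >> 16) != TRX_VERSION ||
+ *       hdr->len > TRX_MAX_LEN)
+ *           return -1;
+ */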
+
+#endif /* _TRX_HDR_H */
diff --git a/drivers/net/wireless/bcmdhd/include/typedefs.h b/drivers/net/wireless/bcmdhd/include/typedefs.h
new file mode 100644
index 000000000000..4eee5bab8ce0
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/typedefs.h
@@ -0,0 +1,310 @@
+/*
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: typedefs.h 286783 2011-09-29 06:18:57Z $
+ */
+
+#ifndef _TYPEDEFS_H_
+#define _TYPEDEFS_H_
+
+#ifdef SITE_TYPEDEFS
+
+
+
+#include "site_typedefs.h"
+
+#else
+
+
+
+#ifdef __cplusplus
+
+#define TYPEDEF_BOOL
+#ifndef FALSE
+#define FALSE false
+#endif
+#ifndef TRUE
+#define TRUE true
+#endif
+
+#else
+
+
+#endif
+
+#if defined(__x86_64__)
+#define TYPEDEF_UINTPTR
+typedef unsigned long long int uintptr;
+#endif
+
+
+
+
+
+#if defined(_NEED_SIZE_T_)
+typedef long unsigned int size_t;
+#endif
+
+
+
+
+#if defined(__sparc__)
+#define TYPEDEF_ULONG
+#endif
+
+
+
+#if !defined(LINUX_HYBRID) || defined(LINUX_PORT)
+#define TYPEDEF_UINT
+#ifndef TARGETENV_android
+#define TYPEDEF_USHORT
+#define TYPEDEF_ULONG
+#endif
+#ifdef __KERNEL__
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19))
+#define TYPEDEF_BOOL
+#endif
+
+#if (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18))
+#include <linux/compiler.h>
+#ifdef noinline_for_stack
+#define TYPEDEF_BOOL
+#endif
+#endif
+#endif
+#endif
+
+
+
+
+
+#if defined(__GNUC__) && defined(__STRICT_ANSI__)
+#define TYPEDEF_INT64
+#define TYPEDEF_UINT64
+#endif
+
+
+#if defined(__ICL)
+
+#define TYPEDEF_INT64
+
+#if defined(__STDC__)
+#define TYPEDEF_UINT64
+#endif
+
+#endif
+
+#if !defined(__DJGPP__)
+
+
+#if defined(__KERNEL__)
+
+
+#if !defined(LINUX_HYBRID) || defined(LINUX_PORT)
+#include <linux/types.h>
+#endif
+
+#else
+
+
+#include <sys/types.h>
+
+#endif
+
+#endif
+
+
+
+
+#define USE_TYPEDEF_DEFAULTS
+
+#endif
+
+
+
+
+#ifdef USE_TYPEDEF_DEFAULTS
+#undef USE_TYPEDEF_DEFAULTS
+
+#ifndef TYPEDEF_BOOL
+typedef unsigned char bool;
+#endif
+
+
+
+#ifndef TYPEDEF_UCHAR
+typedef unsigned char uchar;
+#endif
+
+#ifndef TYPEDEF_USHORT
+typedef unsigned short ushort;
+#endif
+
+#ifndef TYPEDEF_UINT
+typedef unsigned int uint;
+#endif
+
+#ifndef TYPEDEF_ULONG
+typedef unsigned long ulong;
+#endif
+
+
+
+#ifndef TYPEDEF_UINT8
+typedef unsigned char uint8;
+#endif
+
+#ifndef TYPEDEF_UINT16
+typedef unsigned short uint16;
+#endif
+
+#ifndef TYPEDEF_UINT32
+typedef unsigned int uint32;
+#endif
+
+#ifndef TYPEDEF_UINT64
+typedef unsigned long long uint64;
+#endif
+
+#ifndef TYPEDEF_UINTPTR
+typedef unsigned int uintptr;
+#endif
+
+#ifndef TYPEDEF_INT8
+typedef signed char int8;
+#endif
+
+#ifndef TYPEDEF_INT16
+typedef signed short int16;
+#endif
+
+#ifndef TYPEDEF_INT32
+typedef signed int int32;
+#endif
+
+#ifndef TYPEDEF_INT64
+typedef signed long long int64;
+#endif
+
+
+
+#ifndef TYPEDEF_FLOAT32
+typedef float float32;
+#endif
+
+#ifndef TYPEDEF_FLOAT64
+typedef double float64;
+#endif
+
+
+
+#ifndef TYPEDEF_FLOAT_T
+
+#if defined(FLOAT32)
+typedef float32 float_t;
+#else
+typedef float64 float_t;
+#endif
+
+#endif
+
+
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifndef OFF
+#define OFF 0
+#endif
+
+#ifndef ON
+#define ON 1
+#endif
+
+#define AUTO (-1)
+
+
+
+#ifndef PTRSZ
+#define PTRSZ sizeof(char*)
+#endif
+
+
+
+#if defined(__GNUC__) || defined(__lint)
+ #define BWL_COMPILER_GNU
+#elif defined(__CC_ARM) && __CC_ARM
+ #define BWL_COMPILER_ARMCC
+#else
+ #error "Unknown compiler!"
+#endif
+
+
+#ifndef INLINE
+ #if defined(BWL_COMPILER_MICROSOFT)
+ #define INLINE __inline
+ #elif defined(BWL_COMPILER_GNU)
+ #define INLINE __inline__
+ #elif defined(BWL_COMPILER_ARMCC)
+ #define INLINE __inline
+ #else
+ #define INLINE
+ #endif
+#endif
+
+#undef TYPEDEF_BOOL
+#undef TYPEDEF_UCHAR
+#undef TYPEDEF_USHORT
+#undef TYPEDEF_UINT
+#undef TYPEDEF_ULONG
+#undef TYPEDEF_UINT8
+#undef TYPEDEF_UINT16
+#undef TYPEDEF_UINT32
+#undef TYPEDEF_UINT64
+#undef TYPEDEF_UINTPTR
+#undef TYPEDEF_INT8
+#undef TYPEDEF_INT16
+#undef TYPEDEF_INT32
+#undef TYPEDEF_INT64
+#undef TYPEDEF_FLOAT32
+#undef TYPEDEF_FLOAT64
+#undef TYPEDEF_FLOAT_T
+
+#endif
+
+
+#define UNUSED_PARAMETER(x) (void)(x)
+
+
+#define DISCARD_QUAL(ptr, type) ((type *)(uintptr)(ptr))
+
+
+#include <bcmdefs.h>
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/usbrdl.h b/drivers/net/wireless/bcmdhd/include/usbrdl.h
new file mode 100644
index 000000000000..c90dccdcfad5
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/usbrdl.h
@@ -0,0 +1,203 @@
+/*
+ * Broadcom USB remote download definitions
+ *
+ * Copyright (C) 2012, Broadcom Corporation
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
+ * the contents of this file may not be disclosed to third parties, copied
+ * or duplicated in any form, in whole or in part, without the prior
+ * written permission of Broadcom Corporation.
+ *
+ * $Id: usbrdl.h 296577 2011-11-16 03:09:51Z $
+ */
+
+#ifndef _USB_RDL_H
+#define _USB_RDL_H
+
+/* Control messages: bRequest values */
+#define DL_GETSTATE 0 /* returns the rdl_state_t struct */
+#define DL_CHECK_CRC 1 /* currently unused */
+#define DL_GO 2 /* execute downloaded image */
+#define DL_START 3 /* initialize dl state */
+#define DL_REBOOT 4 /* reboot the device in 2 seconds */
+#define DL_GETVER 5 /* returns the bootrom_id_t struct */
+#define DL_GO_PROTECTED 6 /* execute the downloaded code and set reset event
+ * to occur in 2 seconds. It is the responsibility
+ * of the downloaded code to clear this event
+ */
+#define DL_EXEC 7 /* jump to a supplied address */
+#define DL_RESETCFG 8 /* To support single enum on dongle
+ * - Not used by bootloader
+ */
+#define DL_DEFER_RESP_OK 9 /* Potentially defer the response to setup
+ * if resp unavailable
+ */
+
+#define DL_HWCMD_MASK 0xfc /* Mask for hardware read commands: */
+#define DL_RDHW 0x10 /* Read a hardware address (Ctl-in) */
+#define DL_RDHW32 0x10 /* Read a 32 bit word */
+#define DL_RDHW16 0x11 /* Read 16 bits */
+#define DL_RDHW8 0x12 /* Read an 8 bit byte */
+#define DL_WRHW 0x14 /* Write a hardware address (Ctl-out) */
+#define DL_WRHW_BLK 0x13 /* Block write to hardware access */
+
+#define DL_CMD_RDHW 1 /* read data from a backplane address */
+#define DL_CMD_WRHW 2 /* write data to a backplane address */
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+#define DL_JTCONF	0x15	/* Get JTAG configuration (Ctl-in)
+ * Set JTAG configuration (Ctl-out)
+ */
+#define DL_JTON 0x16 /* Turn on jtag master (Ctl-in) */
+#define DL_JTOFF	0x17	/* Turn off jtag master (Ctl-in) */
+#define DL_RDRJT 0x18 /* Read a JTAG register (Ctl-in) */
+#define DL_WRJT 0x19 /* Write a hardware address over JTAG (Ctl/Bulk-out) */
+#define DL_WRRJT 0x1a /* Write a JTAG register (Ctl/Bulk-out) */
+#define DL_JTRST 0x1b /* Reset jtag fsm on jtag DUT (Ctl-in) */
+
+#define DL_RDJT 0x1c /* Read a hardware address over JTAG (Ctl-in) */
+#define DL_RDJT32 0x1c /* Read 32 bits */
+#define DL_RDJT16 0x1e /* Read 16 bits (sz = 4 - low bits) */
+#define DL_RDJT8 0x1f /* Read 8 bits */
+
+#define DL_MRDJT 0x20 /* Multiple read over JTAG (Ctl-out+Bulk-in) */
+#define DL_MRDJT32 0x20 /* M-read 32 bits */
+#define DL_MRDJT16 0x22 /* M-read 16 bits (sz = 4 - low bits) */
+#define DL_MRDJT6 0x23 /* M-read 8 bits */
+#define DL_MRDIJT 0x24 /* M-read over JTAG (Ctl-out+Bulk-in) with auto-increment */
+#define DL_MRDIJT32 0x24 /* M-read 32 bits w/ai */
+#define DL_MRDIJT16 0x26 /* M-read 16 bits w/ai (sz = 4 - low bits) */
+#define DL_MRDIJT8 0x27 /* M-read 8 bits w/ai */
+#define DL_MRDDJT 0x28 /* M-read over JTAG (Ctl-out+Bulk-in) with auto-decrement */
+#define DL_MRDDJT32 0x28 /* M-read 32 bits w/ad */
+#define DL_MRDDJT16 0x2a /* M-read 16 bits w/ad (sz = 4 - low bits) */
+#define DL_MRDDJT8 0x2b /* M-read 8 bits w/ad */
+#define DL_MWRJT 0x2c /* Multiple write over JTAG (Bulk-out) */
+#define DL_MWRIJT 0x2d /* With auto-increment */
+#define DL_MWRDJT 0x2e /* With auto-decrement */
+#define DL_VRDJT 0x2f /* Vector read over JTAG (Bulk-out+Bulk-in) */
+#define DL_VWRJT 0x30 /* Vector write over JTAG (Bulk-out+Bulk-in) */
+#define DL_SCJT 0x31 /* Jtag scan (Bulk-out+Bulk-in) */
+
+#define DL_CFRD 0x33 /* Reserved for dmamem use */
+#define DL_CFWR 0x34 /* Reserved for dmamem use */
+#define DL_GET_NVRAM 0x35 /* Query nvram parameter */
+
+#define DL_DBGTRIG 0xFF /* Trigger bRequest type to aid debug */
+
+#define DL_JTERROR 0x80000000
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+/* states */
+#define DL_WAITING 0 /* waiting to rx first pkt that includes the hdr info */
+#define DL_READY 1 /* hdr was good, waiting for more of the compressed image */
+#define DL_BAD_HDR 2 /* hdr was corrupted */
+#define DL_BAD_CRC 3 /* compressed image was corrupted */
+#define DL_RUNNABLE 4 /* download was successful, waiting for go cmd */
+#define DL_START_FAIL 5 /* failed to initialize correctly */
+#define DL_NVRAM_TOOBIG 6 /* host specified nvram data exceeds DL_NVRAM value */
+#define DL_IMAGE_TOOBIG 7 /* download image too big (exceeds DATA_START for rdl) */
+
+#define TIMEOUT 5000 /* Timeout for usb commands */
+
+struct bcm_device_id {
+ char *name;
+ uint32 vend;
+ uint32 prod;
+};
+
+typedef struct {
+ uint32 state;
+ uint32 bytes;
+} rdl_state_t;
+
+typedef struct {
+ uint32 chip; /* Chip id */
+ uint32 chiprev; /* Chip rev */
+ uint32 ramsize; /* Size of RAM */
+ uint32 remapbase; /* Current remap base address */
+ uint32 boardtype; /* Type of board */
+ uint32 boardrev; /* Board revision */
+} bootrom_id_t;
+
+/* struct for backplane & jtag accesses */
+typedef struct {
+ uint32 cmd; /* tag to identify the cmd */
+ uint32 addr; /* backplane address for write */
+ uint32 len; /* length of data: 1, 2, 4 bytes */
+ uint32 data; /* data to write */
+} hwacc_t;
+
+/* struct for backplane */
+typedef struct {
+ uint32 cmd; /* tag to identify the cmd */
+ uint32 addr; /* backplane address for write */
+ uint32 len; /* length of data: 1, 2, 4 bytes */
+ uint8 data[1]; /* data to write */
+} hwacc_blk_t;
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+typedef struct {
+ uint32 chip; /* Chip id */
+ uint32 chiprev; /* Chip rev */
+ uint32 ccrev; /* Chipcommon core rev */
+ uint32 siclock; /* Backplane clock */
+} jtagd_id_t;
+
+/* Jtag configuration structure */
+typedef struct {
+ uint32 cmd; /* tag to identify the cmd */
+ uint8 clkd; /* Jtag clock divisor */
+ uint8 disgpio; /* Gpio to disable external driver */
+ uint8 irsz; /* IR size for readreg/writereg */
+ uint8 drsz; /* DR size for readreg/writereg */
+
+ uint8 bigend; /* Big endian */
+ uint8 mode; /* Current mode */
+ uint16 delay; /* Delay between jtagm "simple commands" */
+
+ uint32 retries; /* Number of retries for jtagm operations */
+ uint32 ctrl; /* Jtag control reg copy */
+ uint32 ir_lvbase; /* Bits to add to IR values in LV tap */
+ uint32 dretries; /* Number of retries for dma operations */
+} jtagconf_t;
+
+/* struct for jtag scan */
+#define MAX_USB_IR_BITS 256
+#define MAX_USB_DR_BITS 3072
+#define USB_IR_WORDS (MAX_USB_IR_BITS / 32)
+#define USB_DR_WORDS (MAX_USB_DR_BITS / 32)
+typedef struct {
+ uint32 cmd; /* tag to identify the cmd */
+ uint32 irsz; /* IR size in bits */
+ uint32 drsz; /* DR size in bits */
+ uint32 ts; /* Terminal state (def, pause, rti) */
+ uint32 data[USB_IR_WORDS + USB_DR_WORDS]; /* IR & DR data */
+} scjt_t;
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+/* struct for querying nvram params from bootloader */
+#define QUERY_STRING_MAX 32
+typedef struct {
+ uint32 cmd; /* tag to identify the cmd */
+ char var[QUERY_STRING_MAX]; /* param name */
+} nvparam_t;
+
+typedef void (*exec_fn_t)(void *sih);
+
+#define USB_CTRL_IN (USB_TYPE_VENDOR | 0x80 | USB_RECIP_INTERFACE)
+#define USB_CTRL_OUT (USB_TYPE_VENDOR | 0 | USB_RECIP_INTERFACE)
+
+#define USB_CTRL_EP_TIMEOUT 500 /* Timeout used in USB control_msg transactions. */
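The bRequest values above describe a simple vendor-specific control protocol. As a hedged illustration only (not part of this patch), the sketch below shows how a host driver might issue DL_GETVER to read the bootrom_id_t structure, assuming a struct usb_device obtained in the probe path; a real driver would use a DMA-safe (kmalloc'd) buffer and may pass the interface number in wIndex.

#include <linux/usb.h>
#include <linux/errno.h>

/* Hypothetical helper, for illustration only. */
static int example_get_bootrom_id(struct usb_device *udev, bootrom_id_t *id)
{
	int ret;

	/* Vendor control-IN transfer: bRequest = DL_GETVER, wValue/wIndex = 0. */
	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
	                      DL_GETVER, USB_CTRL_IN,
	                      0, 0, id, sizeof(*id),
	                      USB_CTRL_EP_TIMEOUT);

	return (ret == sizeof(*id)) ? 0 : (ret < 0 ? ret : -EIO);
}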
+
+#define RDL_CHUNK 1500 /* size of each dl transfer */
+
+/* bootloader makes special use of trx header "offsets" array */
+#define TRX_OFFSETS_DLFWLEN_IDX 0 /* Size of the fw; used in uncompressed case */
+#define TRX_OFFSETS_JUMPTO_IDX 1 /* RAM address for jumpto after download */
+#define TRX_OFFSETS_NVM_LEN_IDX 2 /* Length of appended NVRAM data */
+
+#define TRX_OFFSETS_DLBASE_IDX 0 /* RAM start address for download */
+
+#endif /* _USB_RDL_H */
diff --git a/drivers/net/wireless/bcmdhd/include/wlc_extlog_idstr.h b/drivers/net/wireless/bcmdhd/include/wlc_extlog_idstr.h
new file mode 100644
index 000000000000..95f616a5fc72
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/wlc_extlog_idstr.h
@@ -0,0 +1,117 @@
+/*
+ * EXTLOG Module log ID to log Format String mapping table
+ *
+ * Copyright (C) 2012, Broadcom Corporation
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
+ * the contents of this file may not be disclosed to third parties, copied
+ * or duplicated in any form, in whole or in part, without the prior
+ * written permission of Broadcom Corporation.
+ *
+ * $Id: wlc_extlog_idstr.h 241182 2011-02-17 21:50:03Z $
+ */
+#ifndef _WLC_EXTLOG_IDSTR_H_
+#define _WLC_EXTLOG_IDSTR_H_
+
+#include "wlioctl.h"
+
+/* Strings corresponding to the IDs defined in wlioctl.h
+ * This file is only included by the apps and not included by the external driver
+ * Formats of pre-existing ids should NOT be changed
+ */
+log_idstr_t extlog_fmt_str[ ] = {
+ {FMTSTR_DRIVER_UP_ID, 0, LOG_ARGTYPE_NULL,
+ "Driver is Up\n"},
+
+ {FMTSTR_DRIVER_DOWN_ID, 0, LOG_ARGTYPE_NULL,
+ "Driver is Down\n"},
+
+ {FMTSTR_SUSPEND_MAC_FAIL_ID, 0, LOG_ARGTYPE_INT,
+ "wlc_suspend_mac_and_wait() failed with psmdebug 0x%08x\n"},
+
+ {FMTSTR_NO_PROGRESS_ID, 0, LOG_ARGTYPE_INT,
+ "No Progress on TX for %d seconds\n"},
+
+ {FMTSTR_RFDISABLE_ID, 0, LOG_ARGTYPE_INT,
+ "Detected a change in RF Disable Input 0x%x\n"},
+
+ {FMTSTR_REG_PRINT_ID, 0, LOG_ARGTYPE_STR_INT,
+ "Register %s = 0x%x\n"},
+
+ {FMTSTR_EXPTIME_ID, FMTSTRF_USER, LOG_ARGTYPE_NULL,
+ "Strong RF interference detected\n"},
+
+ {FMTSTR_JOIN_START_ID, FMTSTRF_USER, LOG_ARGTYPE_STR,
+ "Searching for networks with ssid %s\n"},
+
+ {FMTSTR_JOIN_COMPLETE_ID, FMTSTRF_USER, LOG_ARGTYPE_STR,
+ "Successfully joined network with BSSID %s\n"},
+
+ {FMTSTR_NO_NETWORKS_ID, FMTSTRF_USER, LOG_ARGTYPE_NULL,
+ "No networks found. Please check if the network exists and is in range\n"},
+
+ {FMTSTR_SECURITY_MISMATCH_ID, FMTSTRF_USER, LOG_ARGTYPE_NULL,
+ "AP rejected due to security mismatch. Change the security settings and try again...\n"},
+
+ {FMTSTR_RATE_MISMATCH_ID, FMTSTRF_USER, LOG_ARGTYPE_NULL,
+ "AP rejected due to rate mismatch\n"},
+
+ {FMTSTR_AP_PRUNED_ID, 0, LOG_ARGTYPE_INT,
+ "AP rejected due to reason %d\n"},
+
+ {FMTSTR_KEY_INSERTED_ID, 0, LOG_ARGTYPE_INT,
+ "Inserting keys for algorithm %d\n"},
+
+ {FMTSTR_DEAUTH_ID, FMTSTRF_USER, LOG_ARGTYPE_STR_INT,
+ "Received Deauth from %s with Reason %d\n"},
+
+ {FMTSTR_DISASSOC_ID, FMTSTRF_USER, LOG_ARGTYPE_STR_INT,
+ "Received Disassoc from %s with Reason %d\n"},
+
+ {FMTSTR_LINK_UP_ID, FMTSTRF_USER, LOG_ARGTYPE_NULL,
+ "Link Up\n"},
+
+ {FMTSTR_LINK_DOWN_ID, FMTSTRF_USER, LOG_ARGTYPE_NULL,
+ "Link Down\n"},
+
+ {FMTSTR_RADIO_HW_OFF_ID, FMTSTRF_USER, LOG_ARGTYPE_NULL,
+ "Radio button is turned OFF. Please turn it on...\n"},
+
+ {FMTSTR_RADIO_HW_ON_ID, FMTSTRF_USER, LOG_ARGTYPE_NULL,
+ "Hardware Radio button is turned ON\n"},
+
+ {FMTSTR_EVENT_DESC_ID, 0, LOG_ARGTYPE_INT_STR,
+ "Generated event id %d: (result status) is (%s)\n"},
+
+ {FMTSTR_PNP_SET_POWER_ID, 0, LOG_ARGTYPE_INT,
+ "Device going into power state %d\n"},
+
+ {FMTSTR_RADIO_SW_OFF_ID, FMTSTRF_USER, LOG_ARGTYPE_NULL,
+ "Software Radio is disabled. Please enable it through the UI...\n"},
+
+ {FMTSTR_RADIO_SW_ON_ID, FMTSTRF_USER, LOG_ARGTYPE_NULL,
+ "Software Radio is enabled\n"},
+
+ {FMTSTR_PWD_MISMATCH_ID, FMTSTRF_USER, LOG_ARGTYPE_NULL,
+ "Potential passphrase mismatch. Please try a different one...\n"},
+
+ {FMTSTR_FATAL_ERROR_ID, 0, LOG_ARGTYPE_INT,
+ "Fatal Error: intstatus 0x%x\n"},
+
+ {FMTSTR_AUTH_FAIL_ID, 0, LOG_ARGTYPE_STR_INT,
+ "Authentication to %s Failed with status %d\n"},
+
+ {FMTSTR_ASSOC_FAIL_ID, 0, LOG_ARGTYPE_STR_INT,
+ "Association to %s Failed with status %d\n"},
+
+ {FMTSTR_IBSS_FAIL_ID, FMTSTRF_USER, LOG_ARGTYPE_NULL,
+ "Unable to start IBSS since PeerNet is already active\n"},
+
+ {FMTSTR_EXTAP_FAIL_ID, FMTSTRF_USER, LOG_ARGTYPE_NULL,
+ "Unable to start Ext-AP since PeerNet is already active\n"},
+
+ {FMTSTR_MAX_ID, 0, 0, "\0"}
+};
+
+#endif /* _WLC_EXTLOG_IDSTR_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/wlfc_proto.h b/drivers/net/wireless/bcmdhd/include/wlfc_proto.h
new file mode 100644
index 000000000000..82f29d1b14fe
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/wlfc_proto.h
@@ -0,0 +1,229 @@
+/*
+* Copyright (C) 1999-2012, Broadcom Corporation
+*
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2 (the "GPL"),
+* available at http://www.broadcom.com/licenses/GPLv2.php, with the
+* following added to such license:
+*
+* As a special exception, the copyright holders of this software give you
+* permission to link this software with independent modules, and to copy and
+* distribute the resulting executable under terms of your choice, provided that
+* you also meet, for each linked independent module, the terms and conditions of
+* the license of that module. An independent module is a module which is not
+* derived from this software. The special exception does not apply to any
+* modifications of the software.
+*
+* Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a license
+* other than the GPL, without Broadcom's express prior written consent.
+* $Id: wlfc_proto.h 328114 2012-04-18 00:02:46Z $
+*
+*/
+#ifndef __wlfc_proto_definitions_h__
+#define __wlfc_proto_definitions_h__
+
+ /* Use TLV to convey WLFC information.
+ ---------------------------------------------------------------------------
+ | Type | Len | value | Description
+ ---------------------------------------------------------------------------
+ | 1 | 1 | (handle) | MAC OPEN
+ ---------------------------------------------------------------------------
+ | 2 | 1 | (handle) | MAC CLOSE
+ ---------------------------------------------------------------------------
+ | 3 | 2 | (count, handle, prec_bmp)| Set the credit depth for a MAC dstn
+ ---------------------------------------------------------------------------
+ | 4 | 4 | see pkttag comments | TXSTATUS
+ ---------------------------------------------------------------------------
+ | 5 | 4 | see pkttag comments | PKTTAG [host->firmware]
+ ---------------------------------------------------------------------------
+ | 6 | 8 | (handle, ifid, MAC) | MAC ADD
+ ---------------------------------------------------------------------------
+ | 7 | 8 | (handle, ifid, MAC) | MAC DEL
+ ---------------------------------------------------------------------------
+ | 8 | 1 | (rssi) | RSSI - RSSI value for the packet.
+ ---------------------------------------------------------------------------
+ | 9 | 1 | (interface ID) | Interface OPEN
+ ---------------------------------------------------------------------------
+ | 10 | 1 | (interface ID) | Interface CLOSE
+ ---------------------------------------------------------------------------
+ | 11 | 8 | fifo credit returns map | FIFO credits back to the host
+ | | | |
+ | | | | --------------------------------------
+ | | | | | ac0 | ac1 | ac2 | ac3 | bcmc | atim |
+ | | | | --------------------------------------
+ | | | |
+ ---------------------------------------------------------------------------
+ | 12 | 2 | MAC handle, | Host provides a bitmap of pending
+ | | | AC[0-3] traffic bitmap | unicast traffic for MAC-handle dstn.
+ | | | | [host->firmware]
+ ---------------------------------------------------------------------------
+ | 13 | 3 | (count, handle, prec_bmp)| One time request for packet to a specific
+ | | | | MAC destination.
+ ---------------------------------------------------------------------------
+ | 15 | 1 | interface ID | NIC period start
+ ---------------------------------------------------------------------------
+ | 16 | 1 | interface ID | NIC period end
+ ---------------------------------------------------------------------------
+ | 17 | 3 | (ifid, txs) | Action frame tx status
+ ---------------------------------------------------------------------------
+ | 255 | N/A | N/A | FILLER - This is a special type
+ | | | | that has no length or value.
+ | | | | Typically used for padding.
+ ---------------------------------------------------------------------------
+ */
+
+#define WLFC_CTL_TYPE_MAC_OPEN 1
+#define WLFC_CTL_TYPE_MAC_CLOSE 2
+#define WLFC_CTL_TYPE_MAC_REQUEST_CREDIT 3
+#define WLFC_CTL_TYPE_TXSTATUS 4
+#define WLFC_CTL_TYPE_PKTTAG 5
+
+#define WLFC_CTL_TYPE_MACDESC_ADD 6
+#define WLFC_CTL_TYPE_MACDESC_DEL 7
+#define WLFC_CTL_TYPE_RSSI 8
+
+#define WLFC_CTL_TYPE_INTERFACE_OPEN 9
+#define WLFC_CTL_TYPE_INTERFACE_CLOSE 10
+
+#define WLFC_CTL_TYPE_FIFO_CREDITBACK 11
+
+#define WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP 12
+#define WLFC_CTL_TYPE_MAC_REQUEST_PACKET 13
+#define WLFC_CTL_TYPE_HOST_REORDER_RXPKTS 14
+
+#define WLFC_CTL_TYPE_NIC_PRD_START 15
+#define WLFC_CTL_TYPE_NIC_PRD_END 16
+#define WLFC_CTL_TYPE_AF_TXS 17
+
+#define WLFC_CTL_TYPE_FILLER 255
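The table above defines a plain type/length/value stream. The following sketch is illustrative only (not part of this patch) and assumes the uint/uint8 typedefs from typedefs.h; it walks such a block, treating WLFC_CTL_TYPE_FILLER as a single pad byte with no length or value, exactly as the table notes.

/* Hypothetical TLV walker, for illustration only. */
static void example_walk_wlfc_tlvs(const uint8 *buf, uint len)
{
	uint i = 0;

	while (i < len) {
		uint8 type = buf[i];

		if (type == WLFC_CTL_TYPE_FILLER) {
			i++;				/* pad byte: no length, no value */
			continue;
		}
		if (i + 2 > len || i + 2 + buf[i + 1] > len)
			break;				/* truncated item */
		/* buf[i + 1] is the value length; the value starts at &buf[i + 2] */
		i += 2 + buf[i + 1];
	}
}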
+
+#define WLFC_CTL_VALUE_LEN_MACDESC 8 /* handle, interface, MAC */
+
+#define WLFC_CTL_VALUE_LEN_MAC 1 /* MAC-handle */
+#define WLFC_CTL_VALUE_LEN_RSSI 1
+
+#define WLFC_CTL_VALUE_LEN_INTERFACE 1
+#define WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP 2
+
+#define WLFC_CTL_VALUE_LEN_TXSTATUS 4
+#define WLFC_CTL_VALUE_LEN_PKTTAG 4
+
+/* enough space to host all 4 ACs, bc/mc and atim fifo credit */
+#define WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK 6
+
+#define WLFC_CTL_VALUE_LEN_REQUEST_CREDIT 3 /* credit, MAC-handle, prec_bitmap */
+#define WLFC_CTL_VALUE_LEN_REQUEST_PACKET 3 /* credit, MAC-handle, prec_bitmap */
+
+#define WLFC_CTL_VALUE_LEN_NIC_PRD_START 1
+#define WLFC_CTL_VALUE_LEN_NIC_PRD_END 1
+#define WLFC_CTL_VALUE_LEN_AF_TXS 3
+
+
+#define WLFC_PKTID_GEN_MASK 0x80000000
+#define WLFC_PKTID_GEN_SHIFT 31
+
+#define WLFC_PKTID_GEN(x) (((x) & WLFC_PKTID_GEN_MASK) >> WLFC_PKTID_GEN_SHIFT)
+#define WLFC_PKTID_SETGEN(x, gen) (x) = ((x) & ~WLFC_PKTID_GEN_MASK) | \
+ (((gen) << WLFC_PKTID_GEN_SHIFT) & WLFC_PKTID_GEN_MASK)
+
+#define WLFC_PKTFLAG_PKTFROMHOST 0x01
+#define WLFC_PKTFLAG_PKT_REQUESTED 0x02
+
+#define WL_TXSTATUS_FLAGS_MASK 0xf /* allow 4 bits only */
+#define WL_TXSTATUS_FLAGS_SHIFT 27
+
+#define WL_TXSTATUS_SET_FLAGS(x, flags) ((x) = \
+ ((x) & ~(WL_TXSTATUS_FLAGS_MASK << WL_TXSTATUS_FLAGS_SHIFT)) | \
+ (((flags) & WL_TXSTATUS_FLAGS_MASK) << WL_TXSTATUS_FLAGS_SHIFT))
+#define WL_TXSTATUS_GET_FLAGS(x) (((x) >> WL_TXSTATUS_FLAGS_SHIFT) & \
+ WL_TXSTATUS_FLAGS_MASK)
+
+#define WL_TXSTATUS_FIFO_MASK 0x7 /* allow 3 bits for FIFO ID */
+#define WL_TXSTATUS_FIFO_SHIFT 24
+
+#define WL_TXSTATUS_SET_FIFO(x, flags) ((x) = \
+ ((x) & ~(WL_TXSTATUS_FIFO_MASK << WL_TXSTATUS_FIFO_SHIFT)) | \
+ (((flags) & WL_TXSTATUS_FIFO_MASK) << WL_TXSTATUS_FIFO_SHIFT))
+#define WL_TXSTATUS_GET_FIFO(x) (((x) >> WL_TXSTATUS_FIFO_SHIFT) & WL_TXSTATUS_FIFO_MASK)
+
+#define WL_TXSTATUS_PKTID_MASK 0xffffff /* allow 24 bits */
+#define WL_TXSTATUS_SET_PKTID(x, num) ((x) = \
+ ((x) & ~WL_TXSTATUS_PKTID_MASK) | (num))
+#define WL_TXSTATUS_GET_PKTID(x) ((x) & WL_TXSTATUS_PKTID_MASK)
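The masks and shifts above pack generation, flags, FIFO and packet id into one 32-bit word (bit 31 generation, bits 30..27 flags, bits 26..24 FIFO, bits 23..0 packet id). A hedged sketch of using the accessor macros, illustrative only and not part of this patch:

/* Hypothetical pack/unpack helpers, for illustration only. */
static uint32 example_pack_txstatus(uint8 flags, uint8 fifo, uint32 pktid)
{
	uint32 word = 0;

	WL_TXSTATUS_SET_FLAGS(word, flags);
	WL_TXSTATUS_SET_FIFO(word, fifo);
	WL_TXSTATUS_SET_PKTID(word, pktid & WL_TXSTATUS_PKTID_MASK);
	return word;
}

static void example_unpack_txstatus(uint32 word, uint8 *flags,
                                    uint8 *fifo, uint32 *pktid)
{
	*flags = (uint8)WL_TXSTATUS_GET_FLAGS(word);
	*fifo  = (uint8)WL_TXSTATUS_GET_FIFO(word);
	*pktid = WL_TXSTATUS_GET_PKTID(word);
}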
+
+/* 32 STAs should be enough; 6 bits; must be a power of 2 */
+#define WLFC_MAC_DESC_TABLE_SIZE 32
+#define WLFC_MAX_IFNUM 16
+#define WLFC_MAC_DESC_ID_INVALID 0xff
+
+/* b[7:5] -reuse guard, b[4:0] -value */
+#define WLFC_MAC_DESC_GET_LOOKUP_INDEX(x) ((x) & 0x1f)
+
+#define WLFC_PKTFLAG_SET_PKTREQUESTED(x) (x) |= \
+ (WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT)
+
+#define WLFC_PKTFLAG_CLR_PKTREQUESTED(x) (x) &= \
+ ~(WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT)
+
+#define WL_TXSTATUS_GENERATION_MASK 1
+#define WL_TXSTATUS_GENERATION_SHIFT 31
+
+#define WLFC_PKTFLAG_SET_GENERATION(x, gen) ((x) = \
+ ((x) & ~(WL_TXSTATUS_GENERATION_MASK << WL_TXSTATUS_GENERATION_SHIFT)) | \
+ (((gen) & WL_TXSTATUS_GENERATION_MASK) << WL_TXSTATUS_GENERATION_SHIFT))
+
+#define WLFC_PKTFLAG_GENERATION(x) (((x) >> WL_TXSTATUS_GENERATION_SHIFT) & \
+ WL_TXSTATUS_GENERATION_MASK)
+
+#define WLFC_MAX_PENDING_DATALEN 120
+
+/* host is free to discard the packet */
+#define WLFC_CTL_PKTFLAG_DISCARD 0
+/* D11 suppressed a packet */
+#define WLFC_CTL_PKTFLAG_D11SUPPRESS 1
+/* WL firmware suppressed a packet because MAC is
+ already in PSMode (short time window)
+*/
+#define WLFC_CTL_PKTFLAG_WLSUPPRESS 2
+/* Firmware tossed this packet */
+#define WLFC_CTL_PKTFLAG_TOSSED_BYWLC 3
+
+#define WLFC_D11_STATUS_INTERPRET(txs) \
+ (((txs)->status.suppr_ind != 0) ? WLFC_CTL_PKTFLAG_D11SUPPRESS : WLFC_CTL_PKTFLAG_DISCARD)
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define WLFC_DBGMESG(x) printf x
+/* wlfc-breadcrumb */
+#define WLFC_BREADCRUMB(x) do {if ((x) == NULL) \
+ {printf("WLFC: %s():%d:caller:%p\n", \
+ __FUNCTION__, __LINE__, __builtin_return_address(0));}} while (0)
+#define WLFC_PRINTMAC(banner, ea) do {printf("%s MAC: [%02x:%02x:%02x:%02x:%02x:%02x]\n", \
+ banner, ea[0], ea[1], ea[2], ea[3], ea[4], ea[5]); } while (0)
+#define WLFC_WHEREIS(s) printf("WLFC: at %s():%d, %s\n", __FUNCTION__, __LINE__, (s))
+#else
+#define WLFC_DBGMESG(x)
+#define WLFC_BREADCRUMB(x)
+#define WLFC_PRINTMAC(banner, ea)
+#define WLFC_WHEREIS(s)
+#endif
+
+/* AMPDU host reorder packet flags */
+#define WLHOST_REORDERDATA_MAXFLOWS 256
+#define WLHOST_REORDERDATA_LEN 10
+#define WLHOST_REORDERDATA_TOTLEN (WLHOST_REORDERDATA_LEN + 1 + 1) /* +tag +len */
+
+#define WLHOST_REORDERDATA_FLOWID_OFFSET 0
+#define WLHOST_REORDERDATA_MAXIDX_OFFSET 2
+#define WLHOST_REORDERDATA_FLAGS_OFFSET 4
+#define WLHOST_REORDERDATA_CURIDX_OFFSET 6
+#define WLHOST_REORDERDATA_EXPIDX_OFFSET 8
+
+#define WLHOST_REORDERDATA_DEL_FLOW 0x01
+#define WLHOST_REORDERDATA_FLUSH_ALL 0x02
+#define WLHOST_REORDERDATA_CURIDX_VALID 0x04
+#define WLHOST_REORDERDATA_EXPIDX_VALID 0x08
+#define WLHOST_REORDERDATA_NEW_HOLE 0x10
+
+#endif /* __wlfc_proto_definitions_h__ */
diff --git a/drivers/net/wireless/bcmdhd/include/wlioctl.h b/drivers/net/wireless/bcmdhd/include/wlioctl.h
new file mode 100644
index 000000000000..c25f7944c0e8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/wlioctl.h
@@ -0,0 +1,5067 @@
+/*
+ * Custom OID/ioctl definitions for
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wlioctl.h 328096 2012-04-17 23:07:20Z $
+ */
+
+#ifndef _wlioctl_h_
+#define _wlioctl_h_
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+#include <proto/bcmeth.h>
+#include <proto/bcmevent.h>
+#include <proto/802.11.h>
+#include <bcmwifi_channels.h>
+#include <bcmwifi_rates.h>
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+#include <bcm_mpool_pub.h>
+#include <bcmcdc.h>
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+/* LINUX_POSTMOGRIFY_REMOVAL: undefined during the compile phase, so it is
+ * a no-op for most cases. For hybrid and other open source releases, it is
+ * defined during a second pass and mogrified out for distribution.
+ */
+
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+
+#ifndef INTF_NAME_SIZ
+#define INTF_NAME_SIZ 16
+#endif
+
+/* Used to send ioctls over the transport pipe */
+typedef struct remote_ioctl {
+ cdc_ioctl_t msg;
+ uint data_len;
+ char intf_name[INTF_NAME_SIZ];
+} rem_ioctl_t;
+#define REMOTE_SIZE sizeof(rem_ioctl_t)
+
+#define ACTION_FRAME_SIZE 1800
+
+typedef struct wl_action_frame {
+ struct ether_addr da;
+ uint16 len;
+ uint32 packetId;
+ uint8 data[ACTION_FRAME_SIZE];
+} wl_action_frame_t;
+
+#define WL_WIFI_ACTION_FRAME_SIZE sizeof(struct wl_action_frame)
+
+typedef struct ssid_info
+{
+ uint8 ssid_len; /* the length of SSID */
+ uint8 ssid[32]; /* SSID string */
+} ssid_info_t;
+
+typedef struct wl_af_params {
+ uint32 channel;
+ int32 dwell_time;
+ struct ether_addr BSSID;
+ wl_action_frame_t action_frame;
+} wl_af_params_t;
+
+#define WL_WIFI_AF_PARAMS_SIZE sizeof(struct wl_af_params)
+
+#define MFP_TEST_FLAG_NORMAL 0
+#define MFP_TEST_FLAG_ANY_KEY 1
+typedef struct wl_sa_query {
+ uint32 flag;
+ uint8 action;
+ uint16 id;
+ struct ether_addr da;
+} wl_sa_query_t;
+
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+/* require default structure packing */
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+/* Legacy structure to help keep backward compatible wl tool and tray app */
+
+#define LEGACY_WL_BSS_INFO_VERSION 107 /* older version of wl_bss_info struct */
+
+typedef struct wl_bss_info_107 {
+ uint32 version; /* version field */
+ uint32 length; /* byte length of data in this record,
+ * starting at version and including IEs
+ */
+ struct ether_addr BSSID;
+ uint16 beacon_period; /* units are Kusec */
+ uint16 capability; /* Capability information */
+ uint8 SSID_len;
+ uint8 SSID[32];
+ struct {
+ uint count; /* # rates in this set */
+ uint8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */
+ } rateset; /* supported rates */
+ uint8 channel; /* Channel no. */
+ uint16 atim_window; /* units are Kusec */
+ uint8 dtim_period; /* DTIM period */
+ int16 RSSI; /* receive signal strength (in dBm) */
+ int8 phy_noise; /* noise (in dBm) */
+ uint32 ie_length; /* byte length of Information Elements */
+ /* variable length Information Elements */
+} wl_bss_info_107_t;
+
+/*
+ * Per-BSS information structure.
+ */
+
+#define LEGACY2_WL_BSS_INFO_VERSION 108 /* old version of wl_bss_info struct */
+
+/* BSS info structure
+ * Applications MUST CHECK ie_offset field and length field to access IEs and
+ * next bss_info structure in a vector (in wl_scan_results_t)
+ */
+typedef struct wl_bss_info_108 {
+ uint32 version; /* version field */
+ uint32 length; /* byte length of data in this record,
+ * starting at version and including IEs
+ */
+ struct ether_addr BSSID;
+ uint16 beacon_period; /* units are Kusec */
+ uint16 capability; /* Capability information */
+ uint8 SSID_len;
+ uint8 SSID[32];
+ struct {
+ uint count; /* # rates in this set */
+ uint8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */
+ } rateset; /* supported rates */
+ chanspec_t chanspec; /* chanspec for bss */
+ uint16 atim_window; /* units are Kusec */
+ uint8 dtim_period; /* DTIM period */
+ int16 RSSI; /* receive signal strength (in dBm) */
+ int8 phy_noise; /* noise (in dBm) */
+
+ uint8 n_cap; /* BSS is 802.11N Capable */
+ uint32 nbss_cap; /* 802.11N BSS Capabilities (based on HT_CAP_*) */
+ uint8 ctl_ch; /* 802.11N BSS control channel number */
+ uint32 reserved32[1]; /* Reserved for expansion of BSS properties */
+ uint8 flags; /* flags */
+ uint8 reserved[3]; /* Reserved for expansion of BSS properties */
+ uint8 basic_mcs[MCSSET_LEN]; /* 802.11N BSS required MCS set */
+
+ uint16 ie_offset; /* offset at which IEs start, from beginning */
+ uint32 ie_length; /* byte length of Information Elements */
+ /* Add new fields here */
+ /* variable length Information Elements */
+} wl_bss_info_108_t;
+
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+#define WL_BSS_INFO_VERSION 109 /* current version of wl_bss_info struct */
+
+/* BSS info structure
+ * Applications MUST CHECK ie_offset field and length field to access IEs and
+ * next bss_info structure in a vector (in wl_scan_results_t)
+ */
+typedef struct wl_bss_info {
+ uint32 version; /* version field */
+ uint32 length; /* byte length of data in this record,
+ * starting at version and including IEs
+ */
+ struct ether_addr BSSID;
+ uint16 beacon_period; /* units are Kusec */
+ uint16 capability; /* Capability information */
+ uint8 SSID_len;
+ uint8 SSID[32];
+ struct {
+ uint count; /* # rates in this set */
+ uint8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */
+ } rateset; /* supported rates */
+ chanspec_t chanspec; /* chanspec for bss */
+ uint16 atim_window; /* units are Kusec */
+ uint8 dtim_period; /* DTIM period */
+ int16 RSSI; /* receive signal strength (in dBm) */
+ int8 phy_noise; /* noise (in dBm) */
+
+ uint8 n_cap; /* BSS is 802.11N Capable */
+ uint32 nbss_cap; /* 802.11N+AC BSS Capabilities */
+ uint8 ctl_ch; /* 802.11N BSS control channel number */
+ uint8 padding1[3]; /* explicit struct alignment padding */
+ uint16 vht_rxmcsmap; /* VHT rx mcs map */
+ uint16 vht_txmcsmap; /* VHT tx mcs map */
+ uint8 flags; /* flags */
+ uint8 vht_cap; /* BSS is vht capable */
+ uint8 reserved[2]; /* Reserved for expansion of BSS properties */
+ uint8 basic_mcs[MCSSET_LEN]; /* 802.11N BSS required MCS set */
+
+ uint16 ie_offset; /* offset at which IEs start, from beginning */
+ uint32 ie_length; /* byte length of Information Elements */
+	int16		SNR;		/* average SNR during frame reception */
+ /* Add new fields here */
+ /* variable length Information Elements */
+} wl_bss_info_t;
+
+/* bss_info_cap_t flags */
+#define WL_BSS_FLAGS_FROM_BEACON 0x01 /* bss_info derived from beacon */
+#define WL_BSS_FLAGS_FROM_CACHE 0x02 /* bss_info collected from cache */
+#define WL_BSS_FLAGS_RSSI_ONCHANNEL 0x04 /* rssi info was received on channel (vs offchannel) */
+
+/* bssinfo flag for nbss_cap */
+#define VHT_BI_SGI_80MHZ 0x00000100
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+
+typedef struct wl_bsscfg {
+ uint32 wsec;
+ uint32 WPA_auth;
+ uint32 wsec_index;
+ uint32 associated;
+ uint32 BSS;
+ uint32 phytest_on;
+ struct ether_addr prev_BSSID;
+ struct ether_addr BSSID;
+ uint32 targetbss_wpa2_flags;
+ uint32 assoc_type;
+ uint32 assoc_state;
+} wl_bsscfg_t;
+
+typedef struct wl_bss_config {
+ uint32 atim_window;
+ uint32 beacon_period;
+ uint32 chanspec;
+} wl_bss_config_t;
+
+#define DLOAD_HANDLER_VER 1 /* Downloader version */
+#define DLOAD_FLAG_VER_MASK 0xf000 /* Downloader version mask */
+#define DLOAD_FLAG_VER_SHIFT 12 /* Downloader version shift */
+
+#define DL_CRC_NOT_INUSE 0x0001
+
+/* generic download types & flags */
+enum {
+ DL_TYPE_UCODE = 1,
+ DL_TYPE_CLM = 2
+};
+
+/* ucode type values */
+enum {
+ UCODE_FW,
+ INIT_VALS,
+ BS_INIT_VALS
+};
+
+struct wl_dload_data {
+ uint16 flag;
+ uint16 dload_type;
+ uint32 len;
+ uint32 crc;
+ uint8 data[1];
+};
+typedef struct wl_dload_data wl_dload_data_t;
+
+struct wl_ucode_info {
+ uint32 ucode_type;
+ uint32 num_chunks;
+ uint32 chunk_len;
+ uint32 chunk_num;
+ uint8 data_chunk[1];
+};
+typedef struct wl_ucode_info wl_ucode_info_t;
+
+struct wl_clm_dload_info {
+ uint32 ds_id;
+ uint32 clm_total_len;
+ uint32 num_chunks;
+ uint32 chunk_len;
+ uint32 chunk_offset;
+ uint8 data_chunk[1];
+};
+typedef struct wl_clm_dload_info wl_clm_dload_info_t;
+
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+typedef struct wlc_ssid {
+ uint32 SSID_len;
+ uchar SSID[32];
+} wlc_ssid_t;
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+
+#define MAX_PREFERRED_AP_NUM 5
+typedef struct wlc_fastssidinfo {
+ uint32 SSID_channel[MAX_PREFERRED_AP_NUM];
+ wlc_ssid_t SSID_info[MAX_PREFERRED_AP_NUM];
+} wlc_fastssidinfo_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wnm_url {
+ uint8 len;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT wnm_url_t;
+
+typedef struct chan_scandata {
+ uint8 txpower;
+ uint8 pad;
+ chanspec_t channel; /* Channel num, bw, ctrl_sb and band */
+ uint32 channel_mintime;
+ uint32 channel_maxtime;
+} chan_scandata_t;
+
+typedef enum wl_scan_type {
+ EXTDSCAN_FOREGROUND_SCAN,
+ EXTDSCAN_BACKGROUND_SCAN,
+ EXTDSCAN_FORCEDBACKGROUND_SCAN
+} wl_scan_type_t;
+
+#define WLC_EXTDSCAN_MAX_SSID 5
+
+typedef struct wl_extdscan_params {
+ int8 nprobes; /* 0, passive, otherwise active */
+ int8 split_scan; /* split scan */
+ int8 band; /* band */
+ int8 pad;
+ wlc_ssid_t ssid[WLC_EXTDSCAN_MAX_SSID]; /* ssid list */
+	uint32 tx_rate;			/* in 500kbps units */
+ wl_scan_type_t scan_type; /* enum */
+ int32 channel_num;
+ chan_scandata_t channel_list[1]; /* list of chandata structs */
+} wl_extdscan_params_t;
+
+#define WL_EXTDSCAN_PARAMS_FIXED_SIZE (sizeof(wl_extdscan_params_t) - sizeof(chan_scandata_t))
+
+#define WL_BSSTYPE_INFRA 1
+#define WL_BSSTYPE_INDEP 0
+#define WL_BSSTYPE_ANY 2
+
+/* Bitmask for scan_type */
+#define WL_SCANFLAGS_PASSIVE 0x01 /* force passive scan */
+#define WL_SCANFLAGS_RESERVED 0x02 /* Reserved */
+#define WL_SCANFLAGS_PROHIBITED 0x04 /* allow scanning prohibited channels */
+
+#define WL_SCAN_PARAMS_SSID_MAX 10
+
+typedef struct wl_scan_params {
+ wlc_ssid_t ssid; /* default: {0, ""} */
+ struct ether_addr bssid; /* default: bcast */
+ int8 bss_type; /* default: any,
+ * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
+ */
+ uint8 scan_type; /* flags, 0 use default */
+ int32 nprobes; /* -1 use default, number of probes per channel */
+ int32 active_time; /* -1 use default, dwell time per channel for
+ * active scanning
+ */
+ int32 passive_time; /* -1 use default, dwell time per channel
+ * for passive scanning
+ */
+ int32 home_time; /* -1 use default, dwell time for the home channel
+ * between channel scans
+ */
+ int32 channel_num; /* count of channels and ssids that follow
+ *
+ * low half is count of channels in channel_list, 0
+ * means default (use all available channels)
+ *
+ * high half is entries in wlc_ssid_t array that
+ * follows channel_list, aligned for int32 (4 bytes)
+ * meaning an odd channel count implies a 2-byte pad
+ * between end of channel_list and first ssid
+ *
+ * if ssid count is zero, single ssid in the fixed
+ * parameter portion is assumed, otherwise ssid in
+ * the fixed portion is ignored
+ */
+ uint16 channel_list[1]; /* list of chanspecs */
+} wl_scan_params_t;
+
+/* size of wl_scan_params not including variable length array */
+#define WL_SCAN_PARAMS_FIXED_SIZE 64
+
+/* masks for channel and ssid count */
+#define WL_SCAN_PARAMS_COUNT_MASK 0x0000ffff
+#define WL_SCAN_PARAMS_NSSID_SHIFT 16
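Since channel_num packs two counts (channels in the low half, extra SSIDs in the high half) and the SSID array must start on a 4-byte boundary after channel_list, a small sizing sketch may help. It is illustrative only, not part of this patch, and relies solely on the defines above.

/* Hypothetical helpers, for illustration only. */
static int32 example_scan_channel_num(uint16 nchan, uint16 nssid)
{
	return (int32)(((uint32)nssid << WL_SCAN_PARAMS_NSSID_SHIFT) |
	               ((uint32)nchan & WL_SCAN_PARAMS_COUNT_MASK));
}

static uint example_scan_params_size(uint16 nchan, uint16 nssid)
{
	/* channel_list padded to an int32 boundary before the SSID array */
	uint chan_bytes = ((uint)nchan * sizeof(uint16) + 3) & ~(uint)3;

	return WL_SCAN_PARAMS_FIXED_SIZE + chan_bytes +
	       (uint)nssid * sizeof(wlc_ssid_t);
}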
+
+#define WL_SCAN_ACTION_START 1
+#define WL_SCAN_ACTION_CONTINUE 2
+#define WL_SCAN_ACTION_ABORT 3
+
+#define ISCAN_REQ_VERSION 1
+
+/* incremental scan struct */
+typedef struct wl_iscan_params {
+ uint32 version;
+ uint16 action;
+ uint16 scan_duration;
+ wl_scan_params_t params;
+} wl_iscan_params_t;
+
+/* 3 fields + size of wl_scan_params, not including variable length array */
+#define WL_ISCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_iscan_params_t, params) + sizeof(wlc_ssid_t))
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+typedef struct wl_scan_results {
+ uint32 buflen;
+ uint32 version;
+ uint32 count;
+ wl_bss_info_t bss_info[1];
+} wl_scan_results_t;
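As the comments on wl_bss_info require, applications must step through the result vector using each record's length field and reach the IEs through ie_offset. A hedged walking sketch, illustrative only and not part of this patch (a real caller would also bounds-check against buflen):

/* Hypothetical scan-result walker, for illustration only. */
static void example_walk_scan_results(const wl_scan_results_t *results)
{
	const uint8 *ptr = (const uint8 *)results->bss_info;
	uint32 i;

	for (i = 0; i < results->count; i++) {
		const wl_bss_info_t *bi = (const wl_bss_info_t *)ptr;
		const uint8 *ies = (const uint8 *)bi + bi->ie_offset;

		/* bi->SSID[0..SSID_len-1], bi->BSSID, ies[0..ie_length-1] ... */
		(void)ies;
		ptr += bi->length;	/* length covers this record including its IEs */
	}
}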
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+/* size of wl_scan_results not including variable length array */
+#define WL_SCAN_RESULTS_FIXED_SIZE (sizeof(wl_scan_results_t) - sizeof(wl_bss_info_t))
+
+/* wl_iscan_results status values */
+#define WL_SCAN_RESULTS_SUCCESS 0
+#define WL_SCAN_RESULTS_PARTIAL 1
+#define WL_SCAN_RESULTS_PENDING 2
+#define WL_SCAN_RESULTS_ABORTED 3
+#define WL_SCAN_RESULTS_NO_MEM 4
+
+/* Used in EXT_STA */
+#define DNGL_RXCTXT_SIZE 45
+
+#if defined(SIMPLE_ISCAN)
+#define ISCAN_RETRY_CNT 5
+#define ISCAN_STATE_IDLE 0
+#define ISCAN_STATE_SCANING 1
+#define ISCAN_STATE_PENDING 2
+
+/* the buf length can be WLC_IOCTL_MAXLEN (8K) to reduce iteration */
+#define WLC_IW_ISCAN_MAXLEN 2048
+typedef struct iscan_buf {
+ struct iscan_buf * next;
+ char iscan_buf[WLC_IW_ISCAN_MAXLEN];
+} iscan_buf_t;
+#endif /* SIMPLE_ISCAN */
+
+#define ESCAN_REQ_VERSION 1
+
+typedef struct wl_escan_params {
+ uint32 version;
+ uint16 action;
+ uint16 sync_id;
+ wl_scan_params_t params;
+} wl_escan_params_t;
+
+#define WL_ESCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_escan_params_t, params) + sizeof(wlc_ssid_t))
+
+typedef struct wl_escan_result {
+ uint32 buflen;
+ uint32 version;
+ uint16 sync_id;
+ uint16 bss_count;
+ wl_bss_info_t bss_info[1];
+} wl_escan_result_t;
+
+#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(wl_escan_result_t) - sizeof(wl_bss_info_t))
+
+/* incremental scan results struct */
+typedef struct wl_iscan_results {
+ uint32 status;
+ wl_scan_results_t results;
+} wl_iscan_results_t;
+
+/* size of wl_iscan_results not including variable length array */
+#define WL_ISCAN_RESULTS_FIXED_SIZE \
+ (WL_SCAN_RESULTS_FIXED_SIZE + OFFSETOF(wl_iscan_results_t, results))
+
+typedef struct wl_probe_params {
+ wlc_ssid_t ssid;
+ struct ether_addr bssid;
+ struct ether_addr mac;
+} wl_probe_params_t;
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+#define WL_MAXRATES_IN_SET 16 /* max # of rates in a rateset */
+typedef struct wl_rateset {
+ uint32 count; /* # rates in this set */
+ uint8 rates[WL_MAXRATES_IN_SET]; /* rates in 500kbps units w/hi bit set if basic */
+} wl_rateset_t;
+
+typedef struct wl_rateset_args {
+ uint32 count; /* # rates in this set */
+ uint8 rates[WL_MAXRATES_IN_SET]; /* rates in 500kbps units w/hi bit set if basic */
+ uint8 mcs[MCSSET_LEN]; /* supported mcs index bit map */
+} wl_rateset_args_t;
+
+/* uint32 list */
+typedef struct wl_uint32_list {
+ /* in - # of elements, out - # of entries */
+ uint32 count;
+ /* variable length uint32 list */
+ uint32 element[1];
+} wl_uint32_list_t;
+
+/* used for association with a specific BSSID and chanspec list */
+typedef struct wl_assoc_params {
+ struct ether_addr bssid; /* 00:00:00:00:00:00: broadcast scan */
+ int32 chanspec_num; /* 0: all available channels,
+ * otherwise count of chanspecs in chanspec_list
+ */
+ chanspec_t chanspec_list[1]; /* list of chanspecs */
+} wl_assoc_params_t;
+#define WL_ASSOC_PARAMS_FIXED_SIZE OFFSETOF(wl_assoc_params_t, chanspec_list)
+
+/* used for reassociation/roam to a specific BSSID and channel */
+typedef wl_assoc_params_t wl_reassoc_params_t;
+#define WL_REASSOC_PARAMS_FIXED_SIZE WL_ASSOC_PARAMS_FIXED_SIZE
+
+/* used for association to a specific BSSID and channel */
+typedef wl_assoc_params_t wl_join_assoc_params_t;
+#define WL_JOIN_ASSOC_PARAMS_FIXED_SIZE WL_ASSOC_PARAMS_FIXED_SIZE
+
+/* used for join with or without a specific bssid and channel list */
+typedef struct wl_join_params {
+ wlc_ssid_t ssid;
+	wl_assoc_params_t params;	/* optional field, but it must include the fixed portion
+					 * of the wl_assoc_params_t struct when it is present.
+					 */
+} wl_join_params_t;
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+#define WL_JOIN_PARAMS_FIXED_SIZE (OFFSETOF(wl_join_params_t, params) + \
+ WL_ASSOC_PARAMS_FIXED_SIZE)
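A join request is the SSID plus an optional wl_assoc_params_t tail; when a chanspec list is supplied, only the fixed portion plus the list needs to be sent. A sizing sketch, illustrative only and not part of this patch:

/* Hypothetical sizing helper, for illustration only. */
static uint example_join_params_size(int32 nchan)
{
	uint size = WL_JOIN_PARAMS_FIXED_SIZE;

	if (nchan > 0)
		size += (uint)nchan * sizeof(chanspec_t);
	return size;
}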
+/* scan params for extended join */
+typedef struct wl_join_scan_params {
+ uint8 scan_type; /* 0 use default, active or passive scan */
+ int32 nprobes; /* -1 use default, number of probes per channel */
+ int32 active_time; /* -1 use default, dwell time per channel for
+ * active scanning
+ */
+ int32 passive_time; /* -1 use default, dwell time per channel
+ * for passive scanning
+ */
+ int32 home_time; /* -1 use default, dwell time for the home channel
+ * between channel scans
+ */
+} wl_join_scan_params_t;
+
+/* extended join params */
+typedef struct wl_extjoin_params {
+ wlc_ssid_t ssid; /* {0, ""}: wildcard scan */
+ wl_join_scan_params_t scan;
+	wl_join_assoc_params_t assoc;	/* optional field, but it must include the fixed portion
+					 * of the wl_join_assoc_params_t struct when it is
+					 * present.
+					 */
+} wl_extjoin_params_t;
+#define WL_EXTJOIN_PARAMS_FIXED_SIZE (OFFSETOF(wl_extjoin_params_t, assoc) + \
+ WL_JOIN_ASSOC_PARAMS_FIXED_SIZE)
+
+/* All builds use the new 11ac ratespec/chanspec */
+#undef D11AC_IOTYPES
+#define D11AC_IOTYPES
+
+#ifndef D11AC_IOTYPES
+
+/* defines used by the nrate iovar */
+#define NRATE_MCS_INUSE	0x00000080	/* MCS in use; indicates b0-6 holds an mcs */
+#define NRATE_RATE_MASK 0x0000007f /* rate/mcs value */
+#define NRATE_STF_MASK 0x0000ff00 /* stf mode mask: siso, cdd, stbc, sdm */
+#define NRATE_STF_SHIFT 8 /* stf mode shift */
+#define NRATE_OVERRIDE 0x80000000 /* bit indicates override both rate & mode */
+#define NRATE_OVERRIDE_MCS_ONLY 0x40000000 /* bit indicates override of mcs only */
+#define NRATE_SGI_MASK 0x00800000 /* sgi mode */
+#define NRATE_SGI_SHIFT 23 /* sgi mode */
+#define NRATE_LDPC_CODING 0x00400000 /* bit indicates adv coding in use */
+#define NRATE_LDPC_SHIFT 22 /* ldpc shift */
+
+#define NRATE_STF_SISO 0 /* stf mode SISO */
+#define NRATE_STF_CDD 1 /* stf mode CDD */
+#define NRATE_STF_STBC 2 /* stf mode STBC */
+#define NRATE_STF_SDM 3 /* stf mode SDM */
+
+#else /* D11AC_IOTYPES */
+
+/* WL_RSPEC defines for rate information */
+#define WL_RSPEC_RATE_MASK 0x000000FF /* rate or HT MCS value */
+#define WL_RSPEC_VHT_MCS_MASK 0x0000000F /* VHT MCS value */
+#define WL_RSPEC_VHT_NSS_MASK 0x000000F0 /* VHT Nss value */
+#define WL_RSPEC_VHT_NSS_SHIFT 4 /* VHT Nss value shift */
+#define WL_RSPEC_TXEXP_MASK 0x00000300
+#define WL_RSPEC_TXEXP_SHIFT 8
+#define WL_RSPEC_BW_MASK 0x00070000 /* bandwidth mask */
+#define WL_RSPEC_BW_SHIFT 16 /* bandwidth shift */
+#define WL_RSPEC_STBC 0x00100000 /* STBC encoding, Nsts = 2 x Nss */
+#define WL_RSPEC_LDPC 0x00400000 /* bit indicates adv coding in use */
+#define WL_RSPEC_SGI 0x00800000 /* Short GI mode */
+#define WL_RSPEC_ENCODING_MASK 0x03000000 /* Encoding of Rate/MCS field */
+#define WL_RSPEC_OVERRIDE_RATE	0x40000000	/* bit indicates override of mcs only */
+#define WL_RSPEC_OVERRIDE_MODE 0x80000000 /* bit indicates override both rate & mode */
+
+/* WL_RSPEC_ENCODING field defs */
+#define WL_RSPEC_ENCODE_RATE 0x00000000 /* Legacy rate is stored in RSPEC_RATE_MASK */
+#define WL_RSPEC_ENCODE_HT 0x01000000 /* HT MCS is stored in RSPEC_RATE_MASK */
+#define WL_RSPEC_ENCODE_VHT 0x02000000 /* VHT MCS and Nss is stored in RSPEC_RATE_MASK */
+
+/* WL_RSPEC_BW field defs */
+#define WL_RSPEC_BW_UNSPECIFIED 0
+#define WL_RSPEC_BW_20MHZ 0x00010000
+#define WL_RSPEC_BW_40MHZ 0x00020000
+#define WL_RSPEC_BW_80MHZ 0x00030000
+#define WL_RSPEC_BW_160MHZ 0x00040000
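The WL_RSPEC fields above encode the rate value, the encoding family and the modulation options in one 32-bit ratespec. A decoding sketch, illustrative only and not part of this patch (the 500 kbps unit for legacy rates is an assumption consistent with the rateset comments elsewhere in this header):

/* Hypothetical ratespec decoder, for illustration only. */
static void example_decode_rspec(uint32 rspec)
{
	uint32 encoding = rspec & WL_RSPEC_ENCODING_MASK;

	if (encoding == WL_RSPEC_ENCODE_VHT) {
		uint mcs = rspec & WL_RSPEC_VHT_MCS_MASK;
		uint nss = (rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT;
		/* VHT: mcs with nss spatial streams */
		(void)mcs; (void)nss;
	} else if (encoding == WL_RSPEC_ENCODE_HT) {
		uint mcs = rspec & WL_RSPEC_RATE_MASK;		/* HT MCS index */
		(void)mcs;
	} else {
		uint rate_kbps = (rspec & WL_RSPEC_RATE_MASK) * 500;	/* legacy */
		(void)rate_kbps;
	}

	if (rspec & WL_RSPEC_SGI) {
		/* short guard interval in use */
	}
}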
+
+/* Legacy defines for the nrate iovar */
+#define OLD_NRATE_MCS_INUSE	0x00000080	/* MCS in use; indicates b0-6 holds an mcs */
+#define OLD_NRATE_RATE_MASK 0x0000007f /* rate/mcs value */
+#define OLD_NRATE_STF_MASK 0x0000ff00 /* stf mode mask: siso, cdd, stbc, sdm */
+#define OLD_NRATE_STF_SHIFT 8 /* stf mode shift */
+#define OLD_NRATE_OVERRIDE 0x80000000 /* bit indicates override both rate & mode */
+#define OLD_NRATE_OVERRIDE_MCS_ONLY	0x40000000 /* bit indicates override of mcs only */
+#define OLD_NRATE_SGI 0x00800000 /* sgi mode */
+#define OLD_NRATE_LDPC_CODING 0x00400000 /* bit indicates adv coding in use */
+
+#define OLD_NRATE_STF_SISO 0 /* stf mode SISO */
+#define OLD_NRATE_STF_CDD 1 /* stf mode CDD */
+#define OLD_NRATE_STF_STBC 2 /* stf mode STBC */
+#define OLD_NRATE_STF_SDM 3 /* stf mode SDM */
+
+#endif /* D11AC_IOTYPES */
+
+#define ANTENNA_NUM_1 1 /* total number of antennas to be used */
+#define ANTENNA_NUM_2 2
+#define ANTENNA_NUM_3 3
+#define ANTENNA_NUM_4 4
+
+#define ANT_SELCFG_AUTO 0x80 /* bit indicates antenna sel AUTO */
+#define ANT_SELCFG_MASK 0x33 /* antenna configuration mask */
+#define ANT_SELCFG_MAX 4 /* max number of antenna configurations */
+#define ANT_SELCFG_TX_UNICAST 0 /* unicast tx antenna configuration */
+#define ANT_SELCFG_RX_UNICAST 1 /* unicast rx antenna configuration */
+#define ANT_SELCFG_TX_DEF 2 /* default tx antenna configuration */
+#define ANT_SELCFG_RX_DEF 3 /* default rx antenna configuration */
+
+#define MAX_STREAMS_SUPPORTED 4 /* max number of streams supported */
+
+typedef struct {
+ uint8 ant_config[ANT_SELCFG_MAX]; /* antenna configuration */
+ uint8 num_antcfg; /* number of available antenna configurations */
+} wlc_antselcfg_t;
+
+#define HIGHEST_SINGLE_STREAM_MCS 7 /* MCS values greater than this enable multiple streams */
+
+#define MAX_CCA_CHANNELS 38	/* Max number of 20 MHz wide channels */
+#define MAX_CCA_SECS 60 /* CCA keeps this many seconds history */
+
+#define IBSS_MED	15	/* Medium in-bss congestion percentage */
+#define IBSS_HI	25	/* High in-bss congestion percentage */
+#define OBSS_MED 12
+#define OBSS_HI 25
+#define INTERFER_MED 5
+#define INTERFER_HI 10
+
+#define CCA_FLAG_2G_ONLY		0x01	/* Return a channel from the 2.4 GHz band */
+#define CCA_FLAG_5G_ONLY		0x02	/* Return a channel from the 5 GHz band */
+#define CCA_FLAG_IGNORE_DURATION	0x04	/* Ignore dwell time for each channel */
+#define CCA_FLAGS_PREFER_1_6_11	0x10
+#define CCA_FLAG_IGNORE_INTERFER	0x20	/* do not exclude channel based on interference level */
+
+#define CCA_ERRNO_BAND 1 /* After filtering for band pref, no choices left */
+#define CCA_ERRNO_DURATION 2 /* After filtering for duration, no choices left */
+#define CCA_ERRNO_PREF_CHAN 3 /* After filtering for chan pref, no choices left */
+#define CCA_ERRNO_INTERFER 4 /* After filtering for interference, no choices left */
+#define CCA_ERRNO_TOO_FEW 5 /* Only 1 channel was input */
+
+typedef struct {
+ uint32 duration; /* millisecs spent sampling this channel */
+ uint32 congest_ibss; /* millisecs in our bss (presumably this traffic will */
+ /* move if cur bss moves channels) */
+ uint32 congest_obss; /* traffic not in our bss */
+ uint32 interference; /* millisecs detecting a non 802.11 interferer. */
+ uint32 timestamp; /* second timestamp */
+} cca_congest_t;
+
+typedef struct {
+ chanspec_t chanspec; /* Which channel? */
+ uint8 num_secs; /* How many secs worth of data */
+ cca_congest_t secs[1]; /* Data */
+} cca_congest_channel_req_t;
+
+/* interference source detection and identification mode */
+#define ITFR_MODE_DISABLE 0 /* disable feature */
+#define ITFR_MODE_MANUAL_ENABLE 1 /* enable manual detection */
+#define ITFR_MODE_AUTO_ENABLE 2 /* enable auto detection */
+
+/* interference sources */
+enum interference_source {
+	ITFR_NONE = 0,			/* no interference */
+ ITFR_PHONE, /* wireless phone */
+ ITFR_VIDEO_CAMERA, /* wireless video camera */
+ ITFR_MICROWAVE_OVEN, /* microwave oven */
+ ITFR_BABY_MONITOR, /* wireless baby monitor */
+ ITFR_BLUETOOTH, /* bluetooth */
+ ITFR_VIDEO_CAMERA_OR_BABY_MONITOR, /* wireless camera or baby monitor */
+ ITFR_BLUETOOTH_OR_BABY_MONITOR, /* bluetooth or baby monitor */
+ ITFR_VIDEO_CAMERA_OR_PHONE, /* video camera or phone */
+ ITFR_UNIDENTIFIED /* interference from unidentified source */
+};
+
+/* structure for interference source report */
+typedef struct {
+ uint32 flags; /* flags. bit definitions below */
+ uint32 source; /* last detected interference source */
+	uint32 timestamp;	/* timestamp (in seconds) of the last interference flag change */
+} interference_source_rep_t;
+
+/* bit definitions for flags in interference source report */
+#define ITFR_INTERFERENCED 1 /* interference detected */
+#define ITFR_HOME_CHANNEL 2 /* home channel has interference */
+#define ITFR_NOISY_ENVIRONMENT	4	/* noisy environment, so feature stopped */
+
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+#define WLC_CNTRY_BUF_SZ 4 /* Country string is 3 bytes + NUL */
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+
+typedef struct wl_country {
+ char country_abbrev[WLC_CNTRY_BUF_SZ]; /* nul-terminated country code used in
+ * the Country IE
+ */
+ int32 rev; /* revision specifier for ccode
+ * on set, -1 indicates unspecified.
+ * on get, rev >= 0
+ */
+ char ccode[WLC_CNTRY_BUF_SZ]; /* nul-terminated built-in country code.
+ * variable length, but fixed size in
+ * struct allows simple allocation for
+ * expected country strings <= 3 chars.
+ */
+} wl_country_t;
+
+typedef struct wl_channels_in_country {
+ uint32 buflen;
+ uint32 band;
+ char country_abbrev[WLC_CNTRY_BUF_SZ];
+ uint32 count;
+ uint32 channel[1];
+} wl_channels_in_country_t;
+
+typedef struct wl_country_list {
+ uint32 buflen;
+ uint32 band_set;
+ uint32 band;
+ uint32 count;
+ char country_abbrev[1];
+} wl_country_list_t;
+
+#define WL_NUM_RPI_BINS 8
+#define WL_RM_TYPE_BASIC 1
+#define WL_RM_TYPE_CCA 2
+#define WL_RM_TYPE_RPI 3
+
+#define WL_RM_FLAG_PARALLEL (1<<0)
+
+#define WL_RM_FLAG_LATE (1<<1)
+#define WL_RM_FLAG_INCAPABLE (1<<2)
+#define WL_RM_FLAG_REFUSED (1<<3)
+
+typedef struct wl_rm_req_elt {
+ int8 type;
+ int8 flags;
+ chanspec_t chanspec;
+ uint32 token; /* token for this measurement */
+ uint32 tsf_h; /* TSF high 32-bits of Measurement start time */
+ uint32 tsf_l; /* TSF low 32-bits */
+ uint32 dur; /* TUs */
+} wl_rm_req_elt_t;
+
+typedef struct wl_rm_req {
+ uint32 token; /* overall measurement set token */
+ uint32 count; /* number of measurement requests */
+ void *cb; /* completion callback function: may be NULL */
+ void *cb_arg; /* arg to completion callback function */
+ wl_rm_req_elt_t req[1]; /* variable length block of requests */
+} wl_rm_req_t;
+#define WL_RM_REQ_FIXED_LEN OFFSETOF(wl_rm_req_t, req)
+
+typedef struct wl_rm_rep_elt {
+ int8 type;
+ int8 flags;
+ chanspec_t chanspec;
+ uint32 token; /* token for this measurement */
+ uint32 tsf_h; /* TSF high 32-bits of Measurement start time */
+ uint32 tsf_l; /* TSF low 32-bits */
+ uint32 dur; /* TUs */
+ uint32 len; /* byte length of data block */
+ uint8 data[1]; /* variable length data block */
+} wl_rm_rep_elt_t;
+#define WL_RM_REP_ELT_FIXED_LEN 24 /* length excluding data block */
+
+#define WL_RPI_REP_BIN_NUM 8
+typedef struct wl_rm_rpi_rep {
+ uint8 rpi[WL_RPI_REP_BIN_NUM];
+ int8 rpi_max[WL_RPI_REP_BIN_NUM];
+} wl_rm_rpi_rep_t;
+
+typedef struct wl_rm_rep {
+ uint32 token; /* overall measurement set token */
+ uint32 len; /* length of measurement report block */
+ wl_rm_rep_elt_t rep[1]; /* variable length block of reports */
+} wl_rm_rep_t;
+#define WL_RM_REP_FIXED_LEN 8
+
+
+typedef enum sup_auth_status {
+ /* Basic supplicant authentication states */
+ WLC_SUP_DISCONNECTED = 0,
+ WLC_SUP_CONNECTING,
+ WLC_SUP_IDREQUIRED,
+ WLC_SUP_AUTHENTICATING,
+ WLC_SUP_AUTHENTICATED,
+ WLC_SUP_KEYXCHANGE,
+ WLC_SUP_KEYED,
+ WLC_SUP_TIMEOUT,
+ WLC_SUP_LAST_BASIC_STATE,
+
+ /* Extended supplicant authentication states */
+ /* Waiting to receive handshake msg M1 */
+ WLC_SUP_KEYXCHANGE_WAIT_M1 = WLC_SUP_AUTHENTICATED,
+ /* Preparing to send handshake msg M2 */
+ WLC_SUP_KEYXCHANGE_PREP_M2 = WLC_SUP_KEYXCHANGE,
+ /* Waiting to receive handshake msg M3 */
+ WLC_SUP_KEYXCHANGE_WAIT_M3 = WLC_SUP_LAST_BASIC_STATE,
+ WLC_SUP_KEYXCHANGE_PREP_M4, /* Preparing to send handshake msg M4 */
+ WLC_SUP_KEYXCHANGE_WAIT_G1, /* Waiting to receive handshake msg G1 */
+ WLC_SUP_KEYXCHANGE_PREP_G2 /* Preparing to send handshake msg G2 */
+} sup_auth_status_t;
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+/* Enumerate crypto algorithms */
+#define CRYPTO_ALGO_OFF 0
+#define CRYPTO_ALGO_WEP1 1
+#define CRYPTO_ALGO_TKIP 2
+#define CRYPTO_ALGO_WEP128 3
+#define CRYPTO_ALGO_AES_CCM 4
+#define CRYPTO_ALGO_AES_OCB_MSDU 5
+#define CRYPTO_ALGO_AES_OCB_MPDU 6
+#define CRYPTO_ALGO_NALG 7
+#ifdef BCMWAPI_WPI
+#define CRYPTO_ALGO_SMS4 11
+#endif /* BCMWAPI_WPI */
+#define CRYPTO_ALGO_PMK 12 /* for 802.1x supp to set PMK before 4-way */
+
+#define WSEC_GEN_MIC_ERROR 0x0001
+#define WSEC_GEN_REPLAY 0x0002
+#define WSEC_GEN_ICV_ERROR 0x0004
+#define WSEC_GEN_MFP_ACT_ERROR 0x0008
+#define WSEC_GEN_MFP_DISASSOC_ERROR 0x0010
+#define WSEC_GEN_MFP_DEAUTH_ERROR 0x0020
+
+#define WL_SOFT_KEY (1 << 0) /* Indicates this key is using soft encrypt */
+#define WL_PRIMARY_KEY (1 << 1) /* Indicates this key is the primary (ie tx) key */
+#define WL_KF_RES_4 (1 << 4) /* Reserved for backward compat */
+#define WL_KF_RES_5 (1 << 5) /* Reserved for backward compat */
+#define WL_IBSS_PEER_GROUP_KEY (1 << 6) /* Indicates a group key for a IBSS PEER */
+
+typedef struct wl_wsec_key {
+ uint32 index; /* key index */
+ uint32 len; /* key length */
+ uint8 data[DOT11_MAX_KEY_SIZE]; /* key data */
+ uint32 pad_1[18];
+ uint32 algo; /* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
+ uint32 flags; /* misc flags */
+ uint32 pad_2[2];
+ int pad_3;
+ int iv_initialized; /* has IV been initialized already? */
+ int pad_4;
+ /* Rx IV */
+ struct {
+ uint32 hi; /* upper 32 bits of IV */
+ uint16 lo; /* lower 16 bits of IV */
+ } rxiv;
+ uint32 pad_5[2];
+ struct ether_addr ea; /* per station */
+} wl_wsec_key_t;
+
+#define WSEC_MIN_PSK_LEN 8
+#define WSEC_MAX_PSK_LEN 64
+
+/* Flag for key material that needs passphrase hashing */
+#define WSEC_PASSPHRASE (1<<0)
+
+/* receptacle for WLC_SET_WSEC_PMK parameter */
+typedef struct {
+ ushort key_len; /* octets in key material */
+ ushort flags; /* key handling qualification */
+ uint8 key[WSEC_MAX_PSK_LEN]; /* PMK material */
+} wsec_pmk_t;
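To hand a passphrase to the firmware via WLC_SET_WSEC_PMK, the key material is placed in a wsec_pmk_t with WSEC_PASSPHRASE set so the firmware derives the PMK itself. A hedged, userspace-style sketch, illustrative only and not part of this patch (a full 64-character key would be a raw hex PMK and is handled differently):

#include <string.h>

/* Hypothetical helper, for illustration only. */
static int example_fill_pmk(wsec_pmk_t *pmk, const char *passphrase)
{
	size_t len = strlen(passphrase);

	if (len < WSEC_MIN_PSK_LEN || len >= WSEC_MAX_PSK_LEN)
		return -1;		/* 8..63 character passphrases only */

	memset(pmk, 0, sizeof(*pmk));
	pmk->key_len = (ushort)len;
	pmk->flags = WSEC_PASSPHRASE;	/* ask firmware to hash the passphrase */
	memcpy(pmk->key, passphrase, len);
	return 0;
}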
+
+/* wireless security bitvec */
+#define WEP_ENABLED 0x0001
+#define TKIP_ENABLED 0x0002
+#define AES_ENABLED 0x0004
+#define WSEC_SWFLAG 0x0008
+#define SES_OW_ENABLED 0x0040 /* to go into transition mode without setting wep */
+#ifdef BCMWAPI_WPI
+#define SMS4_ENABLED 0x0100
+#endif /* BCMWAPI_WPI */
+
+/* wsec macros for operating on the above definitions */
+#define WSEC_WEP_ENABLED(wsec) ((wsec) & WEP_ENABLED)
+#define WSEC_TKIP_ENABLED(wsec) ((wsec) & TKIP_ENABLED)
+#define WSEC_AES_ENABLED(wsec) ((wsec) & AES_ENABLED)
+
+#ifdef BCMWAPI_WPI
+#define WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | SMS4_ENABLED))
+#else /* BCMWAPI_WPI */
+#define WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))
+#endif /* BCMWAPI_WPI */
+#define WSEC_SES_OW_ENABLED(wsec) ((wsec) & SES_OW_ENABLED)
+#ifdef BCMWAPI_WAI
+#define WSEC_SMS4_ENABLED(wsec) ((wsec) & SMS4_ENABLED)
+#endif /* BCMWAPI_WAI */
+
+#ifdef MFP
+#define MFP_CAPABLE 0x0200
+#define MFP_REQUIRED 0x0400
+#define MFP_SHA256 0x0800 /* a special configuration for STA for WIFI test tool */
+#endif /* MFP */
+
+/* WPA authentication mode bitvec */
+#define WPA_AUTH_DISABLED 0x0000 /* Legacy (i.e., non-WPA) */
+#define WPA_AUTH_NONE 0x0001 /* none (IBSS) */
+#define WPA_AUTH_UNSPECIFIED 0x0002 /* over 802.1x */
+#define WPA_AUTH_PSK 0x0004 /* Pre-shared key */
+/* #define WPA_AUTH_8021X 0x0020 */ /* 802.1x, reserved */
+#define WPA2_AUTH_UNSPECIFIED 0x0040 /* over 802.1x */
+#define WPA2_AUTH_PSK 0x0080 /* Pre-shared key */
+#define BRCM_AUTH_PSK 0x0100 /* BRCM specific PSK */
+#define BRCM_AUTH_DPT 0x0200 /* DPT PSK without group keys */
+#if defined(BCMWAPI_WAI) || defined(BCMWAPI_WPI)
+#define WPA_AUTH_WAPI 0x0400
+#define WAPI_AUTH_NONE WPA_AUTH_NONE /* none (IBSS) */
+#define WAPI_AUTH_UNSPECIFIED 0x0400 /* over AS */
+#define WAPI_AUTH_PSK 0x0800 /* Pre-shared key */
+#endif /* BCMWAPI_WAI || BCMWAPI_WPI */
+#define WPA2_AUTH_MFP 0x1000 /* MFP (11w) in contrast to CCX */
+#define WPA2_AUTH_TPK 0x2000 /* TDLS Peer Key */
+#define WPA2_AUTH_FT 0x4000 /* Fast Transition. */
+#define WPA_AUTH_PFN_ANY 0xffffffff /* for PFN, match only ssid */
+
+/* pmkid */
+#define MAXPMKID 16
+
+typedef struct _pmkid {
+ struct ether_addr BSSID;
+ uint8 PMKID[WPA2_PMKID_LEN];
+} pmkid_t;
+
+typedef struct _pmkid_list {
+ uint32 npmkid;
+ pmkid_t pmkid[1];
+} pmkid_list_t;
+
+typedef struct _pmkid_cand {
+ struct ether_addr BSSID;
+ uint8 preauth;
+} pmkid_cand_t;
+
+typedef struct _pmkid_cand_list {
+ uint32 npmkid_cand;
+ pmkid_cand_t pmkid_cand[1];
+} pmkid_cand_list_t;
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+typedef struct wl_assoc_info {
+ uint32 req_len;
+ uint32 resp_len;
+ uint32 flags;
+ struct dot11_assoc_req req;
+ struct ether_addr reassoc_bssid; /* used in reassoc's */
+ struct dot11_assoc_resp resp;
+} wl_assoc_info_t;
+
+/* flags */
+#define WLC_ASSOC_REQ_IS_REASSOC 0x01 /* assoc req was actually a reassoc */
+
+typedef struct wl_led_info {
+ uint32 index; /* led index */
+ uint32 behavior;
+ uint8 activehi;
+} wl_led_info_t;
+
+
+/* srom read/write struct passed through ioctl */
+typedef struct {
+ uint byteoff; /* byte offset */
+ uint nbytes; /* number of bytes */
+ uint16 buf[1];
+} srom_rw_t;
+
+/* similar cis (srom or otp) struct [iovar: may not be aligned] */
+typedef struct {
+ uint32 source; /* cis source */
+ uint32 byteoff; /* byte offset */
+ uint32 nbytes; /* number of bytes */
+ /* data follows here */
+} cis_rw_t;
+
+#define WLC_CIS_DEFAULT 0 /* built-in default */
+#define WLC_CIS_SROM 1 /* source is sprom */
+#define WLC_CIS_OTP 2 /* source is otp */
+
+/* R_REG and W_REG struct passed through ioctl */
+typedef struct {
+ uint32 byteoff; /* byte offset of the field in d11regs_t */
+ uint32 val; /* read/write value of the field */
+ uint32 size; /* sizeof the field */
+ uint band; /* band (optional) */
+} rw_reg_t;
+
+/* Structure used by GET/SET_ATTEN ioctls - it controls power in b/g-band */
+/* PCL - Power Control Loop */
+/* current gain setting is replaced by user input */
+#define WL_ATTEN_APP_INPUT_PCL_OFF 0 /* turn off PCL, apply supplied input */
+#define WL_ATTEN_PCL_ON 1 /* turn on PCL */
+/* current gain setting is maintained */
+#define WL_ATTEN_PCL_OFF 2 /* turn off PCL. */
+
+typedef struct {
+ uint16 auto_ctrl; /* WL_ATTEN_XX */
+ uint16 bb; /* Baseband attenuation */
+ uint16 radio; /* Radio attenuation */
+ uint16 txctl1; /* Radio TX_CTL1 value */
+} atten_t;
+
+/* Per-AC retry parameters */
+struct wme_tx_params_s {
+ uint8 short_retry;
+ uint8 short_fallback;
+ uint8 long_retry;
+ uint8 long_fallback;
+ uint16 max_rate; /* In units of 512 Kbps */
+};
+
+typedef struct wme_tx_params_s wme_tx_params_t;
+
+#define WL_WME_TX_PARAMS_IO_BYTES (sizeof(wme_tx_params_t) * AC_COUNT)
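+
+/* Editorial note: the WME retry parameters are carried as one wme_tx_params_t per
+ * access category, so an ioctl buffer is simply an AC_COUNT-sized array (AC_COUNT
+ * is defined elsewhere in these headers):
+ *
+ *   wme_tx_params_t params[AC_COUNT];   -- occupies WL_WME_TX_PARAMS_IO_BYTES --
+ */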
+
+/* defines used by poweridx iovar - it controls power in a-band */
+/* current gain setting is maintained */
+#define WL_PWRIDX_PCL_OFF -2 /* turn off PCL. */
+#define WL_PWRIDX_PCL_ON -1 /* turn on PCL */
+#define WL_PWRIDX_LOWER_LIMIT -2 /* lower limit */
+#define WL_PWRIDX_UPPER_LIMIT 63 /* upper limit */
+/* value >= 0 causes
+ * - input to be set to that value
+ * - PCL to be off
+ */
+
+/* Used to get specific link/ac parameters */
+typedef struct {
+ int ac;
+ uint8 val;
+ struct ether_addr ea;
+} link_val_t;
+
+#define BCM_MAC_STATUS_INDICATION (0x40010200L)
+
+typedef struct {
+ uint16 ver; /* version of this struct */
+ uint16 len; /* length in bytes of this structure */
+ uint16 cap; /* sta's advertised capabilities */
+ uint32 flags; /* flags defined below */
+ uint32 idle; /* time since data pkt rx'd from sta */
+ struct ether_addr ea; /* Station address */
+ wl_rateset_t rateset; /* rateset in use */
+ uint32 in; /* seconds elapsed since associated */
+ uint32 listen_interval_inms; /* Min Listen interval in ms for this STA */
+ uint32 tx_pkts; /* # of packets transmitted */
+ uint32 tx_failures; /* # of packets failed */
+ uint32 rx_ucast_pkts; /* # of unicast packets received */
+ uint32 rx_mcast_pkts; /* # of multicast packets received */
+ uint32 tx_rate; /* Rate of last successful tx frame */
+ uint32 rx_rate; /* Rate of last successful rx frame */
+ uint32 rx_decrypt_succeeds; /* # of packet decrypted successfully */
+ uint32 rx_decrypt_failures; /* # of packet decrypted unsuccessfully */
+} sta_info_t;
+
+#define WL_OLD_STAINFO_SIZE OFFSETOF(sta_info_t, tx_pkts)
+
+#define WL_STA_VER 3
+
+/* Flags for sta_info_t indicating properties of STA */
+#define WL_STA_BRCM 0x1 /* Running a Broadcom driver */
+#define WL_STA_WME 0x2 /* WMM association */
+#define WL_STA_UNUSED 0x4
+#define WL_STA_AUTHE 0x8 /* Authenticated */
+#define WL_STA_ASSOC 0x10 /* Associated */
+#define WL_STA_AUTHO 0x20 /* Authorized */
+#define WL_STA_WDS 0x40 /* Wireless Distribution System */
+#define WL_STA_WDS_LINKUP 0x80 /* WDS traffic/probes flowing properly */
+#define WL_STA_PS 0x100 /* STA is in power save mode from AP's viewpoint */
+#define WL_STA_APSD_BE 0x200 /* APSD delivery/trigger for AC_BE enabled by default */
+#define WL_STA_APSD_BK 0x400 /* APSD delivery/trigger for AC_BK enabled by default */
+#define WL_STA_APSD_VI 0x800 /* APSD delivery/trigger for AC_VI enabled by default */
+#define WL_STA_APSD_VO 0x1000 /* APSD delivery/trigger for AC_VO enabled by default */
+#define WL_STA_N_CAP 0x2000 /* STA 802.11n capable */
+#define WL_STA_SCBSTATS 0x4000 /* Per STA debug stats */
+
+#define WL_WDS_LINKUP WL_STA_WDS_LINKUP /* deprecated */
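+
+/* Editorial sketch: a consumer of sta_info_t data would normally validate the
+ * version and length before interpreting the fields, then test the flag bits
+ * above. The helper name is an assumption.
+ */
+static inline int wl_sta_is_associated(const sta_info_t *sta)
+{
+ if (sta->ver != WL_STA_VER || sta->len < WL_OLD_STAINFO_SIZE)
+  return 0; /* unknown layout - do not interpret the flags */
+ return (sta->flags & (WL_STA_AUTHE | WL_STA_ASSOC)) == (WL_STA_AUTHE | WL_STA_ASSOC);
+}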
+
+/* Values for TX Filter override mode */
+#define WLC_TXFILTER_OVERRIDE_DISABLED 0
+#define WLC_TXFILTER_OVERRIDE_ENABLED 1
+
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+/* Used to get specific STA parameters */
+typedef struct {
+ uint32 val;
+ struct ether_addr ea;
+} scb_val_t;
+
+/* Used by iovar versions of some ioctls, i.e. WLC_SCB_AUTHORIZE et al */
+typedef struct {
+ uint32 code;
+ scb_val_t ioctl_args;
+} authops_t;
+
+/* channel encoding */
+typedef struct channel_info {
+ int hw_channel;
+ int target_channel;
+ int scan_channel;
+} channel_info_t;
+
+/* For ioctls that take a list of MAC addresses */
+struct maclist {
+ uint count; /* number of MAC addresses */
+ struct ether_addr ea[1]; /* variable length array of MAC addresses */
+};
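+
+/* Editorial sketch: struct maclist is variable length, so the buffer passed with
+ * WLC_SET_MACLIST must be sized for 'count' entries before it is filled. The
+ * helper name is an assumption.
+ */
+static inline void wl_maclist_init(struct maclist *ml, const struct ether_addr *addrs, uint count)
+{
+ uint i;
+
+ ml->count = count;
+ for (i = 0; i < count; i++)
+  ml->ea[i] = addrs[i]; /* caller sized the buffer for 'count' ether_addr entries */
+}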
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+/* get pkt count struct passed through ioctl */
+typedef struct get_pktcnt {
+ uint rx_good_pkt;
+ uint rx_bad_pkt;
+ uint tx_good_pkt;
+ uint tx_bad_pkt;
+ uint rx_ocast_good_pkt; /* unicast packets destined for others */
+} get_pktcnt_t;
+
+/* NINTENDO2 */
+#define LQ_IDX_MIN 0
+#define LQ_IDX_MAX 1
+#define LQ_IDX_AVG 2
+#define LQ_IDX_SUM 2
+#define LQ_IDX_LAST 3
+#define LQ_STOP_MONITOR 0
+#define LQ_START_MONITOR 1
+
+/* Get average RSSI, Rx PHY rate and SNR values */
+typedef struct {
+ int rssi[LQ_IDX_LAST]; /* Array to keep min, max, avg rssi */
+ int snr[LQ_IDX_LAST]; /* Array to keep min, max, avg snr */
+ int isvalid; /* Flag indicating whether above data is valid */
+} wl_lq_t; /* Link Quality */
+
+typedef enum wl_wakeup_reason_type {
+ LCD_ON = 1,
+ LCD_OFF,
+ DRC1_WAKE,
+ DRC2_WAKE,
+ REASON_LAST
+} wl_wr_type_t;
+
+typedef struct {
+/* Unique filter id */
+ uint32 id;
+
+/* stores the reason for the last wake up */
+ uint8 reason;
+} wl_wr_t;
+
+/* Get MAC specific rate histogram command */
+typedef struct {
+ struct ether_addr ea; /* MAC Address */
+ uint8 ac_cat; /* Access Category */
+ uint8 num_pkts; /* Number of packet entries to be averaged */
+} wl_mac_ratehisto_cmd_t; /* MAC Specific Rate Histogram command */
+
+/* Get MAC rate histogram response */
+typedef struct {
+ uint32 rate[WLC_MAXRATE + 1]; /* Rates */
+ uint32 mcs[WL_RATESET_SZ_HT_MCS * WL_TX_CHAINS_MAX]; /* MCS counts */
+ uint32 vht[WL_RATESET_SZ_VHT_MCS][WL_TX_CHAINS_MAX]; /* VHT counts */
+ uint32 tsf_timer[2][2]; /* Start and End time for 8bytes value */
+} wl_mac_ratehisto_res_t; /* MAC Specific Rate Histogram Response */
+
+/* Values for TX Filter override mode */
+#define WLC_TXFILTER_OVERRIDE_DISABLED 0
+#define WLC_TXFILTER_OVERRIDE_ENABLED 1
+
+#define WL_IOCTL_ACTION_GET 0x0
+#define WL_IOCTL_ACTION_SET 0x1
+#define WL_IOCTL_ACTION_OVL_IDX_MASK 0x1e
+#define WL_IOCTL_ACTION_OVL_RSV 0x20
+#define WL_IOCTL_ACTION_OVL 0x40
+#define WL_IOCTL_ACTION_MASK 0x7e
+#define WL_IOCTL_ACTION_OVL_SHIFT 1
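+
+/* Editorial sketch: how the action word encodes get/set and the overlay index
+ * with the masks above - (action & WL_IOCTL_ACTION_SET) being nonzero selects a
+ * set rather than a get (WL_IOCTL_ACTION_GET is 0).
+ */
+static inline uint wl_ioctl_action_ovl_idx(uint action)
+{
+ return (action & WL_IOCTL_ACTION_OVL_IDX_MASK) >> WL_IOCTL_ACTION_OVL_SHIFT;
+}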
+
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+/* Linux network driver ioctl encoding */
+typedef struct wl_ioctl {
+ uint cmd; /* common ioctl definition */
+ void *buf; /* pointer to user buffer */
+ uint len; /* length of user buffer */
+ uint8 set; /* 1=set IOCTL; 0=query IOCTL */
+ uint used; /* bytes read or written (optional) */
+ uint needed; /* bytes needed (optional) */
+} wl_ioctl_t;
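+
+/* Editorial sketch (not part of the patch): filling wl_ioctl_t for a query. How
+ * the structure is then delivered to the driver is outside this header, so that
+ * step is only a placeholder comment; the helper name is an assumption.
+ */
+static inline void wl_ioctl_prepare_get(wl_ioctl_t *ioc, uint cmd, void *buf, uint len)
+{
+ ioc->cmd = cmd;  /* one of the WLC_* command codes defined below */
+ ioc->buf = buf;  /* user buffer that receives the result */
+ ioc->len = len;  /* size of that buffer */
+ ioc->set = 0;  /* 0 = query, 1 = set */
+ ioc->used = 0;
+ ioc->needed = 0;
+ /* ...pass ioc to the driver's private ioctl entry point... */
+}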
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+
+/* reference to wl_ioctl_t struct used by usermode driver */
+#define ioctl_subtype set /* subtype param */
+#define ioctl_pid used /* pid param */
+#define ioctl_status needed /* status param */
+
+/*
+ * Structure for passing hardware and software
+ * revision info up from the driver.
+ */
+typedef struct wlc_rev_info {
+ uint vendorid; /* PCI vendor id */
+ uint deviceid; /* device id of chip */
+ uint radiorev; /* radio revision */
+ uint chiprev; /* chip revision */
+ uint corerev; /* core revision */
+ uint boardid; /* board identifier (usu. PCI sub-device id) */
+ uint boardvendor; /* board vendor (usu. PCI sub-vendor id) */
+ uint boardrev; /* board revision */
+ uint driverrev; /* driver version */
+ uint ucoderev; /* microcode version */
+ uint bus; /* bus type */
+ uint chipnum; /* chip number */
+ uint phytype; /* phy type */
+ uint phyrev; /* phy revision */
+ uint anarev; /* anacore rev */
+ uint chippkg; /* chip package info */
+} wlc_rev_info_t;
+
+#define WL_REV_INFO_LEGACY_LENGTH 48
+
+#define WL_BRAND_MAX 10
+typedef struct wl_instance_info {
+ uint instance;
+ char brand[WL_BRAND_MAX];
+} wl_instance_info_t;
+
+/* structure to change size of tx fifo */
+typedef struct wl_txfifo_sz {
+ uint16 magic;
+ uint16 fifo;
+ uint16 size;
+} wl_txfifo_sz_t;
+/* magic pattern used to detect a mismatch between the driver and the wl utility */
+#define WL_TXFIFO_SZ_MAGIC 0xa5a5
+
+/* Transfer info about an IOVar from the driver */
+/* Max supported IOV name size in bytes, + 1 for nul termination */
+#define WLC_IOV_NAME_LEN 30
+typedef struct wlc_iov_trx_s {
+ uint8 module;
+ uint8 type;
+ char name[WLC_IOV_NAME_LEN];
+} wlc_iov_trx_t;
+
+/* check this magic number */
+#define WLC_IOCTL_MAGIC 0x14e46c77
+
+/* bump this number if you change the ioctl interface */
+#ifdef D11AC_IOTYPES
+#define WLC_IOCTL_VERSION 2
+#define WLC_IOCTL_VERSION_LEGACY_IOTYPES 1
+#else
+#define WLC_IOCTL_VERSION 1
+#endif /* D11AC_IOTYPES */
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+#define WLC_IOCTL_MAXLEN 8192 /* max length ioctl buffer required */
+#define WLC_IOCTL_SMLEN 256 /* "small" length ioctl buffer required */
+#define WLC_IOCTL_MEDLEN 1536 /* "med" length ioctl buffer required */
+#if defined(LCNCONF) || defined(LCN40CONF)
+#define WLC_SAMPLECOLLECT_MAXLEN 8192 /* Max Sample Collect buffer */
+#else
+#define WLC_SAMPLECOLLECT_MAXLEN 10240 /* Max Sample Collect buffer for two cores */
+#endif
+
+/* common ioctl definitions */
+#define WLC_GET_MAGIC 0
+#define WLC_GET_VERSION 1
+#define WLC_UP 2
+#define WLC_DOWN 3
+#define WLC_GET_LOOP 4
+#define WLC_SET_LOOP 5
+#define WLC_DUMP 6
+#define WLC_GET_MSGLEVEL 7
+#define WLC_SET_MSGLEVEL 8
+#define WLC_GET_PROMISC 9
+#define WLC_SET_PROMISC 10
+/* #define WLC_OVERLAY_IOCTL 11 */ /* not supported */
+#define WLC_GET_RATE 12
+#define WLC_GET_MAX_RATE 13
+#define WLC_GET_INSTANCE 14
+/* #define WLC_GET_FRAG 15 */ /* no longer supported */
+/* #define WLC_SET_FRAG 16 */ /* no longer supported */
+/* #define WLC_GET_RTS 17 */ /* no longer supported */
+/* #define WLC_SET_RTS 18 */ /* no longer supported */
+#define WLC_GET_INFRA 19
+#define WLC_SET_INFRA 20
+#define WLC_GET_AUTH 21
+#define WLC_SET_AUTH 22
+#define WLC_GET_BSSID 23
+#define WLC_SET_BSSID 24
+#define WLC_GET_SSID 25
+#define WLC_SET_SSID 26
+#define WLC_RESTART 27
+#define WLC_TERMINATED 28
+/* #define WLC_DUMP_SCB 28 */ /* no longer supported */
+#define WLC_GET_CHANNEL 29
+#define WLC_SET_CHANNEL 30
+#define WLC_GET_SRL 31
+#define WLC_SET_SRL 32
+#define WLC_GET_LRL 33
+#define WLC_SET_LRL 34
+#define WLC_GET_PLCPHDR 35
+#define WLC_SET_PLCPHDR 36
+#define WLC_GET_RADIO 37
+#define WLC_SET_RADIO 38
+#define WLC_GET_PHYTYPE 39
+#define WLC_DUMP_RATE 40
+#define WLC_SET_RATE_PARAMS 41
+#define WLC_GET_FIXRATE 42
+#define WLC_SET_FIXRATE 43
+/* #define WLC_GET_WEP 42 */ /* no longer supported */
+/* #define WLC_SET_WEP 43 */ /* no longer supported */
+#define WLC_GET_KEY 44
+#define WLC_SET_KEY 45
+#define WLC_GET_REGULATORY 46
+#define WLC_SET_REGULATORY 47
+#define WLC_GET_PASSIVE_SCAN 48
+#define WLC_SET_PASSIVE_SCAN 49
+#define WLC_SCAN 50
+#define WLC_SCAN_RESULTS 51
+#define WLC_DISASSOC 52
+#define WLC_REASSOC 53
+#define WLC_GET_ROAM_TRIGGER 54
+#define WLC_SET_ROAM_TRIGGER 55
+#define WLC_GET_ROAM_DELTA 56
+#define WLC_SET_ROAM_DELTA 57
+#define WLC_GET_ROAM_SCAN_PERIOD 58
+#define WLC_SET_ROAM_SCAN_PERIOD 59
+#define WLC_EVM 60 /* diag */
+#define WLC_GET_TXANT 61
+#define WLC_SET_TXANT 62
+#define WLC_GET_ANTDIV 63
+#define WLC_SET_ANTDIV 64
+/* #define WLC_GET_TXPWR 65 */ /* no longer supported */
+/* #define WLC_SET_TXPWR 66 */ /* no longer supported */
+#define WLC_GET_CLOSED 67
+#define WLC_SET_CLOSED 68
+#define WLC_GET_MACLIST 69
+#define WLC_SET_MACLIST 70
+#define WLC_GET_RATESET 71
+#define WLC_SET_RATESET 72
+/* #define WLC_GET_LOCALE 73 */ /* no longer supported */
+#define WLC_LONGTRAIN 74
+#define WLC_GET_BCNPRD 75
+#define WLC_SET_BCNPRD 76
+#define WLC_GET_DTIMPRD 77
+#define WLC_SET_DTIMPRD 78
+#define WLC_GET_SROM 79
+#define WLC_SET_SROM 80
+#define WLC_GET_WEP_RESTRICT 81
+#define WLC_SET_WEP_RESTRICT 82
+#define WLC_GET_COUNTRY 83
+#define WLC_SET_COUNTRY 84
+#define WLC_GET_PM 85
+#define WLC_SET_PM 86
+#define WLC_GET_WAKE 87
+#define WLC_SET_WAKE 88
+/* #define WLC_GET_D11CNTS 89 */ /* -> "counters" iovar */
+#define WLC_GET_FORCELINK 90 /* ndis only */
+#define WLC_SET_FORCELINK 91 /* ndis only */
+#define WLC_FREQ_ACCURACY 92 /* diag */
+#define WLC_CARRIER_SUPPRESS 93 /* diag */
+#define WLC_GET_PHYREG 94
+#define WLC_SET_PHYREG 95
+#define WLC_GET_RADIOREG 96
+#define WLC_SET_RADIOREG 97
+#define WLC_GET_REVINFO 98
+#define WLC_GET_UCANTDIV 99
+#define WLC_SET_UCANTDIV 100
+#define WLC_R_REG 101
+#define WLC_W_REG 102
+/* #define WLC_DIAG_LOOPBACK 103 old tray diag */
+/* #define WLC_RESET_D11CNTS 104 */ /* -> "reset_d11cnts" iovar */
+#define WLC_GET_MACMODE 105
+#define WLC_SET_MACMODE 106
+#define WLC_GET_MONITOR 107
+#define WLC_SET_MONITOR 108
+#define WLC_GET_GMODE 109
+#define WLC_SET_GMODE 110
+#define WLC_GET_LEGACY_ERP 111
+#define WLC_SET_LEGACY_ERP 112
+#define WLC_GET_RX_ANT 113
+#define WLC_GET_CURR_RATESET 114 /* current rateset */
+#define WLC_GET_SCANSUPPRESS 115
+#define WLC_SET_SCANSUPPRESS 116
+#define WLC_GET_AP 117
+#define WLC_SET_AP 118
+#define WLC_GET_EAP_RESTRICT 119
+#define WLC_SET_EAP_RESTRICT 120
+#define WLC_SCB_AUTHORIZE 121
+#define WLC_SCB_DEAUTHORIZE 122
+#define WLC_GET_WDSLIST 123
+#define WLC_SET_WDSLIST 124
+#define WLC_GET_ATIM 125
+#define WLC_SET_ATIM 126
+#define WLC_GET_RSSI 127
+#define WLC_GET_PHYANTDIV 128
+#define WLC_SET_PHYANTDIV 129
+#define WLC_AP_RX_ONLY 130
+#define WLC_GET_TX_PATH_PWR 131
+#define WLC_SET_TX_PATH_PWR 132
+#define WLC_GET_WSEC 133
+#define WLC_SET_WSEC 134
+#define WLC_GET_PHY_NOISE 135
+#define WLC_GET_BSS_INFO 136
+#define WLC_GET_PKTCNTS 137
+#define WLC_GET_LAZYWDS 138
+#define WLC_SET_LAZYWDS 139
+#define WLC_GET_BANDLIST 140
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+#define WLC_GET_BAND 141
+#define WLC_SET_BAND 142
+#define WLC_SCB_DEAUTHENTICATE 143
+#define WLC_GET_SHORTSLOT 144
+#define WLC_GET_SHORTSLOT_OVERRIDE 145
+#define WLC_SET_SHORTSLOT_OVERRIDE 146
+#define WLC_GET_SHORTSLOT_RESTRICT 147
+#define WLC_SET_SHORTSLOT_RESTRICT 148
+#define WLC_GET_GMODE_PROTECTION 149
+#define WLC_GET_GMODE_PROTECTION_OVERRIDE 150
+#define WLC_SET_GMODE_PROTECTION_OVERRIDE 151
+#define WLC_UPGRADE 152
+/* #define WLC_GET_MRATE 153 */ /* no longer supported */
+/* #define WLC_SET_MRATE 154 */ /* no longer supported */
+#define WLC_GET_IGNORE_BCNS 155
+#define WLC_SET_IGNORE_BCNS 156
+#define WLC_GET_SCB_TIMEOUT 157
+#define WLC_SET_SCB_TIMEOUT 158
+#define WLC_GET_ASSOCLIST 159
+#define WLC_GET_CLK 160
+#define WLC_SET_CLK 161
+#define WLC_GET_UP 162
+#define WLC_OUT 163
+#define WLC_GET_WPA_AUTH 164
+#define WLC_SET_WPA_AUTH 165
+#define WLC_GET_UCFLAGS 166
+#define WLC_SET_UCFLAGS 167
+#define WLC_GET_PWRIDX 168
+#define WLC_SET_PWRIDX 169
+#define WLC_GET_TSSI 170
+#define WLC_GET_SUP_RATESET_OVERRIDE 171
+#define WLC_SET_SUP_RATESET_OVERRIDE 172
+/* #define WLC_SET_FAST_TIMER 173 */ /* no longer supported */
+/* #define WLC_GET_FAST_TIMER 174 */ /* no longer supported */
+/* #define WLC_SET_SLOW_TIMER 175 */ /* no longer supported */
+/* #define WLC_GET_SLOW_TIMER 176 */ /* no longer supported */
+/* #define WLC_DUMP_PHYREGS 177 */ /* no longer supported */
+#define WLC_GET_PROTECTION_CONTROL 178
+#define WLC_SET_PROTECTION_CONTROL 179
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+#define WLC_GET_PHYLIST 180
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+#define WLC_ENCRYPT_STRENGTH 181 /* ndis only */
+#define WLC_DECRYPT_STATUS 182 /* ndis only */
+#define WLC_GET_KEY_SEQ 183
+#define WLC_GET_SCAN_CHANNEL_TIME 184
+#define WLC_SET_SCAN_CHANNEL_TIME 185
+#define WLC_GET_SCAN_UNASSOC_TIME 186
+#define WLC_SET_SCAN_UNASSOC_TIME 187
+#define WLC_GET_SCAN_HOME_TIME 188
+#define WLC_SET_SCAN_HOME_TIME 189
+#define WLC_GET_SCAN_NPROBES 190
+#define WLC_SET_SCAN_NPROBES 191
+#define WLC_GET_PRB_RESP_TIMEOUT 192
+#define WLC_SET_PRB_RESP_TIMEOUT 193
+#define WLC_GET_ATTEN 194
+#define WLC_SET_ATTEN 195
+#define WLC_GET_SHMEM 196 /* diag */
+#define WLC_SET_SHMEM 197 /* diag */
+/* #define WLC_GET_GMODE_PROTECTION_CTS 198 */ /* no longer supported */
+/* #define WLC_SET_GMODE_PROTECTION_CTS 199 */ /* no longer supported */
+#define WLC_SET_WSEC_TEST 200
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+#define WLC_SCB_DEAUTHENTICATE_FOR_REASON 201
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+#define WLC_TKIP_COUNTERMEASURES 202
+#define WLC_GET_PIOMODE 203
+#define WLC_SET_PIOMODE 204
+#define WLC_SET_ASSOC_PREFER 205
+#define WLC_GET_ASSOC_PREFER 206
+#define WLC_SET_ROAM_PREFER 207
+#define WLC_GET_ROAM_PREFER 208
+#define WLC_SET_LED 209
+#define WLC_GET_LED 210
+#define WLC_GET_INTERFERENCE_MODE 211
+#define WLC_SET_INTERFERENCE_MODE 212
+#define WLC_GET_CHANNEL_QA 213
+#define WLC_START_CHANNEL_QA 214
+#define WLC_GET_CHANNEL_SEL 215
+#define WLC_START_CHANNEL_SEL 216
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+#define WLC_GET_VALID_CHANNELS 217
+#define WLC_GET_FAKEFRAG 218
+#define WLC_SET_FAKEFRAG 219
+#define WLC_GET_PWROUT_PERCENTAGE 220
+#define WLC_SET_PWROUT_PERCENTAGE 221
+#define WLC_SET_BAD_FRAME_PREEMPT 222
+#define WLC_GET_BAD_FRAME_PREEMPT 223
+#define WLC_SET_LEAP_LIST 224
+#define WLC_GET_LEAP_LIST 225
+#define WLC_GET_CWMIN 226
+#define WLC_SET_CWMIN 227
+#define WLC_GET_CWMAX 228
+#define WLC_SET_CWMAX 229
+#define WLC_GET_WET 230
+#define WLC_SET_WET 231
+#define WLC_GET_PUB 232
+/* #define WLC_SET_GLACIAL_TIMER 233 */ /* no longer supported */
+/* #define WLC_GET_GLACIAL_TIMER 234 */ /* no longer supported */
+#define WLC_GET_KEY_PRIMARY 235
+#define WLC_SET_KEY_PRIMARY 236
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+
+/* #define WLC_DUMP_RADIOREGS 237 */ /* no longer supported */
+#define WLC_GET_ACI_ARGS 238
+#define WLC_SET_ACI_ARGS 239
+#define WLC_UNSET_CALLBACK 240
+#define WLC_SET_CALLBACK 241
+#define WLC_GET_RADAR 242
+#define WLC_SET_RADAR 243
+#define WLC_SET_SPECT_MANAGMENT 244
+#define WLC_GET_SPECT_MANAGMENT 245
+#define WLC_WDS_GET_REMOTE_HWADDR 246 /* handled in wl_linux.c/wl_vx.c */
+#define WLC_WDS_GET_WPA_SUP 247
+#define WLC_SET_CS_SCAN_TIMER 248
+#define WLC_GET_CS_SCAN_TIMER 249
+#define WLC_MEASURE_REQUEST 250
+#define WLC_INIT 251
+#define WLC_SEND_QUIET 252
+#define WLC_KEEPALIVE 253
+#define WLC_SEND_PWR_CONSTRAINT 254
+#define WLC_UPGRADE_STATUS 255
+#define WLC_CURRENT_PWR 256
+#define WLC_GET_SCAN_PASSIVE_TIME 257
+#define WLC_SET_SCAN_PASSIVE_TIME 258
+#define WLC_LEGACY_LINK_BEHAVIOR 259
+#define WLC_GET_CHANNELS_IN_COUNTRY 260
+#define WLC_GET_COUNTRY_LIST 261
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+#define WLC_GET_VAR 262 /* get value of named variable */
+#define WLC_SET_VAR 263 /* set named variable to value */
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+#define WLC_NVRAM_GET 264 /* deprecated */
+#define WLC_NVRAM_SET 265
+#define WLC_NVRAM_DUMP 266
+#define WLC_REBOOT 267
+#define WLC_SET_WSEC_PMK 268
+#define WLC_GET_AUTH_MODE 269
+#define WLC_SET_AUTH_MODE 270
+#define WLC_GET_WAKEENTRY 271
+#define WLC_SET_WAKEENTRY 272
+#define WLC_NDCONFIG_ITEM 273 /* currently handled in wl_oid.c */
+#define WLC_NVOTPW 274
+#define WLC_OTPW 275
+#define WLC_IOV_BLOCK_GET 276
+#define WLC_IOV_MODULES_GET 277
+#define WLC_SOFT_RESET 278
+#define WLC_GET_ALLOW_MODE 279
+#define WLC_SET_ALLOW_MODE 280
+#define WLC_GET_DESIRED_BSSID 281
+#define WLC_SET_DESIRED_BSSID 282
+#define WLC_DISASSOC_MYAP 283
+#define WLC_GET_NBANDS 284 /* for Dongle EXT_STA support */
+#define WLC_GET_BANDSTATES 285 /* for Dongle EXT_STA support */
+#define WLC_GET_WLC_BSS_INFO 286 /* for Dongle EXT_STA support */
+#define WLC_GET_ASSOC_INFO 287 /* for Dongle EXT_STA support */
+#define WLC_GET_OID_PHY 288 /* for Dongle EXT_STA support */
+#define WLC_SET_OID_PHY 289 /* for Dongle EXT_STA support */
+#define WLC_SET_ASSOC_TIME 290 /* for Dongle EXT_STA support */
+#define WLC_GET_DESIRED_SSID 291 /* for Dongle EXT_STA support */
+#define WLC_GET_CHANSPEC 292 /* for Dongle EXT_STA support */
+#define WLC_GET_ASSOC_STATE 293 /* for Dongle EXT_STA support */
+#define WLC_SET_PHY_STATE 294 /* for Dongle EXT_STA support */
+#define WLC_GET_SCAN_PENDING 295 /* for Dongle EXT_STA support */
+#define WLC_GET_SCANREQ_PENDING 296 /* for Dongle EXT_STA support */
+#define WLC_GET_PREV_ROAM_REASON 297 /* for Dongle EXT_STA support */
+#define WLC_SET_PREV_ROAM_REASON 298 /* for Dongle EXT_STA support */
+#define WLC_GET_BANDSTATES_PI 299 /* for Dongle EXT_STA support */
+#define WLC_GET_PHY_STATE 300 /* for Dongle EXT_STA support */
+#define WLC_GET_BSS_WPA_RSN 301 /* for Dongle EXT_STA support */
+#define WLC_GET_BSS_WPA2_RSN 302 /* for Dongle EXT_STA support */
+#define WLC_GET_BSS_BCN_TS 303 /* for Dongle EXT_STA support */
+#define WLC_GET_INT_DISASSOC 304 /* for Dongle EXT_STA support */
+#define WLC_SET_NUM_PEERS 305 /* for Dongle EXT_STA support */
+#define WLC_GET_NUM_BSS 306 /* for Dongle EXT_STA support */
+#define WLC_PHY_SAMPLE_COLLECT 307 /* phy sample collect mode */
+/* #define WLC_UM_PRIV 308 */ /* Deprecated: usermode driver */
+#define WLC_GET_CMD 309
+/* #define WLC_LAST 310 */ /* Never used - can be reused */
+#define WLC_SET_INTERFERENCE_OVERRIDE_MODE 311 /* set inter mode override */
+#define WLC_GET_INTERFERENCE_OVERRIDE_MODE 312 /* get inter mode override */
+/* #define WLC_GET_WAI_RESTRICT 313 */ /* for WAPI, deprecated use iovar instead */
+/* #define WLC_SET_WAI_RESTRICT 314 */ /* for WAPI, deprecated use iovar instead */
+/* #define WLC_SET_WAI_REKEY 315 */ /* for WAPI, deprecated use iovar instead */
+#define WLC_SET_NAT_CONFIG 316 /* for configuring NAT filter driver */
+#define WLC_GET_NAT_STATE 317
+#define WLC_LAST 318
+
+#ifndef EPICTRL_COOKIE
+#define EPICTRL_COOKIE 0xABADCEDE
+#endif
+
+/* vx wlc ioctl's offset */
+#define CMN_IOCTL_OFF 0x180
+
+/*
+ * custom OID support
+ *
+ * 0xFF - implementation specific OID
+ * 0xE4 - first byte of Broadcom PCI vendor ID
+ * 0x14 - second byte of Broadcom PCI vendor ID
+ * 0xXX - the custom OID number
+ */
+
+/* The WL OID base begins 0x1f values beyond the start of the ET driver range. */
+#define WL_OID_BASE 0xFFE41420
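+
+/* Editorial worked example of the encoding described above: WL_OID_BASE packs
+ * 0xFF (implementation specific), 0xE4 0x14 (Broadcom PCI vendor id) and leaves
+ * the low byte for the command, so e.g.
+ *   WL_OID_BASE + WLC_GET_INSTANCE == 0xFFE41420 + 14 == 0xFFE4142E
+ * which is the value OID_WL_GETINSTANCE expands to below.
+ */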
+
+/* NDIS overrides */
+#define OID_WL_GETINSTANCE (WL_OID_BASE + WLC_GET_INSTANCE)
+#define OID_WL_GET_FORCELINK (WL_OID_BASE + WLC_GET_FORCELINK)
+#define OID_WL_SET_FORCELINK (WL_OID_BASE + WLC_SET_FORCELINK)
+#define OID_WL_ENCRYPT_STRENGTH (WL_OID_BASE + WLC_ENCRYPT_STRENGTH)
+#define OID_WL_DECRYPT_STATUS (WL_OID_BASE + WLC_DECRYPT_STATUS)
+#define OID_LEGACY_LINK_BEHAVIOR (WL_OID_BASE + WLC_LEGACY_LINK_BEHAVIOR)
+#define OID_WL_NDCONFIG_ITEM (WL_OID_BASE + WLC_NDCONFIG_ITEM)
+
+/* EXT_STA Dongle support */
+#define OID_STA_CHANSPEC (WL_OID_BASE + WLC_GET_CHANSPEC)
+#define OID_STA_NBANDS (WL_OID_BASE + WLC_GET_NBANDS)
+#define OID_STA_GET_PHY (WL_OID_BASE + WLC_GET_OID_PHY)
+#define OID_STA_SET_PHY (WL_OID_BASE + WLC_SET_OID_PHY)
+#define OID_STA_ASSOC_TIME (WL_OID_BASE + WLC_SET_ASSOC_TIME)
+#define OID_STA_DESIRED_SSID (WL_OID_BASE + WLC_GET_DESIRED_SSID)
+#define OID_STA_SET_PHY_STATE (WL_OID_BASE + WLC_SET_PHY_STATE)
+#define OID_STA_SCAN_PENDING (WL_OID_BASE + WLC_GET_SCAN_PENDING)
+#define OID_STA_SCANREQ_PENDING (WL_OID_BASE + WLC_GET_SCANREQ_PENDING)
+#define OID_STA_GET_ROAM_REASON (WL_OID_BASE + WLC_GET_PREV_ROAM_REASON)
+#define OID_STA_SET_ROAM_REASON (WL_OID_BASE + WLC_SET_PREV_ROAM_REASON)
+#define OID_STA_GET_PHY_STATE (WL_OID_BASE + WLC_GET_PHY_STATE)
+#define OID_STA_INT_DISASSOC (WL_OID_BASE + WLC_GET_INT_DISASSOC)
+#define OID_STA_SET_NUM_PEERS (WL_OID_BASE + WLC_SET_NUM_PEERS)
+#define OID_STA_GET_NUM_BSS (WL_OID_BASE + WLC_GET_NUM_BSS)
+
+/* NAT filter driver support */
+#define OID_NAT_SET_CONFIG (WL_OID_BASE + WLC_SET_NAT_CONFIG)
+#define OID_NAT_GET_STATE (WL_OID_BASE + WLC_GET_NAT_STATE)
+
+#define WL_DECRYPT_STATUS_SUCCESS 1
+#define WL_DECRYPT_STATUS_FAILURE 2
+#define WL_DECRYPT_STATUS_UNKNOWN 3
+
+/* allows user-mode app to poll the status of USB image upgrade */
+#define WLC_UPGRADE_SUCCESS 0
+#define WLC_UPGRADE_PENDING 1
+
+#ifdef CONFIG_USBRNDIS_RETAIL
+/* struct passed in for WLC_NDCONFIG_ITEM */
+typedef struct {
+ char *name;
+ void *param;
+} ndconfig_item_t;
+#endif
+
+
+/* WLC_GET_AUTH, WLC_SET_AUTH values */
+#define WL_AUTH_OPEN_SYSTEM 0 /* d11 open authentication */
+#define WL_AUTH_SHARED_KEY 1 /* d11 shared authentication */
+#define WL_AUTH_OPEN_SHARED 2 /* try open, then shared if open failed w/rc 13 */
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+/* Bit masks for radio disabled status - returned by WLC_GET_RADIO */
+#define WL_RADIO_SW_DISABLE (1<<0)
+#define WL_RADIO_HW_DISABLE (1<<1)
+#define WL_RADIO_MPC_DISABLE (1<<2)
+#define WL_RADIO_COUNTRY_DISABLE (1<<3) /* some countries don't support any channel */
+
+#define WL_SPURAVOID_OFF 0
+#define WL_SPURAVOID_ON1 1
+#define WL_SPURAVOID_ON2 2
+
+/* Override bit for WLC_SET_TXPWR. if set, ignore other level limits */
+#define WL_TXPWR_OVERRIDE (1U<<31)
+#define WL_TXPWR_NEG (1U<<30)
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+#define WL_PHY_PAVARS_LEN 32 /* Phy type, Band range, chain, a1[0], b0[0], b1[0] ... */
+
+#define WL_PHY_PAVAR_VER 1 /* pavars version */
+
+typedef struct wl_po {
+ uint16 phy_type; /* Phy type */
+ uint16 band;
+ uint16 cckpo;
+ uint32 ofdmpo;
+ uint16 mcspo[8];
+} wl_po_t;
+
+/* A large TX power used as an init value to factor out of MIN() calculations;
+ * kept low enough to fit in an int8. Units are 0.25 dBm.
+ */
+#define WLC_TXPWR_MAX (127) /* ~32 dBm = 1,500 mW */
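+
+/* Editorial sketch: tx power in this interface is carried in 0.25 dBm steps,
+ * which is why 127 corresponds to ~31.75 dBm. Converting a whole-dBm value and
+ * combining it with the override bit defined above might look like this; the
+ * helper name is an assumption.
+ */
+static inline uint32 wl_txpwr_qdbm_override(uint8 dbm)
+{
+ return ((uint32)dbm * 4) | WL_TXPWR_OVERRIDE; /* quarter-dBm, ignore other limits */
+}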
+
+/* "diag" iovar argument and error code */
+#define WL_DIAG_INTERRUPT 1 /* d11 loopback interrupt test */
+#define WL_DIAG_LOOPBACK 2 /* d11 loopback data test */
+#define WL_DIAG_MEMORY 3 /* d11 memory test */
+#define WL_DIAG_LED 4 /* LED test */
+#define WL_DIAG_REG 5 /* d11/phy register test */
+#define WL_DIAG_SROM 6 /* srom read/crc test */
+#define WL_DIAG_DMA 7 /* DMA test */
+#define WL_DIAG_LOOPBACK_EXT 8 /* enhanced d11 loopback data test */
+
+#define WL_DIAGERR_SUCCESS 0
+#define WL_DIAGERR_FAIL_TO_RUN 1 /* unable to run requested diag */
+#define WL_DIAGERR_NOT_SUPPORTED 2 /* diag requested is not supported */
+#define WL_DIAGERR_INTERRUPT_FAIL 3 /* loopback interrupt test failed */
+#define WL_DIAGERR_LOOPBACK_FAIL 4 /* loopback data test failed */
+#define WL_DIAGERR_SROM_FAIL 5 /* srom read failed */
+#define WL_DIAGERR_SROM_BADCRC 6 /* srom crc failed */
+#define WL_DIAGERR_REG_FAIL 7 /* d11/phy register test failed */
+#define WL_DIAGERR_MEMORY_FAIL 8 /* d11 memory test failed */
+#define WL_DIAGERR_NOMEM 9 /* diag test failed due to no memory */
+#define WL_DIAGERR_DMA_FAIL 10 /* DMA test failed */
+
+#define WL_DIAGERR_MEMORY_TIMEOUT 11 /* d11 memory test didn't finish in time */
+#define WL_DIAGERR_MEMORY_BADPATTERN 12 /* d11 memory test result in bad pattern */
+
+/* band types */
+#define WLC_BAND_AUTO 0 /* auto-select */
+#define WLC_BAND_5G 1 /* 5 GHz */
+#define WLC_BAND_2G 2 /* 2.4 GHz */
+#define WLC_BAND_ALL 3 /* all bands */
+
+/* band range returned by band_range iovar */
+#define WL_CHAN_FREQ_RANGE_2G 0
+#define WL_CHAN_FREQ_RANGE_5GL 1
+#define WL_CHAN_FREQ_RANGE_5GM 2
+#define WL_CHAN_FREQ_RANGE_5GH 3
+
+#define WL_CHAN_FREQ_RANGE_5G_BAND0 1
+#define WL_CHAN_FREQ_RANGE_5G_BAND1 2
+#define WL_CHAN_FREQ_RANGE_5G_BAND2 3
+#define WL_CHAN_FREQ_RANGE_5G_BAND3 4
+
+#define WL_CHAN_FREQ_RANGE_5G_4BAND 5
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+/* phy types (returned by WLC_GET_PHYTYPE) */
+#define WLC_PHY_TYPE_A 0
+#define WLC_PHY_TYPE_B 1
+#define WLC_PHY_TYPE_G 2
+#define WLC_PHY_TYPE_N 4
+#define WLC_PHY_TYPE_LP 5
+#define WLC_PHY_TYPE_SSN 6
+#define WLC_PHY_TYPE_HT 7
+#define WLC_PHY_TYPE_LCN 8
+#define WLC_PHY_TYPE_LCN40 10
+#define WLC_PHY_TYPE_AC 11
+#define WLC_PHY_TYPE_NULL 0xf
+
+/* Values for PM */
+#define PM_OFF 0
+#define PM_MAX 1
+#define PM_FAST 2
+#define PM_FORCE_OFF 3 /* use this bit to force PM off even when BT is active */
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+/* MAC list modes */
+#define WLC_MACMODE_DISABLED 0 /* MAC list disabled */
+#define WLC_MACMODE_DENY 1 /* Deny specified (i.e. allow unspecified) */
+#define WLC_MACMODE_ALLOW 2 /* Allow specified (i.e. deny unspecified) */
+
+/*
+ * 54g modes (basic bits may still be overridden)
+ *
+ * GMODE_LEGACY_B Rateset: 1b, 2b, 5.5, 11
+ * Preamble: Long
+ * Shortslot: Off
+ * GMODE_AUTO Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54
+ * Extended Rateset: 6, 9, 12, 48
+ * Preamble: Long
+ * Shortslot: Auto
+ * GMODE_ONLY Rateset: 1b, 2b, 5.5b, 11b, 18, 24b, 36, 54
+ * Extended Rateset: 6b, 9, 12b, 48
+ * Preamble: Short required
+ * Shortslot: Auto
+ * GMODE_B_DEFERRED Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54
+ * Extended Rateset: 6, 9, 12, 48
+ * Preamble: Long
+ * Shortslot: On
+ * GMODE_PERFORMANCE Rateset: 1b, 2b, 5.5b, 6b, 9, 11b, 12b, 18, 24b, 36, 48, 54
+ * Preamble: Short required
+ * Shortslot: On and required
+ * GMODE_LRS Rateset: 1b, 2b, 5.5b, 11b
+ * Extended Rateset: 6, 9, 12, 18, 24, 36, 48, 54
+ * Preamble: Long
+ * Shortslot: Auto
+ */
+#define GMODE_LEGACY_B 0
+#define GMODE_AUTO 1
+#define GMODE_ONLY 2
+#define GMODE_B_DEFERRED 3
+#define GMODE_PERFORMANCE 4
+#define GMODE_LRS 5
+#define GMODE_MAX 6
+
+/* values for PLCPHdr_override */
+#define WLC_PLCP_AUTO -1
+#define WLC_PLCP_SHORT 0
+#define WLC_PLCP_LONG 1
+
+/* values for g_protection_override and n_protection_override */
+#define WLC_PROTECTION_AUTO -1
+#define WLC_PROTECTION_OFF 0
+#define WLC_PROTECTION_ON 1
+#define WLC_PROTECTION_MMHDR_ONLY 2
+#define WLC_PROTECTION_CTS_ONLY 3
+
+/* values for g_protection_control and n_protection_control */
+#define WLC_PROTECTION_CTL_OFF 0
+#define WLC_PROTECTION_CTL_LOCAL 1
+#define WLC_PROTECTION_CTL_OVERLAP 2
+
+/* values for n_protection */
+#define WLC_N_PROTECTION_OFF 0
+#define WLC_N_PROTECTION_OPTIONAL 1
+#define WLC_N_PROTECTION_20IN40 2
+#define WLC_N_PROTECTION_MIXEDMODE 3
+
+/* values for n_preamble_type */
+#define WLC_N_PREAMBLE_MIXEDMODE 0
+#define WLC_N_PREAMBLE_GF 1
+#define WLC_N_PREAMBLE_GF_BRCM 2
+
+/* values for band specific 40MHz capabilities (deprecated) */
+#define WLC_N_BW_20ALL 0
+#define WLC_N_BW_40ALL 1
+#define WLC_N_BW_20IN2G_40IN5G 2
+
+#define WLC_BW_20MHZ_BIT (1<<0)
+#define WLC_BW_40MHZ_BIT (1<<1)
+#define WLC_BW_80MHZ_BIT (1<<2)
+
+/* Bandwidth capabilities */
+#define WLC_BW_CAP_20MHZ (WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_40MHZ (WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_80MHZ (WLC_BW_80MHZ_BIT|WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_UNRESTRICTED 0xFF
+
+#define WL_BW_CAP_20MHZ(bw_cap) (((bw_cap) & WLC_BW_20MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_40MHZ(bw_cap) (((bw_cap) & WLC_BW_40MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_80MHZ(bw_cap) (((bw_cap) & WLC_BW_80MHZ_BIT) ? TRUE : FALSE)
+
+/* values to force tx/rx chain */
+#define WLC_N_TXRX_CHAIN0 0
+#define WLC_N_TXRX_CHAIN1 1
+
+/* bitflags for SGI support (sgi_rx iovar) */
+#define WLC_N_SGI_20 0x01
+#define WLC_N_SGI_40 0x02
+#define WLC_VHT_SGI_80 0x04
+
+/* when sgi_tx==WLC_SGI_ALL, bypass rate selection, enable sgi for all mcs */
+#define WLC_SGI_ALL 0x02
+
+#define LISTEN_INTERVAL 10
+/* interference mitigation options */
+#define INTERFERE_OVRRIDE_OFF -1 /* interference override off */
+#define INTERFERE_NONE 0 /* off */
+#define NON_WLAN 1 /* foreign/non 802.11 interference, no auto detect */
+#define WLAN_MANUAL 2 /* ACI: no auto detection */
+#define WLAN_AUTO 3 /* ACI: auto detect */
+#define WLAN_AUTO_W_NOISE 4 /* ACI: auto detect ACI and non-802.11 interference */
+#define AUTO_ACTIVE (1 << 7) /* Auto is currently active */
+
+typedef struct wl_aci_args {
+ int enter_aci_thresh; /* Trigger level to start detecting ACI */
+ int exit_aci_thresh; /* Trigger level to exit ACI mode */
+ int usec_spin; /* microsecs to delay between rssi samples */
+ int glitch_delay; /* interval between ACI scans when glitch count is consistently high */
+ uint16 nphy_adcpwr_enter_thresh; /* ADC power to enter ACI mitigation mode */
+ uint16 nphy_adcpwr_exit_thresh; /* ADC power to exit ACI mitigation mode */
+ uint16 nphy_repeat_ctr; /* Number of tries per channel to compute power */
+ uint16 nphy_num_samples; /* Number of samples to compute power on one channel */
+ uint16 nphy_undetect_window_sz; /* num of undetects to exit ACI Mitigation mode */
+ uint16 nphy_b_energy_lo_aci; /* low ACI power energy threshold for bphy */
+ uint16 nphy_b_energy_md_aci; /* mid ACI power energy threshold for bphy */
+ uint16 nphy_b_energy_hi_aci; /* high ACI power energy threshold for bphy */
+ uint16 nphy_noise_noassoc_glitch_th_up; /* wl interference 4 */
+ uint16 nphy_noise_noassoc_glitch_th_dn;
+ uint16 nphy_noise_assoc_glitch_th_up;
+ uint16 nphy_noise_assoc_glitch_th_dn;
+ uint16 nphy_noise_assoc_aci_glitch_th_up;
+ uint16 nphy_noise_assoc_aci_glitch_th_dn;
+ uint16 nphy_noise_assoc_enter_th;
+ uint16 nphy_noise_noassoc_enter_th;
+ uint16 nphy_noise_assoc_rx_glitch_badplcp_enter_th;
+ uint16 nphy_noise_noassoc_crsidx_incr;
+ uint16 nphy_noise_assoc_crsidx_incr;
+ uint16 nphy_noise_crsidx_decr;
+} wl_aci_args_t;
+
+#define TRIGGER_NOW 0
+#define TRIGGER_CRS 0x01
+#define TRIGGER_CRSDEASSERT 0x02
+#define TRIGGER_GOODFCS 0x04
+#define TRIGGER_BADFCS 0x08
+#define TRIGGER_BADPLCP 0x10
+#define TRIGGER_CRSGLITCH 0x20
+#define WL_ACI_ARGS_LEGACY_LENGTH 16 /* bytes of pre NPHY aci args */
+#define WL_SAMPLECOLLECT_T_VERSION 2 /* version of wl_samplecollect_args_t struct */
+typedef struct wl_samplecollect_args {
+ /* version 0 fields */
+ uint8 coll_us;
+ int cores;
+ /* add'l version 1 fields */
+ uint16 version; /* see definition of WL_SAMPLECOLLECT_T_VERSION */
+ uint16 length; /* length of entire structure */
+ int8 trigger;
+ uint16 timeout;
+ uint16 mode;
+ uint32 pre_dur;
+ uint32 post_dur;
+ uint8 gpio_sel;
+ bool downsamp;
+ bool be_deaf;
+ bool agc; /* loop from init gain and going down */
+ bool filter; /* override high pass corners to lowest */
+ /* add'l version 2 fields */
+ uint8 trigger_state;
+ uint8 module_sel1;
+ uint8 module_sel2;
+ uint16 nsamps;
+} wl_samplecollect_args_t;
+
+#define WL_SAMPLEDATA_HEADER_TYPE 1
+#define WL_SAMPLEDATA_HEADER_SIZE 80 /* sample collect header size (bytes) */
+#define WL_SAMPLEDATA_TYPE 2
+#define WL_SAMPLEDATA_SEQ 0xff /* sequence # */
+#define WL_SAMPLEDATA_MORE_DATA 0x100 /* more data mask */
+#define WL_SAMPLEDATA_T_VERSION 1 /* version of wl_samplecollect_args_t struct */
+/* version for unpacked sample data, int16 {(I,Q),Core(0..N)} */
+#define WL_SAMPLEDATA_T_VERSION_SPEC_AN 2
+
+typedef struct wl_sampledata {
+ uint16 version; /* structure version */
+ uint16 size; /* size of structure */
+ uint16 tag; /* Header/Data */
+ uint16 length; /* data length */
+ uint32 flag; /* bit def */
+} wl_sampledata_t;
+
+/* wl_radar_args_t */
+typedef struct {
+ int npulses; /* required number of pulses at n * t_int */
+ int ncontig; /* required number of pulses at t_int */
+ int min_pw; /* minimum pulse width (20 MHz clocks) */
+ int max_pw; /* maximum pulse width (20 MHz clocks) */
+ uint16 thresh0; /* Radar detection, thresh 0 */
+ uint16 thresh1; /* Radar detection, thresh 1 */
+ uint16 blank; /* Radar detection, blank control */
+ uint16 fmdemodcfg; /* Radar detection, fmdemod config */
+ int npulses_lp; /* Radar detection, minimum long pulses */
+ int min_pw_lp; /* Minimum pulsewidth for long pulses */
+ int max_pw_lp; /* Maximum pulsewidth for long pulses */
+ int min_fm_lp; /* Minimum fm for long pulses */
+ int max_span_lp; /* Maximum deltat for long pulses */
+ int min_deltat; /* Minimum spacing between pulses */
+ int max_deltat; /* Maximum spacing between pulses */
+ uint16 autocorr; /* Radar detection, autocorr on or off */
+ uint16 st_level_time; /* Radar detection, start_timing level */
+ uint16 t2_min; /* minimum clocks needed to remain in state 2 */
+ uint32 version; /* version */
+ uint32 fra_pulse_err; /* sample error margin for detecting French radar pulses */
+ int npulses_fra; /* Radar detection, minimum French pulses set */
+ int npulses_stg2; /* Radar detection, minimum staggered-2 pulses set */
+ int npulses_stg3; /* Radar detection, minimum staggered-3 pulses set */
+ uint16 percal_mask; /* defines which period cal is masked from radar detection */
+ int quant; /* quantization resolution to pulse positions */
+ uint32 min_burst_intv_lp; /* minimum burst to burst interval for bin3 radar */
+ uint32 max_burst_intv_lp; /* maximum burst to burst interval for bin3 radar */
+ int nskip_rst_lp; /* number of skipped pulses before resetting lp buffer */
+ int max_pw_tol; /* maximum tolerance allowed in detected pulse width for radar detection */
+ uint16 feature_mask; /* 16-bit mask to specify enabled features */
+} wl_radar_args_t;
+
+#define WL_RADAR_ARGS_VERSION 2
+
+typedef struct {
+ uint32 version; /* version */
+ uint16 thresh0_20_lo; /* Radar detection, thresh 0 (range 5250-5350MHz) for BW 20MHz */
+ uint16 thresh1_20_lo; /* Radar detection, thresh 1 (range 5250-5350MHz) for BW 20MHz */
+ uint16 thresh0_40_lo; /* Radar detection, thresh 0 (range 5250-5350MHz) for BW 40MHz */
+ uint16 thresh1_40_lo; /* Radar detection, thresh 1 (range 5250-5350MHz) for BW 40MHz */
+ uint16 thresh0_80_lo; /* Radar detection, thresh 0 (range 5250-5350MHz) for BW 80MHz */
+ uint16 thresh1_80_lo; /* Radar detection, thresh 1 (range 5250-5350MHz) for BW 80MHz */
+ uint16 thresh0_160_lo; /* Radar detection, thresh 0 (range 5250-5350MHz) for BW 160MHz */
+ uint16 thresh1_160_lo; /* Radar detection, thresh 1 (range 5250-5350MHz) for BW 160MHz */
+ uint16 thresh0_20_hi; /* Radar detection, thresh 0 (range 5470-5725MHz) for BW 20MHz */
+ uint16 thresh1_20_hi; /* Radar detection, thresh 1 (range 5470-5725MHz) for BW 20MHz */
+ uint16 thresh0_40_hi; /* Radar detection, thresh 0 (range 5470-5725MHz) for BW 40MHz */
+ uint16 thresh1_40_hi; /* Radar detection, thresh 1 (range 5470-5725MHz) for BW 40MHz */
+ uint16 thresh0_80_hi; /* Radar detection, thresh 0 (range 5470-5725MHz) for BW 80MHz */
+ uint16 thresh1_80_hi; /* Radar detection, thresh 1 (range 5470-5725MHz) for BW 80MHz */
+ uint16 thresh0_160_hi; /* Radar detection, thresh 0 (range 5470-5725MHz) for BW 160MHz */
+ uint16 thresh1_160_hi; /* Radar detection, thresh 1 (range 5470-5725MHz) for BW 160MHz */
+} wl_radar_thr_t;
+
+#define WL_RADAR_THR_VERSION 2
+#define WL_THRESHOLD_LO_BAND 70 /* range from 5250MHz - 5350MHz */
+
+/* radar iovar SET defines */
+#define WL_RADAR_DETECTOR_OFF 0 /* radar detector off */
+#define WL_RADAR_DETECTOR_ON 1 /* radar detector on */
+#define WL_RADAR_SIMULATED 2 /* force radar detector to declare
+ * detection once
+ */
+#define WL_RSSI_ANT_VERSION 1 /* current version of wl_rssi_ant_t */
+#define WL_ANT_RX_MAX 2 /* max 2 receive antennas */
+#define WL_ANT_HT_RX_MAX 3 /* max 3 receive antennas/cores */
+#define WL_ANT_IDX_1 0 /* antenna index 1 */
+#define WL_ANT_IDX_2 1 /* antenna index 2 */
+
+#ifndef WL_RSSI_ANT_MAX
+#define WL_RSSI_ANT_MAX 4 /* max possible rx antennas */
+#elif WL_RSSI_ANT_MAX != 4
+#error "WL_RSSI_ANT_MAX does not match"
+#endif
+
+/* RSSI per antenna */
+typedef struct {
+ uint32 version; /* version field */
+ uint32 count; /* number of valid antenna rssi */
+ int8 rssi_ant[WL_RSSI_ANT_MAX]; /* rssi per antenna */
+} wl_rssi_ant_t;
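+
+/* Editorial sketch: consumers normally clamp 'count' to WL_RSSI_ANT_MAX before
+ * walking rssi_ant[], since the array is fixed at four entries. The helper name
+ * is an assumption.
+ */
+static inline int8 wl_weakest_ant_rssi(const wl_rssi_ant_t *r)
+{
+ uint32 i, n = (r->count < WL_RSSI_ANT_MAX) ? r->count : WL_RSSI_ANT_MAX;
+ int8 worst = 0;
+
+ for (i = 0; i < n; i++)
+  if (r->rssi_ant[i] < worst)
+   worst = r->rssi_ant[i]; /* RSSI values are negative dBm */
+ return worst;
+}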
+
+/* dfs_status iovar-related defines */
+
+/* cac - channel availability check,
+ * ism - in-service monitoring
+ * csa - channel switching announcement
+ */
+
+/* cac state values */
+#define WL_DFS_CACSTATE_IDLE 0 /* state for operating in non-radar channel */
+#define WL_DFS_CACSTATE_PREISM_CAC 1 /* CAC in progress */
+#define WL_DFS_CACSTATE_ISM 2 /* ISM in progress */
+#define WL_DFS_CACSTATE_CSA 3 /* csa */
+#define WL_DFS_CACSTATE_POSTISM_CAC 4 /* ISM CAC */
+#define WL_DFS_CACSTATE_PREISM_OOC 5 /* PREISM OOC */
+#define WL_DFS_CACSTATE_POSTISM_OOC 6 /* POSTISM OOC */
+#define WL_DFS_CACSTATES 7 /* this many states exist */
+
+/* data structure used in 'dfs_status' wl interface, which is used to query dfs status */
+typedef struct {
+ uint state; /* noted by WL_DFS_CACSTATE_XX. */
+ uint duration; /* time spent in ms in state. */
+ /* as dfs enters ISM state, it removes the operational channel from the quiet
+ * channel list and notes it in chanspec_cleared; set to 0 if no channel is cleared
+ */
+ chanspec_t chanspec_cleared;
+ /* chanspec_cleared used to be a uint; a uint16 pad is added to preserve the size */
+ uint16 pad;
+} wl_dfs_status_t;
+
+#define NUM_PWRCTRL_RATES 12
+
+typedef struct {
+ uint8 txpwr_band_max[NUM_PWRCTRL_RATES]; /* User set target */
+ uint8 txpwr_limit[NUM_PWRCTRL_RATES]; /* reg and local power limit */
+ uint8 txpwr_local_max; /* local max according to the AP */
+ uint8 txpwr_local_constraint; /* local constraint according to the AP */
+ uint8 txpwr_chan_reg_max; /* Regulatory max for this channel */
+ uint8 txpwr_target[2][NUM_PWRCTRL_RATES]; /* Latest target for 2.4 and 5 GHz */
+ uint8 txpwr_est_Pout[2]; /* Latest estimate for 2.4 and 5 GHz */
+ uint8 txpwr_opo[NUM_PWRCTRL_RATES]; /* On G phy, OFDM power offset */
+ uint8 txpwr_bphy_cck_max[NUM_PWRCTRL_RATES]; /* Max CCK power for this band (SROM) */
+ uint8 txpwr_bphy_ofdm_max; /* Max OFDM power for this band (SROM) */
+ uint8 txpwr_aphy_max[NUM_PWRCTRL_RATES]; /* Max power for A band (SROM) */
+ int8 txpwr_antgain[2]; /* Ant gain for each band - from SROM */
+ uint8 txpwr_est_Pout_gofdm; /* Pwr estimate for 2.4 OFDM */
+} tx_power_legacy_t;
+
+#define WL_TX_POWER_RATES_LEGACY 45
+#define WL_TX_POWER_MCS20_FIRST 12
+#define WL_TX_POWER_MCS20_NUM 16
+#define WL_TX_POWER_MCS40_FIRST 28
+#define WL_TX_POWER_MCS40_NUM 17
+
+typedef struct {
+ uint32 flags;
+ chanspec_t chanspec; /* txpwr report for this channel */
+ chanspec_t local_chanspec; /* channel on which we are associated */
+ uint8 local_max; /* local max according to the AP */
+ uint8 local_constraint; /* local constraint according to the AP */
+ int8 antgain[2]; /* Ant gain for each band - from SROM */
+ uint8 rf_cores; /* count of RF Cores being reported */
+ uint8 est_Pout[4]; /* Latest tx power out estimate per RF
+ * chain without adjustment
+ */
+ uint8 est_Pout_cck; /* Latest CCK tx power out estimate */
+ uint8 user_limit[WL_TX_POWER_RATES_LEGACY]; /* User limit */
+ uint8 reg_limit[WL_TX_POWER_RATES_LEGACY]; /* Regulatory power limit */
+ uint8 board_limit[WL_TX_POWER_RATES_LEGACY]; /* Max power board can support (SROM) */
+ uint8 target[WL_TX_POWER_RATES_LEGACY]; /* Latest target power */
+} tx_power_legacy2_t;
+
+/* TX Power index defines */
+#define WL_NUM_RATES_CCK 4 /* 1, 2, 5.5, 11 Mbps */
+#define WL_NUM_RATES_OFDM 8 /* 6, 9, 12, 18, 24, 36, 48, 54 Mbps SISO/CDD */
+#define WL_NUM_RATES_MCS_1STREAM 8 /* MCS 0-7 1-stream rates - SISO/CDD/STBC/MCS */
+#define WL_NUM_RATES_EXTRA_VHT 2 /* Additional VHT 11AC rates */
+#define WL_NUM_RATES_VHT 10
+#define WL_NUM_RATES_MCS32 1
+
+#define WLC_NUM_RATES_CCK WL_NUM_RATES_CCK
+#define WLC_NUM_RATES_OFDM WL_NUM_RATES_OFDM
+#define WLC_NUM_RATES_MCS_1_STREAM WL_NUM_RATES_MCS_1STREAM
+#define WLC_NUM_RATES_MCS_2_STREAM WL_NUM_RATES_MCS_1STREAM
+#define WLC_NUM_RATES_MCS32 WL_NUM_RATES_MCS32
+#define WL_TX_POWER_CCK_NUM WL_NUM_RATES_CCK
+#define WL_TX_POWER_OFDM_NUM WL_NUM_RATES_OFDM
+#define WL_TX_POWER_MCS_1_STREAM_NUM WL_NUM_RATES_MCS_1STREAM
+#define WL_TX_POWER_MCS_2_STREAM_NUM WL_NUM_RATES_MCS_1STREAM
+#define WL_TX_POWER_MCS_32_NUM WL_NUM_RATES_MCS32
+
+#define WL_NUM_2x2_ELEMENTS 4
+#define WL_NUM_3x3_ELEMENTS 6
+
+typedef struct txppr {
+ /* start of 20MHz tx power limits */
+ uint8 b20_1x1dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b20_1x1ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM transmission */
+ uint8 b20_1x1mcs0[WL_NUM_RATES_MCS_1STREAM]; /* SISO MCS 0-7 */
+
+ uint8 b20_1x2dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b20_1x2cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
+ uint8 b20_1x2cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* CDD MCS 0-7 */
+ uint8 b20_2x2stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
+ uint8 b20_2x2sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* MCS 8-15 */
+
+ uint8 b20_1x3dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b20_1x3cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
+ uint8 b20_1x3cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* 1 Nsts to 3 Tx Chain */
+ uint8 b20_2x3stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
+ uint8 b20_2x3sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* 2 Nsts to 3 Tx Chain */
+ uint8 b20_3x3sdm_mcs16[WL_NUM_RATES_MCS_1STREAM]; /* 3 Nsts to 3 Tx Chain */
+
+ uint8 b20_1x1vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1 */
+ uint8 b20_1x2cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD1 */
+ uint8 b20_2x2stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC */
+ uint8 b20_2x2sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2 */
+ uint8 b20_1x3cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD2 */
+ uint8 b20_2x3stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC_SPEXP1 */
+ uint8 b20_2x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2_SPEXP1 */
+ uint8 b20_3x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS3 */
+
+ /* start of 40MHz tx power limits */
+ uint8 b40_dummy1x1dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b40_1x1ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM transmission */
+ uint8 b40_1x1mcs0[WL_NUM_RATES_MCS_1STREAM]; /* SISO MCS 0-7 */
+
+ uint8 b40_dummy1x2dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b40_1x2cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
+ uint8 b40_1x2cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* CDD MCS 0-7 */
+ uint8 b40_2x2stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
+ uint8 b40_2x2sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* MCS 8-15 */
+
+ uint8 b40_dummy1x3dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b40_1x3cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
+ uint8 b40_1x3cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* 1 Nsts to 3 Tx Chain */
+ uint8 b40_2x3stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
+ uint8 b40_2x3sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* 2 Nsts to 3 Tx Chain */
+ uint8 b40_3x3sdm_mcs16[WL_NUM_RATES_MCS_1STREAM]; /* 3 Nsts to 3 Tx Chain */
+
+ uint8 b40_1x1vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1 */
+ uint8 b40_1x2cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD1 */
+ uint8 b40_2x2stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC */
+ uint8 b40_2x2sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2 */
+ uint8 b40_1x3cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD2 */
+ uint8 b40_2x3stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC_SPEXP1 */
+ uint8 b40_2x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2_SPEXP1 */
+ uint8 b40_3x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS3 */
+
+ /* start of 20in40MHz tx power limits */
+ uint8 b20in40_1x1dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b20in40_1x1ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM transmission */
+ uint8 b20in40_1x1mcs0[WL_NUM_RATES_MCS_1STREAM]; /* SISO MCS 0-7 */
+
+ uint8 b20in40_1x2dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b20in40_1x2cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
+ uint8 b20in40_1x2cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* CDD MCS 0-7 */
+ uint8 b20in40_2x2stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
+ uint8 b20in40_2x2sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* MCS 8-15 */
+
+ uint8 b20in40_1x3dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b20in40_1x3cdd_ofdm[WL_NUM_RATES_OFDM]; /* 20 in 40 MHz Legacy OFDM CDD */
+ uint8 b20in40_1x3cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* 1 Nsts to 3 Tx Chain */
+ uint8 b20in40_2x3stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
+ uint8 b20in40_2x3sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* 2 Nsts to 3 Tx Chain */
+ uint8 b20in40_3x3sdm_mcs16[WL_NUM_RATES_MCS_1STREAM]; /* 3 Nsts to 3 Tx Chain */
+
+ uint8 b20in40_1x1vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1 */
+ uint8 b20in40_1x2cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD1 */
+ uint8 b20in40_2x2stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC */
+ uint8 b20in40_2x2sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2 */
+ uint8 b20in40_1x3cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD2 */
+ uint8 b20in40_2x3stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC_SPEXP1 */
+ uint8 b20in40_2x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2_SPEXP1 */
+ uint8 b20in40_3x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS3 */
+
+ /* start of 80MHz tx power limits */
+ uint8 b80_dummy1x1dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b80_1x1ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM transmission */
+ uint8 b80_1x1mcs0[WL_NUM_RATES_MCS_1STREAM]; /* SISO MCS 0-7 */
+
+ uint8 b80_dummy1x2dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b80_1x2cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
+ uint8 b80_1x2cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* CDD MCS 0-7 */
+ uint8 b80_2x2stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
+ uint8 b80_2x2sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* MCS 8-15 */
+
+ uint8 b80_dummy1x3dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b80_1x3cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
+ uint8 b80_1x3cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* 1 Nsts to 3 Tx Chain */
+ uint8 b80_2x3stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
+ uint8 b80_2x3sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* 2 Nsts to 3 Tx Chain */
+ uint8 b80_3x3sdm_mcs16[WL_NUM_RATES_MCS_1STREAM]; /* 3 Nsts to 3 Tx Chain */
+
+ uint8 b80_1x1vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1 */
+ uint8 b80_1x2cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD1 */
+ uint8 b80_2x2stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC */
+ uint8 b80_2x2sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2 */
+ uint8 b80_1x3cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD2 */
+ uint8 b80_2x3stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC_SPEXP1 */
+ uint8 b80_2x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2_SPEXP1 */
+ uint8 b80_3x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS3 */
+
+ /* start of 20in80MHz tx power limits */
+ uint8 b20in80_1x1dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b20in80_1x1ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM transmission */
+ uint8 b20in80_1x1mcs0[WL_NUM_RATES_MCS_1STREAM]; /* SISO MCS 0-7 */
+
+ uint8 b20in80_1x2dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b20in80_1x2cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
+ uint8 b20in80_1x2cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* CDD MCS 0-7 */
+ uint8 b20in80_2x2stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
+ uint8 b20in80_2x2sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* MCS 8-15 */
+
+ uint8 b20in80_1x3dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b20in80_1x3cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
+ uint8 b20in80_1x3cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* 1 Nsts to 3 Tx Chain */
+ uint8 b20in80_2x3stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
+ uint8 b20in80_2x3sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* 2 Nsts to 3 Tx Chain */
+ uint8 b20in80_3x3sdm_mcs16[WL_NUM_RATES_MCS_1STREAM]; /* 3 Nsts to 3 Tx Chain */
+
+ uint8 b20in80_1x1vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1 */
+ uint8 b20in80_1x2cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD1 */
+ uint8 b20in80_2x2stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC */
+ uint8 b20in80_2x2sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2 */
+ uint8 b20in80_1x3cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD2 */
+ uint8 b20in80_2x3stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC_SPEXP1 */
+ uint8 b20in80_2x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2_SPEXP1 */
+ uint8 b20in80_3x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS3 */
+
+ /* start of 40in80MHz tx power limits */
+ uint8 b40in80_dummy1x1dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b40in80_1x1ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM transmission */
+ uint8 b40in80_1x1mcs0[WL_NUM_RATES_MCS_1STREAM]; /* SISO MCS 0-7 */
+
+ uint8 b40in80_dummy1x2dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b40in80_1x2cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
+ uint8 b40in80_1x2cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* CDD MCS 0-7 */
+ uint8 b40in80_2x2stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
+ uint8 b40in80_2x2sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* MCS 8-15 */
+
+ uint8 b40in80_dummy1x3dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b40in80_1x3cdd_ofdm[WL_NUM_RATES_OFDM]; /* MHz Legacy OFDM CDD */
+ uint8 b40in80_1x3cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* 1 Nsts to 3 Tx Chain */
+ uint8 b40in80_2x3stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
+ uint8 b40in80_2x3sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* 2 Nsts to 3 Tx Chain */
+ uint8 b40in80_3x3sdm_mcs16[WL_NUM_RATES_MCS_1STREAM]; /* 3 Nsts to 3 Tx Chain */
+
+ uint8 b40in80_1x1vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1 */
+ uint8 b40in80_1x2cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD1 */
+ uint8 b40in80_2x2stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC */
+ uint8 b40in80_2x2sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2 */
+ uint8 b40in80_1x3cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD2 */
+ uint8 b40in80_2x3stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC_SPEXP1 */
+ uint8 b40in80_2x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2_SPEXP1 */
+ uint8 b40in80_3x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS3 */
+
+ uint8 mcs32; /* C_CHECK - THIS NEEDS TO BE REMOVED THROUGHOUT THE CODE */
+} txppr_t;
+
+/* 20MHz */
+#define WL_TX_POWER_CCK_FIRST OFFSETOF(txppr_t, b20_1x1dsss)
+#define WL_TX_POWER_OFDM20_FIRST OFFSETOF(txppr_t, b20_1x1ofdm)
+#define WL_TX_POWER_MCS20_SISO_FIRST OFFSETOF(txppr_t, b20_1x1mcs0)
+#define WL_TX_POWER_20_S1x1_FIRST OFFSETOF(txppr_t, b20_1x1mcs0)
+
+#define WL_TX_POWER_CCK_CDD_S1x2_FIRST OFFSETOF(txppr_t, b20_1x2dsss)
+#define WL_TX_POWER_OFDM20_CDD_FIRST OFFSETOF(txppr_t, b20_1x2cdd_ofdm)
+#define WL_TX_POWER_MCS20_CDD_FIRST OFFSETOF(txppr_t, b20_1x2cdd_mcs0)
+#define WL_TX_POWER_20_S1x2_FIRST OFFSETOF(txppr_t, b20_1x2cdd_mcs0)
+#define WL_TX_POWER_MCS20_STBC_FIRST OFFSETOF(txppr_t, b20_2x2stbc_mcs0)
+#define WL_TX_POWER_MCS20_SDM_FIRST OFFSETOF(txppr_t, b20_2x2sdm_mcs8)
+#define WL_TX_POWER_20_S2x2_FIRST OFFSETOF(txppr_t, b20_2x2sdm_mcs8)
+
+#define WL_TX_POWER_CCK_CDD_S1x3_FIRST OFFSETOF(txppr_t, b20_1x3dsss)
+#define WL_TX_POWER_OFDM20_CDD_S1x3_FIRST OFFSETOF(txppr_t, b20_1x3cdd_ofdm)
+#define WL_TX_POWER_20_S1x3_FIRST OFFSETOF(txppr_t, b20_1x3cdd_mcs0)
+#define WL_TX_POWER_20_STBC_S2x3_FIRST OFFSETOF(txppr_t, b20_2x3stbc_mcs0)
+#define WL_TX_POWER_20_S2x3_FIRST OFFSETOF(txppr_t, b20_2x3sdm_mcs8)
+#define WL_TX_POWER_20_S3x3_FIRST OFFSETOF(txppr_t, b20_3x3sdm_mcs16)
+
+#define WL_TX_POWER_20_S1X1_VHT OFFSETOF(txppr_t, b20_1x1vht)
+#define WL_TX_POWER_20_S1X2_CDD_VHT OFFSETOF(txppr_t, b20_1x2cdd_vht)
+#define WL_TX_POWER_20_S2X2_STBC_VHT OFFSETOF(txppr_t, b20_2x2stbc_vht)
+#define WL_TX_POWER_20_S2X2_VHT OFFSETOF(txppr_t, b20_2x2sdm_vht)
+#define WL_TX_POWER_20_S1X3_CDD_VHT OFFSETOF(txppr_t, b20_1x3cdd_vht)
+#define WL_TX_POWER_20_S2X3_STBC_VHT OFFSETOF(txppr_t, b20_2x3stbc_vht)
+#define WL_TX_POWER_20_S2X3_VHT OFFSETOF(txppr_t, b20_2x3sdm_vht)
+#define WL_TX_POWER_20_S3X3_VHT OFFSETOF(txppr_t, b20_3x3sdm_vht)
+
+/* 40MHz */
+#define WL_TX_POWER_40_DUMMY_CCK_FIRST OFFSETOF(txppr_t, b40_dummy1x1dsss)
+#define WL_TX_POWER_OFDM40_FIRST OFFSETOF(txppr_t, b40_1x1ofdm)
+#define WL_TX_POWER_MCS40_SISO_FIRST OFFSETOF(txppr_t, b40_1x1mcs0)
+#define WL_TX_POWER_40_S1x1_FIRST OFFSETOF(txppr_t, b40_1x1mcs0)
+
+#define WL_TX_POWER_40_DUMMY_CCK_CDD_S1x2_FIRST OFFSETOF(txppr_t, b40_dummy1x2dsss)
+#define WL_TX_POWER_OFDM40_CDD_FIRST OFFSETOF(txppr_t, b40_1x2cdd_ofdm)
+#define WL_TX_POWER_MCS40_CDD_FIRST OFFSETOF(txppr_t, b40_1x2cdd_mcs0)
+#define WL_TX_POWER_40_S1x2_FIRST OFFSETOF(txppr_t, b40_1x2cdd_mcs0)
+#define WL_TX_POWER_MCS40_STBC_FIRST OFFSETOF(txppr_t, b40_2x2stbc_mcs0)
+#define WL_TX_POWER_MCS40_SDM_FIRST OFFSETOF(txppr_t, b40_2x2sdm_mcs8)
+#define WL_TX_POWER_40_S2x2_FIRST OFFSETOF(txppr_t, b40_2x2sdm_mcs8)
+
+#define WL_TX_POWER_40_DUMMY_CCK_CDD_S1x3_FIRST OFFSETOF(txppr_t, b40_dummy1x3dsss)
+#define WL_TX_POWER_OFDM40_CDD_S1x3_FIRST OFFSETOF(txppr_t, b40_1x3cdd_ofdm)
+#define WL_TX_POWER_40_S1x3_FIRST OFFSETOF(txppr_t, b40_1x3cdd_mcs0)
+#define WL_TX_POWER_40_STBC_S2x3_FIRST OFFSETOF(txppr_t, b40_2x3stbc_mcs0)
+#define WL_TX_POWER_40_S2x3_FIRST OFFSETOF(txppr_t, b40_2x3sdm_mcs8)
+#define WL_TX_POWER_40_S3x3_FIRST OFFSETOF(txppr_t, b40_3x3sdm_mcs16)
+
+#define WL_TX_POWER_40_S1X1_VHT OFFSETOF(txppr_t, b40_1x1vht)
+#define WL_TX_POWER_40_S1X2_CDD_VHT OFFSETOF(txppr_t, b40_1x2cdd_vht)
+#define WL_TX_POWER_40_S2X2_STBC_VHT OFFSETOF(txppr_t, b40_2x2stbc_vht)
+#define WL_TX_POWER_40_S2X2_VHT OFFSETOF(txppr_t, b40_2x2sdm_vht)
+#define WL_TX_POWER_40_S1X3_CDD_VHT OFFSETOF(txppr_t, b40_1x3cdd_vht)
+#define WL_TX_POWER_40_S2X3_STBC_VHT OFFSETOF(txppr_t, b40_2x3stbc_vht)
+#define WL_TX_POWER_40_S2X3_VHT OFFSETOF(txppr_t, b40_2x3sdm_vht)
+#define WL_TX_POWER_40_S3X3_VHT OFFSETOF(txppr_t, b40_3x3sdm_vht)
+
+/* 20 in 40MHz */
+#define WL_TX_POWER_20UL_CCK_FIRST OFFSETOF(txppr_t, b20in40_1x1dsss)
+#define WL_TX_POWER_20UL_OFDM_FIRST OFFSETOF(txppr_t, b20in40_1x1ofdm)
+#define WL_TX_POWER_20UL_S1x1_FIRST OFFSETOF(txppr_t, b20in40_1x1mcs0)
+
+#define WL_TX_POWER_CCK_20U_CDD_S1x2_FIRST OFFSETOF(txppr_t, b20in40_1x2dsss)
+#define WL_TX_POWER_20UL_OFDM_CDD_FIRST OFFSETOF(txppr_t, b20in40_1x2cdd_ofdm)
+#define WL_TX_POWER_20UL_S1x2_FIRST OFFSETOF(txppr_t, b20in40_1x2cdd_mcs0)
+#define WL_TX_POWER_20UL_STBC_S2x2_FIRST OFFSETOF(txppr_t, b20in40_2x2stbc_mcs0)
+#define WL_TX_POWER_20UL_S2x2_FIRST OFFSETOF(txppr_t, b20in40_2x2sdm_mcs8)
+
+#define WL_TX_POWER_CCK_20U_CDD_S1x3_FIRST OFFSETOF(txppr_t, b20in40_1x3dsss)
+#define WL_TX_POWER_20UL_OFDM_CDD_S1x3_FIRST OFFSETOF(txppr_t, b20in40_1x3cdd_ofdm)
+#define WL_TX_POWER_20UL_S1x3_FIRST OFFSETOF(txppr_t, b20in40_1x3cdd_mcs0)
+#define WL_TX_POWER_20UL_STBC_S2x3_FIRST OFFSETOF(txppr_t, b20in40_2x3stbc_mcs0)
+#define WL_TX_POWER_20UL_S2x3_FIRST OFFSETOF(txppr_t, b20in40_2x3sdm_mcs8)
+#define WL_TX_POWER_20UL_S3x3_FIRST OFFSETOF(txppr_t, b20in40_3x3sdm_mcs16)
+
+#define WL_TX_POWER_20UL_S1X1_VHT OFFSETOF(txppr_t, b20in40_1x1vht)
+#define WL_TX_POWER_20UL_S1X2_CDD_VHT OFFSETOF(txppr_t, b20in40_1x2cdd_vht)
+#define WL_TX_POWER_20UL_S2X2_STBC_VHT OFFSETOF(txppr_t, b20in40_2x2stbc_vht)
+#define WL_TX_POWER_20UL_S2X2_VHT OFFSETOF(txppr_t, b20in40_2x2sdm_vht)
+#define WL_TX_POWER_20UL_S1X3_CDD_VHT OFFSETOF(txppr_t, b20in40_1x3cdd_vht)
+#define WL_TX_POWER_20UL_S2X3_STBC_VHT OFFSETOF(txppr_t, b20in40_2x3stbc_vht)
+#define WL_TX_POWER_20UL_S2X3_VHT OFFSETOF(txppr_t, b20in40_2x3sdm_vht)
+#define WL_TX_POWER_20UL_S3X3_VHT OFFSETOF(txppr_t, b20in40_3x3sdm_vht)
+
+/* 80MHz */
+#define WL_TX_POWER_80_DUMMY_CCK_FIRST OFFSETOF(txppr_t, b80_dummy1x1dsss)
+#define WL_TX_POWER_OFDM80_FIRST OFFSETOF(txppr_t, b80_1x1ofdm)
+#define WL_TX_POWER_MCS80_SISO_FIRST OFFSETOF(txppr_t, b80_1x1mcs0)
+#define WL_TX_POWER_80_S1x1_FIRST OFFSETOF(txppr_t, b80_1x1mcs0)
+
+#define WL_TX_POWER_80_DUMMY_CCK_CDD_S1x2_FIRST OFFSETOF(txppr_t, b80_dummy1x2dsss)
+#define WL_TX_POWER_OFDM80_CDD_FIRST OFFSETOF(txppr_t, b80_1x2cdd_ofdm)
+#define WL_TX_POWER_MCS80_CDD_FIRST OFFSETOF(txppr_t, b80_1x2cdd_mcs0)
+#define WL_TX_POWER_80_S1x2_FIRST OFFSETOF(txppr_t, b80_1x2cdd_mcs0)
+#define WL_TX_POWER_MCS80_STBC_FIRST OFFSETOF(txppr_t, b80_2x2stbc_mcs0)
+#define WL_TX_POWER_MCS80_SDM_FIRST OFFSETOF(txppr_t, b80_2x2sdm_mcs8)
+#define WL_TX_POWER_80_S2x2_FIRST OFFSETOF(txppr_t, b80_2x2sdm_mcs8)
+
+#define WL_TX_POWER_80_DUMMY_CCK_CDD_S1x3_FIRST OFFSETOF(txppr_t, b80_dummy1x3dsss)
+#define WL_TX_POWER_OFDM80_CDD_S1x3_FIRST OFFSETOF(txppr_t, b80_1x3cdd_ofdm)
+#define WL_TX_POWER_80_S1x3_FIRST OFFSETOF(txppr_t, b80_1x3cdd_mcs0)
+#define WL_TX_POWER_80_STBC_S2x3_FIRST OFFSETOF(txppr_t, b80_2x3stbc_mcs0)
+#define WL_TX_POWER_80_S2x3_FIRST OFFSETOF(txppr_t, b80_2x3sdm_mcs8)
+#define WL_TX_POWER_80_S3x3_FIRST OFFSETOF(txppr_t, b80_3x3sdm_mcs16)
+
+#define WL_TX_POWER_80_S1X1_VHT OFFSETOF(txppr_t, b80_1x1vht)
+#define WL_TX_POWER_80_S1X2_CDD_VHT OFFSETOF(txppr_t, b80_1x2cdd_vht)
+#define WL_TX_POWER_80_S2X2_STBC_VHT OFFSETOF(txppr_t, b80_2x2stbc_vht)
+#define WL_TX_POWER_80_S2X2_VHT OFFSETOF(txppr_t, b80_2x2sdm_vht)
+#define WL_TX_POWER_80_S1X3_CDD_VHT OFFSETOF(txppr_t, b80_1x3cdd_vht)
+#define WL_TX_POWER_80_S2X3_STBC_VHT OFFSETOF(txppr_t, b80_2x3stbc_vht)
+#define WL_TX_POWER_80_S2X3_VHT OFFSETOF(txppr_t, b80_2x3sdm_vht)
+#define WL_TX_POWER_80_S3X3_VHT OFFSETOF(txppr_t, b80_3x3sdm_vht)
+
+/* 20 in 80MHz */
+#define WL_TX_POWER_20UUL_CCK_FIRST OFFSETOF(txppr_t, b20in80_1x1dsss)
+#define WL_TX_POWER_20UUL_OFDM_FIRST OFFSETOF(txppr_t, b20in80_1x1ofdm)
+#define WL_TX_POWER_20UUL_S1x1_FIRST OFFSETOF(txppr_t, b20in80_1x1mcs0)
+
+#define WL_TX_POWER_CCK_20UU_CDD_S1x2_FIRST OFFSETOF(txppr_t, b20in80_1x2dsss)
+#define WL_TX_POWER_20UUL_OFDM_CDD_FIRST OFFSETOF(txppr_t, b20in80_1x2cdd_ofdm)
+#define WL_TX_POWER_20UUL_S1x2_FIRST OFFSETOF(txppr_t, b20in80_1x2cdd_mcs0)
+#define WL_TX_POWER_20UUL_STBC_S2x2_FIRST OFFSETOF(txppr_t, b20in80_2x2stbc_mcs0)
+#define WL_TX_POWER_20UUL_S2x2_FIRST OFFSETOF(txppr_t, b20in80_2x2sdm_mcs8)
+
+#define WL_TX_POWER_CCK_20UU_CDD_S1x3_FIRST OFFSETOF(txppr_t, b20in80_1x3dsss)
+#define WL_TX_POWER_20UUL_OFDM_CDD_S1x3_FIRST OFFSETOF(txppr_t, b20in80_1x3cdd_ofdm)
+#define WL_TX_POWER_20UUL_S1x3_FIRST OFFSETOF(txppr_t, b20in80_1x3cdd_mcs0)
+#define WL_TX_POWER_20UUL_STBC_S2x3_FIRST OFFSETOF(txppr_t, b20in80_2x3stbc_mcs0)
+#define WL_TX_POWER_20UUL_S2x3_FIRST OFFSETOF(txppr_t, b20in80_2x3sdm_mcs8)
+#define WL_TX_POWER_20UUL_S3x3_FIRST OFFSETOF(txppr_t, b20in80_3x3sdm_mcs16)
+
+#define WL_TX_POWER_20UUL_S1X1_VHT OFFSETOF(txppr_t, b20in80_1x1vht)
+#define WL_TX_POWER_20UUL_S1X2_CDD_VHT OFFSETOF(txppr_t, b20in80_1x2cdd_vht)
+#define WL_TX_POWER_20UUL_S2X2_STBC_VHT OFFSETOF(txppr_t, b20in80_2x2stbc_vht)
+#define WL_TX_POWER_20UUL_S2X2_VHT OFFSETOF(txppr_t, b20in80_2x2sdm_vht)
+#define WL_TX_POWER_20UUL_S1X3_CDD_VHT OFFSETOF(txppr_t, b20in80_1x3cdd_vht)
+#define WL_TX_POWER_20UUL_S2X3_STBC_VHT OFFSETOF(txppr_t, b20in80_2x3stbc_vht)
+#define WL_TX_POWER_20UUL_S2X3_VHT OFFSETOF(txppr_t, b20in80_2x3sdm_vht)
+#define WL_TX_POWER_20UUL_S3X3_VHT OFFSETOF(txppr_t, b20in80_3x3sdm_vht)
+
+/* 40 in 80MHz */
+#define WL_TX_POWER_40UUL_DUMMY_CCK_FIRST OFFSETOF(txppr_t, b40in80_dummy1x1dsss)
+#define WL_TX_POWER_40UUL_OFDM_FIRST OFFSETOF(txppr_t, b40in80_1x1ofdm)
+#define WL_TX_POWER_40UUL_S1x1_FIRST OFFSETOF(txppr_t, b40in80_1x1mcs0)
+
+#define WL_TX_POWER_CCK_40UU_DUMMY_CDD_S1x2_FIRST OFFSETOF(txppr_t, b40in80_dummy1x2dsss)
+#define WL_TX_POWER_40UUL_OFDM_CDD_FIRST OFFSETOF(txppr_t, b40in80_1x2cdd_ofdm)
+#define WL_TX_POWER_40UUL_S1x2_FIRST OFFSETOF(txppr_t, b40in80_1x2cdd_mcs0)
+#define WL_TX_POWER_40UUL_STBC_S2x2_FIRST OFFSETOF(txppr_t, b40in80_2x2stbc_mcs0)
+#define WL_TX_POWER_40UUL_S2x2_FIRST OFFSETOF(txppr_t, b40in80_2x2sdm_mcs8)
+
+#define WL_TX_POWER_CCK_40UU_DUMMY_CDD_S1x3_FIRST OFFSETOF(txppr_t, b40in80_dummy1x3dsss)
+#define WL_TX_POWER_40UUL_OFDM_CDD_S1x3_FIRST OFFSETOF(txppr_t, b40in80_1x3cdd_ofdm)
+#define WL_TX_POWER_40UUL_S1x3_FIRST OFFSETOF(txppr_t, b40in80_1x3cdd_mcs0)
+#define WL_TX_POWER_40UUL_STBC_S2x3_FIRST OFFSETOF(txppr_t, b40in80_2x3stbc_mcs0)
+#define WL_TX_POWER_40UUL_S2x3_FIRST OFFSETOF(txppr_t, b40in80_2x3sdm_mcs8)
+#define WL_TX_POWER_40UUL_S3x3_FIRST OFFSETOF(txppr_t, b40in80_3x3sdm_mcs16)
+
+#define WL_TX_POWER_40UUL_S1X1_VHT OFFSETOF(txppr_t, b40in80_1x1vht)
+#define WL_TX_POWER_40UUL_S1X2_CDD_VHT OFFSETOF(txppr_t, b40in80_1x2cdd_vht)
+#define WL_TX_POWER_40UUL_S2X2_STBC_VHT OFFSETOF(txppr_t, b40in80_2x2stbc_vht)
+#define WL_TX_POWER_40UUL_S2X2_VHT OFFSETOF(txppr_t, b40in80_2x2sdm_vht)
+#define WL_TX_POWER_40UUL_S1X3_CDD_VHT OFFSETOF(txppr_t, b40in80_1x3cdd_vht)
+#define WL_TX_POWER_40UUL_S2X3_STBC_VHT OFFSETOF(txppr_t, b40in80_2x3stbc_vht)
+#define WL_TX_POWER_40UUL_S2X3_VHT OFFSETOF(txppr_t, b40in80_2x3sdm_vht)
+#define WL_TX_POWER_40UUL_S3X3_VHT OFFSETOF(txppr_t, b40in80_3x3sdm_vht)
+
+#define WL_TX_POWER_MCS_32 OFFSETOF(txppr_t, mcs32) /* C_CHECK remove later */
+
+#define WL_TX_POWER_RATES sizeof(struct txppr)
+
+/* sslpnphy specifics */
+#define WL_TX_POWER_MCS20_SISO_FIRST_SSN WL_TX_POWER_MCS20_SISO_FIRST
+#define WL_TX_POWER_MCS40_SISO_FIRST_SSN WL_TX_POWER_MCS40_SISO_FIRST
+
+/* tx_power_t.flags bits */
+#define WL_TX_POWER_F_ENABLED 1
+#define WL_TX_POWER_F_HW 2
+#define WL_TX_POWER_F_MIMO 4
+#define WL_TX_POWER_F_SISO 8
+#define WL_TX_POWER_F_HT 0x10
+
+typedef struct {
+ uint16 ver; /* version of this struct */
+ uint16 len; /* length in bytes of this structure */
+ uint32 flags;
+ chanspec_t chanspec; /* txpwr report for this channel */
+ chanspec_t local_chanspec; /* channel on which we are associated */
+ uint8 ppr[WL_TX_POWER_RATES]; /* Latest target power */
+} wl_txppr_t;
+
+#define WL_TXPPR_VERSION 0
+#define WL_TXPPR_LENGTH (sizeof(wl_txppr_t))
+#define TX_POWER_T_VERSION 43
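+/* Illustrative, non-normative sketch: the OFFSETOF()-based WL_TX_POWER_*
+ * values above are byte offsets into txppr_t, so the same constants can index
+ * the flat ppr[] array of wl_txppr_t. The function name is hypothetical and
+ * the block is kept under #if 0 so it does not affect the build.
+ */
+#if 0
+static uint8
+example_ppr_lookup(const wl_txppr_t *report)
+{
+	/* target power entry for the 20MHz SISO MCS0 rate group */
+	return report->ppr[WL_TX_POWER_MCS20_SISO_FIRST];
+}
+#endif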
+
+/* Defines used with channel_bandwidth for curpower */
+#define WL_BW_20MHZ 0
+#define WL_BW_40MHZ 1
+#define WL_BW_80MHZ 2
+
+/* tx_power_t.flags bits */
+#ifdef PPR_API
+#define WL_TX_POWER2_F_ENABLED 1
+#define WL_TX_POWER2_F_HW 2
+#define WL_TX_POWER2_F_MIMO 4
+#define WL_TX_POWER2_F_SISO 8
+#define WL_TX_POWER2_F_HT 0x10
+#else
+#define WL_TX_POWER_F_ENABLED 1
+#define WL_TX_POWER_F_HW 2
+#define WL_TX_POWER_F_MIMO 4
+#define WL_TX_POWER_F_SISO 8
+#define WL_TX_POWER_F_HT 0x10
+#endif
+typedef struct {
+ uint32 flags;
+ chanspec_t chanspec; /* txpwr report for this channel */
+ chanspec_t local_chanspec; /* channel on which we are associated */
+ uint8 local_max; /* local max according to the AP */
+ uint8 local_constraint; /* local constraint according to the AP */
+ int8 antgain[2]; /* Ant gain for each band - from SROM */
+ uint8 rf_cores; /* count of RF Cores being reported */
+ uint8 est_Pout[4]; /* Latest tx power out estimate per RF chain */
+ uint8 est_Pout_act[4]; /* Latest tx power out estimate per RF chain
+ * without adjustment
+ */
+ uint8 est_Pout_cck; /* Latest CCK tx power out estimate */
+ uint8 tx_power_max[4]; /* Maximum target power among all rates */
+ uint tx_power_max_rate_ind[4]; /* Index of the rate with the max target power */
+ uint8 user_limit[WL_TX_POWER_RATES]; /* User limit */
+ int8 board_limit[WL_TX_POWER_RATES]; /* Max power board can support (SROM) */
+ int8 target[WL_TX_POWER_RATES]; /* Latest target power */
+ int8 clm_limits[WL_NUMRATES]; /* regulatory limits - 20, 40 or 80MHz */
+ int8 clm_limits_subchan1[WL_NUMRATES]; /* regulatory limits - 20in40 or 40in80 */
+ int8 clm_limits_subchan2[WL_NUMRATES]; /* regulatory limits - 20in80MHz */
+ int8 sar; /* SAR limit for display by wl executable */
+ int8 channel_bandwidth; /* 20, 40 or 80 MHz bandwidth? */
+ uint8 version; /* Version of the data format wlu <--> driver */
+ uint8 display_core; /* Displayed curpower core */
+#ifdef PPR_API
+} tx_power_new_t;
+#else
+} tx_power_t;
+#endif
+
+typedef struct tx_inst_power {
+ uint8 txpwr_est_Pout[2]; /* Latest estimate for 2.4 and 5 Ghz */
+ uint8 txpwr_est_Pout_gofdm; /* Pwr estimate for 2.4 OFDM */
+} tx_inst_power_t;
+
+
+typedef struct {
+ uint32 flags;
+ chanspec_t chanspec; /* txpwr report for this channel */
+ chanspec_t local_chanspec; /* channel on which we are associated */
+ uint8 local_max; /* local max according to the AP */
+ uint8 local_constraint; /* local constraint according to the AP */
+ int8 antgain[2]; /* Ant gain for each band - from SROM */
+ uint8 rf_cores; /* count of RF Cores being reported */
+ uint8 est_Pout[4]; /* Latest tx power out estimate per RF chain */
+ uint8 est_Pout_act[4]; /* Latest tx power out estimate per RF chain
+ * without adjustment
+ */
+ uint8 est_Pout_cck; /* Latest CCK tx power out estimate */
+ uint8 tx_power_max[4]; /* Maximum target power among all rates */
+ uint tx_power_max_rate_ind[4]; /* Index of the rate with the max target power */
+ txppr_t user_limit; /* User limit */
+ txppr_t reg_limit; /* Regulatory power limit */
+ txppr_t board_limit; /* Max power board can support (SROM) */
+ txppr_t target; /* Latest target power */
+} wl_txpwr_t;
+
+#define WL_NUM_TXCHAIN_MAX 4
+typedef struct wl_txchain_pwr_offsets {
+ int8 offset[WL_NUM_TXCHAIN_MAX]; /* quarter dBm signed offset for each chain */
+} wl_txchain_pwr_offsets_t;
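+/* Illustrative sketch: offset[] above carries signed quarter-dBm values, so a
+ * whole-dB figure is obtained by dividing by four. The function name is
+ * hypothetical; kept under #if 0 so it does not affect the build.
+ */
+#if 0
+static int
+example_chain_offset_db(const wl_txchain_pwr_offsets_t *o, int chain)
+{
+	return o->offset[chain] / 4;	/* quarter dBm -> dB, truncating */
+}
+#endif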
+
+/* 802.11h measurement types */
+#define WLC_MEASURE_TPC 1
+#define WLC_MEASURE_CHANNEL_BASIC 2
+#define WLC_MEASURE_CHANNEL_CCA 3
+#define WLC_MEASURE_CHANNEL_RPI 4
+
+/* regulatory enforcement levels */
+#define SPECT_MNGMT_OFF 0 /* both 11h and 11d disabled */
+#define SPECT_MNGMT_LOOSE_11H 1 /* allow non-11h APs in scan lists */
+#define SPECT_MNGMT_STRICT_11H 2 /* prune out non-11h APs from scan list */
+#define SPECT_MNGMT_STRICT_11D 3 /* switch to 802.11D mode */
+/* SPECT_MNGMT_LOOSE_11H_D - same as SPECT_MNGMT_LOOSE_11H except that Country IE
+ * adoption is done regardless of the spectrum_management capability
+ */
+#define SPECT_MNGMT_LOOSE_11H_D 4 /* operation defined above */
+
+#define WL_CHAN_VALID_HW (1 << 0) /* valid with current HW */
+#define WL_CHAN_VALID_SW (1 << 1) /* valid with current country setting */
+#define WL_CHAN_BAND_5G (1 << 2) /* 5GHz-band channel */
+#define WL_CHAN_RADAR (1 << 3) /* radar sensitive channel */
+#define WL_CHAN_INACTIVE (1 << 4) /* temporarily inactive due to radar */
+#define WL_CHAN_PASSIVE (1 << 5) /* channel is in passive mode */
+#define WL_CHAN_RESTRICTED (1 << 6) /* restricted use channel */
+
+/* BTC mode used by "btc_mode" iovar */
+#define WL_BTC_DISABLE 0 /* disable BT coexistence */
+#define WL_BTC_FULLTDM 1 /* full TDM COEX */
+#define	WL_BTC_ENABLE		1	/* full TDM COEX; kept for backward compatibility */
+#define WL_BTC_PREMPT 2 /* full TDM COEX with preemption */
+#define	WL_BTC_LITE		3	/* lightweight coex for large-isolation platforms */
+#define WL_BTC_PARALLEL 4 /* BT and WLAN run in parallel with separate antenna */
+#define WL_BTC_HYBRID 5 /* hybrid coex, only ack is allowed to transmit in BT slot */
+#define WL_BTC_DEFAULT 8 /* set the default mode for the device */
+#define WL_INF_BTC_DISABLE 0
+#define WL_INF_BTC_ENABLE 1
+#define WL_INF_BTC_AUTO 3
+
+/* BTC wire used by "btc_wire" iovar */
+#define WL_BTC_DEFWIRE 0 /* use default wire setting */
+#define WL_BTC_2WIRE 2 /* use 2-wire BTC */
+#define WL_BTC_3WIRE 3 /* use 3-wire BTC */
+#define WL_BTC_4WIRE 4 /* use 4-wire BTC */
+
+/* BTC flags: BTC configuration that can be set by host */
+#define WL_BTC_FLAG_PREMPT (1 << 0)
+#define WL_BTC_FLAG_BT_DEF (1 << 1)
+#define WL_BTC_FLAG_ACTIVE_PROT (1 << 2)
+#define WL_BTC_FLAG_SIM_RSP (1 << 3)
+#define WL_BTC_FLAG_PS_PROTECT (1 << 4)
+#define WL_BTC_FLAG_SIM_TX_LP (1 << 5)
+#define WL_BTC_FLAG_ECI (1 << 6)
+#define WL_BTC_FLAG_LIGHT (1 << 7)
+#define WL_BTC_FLAG_PARALLEL (1 << 8)
+
+/* Message levels */
+#define WL_ERROR_VAL 0x00000001
+#define WL_TRACE_VAL 0x00000002
+#define WL_PRHDRS_VAL 0x00000004
+#define WL_PRPKT_VAL 0x00000008
+#define WL_INFORM_VAL 0x00000010
+#define WL_TMP_VAL 0x00000020
+#define WL_OID_VAL 0x00000040
+#define WL_RATE_VAL 0x00000080
+#define WL_ASSOC_VAL 0x00000100
+#define WL_PRUSR_VAL 0x00000200
+#define WL_PS_VAL 0x00000400
+#define WL_TXPWR_VAL 0x00000800 /* retired in TOT on 6/10/2009 */
+#define WL_PORT_VAL 0x00001000
+#define WL_DUAL_VAL 0x00002000
+#define WL_WSEC_VAL 0x00004000
+#define WL_WSEC_DUMP_VAL 0x00008000
+#define WL_LOG_VAL 0x00010000
+#define WL_NRSSI_VAL 0x00020000 /* retired in TOT on 6/10/2009 */
+#define WL_LOFT_VAL 0x00040000 /* retired in TOT on 6/10/2009 */
+#define WL_REGULATORY_VAL 0x00080000
+#define WL_PHYCAL_VAL 0x00100000 /* retired in TOT on 6/10/2009 */
+#define WL_RADAR_VAL 0x00200000 /* retired in TOT on 6/10/2009 */
+#define WL_MPC_VAL 0x00400000
+#define WL_APSTA_VAL 0x00800000
+#define WL_DFS_VAL 0x01000000
+#define WL_BA_VAL 0x02000000 /* retired in TOT on 6/14/2010 */
+#define WL_ACI_VAL 0x04000000
+#define WL_MBSS_VAL 0x04000000
+#define WL_CAC_VAL 0x08000000
+#define WL_AMSDU_VAL 0x10000000
+#define WL_AMPDU_VAL 0x20000000
+#define WL_FFPLD_VAL 0x40000000
+
+/* wl_msg_level is full. For new bits take the next one and AND with
+ * wl_msg_level2 in wl_dbg.h
+ */
+#define WL_DPT_VAL 0x00000001
+#define WL_SCAN_VAL 0x00000002
+#define WL_WOWL_VAL 0x00000004
+#define WL_COEX_VAL 0x00000008
+#define WL_RTDC_VAL 0x00000010
+#define WL_PROTO_VAL 0x00000020
+#define WL_BTA_VAL 0x00000040
+#define WL_CHANINT_VAL 0x00000080
+#define WL_THERMAL_VAL 0x00000100 /* retired in TOT on 6/10/2009 */
+#define WL_P2P_VAL 0x00000200
+#define WL_ITFR_VAL 0x00000400
+#define WL_MCHAN_VAL 0x00000800
+#define WL_TDLS_VAL 0x00001000
+#define WL_MCNX_VAL 0x00002000
+#define WL_PROT_VAL 0x00004000
+#define WL_PSTA_VAL 0x00008000
+#define WL_TBTT_VAL 0x00010000
+#define WL_NIC_VAL 0x00020000
+#define WL_PWRSEL_VAL 0x00040000
+/* use the top bit for WL_TIMESTAMP_VAL because it is a modifier
+ * rather than a message-type of its own
+ */
+#define WL_TIMESTAMP_VAL 0x80000000
+
+/* max # of leds supported by GPIO (gpio pin# == led index#) */
+#define WL_LED_NUMGPIO 32 /* gpio 0-31 */
+
+/* led per-pin behaviors */
+#define WL_LED_OFF 0 /* always off */
+#define WL_LED_ON 1 /* always on */
+#define WL_LED_ACTIVITY 2 /* activity */
+#define WL_LED_RADIO 3 /* radio enabled */
+#define WL_LED_ARADIO 4 /* 5 Ghz radio enabled */
+#define WL_LED_BRADIO 5 /* 2.4Ghz radio enabled */
+#define WL_LED_BGMODE 6 /* on if gmode, off if bmode */
+#define WL_LED_WI1 7
+#define WL_LED_WI2 8
+#define WL_LED_WI3 9
+#define WL_LED_ASSOC 10 /* associated state indicator */
+#define WL_LED_INACTIVE 11 /* null behavior (clears default behavior) */
+#define WL_LED_ASSOCACT 12 /* on when associated; blink fast for activity */
+#define WL_LED_WI4 13
+#define WL_LED_WI5 14
+#define WL_LED_BLINKSLOW 15 /* blink slow */
+#define WL_LED_BLINKMED 16 /* blink med */
+#define WL_LED_BLINKFAST 17 /* blink fast */
+#define WL_LED_BLINKCUSTOM 18 /* blink custom */
+#define WL_LED_BLINKPERIODIC 19 /* blink periodic (custom 1000ms / off 400ms) */
+#define	WL_LED_ASSOC_WITH_SEC	20	/* when connected with security, keep on for 300 sec */
+#define WL_LED_START_OFF 21 /* off upon boot, could be turned on later */
+#define WL_LED_NUMBEHAVIOR 22
+
+/* led behavior numeric value format */
+#define WL_LED_BEH_MASK 0x7f /* behavior mask */
+#define WL_LED_AL_MASK 0x80 /* activelow (polarity) bit */
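+/* Illustrative sketch: a per-pin LED value combines a behavior code (within
+ * WL_LED_BEH_MASK) with WL_LED_AL_MASK when the pin is active low. The
+ * function name is hypothetical; kept under #if 0 so it does not affect the
+ * build.
+ */
+#if 0
+static uint8
+example_led_value(uint8 behavior, int active_low)
+{
+	return (uint8)((behavior & WL_LED_BEH_MASK) |
+		(active_low ? WL_LED_AL_MASK : 0));
+}
+#endif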
+
+/* maximum channels returned by the get valid channels iovar */
+#define WL_NUMCHANNELS 64
+
+/* max number of chanspecs (used by the iovar to calc. buf space) */
+#define WL_NUMCHANSPECS 110
+
+/* WDS link local endpoint WPA role */
+#define WL_WDS_WPA_ROLE_AUTH 0 /* authenticator */
+#define WL_WDS_WPA_ROLE_SUP 1 /* supplicant */
+#define WL_WDS_WPA_ROLE_AUTO 255 /* auto, based on mac addr value */
+
+/* number of bytes needed to define a 128-bit mask for MAC event reporting */
+#define WL_EVENTING_MASK_LEN 16
+
+/*
+ * Join preference iovar value is an array of tuples. Each tuple has a one-byte type,
+ * a one-byte length, and a variable length value. RSSI type tuple must be present
+ * in the array.
+ *
+ * Types are defined in "join preference types" section.
+ *
+ * Length is the value size in octets. For the WL_JOIN_PREF_WPA tuple the length
+ * field is reserved and must be set to zero.
+ *
+ * Values are defined below.
+ *
+ * 1. RSSI - 2 octets
+ * offset 0: reserved
+ * offset 1: reserved
+ *
+ * 2. WPA - 2 + 12 * n octets (n is # tuples defined below)
+ * offset 0: reserved
+ * offset 1: # of tuples
+ * offset 2: tuple 1
+ * offset 14: tuple 2
+ * ...
+ * offset 2 + 12 * (n - 1) octets: tuple n
+ *
+ * struct wpa_cfg_tuple {
+ * uint8 akm[DOT11_OUI_LEN+1]; akm suite
+ * uint8 ucipher[DOT11_OUI_LEN+1]; unicast cipher suite
+ * uint8 mcipher[DOT11_OUI_LEN+1]; multicast cipher suite
+ * };
+ *
+ * multicast cipher suite can be specified as a specific cipher suite or WL_WPA_ACP_MCS_ANY.
+ *
+ * 3. BAND - 2 octets
+ * offset 0: reserved
+ * offset 1: see "band preference" and "band types"
+ *
+ * 4. BAND RSSI - 2 octets
+ * offset 0: band types
+ *	offset 1: positive RSSI boost value in dB
+ *
+ * (An illustrative, non-normative sketch of a join preference buffer follows
+ * the definitions below.)
+ */
+
+/* join preference types */
+#define WL_JOIN_PREF_RSSI 1 /* by RSSI */
+#define WL_JOIN_PREF_WPA 2 /* by akm and ciphers */
+#define WL_JOIN_PREF_BAND 3 /* by 802.11 band */
+#define WL_JOIN_PREF_RSSI_DELTA 4 /* by 802.11 band only if RSSI delta condition matches */
+#define WL_JOIN_PREF_TRANS_PREF 5 /* defined by requesting AP */
+
+/* band preference */
+#define WLJP_BAND_ASSOC_PREF 255 /* use what WLC_SET_ASSOC_PREFER ioctl specifies */
+
+/* any multicast cipher suite */
+#define WL_WPA_ACP_MCS_ANY "\x00\x00\x00\x00"
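+/* Illustrative, non-normative sketch of a join preference buffer built from
+ * the tuple layout documented above: the mandatory RSSI tuple followed by a
+ * BAND tuple that defers to the WLC_SET_ASSOC_PREFER setting. The array name
+ * is hypothetical; kept under #if 0 so it does not affect the build.
+ */
+#if 0
+static const uint8 example_join_pref[] = {
+	WL_JOIN_PREF_RSSI, 2, 0, 0,			/* both value octets reserved */
+	WL_JOIN_PREF_BAND, 2, 0, WLJP_BAND_ASSOC_PREF	/* offset 1 selects the band */
+};
+#endif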
+
+struct tsinfo_arg {
+ uint8 octets[3];
+};
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+#define	NFIFO			6	/* # tx/rx fifo pairs */
+
+#define WL_CNT_T_VERSION 8 /* current version of wl_cnt_t struct */
+
+typedef struct {
+ uint16 version; /* see definition of WL_CNT_T_VERSION */
+ uint16 length; /* length of entire structure */
+
+ /* transmit stat counters */
+ uint32 txframe; /* tx data frames */
+ uint32 txbyte; /* tx data bytes */
+ uint32 txretrans; /* tx mac retransmits */
+ uint32 txerror; /* tx data errors (derived: sum of others) */
+ uint32 txctl; /* tx management frames */
+ uint32 txprshort; /* tx short preamble frames */
+ uint32 txserr; /* tx status errors */
+ uint32 txnobuf; /* tx out of buffers errors */
+ uint32 txnoassoc; /* tx discard because we're not associated */
+ uint32 txrunt; /* tx runt frames */
+ uint32 txchit; /* tx header cache hit (fastpath) */
+ uint32 txcmiss; /* tx header cache miss (slowpath) */
+
+ /* transmit chip error counters */
+ uint32 txuflo; /* tx fifo underflows */
+ uint32 txphyerr; /* tx phy errors (indicated in tx status) */
+ uint32 txphycrs;
+
+ /* receive stat counters */
+ uint32 rxframe; /* rx data frames */
+ uint32 rxbyte; /* rx data bytes */
+ uint32 rxerror; /* rx data errors (derived: sum of others) */
+ uint32 rxctl; /* rx management frames */
+ uint32 rxnobuf; /* rx out of buffers errors */
+ uint32 rxnondata; /* rx non data frames in the data channel errors */
+ uint32 rxbadds; /* rx bad DS errors */
+ uint32 rxbadcm; /* rx bad control or management frames */
+ uint32 rxfragerr; /* rx fragmentation errors */
+ uint32 rxrunt; /* rx runt frames */
+ uint32 rxgiant; /* rx giant frames */
+ uint32 rxnoscb; /* rx no scb error */
+ uint32 rxbadproto; /* rx invalid frames */
+ uint32 rxbadsrcmac; /* rx frames with Invalid Src Mac */
+ uint32 rxbadda; /* rx frames tossed for invalid da */
+ uint32 rxfilter; /* rx frames filtered out */
+
+ /* receive chip error counters */
+ uint32 rxoflo; /* rx fifo overflow errors */
+ uint32 rxuflo[NFIFO]; /* rx dma descriptor underflow errors */
+
+ uint32 d11cnt_txrts_off; /* d11cnt txrts value when reset d11cnt */
+ uint32 d11cnt_rxcrc_off; /* d11cnt rxcrc value when reset d11cnt */
+ uint32 d11cnt_txnocts_off; /* d11cnt txnocts value when reset d11cnt */
+
+ /* misc counters */
+ uint32 dmade; /* tx/rx dma descriptor errors */
+ uint32 dmada; /* tx/rx dma data errors */
+ uint32 dmape; /* tx/rx dma descriptor protocol errors */
+ uint32 reset; /* reset count */
+	uint32	tbtt;		/* count of TBTT interrupts */
+ uint32 txdmawar;
+ uint32 pkt_callback_reg_fail; /* callbacks register failure */
+
+ /* MAC counters: 32-bit version of d11.h's macstat_t */
+ uint32 txallfrm; /* total number of frames sent, incl. Data, ACK, RTS, CTS,
+ * Control Management (includes retransmissions)
+ */
+ uint32 txrtsfrm; /* number of RTS sent out by the MAC */
+ uint32 txctsfrm; /* number of CTS sent out by the MAC */
+ uint32 txackfrm; /* number of ACK frames sent out */
+ uint32 txdnlfrm; /* Not used */
+ uint32 txbcnfrm; /* beacons transmitted */
+ uint32 txfunfl[8]; /* per-fifo tx underflows */
+ uint32 txtplunfl; /* Template underflows (mac was too slow to transmit ACK/CTS
+ * or BCN)
+ */
+ uint32 txphyerror; /* Transmit phy error, type of error is reported in tx-status for
+ * driver enqueued frames
+ */
+ uint32 rxfrmtoolong; /* Received frame longer than legal limit (2346 bytes) */
+ uint32 rxfrmtooshrt; /* Received frame did not contain enough bytes for its frame type */
+ uint32 rxinvmachdr; /* Either the protocol version != 0 or frame type not
+ * data/control/management
+ */
+ uint32 rxbadfcs; /* number of frames for which the CRC check failed in the MAC */
+ uint32 rxbadplcp; /* parity check of the PLCP header failed */
+ uint32 rxcrsglitch; /* PHY was able to correlate the preamble but not the header */
+ uint32 rxstrt; /* Number of received frames with a good PLCP
+ * (i.e. passing parity check)
+ */
+ uint32 rxdfrmucastmbss; /* Number of received DATA frames with good FCS and matching RA */
+ uint32 rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */
+ uint32 rxcfrmucast; /* number of received CNTRL frames with good FCS and matching RA */
+ uint32 rxrtsucast; /* number of unicast RTS addressed to the MAC (good FCS) */
+ uint32 rxctsucast; /* number of unicast CTS addressed to the MAC (good FCS) */
+ uint32 rxackucast; /* number of ucast ACKS received (good FCS) */
+ uint32 rxdfrmocast; /* number of received DATA frames (good FCS and not matching RA) */
+ uint32 rxmfrmocast; /* number of received MGMT frames (good FCS and not matching RA) */
+ uint32 rxcfrmocast; /* number of received CNTRL frame (good FCS and not matching RA) */
+ uint32 rxrtsocast; /* number of received RTS not addressed to the MAC */
+ uint32 rxctsocast; /* number of received CTS not addressed to the MAC */
+ uint32 rxdfrmmcast; /* number of RX Data multicast frames received by the MAC */
+ uint32 rxmfrmmcast; /* number of RX Management multicast frames received by the MAC */
+ uint32 rxcfrmmcast; /* number of RX Control multicast frames received by the MAC
+ * (unlikely to see these)
+ */
+ uint32 rxbeaconmbss; /* beacons received from member of BSS */
+ uint32 rxdfrmucastobss; /* number of unicast frames addressed to the MAC from
+ * other BSS (WDS FRAME)
+ */
+ uint32 rxbeaconobss; /* beacons received from other BSS */
+ uint32 rxrsptmout; /* Number of response timeouts for transmitted frames
+ * expecting a response
+ */
+ uint32 bcntxcancl; /* transmit beacons canceled due to receipt of beacon (IBSS) */
+ uint32 rxf0ovfl; /* Number of receive fifo 0 overflows */
+ uint32 rxf1ovfl; /* Number of receive fifo 1 overflows (obsolete) */
+ uint32 rxf2ovfl; /* Number of receive fifo 2 overflows (obsolete) */
+ uint32 txsfovfl; /* Number of transmit status fifo overflows (obsolete) */
+ uint32 pmqovfl; /* Number of PMQ overflows */
+ uint32 rxcgprqfrm; /* Number of received Probe requests that made it into
+ * the PRQ fifo
+ */
+	uint32	rxcgprsqovfl;	/* Rx Probe Request Queue overflow in the AP */
+ uint32 txcgprsfail; /* Tx Probe Response Fail. AP sent probe response but did
+ * not get ACK
+ */
+ uint32 txcgprssuc; /* Tx Probe Response Success (ACK was received) */
+ uint32 prs_timeout; /* Number of probe requests that were dropped from the PRQ
+ * fifo because a probe response could not be sent out within
+ * the time limit defined in M_PRS_MAXTIME
+ */
+ uint32 rxnack; /* obsolete */
+ uint32 frmscons; /* obsolete */
+ uint32 txnack; /* obsolete */
+ uint32 txglitch_nack; /* obsolete */
+ uint32 txburst; /* obsolete */
+
+ /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */
+ uint32 txfrag; /* dot11TransmittedFragmentCount */
+ uint32 txmulti; /* dot11MulticastTransmittedFrameCount */
+ uint32 txfail; /* dot11FailedCount */
+ uint32 txretry; /* dot11RetryCount */
+ uint32 txretrie; /* dot11MultipleRetryCount */
+ uint32 rxdup; /* dot11FrameduplicateCount */
+ uint32 txrts; /* dot11RTSSuccessCount */
+ uint32 txnocts; /* dot11RTSFailureCount */
+ uint32 txnoack; /* dot11ACKFailureCount */
+ uint32 rxfrag; /* dot11ReceivedFragmentCount */
+ uint32 rxmulti; /* dot11MulticastReceivedFrameCount */
+ uint32 rxcrc; /* dot11FCSErrorCount */
+ uint32 txfrmsnt; /* dot11TransmittedFrameCount (bogus MIB?) */
+ uint32 rxundec; /* dot11WEPUndecryptableCount */
+
+ /* WPA2 counters (see rxundec for DecryptFailureCount) */
+ uint32 tkipmicfaill; /* TKIPLocalMICFailures */
+ uint32 tkipcntrmsr; /* TKIPCounterMeasuresInvoked */
+ uint32 tkipreplay; /* TKIPReplays */
+ uint32 ccmpfmterr; /* CCMPFormatErrors */
+ uint32 ccmpreplay; /* CCMPReplays */
+ uint32 ccmpundec; /* CCMPDecryptErrors */
+ uint32 fourwayfail; /* FourWayHandshakeFailures */
+ uint32 wepundec; /* dot11WEPUndecryptableCount */
+ uint32 wepicverr; /* dot11WEPICVErrorCount */
+ uint32 decsuccess; /* DecryptSuccessCount */
+ uint32 tkipicverr; /* TKIPICVErrorCount */
+ uint32 wepexcluded; /* dot11WEPExcludedCount */
+
+ uint32 txchanrej; /* Tx frames suppressed due to channel rejection */
+ uint32 psmwds; /* Count PSM watchdogs */
+ uint32 phywatchdog; /* Count Phy watchdogs (triggered by ucode) */
+
+ /* MBSS counters, AP only */
+ uint32 prq_entries_handled; /* PRQ entries read in */
+ uint32 prq_undirected_entries; /* which were bcast bss & ssid */
+ uint32 prq_bad_entries; /* which could not be translated to info */
+ uint32 atim_suppress_count; /* TX suppressions on ATIM fifo */
+ uint32 bcn_template_not_ready; /* Template marked in use on send bcn ... */
+ uint32 bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */
+ uint32 late_tbtt_dpc; /* TBTT DPC did not happen in time */
+
+ /* per-rate receive stat counters */
+ uint32 rx1mbps; /* packets rx at 1Mbps */
+ uint32 rx2mbps; /* packets rx at 2Mbps */
+ uint32 rx5mbps5; /* packets rx at 5.5Mbps */
+ uint32 rx6mbps; /* packets rx at 6Mbps */
+ uint32 rx9mbps; /* packets rx at 9Mbps */
+ uint32 rx11mbps; /* packets rx at 11Mbps */
+ uint32 rx12mbps; /* packets rx at 12Mbps */
+ uint32 rx18mbps; /* packets rx at 18Mbps */
+ uint32 rx24mbps; /* packets rx at 24Mbps */
+ uint32 rx36mbps; /* packets rx at 36Mbps */
+ uint32 rx48mbps; /* packets rx at 48Mbps */
+ uint32 rx54mbps; /* packets rx at 54Mbps */
+ uint32 rx108mbps; /* packets rx at 108mbps */
+ uint32 rx162mbps; /* packets rx at 162mbps */
+ uint32 rx216mbps; /* packets rx at 216 mbps */
+ uint32 rx270mbps; /* packets rx at 270 mbps */
+ uint32 rx324mbps; /* packets rx at 324 mbps */
+ uint32 rx378mbps; /* packets rx at 378 mbps */
+ uint32 rx432mbps; /* packets rx at 432 mbps */
+ uint32 rx486mbps; /* packets rx at 486 mbps */
+ uint32 rx540mbps; /* packets rx at 540 mbps */
+
+ /* pkteng rx frame stats */
+ uint32 pktengrxducast; /* unicast frames rxed by the pkteng code */
+ uint32 pktengrxdmcast; /* multicast frames rxed by the pkteng code */
+
+ uint32 rfdisable; /* count of radio disables */
+ uint32 bphy_rxcrsglitch; /* PHY count of bphy glitches */
+
+ uint32 txexptime; /* Tx frames suppressed due to timer expiration */
+
+ uint32 txmpdu_sgi; /* count for sgi transmit */
+ uint32 rxmpdu_sgi; /* count for sgi received */
+ uint32 txmpdu_stbc; /* count for stbc transmit */
+ uint32 rxmpdu_stbc; /* count for stbc received */
+
+ uint32 rxundec_mcst; /* dot11WEPUndecryptableCount */
+
+ /* WPA2 counters (see rxundec for DecryptFailureCount) */
+ uint32 tkipmicfaill_mcst; /* TKIPLocalMICFailures */
+ uint32 tkipcntrmsr_mcst; /* TKIPCounterMeasuresInvoked */
+ uint32 tkipreplay_mcst; /* TKIPReplays */
+ uint32 ccmpfmterr_mcst; /* CCMPFormatErrors */
+ uint32 ccmpreplay_mcst; /* CCMPReplays */
+ uint32 ccmpundec_mcst; /* CCMPDecryptErrors */
+ uint32 fourwayfail_mcst; /* FourWayHandshakeFailures */
+ uint32 wepundec_mcst; /* dot11WEPUndecryptableCount */
+ uint32 wepicverr_mcst; /* dot11WEPICVErrorCount */
+ uint32 decsuccess_mcst; /* DecryptSuccessCount */
+ uint32 tkipicverr_mcst; /* TKIPICVErrorCount */
+ uint32 wepexcluded_mcst; /* dot11WEPExcludedCount */
+
+ uint32 dma_hang; /* count for dma hang */
+ uint32 reinit; /* count for reinit */
+
+ uint32 pstatxucast; /* count of ucast frames xmitted on all psta assoc */
+ uint32 pstatxnoassoc; /* count of txnoassoc frames xmitted on all psta assoc */
+ uint32 pstarxucast; /* count of ucast frames received on all psta assoc */
+ uint32 pstarxbcmc; /* count of bcmc frames received on all psta */
+ uint32 pstatxbcmc; /* count of bcmc frames transmitted on all psta */
+
+ uint32 cso_passthrough; /* hw cso required but passthrough */
+} wl_cnt_t;
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+typedef struct {
+ uint16 version; /* see definition of WL_CNT_T_VERSION */
+ uint16 length; /* length of entire structure */
+
+ /* transmit stat counters */
+ uint32 txframe; /* tx data frames */
+ uint32 txbyte; /* tx data bytes */
+ uint32 txretrans; /* tx mac retransmits */
+ uint32 txerror; /* tx data errors (derived: sum of others) */
+ uint32 txctl; /* tx management frames */
+ uint32 txprshort; /* tx short preamble frames */
+ uint32 txserr; /* tx status errors */
+ uint32 txnobuf; /* tx out of buffers errors */
+ uint32 txnoassoc; /* tx discard because we're not associated */
+ uint32 txrunt; /* tx runt frames */
+ uint32 txchit; /* tx header cache hit (fastpath) */
+ uint32 txcmiss; /* tx header cache miss (slowpath) */
+
+ /* transmit chip error counters */
+ uint32 txuflo; /* tx fifo underflows */
+ uint32 txphyerr; /* tx phy errors (indicated in tx status) */
+ uint32 txphycrs;
+
+ /* receive stat counters */
+ uint32 rxframe; /* rx data frames */
+ uint32 rxbyte; /* rx data bytes */
+ uint32 rxerror; /* rx data errors (derived: sum of others) */
+ uint32 rxctl; /* rx management frames */
+ uint32 rxnobuf; /* rx out of buffers errors */
+ uint32 rxnondata; /* rx non data frames in the data channel errors */
+ uint32 rxbadds; /* rx bad DS errors */
+ uint32 rxbadcm; /* rx bad control or management frames */
+ uint32 rxfragerr; /* rx fragmentation errors */
+ uint32 rxrunt; /* rx runt frames */
+ uint32 rxgiant; /* rx giant frames */
+ uint32 rxnoscb; /* rx no scb error */
+ uint32 rxbadproto; /* rx invalid frames */
+ uint32 rxbadsrcmac; /* rx frames with Invalid Src Mac */
+ uint32 rxbadda; /* rx frames tossed for invalid da */
+ uint32 rxfilter; /* rx frames filtered out */
+
+ /* receive chip error counters */
+ uint32 rxoflo; /* rx fifo overflow errors */
+ uint32 rxuflo[NFIFO]; /* rx dma descriptor underflow errors */
+
+ uint32 d11cnt_txrts_off; /* d11cnt txrts value when reset d11cnt */
+ uint32 d11cnt_rxcrc_off; /* d11cnt rxcrc value when reset d11cnt */
+ uint32 d11cnt_txnocts_off; /* d11cnt txnocts value when reset d11cnt */
+
+ /* misc counters */
+ uint32 dmade; /* tx/rx dma descriptor errors */
+ uint32 dmada; /* tx/rx dma data errors */
+ uint32 dmape; /* tx/rx dma descriptor protocol errors */
+ uint32 reset; /* reset count */
+	uint32	tbtt;		/* count of TBTT interrupts */
+ uint32 txdmawar;
+ uint32 pkt_callback_reg_fail; /* callbacks register failure */
+
+ /* MAC counters: 32-bit version of d11.h's macstat_t */
+ uint32 txallfrm; /* total number of frames sent, incl. Data, ACK, RTS, CTS,
+ * Control Management (includes retransmissions)
+ */
+ uint32 txrtsfrm; /* number of RTS sent out by the MAC */
+ uint32 txctsfrm; /* number of CTS sent out by the MAC */
+ uint32 txackfrm; /* number of ACK frames sent out */
+ uint32 txdnlfrm; /* Not used */
+ uint32 txbcnfrm; /* beacons transmitted */
+ uint32 txfunfl[8]; /* per-fifo tx underflows */
+ uint32 txtplunfl; /* Template underflows (mac was too slow to transmit ACK/CTS
+ * or BCN)
+ */
+ uint32 txphyerror; /* Transmit phy error, type of error is reported in tx-status for
+ * driver enqueued frames
+ */
+ uint32 rxfrmtoolong; /* Received frame longer than legal limit (2346 bytes) */
+ uint32 rxfrmtooshrt; /* Received frame did not contain enough bytes for its frame type */
+ uint32 rxinvmachdr; /* Either the protocol version != 0 or frame type not
+ * data/control/management
+ */
+ uint32 rxbadfcs; /* number of frames for which the CRC check failed in the MAC */
+ uint32 rxbadplcp; /* parity check of the PLCP header failed */
+ uint32 rxcrsglitch; /* PHY was able to correlate the preamble but not the header */
+ uint32 rxstrt; /* Number of received frames with a good PLCP
+ * (i.e. passing parity check)
+ */
+ uint32 rxdfrmucastmbss; /* Number of received DATA frames with good FCS and matching RA */
+ uint32 rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */
+ uint32 rxcfrmucast; /* number of received CNTRL frames with good FCS and matching RA */
+ uint32 rxrtsucast; /* number of unicast RTS addressed to the MAC (good FCS) */
+ uint32 rxctsucast; /* number of unicast CTS addressed to the MAC (good FCS) */
+ uint32 rxackucast; /* number of ucast ACKS received (good FCS) */
+ uint32 rxdfrmocast; /* number of received DATA frames (good FCS and not matching RA) */
+ uint32 rxmfrmocast; /* number of received MGMT frames (good FCS and not matching RA) */
+ uint32 rxcfrmocast; /* number of received CNTRL frame (good FCS and not matching RA) */
+ uint32 rxrtsocast; /* number of received RTS not addressed to the MAC */
+ uint32 rxctsocast; /* number of received CTS not addressed to the MAC */
+ uint32 rxdfrmmcast; /* number of RX Data multicast frames received by the MAC */
+ uint32 rxmfrmmcast; /* number of RX Management multicast frames received by the MAC */
+ uint32 rxcfrmmcast; /* number of RX Control multicast frames received by the MAC
+ * (unlikely to see these)
+ */
+ uint32 rxbeaconmbss; /* beacons received from member of BSS */
+ uint32 rxdfrmucastobss; /* number of unicast frames addressed to the MAC from
+ * other BSS (WDS FRAME)
+ */
+ uint32 rxbeaconobss; /* beacons received from other BSS */
+ uint32 rxrsptmout; /* Number of response timeouts for transmitted frames
+ * expecting a response
+ */
+ uint32 bcntxcancl; /* transmit beacons canceled due to receipt of beacon (IBSS) */
+ uint32 rxf0ovfl; /* Number of receive fifo 0 overflows */
+ uint32 rxf1ovfl; /* Number of receive fifo 1 overflows (obsolete) */
+ uint32 rxf2ovfl; /* Number of receive fifo 2 overflows (obsolete) */
+ uint32 txsfovfl; /* Number of transmit status fifo overflows (obsolete) */
+ uint32 pmqovfl; /* Number of PMQ overflows */
+ uint32 rxcgprqfrm; /* Number of received Probe requests that made it into
+ * the PRQ fifo
+ */
+	uint32	rxcgprsqovfl;	/* Rx Probe Request Queue overflow in the AP */
+ uint32 txcgprsfail; /* Tx Probe Response Fail. AP sent probe response but did
+ * not get ACK
+ */
+ uint32 txcgprssuc; /* Tx Probe Response Success (ACK was received) */
+ uint32 prs_timeout; /* Number of probe requests that were dropped from the PRQ
+ * fifo because a probe response could not be sent out within
+ * the time limit defined in M_PRS_MAXTIME
+ */
+ uint32 rxnack;
+ uint32 frmscons;
+ uint32 txnack;
+ uint32 txglitch_nack; /* obsolete */
+ uint32 txburst; /* obsolete */
+
+ /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */
+ uint32 txfrag; /* dot11TransmittedFragmentCount */
+ uint32 txmulti; /* dot11MulticastTransmittedFrameCount */
+ uint32 txfail; /* dot11FailedCount */
+ uint32 txretry; /* dot11RetryCount */
+ uint32 txretrie; /* dot11MultipleRetryCount */
+ uint32 rxdup; /* dot11FrameduplicateCount */
+ uint32 txrts; /* dot11RTSSuccessCount */
+ uint32 txnocts; /* dot11RTSFailureCount */
+ uint32 txnoack; /* dot11ACKFailureCount */
+ uint32 rxfrag; /* dot11ReceivedFragmentCount */
+ uint32 rxmulti; /* dot11MulticastReceivedFrameCount */
+ uint32 rxcrc; /* dot11FCSErrorCount */
+ uint32 txfrmsnt; /* dot11TransmittedFrameCount (bogus MIB?) */
+ uint32 rxundec; /* dot11WEPUndecryptableCount */
+
+ /* WPA2 counters (see rxundec for DecryptFailureCount) */
+ uint32 tkipmicfaill; /* TKIPLocalMICFailures */
+ uint32 tkipcntrmsr; /* TKIPCounterMeasuresInvoked */
+ uint32 tkipreplay; /* TKIPReplays */
+ uint32 ccmpfmterr; /* CCMPFormatErrors */
+ uint32 ccmpreplay; /* CCMPReplays */
+ uint32 ccmpundec; /* CCMPDecryptErrors */
+ uint32 fourwayfail; /* FourWayHandshakeFailures */
+ uint32 wepundec; /* dot11WEPUndecryptableCount */
+ uint32 wepicverr; /* dot11WEPICVErrorCount */
+ uint32 decsuccess; /* DecryptSuccessCount */
+ uint32 tkipicverr; /* TKIPICVErrorCount */
+ uint32 wepexcluded; /* dot11WEPExcludedCount */
+
+ uint32 rxundec_mcst; /* dot11WEPUndecryptableCount */
+
+ /* WPA2 counters (see rxundec for DecryptFailureCount) */
+ uint32 tkipmicfaill_mcst; /* TKIPLocalMICFailures */
+ uint32 tkipcntrmsr_mcst; /* TKIPCounterMeasuresInvoked */
+ uint32 tkipreplay_mcst; /* TKIPReplays */
+ uint32 ccmpfmterr_mcst; /* CCMPFormatErrors */
+ uint32 ccmpreplay_mcst; /* CCMPReplays */
+ uint32 ccmpundec_mcst; /* CCMPDecryptErrors */
+ uint32 fourwayfail_mcst; /* FourWayHandshakeFailures */
+ uint32 wepundec_mcst; /* dot11WEPUndecryptableCount */
+ uint32 wepicverr_mcst; /* dot11WEPICVErrorCount */
+ uint32 decsuccess_mcst; /* DecryptSuccessCount */
+ uint32 tkipicverr_mcst; /* TKIPICVErrorCount */
+ uint32 wepexcluded_mcst; /* dot11WEPExcludedCount */
+
+ uint32 txchanrej; /* Tx frames suppressed due to channel rejection */
+ uint32 txexptime; /* Tx frames suppressed due to timer expiration */
+ uint32 psmwds; /* Count PSM watchdogs */
+ uint32 phywatchdog; /* Count Phy watchdogs (triggered by ucode) */
+
+ /* MBSS counters, AP only */
+ uint32 prq_entries_handled; /* PRQ entries read in */
+ uint32 prq_undirected_entries; /* which were bcast bss & ssid */
+ uint32 prq_bad_entries; /* which could not be translated to info */
+ uint32 atim_suppress_count; /* TX suppressions on ATIM fifo */
+ uint32 bcn_template_not_ready; /* Template marked in use on send bcn ... */
+ uint32 bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */
+ uint32 late_tbtt_dpc; /* TBTT DPC did not happen in time */
+
+ /* per-rate receive stat counters */
+ uint32 rx1mbps; /* packets rx at 1Mbps */
+ uint32 rx2mbps; /* packets rx at 2Mbps */
+ uint32 rx5mbps5; /* packets rx at 5.5Mbps */
+ uint32 rx6mbps; /* packets rx at 6Mbps */
+ uint32 rx9mbps; /* packets rx at 9Mbps */
+ uint32 rx11mbps; /* packets rx at 11Mbps */
+ uint32 rx12mbps; /* packets rx at 12Mbps */
+ uint32 rx18mbps; /* packets rx at 18Mbps */
+ uint32 rx24mbps; /* packets rx at 24Mbps */
+ uint32 rx36mbps; /* packets rx at 36Mbps */
+ uint32 rx48mbps; /* packets rx at 48Mbps */
+ uint32 rx54mbps; /* packets rx at 54Mbps */
+ uint32 rx108mbps; /* packets rx at 108mbps */
+ uint32 rx162mbps; /* packets rx at 162mbps */
+ uint32 rx216mbps; /* packets rx at 216 mbps */
+ uint32 rx270mbps; /* packets rx at 270 mbps */
+ uint32 rx324mbps; /* packets rx at 324 mbps */
+ uint32 rx378mbps; /* packets rx at 378 mbps */
+ uint32 rx432mbps; /* packets rx at 432 mbps */
+ uint32 rx486mbps; /* packets rx at 486 mbps */
+ uint32 rx540mbps; /* packets rx at 540 mbps */
+
+ /* pkteng rx frame stats */
+ uint32 pktengrxducast; /* unicast frames rxed by the pkteng code */
+ uint32 pktengrxdmcast; /* multicast frames rxed by the pkteng code */
+
+ uint32 rfdisable; /* count of radio disables */
+ uint32 bphy_rxcrsglitch; /* PHY count of bphy glitches */
+
+ uint32 txmpdu_sgi; /* count for sgi transmit */
+ uint32 rxmpdu_sgi; /* count for sgi received */
+ uint32 txmpdu_stbc; /* count for stbc transmit */
+ uint32 rxmpdu_stbc; /* count for stbc received */
+} wl_cnt_ver_six_t;
+
+#define WL_DELTA_STATS_T_VERSION 1 /* current version of wl_delta_stats_t struct */
+
+typedef struct {
+ uint16 version; /* see definition of WL_DELTA_STATS_T_VERSION */
+ uint16 length; /* length of entire structure */
+
+ /* transmit stat counters */
+ uint32 txframe; /* tx data frames */
+ uint32 txbyte; /* tx data bytes */
+ uint32 txretrans; /* tx mac retransmits */
+ uint32 txfail; /* tx failures */
+
+ /* receive stat counters */
+ uint32 rxframe; /* rx data frames */
+ uint32 rxbyte; /* rx data bytes */
+
+ /* per-rate receive stat counters */
+ uint32 rx1mbps; /* packets rx at 1Mbps */
+ uint32 rx2mbps; /* packets rx at 2Mbps */
+ uint32 rx5mbps5; /* packets rx at 5.5Mbps */
+ uint32 rx6mbps; /* packets rx at 6Mbps */
+ uint32 rx9mbps; /* packets rx at 9Mbps */
+ uint32 rx11mbps; /* packets rx at 11Mbps */
+ uint32 rx12mbps; /* packets rx at 12Mbps */
+ uint32 rx18mbps; /* packets rx at 18Mbps */
+ uint32 rx24mbps; /* packets rx at 24Mbps */
+ uint32 rx36mbps; /* packets rx at 36Mbps */
+ uint32 rx48mbps; /* packets rx at 48Mbps */
+ uint32 rx54mbps; /* packets rx at 54Mbps */
+ uint32 rx108mbps; /* packets rx at 108mbps */
+ uint32 rx162mbps; /* packets rx at 162mbps */
+ uint32 rx216mbps; /* packets rx at 216 mbps */
+ uint32 rx270mbps; /* packets rx at 270 mbps */
+ uint32 rx324mbps; /* packets rx at 324 mbps */
+ uint32 rx378mbps; /* packets rx at 378 mbps */
+ uint32 rx432mbps; /* packets rx at 432 mbps */
+ uint32 rx486mbps; /* packets rx at 486 mbps */
+ uint32 rx540mbps; /* packets rx at 540 mbps */
+} wl_delta_stats_t;
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+#define WL_WME_CNT_VERSION 1 /* current version of wl_wme_cnt_t */
+
+typedef struct {
+ uint32 packets;
+ uint32 bytes;
+} wl_traffic_stats_t;
+
+typedef struct {
+ uint16 version; /* see definition of WL_WME_CNT_VERSION */
+ uint16 length; /* length of entire structure */
+
+ wl_traffic_stats_t tx[AC_COUNT]; /* Packets transmitted */
+ wl_traffic_stats_t tx_failed[AC_COUNT]; /* Packets dropped or failed to transmit */
+ wl_traffic_stats_t rx[AC_COUNT]; /* Packets received */
+ wl_traffic_stats_t rx_failed[AC_COUNT]; /* Packets failed to receive */
+
+ wl_traffic_stats_t forward[AC_COUNT]; /* Packets forwarded by AP */
+
+ wl_traffic_stats_t tx_expired[AC_COUNT]; /* packets dropped due to lifetime expiry */
+
+} wl_wme_cnt_t;
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+struct wl_msglevel2 {
+ uint32 low;
+ uint32 high;
+};
+
+typedef struct wl_mkeep_alive_pkt {
+ uint16 version; /* Version for mkeep_alive */
+ uint16 length; /* length of fixed parameters in the structure */
+	uint32	period_msec;	/* repeat interval in milliseconds */
+	uint16	len_bytes;	/* number of bytes of packet data that follow */
+ uint8 keep_alive_id; /* 0 - 3 for N = 4 */
+ uint8 data[1];
+} wl_mkeep_alive_pkt_t;
+
+#define WL_MKEEP_ALIVE_VERSION 1
+#define WL_MKEEP_ALIVE_FIXED_LEN OFFSETOF(wl_mkeep_alive_pkt_t, data)
+#define WL_MKEEP_ALIVE_PRECISION 500
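+/* Illustrative sketch, assuming the usual fixed-header-plus-data layout
+ * implied by the data[1] member: the space needed for an mkeep_alive value is
+ * the fixed portion plus the packet bytes. The function name is hypothetical;
+ * kept under #if 0 so it does not affect the build.
+ */
+#if 0
+static uint
+example_mkeep_alive_size(uint16 pkt_len)
+{
+	return (uint)(WL_MKEEP_ALIVE_FIXED_LEN + pkt_len);	/* bytes to allocate */
+}
+#endif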
+
+#ifdef WLBA
+
+#define WLC_BA_CNT_VERSION 1 /* current version of wlc_ba_cnt_t */
+
+/* block ack related stats */
+typedef struct wlc_ba_cnt {
+ uint16 version; /* WLC_BA_CNT_VERSION */
+ uint16 length; /* length of entire structure */
+
+ /* transmit stat counters */
+ uint32 txpdu; /* pdus sent */
+ uint32 txsdu; /* sdus sent */
+ uint32 txfc; /* tx side flow controlled packets */
+ uint32 txfci; /* tx side flow control initiated */
+ uint32 txretrans; /* retransmitted pdus */
+ uint32 txbatimer; /* ba resend due to timer */
+ uint32 txdrop; /* dropped packets */
+ uint32 txaddbareq; /* addba req sent */
+ uint32 txaddbaresp; /* addba resp sent */
+ uint32 txdelba; /* delba sent */
+ uint32 txba; /* ba sent */
+ uint32 txbar; /* bar sent */
+ uint32 txpad[4]; /* future */
+
+ /* receive side counters */
+ uint32 rxpdu; /* pdus recd */
+ uint32 rxqed; /* pdus buffered before sending up */
+ uint32 rxdup; /* duplicate pdus */
+ uint32 rxnobuf; /* pdus discarded due to no buf */
+ uint32 rxaddbareq; /* addba req recd */
+ uint32 rxaddbaresp; /* addba resp recd */
+ uint32 rxdelba; /* delba recd */
+ uint32 rxba; /* ba recd */
+ uint32 rxbar; /* bar recd */
+ uint32 rxinvba; /* invalid ba recd */
+ uint32 rxbaholes; /* ba recd with holes */
+ uint32 rxunexp; /* unexpected packets */
+ uint32 rxpad[4]; /* future */
+} wlc_ba_cnt_t;
+#endif /* WLBA */
+
+/* structure for per-tid ampdu control */
+struct ampdu_tid_control {
+ uint8 tid; /* tid */
+ uint8 enable; /* enable/disable */
+};
+
+/* structure for identifying ea/tid for sending addba/delba */
+struct ampdu_ea_tid {
+ struct ether_addr ea; /* Station address */
+ uint8 tid; /* tid */
+};
+/* structure for identifying retry/tid for retry_limit_tid/rr_retry_limit_tid */
+struct ampdu_retry_tid {
+ uint8 tid; /* tid */
+ uint8 retry; /* retry value */
+};
+
+/* Different discovery modes for dpt */
+#define DPT_DISCOVERY_MANUAL 0x01 /* manual discovery mode */
+#define DPT_DISCOVERY_AUTO 0x02 /* auto discovery mode */
+#define DPT_DISCOVERY_SCAN 0x04 /* scan-based discovery mode */
+
+/* different path selection values */
+#define DPT_PATHSEL_AUTO 0 /* auto mode for path selection */
+#define DPT_PATHSEL_DIRECT 1 /* always use direct DPT path */
+#define DPT_PATHSEL_APPATH 2 /* always use AP path */
+
+/* different ops for deny list */
+#define DPT_DENY_LIST_ADD 1 /* add to dpt deny list */
+#define DPT_DENY_LIST_REMOVE 2 /* remove from dpt deny list */
+
+/* different ops for manual end point */
+#define DPT_MANUAL_EP_CREATE 1 /* create manual dpt endpoint */
+#define DPT_MANUAL_EP_MODIFY 2 /* modify manual dpt endpoint */
+#define DPT_MANUAL_EP_DELETE 3 /* delete manual dpt endpoint */
+
+/* structure for dpt iovars */
+typedef struct dpt_iovar {
+ struct ether_addr ea; /* Station address */
+ uint8 mode; /* mode: depends on iovar */
+ uint32 pad; /* future */
+} dpt_iovar_t;
+
+/* flags to indicate DPT status */
+#define DPT_STATUS_ACTIVE 0x01 /* link active (though may be suspended) */
+#define DPT_STATUS_AES 0x02 /* link secured through AES encryption */
+#define DPT_STATUS_FAILED 0x04 /* DPT link failed */
+
+#define DPT_FNAME_LEN 48 /* Max length of friendly name */
+
+typedef struct dpt_status {
+ uint8 status; /* flags to indicate status */
+ uint8 fnlen; /* length of friendly name */
+ uchar name[DPT_FNAME_LEN]; /* friendly name */
+ uint32 rssi; /* RSSI of the link */
+ sta_info_t sta; /* sta info */
+} dpt_status_t;
+
+/* structure for dpt list */
+typedef struct dpt_list {
+ uint32 num; /* number of entries in struct */
+ dpt_status_t status[1]; /* per station info */
+} dpt_list_t;
+
+/* structure for dpt friendly name */
+typedef struct dpt_fname {
+ uint8 len; /* length of friendly name */
+ uchar name[DPT_FNAME_LEN]; /* friendly name */
+} dpt_fname_t;
+
+#define BDD_FNAME_LEN 32 /* Max length of friendly name */
+typedef struct bdd_fname {
+ uint8 len; /* length of friendly name */
+ uchar name[BDD_FNAME_LEN]; /* friendly name */
+} bdd_fname_t;
+
+/* structure for addts arguments */
+/* For ioctls that take a list of TSPEC */
+struct tslist {
+ int count; /* number of tspecs */
+ struct tsinfo_arg tsinfo[1]; /* variable length array of tsinfo */
+};
+
+#ifdef WLTDLS
+/* different ops for manual end point */
+#define TDLS_MANUAL_EP_CREATE		1	/* create manual tdls endpoint */
+#define TDLS_MANUAL_EP_MODIFY		2	/* modify manual tdls endpoint */
+#define TDLS_MANUAL_EP_DELETE		3	/* delete manual tdls endpoint */
+#define TDLS_MANUAL_EP_PM		4	/* put tdls endpoint in PM mode */
+#define TDLS_MANUAL_EP_WAKE		5	/* wake up tdls endpoint from PM */
+#define TDLS_MANUAL_EP_DISCOVERY 6 /* discover if endpoint is TDLS capable */
+#define TDLS_MANUAL_EP_CHSW 7 /* channel switch */
+
+/* structure for tdls iovars */
+typedef struct tdls_iovar {
+ struct ether_addr ea; /* Station address */
+ uint8 mode; /* mode: depends on iovar */
+ chanspec_t chanspec;
+ uint32 pad; /* future */
+} tdls_iovar_t;
+
+/* modes */
+#define TDLS_WFD_IE_TX 0
+#define TDLS_WFD_IE_RX 1
+#define TDLS_WFD_IE_SIZE 255
+/* structure for tdls wfd ie */
+typedef struct tdls_wfd_ie_iovar {
+ struct ether_addr ea; /* Station address */
+ uint8 mode;
+ uint8 length;
+ uint8 data[TDLS_WFD_IE_SIZE];
+} tdls_wfd_ie_iovar_t;
+#endif /* WLTDLS */
+
+/* structure for addts/delts arguments */
+typedef struct tspec_arg {
+ uint16 version; /* see definition of TSPEC_ARG_VERSION */
+ uint16 length; /* length of entire structure */
+ uint flag; /* bit field */
+ /* TSPEC Arguments */
+ struct tsinfo_arg tsinfo; /* TS Info bit field */
+ uint16 nom_msdu_size; /* (Nominal or fixed) MSDU Size (bytes) */
+ uint16 max_msdu_size; /* Maximum MSDU Size (bytes) */
+ uint min_srv_interval; /* Minimum Service Interval (us) */
+ uint max_srv_interval; /* Maximum Service Interval (us) */
+ uint inactivity_interval; /* Inactivity Interval (us) */
+ uint suspension_interval; /* Suspension Interval (us) */
+ uint srv_start_time; /* Service Start Time (us) */
+ uint min_data_rate; /* Minimum Data Rate (bps) */
+ uint mean_data_rate; /* Mean Data Rate (bps) */
+ uint peak_data_rate; /* Peak Data Rate (bps) */
+ uint max_burst_size; /* Maximum Burst Size (bytes) */
+ uint delay_bound; /* Delay Bound (us) */
+ uint min_phy_rate; /* Minimum PHY Rate (bps) */
+ uint16 surplus_bw; /* Surplus Bandwidth Allowance (range 1.0 to 8.0) */
+ uint16 medium_time; /* Medium Time (32 us/s periods) */
+ uint8 dialog_token; /* dialog token */
+} tspec_arg_t;
+
+/* tspec arg for desired station */
+typedef struct tspec_per_sta_arg {
+ struct ether_addr ea;
+ struct tspec_arg ts;
+} tspec_per_sta_arg_t;
+
+/* structure for max bandwidth for each access category */
+typedef struct wme_max_bandwidth {
+ uint32 ac[AC_COUNT]; /* max bandwidth for each access category */
+} wme_max_bandwidth_t;
+
+#define WL_WME_MBW_PARAMS_IO_BYTES (sizeof(wme_max_bandwidth_t))
+
+#define	TSPEC_ARG_VERSION		2	/* current version of wl_tspec_arg_t struct */
+#define TSPEC_ARG_LENGTH 55 /* argument length from tsinfo to medium_time */
+#define TSPEC_DEFAULT_DIALOG_TOKEN 42 /* default dialog token */
+#define TSPEC_DEFAULT_SBW_FACTOR 0x3000 /* default surplus bw */
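+/* Illustrative sketch: TSPEC_DEFAULT_SBW_FACTOR is consistent with the 802.11
+ * Surplus Bandwidth Allowance encoding (unsigned fixed point with 13
+ * fractional bits, so 0x2000 == 1.0 and 0x3000 == 1.5). That encoding is an
+ * assumption, not something this header defines. The function name is
+ * hypothetical; kept under #if 0 so it does not affect the build.
+ */
+#if 0
+static uint16
+example_surplus_bw(double factor)
+{
+	return (uint16)(factor * 0x2000);	/* e.g. 1.5 -> 0x3000, assuming 3.13 fixed point */
+}
+#endif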
+
+
+#define WL_WOWL_KEEPALIVE_MAX_PACKET_SIZE 80
+#define WLC_WOWL_MAX_KEEPALIVE 2
+
+/* define for flag */
+#define TSPEC_PENDING 0 /* TSPEC pending */
+#define TSPEC_ACCEPTED 1 /* TSPEC accepted */
+#define TSPEC_REJECTED 2 /* TSPEC rejected */
+#define TSPEC_UNKNOWN 3 /* TSPEC unknown */
+#define TSPEC_STATUS_MASK 7 /* TSPEC status mask */
+
+
+/* Software feature flag defines used by wlfeatureflag */
+#ifdef WLAFTERBURNER
+#define WL_SWFL_ABBFL 0x0001 /* Allow Afterburner on systems w/o hardware BFL */
+#define WL_SWFL_ABENCORE 0x0002 /* Allow AB on non-4318E chips */
+#endif /* WLAFTERBURNER */
+#define WL_SWFL_NOHWRADIO 0x0004
+#define WL_SWFL_FLOWCONTROL 0x0008 /* Enable backpressure to OS stack */
+#define WL_SWFL_WLBSSSORT 0x0010 /* Per-port supports sorting of BSS */
+
+#define WL_LIFETIME_MAX 0xFFFF /* Max value in ms */
+
+/* Packet lifetime configuration per ac */
+typedef struct wl_lifetime {
+ uint32 ac; /* access class */
+ uint32 lifetime; /* Packet lifetime value in ms */
+} wl_lifetime_t;
+
+/* Channel Switch Announcement param */
+typedef struct wl_chan_switch {
+ uint8 mode; /* value 0 or 1 */
+ uint8 count; /* count # of beacons before switching */
+ chanspec_t chspec; /* chanspec */
+ uint8 reg; /* regulatory class */
+} wl_chan_switch_t;
+
+/* Roaming trigger definitions for WLC_SET_ROAM_TRIGGER.
+ *
+ * (-100 < value < 0) value is used directly as a roaming trigger in dBm
+ * (0 <= value) value specifies a logical roaming trigger level from
+ * the list below
+ *
+ * WLC_GET_ROAM_TRIGGER always returns roaming trigger value in dBm, never
+ * the logical roam trigger value.
+ */
+#define WLC_ROAM_TRIGGER_DEFAULT 0 /* default roaming trigger */
+#define WLC_ROAM_TRIGGER_BANDWIDTH 1 /* optimize for bandwidth roaming trigger */
+#define WLC_ROAM_TRIGGER_DISTANCE 2 /* optimize for distance roaming trigger */
+#define WLC_ROAM_TRIGGER_AUTO 3 /* auto-detect environment */
+#define WLC_ROAM_TRIGGER_MAX_VALUE 3 /* max. valid value */
+
+#define WLC_ROAM_NEVER_ROAM_TRIGGER (-100) /* Avoid Roaming by setting a large value */
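+/* Illustrative sketch of the value ranges documented above: a raw dBm trigger
+ * lies in (-100, 0), a logical trigger in [0, WLC_ROAM_TRIGGER_MAX_VALUE].
+ * The function name is hypothetical; kept under #if 0 so it does not affect
+ * the build.
+ */
+#if 0
+static int
+example_roam_trigger_valid(int v)
+{
+	return (v > -100 && v < 0) ||
+		(v >= 0 && v <= WLC_ROAM_TRIGGER_MAX_VALUE);
+}
+#endif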
+
+/* Preferred Network Offload (PNO, formerly PFN) defines */
+#define WPA_AUTH_PFN_ANY 0xffffffff /* for PFN, match only ssid */
+
+enum {
+ PFN_LIST_ORDER,
+ PFN_RSSI
+};
+
+enum {
+ DISABLE,
+ ENABLE
+};
+
+enum {
+ OFF_ADAPT,
+ SMART_ADAPT,
+ STRICT_ADAPT,
+ SLOW_ADAPT
+};
+
+#define SORT_CRITERIA_BIT 0
+#define AUTO_NET_SWITCH_BIT 1
+#define ENABLE_BKGRD_SCAN_BIT 2
+#define IMMEDIATE_SCAN_BIT 3
+#define AUTO_CONNECT_BIT 4
+#define ENABLE_BD_SCAN_BIT 5
+#define ENABLE_ADAPTSCAN_BIT 6
+#define IMMEDIATE_EVENT_BIT 8
+#define SUPPRESS_SSID_BIT 9
+#define ENABLE_NET_OFFLOAD_BIT 10
+
+#define SORT_CRITERIA_MASK 0x0001
+#define AUTO_NET_SWITCH_MASK 0x0002
+#define ENABLE_BKGRD_SCAN_MASK 0x0004
+#define IMMEDIATE_SCAN_MASK 0x0008
+#define AUTO_CONNECT_MASK 0x0010
+
+#define ENABLE_BD_SCAN_MASK 0x0020
+#define ENABLE_ADAPTSCAN_MASK 0x00c0
+#define IMMEDIATE_EVENT_MASK 0x0100
+#define SUPPRESS_SSID_MASK 0x0200
+#define ENABLE_NET_OFFLOAD_MASK 0x0400
+
+#define PFN_VERSION 2
+#define PFN_SCANRESULT_VERSION 1
+#define MAX_PFN_LIST_COUNT 16
+
+#define PFN_COMPLETE 1
+#define PFN_INCOMPLETE 0
+
+#define DEFAULT_BESTN 2
+#define DEFAULT_MSCAN 0
+#define DEFAULT_REPEAT 10
+#define DEFAULT_EXP 2
+
+/* PFN network info structure */
+typedef struct wl_pfn_subnet_info {
+ struct ether_addr BSSID;
+ uint8 channel; /* channel number only */
+ uint8 SSID_len;
+ uint8 SSID[32];
+} wl_pfn_subnet_info_t;
+
+typedef struct wl_pfn_net_info {
+ wl_pfn_subnet_info_t pfnsubnet;
+ int16 RSSI; /* receive signal strength (in dBm) */
+ uint16 timestamp; /* age in seconds */
+} wl_pfn_net_info_t;
+
+typedef struct wl_pfn_scanresults {
+ uint32 version;
+ uint32 status;
+ uint32 count;
+ wl_pfn_net_info_t netinfo[1];
+} wl_pfn_scanresults_t;
+
+/* PFN data structure */
+typedef struct wl_pfn_param {
+ int32 version; /* PNO parameters version */
+ int32 scan_freq; /* Scan frequency */
+ int32 lost_network_timeout; /* Timeout in sec. to declare
+ * discovered network as lost
+ */
+	int16 flags;			/* Bit field to control PFN features such as
+					 * the sort criteria, auto network switch
+					 * and background scan
+					 */
+ int16 rssi_margin; /* Margin to avoid jitter for choosing a
+ * PFN based on RSSI sort criteria
+ */
+ uint8 bestn; /* number of best networks in each scan */
+ uint8 mscan; /* number of scans recorded */
+ uint8 repeat; /* Minimum number of scan intervals
+ * before scan frequency changes in adaptive scan
+ */
+ uint8 exp; /* Exponent of 2 for maximum scan interval */
+ int32 slow_freq; /* slow scan period */
+} wl_pfn_param_t;
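+
+/* Editor's note: hypothetical initialisation sketch, not part of the original
+ * sources.  The numeric choices (scan_freq, timeout) are illustrative; only the
+ * bit/mask names come from the definitions above.
+ */
+#if 0	/* illustrative only */
+static void
+example_fill_pfn_param(wl_pfn_param_t *p)
+{
+	bzero(p, sizeof(*p));
+	p->version = PFN_VERSION;
+	p->scan_freq = 60;			/* example: scan every 60 s */
+	p->lost_network_timeout = 180;		/* example: 3 minutes */
+	p->bestn = DEFAULT_BESTN;
+	p->mscan = DEFAULT_MSCAN;
+	p->repeat = DEFAULT_REPEAT;
+	p->exp = DEFAULT_EXP;
+	/* sort by RSSI, scan immediately, use "smart" adaptive scanning */
+	p->flags = (PFN_RSSI << SORT_CRITERIA_BIT) |
+		(1 << IMMEDIATE_SCAN_BIT) |
+		((SMART_ADAPT << ENABLE_ADAPTSCAN_BIT) & ENABLE_ADAPTSCAN_MASK);
+}
+#endif	/* illustrative only */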
+
+typedef struct wl_pfn_bssid {
+ struct ether_addr macaddr;
+ /* Bit4: suppress_lost, Bit3: suppress_found */
+ uint16 flags;
+} wl_pfn_bssid_t;
+#define WL_PFN_SUPPRESSFOUND_MASK 0x08
+#define WL_PFN_SUPPRESSLOST_MASK 0x10
+
+typedef struct wl_pfn_cfg {
+ uint32 reporttype;
+ int32 channel_num;
+ uint16 channel_list[WL_NUMCHANNELS];
+} wl_pfn_cfg_t;
+#define WL_PFN_REPORT_ALLNET 0
+#define WL_PFN_REPORT_SSIDNET 1
+#define WL_PFN_REPORT_BSSIDNET 2
+
+typedef struct wl_pfn {
+ wlc_ssid_t ssid; /* ssid name and its length */
+ int32 flags; /* bit2: hidden */
+ int32 infra; /* BSS Vs IBSS */
+ int32 auth; /* Open Vs Closed */
+ int32 wpa_auth; /* WPA type */
+ int32 wsec; /* wsec value */
+} wl_pfn_t;
+#define WL_PFN_HIDDEN_BIT 2
+#define PNO_SCAN_MAX_FW 508*1000 /* max scan time in msec */
+#define PNO_SCAN_MAX_FW_SEC PNO_SCAN_MAX_FW/1000 /* max scan time in SEC */
+#define PNO_SCAN_MIN_FW_SEC 10 /* min scan time in SEC */
+#define WL_PFN_HIDDEN_MASK 0x4
+
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+/* TCP Checksum Offload defines */
+#define TOE_TX_CSUM_OL 0x00000001
+#define TOE_RX_CSUM_OL 0x00000002
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+/* TCP Checksum Offload error injection for testing */
+#define TOE_ERRTEST_TX_CSUM 0x00000001
+#define TOE_ERRTEST_RX_CSUM 0x00000002
+#define TOE_ERRTEST_RX_CSUM2 0x00000004
+
+struct toe_ol_stats_t {
+ /* Num of tx packets that don't need to be checksummed */
+ uint32 tx_summed;
+
+ /* Num of tx packets where checksum is filled by offload engine */
+ uint32 tx_iph_fill;
+ uint32 tx_tcp_fill;
+ uint32 tx_udp_fill;
+ uint32 tx_icmp_fill;
+
+ /* Num of rx packets where toe finds out if checksum is good or bad */
+ uint32 rx_iph_good;
+ uint32 rx_iph_bad;
+ uint32 rx_tcp_good;
+ uint32 rx_tcp_bad;
+ uint32 rx_udp_good;
+ uint32 rx_udp_bad;
+ uint32 rx_icmp_good;
+ uint32 rx_icmp_bad;
+
+ /* Num of tx packets in which csum error is injected */
+ uint32 tx_tcp_errinj;
+ uint32 tx_udp_errinj;
+ uint32 tx_icmp_errinj;
+
+ /* Num of rx packets in which csum error is injected */
+ uint32 rx_tcp_errinj;
+ uint32 rx_udp_errinj;
+ uint32 rx_icmp_errinj;
+};
+
+/* ARP Offload feature flags for arp_ol iovar */
+#define ARP_OL_AGENT 0x00000001
+#define ARP_OL_SNOOP 0x00000002
+#define ARP_OL_HOST_AUTO_REPLY 0x00000004
+#define ARP_OL_PEER_AUTO_REPLY 0x00000008
+
+/* ARP Offload error injection */
+#define ARP_ERRTEST_REPLY_PEER 0x1
+#define ARP_ERRTEST_REPLY_HOST 0x2
+
+#define ARP_MULTIHOMING_MAX 8 /* Maximum local host IP addresses */
+#define ND_MULTIHOMING_MAX 8 /* Maximum local host IP addresses */
+
+/* Arp offload statistic counts */
+struct arp_ol_stats_t {
+ uint32 host_ip_entries; /* Host IP table addresses (more than one if multihomed) */
+ uint32 host_ip_overflow; /* Host IP table additions skipped due to overflow */
+
+ uint32 arp_table_entries; /* ARP table entries */
+ uint32 arp_table_overflow; /* ARP table additions skipped due to overflow */
+
+ uint32 host_request; /* ARP requests from host */
+ uint32 host_reply; /* ARP replies from host */
+ uint32 host_service; /* ARP requests from host serviced by ARP Agent */
+
+ uint32 peer_request; /* ARP requests received from network */
+ uint32 peer_request_drop; /* ARP requests from network that were dropped */
+ uint32 peer_reply; /* ARP replies received from network */
+ uint32 peer_reply_drop; /* ARP replies from network that were dropped */
+ uint32 peer_service; /* ARP requests from network serviced by ARP Agent */
+};
+
+/* NS offload statistic counts */
+struct nd_ol_stats_t {
+ uint32 host_ip_entries; /* Host IP table addresses (more than one if multihomed) */
+ uint32 host_ip_overflow; /* Host IP table additions skipped due to overflow */
+ uint32 peer_request; /* NS requests received from network */
+ uint32 peer_request_drop; /* NS requests from network that were dropped */
+ uint32 peer_reply_drop; /* NA replies from network that were dropped */
+ uint32 peer_service; /* NS requests from network serviced by firmware */
+};
+
+/*
+ * Keep-alive packet offloading.
+ */
+
+/* NAT keep-alive packets format: specifies the re-transmission period, the packet
+ * length, and packet contents.
+ */
+typedef struct wl_keep_alive_pkt {
+ uint32 period_msec; /* Retransmission period (0 to disable packet re-transmits) */
+ uint16 len_bytes; /* Size of packet to transmit (0 to disable packet re-transmits) */
+ uint8 data[1]; /* Variable length packet to transmit. Contents should include
+ * entire ethernet packet (enet header, IP header, UDP header,
+ * and UDP payload) in network byte order.
+ */
+} wl_keep_alive_pkt_t;
+
+#define WL_KEEP_ALIVE_FIXED_LEN OFFSETOF(wl_keep_alive_pkt_t, data)
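+
+/* Editor's note: hypothetical sketch of building a variable-length keep-alive
+ * record, not part of the original sources.  MALLOC()/MFREE() from osl.h and
+ * the 30 s period are assumptions for illustration.
+ */
+#if 0	/* illustrative only */
+static int
+example_build_keep_alive(osl_t *osh, const uint8 *frame, uint16 frame_len)
+{
+	uint buf_len = WL_KEEP_ALIVE_FIXED_LEN + frame_len;
+	wl_keep_alive_pkt_t *ka = (wl_keep_alive_pkt_t *)MALLOC(osh, buf_len);
+
+	if (ka == NULL)
+		return BCME_NOMEM;
+
+	ka->period_msec = 30000;	/* retransmit every 30 s; 0 disables */
+	ka->len_bytes = frame_len;	/* full ethernet frame, network byte order */
+	bcopy(frame, ka->data, frame_len);
+
+	/* ...hand the buffer to the driver's keep-alive iovar, then release it... */
+	MFREE(osh, ka, buf_len);
+	return BCME_OK;
+}
+#endif	/* illustrative only */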
+
+/*
+ * Dongle pattern matching filter.
+ */
+
+/* Packet filter types. Currently, only pattern matching is supported. */
+typedef enum wl_pkt_filter_type {
+ WL_PKT_FILTER_TYPE_PATTERN_MATCH /* Pattern matching filter */
+} wl_pkt_filter_type_t;
+
+#define WL_PKT_FILTER_TYPE wl_pkt_filter_type_t
+
+/* Pattern matching filter. Specifies an offset within received packets to
+ * start matching, the pattern to match, the size of the pattern, and a bitmask
+ * that indicates which bits within the pattern should be matched.
+ */
+typedef struct wl_pkt_filter_pattern {
+ uint32 offset; /* Offset within received packet to start pattern matching.
+ * Offset '0' is the first byte of the ethernet header.
+ */
+ uint32 size_bytes; /* Size of the pattern. Bitmask must be the same size. */
+ uint8 mask_and_pattern[1]; /* Variable length mask and pattern data. mask starts
+ * at offset 0. Pattern immediately follows mask.
+ */
+} wl_pkt_filter_pattern_t;
+
+/* IOVAR "pkt_filter_add" parameter. Used to install packet filters. */
+typedef struct wl_pkt_filter {
+ uint32 id; /* Unique filter id, specified by app. */
+ uint32 type; /* Filter type (WL_PKT_FILTER_TYPE_xxx). */
+ uint32 negate_match; /* Negate the result of filter matches */
+ union { /* Filter definitions */
+ wl_pkt_filter_pattern_t pattern; /* Pattern matching filter */
+ } u;
+} wl_pkt_filter_t;
+
+#define WL_PKT_FILTER_FIXED_LEN OFFSETOF(wl_pkt_filter_t, u)
+#define WL_PKT_FILTER_PATTERN_FIXED_LEN OFFSETOF(wl_pkt_filter_pattern_t, mask_and_pattern)
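+
+/* Editor's note: hypothetical sketch showing the expected memory layout of a
+ * pattern filter (mask bytes immediately followed by pattern bytes); not part
+ * of the original sources.  The caller-supplied buffer and the example id are
+ * assumptions for illustration.
+ */
+#if 0	/* illustrative only */
+static uint
+example_fill_pattern_filter(wl_pkt_filter_t *f, const uint8 *mask,
+	const uint8 *pattern, uint32 nbytes)
+{
+	f->id = 100;			/* arbitrary, application-chosen id */
+	f->type = WL_PKT_FILTER_TYPE_PATTERN_MATCH;
+	f->negate_match = 0;
+	f->u.pattern.offset = 0;	/* match from the start of the ethernet header */
+	f->u.pattern.size_bytes = nbytes;
+	bcopy(mask, f->u.pattern.mask_and_pattern, nbytes);
+	bcopy(pattern, &f->u.pattern.mask_and_pattern[nbytes], nbytes);
+
+	/* total length handed to the "pkt_filter_add" iovar */
+	return WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * nbytes;
+}
+#endif	/* illustrative only */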
+
+/* IOVAR "pkt_filter_enable" parameter. */
+typedef struct wl_pkt_filter_enable {
+ uint32 id; /* Unique filter id */
+ uint32 enable; /* Enable/disable bool */
+} wl_pkt_filter_enable_t;
+
+/* IOVAR "pkt_filter_list" parameter. Used to retrieve a list of installed filters. */
+typedef struct wl_pkt_filter_list {
+ uint32 num; /* Number of installed packet filters */
+ wl_pkt_filter_t filter[1]; /* Variable array of packet filters. */
+} wl_pkt_filter_list_t;
+
+#define WL_PKT_FILTER_LIST_FIXED_LEN OFFSETOF(wl_pkt_filter_list_t, filter)
+
+/* IOVAR "pkt_filter_stats" parameter. Used to retrieve debug statistics. */
+typedef struct wl_pkt_filter_stats {
+ uint32 num_pkts_matched; /* # filter matches for specified filter id */
+ uint32 num_pkts_forwarded; /* # packets fwded from dongle to host for all filters */
+ uint32 num_pkts_discarded; /* # packets discarded by dongle for all filters */
+} wl_pkt_filter_stats_t;
+
+/* Sequential Commands ioctl */
+typedef struct wl_seq_cmd_ioctl {
+ uint32 cmd; /* common ioctl definition */
+ uint32 len; /* length of user buffer */
+} wl_seq_cmd_ioctl_t;
+
+#define WL_SEQ_CMD_ALIGN_BYTES 4
+
+/* These are the set of get IOCTLs that should be allowed when using
+ * IOCTL sequence commands. These are issued implicitly by wl.exe each time
+ * it is invoked. We never want to buffer these, or else wl.exe will stop working.
+ */
+#define WL_SEQ_CMDS_GET_IOCTL_FILTER(cmd) \
+ (((cmd) == WLC_GET_MAGIC) || \
+ ((cmd) == WLC_GET_VERSION) || \
+ ((cmd) == WLC_GET_AP) || \
+ ((cmd) == WLC_GET_INSTANCE))
+
+/*
+ * Packet engine interface
+ */
+
+#define WL_PKTENG_PER_TX_START 0x01
+#define WL_PKTENG_PER_TX_STOP 0x02
+#define WL_PKTENG_PER_RX_START 0x04
+#define WL_PKTENG_PER_RX_WITH_ACK_START 0x05
+#define WL_PKTENG_PER_TX_WITH_ACK_START 0x06
+#define WL_PKTENG_PER_RX_STOP 0x08
+#define WL_PKTENG_PER_MASK 0xff
+
+#define WL_PKTENG_SYNCHRONOUS 0x100 /* synchronous flag */
+
+typedef struct wl_pkteng {
+ uint32 flags;
+ uint32 delay; /* Inter-packet delay */
+ uint32 nframes; /* Number of frames */
+ uint32 length; /* Packet length */
+ uint8 seqno; /* Enable/disable sequence no. */
+ struct ether_addr dest; /* Destination address */
+ struct ether_addr src; /* Source address */
+} wl_pkteng_t;
+
+#define NUM_80211b_RATES 4
+#define NUM_80211ag_RATES 8
+#define NUM_80211n_RATES 32
+#define NUM_80211_RATES (NUM_80211b_RATES+NUM_80211ag_RATES+NUM_80211n_RATES)
+typedef struct wl_pkteng_stats {
+ uint32 lostfrmcnt; /* RX PER test: no of frames lost (skip seqno) */
+ int32 rssi; /* RSSI */
+ int32 snr; /* signal to noise ratio */
+ uint16 rxpktcnt[NUM_80211_RATES+1];
+} wl_pkteng_stats_t;
+
+
+#define WL_WOWL_MAGIC (1 << 0) /* Wakeup on Magic packet */
+#define WL_WOWL_NET (1 << 1) /* Wakeup on Netpattern */
+#define WL_WOWL_DIS (1 << 2) /* Wakeup on loss-of-link due to Disassoc/Deauth */
+#define WL_WOWL_RETR (1 << 3) /* Wakeup on retrograde TSF */
+#define WL_WOWL_BCN (1 << 4) /* Wakeup on loss of beacon */
+#define WL_WOWL_TST (1 << 5) /* Wakeup after test */
+#define WL_WOWL_M1 (1 << 6) /* Wakeup after PTK refresh */
+#define WL_WOWL_EAPID (1 << 7) /* Wakeup after receipt of EAP-Identity Req */
+#define WL_WOWL_PME_GPIO (1 << 8) /* Wakeind via PME(0) or GPIO(1) */
+#define WL_WOWL_NEEDTKIP1 (1 << 9) /* need tkip phase 1 key to be updated by the driver */
+#define WL_WOWL_GTK_FAILURE (1 << 10) /* enable wakeup if GTK fails */
+#define WL_WOWL_EXTMAGPAT (1 << 11) /* support extended magic packets */
+#define WL_WOWL_ARPOFFLOAD (1 << 12) /* support ARP/NS/keepalive offloading */
+#define WL_WOWL_WPA2 (1 << 13) /* read protocol version for EAPOL frames */
+#define WL_WOWL_KEYROT (1 << 14) /* If the bit is set, use key rotation */
+#define WL_WOWL_BCAST (1 << 15) /* If the bit is set, frm received was bcast frame */
+
+#define MAGIC_PKT_MINLEN 102 /* Magic pkt min length is 6 * 0xFF + 16 * ETHER_ADDR_LEN */
+
+#define WOWL_PATTEN_TYPE_ARP (1 << 0) /* ARP offload Pattern */
+#define WOWL_PATTEN_TYPE_NA (1 << 1) /* NA offload Pattern */
+
+typedef struct {
+ uint32 masksize; /* Size of the mask in #of bytes */
+ uint32 offset; /* Offset to start looking for the packet in # of bytes */
+ uint32 patternoffset; /* Offset of start of pattern in the structure */
+ uint32 patternsize; /* Size of the pattern itself in #of bytes */
+ uint32 id; /* id */
+ uint32 reasonsize; /* Size of the wakeup reason code */
+ uint32 flags; /* Flags to tell the pattern type and other properties */
+ /* Mask follows the structure above */
+ /* Pattern follows the mask is at 'patternoffset' from the start */
+} wl_wowl_pattern_t;
+
+typedef struct {
+ uint count;
+ wl_wowl_pattern_t pattern[1];
+} wl_wowl_pattern_list_t;
+
+typedef struct {
+ uint8 pci_wakeind; /* Whether PCI PMECSR PMEStatus bit was set */
+ uint16 ucode_wakeind; /* What wakeup-event indication was set by ucode */
+} wl_wowl_wakeind_t;
+
+
+/* per AC rate control related data structure */
+typedef struct wl_txrate_class {
+ uint8 init_rate;
+ uint8 min_rate;
+ uint8 max_rate;
+} wl_txrate_class_t;
+
+
+
+/* Overlap BSS Scan parameters default, minimum, maximum */
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_DEFAULT 20 /* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_MIN 5 /* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_MAX 1000 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_DEFAULT 10 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_MIN 10 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_MAX 1000 /* unit TU */
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_DEFAULT 300 /* unit Sec */
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MIN 10 /* unit Sec */
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MAX 900 /* unit Sec */
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_DEFAULT 5
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MIN 5
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MAX 100
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_DEFAULT 200 /* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MIN 200 /* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MAX 10000 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_DEFAULT 20 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MIN 20 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MAX 10000 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_DEFAULT 25 /* unit percent */
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MIN 0 /* unit percent */
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MAX 100 /* unit percent */
+
+/* structure for Overlap BSS scan arguments */
+typedef struct wl_obss_scan_arg {
+ int16 passive_dwell;
+ int16 active_dwell;
+ int16 bss_widthscan_interval;
+ int16 passive_total;
+ int16 active_total;
+ int16 chanwidth_transition_delay;
+ int16 activity_threshold;
+} wl_obss_scan_arg_t;
+
+#define WL_OBSS_SCAN_PARAM_LEN sizeof(wl_obss_scan_arg_t)
+#define WL_MIN_NUM_OBSS_SCAN_ARG 7 /* minimum number of arguments required for OBSS Scan */
+
+#define WL_COEX_INFO_MASK 0x07
+#define WL_COEX_INFO_REQ 0x01
+#define WL_COEX_40MHZ_INTOLERANT 0x02
+#define WL_COEX_WIDTH20 0x04
+
+#define WLC_RSSI_INVALID 0 /* invalid RSSI value */
+
+#define MAX_RSSI_LEVELS 8
+
+/* RSSI event notification configuration. */
+typedef struct wl_rssi_event {
+ uint32 rate_limit_msec; /* # of events posted to application will be limited to
+ * one per specified period (0 to disable rate limit).
+ */
+ uint8 num_rssi_levels; /* Number of entries in rssi_levels[] below */
+ int8 rssi_levels[MAX_RSSI_LEVELS]; /* Variable number of RSSI levels. An event
+ * will be posted each time the RSSI of received
+ * beacons/packets crosses a level.
+ */
+} wl_rssi_event_t;
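+
+/* Editor's note: hypothetical sketch, not part of the original sources; the
+ * thresholds and rate limit are example values only.
+ */
+#if 0	/* illustrative only */
+static void
+example_fill_rssi_event(wl_rssi_event_t *ev)
+{
+	ev->rate_limit_msec = 500;	/* at most one event per 500 ms */
+	ev->num_rssi_levels = 3;
+	ev->rssi_levels[0] = -85;	/* dBm thresholds, ascending */
+	ev->rssi_levels[1] = -75;
+	ev->rssi_levels[2] = -65;
+}
+#endif	/* illustrative only */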
+
+typedef struct wl_action_obss_coex_req {
+ uint8 info;
+ uint8 num;
+ uint8 ch_list[1];
+} wl_action_obss_coex_req_t;
+
+
+/* IOVar parameter block for small MAC address array with type indicator */
+#define WL_IOV_MAC_PARAM_LEN 4
+
+#define WL_IOV_PKTQ_LOG_PRECS 16
+
+typedef struct {
+ uint32 num_addrs;
+ char addr_type[WL_IOV_MAC_PARAM_LEN];
+ struct ether_addr ea[WL_IOV_MAC_PARAM_LEN];
+} wl_iov_mac_params_t;
+
+
+/* Parameter block for PKTQ_LOG statistics */
+typedef struct {
+ uint32 requested; /* packets requested to be stored */
+ uint32 stored; /* packets stored */
+ uint32 saved; /* packets saved,
+ because a lower-priority queue has given away one packet
+ */
+ uint32 selfsaved; /* packets saved,
+ because an older packet from the same queue has been dropped
+ */
+ uint32 full_dropped; /* packets dropped,
+ because pktq is full with higher precedence packets
+ */
+ uint32 dropped; /* packets dropped because pktq per that precedence is full */
+ uint32 sacrificed; /* packets dropped,
+ in order to save a packet from a higher-priority queue
+ */
+ uint32 busy; /* packets dropped because of a hardware/transmission error */
+ uint32 retry; /* packets re-sent because they were not received */
+ uint32 ps_retry; /* packets retried again prior to moving to power save mode */
+ uint32 retry_drop; /* packets finally dropped after retry limit */
+ uint32 max_avail; /* the high-water mark of the queue capacity for packets -
+ goes to zero as queue fills
+ */
+ uint32 max_used; /* the high-water mark of the queue utilisation for packets -
+ increases with use ('inverse' of max_avail)
+ */
+ uint32 queue_capacity; /* the maximum capacity of the queue */
+} pktq_log_counters_v01_t;
+
+#define sacrified sacrificed
+
+typedef struct {
+ uint8 num_prec[WL_IOV_MAC_PARAM_LEN];
+ pktq_log_counters_v01_t counters[WL_IOV_MAC_PARAM_LEN][WL_IOV_PKTQ_LOG_PRECS];
+ char headings[1];
+} pktq_log_format_v01_t;
+
+
+typedef struct {
+ uint32 version;
+ wl_iov_mac_params_t params;
+ union {
+ pktq_log_format_v01_t v01;
+ } pktq_log;
+} wl_iov_pktq_log_t;
+
+
+/* **** EXTLOG **** */
+#define EXTLOG_CUR_VER 0x0100
+
+#define MAX_ARGSTR_LEN 18 /* At least big enough for storing ETHER_ADDR_STR_LEN */
+
+/* log modules (bitmap) */
+#define LOG_MODULE_COMMON 0x0001
+#define LOG_MODULE_ASSOC 0x0002
+#define LOG_MODULE_EVENT 0x0004
+#define LOG_MODULE_MAX 3 /* Update when adding module */
+
+/* log levels */
+#define WL_LOG_LEVEL_DISABLE 0
+#define WL_LOG_LEVEL_ERR 1
+#define WL_LOG_LEVEL_WARN 2
+#define WL_LOG_LEVEL_INFO 3
+#define WL_LOG_LEVEL_MAX WL_LOG_LEVEL_INFO /* Update when adding level */
+
+/* flag */
+#define LOG_FLAG_EVENT 1
+
+/* log arg_type */
+#define LOG_ARGTYPE_NULL 0
+#define LOG_ARGTYPE_STR 1 /* %s */
+#define LOG_ARGTYPE_INT 2 /* %d */
+#define LOG_ARGTYPE_INT_STR 3 /* %d...%s */
+#define LOG_ARGTYPE_STR_INT 4 /* %s...%d */
+
+typedef struct wlc_extlog_cfg {
+ int max_number;
+ uint16 module; /* bitmap */
+ uint8 level;
+ uint8 flag;
+ uint16 version;
+} wlc_extlog_cfg_t;
+
+typedef struct log_record {
+ uint32 time;
+ uint16 module;
+ uint16 id;
+ uint8 level;
+ uint8 sub_unit;
+ uint8 seq_num;
+ int32 arg;
+ char str[MAX_ARGSTR_LEN];
+} log_record_t;
+
+typedef struct wlc_extlog_req {
+ uint32 from_last;
+ uint32 num;
+} wlc_extlog_req_t;
+
+typedef struct wlc_extlog_results {
+ uint16 version;
+ uint16 record_len;
+ uint32 num;
+ log_record_t logs[1];
+} wlc_extlog_results_t;
+
+typedef struct log_idstr {
+ uint16 id;
+ uint16 flag;
+ uint8 arg_type;
+ const char *fmt_str;
+} log_idstr_t;
+
+#define FMTSTRF_USER 1
+
+/* flat ID definitions
+ * New definitions HAVE TO BE ADDED at the end of the table. Otherwise, it will
+ * affect backward compatibility with pre-existing apps
+ */
+typedef enum {
+ FMTSTR_DRIVER_UP_ID = 0,
+ FMTSTR_DRIVER_DOWN_ID = 1,
+ FMTSTR_SUSPEND_MAC_FAIL_ID = 2,
+ FMTSTR_NO_PROGRESS_ID = 3,
+ FMTSTR_RFDISABLE_ID = 4,
+ FMTSTR_REG_PRINT_ID = 5,
+ FMTSTR_EXPTIME_ID = 6,
+ FMTSTR_JOIN_START_ID = 7,
+ FMTSTR_JOIN_COMPLETE_ID = 8,
+ FMTSTR_NO_NETWORKS_ID = 9,
+ FMTSTR_SECURITY_MISMATCH_ID = 10,
+ FMTSTR_RATE_MISMATCH_ID = 11,
+ FMTSTR_AP_PRUNED_ID = 12,
+ FMTSTR_KEY_INSERTED_ID = 13,
+ FMTSTR_DEAUTH_ID = 14,
+ FMTSTR_DISASSOC_ID = 15,
+ FMTSTR_LINK_UP_ID = 16,
+ FMTSTR_LINK_DOWN_ID = 17,
+ FMTSTR_RADIO_HW_OFF_ID = 18,
+ FMTSTR_RADIO_HW_ON_ID = 19,
+ FMTSTR_EVENT_DESC_ID = 20,
+ FMTSTR_PNP_SET_POWER_ID = 21,
+ FMTSTR_RADIO_SW_OFF_ID = 22,
+ FMTSTR_RADIO_SW_ON_ID = 23,
+ FMTSTR_PWD_MISMATCH_ID = 24,
+ FMTSTR_FATAL_ERROR_ID = 25,
+ FMTSTR_AUTH_FAIL_ID = 26,
+ FMTSTR_ASSOC_FAIL_ID = 27,
+ FMTSTR_IBSS_FAIL_ID = 28,
+ FMTSTR_EXTAP_FAIL_ID = 29,
+ FMTSTR_MAX_ID
+} log_fmtstr_id_t;
+
+#ifdef DONGLEOVERLAYS
+typedef struct {
+ uint32 flags_idx; /* lower 8 bits: overlay index; upper 24 bits: flags */
+ uint32 offset; /* offset into overlay region to write code */
+ uint32 len; /* overlay code len */
+ /* overlay code follows this struct */
+} wl_ioctl_overlay_t;
+
+#define OVERLAY_IDX_MASK 0x000000ff
+#define OVERLAY_IDX_SHIFT 0
+#define OVERLAY_FLAGS_MASK 0xffffff00
+#define OVERLAY_FLAGS_SHIFT 8
+/* overlay written to device memory immediately after loading the base image */
+#define OVERLAY_FLAG_POSTLOAD 0x100
+/* defer overlay download until the device responds w/WLC_E_OVL_DOWNLOAD event */
+#define OVERLAY_FLAG_DEFER_DL 0x200
+/* overlay downloaded prior to the host going to sleep */
+#define OVERLAY_FLAG_PRESLEEP 0x400
+
+#define OVERLAY_DOWNLOAD_CHUNKSIZE 1024
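+
+/* Editor's note: hypothetical sketch of how flags_idx is packed from the
+ * masks/flags above; not part of the original sources.
+ */
+#if 0	/* illustrative only */
+static uint32
+example_pack_overlay_flags_idx(uint32 idx)
+{
+	/* lower 8 bits carry the overlay index, upper 24 bits carry OVERLAY_FLAG_xxx */
+	return ((idx << OVERLAY_IDX_SHIFT) & OVERLAY_IDX_MASK) | OVERLAY_FLAG_POSTLOAD;
+}
+#endif	/* illustrative only */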
+#endif /* DONGLEOVERLAYS */
+
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+/* no default structure packing */
+#include <packed_section_end.h>
+
+/* require strict packing */
+#include <packed_section_start.h>
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+
+/* Structures and constants used for "vndr_ie" IOVar interface */
+#define VNDR_IE_CMD_LEN 4 /* length of the set command string:
+ * "add", "del" (+ NUL)
+ */
+
+/* 802.11 Mgmt Packet flags */
+#define VNDR_IE_BEACON_FLAG 0x1
+#define VNDR_IE_PRBRSP_FLAG 0x2
+#define VNDR_IE_ASSOCRSP_FLAG 0x4
+#define VNDR_IE_AUTHRSP_FLAG 0x8
+#define VNDR_IE_PRBREQ_FLAG 0x10
+#define VNDR_IE_ASSOCREQ_FLAG 0x20
+#define VNDR_IE_IWAPID_FLAG 0x40 /* vendor IE in IW advertisement protocol ID field */
+#define VNDR_IE_CUSTOM_FLAG 0x100 /* allow custom IE id */
+
+#define VNDR_IE_INFO_HDR_LEN (sizeof(uint32))
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 pktflag; /* bitmask indicating which packet(s) contain this IE */
+ vndr_ie_t vndr_ie_data; /* vendor IE data */
+} BWL_POST_PACKED_STRUCT vndr_ie_info_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ int iecount; /* number of entries in the vndr_ie_list[] array */
+ vndr_ie_info_t vndr_ie_list[1]; /* variable size list of vndr_ie_info_t structs */
+} BWL_POST_PACKED_STRUCT vndr_ie_buf_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ char cmd[VNDR_IE_CMD_LEN]; /* vndr_ie IOVar set command : "add", "del" + NUL */
+ vndr_ie_buf_t vndr_ie_buffer; /* buffer containing Vendor IE list information */
+} BWL_POST_PACKED_STRUCT vndr_ie_setbuf_t;
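+
+/* Editor's note: hypothetical sketch of preparing a "vndr_ie" set buffer, not
+ * part of the original sources.  The layout of vndr_ie_t (id/len/OUI/data)
+ * comes from the 802.11 headers outside this file and is not shown here.
+ */
+#if 0	/* illustrative only */
+static void
+example_fill_vndr_ie_add(vndr_ie_setbuf_t *setbuf)
+{
+	bzero(setbuf->cmd, VNDR_IE_CMD_LEN);
+	bcopy("add", setbuf->cmd, 4);		/* "add" or "del" + NUL */
+
+	setbuf->vndr_ie_buffer.iecount = 1;
+	/* carry this IE in beacons and probe responses */
+	setbuf->vndr_ie_buffer.vndr_ie_list[0].pktflag =
+		VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG;
+	/* vndr_ie_data (element id, length, OUI, payload) is filled in next */
+}
+#endif	/* illustrative only */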
+
+/* tag_ID/length/value_buffer tuple */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 id;
+ uint8 len;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT tlv_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 pktflag; /* bitmask indicating which packet(s) contain this IE */
+ tlv_t ie_data; /* IE data */
+} BWL_POST_PACKED_STRUCT ie_info_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ int iecount; /* number of entries in the ie_list[] array */
+ ie_info_t ie_list[1]; /* variable size list of ie_info_t structs */
+} BWL_POST_PACKED_STRUCT ie_buf_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ char cmd[VNDR_IE_CMD_LEN]; /* ie IOVar set command : "add" + NUL */
+ ie_buf_t ie_buffer; /* buffer containing IE list information */
+} BWL_POST_PACKED_STRUCT ie_setbuf_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 pktflag; /* bitmask indicating which packet(s) contain this IE */
+ uint8 id; /* IE type */
+} BWL_POST_PACKED_STRUCT ie_getbuf_t;
+
+/* structures used to define format of wps ie data from probe requests */
+/* passed up to applications via iovar "prbreq_wpsie" */
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_hdr {
+ struct ether_addr staAddr;
+ uint16 ieLen;
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_hdr_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_data {
+ sta_prbreq_wps_ie_hdr_t hdr;
+ uint8 ieData[1];
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_list {
+ uint32 totLen;
+ uint8 ieDataList[1];
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_list_t;
+
+
+#ifdef WLMEDIA_TXFAILEVENT
+typedef BWL_PRE_PACKED_STRUCT struct {
+ char dest[ETHER_ADDR_LEN]; /* destination MAC */
+ uint8 prio; /* Packet Priority */
+ uint8 flags; /* Flags */
+ uint32 tsf_l; /* TSF timer low */
+ uint32 tsf_h; /* TSF timer high */
+ uint16 rates; /* Main Rates */
+ uint16 txstatus; /* TX Status */
+} BWL_POST_PACKED_STRUCT txfailinfo_t;
+#endif /* WLMEDIA_TXFAILEVENT */
+
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+/* no strict structure packing */
+#include <packed_section_end.h>
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+/* Global ASSERT Logging */
+#define ASSERTLOG_CUR_VER 0x0100
+#define MAX_ASSRTSTR_LEN 64
+
+typedef struct assert_record {
+ uint32 time;
+ uint8 seq_num;
+ char str[MAX_ASSRTSTR_LEN];
+} assert_record_t;
+
+typedef struct assertlog_results {
+ uint16 version;
+ uint16 record_len;
+ uint32 num;
+ assert_record_t logs[1];
+} assertlog_results_t;
+
+#define LOGRRC_FIX_LEN 8
+#define IOBUF_ALLOWED_NUM_OF_LOGREC(type, len) ((len - LOGRRC_FIX_LEN)/sizeof(type))
+
+#ifdef BCMWAPI_WAI
+#define IV_LEN 16
+struct wapi_sta_msg_t
+{
+ uint16 msg_type;
+ uint16 datalen;
+ uint8 vap_mac[6];
+ uint8 reserve_data1[2];
+ uint8 sta_mac[6];
+ uint8 reserve_data2[2];
+ uint8 gsn[IV_LEN];
+ uint8 wie[256];
+};
+#endif /* BCMWAPI_WAI */
+
+/* channel interference measurement (chanim) related defines */
+
+/* chanim mode */
+#define CHANIM_DISABLE 0 /* disabled */
+#define CHANIM_DETECT 1 /* detection only */
+#define CHANIM_EXT 2 /* external state machine */
+#define CHANIM_ACT 3 /* full internal state machine, detect + act */
+#define CHANIM_MODE_MAX 4
+
+/* define for apcs reason code */
+#define APCS_INIT 0
+#define APCS_IOCTL 1
+#define APCS_CHANIM 2
+#define APCS_CSTIMER 3
+#define APCS_BTA 4
+
+/* number of ACS record entries */
+#define CHANIM_ACS_RECORD 10
+
+/* CHANIM */
+#define CCASTATS_TXDUR 0
+#define CCASTATS_INBSS 1
+#define CCASTATS_OBSS 2
+#define CCASTATS_NOCTG 3
+#define CCASTATS_NOPKT 4
+#define CCASTATS_DOZE 5
+#define CCASTATS_TXOP 6
+#define CCASTATS_GDTXDUR 7
+#define CCASTATS_BDTXDUR 8
+#define CCASTATS_MAX 9
+
+/* chanim acs record */
+typedef struct {
+ bool valid;
+ uint8 trigger;
+ chanspec_t selected_chspc;
+ int8 bgnoise;
+ uint32 glitch_cnt;
+ uint8 ccastats;
+ uint timestamp;
+} chanim_acs_record_t;
+
+typedef struct {
+ chanim_acs_record_t acs_record[CHANIM_ACS_RECORD];
+ uint8 count;
+ uint timestamp;
+} wl_acs_record_t;
+
+typedef struct chanim_stats {
+ uint32 glitchcnt; /* normalized as per second count */
+ uint32 badplcp; /* normalized as per second count */
+ uint8 ccastats[CCASTATS_MAX]; /* normalized as 0-255 */
+ int8 bgnoise; /* background noise level (in dBm) */
+ chanspec_t chanspec;
+ uint32 timestamp;
+} chanim_stats_t;
+
+#define WL_CHANIM_STATS_VERSION 1
+#define WL_CHANIM_COUNT_ALL 0xff
+#define WL_CHANIM_COUNT_ONE 0x1
+
+typedef struct {
+ uint32 buflen;
+ uint32 version;
+ uint32 count;
+ chanim_stats_t stats[1];
+} wl_chanim_stats_t;
+
+#define WL_CHANIM_STATS_FIXED_LEN OFFSETOF(wl_chanim_stats_t, stats)
+
+/* Noise measurement metrics. */
+#define NOISE_MEASURE_KNOISE 0x1
+
+/* scb probe parameter */
+typedef struct {
+ uint32 scb_timeout;
+ uint32 scb_activity_time;
+ uint32 scb_max_probe;
+} wl_scb_probe_t;
+
+/* ap tpc modes */
+#define AP_TPC_OFF 0
+#define AP_TPC_BSS_PWR 1 /* BSS power control */
+#define AP_TPC_AP_PWR 2 /* AP power control */
+#define AP_TPC_AP_BSS_PWR 3 /* Both AP and BSS power control */
+#define AP_TPC_MAX_LINK_MARGIN 127
+
+/* structure/defines for selective mgmt frame (smf) stats support */
+
+#define SMFS_VERSION 1
+/* selected mgmt frame (smf) stats element */
+typedef struct wl_smfs_elem {
+ uint32 count;
+ uint16 code; /* SC or RC code */
+} wl_smfs_elem_t;
+
+typedef struct wl_smf_stats {
+ uint32 version;
+ uint16 length; /* reserved for future usage */
+ uint8 type;
+ uint8 codetype;
+ uint32 ignored_cnt;
+ uint32 malformed_cnt;
+ uint32 count_total; /* total count, including the interested group */
+ wl_smfs_elem_t elem[1];
+} wl_smf_stats_t;
+
+#define WL_SMFSTATS_FIXED_LEN OFFSETOF(wl_smf_stats_t, elem)
+
+enum {
+ SMFS_CODETYPE_SC,
+ SMFS_CODETYPE_RC
+};
+
+/* reuse two number in the sc/rc space */
+#define SMFS_CODE_MALFORMED 0xFFFE
+#define SMFS_CODE_IGNORED 0xFFFD
+
+typedef enum smfs_type {
+ SMFS_TYPE_AUTH,
+ SMFS_TYPE_ASSOC,
+ SMFS_TYPE_REASSOC,
+ SMFS_TYPE_DISASSOC_TX,
+ SMFS_TYPE_DISASSOC_RX,
+ SMFS_TYPE_DEAUTH_TX,
+ SMFS_TYPE_DEAUTH_RX,
+ SMFS_TYPE_MAX
+} smfs_type_t;
+
+#ifdef PHYMON
+
+#define PHYMON_VERSION 1
+
+typedef struct wl_phycal_core_state {
+ /* Tx IQ/LO calibration coeffs */
+ int16 tx_iqlocal_a;
+ int16 tx_iqlocal_b;
+ int8 tx_iqlocal_ci;
+ int8 tx_iqlocal_cq;
+ int8 tx_iqlocal_di;
+ int8 tx_iqlocal_dq;
+ int8 tx_iqlocal_ei;
+ int8 tx_iqlocal_eq;
+ int8 tx_iqlocal_fi;
+ int8 tx_iqlocal_fq;
+
+ /* Rx IQ calibration coeffs */
+ int16 rx_iqcal_a;
+ int16 rx_iqcal_b;
+
+ uint8 tx_iqlocal_pwridx; /* Tx Power Index for Tx IQ/LO calibration */
+ uint32 papd_epsilon_table[64]; /* PAPD epsilon table */
+ int16 papd_epsilon_offset; /* PAPD epsilon offset */
+ uint8 curr_tx_pwrindex; /* Tx power index */
+ int8 idle_tssi; /* Idle TSSI */
+ int8 est_tx_pwr; /* Estimated Tx Power (dB) */
+ int8 est_rx_pwr; /* Estimated Rx Power (dB) from RSSI */
+ uint16 rx_gaininfo; /* Rx gain applied on last Rx pkt */
+ uint16 init_gaincode; /* initgain required for ACI */
+ int8 estirr_tx;
+ int8 estirr_rx;
+
+} wl_phycal_core_state_t;
+
+typedef struct wl_phycal_state {
+ int version;
+ int8 num_phy_cores; /* number of cores */
+ int8 curr_temperature; /* on-chip temperature sensor reading */
+ chanspec_t chspec; /* channspec for this state */
+ bool aci_state; /* ACI state: ON/OFF */
+ uint16 crsminpower; /* crsminpower required for ACI */
+ uint16 crsminpowerl; /* crsminpowerl required for ACI */
+ uint16 crsminpoweru; /* crsminpoweru required for ACI */
+ wl_phycal_core_state_t phycal_core[1];
+} wl_phycal_state_t;
+
+#define WL_PHYCAL_STAT_FIXED_LEN OFFSETOF(wl_phycal_state_t, phycal_core)
+#endif /* PHYMON */
+
+/* discovery state */
+typedef struct wl_p2p_disc_st {
+ uint8 state; /* see state */
+ chanspec_t chspec; /* valid in listen state */
+ uint16 dwell; /* valid in listen state, in ms */
+} wl_p2p_disc_st_t;
+
+/* state */
+#define WL_P2P_DISC_ST_SCAN 0
+#define WL_P2P_DISC_ST_LISTEN 1
+#define WL_P2P_DISC_ST_SEARCH 2
+
+/* scan request */
+typedef struct wl_p2p_scan {
+ uint8 type; /* 'S' for WLC_SCAN, 'E' for "escan" */
+ uint8 reserved[3];
+ /* scan or escan parms... */
+} wl_p2p_scan_t;
+
+/* i/f request */
+typedef struct wl_p2p_if {
+ struct ether_addr addr;
+ uint8 type; /* see i/f type */
+ chanspec_t chspec; /* for p2p_ifadd GO */
+} wl_p2p_if_t;
+
+/* i/f type */
+#define WL_P2P_IF_CLIENT 0
+#define WL_P2P_IF_GO 1
+#define WL_P2P_IF_DYNBCN_GO 2
+#define WL_P2P_IF_DEV 3
+
+/* i/f query */
+typedef struct wl_p2p_ifq {
+ uint bsscfgidx;
+ char ifname[BCM_MSG_IFNAME_MAX];
+} wl_p2p_ifq_t;
+
+/* OppPS & CTWindow */
+typedef struct wl_p2p_ops {
+ uint8 ops; /* 0: disable 1: enable */
+ uint8 ctw; /* >= 10 */
+} wl_p2p_ops_t;
+
+/* absence and presence request */
+typedef struct wl_p2p_sched_desc {
+ uint32 start;
+ uint32 interval;
+ uint32 duration;
+ uint32 count; /* see count */
+} wl_p2p_sched_desc_t;
+
+/* count */
+#define WL_P2P_SCHED_RSVD 0
+#define WL_P2P_SCHED_REPEAT 255 /* anything > 255 will be treated as 255 */
+
+typedef struct wl_p2p_sched {
+ uint8 type; /* see schedule type */
+ uint8 action; /* see schedule action */
+ uint8 option; /* see schedule option */
+ wl_p2p_sched_desc_t desc[1];
+} wl_p2p_sched_t;
+#define WL_P2P_SCHED_FIXED_LEN 3
+
+/* schedule type */
+#define WL_P2P_SCHED_TYPE_ABS 0 /* Scheduled Absence */
+#define WL_P2P_SCHED_TYPE_REQ_ABS 1 /* Requested Absence */
+
+/* schedule action during absence periods (for WL_P2P_SCHED_ABS type) */
+#define WL_P2P_SCHED_ACTION_NONE 0 /* no action */
+#define WL_P2P_SCHED_ACTION_DOZE 1 /* doze */
+/* schedule option - WL_P2P_SCHED_TYPE_REQ_ABS */
+#define WL_P2P_SCHED_ACTION_GOOFF 2 /* turn off GO beacon/prbrsp functions */
+/* schedule option - WL_P2P_SCHED_TYPE_XXX */
+#define WL_P2P_SCHED_ACTION_RESET 255 /* reset */
+
+/* schedule option - WL_P2P_SCHED_TYPE_ABS */
+#define WL_P2P_SCHED_OPTION_NORMAL 0 /* normal start/interval/duration/count */
+#define WL_P2P_SCHED_OPTION_BCNPCT 1 /* percentage of beacon interval */
+/* schedule option - WL_P2P_SCHED_TYPE_REQ_ABS */
+#define WL_P2P_SCHED_OPTION_TSFOFS 2 /* normal start/interval/duration/count with
+ * start being an offset of the 'current' TSF
+ */
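+
+/* Editor's note: hypothetical sketch of a single scheduled-absence entry, not
+ * part of the original sources.  The timing values are illustrative and their
+ * units are whatever the firmware expects for this iovar.
+ */
+#if 0	/* illustrative only */
+static void
+example_fill_p2p_abs_sched(wl_p2p_sched_t *s)
+{
+	s->type = WL_P2P_SCHED_TYPE_ABS;		/* scheduled absence */
+	s->action = WL_P2P_SCHED_ACTION_DOZE;		/* doze during absence */
+	s->option = WL_P2P_SCHED_OPTION_NORMAL;		/* plain start/interval/duration/count */
+	s->desc[0].start = 0;
+	s->desc[0].interval = 102400;			/* example value */
+	s->desc[0].duration = 30720;			/* example value */
+	s->desc[0].count = WL_P2P_SCHED_REPEAT;		/* repeat indefinitely */
+}
+#endif	/* illustrative only */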
+
+/* feature flags */
+#define WL_P2P_FEAT_GO_CSA (1 << 0) /* GO moves with the STA using CSA method */
+#define WL_P2P_FEAT_GO_NOLEGACY (1 << 1) /* GO does not probe respond to non-p2p probe
+ * requests
+ */
+#define WL_P2P_FEAT_RESTRICT_DEV_RESP (1 << 2) /* Restrict p2p dev interface from responding */
+
+#ifdef WLNIC
+/* nic_cnx iovar */
+typedef struct wl_nic_cnx {
+ uint8 opcode;
+ struct ether_addr addr;
+ /* the following are valid for WL_NIC_CNX_CONN */
+ uint8 SSID_len;
+ uint8 SSID[32];
+ struct ether_addr abssid;
+ uint8 join_period;
+} wl_nic_cnx_t;
+
+/* opcode */
+#define WL_NIC_CNX_ADD 0 /* add NIC connection */
+#define WL_NIC_CNX_DEL 1 /* delete NIC connection */
+#define WL_NIC_CNX_IDX 2 /* query NIC connection index */
+#define WL_NIC_CNX_CONN 3 /* join/create network */
+#define WL_NIC_CNX_DIS 4 /* disconnect from network */
+
+/* nic_cfg iovar */
+typedef struct wl_nic_cfg {
+ uint8 version;
+ uint8 beacon_mode;
+ uint16 beacon_interval;
+ uint8 diluted_beacon_period;
+ uint8 repeat_EQC;
+ uint8 scan_length;
+ uint8 scan_interval;
+ uint8 scan_probability;
+ uint8 awake_window_length;
+ int8 TSF_correction;
+ uint8 ASID;
+ uint8 channel_usage_mode;
+} wl_nic_cfg_t;
+
+/* version */
+#define WL_NIC_CFG_VER 1
+
+/* beacon_mode */
+#define WL_NIC_BCN_NORM 0
+#define WL_NIC_BCN_DILUTED 1
+
+/* channel_usage_mode */
+#define WL_NIC_CHAN_STATIC 0
+#define WL_NIC_CHAN_CYCLE 1
+
+/* nic_cfg iovar */
+typedef struct wl_nic_frm {
+ uint8 type;
+ struct ether_addr da;
+ uint8 body[1];
+} wl_nic_frm_t;
+
+/* type */
+#define WL_NIC_FRM_MYNET 1
+#define WL_NIC_FRM_ACTION 2
+
+/* i/f query */
+typedef struct wl_nic_ifq {
+ uint bsscfgidx;
+ char ifname[BCM_MSG_IFNAME_MAX];
+} wl_nic_ifq_t;
+
+/* data mode */
+/* nic_dm iovar */
+typedef struct wl_nic_dm {
+ uint8 enab;
+ chanspec_t chspec;
+} wl_nic_dm_t;
+#endif /* WLNIC */
+
+/* RFAWARE def */
+#define BCM_ACTION_RFAWARE 0x77
+#define BCM_ACTION_RFAWARE_DCS 0x01
+
+/* DCS reason code define */
+#define BCM_DCS_IOVAR 0x1
+#define BCM_DCS_UNKNOWN 0xFF
+
+typedef struct wl_bcmdcs_data {
+ uint reason;
+ chanspec_t chspec;
+} wl_bcmdcs_data_t;
+
+/* n-mode support capability */
+/* 2x2 includes both 1x1 & 2x2 devices.
+ * The value 2 is reserved for the future, when 1x1 & 2x2 may be separated
+ * and controlled independently.
+ */
+#define WL_11N_2x2 1
+#define WL_11N_3x3 3
+#define WL_11N_4x4 4
+
+/* define 11n feature disable flags */
+#define WLFEATURE_DISABLE_11N 0x00000001
+#define WLFEATURE_DISABLE_11N_STBC_TX 0x00000002
+#define WLFEATURE_DISABLE_11N_STBC_RX 0x00000004
+#define WLFEATURE_DISABLE_11N_SGI_TX 0x00000008
+#define WLFEATURE_DISABLE_11N_SGI_RX 0x00000010
+#define WLFEATURE_DISABLE_11N_AMPDU_TX 0x00000020
+#define WLFEATURE_DISABLE_11N_AMPDU_RX 0x00000040
+#define WLFEATURE_DISABLE_11N_GF 0x00000080
+
+/* Proxy STA modes */
+#define PSTA_MODE_DISABLED 0
+#define PSTA_MODE_PROXY 1
+#define PSTA_MODE_REPEATER 2
+
+
+/* NAT configuration */
+typedef struct {
+ uint32 ipaddr; /* interface ip address */
+ uint32 ipaddr_mask; /* interface ip address mask */
+ uint32 ipaddr_gateway; /* gateway ip address */
+ uint8 mac_gateway[6]; /* gateway mac address */
+ uint32 ipaddr_dns; /* DNS server ip address, valid only for public if */
+ uint8 mac_dns[6]; /* DNS server mac address, valid only for public if */
+ uint8 GUID[38]; /* interface GUID */
+} nat_if_info_t;
+
+typedef struct {
+ uint op; /* operation code */
+ bool pub_if; /* set for public if, clear for private if */
+ nat_if_info_t if_info; /* interface info */
+} nat_cfg_t;
+
+/* op code in nat_cfg */
+#define NAT_OP_ENABLE 1 /* enable NAT on given interface */
+#define NAT_OP_DISABLE 2 /* disable NAT on given interface */
+#define NAT_OP_DISABLE_ALL 3 /* disable NAT on all interfaces */
+
+/* NAT state */
+#define NAT_STATE_ENABLED 1 /* NAT is enabled */
+#define NAT_STATE_DISABLED 2 /* NAT is disabled */
+
+typedef struct {
+ int state; /* NAT state returned */
+} nat_state_t;
+
+#ifdef PROP_TXSTATUS
+/* Bit definitions for tlv iovar */
+/*
+ * enable RSSI signals:
+ * WLFC_CTL_TYPE_RSSI
+ */
+#define WLFC_FLAGS_RSSI_SIGNALS 0x0001
+
+/* enable (if/mac_open, if/mac_close,, mac_add, mac_del) signals:
+ *
+ * WLFC_CTL_TYPE_MAC_OPEN
+ * WLFC_CTL_TYPE_MAC_CLOSE
+ *
+ * WLFC_CTL_TYPE_INTERFACE_OPEN
+ * WLFC_CTL_TYPE_INTERFACE_CLOSE
+ *
+ * WLFC_CTL_TYPE_MACDESC_ADD
+ * WLFC_CTL_TYPE_MACDESC_DEL
+ *
+ */
+#define WLFC_FLAGS_XONXOFF_SIGNALS 0x0002
+
+/* enable (status, fifo_credit, mac_credit) signals
+ * WLFC_CTL_TYPE_MAC_REQUEST_CREDIT
+ * WLFC_CTL_TYPE_TXSTATUS
+ * WLFC_CTL_TYPE_FIFO_CREDITBACK
+ */
+#define WLFC_FLAGS_CREDIT_STATUS_SIGNALS 0x0004
+
+#define WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE 0x0008
+#define WLFC_FLAGS_PSQ_GENERATIONFSM_ENABLE 0x0010
+#define WLFC_FLAGS_PSQ_ZERO_BUFFER_ENABLE 0x0020
+#define WLFC_FLAGS_HOST_RXRERODER_ACTIVE 0x0040
+#endif /* PROP_TXSTATUS */
+
+#define BTA_STATE_LOG_SZ 64
+
+/* BTAMP Statemachine states */
+enum {
+ HCIReset = 1,
+ HCIReadLocalAMPInfo,
+ HCIReadLocalAMPASSOC,
+ HCIWriteRemoteAMPASSOC,
+ HCICreatePhysicalLink,
+ HCIAcceptPhysicalLinkRequest,
+ HCIDisconnectPhysicalLink,
+ HCICreateLogicalLink,
+ HCIAcceptLogicalLink,
+ HCIDisconnectLogicalLink,
+ HCILogicalLinkCancel,
+ HCIAmpStateChange,
+ HCIWriteLogicalLinkAcceptTimeout
+};
+
+typedef struct flush_txfifo {
+ uint32 txfifobmp;
+ uint32 hwtxfifoflush;
+ struct ether_addr ea;
+} flush_txfifo_t;
+
+#define CHANNEL_5G_LOW_START 36 /* 5G low (36..48) CDD enable/disable bit mask */
+#define CHANNEL_5G_MID_START 52 /* 5G mid (52..64) CDD enable/disable bit mask */
+#define CHANNEL_5G_HIGH_START 100 /* 5G high (100..140) CDD enable/disable bit mask */
+#define CHANNEL_5G_UPPER_START 149 /* 5G upper (149..161) CDD enable/disable bit mask */
+
+enum {
+ SPATIAL_MODE_2G_IDX = 0,
+ SPATIAL_MODE_5G_LOW_IDX,
+ SPATIAL_MODE_5G_MID_IDX,
+ SPATIAL_MODE_5G_HIGH_IDX,
+ SPATIAL_MODE_5G_UPPER_IDX,
+ SPATIAL_MODE_MAX_IDX
+};
+
+/* IOVAR "mempool" parameter. Used to retrieve a list of memory pool statistics. */
+typedef struct wl_mempool_stats {
+ int num; /* Number of memory pools */
+ bcm_mp_stats_t s[1]; /* Variable array of memory pool stats. */
+} wl_mempool_stats_t;
+
+
+/* D0 Coalescing */
+#define IPV4_ARP_FILTER 0x0001
+#define IPV4_NETBT_FILTER 0x0002
+#define IPV4_LLMNR_FILTER 0x0004
+#define IPV4_SSDP_FILTER 0x0008
+#define IPV4_WSD_FILTER 0x0010
+#define IPV6_NETBT_FILTER 0x0200
+#define IPV6_LLMNR_FILTER 0x0400
+#define IPV6_SSDP_FILTER 0x0800
+#define IPV6_WSD_FILTER 0x1000
+
+/* Network Offload Engine */
+#define NWOE_OL_ENABLE 0x00000001
+
+typedef struct {
+ uint32 ipaddr;
+ uint32 ipaddr_netmask;
+ uint32 ipaddr_gateway;
+} nwoe_ifconfig_t;
+
+/*
+ * Traffic management structures/defines.
+ */
+
+/* Traffic management bandwidth parameters */
+#define TRF_MGMT_MAX_PRIORITIES 3
+
+#define TRF_MGMT_FLAG_ADD_DSCP 0x0001 /* Add DSCP to IP TOS field */
+#define TRF_MGMT_FLAG_DISABLE_SHAPING 0x0002 /* Only support traffic classification */
+#define TRF_MGMT_FLAG_DISABLE_PRIORITY_TAGGING 0x0004 /* Don't override packet's priority */
+
+/* Traffic management priority classes */
+typedef enum trf_mgmt_priority_class {
+ trf_mgmt_priority_low = 0, /* Maps to 802.1p BK */
+ trf_mgmt_priority_medium = 1, /* Maps to 802.1p BE */
+ trf_mgmt_priority_high = 2, /* Maps to 802.1p VI */
+ trf_mgmt_priority_invalid = (trf_mgmt_priority_high + 1)
+} trf_mgmt_priority_class_t;
+
+/* Traffic management configuration parameters */
+typedef struct trf_mgmt_config {
+ uint32 trf_mgmt_enabled; /* 0 - disabled, 1 - enabled */
+ uint32 flags; /* See TRF_MGMT_FLAG_xxx defines */
+ uint32 host_ip_addr; /* My IP address to determine subnet */
+ uint32 host_subnet_mask; /* My subnet mask */
+ uint32 downlink_bandwidth; /* In units of kbps */
+ uint32 uplink_bandwidth; /* In units of kbps */
+ uint32 min_tx_bandwidth[TRF_MGMT_MAX_PRIORITIES]; /* Minimum guaranteed tx bandwidth */
+ uint32 min_rx_bandwidth[TRF_MGMT_MAX_PRIORITIES]; /* Minimum guaranteed rx bandwidth */
+} trf_mgmt_config_t;
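+
+/* Editor's note: hypothetical sketch of filling the configuration block above,
+ * not part of the original sources.  Addresses and bandwidth figures are
+ * example values only.
+ */
+#if 0	/* illustrative only */
+static void
+example_fill_trf_mgmt_config(trf_mgmt_config_t *cfg)
+{
+	bzero(cfg, sizeof(*cfg));
+	cfg->trf_mgmt_enabled = 1;
+	cfg->flags = TRF_MGMT_FLAG_ADD_DSCP;
+	cfg->host_ip_addr = 0xc0a80102;			/* 192.168.1.2 (example) */
+	cfg->host_subnet_mask = 0xffffff00;		/* /24 */
+	cfg->downlink_bandwidth = 20000;		/* kbps */
+	cfg->uplink_bandwidth = 5000;			/* kbps */
+	cfg->min_tx_bandwidth[trf_mgmt_priority_high] = 2000;	/* guaranteed kbps */
+	cfg->min_rx_bandwidth[trf_mgmt_priority_high] = 8000;	/* guaranteed kbps */
+}
+#endif	/* illustrative only */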
+
+/* Traffic management filter */
+typedef struct trf_mgmt_filter {
+ struct ether_addr dst_ether_addr; /* His L2 address */
+ uint32 dst_ip_addr; /* His IP address */
+ uint16 dst_port; /* His L4 port */
+ uint16 src_port; /* My L4 port */
+ uint16 prot; /* L4 protocol (only TCP or UDP) */
+ uint16 flags; /* TBD. For now, this must be zero. */
+ trf_mgmt_priority_class_t priority; /* Priority for filtered packets */
+} trf_mgmt_filter_t;
+
+/* Traffic management filter list (variable length) */
+typedef struct trf_mgmt_filter_list {
+ uint32 num_filters;
+ trf_mgmt_filter_t filter[1];
+} trf_mgmt_filter_list_t;
+
+/* Traffic management global info used for all queues */
+typedef struct trf_mgmt_global_info {
+ uint32 maximum_bytes_per_second;
+ uint32 maximum_bytes_per_sampling_period;
+ uint32 total_bytes_consumed_per_second;
+ uint32 total_bytes_consumed_per_sampling_period;
+ uint32 total_unused_bytes_per_sampling_period;
+} trf_mgmt_global_info_t;
+
+/* Traffic management shaping info per priority queue */
+typedef struct trf_mgmt_shaping_info {
+ uint32 gauranteed_bandwidth_percentage;
+ uint32 guaranteed_bytes_per_second;
+ uint32 guaranteed_bytes_per_sampling_period;
+ uint32 num_bytes_produced_per_second;
+ uint32 num_bytes_consumed_per_second;
+ uint32 num_queued_packets; /* Number of packets in queue */
+ uint32 num_queued_bytes; /* Number of bytes in queue */
+} trf_mgmt_shaping_info_t;
+
+/* Traffic management shaping info array */
+typedef struct trf_mgmt_shaping_info_array {
+ trf_mgmt_global_info_t tx_global_shaping_info;
+ trf_mgmt_shaping_info_t tx_queue_shaping_info[TRF_MGMT_MAX_PRIORITIES];
+ trf_mgmt_global_info_t rx_global_shaping_info;
+ trf_mgmt_shaping_info_t rx_queue_shaping_info[TRF_MGMT_MAX_PRIORITIES];
+} trf_mgmt_shaping_info_array_t;
+
+
+/* Traffic management statistical counters */
+typedef struct trf_mgmt_stats {
+ uint32 num_processed_packets; /* Number of packets processed */
+ uint32 num_processed_bytes; /* Number of bytes processed */
+ uint32 num_discarded_packets; /* Number of packets discarded from queue */
+} trf_mgmt_stats_t;
+
+/* Traffic management statistics array */
+typedef struct trf_mgmt_stats_array {
+ trf_mgmt_stats_t tx_queue_stats[TRF_MGMT_MAX_PRIORITIES];
+ trf_mgmt_stats_t rx_queue_stats[TRF_MGMT_MAX_PRIORITIES];
+} trf_mgmt_stats_array_t;
+
+typedef struct powersel_params {
+ /* LPC Params exposed via IOVAR */
+ int32 tp_ratio_thresh; /* Throughput ratio threshold */
+ uint8 rate_stab_thresh; /* Thresh for rate stability based on nupd */
+ uint8 pwr_stab_thresh; /* Number of successes before power step down */
+ uint8 pwr_sel_exp_time; /* Time lapse for expiry of database */
+} powersel_params_t;
+
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+#endif /* _wlioctl_h_ */
diff --git a/drivers/net/wireless/bcmdhd/linux_osl.c b/drivers/net/wireless/bcmdhd/linux_osl.c
new file mode 100644
index 000000000000..ef9c733742af
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/linux_osl.c
@@ -0,0 +1,1053 @@
+/*
+ * Linux OS Independent Layer
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: linux_osl.c 311099 2012-01-27 14:46:59Z $
+ */
+
+#define LINUX_PORT
+
+#include <typedefs.h>
+#include <bcmendian.h>
+#include <linuxver.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <linux/delay.h>
+#include <pcicfg.h>
+
+#ifdef BCMASSERT_LOG
+#include <bcm_assert_log.h>
+#endif
+
+
+#include <linux/fs.h>
+
+#define PCI_CFG_RETRY 10
+
+#define OS_HANDLE_MAGIC 0x1234abcd
+#define BCM_MEM_FILENAME_LEN 24
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+#define STATIC_BUF_MAX_NUM 16
+#define STATIC_BUF_SIZE (PAGE_SIZE*2)
+#define STATIC_BUF_TOTAL_LEN (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)
+
+typedef struct bcm_static_buf {
+ struct semaphore static_sem;
+ unsigned char *buf_ptr;
+ unsigned char buf_use[STATIC_BUF_MAX_NUM];
+} bcm_static_buf_t;
+
+static bcm_static_buf_t *bcm_static_buf = 0;
+
+#define STATIC_PKT_MAX_NUM 8
+
+typedef struct bcm_static_pkt {
+ struct sk_buff *skb_4k[STATIC_PKT_MAX_NUM];
+ struct sk_buff *skb_8k[STATIC_PKT_MAX_NUM];
+ struct semaphore osl_pkt_sem;
+ unsigned char pkt_use[STATIC_PKT_MAX_NUM * 2];
+} bcm_static_pkt_t;
+
+static bcm_static_pkt_t *bcm_static_skb = 0;
+#endif
+
+typedef struct bcm_mem_link {
+ struct bcm_mem_link *prev;
+ struct bcm_mem_link *next;
+ uint size;
+ int line;
+ void *osh;
+ char file[BCM_MEM_FILENAME_LEN];
+} bcm_mem_link_t;
+
+struct osl_info {
+ osl_pubinfo_t pub;
+#ifdef CTFPOOL
+ ctfpool_t *ctfpool;
+#endif
+ uint magic;
+ void *pdev;
+ atomic_t malloced;
+ uint failed;
+ uint bustype;
+ bcm_mem_link_t *dbgmem_list;
+ spinlock_t dbgmem_lock;
+ spinlock_t pktalloc_lock;
+};
+
+uint32 g_assert_type = FALSE;
+
+static int16 linuxbcmerrormap[] =
+{ 0,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -E2BIG,
+ -E2BIG,
+ -EBUSY,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EFAULT,
+ -ENOMEM,
+ -EOPNOTSUPP,
+ -EMSGSIZE,
+ -EINVAL,
+ -EPERM,
+ -ENOMEM,
+ -EINVAL,
+ -ERANGE,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EIO,
+ -ENODEV,
+ -EINVAL,
+ -EIO,
+ -EIO,
+ -ENODEV,
+ -EINVAL,
+ -ENODATA,
+
+#if BCME_LAST != -42
+#error "You need to add an OS error translation in the linuxbcmerrormap \
+ for new error codes defined in bcmutils.h"
+#endif
+};
+
+
+int
+osl_error(int bcmerror)
+{
+ if (bcmerror > 0)
+ bcmerror = 0;
+ else if (bcmerror < BCME_LAST)
+ bcmerror = BCME_ERROR;
+
+
+ return linuxbcmerrormap[-bcmerror];
+}
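+
+/* Editor's note: hypothetical usage sketch, not part of the original sources.
+ * It only illustrates the mapping implemented by osl_error() above; the BCME_*
+ * codes are the negative error values defined in bcmutils.h.
+ */
+#if 0	/* illustrative only */
+static int
+example_translate_error(int bcmerror)
+{
+	/* e.g. BCME_NOMEM becomes -ENOMEM, BCME_OK (0) stays 0,
+	 * anything below BCME_LAST is clamped to the generic BCME_ERROR slot
+	 */
+	return osl_error(bcmerror);
+}
+#endif	/* illustrative only */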
+
+extern uint8* dhd_os_prealloc(void *osh, int section, int size);
+
+osl_t *
+osl_attach(void *pdev, uint bustype, bool pkttag)
+{
+ osl_t *osh;
+
+ osh = kmalloc(sizeof(osl_t), GFP_ATOMIC);
+ ASSERT(osh);
+
+ bzero(osh, sizeof(osl_t));
+
+
+ ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
+
+ osh->magic = OS_HANDLE_MAGIC;
+ atomic_set(&osh->malloced, 0);
+ osh->failed = 0;
+ osh->dbgmem_list = NULL;
+ spin_lock_init(&(osh->dbgmem_lock));
+ osh->pdev = pdev;
+ osh->pub.pkttag = pkttag;
+ osh->bustype = bustype;
+
+ switch (bustype) {
+ case PCI_BUS:
+ case SI_BUS:
+ case PCMCIA_BUS:
+ osh->pub.mmbus = TRUE;
+ break;
+ case JTAG_BUS:
+ case SDIO_BUS:
+ case USB_BUS:
+ case SPI_BUS:
+ case RPC_BUS:
+ osh->pub.mmbus = FALSE;
+ break;
+ default:
+ ASSERT(FALSE);
+ break;
+ }
+
+#if defined(CONFIG_DHD_USE_STATIC_BUF)
+ if (!bcm_static_buf) {
+ if (!(bcm_static_buf = (bcm_static_buf_t *)dhd_os_prealloc(osh, 3, STATIC_BUF_SIZE+
+ STATIC_BUF_TOTAL_LEN))) {
+ printk("can not alloc static buf!\n");
+ }
+ else
+ printk("alloc static buf at %x!\n", (unsigned int)bcm_static_buf);
+
+
+ sema_init(&bcm_static_buf->static_sem, 1);
+
+ bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
+ }
+
+ if (!bcm_static_skb) {
+ int i;
+ void *skb_buff_ptr = 0;
+ bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
+ skb_buff_ptr = dhd_os_prealloc(osh, 4, 0);
+
+ bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *)*16);
+ for (i = 0; i < STATIC_PKT_MAX_NUM * 2; i++)
+ bcm_static_skb->pkt_use[i] = 0;
+
+ sema_init(&bcm_static_skb->osl_pkt_sem, 1);
+ }
+#endif
+
+ spin_lock_init(&(osh->pktalloc_lock));
+
+ return osh;
+}
+
+void
+osl_detach(osl_t *osh)
+{
+ if (osh == NULL)
+ return;
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+ if (bcm_static_buf) {
+ bcm_static_buf = 0;
+ }
+ if (bcm_static_skb) {
+ bcm_static_skb = 0;
+ }
+#endif
+
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+ kfree(osh);
+}
+
+static struct sk_buff *osl_alloc_skb(unsigned int len)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
+ gfp_t flags = GFP_ATOMIC;
+
+ return __dev_alloc_skb(len, flags);
+#else
+ return dev_alloc_skb(len);
+#endif
+}
+
+#ifdef CTFPOOL
+
+#ifdef CTFPOOL_SPINLOCK
+#define CTFPOOL_LOCK(ctfpool, flags) spin_lock_irqsave(&(ctfpool)->lock, flags)
+#define CTFPOOL_UNLOCK(ctfpool, flags) spin_unlock_irqrestore(&(ctfpool)->lock, flags)
+#else
+#define CTFPOOL_LOCK(ctfpool, flags) spin_lock_bh(&(ctfpool)->lock)
+#define CTFPOOL_UNLOCK(ctfpool, flags) spin_unlock_bh(&(ctfpool)->lock)
+#endif
+
+void *
+osl_ctfpool_add(osl_t *osh)
+{
+ struct sk_buff *skb;
+#ifdef CTFPOOL_SPINLOCK
+ unsigned long flags;
+#endif
+
+ if ((osh == NULL) || (osh->ctfpool == NULL))
+ return NULL;
+
+ CTFPOOL_LOCK(osh->ctfpool, flags);
+ ASSERT(osh->ctfpool->curr_obj <= osh->ctfpool->max_obj);
+
+
+ if (osh->ctfpool->curr_obj == osh->ctfpool->max_obj) {
+ CTFPOOL_UNLOCK(osh->ctfpool, flags);
+ return NULL;
+ }
+
+
+ skb = osl_alloc_skb(osh->ctfpool->obj_size);
+ if (skb == NULL) {
+ printf("%s: skb alloc of len %d failed\n", __FUNCTION__,
+ osh->ctfpool->obj_size);
+ CTFPOOL_UNLOCK(osh->ctfpool, flags);
+ return NULL;
+ }
+
+
+ skb->next = (struct sk_buff *)osh->ctfpool->head;
+ osh->ctfpool->head = skb;
+ osh->ctfpool->fast_frees++;
+ osh->ctfpool->curr_obj++;
+
+
+ CTFPOOLPTR(osh, skb) = (void *)osh->ctfpool;
+
+
+ PKTFAST(osh, skb) = FASTBUF;
+
+ CTFPOOL_UNLOCK(osh->ctfpool, flags);
+
+ return skb;
+}
+
+
+void
+osl_ctfpool_replenish(osl_t *osh, uint thresh)
+{
+ if ((osh == NULL) || (osh->ctfpool == NULL))
+ return;
+
+
+ while ((osh->ctfpool->refills > 0) && (thresh--)) {
+ osl_ctfpool_add(osh);
+ osh->ctfpool->refills--;
+ }
+}
+
+
+int32
+osl_ctfpool_init(osl_t *osh, uint numobj, uint size)
+{
+ osh->ctfpool = kmalloc(sizeof(ctfpool_t), GFP_ATOMIC);
+ ASSERT(osh->ctfpool);
+ bzero(osh->ctfpool, sizeof(ctfpool_t));
+
+ osh->ctfpool->max_obj = numobj;
+ osh->ctfpool->obj_size = size;
+
+ spin_lock_init(&osh->ctfpool->lock);
+
+ while (numobj--) {
+ if (!osl_ctfpool_add(osh))
+ return -1;
+ osh->ctfpool->fast_frees--;
+ }
+
+ return 0;
+}
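+
+/* Editor's note: hypothetical usage sketch of the pool created above, not part
+ * of the original sources.  The object count and size are example values.
+ */
+#if 0	/* illustrative only */
+static void
+example_ctfpool_usage(osl_t *osh)
+{
+	/* preallocate 16 sk_buffs of 2 KB each for the fast allocation path */
+	if (osl_ctfpool_init(osh, 16, 2048) != 0)
+		return;
+
+	/* osl_pktget() will now try osl_pktfastget() before falling back to
+	 * osl_alloc_skb(); buffers freed via osl_pktfastfree() return to the pool
+	 */
+	osl_ctfpool_cleanup(osh);
+}
+#endif	/* illustrative only */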
+
+
+void
+osl_ctfpool_cleanup(osl_t *osh)
+{
+ struct sk_buff *skb, *nskb;
+#ifdef CTFPOOL_SPINLOCK
+ unsigned long flags;
+#endif
+
+ if ((osh == NULL) || (osh->ctfpool == NULL))
+ return;
+
+ CTFPOOL_LOCK(osh->ctfpool, flags);
+
+ skb = osh->ctfpool->head;
+
+ while (skb != NULL) {
+ nskb = skb->next;
+ dev_kfree_skb(skb);
+ skb = nskb;
+ osh->ctfpool->curr_obj--;
+ }
+
+ ASSERT(osh->ctfpool->curr_obj == 0);
+ osh->ctfpool->head = NULL;
+ CTFPOOL_UNLOCK(osh->ctfpool, flags);
+
+ kfree(osh->ctfpool);
+ osh->ctfpool = NULL;
+}
+
+void
+osl_ctfpool_stats(osl_t *osh, void *b)
+{
+ struct bcmstrbuf *bb;
+
+ if ((osh == NULL) || (osh->ctfpool == NULL))
+ return;
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+ if (bcm_static_buf) {
+ bcm_static_buf = 0;
+ }
+ if (bcm_static_skb) {
+ bcm_static_skb = 0;
+ }
+#endif
+
+ bb = b;
+
+ ASSERT((osh != NULL) && (bb != NULL));
+
+ bcm_bprintf(bb, "max_obj %d obj_size %d curr_obj %d refills %d\n",
+ osh->ctfpool->max_obj, osh->ctfpool->obj_size,
+ osh->ctfpool->curr_obj, osh->ctfpool->refills);
+ bcm_bprintf(bb, "fast_allocs %d fast_frees %d slow_allocs %d\n",
+ osh->ctfpool->fast_allocs, osh->ctfpool->fast_frees,
+ osh->ctfpool->slow_allocs);
+}
+
+static inline struct sk_buff *
+osl_pktfastget(osl_t *osh, uint len)
+{
+ struct sk_buff *skb;
+#ifdef CTFPOOL_SPINLOCK
+ unsigned long flags;
+#endif
+
+
+ if (osh->ctfpool == NULL)
+ return NULL;
+
+ CTFPOOL_LOCK(osh->ctfpool, flags);
+ if (osh->ctfpool->head == NULL) {
+ ASSERT(osh->ctfpool->curr_obj == 0);
+ osh->ctfpool->slow_allocs++;
+ CTFPOOL_UNLOCK(osh->ctfpool, flags);
+ return NULL;
+ }
+
+ ASSERT(len <= osh->ctfpool->obj_size);
+
+
+ skb = (struct sk_buff *)osh->ctfpool->head;
+ osh->ctfpool->head = (void *)skb->next;
+
+ osh->ctfpool->fast_allocs++;
+ osh->ctfpool->curr_obj--;
+ ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head);
+ CTFPOOL_UNLOCK(osh->ctfpool, flags);
+
+
+ skb->next = skb->prev = NULL;
+ skb->data = skb->head + 16;
+ skb->tail = skb->head + 16;
+
+ skb->len = 0;
+ skb->cloned = 0;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
+ skb->list = NULL;
+#endif
+ atomic_set(&skb->users, 1);
+
+ return skb;
+}
+#endif
+
+struct sk_buff * BCMFASTPATH
+osl_pkt_tonative(osl_t *osh, void *pkt)
+{
+#ifndef WL_UMK
+ struct sk_buff *nskb;
+ unsigned long flags;
+#endif
+
+ if (osh->pub.pkttag)
+ bzero((void*)((struct sk_buff *)pkt)->cb, OSL_PKTTAG_SZ);
+
+#ifndef WL_UMK
+
+ for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
+ spin_lock_irqsave(&osh->pktalloc_lock, flags);
+ osh->pub.pktalloced--;
+ spin_unlock_irqrestore(&osh->pktalloc_lock, flags);
+ }
+#endif
+ return (struct sk_buff *)pkt;
+}
+
+
+void * BCMFASTPATH
+osl_pkt_frmnative(osl_t *osh, void *pkt)
+{
+#ifndef WL_UMK
+ struct sk_buff *nskb;
+ unsigned long flags;
+#endif
+
+ if (osh->pub.pkttag)
+ bzero((void*)((struct sk_buff *)pkt)->cb, OSL_PKTTAG_SZ);
+
+#ifndef WL_UMK
+
+ for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
+ spin_lock_irqsave(&osh->pktalloc_lock, flags);
+ osh->pub.pktalloced++;
+ spin_unlock_irqrestore(&osh->pktalloc_lock, flags);
+ }
+#endif
+ return (void *)pkt;
+}
+
+
+void * BCMFASTPATH
+osl_pktget(osl_t *osh, uint len)
+{
+ struct sk_buff *skb;
+ unsigned long flags;
+
+#ifdef CTFPOOL
+
+ skb = osl_pktfastget(osh, len);
+ if ((skb != NULL) || ((skb = osl_alloc_skb(len)) != NULL)) {
+#else
+ if ((skb = osl_alloc_skb(len))) {
+#endif
+ skb_put(skb, len);
+ skb->priority = 0;
+
+
+ spin_lock_irqsave(&osh->pktalloc_lock, flags);
+ osh->pub.pktalloced++;
+ spin_unlock_irqrestore(&osh->pktalloc_lock, flags);
+ }
+
+ return ((void*) skb);
+}
+
+#ifdef CTFPOOL
+static inline void
+osl_pktfastfree(osl_t *osh, struct sk_buff *skb)
+{
+ ctfpool_t *ctfpool;
+#ifdef CTFPOOL_SPINLOCK
+ unsigned long flags;
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+ skb->tstamp.tv.sec = 0;
+#else
+ skb->stamp.tv_sec = 0;
+#endif
+
+
+ skb->dev = NULL;
+ skb->dst = NULL;
+ memset(skb->cb, 0, sizeof(skb->cb));
+ skb->ip_summed = 0;
+ skb->destructor = NULL;
+
+ ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
+ ASSERT(ctfpool != NULL);
+
+
+ CTFPOOL_LOCK(ctfpool, flags);
+ skb->next = (struct sk_buff *)ctfpool->head;
+ ctfpool->head = (void *)skb;
+
+ ctfpool->fast_frees++;
+ ctfpool->curr_obj++;
+
+ ASSERT(ctfpool->curr_obj <= ctfpool->max_obj);
+ CTFPOOL_UNLOCK(ctfpool, flags);
+}
+#endif
+
+
+void BCMFASTPATH
+osl_pktfree(osl_t *osh, void *p, bool send)
+{
+ struct sk_buff *skb, *nskb;
+ unsigned long flags;
+
+ skb = (struct sk_buff*) p;
+
+ if (send && osh->pub.tx_fn)
+ osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
+
+ PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);
+
+
+ while (skb) {
+ nskb = skb->next;
+ skb->next = NULL;
+
+
+
+#ifdef CTFPOOL
+ if ((PKTISFAST(osh, skb)) && (atomic_read(&skb->users) == 1))
+ osl_pktfastfree(osh, skb);
+ else {
+#else
+ {
+#endif
+
+ if (skb->destructor)
+
+ dev_kfree_skb_any(skb);
+ else
+
+ dev_kfree_skb(skb);
+ }
+ spin_lock_irqsave(&osh->pktalloc_lock, flags);
+ osh->pub.pktalloced--;
+ spin_unlock_irqrestore(&osh->pktalloc_lock, flags);
+ skb = nskb;
+ }
+}
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+void*
+osl_pktget_static(osl_t *osh, uint len)
+{
+ int i = 0;
+ struct sk_buff *skb;
+
+ if (len > (PAGE_SIZE*2)) {
+ printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
+ return osl_pktget(osh, len);
+ }
+
+ down(&bcm_static_skb->osl_pkt_sem);
+
+ if (len <= PAGE_SIZE) {
+ for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
+ if (bcm_static_skb->pkt_use[i] == 0)
+ break;
+ }
+
+ if (i != STATIC_PKT_MAX_NUM) {
+ bcm_static_skb->pkt_use[i] = 1;
+ up(&bcm_static_skb->osl_pkt_sem);
+ skb = bcm_static_skb->skb_4k[i];
+ skb->tail = skb->data + len;
+ skb->len = len;
+ return skb;
+ }
+ }
+
+
+ for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
+ if (bcm_static_skb->pkt_use[i+STATIC_PKT_MAX_NUM] == 0)
+ break;
+ }
+
+ if (i != STATIC_PKT_MAX_NUM) {
+ bcm_static_skb->pkt_use[i+STATIC_PKT_MAX_NUM] = 1;
+ up(&bcm_static_skb->osl_pkt_sem);
+ skb = bcm_static_skb->skb_8k[i];
+ skb->tail = skb->data + len;
+ skb->len = len;
+ return skb;
+ }
+
+ up(&bcm_static_skb->osl_pkt_sem);
+ printk("%s: all static pkt in use!\n", __FUNCTION__);
+ return osl_pktget(osh, len);
+}
+
+void
+osl_pktfree_static(osl_t *osh, void *p, bool send)
+{
+ int i;
+
+ for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
+ if (p == bcm_static_skb->skb_4k[i]) {
+ down(&bcm_static_skb->osl_pkt_sem);
+ bcm_static_skb->pkt_use[i] = 0;
+ up(&bcm_static_skb->osl_pkt_sem);
+ return;
+ }
+ }
+
+ for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
+ if (p == bcm_static_skb->skb_8k[i]) {
+ down(&bcm_static_skb->osl_pkt_sem);
+ bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 0;
+ up(&bcm_static_skb->osl_pkt_sem);
+ return;
+ }
+ }
+
+	osl_pktfree(osh, p, send);
+}
+#endif
+
+uint32
+osl_pci_read_config(osl_t *osh, uint offset, uint size)
+{
+ uint val = 0;
+ uint retry = PCI_CFG_RETRY;
+
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+
+ ASSERT(size == 4);
+
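+	/* An all-ones readback typically means the device did not respond; retry the
+	 * read until a sane value comes back or the PCI_CFG_RETRY budget is exhausted.
+	 */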
+ do {
+ pci_read_config_dword(osh->pdev, offset, &val);
+ if (val != 0xffffffff)
+ break;
+ } while (retry--);
+
+
+ return (val);
+}
+
+void
+osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
+{
+ uint retry = PCI_CFG_RETRY;
+
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+
+ ASSERT(size == 4);
+
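+	/* Writes to the BAR0 window register are read back and retried until they take
+	 * effect; other offsets are written once.
+	 */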
+ do {
+ pci_write_config_dword(osh->pdev, offset, val);
+ if (offset != PCI_BAR0_WIN)
+ break;
+ if (osl_pci_read_config(osh, offset, size) == val)
+ break;
+ } while (retry--);
+
+}
+
+
+uint
+osl_pci_bus(osl_t *osh)
+{
+ ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+ return ((struct pci_dev *)osh->pdev)->bus->number;
+}
+
+
+uint
+osl_pci_slot(osl_t *osh)
+{
+ ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+ return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
+}
+
+
+struct pci_dev *
+osl_pci_device(osl_t *osh)
+{
+ ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+ return osh->pdev;
+}
+
+static void
+osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
+{
+}
+
+void
+osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
+{
+ osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
+}
+
+void
+osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
+{
+ osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
+}
+
+void *
+osl_malloc(osl_t *osh, uint size)
+{
+ void *addr;
+
+
+ if (osh)
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+ if (bcm_static_buf)
+ {
+ int i = 0;
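+		/* Requests between PAGE_SIZE and STATIC_BUF_SIZE are served from a
+		 * preallocated pool of STATIC_BUF_MAX_NUM slots; anything else falls
+		 * through to kmalloc() below.
+		 */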
+		if ((size >= PAGE_SIZE) && (size <= STATIC_BUF_SIZE))
+ {
+ down(&bcm_static_buf->static_sem);
+
+ for (i = 0; i < STATIC_BUF_MAX_NUM; i++)
+ {
+ if (bcm_static_buf->buf_use[i] == 0)
+ break;
+ }
+
+ if (i == STATIC_BUF_MAX_NUM)
+ {
+ up(&bcm_static_buf->static_sem);
+ printk("all static buff in use!\n");
+ goto original;
+ }
+
+ bcm_static_buf->buf_use[i] = 1;
+ up(&bcm_static_buf->static_sem);
+
+ bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size);
+ if (osh)
+ atomic_add(size, &osh->malloced);
+
+ return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i));
+ }
+ }
+original:
+#endif
+
+ if ((addr = kmalloc(size, GFP_ATOMIC)) == NULL) {
+ if (osh)
+ osh->failed++;
+ return (NULL);
+ }
+ if (osh)
+ atomic_add(size, &osh->malloced);
+
+ return (addr);
+}
+
+void
+osl_mfree(osl_t *osh, void *addr, uint size)
+{
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+ if (bcm_static_buf)
+ {
+ if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
+ <= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
+ {
+ int buf_idx = 0;
+
+ buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;
+
+ down(&bcm_static_buf->static_sem);
+ bcm_static_buf->buf_use[buf_idx] = 0;
+ up(&bcm_static_buf->static_sem);
+
+ if (osh) {
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+ atomic_sub(size, &osh->malloced);
+ }
+ return;
+ }
+ }
+#endif
+ if (osh) {
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+ atomic_sub(size, &osh->malloced);
+ }
+ kfree(addr);
+}
+
+uint
+osl_malloced(osl_t *osh)
+{
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+ return (atomic_read(&osh->malloced));
+}
+
+uint
+osl_malloc_failed(osl_t *osh)
+{
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+ return (osh->failed);
+}
+
+
+uint
+osl_dma_consistent_align(void)
+{
+ return (PAGE_SIZE);
+}
+
+void*
+osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, ulong *pap)
+{
+ uint16 align = (1 << align_bits);
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
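+	/* If the platform's consistent-allocation alignment cannot satisfy the requested
+	 * alignment, over-allocate by 'align' bytes so the caller has room to align the
+	 * returned buffer.
+	 */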
+ if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
+ size += align;
+ *alloced = size;
+
+ return (pci_alloc_consistent(osh->pdev, size, (dma_addr_t*)pap));
+}
+
+void
+osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa)
+{
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+ pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
+}
+
+uint BCMFASTPATH
+osl_dma_map(osl_t *osh, void *va, uint size, int direction)
+{
+ int dir;
+
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+ dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
+ return (pci_map_single(osh->pdev, va, size, dir));
+}
+
+void BCMFASTPATH
+osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction)
+{
+ int dir;
+
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+ dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
+ pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
+}
+
+#if defined(BCMASSERT_LOG)
+void
+osl_assert(const char *exp, const char *file, int line)
+{
+ char tempbuf[256];
+ const char *basename;
+
+ basename = strrchr(file, '/');
+
+ if (basename)
+ basename++;
+
+ if (!basename)
+ basename = file;
+
+#ifdef BCMASSERT_LOG
+	snprintf(tempbuf, sizeof(tempbuf), "\"%s\": file \"%s\", line %d\n",
+	         exp, basename, line);
+
+ bcm_assert_log(tempbuf);
+#endif
+
+
+}
+#endif
+
+void
+osl_delay(uint usec)
+{
+ uint d;
+
+ while (usec > 0) {
+ d = MIN(usec, 1000);
+ udelay(d);
+ usec -= d;
+ }
+}
+
+
+
+void *
+osl_pktdup(osl_t *osh, void *skb)
+{
+ void * p;
+ unsigned long irqflags;
+
+
+ PKTCTFMAP(osh, skb);
+
+ if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
+ return NULL;
+
+#ifdef CTFPOOL
+ if (PKTISFAST(osh, skb)) {
+ ctfpool_t *ctfpool;
+
+
+ ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
+ ASSERT(ctfpool != NULL);
+ PKTCLRFAST(osh, p);
+ PKTCLRFAST(osh, skb);
+ ctfpool->refills++;
+ }
+#endif
+
+
+ if (osh->pub.pkttag)
+ bzero((void*)((struct sk_buff *)p)->cb, OSL_PKTTAG_SZ);
+
+
+ spin_lock_irqsave(&osh->pktalloc_lock, irqflags);
+ osh->pub.pktalloced++;
+ spin_unlock_irqrestore(&osh->pktalloc_lock, irqflags);
+ return (p);
+}
+
+
+
+
+
+
+
+void *
+osl_os_open_image(char *filename)
+{
+ struct file *fp;
+
+ fp = filp_open(filename, O_RDONLY, 0);
+
+ if (IS_ERR(fp))
+ fp = NULL;
+
+ return fp;
+}
+
+int
+osl_os_get_image_block(char *buf, int len, void *image)
+{
+ struct file *fp = (struct file *)image;
+ int rdlen;
+
+ if (!image)
+ return 0;
+
+ rdlen = kernel_read(fp, fp->f_pos, buf, len);
+ if (rdlen > 0)
+ fp->f_pos += rdlen;
+
+ return rdlen;
+}
+
+void
+osl_os_close_image(void *image)
+{
+ if (image)
+ filp_close((struct file *)image, NULL);
+}
diff --git a/drivers/net/wireless/bcmdhd/sbutils.c b/drivers/net/wireless/bcmdhd/sbutils.c
new file mode 100644
index 000000000000..68cfcb27a9c4
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/sbutils.c
@@ -0,0 +1,1001 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbutils.c 310902 2012-01-26 19:45:33Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+#include <sbpcmcia.h>
+
+#include "siutils_priv.h"
+
+
+/* local prototypes */
+static uint _sb_coreidx(si_info_t *sii, uint32 sba);
+static uint _sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba,
+ uint ncores);
+static uint32 _sb_coresba(si_info_t *sii);
+static void *_sb_setcoreidx(si_info_t *sii, uint coreidx);
+
+#define SET_SBREG(sii, r, mask, val) \
+ W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
+#define REGS2SB(va) (sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)
+
+/* sonicsrev */
+#define SONICS_2_2 (SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
+#define SONICS_2_3 (SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)
+
+#define R_SBREG(sii, sbr) sb_read_sbreg((sii), (sbr))
+#define W_SBREG(sii, sbr, v) sb_write_sbreg((sii), (sbr), (v))
+#define AND_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
+#define OR_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
+
+static uint32
+sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr)
+{
+ uint8 tmp;
+ uint32 val, intr_val = 0;
+
+
+ /*
+	 * Compact flash only has an 11-bit address, while we need a 12-bit address.
+	 * MEM_SEG is OR'd with the other 11 address bits in hardware,
+	 * so we program MEM_SEG with the 12th bit when necessary (accessing sb registers).
+	 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
+ */
+ if (PCMCIA(sii)) {
+ INTR_OFF(sii, intr_val);
+ tmp = 1;
+ OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+ sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
+ }
+
+ val = R_REG(sii->osh, sbr);
+
+ if (PCMCIA(sii)) {
+ tmp = 0;
+ OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+ INTR_RESTORE(sii, intr_val);
+ }
+
+ return (val);
+}
+
+static void
+sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
+{
+ uint8 tmp;
+ volatile uint32 dummy;
+ uint32 intr_val = 0;
+
+
+ /*
+	 * Compact flash only has an 11-bit address, while we need a 12-bit address.
+	 * MEM_SEG is OR'd with the other 11 address bits in hardware,
+	 * so we program MEM_SEG with the 12th bit when necessary (accessing sb registers).
+	 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
+ */
+ if (PCMCIA(sii)) {
+ INTR_OFF(sii, intr_val);
+ tmp = 1;
+ OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+ sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
+ }
+
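+	/* On a PCMCIA bus the 32-bit value is written as two halfword writes, each
+	 * preceded by a dummy read of the register.
+	 */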
+ if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) {
+ dummy = R_REG(sii->osh, sbr);
+ BCM_REFERENCE(dummy);
+ W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
+ dummy = R_REG(sii->osh, sbr);
+ BCM_REFERENCE(dummy);
+ W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
+ } else
+ W_REG(sii->osh, sbr, v);
+
+ if (PCMCIA(sii)) {
+ tmp = 0;
+ OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+ INTR_RESTORE(sii, intr_val);
+ }
+}
+
+uint
+sb_coreid(si_t *sih)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
+}
+
+uint
+sb_intflag(si_t *sih)
+{
+ si_info_t *sii;
+ void *corereg;
+ sbconfig_t *sb;
+ uint origidx, intflag, intr_val = 0;
+
+ sii = SI_INFO(sih);
+
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+ corereg = si_setcore(sih, CC_CORE_ID, 0);
+ ASSERT(corereg != NULL);
+ sb = REGS2SB(corereg);
+ intflag = R_SBREG(sii, &sb->sbflagst);
+ sb_setcoreidx(sih, origidx);
+ INTR_RESTORE(sii, intr_val);
+
+ return intflag;
+}
+
+uint
+sb_flag(si_t *sih)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
+}
+
+void
+sb_setint(si_t *sih, int siflag)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+ uint32 vec;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ if (siflag == -1)
+ vec = 0;
+ else
+ vec = 1 << siflag;
+ W_SBREG(sii, &sb->sbintvec, vec);
+}
+
+/* return core index of the core with address 'sba' */
+static uint
+_sb_coreidx(si_info_t *sii, uint32 sba)
+{
+ uint i;
+
+ for (i = 0; i < sii->numcores; i ++)
+ if (sba == sii->coresba[i])
+ return i;
+ return BADIDX;
+}
+
+/* return core address of the current core */
+static uint32
+_sb_coresba(si_info_t *sii)
+{
+ uint32 sbaddr;
+
+
+ switch (BUSTYPE(sii->pub.bustype)) {
+ case SI_BUS: {
+ sbconfig_t *sb = REGS2SB(sii->curmap);
+ sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0));
+ break;
+ }
+
+ case PCI_BUS:
+ sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+ break;
+
+ case PCMCIA_BUS: {
+ uint8 tmp = 0;
+ OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
+ sbaddr = (uint32)tmp << 12;
+ OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
+ sbaddr |= (uint32)tmp << 16;
+ OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
+ sbaddr |= (uint32)tmp << 24;
+ break;
+ }
+
+ case SPI_BUS:
+ case SDIO_BUS:
+ sbaddr = (uint32)(uintptr)sii->curmap;
+ break;
+
+
+ default:
+ sbaddr = BADCOREADDR;
+ break;
+ }
+
+ return sbaddr;
+}
+
+uint
+sb_corevendor(si_t *sih)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
+}
+
+uint
+sb_corerev(si_t *sih)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+ uint sbidh;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+ sbidh = R_SBREG(sii, &sb->sbidhigh);
+
+ return (SBCOREREV(sbidh));
+}
+
+/* set core-specific control flags */
+void
+sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+ uint32 w;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ ASSERT((val & ~mask) == 0);
+
+ /* mask and set */
+ w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
+ (val << SBTML_SICF_SHIFT);
+ W_SBREG(sii, &sb->sbtmstatelow, w);
+}
+
+/* set/clear core-specific control flags */
+uint32
+sb_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+ uint32 w;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ ASSERT((val & ~mask) == 0);
+
+ /* mask and set */
+ if (mask || val) {
+ w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
+ (val << SBTML_SICF_SHIFT);
+ W_SBREG(sii, &sb->sbtmstatelow, w);
+ }
+
+	/* Return the new value.
+	 * For a write operation, the following readback ensures completion of the write operation.
+	 */
+ return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
+}
+
+/* set/clear core-specific status flags */
+uint32
+sb_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+ uint32 w;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ ASSERT((val & ~mask) == 0);
+ ASSERT((mask & ~SISF_CORE_BITS) == 0);
+
+ /* mask and set */
+ if (mask || val) {
+ w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
+ (val << SBTMH_SISF_SHIFT);
+ W_SBREG(sii, &sb->sbtmstatehigh, w);
+ }
+
+ /* return the new value */
+ return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
+}
+
+bool
+sb_iscoreup(si_t *sih)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ return ((R_SBREG(sii, &sb->sbtmstatelow) &
+ (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
+ (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
+}
+
+/*
+ * Switch to 'coreidx', issue a single arbitrary 32-bit register mask&set operation,
+ * switch back to the original core, and return the new value.
+ *
+ * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
+ *
+ * Also, when using pci/pcie, we can optimize away the core switching for pci registers
+ * and (on newer pci cores) chipcommon registers.
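+ *
+ * For example (a sketch), a read-only query of a chipcommon register can be issued
+ * as: sb_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipid), 0, 0);
+ * a mask and val of 0 perform no write, only the readback.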
+ */
+uint
+sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+ uint origidx = 0;
+ uint32 *r = NULL;
+ uint w;
+ uint intr_val = 0;
+ bool fast = FALSE;
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ ASSERT(GOODIDX(coreidx));
+ ASSERT(regoff < SI_CORE_SIZE);
+ ASSERT((val & ~mask) == 0);
+
+ if (coreidx >= SI_MAXCORES)
+ return 0;
+
+ if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
+ /* If internal bus, we can always get at everything */
+ fast = TRUE;
+		/* map if it does not exist */
+ if (!sii->regs[coreidx]) {
+ sii->regs[coreidx] = REG_MAP(sii->coresba[coreidx],
+ SI_CORE_SIZE);
+ ASSERT(GOODREGS(sii->regs[coreidx]));
+ }
+ r = (uint32 *)((uchar *)sii->regs[coreidx] + regoff);
+ } else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+ /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+ if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+ /* Chipc registers are mapped at 12KB */
+
+ fast = TRUE;
+ r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
+ } else if (sii->pub.buscoreidx == coreidx) {
+			/* pci registers are either in the last 2KB of an 8KB window
+			 * or, in pcie and pci rev 13, at 8KB
+			 */
+ fast = TRUE;
+ if (SI_FAST(sii))
+ r = (uint32 *)((char *)sii->curmap +
+ PCI_16KB0_PCIREGS_OFFSET + regoff);
+ else
+ r = (uint32 *)((char *)sii->curmap +
+ ((regoff >= SBCONFIGOFF) ?
+ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+ regoff);
+ }
+ }
+
+ if (!fast) {
+ INTR_OFF(sii, intr_val);
+
+ /* save current core index */
+ origidx = si_coreidx(&sii->pub);
+
+ /* switch core */
+ r = (uint32*) ((uchar*)sb_setcoreidx(&sii->pub, coreidx) + regoff);
+ }
+ ASSERT(r != NULL);
+
+ /* mask and set */
+ if (mask || val) {
+ if (regoff >= SBCONFIGOFF) {
+ w = (R_SBREG(sii, r) & ~mask) | val;
+ W_SBREG(sii, r, w);
+ } else {
+ w = (R_REG(sii->osh, r) & ~mask) | val;
+ W_REG(sii->osh, r, w);
+ }
+ }
+
+ /* readback */
+ if (regoff >= SBCONFIGOFF)
+ w = R_SBREG(sii, r);
+ else {
+ if ((CHIPID(sii->pub.chip) == BCM5354_CHIP_ID) &&
+ (coreidx == SI_CC_IDX) &&
+ (regoff == OFFSETOF(chipcregs_t, watchdog))) {
+ w = val;
+ } else
+ w = R_REG(sii->osh, r);
+ }
+
+ if (!fast) {
+ /* restore core index */
+ if (origidx != coreidx)
+ sb_setcoreidx(&sii->pub, origidx);
+
+ INTR_RESTORE(sii, intr_val);
+ }
+
+ return (w);
+}
+
+/* Scan the enumeration space to find all cores starting from the given
+ * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
+ * is the default core address at chip POR time and 'regs' is the virtual
+ * address that the default core is mapped at. 'ncores' is the number of
+ * cores expected on bus 'sbba'. It returns the total number of cores
+ * starting from bus 'sbba', inclusive.
+ */
+#define SB_MAXBUSES 2
+static uint
+_sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba, uint numcores)
+{
+ uint next;
+ uint ncc = 0;
+ uint i;
+
+ if (bus >= SB_MAXBUSES) {
+ SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
+ return 0;
+ }
+ SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));
+
+ /* Scan all cores on the bus starting from core 0.
+ * Core addresses must be contiguous on each bus.
+ */
+ for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
+ sii->coresba[next] = sbba + (i * SI_CORE_SIZE);
+
+ /* keep and reuse the initial register mapping */
+ if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (sii->coresba[next] == sba)) {
+ SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
+ sii->regs[next] = regs;
+ }
+
+ /* change core to 'next' and read its coreid */
+ sii->curmap = _sb_setcoreidx(sii, next);
+ sii->curidx = next;
+
+ sii->coreid[next] = sb_coreid(&sii->pub);
+
+ /* core specific processing... */
+ /* chipc provides # cores */
+ if (sii->coreid[next] == CC_CORE_ID) {
+ chipcregs_t *cc = (chipcregs_t *)sii->curmap;
+ uint32 ccrev = sb_corerev(&sii->pub);
+
+ /* determine numcores - this is the total # cores in the chip */
+ if (((ccrev == 4) || (ccrev >= 6)))
+ numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >>
+ CID_CC_SHIFT;
+ else {
+ /* Older chips */
+ uint chip = CHIPID(sii->pub.chip);
+
+ if (chip == BCM4306_CHIP_ID) /* < 4306c0 */
+ numcores = 6;
+ else if (chip == BCM4704_CHIP_ID)
+ numcores = 9;
+ else if (chip == BCM5365_CHIP_ID)
+ numcores = 7;
+ else {
+ SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n",
+ chip));
+ ASSERT(0);
+ numcores = 1;
+ }
+ }
+ SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores,
+ sii->pub.issim ? "QT" : ""));
+ }
+ /* scan bridged SB(s) and add results to the end of the list */
+ else if (sii->coreid[next] == OCP_CORE_ID) {
+ sbconfig_t *sb = REGS2SB(sii->curmap);
+ uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
+ uint nsbcc;
+
+ sii->numcores = next + 1;
+
+ if ((nsbba & 0xfff00000) != SI_ENUM_BASE)
+ continue;
+ nsbba &= 0xfffff000;
+ if (_sb_coreidx(sii, nsbba) != BADIDX)
+ continue;
+
+ nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
+ nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc);
+ if (sbba == SI_ENUM_BASE)
+ numcores -= nsbcc;
+ ncc += nsbcc;
+ }
+ }
+
+ SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));
+
+ sii->numcores = i + ncc;
+ return sii->numcores;
+}
+
+/* scan the sb enumerated space to identify all cores */
+void
+sb_scan(si_t *sih, void *regs, uint devid)
+{
+ si_info_t *sii;
+ uint32 origsba;
+ sbconfig_t *sb;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;
+
+ /* Save the current core info and validate it later till we know
+ * for sure what is good and what is bad.
+ */
+ origsba = _sb_coresba(sii);
+
+ /* scan all SB(s) starting from SI_ENUM_BASE */
+ sii->numcores = _sb_scan(sii, origsba, regs, 0, SI_ENUM_BASE, 1);
+}
+
+/*
+ * This function changes logical "focus" to the indicated core;
+ * must be called with interrupts off.
+ * Moreover, callers should keep interrupts off during switching out of and back to d11 core
+ */
+void *
+sb_setcoreidx(si_t *sih, uint coreidx)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ if (coreidx >= sii->numcores)
+ return (NULL);
+
+ /*
+ * If the user has provided an interrupt mask enabled function,
+ * then assert interrupts are disabled before switching the core.
+ */
+ ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
+
+ sii->curmap = _sb_setcoreidx(sii, coreidx);
+ sii->curidx = coreidx;
+
+ return (sii->curmap);
+}
+
+/* This function changes the logical "focus" to the indicated core.
+ * Return the current core's virtual address.
+ */
+static void *
+_sb_setcoreidx(si_info_t *sii, uint coreidx)
+{
+ uint32 sbaddr = sii->coresba[coreidx];
+ void *regs;
+
+ switch (BUSTYPE(sii->pub.bustype)) {
+ case SI_BUS:
+ /* map new one */
+ if (!sii->regs[coreidx]) {
+ sii->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
+ ASSERT(GOODREGS(sii->regs[coreidx]));
+ }
+ regs = sii->regs[coreidx];
+ break;
+
+ case PCI_BUS:
+ /* point bar0 window */
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr);
+ regs = sii->curmap;
+ break;
+
+ case PCMCIA_BUS: {
+ uint8 tmp = (sbaddr >> 12) & 0x0f;
+ OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
+ tmp = (sbaddr >> 16) & 0xff;
+ OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
+ tmp = (sbaddr >> 24) & 0xff;
+ OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
+ regs = sii->curmap;
+ break;
+ }
+ case SPI_BUS:
+ case SDIO_BUS:
+ /* map new one */
+ if (!sii->regs[coreidx]) {
+ sii->regs[coreidx] = (void *)(uintptr)sbaddr;
+ ASSERT(GOODREGS(sii->regs[coreidx]));
+ }
+ regs = sii->regs[coreidx];
+ break;
+
+
+ default:
+ ASSERT(0);
+ regs = NULL;
+ break;
+ }
+
+ return regs;
+}
+
+/* Return the address of sbadmatch0/1/2/3 register */
+static volatile uint32 *
+sb_admatch(si_info_t *sii, uint asidx)
+{
+ sbconfig_t *sb;
+ volatile uint32 *addrm;
+
+ sb = REGS2SB(sii->curmap);
+
+ switch (asidx) {
+ case 0:
+ addrm = &sb->sbadmatch0;
+ break;
+
+ case 1:
+ addrm = &sb->sbadmatch1;
+ break;
+
+ case 2:
+ addrm = &sb->sbadmatch2;
+ break;
+
+ case 3:
+ addrm = &sb->sbadmatch3;
+ break;
+
+ default:
+ SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__, asidx));
+ return 0;
+ }
+
+ return (addrm);
+}
+
+/* Return the number of address spaces in current core */
+int
+sb_numaddrspaces(si_t *sih)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ /* + 1 because of enumeration space */
+ return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1;
+}
+
+/* Return the address of the nth address space in the current core */
+uint32
+sb_addrspace(si_t *sih, uint asidx)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx))));
+}
+
+/* Return the size of the nth address space in the current core */
+uint32
+sb_addrspacesize(si_t *sih, uint asidx)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx))));
+}
+
+
+/* do buffered registers update */
+void
+sb_commit(si_t *sih)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+
+ sii = SI_INFO(sih);
+
+ origidx = sii->curidx;
+ ASSERT(GOODIDX(origidx));
+
+ INTR_OFF(sii, intr_val);
+
+ /* switch over to chipcommon core if there is one, else use pci */
+ if (sii->pub.ccrev != NOREV) {
+ chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ ASSERT(ccregs != NULL);
+
+ /* do the buffer registers update */
+ W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT);
+ W_REG(sii->osh, &ccregs->broadcastdata, 0x0);
+ } else
+ ASSERT(0);
+
+ /* restore core index */
+ sb_setcoreidx(sih, origidx);
+ INTR_RESTORE(sii, intr_val);
+}
+
+void
+sb_core_disable(si_t *sih, uint32 bits)
+{
+ si_info_t *sii;
+ volatile uint32 dummy;
+ sbconfig_t *sb;
+
+ sii = SI_INFO(sih);
+
+ ASSERT(GOODREGS(sii->curmap));
+ sb = REGS2SB(sii->curmap);
+
+ /* if core is already in reset, just return */
+ if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
+ return;
+
+ /* if clocks are not enabled, put into reset and return */
+ if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
+ goto disable;
+
+ /* set target reject and spin until busy is clear (preserve core-specific bits) */
+ OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
+ dummy = R_SBREG(sii, &sb->sbtmstatelow);
+ BCM_REFERENCE(dummy);
+ OSL_DELAY(1);
+ SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
+ if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
+ SI_ERROR(("%s: target state still busy\n", __FUNCTION__));
+
+ if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
+ OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
+ dummy = R_SBREG(sii, &sb->sbimstate);
+ BCM_REFERENCE(dummy);
+ OSL_DELAY(1);
+ SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
+ }
+
+ /* set reset and reject while enabling the clocks */
+ W_SBREG(sii, &sb->sbtmstatelow,
+ (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+ SBTML_REJ | SBTML_RESET));
+ dummy = R_SBREG(sii, &sb->sbtmstatelow);
+ BCM_REFERENCE(dummy);
+ OSL_DELAY(10);
+
+ /* don't forget to clear the initiator reject bit */
+ if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
+ AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);
+
+disable:
+ /* leave reset and reject asserted */
+ W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
+ OSL_DELAY(1);
+}
+
+/* reset and re-enable a core
+ * inputs:
+ * bits - core specific bits that are set during and after reset sequence
+ * resetbits - core specific bits that are set only during reset sequence
+ */
+void
+sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+ volatile uint32 dummy;
+
+ sii = SI_INFO(sih);
+ ASSERT(GOODREGS(sii->curmap));
+ sb = REGS2SB(sii->curmap);
+
+ /*
+ * Must do the disable sequence first to work for arbitrary current core state.
+ */
+ sb_core_disable(sih, (bits | resetbits));
+
+ /*
+ * Now do the initialization sequence.
+ */
+
+ /* set reset while enabling the clock and forcing them on throughout the core */
+ W_SBREG(sii, &sb->sbtmstatelow,
+ (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+ SBTML_RESET));
+ dummy = R_SBREG(sii, &sb->sbtmstatelow);
+ BCM_REFERENCE(dummy);
+ OSL_DELAY(1);
+
+ if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) {
+ W_SBREG(sii, &sb->sbtmstatehigh, 0);
+ }
+ if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
+ AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
+ }
+
+ /* clear reset and allow it to propagate throughout the core */
+ W_SBREG(sii, &sb->sbtmstatelow,
+ ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
+ dummy = R_SBREG(sii, &sb->sbtmstatelow);
+ BCM_REFERENCE(dummy);
+ OSL_DELAY(1);
+
+ /* leave clock enabled */
+ W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
+ dummy = R_SBREG(sii, &sb->sbtmstatelow);
+ BCM_REFERENCE(dummy);
+ OSL_DELAY(1);
+}
+
+/*
+ * Set the initiator timeout for the "master core".
+ * The master core is defined to be the core in control
+ * of the chip and so it issues accesses to non-memory
+ * locations (because of DMA, *any* core can access memory).
+ *
+ * The routine uses the bus to decide who is the master:
+ * SI_BUS => mips
+ * JTAG_BUS => chipc
+ * PCI_BUS => pci or pcie
+ * PCMCIA_BUS => pcmcia
+ * SDIO_BUS => pcmcia
+ *
+ * This routine exists so callers can disable initiator
+ * timeouts so accesses to very slow devices like otp
+ * won't cause an abort. The routine allows arbitrary
+ * settings of the service and request timeouts, though.
+ *
+ * Returns the timeout state before changing it or -1
+ * on error.
+ */
+
+#define TO_MASK (SBIMCL_RTO_MASK | SBIMCL_STO_MASK)
+
+uint32
+sb_set_initiator_to(si_t *sih, uint32 to, uint idx)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+ uint32 tmp, ret = 0xffffffff;
+ sbconfig_t *sb;
+
+ sii = SI_INFO(sih);
+
+ if ((to & ~TO_MASK) != 0)
+ return ret;
+
+ /* Figure out the master core */
+ if (idx == BADIDX) {
+ switch (BUSTYPE(sii->pub.bustype)) {
+ case PCI_BUS:
+ idx = sii->pub.buscoreidx;
+ break;
+ case JTAG_BUS:
+ idx = SI_CC_IDX;
+ break;
+ case PCMCIA_BUS:
+ case SDIO_BUS:
+ idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0);
+ break;
+ case SI_BUS:
+ idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0);
+ break;
+ default:
+ ASSERT(0);
+ }
+ if (idx == BADIDX)
+ return ret;
+ }
+
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ sb = REGS2SB(sb_setcoreidx(sih, idx));
+
+ tmp = R_SBREG(sii, &sb->sbimconfiglow);
+ ret = tmp & TO_MASK;
+ W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);
+
+ sb_commit(sih);
+ sb_setcoreidx(sih, origidx);
+ INTR_RESTORE(sii, intr_val);
+ return ret;
+}
+
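+/* Extract the backplane base address encoded in an sbadmatch register; the admatch
+ * type field selects which base mask applies.
+ */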
+uint32
+sb_base(uint32 admatch)
+{
+ uint32 base;
+ uint type;
+
+ type = admatch & SBAM_TYPE_MASK;
+ ASSERT(type < 3);
+
+ base = 0;
+
+ if (type == 0) {
+ base = admatch & SBAM_BASE0_MASK;
+ } else if (type == 1) {
+ ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
+ base = admatch & SBAM_BASE1_MASK;
+ } else if (type == 2) {
+ ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
+ base = admatch & SBAM_BASE2_MASK;
+ }
+
+ return (base);
+}
+
+uint32
+sb_size(uint32 admatch)
+{
+ uint32 size;
+ uint type;
+
+ type = admatch & SBAM_TYPE_MASK;
+ ASSERT(type < 3);
+
+ size = 0;
+
+ if (type == 0) {
+ size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
+ } else if (type == 1) {
+ ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
+ size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
+ } else if (type == 2) {
+ ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
+ size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
+ }
+
+ return (size);
+}
diff --git a/drivers/net/wireless/bcmdhd/siutils.c b/drivers/net/wireless/bcmdhd/siutils.c
new file mode 100644
index 000000000000..dcd0e389bb70
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/siutils.c
@@ -0,0 +1,2356 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: siutils.c 328733 2012-04-20 14:49:55Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+#include <sbpcmcia.h>
+#include <sbsocram.h>
+#include <bcmsdh.h>
+#include <sdio.h>
+#include <sbsdio.h>
+#include <sbhnddma.h>
+#include <sbsdpcmdev.h>
+#include <bcmsdpcm.h>
+#include <hndpmu.h>
+
+#include "siutils_priv.h"
+
+/* local prototypes */
+static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs,
+ uint bustype, void *sdh, char **vars, uint *varsz);
+static bool si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh);
+static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
+ uint *origidx, void *regs);
+
+
+
+/* global variable to indicate reservation/release of gpio's */
+static uint32 si_gpioreservation = 0;
+
+/* global flag to prevent shared resources from being initialized multiple times in si_attach() */
+
+int do_4360_pcie2_war = 0;
+
+/*
+ * Allocate a si handle.
+ * devid - pci device id (used to determine chip#)
+ * osh - opaque OS handle
+ * regs - virtual address of initial core registers
+ * bustype - pci/pcmcia/sb/sdio/etc
+ * vars - pointer to a pointer area for "environment" variables
+ * varsz - pointer to int to return the size of the vars
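+ *
+ * A minimal usage sketch (setup of 'osh', 'regs' and the vars pointers is assumed,
+ * not shown):
+ *
+ *	si_t *sih = si_attach(devid, osh, regs, PCI_BUS, NULL, &vars, &varsz);
+ *	if (sih == NULL)
+ *		return NULL;
+ *	...
+ *	si_detach(sih);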
+ */
+si_t *
+si_attach(uint devid, osl_t *osh, void *regs,
+ uint bustype, void *sdh, char **vars, uint *varsz)
+{
+ si_info_t *sii;
+
+ /* alloc si_info_t */
+ if ((sii = MALLOC(osh, sizeof (si_info_t))) == NULL) {
+ SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
+ return (NULL);
+ }
+
+ if (si_doattach(sii, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) {
+ MFREE(osh, sii, sizeof(si_info_t));
+ return (NULL);
+ }
+ sii->vars = vars ? *vars : NULL;
+ sii->varsz = varsz ? *varsz : 0;
+
+ return (si_t *)sii;
+}
+
+/* global kernel resource */
+static si_info_t ksii;
+
+static uint32 wd_msticks; /* watchdog timer ticks normalized to ms */
+
+/* generic kernel variant of si_attach() */
+si_t *
+si_kattach(osl_t *osh)
+{
+ static bool ksii_attached = FALSE;
+
+ if (!ksii_attached) {
+ void *regs;
+ regs = REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
+
+ if (si_doattach(&ksii, BCM4710_DEVICE_ID, osh, regs,
+ SI_BUS, NULL,
+ osh != SI_OSH ? &ksii.vars : NULL,
+ osh != SI_OSH ? &ksii.varsz : NULL) == NULL) {
+ SI_ERROR(("si_kattach: si_doattach failed\n"));
+ REG_UNMAP(regs);
+ return NULL;
+ }
+ REG_UNMAP(regs);
+
+ /* save ticks normalized to ms for si_watchdog_ms() */
+ if (PMUCTL_ENAB(&ksii.pub)) {
+ /* based on 32KHz ILP clock */
+ wd_msticks = 32;
+ } else {
+ wd_msticks = ALP_CLOCK / 1000;
+ }
+
+ ksii_attached = TRUE;
+ SI_MSG(("si_kattach done. ccrev = %d, wd_msticks = %d\n",
+ ksii.pub.ccrev, wd_msticks));
+ }
+
+ return &ksii.pub;
+}
+
+
+static bool
+si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh)
+{
+	/* need to set the memseg flag for CF cards before any sb register access */
+ if (BUSTYPE(bustype) == PCMCIA_BUS)
+ sii->memseg = TRUE;
+
+
+ if (BUSTYPE(bustype) == SDIO_BUS) {
+ int err;
+ uint8 clkset;
+
+ /* Try forcing SDIO core to do ALPAvail request only */
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ if (!err) {
+ uint8 clkval;
+
+ /* If register supported, wait for ALPAvail and then force ALP */
+ clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+ if ((clkval & ~SBSDIO_AVBITS) == clkset) {
+ SPINWAIT(((clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL)), !SBSDIO_ALPAV(clkval)),
+ PMU_MAX_TRANSITION_DLY);
+ if (!SBSDIO_ALPAV(clkval)) {
+ SI_ERROR(("timeout on ALPAV wait, clkval 0x%02x\n",
+ clkval));
+ return FALSE;
+ }
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ clkset, &err);
+ OSL_DELAY(65);
+ }
+ }
+
+ /* Also, disable the extra SDIO pull-ups */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+ }
+
+
+ return TRUE;
+}
+
+static bool
+si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
+ uint *origidx, void *regs)
+{
+ bool pci, pcie, pcie_gen2 = FALSE;
+ uint i;
+ uint pciidx, pcieidx, pcirev, pcierev;
+
+ cc = si_setcoreidx(&sii->pub, SI_CC_IDX);
+ ASSERT((uintptr)cc);
+
+ /* get chipcommon rev */
+ sii->pub.ccrev = (int)si_corerev(&sii->pub);
+
+ /* get chipcommon chipstatus */
+ if (sii->pub.ccrev >= 11)
+ sii->pub.chipst = R_REG(sii->osh, &cc->chipstatus);
+
+	/* get chipcommon capabilities */
+ sii->pub.cccaps = R_REG(sii->osh, &cc->capabilities);
+ /* get chipcommon extended capabilities */
+
+ if (sii->pub.ccrev >= 35)
+ sii->pub.cccaps_ext = R_REG(sii->osh, &cc->capabilities_ext);
+
+ /* get pmu rev and caps */
+ if (sii->pub.cccaps & CC_CAP_PMU) {
+ sii->pub.pmucaps = R_REG(sii->osh, &cc->pmucapabilities);
+ sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
+ }
+
+ SI_MSG(("Chipc: rev %d, caps 0x%x, chipst 0x%x pmurev %d, pmucaps 0x%x\n",
+ sii->pub.ccrev, sii->pub.cccaps, sii->pub.chipst, sii->pub.pmurev,
+ sii->pub.pmucaps));
+
+	/* figure out bus/original core idx */
+ sii->pub.buscoretype = NODEV_CORE_ID;
+ sii->pub.buscorerev = (uint)NOREV;
+ sii->pub.buscoreidx = BADIDX;
+
+ pci = pcie = FALSE;
+ pcirev = pcierev = (uint)NOREV;
+ pciidx = pcieidx = BADIDX;
+
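+	/* Walk all discovered cores: record where the host bus core (PCI/PCIe, PCMCIA or
+	 * SDIO) lives, and note the index of the core that was mapped on entry.
+	 */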
+ for (i = 0; i < sii->numcores; i++) {
+ uint cid, crev;
+
+ si_setcoreidx(&sii->pub, i);
+ cid = si_coreid(&sii->pub);
+ crev = si_corerev(&sii->pub);
+
+ /* Display cores found */
+ SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n",
+ i, cid, crev, sii->coresba[i], sii->regs[i]));
+
+ if (BUSTYPE(bustype) == PCI_BUS) {
+ if (cid == PCI_CORE_ID) {
+ pciidx = i;
+ pcirev = crev;
+ pci = TRUE;
+ } else if ((cid == PCIE_CORE_ID) || (cid == PCIE2_CORE_ID)) {
+ pcieidx = i;
+ pcierev = crev;
+ pcie = TRUE;
+ if (cid == PCIE2_CORE_ID)
+ pcie_gen2 = TRUE;
+ }
+ } else if ((BUSTYPE(bustype) == PCMCIA_BUS) &&
+ (cid == PCMCIA_CORE_ID)) {
+ sii->pub.buscorerev = crev;
+ sii->pub.buscoretype = cid;
+ sii->pub.buscoreidx = i;
+ }
+ else if (((BUSTYPE(bustype) == SDIO_BUS) ||
+ (BUSTYPE(bustype) == SPI_BUS)) &&
+ ((cid == PCMCIA_CORE_ID) ||
+ (cid == SDIOD_CORE_ID))) {
+ sii->pub.buscorerev = crev;
+ sii->pub.buscoretype = cid;
+ sii->pub.buscoreidx = i;
+ }
+
+ /* find the core idx before entering this func. */
+ if ((savewin && (savewin == sii->coresba[i])) ||
+ (regs == sii->regs[i]))
+ *origidx = i;
+ }
+
+ if (pci) {
+ sii->pub.buscoretype = PCI_CORE_ID;
+ sii->pub.buscorerev = pcirev;
+ sii->pub.buscoreidx = pciidx;
+ } else if (pcie) {
+ if (pcie_gen2)
+ sii->pub.buscoretype = PCIE2_CORE_ID;
+ else
+ sii->pub.buscoretype = PCIE_CORE_ID;
+ sii->pub.buscorerev = pcierev;
+ sii->pub.buscoreidx = pcieidx;
+ }
+
+ SI_VMSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx, sii->pub.buscoretype,
+ sii->pub.buscorerev));
+
+ if (BUSTYPE(sii->pub.bustype) == SI_BUS && (CHIPID(sii->pub.chip) == BCM4712_CHIP_ID) &&
+ (sii->pub.chippkg != BCM4712LARGE_PKG_ID) && (CHIPREV(sii->pub.chiprev) <= 3))
+ OR_REG(sii->osh, &cc->slow_clk_ctl, SCC_SS_XTAL);
+
+
+	/* Make sure any on-chip ARM is off (in case strapping is wrong, or downloaded code
+	 * was already running).
+	 */
+ if ((BUSTYPE(bustype) == SDIO_BUS) || (BUSTYPE(bustype) == SPI_BUS)) {
+ if (si_setcore(&sii->pub, ARM7S_CORE_ID, 0) ||
+ si_setcore(&sii->pub, ARMCM3_CORE_ID, 0))
+ si_core_disable(&sii->pub, 0);
+ }
+
+ /* return to the original core */
+ si_setcoreidx(&sii->pub, *origidx);
+
+ return TRUE;
+}
+
+
+
+
+static si_info_t *
+si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs,
+ uint bustype, void *sdh, char **vars, uint *varsz)
+{
+ struct si_pub *sih = &sii->pub;
+ uint32 w, savewin;
+ chipcregs_t *cc;
+ char *pvars = NULL;
+ uint origidx;
+
+ ASSERT(GOODREGS(regs));
+
+ bzero((uchar*)sii, sizeof(si_info_t));
+
+ savewin = 0;
+
+ sih->buscoreidx = BADIDX;
+
+ sii->curmap = regs;
+ sii->sdh = sdh;
+ sii->osh = osh;
+
+
+
+ /* find Chipcommon address */
+ if (bustype == PCI_BUS) {
+ savewin = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+ if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
+ savewin = SI_ENUM_BASE;
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, SI_ENUM_BASE);
+ if (!regs)
+ return NULL;
+ cc = (chipcregs_t *)regs;
+ } else if ((bustype == SDIO_BUS) || (bustype == SPI_BUS)) {
+ cc = (chipcregs_t *)sii->curmap;
+ } else {
+ cc = (chipcregs_t *)REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
+ }
+
+ sih->bustype = bustype;
+ if (bustype != BUSTYPE(bustype)) {
+ SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n",
+ bustype, BUSTYPE(bustype)));
+ return NULL;
+ }
+
+ /* bus/core/clk setup for register access */
+ if (!si_buscore_prep(sii, bustype, devid, sdh)) {
+ SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n", bustype));
+ return NULL;
+ }
+
+ /* ChipID recognition.
+ * We assume we can read chipid at offset 0 from the regs arg.
+ * If we add other chiptypes (or if we need to support old sdio hosts w/o chipcommon),
+ * some way of recognizing them needs to be added here.
+ */
+ if (!cc) {
+ SI_ERROR(("%s: chipcommon register space is null \n", __FUNCTION__));
+ return NULL;
+ }
+ w = R_REG(osh, &cc->chipid);
+ sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+	/* Might as well fill in chip id, rev & pkg */
+ sih->chip = w & CID_ID_MASK;
+ sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
+ sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
+
+ if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) && (sih->chiprev == 0) &&
+ (sih->chippkg != BCM4329_289PIN_PKG_ID)) {
+ sih->chippkg = BCM4329_182PIN_PKG_ID;
+ }
+ sih->issim = IS_SIM(sih->chippkg);
+
+ /* scan for cores */
+ if (CHIPTYPE(sii->pub.socitype) == SOCI_SB) {
+ SI_MSG(("Found chip type SB (0x%08x)\n", w));
+ sb_scan(&sii->pub, regs, devid);
+ } else if (CHIPTYPE(sii->pub.socitype) == SOCI_AI) {
+ SI_MSG(("Found chip type AI (0x%08x)\n", w));
+ /* pass chipc address instead of original core base */
+ ai_scan(&sii->pub, (void *)(uintptr)cc, devid);
+ } else if (CHIPTYPE(sii->pub.socitype) == SOCI_UBUS) {
+ SI_MSG(("Found chip type UBUS (0x%08x), chip id = 0x%4x\n", w, sih->chip));
+ /* pass chipc address instead of original core base */
+ ub_scan(&sii->pub, (void *)(uintptr)cc, devid);
+ } else {
+ SI_ERROR(("Found chip of unknown type (0x%08x)\n", w));
+ return NULL;
+ }
+ /* no cores found, bail out */
+ if (sii->numcores == 0) {
+ SI_ERROR(("si_doattach: could not find any cores\n"));
+ return NULL;
+ }
+ /* bus/core/clk setup */
+ origidx = SI_CC_IDX;
+ if (!si_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) {
+ SI_ERROR(("si_doattach: si_buscore_setup failed\n"));
+ goto exit;
+ }
+
+ if (CHIPID(sih->chip) == BCM4322_CHIP_ID && (((sih->chipst & CST4322_SPROM_OTP_SEL_MASK)
+ >> CST4322_SPROM_OTP_SEL_SHIFT) == (CST4322_OTP_PRESENT |
+ CST4322_SPROM_PRESENT))) {
+ SI_ERROR(("%s: Invalid setting: both SPROM and OTP strapped.\n", __FUNCTION__));
+ return NULL;
+ }
+
+ /* assume current core is CC */
+ if ((sii->pub.ccrev == 0x25) && ((CHIPID(sih->chip) == BCM43236_CHIP_ID ||
+ CHIPID(sih->chip) == BCM43235_CHIP_ID ||
+ CHIPID(sih->chip) == BCM43234_CHIP_ID ||
+ CHIPID(sih->chip) == BCM43238_CHIP_ID) &&
+ (CHIPREV(sii->pub.chiprev) <= 2))) {
+
+ if ((cc->chipstatus & CST43236_BP_CLK) != 0) {
+ uint clkdiv;
+ clkdiv = R_REG(osh, &cc->clkdiv);
+ /* otp_clk_div is even number, 120/14 < 9mhz */
+ clkdiv = (clkdiv & ~CLKD_OTP) | (14 << CLKD_OTP_SHIFT);
+ W_REG(osh, &cc->clkdiv, clkdiv);
+ SI_ERROR(("%s: set clkdiv to %x\n", __FUNCTION__, clkdiv));
+ }
+ OSL_DELAY(10);
+ }
+
+ if (bustype == PCI_BUS) {
+
+ }
+
+ pvars = NULL;
+ BCM_REFERENCE(pvars);
+
+
+
+ if (sii->pub.ccrev >= 20) {
+ uint32 gpiopullup = 0, gpiopulldown = 0;
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ ASSERT(cc != NULL);
+
+ /* 4314/43142 has pin muxing, don't clear gpio bits */
+ if ((CHIPID(sih->chip) == BCM4314_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43142_CHIP_ID)) {
+ gpiopullup |= 0x402e0;
+ gpiopulldown |= 0x20500;
+ }
+
+ W_REG(osh, &cc->gpiopullup, gpiopullup);
+ W_REG(osh, &cc->gpiopulldown, gpiopulldown);
+ si_setcoreidx(sih, origidx);
+ }
+
+
+ /* clear any previous epidiag-induced target abort */
+ ASSERT(!si_taclear(sih, FALSE));
+
+ return (sii);
+
+exit:
+
+ return NULL;
+}
+
+/* may be called with core in reset */
+void
+si_detach(si_t *sih)
+{
+ si_info_t *sii;
+ uint idx;
+
+
+ sii = SI_INFO(sih);
+
+ if (sii == NULL)
+ return;
+
+ if (BUSTYPE(sih->bustype) == SI_BUS)
+ for (idx = 0; idx < SI_MAXCORES; idx++)
+ if (sii->regs[idx]) {
+ REG_UNMAP(sii->regs[idx]);
+ sii->regs[idx] = NULL;
+ }
+
+
+
+#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS)
+ if (sii != &ksii)
+#endif /* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */
+ MFREE(sii->osh, sii, sizeof(si_info_t));
+}
+
+void *
+si_osh(si_t *sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ return sii->osh;
+}
+
+void
+si_setosh(si_t *sih, osl_t *osh)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ if (sii->osh != NULL) {
+ SI_ERROR(("osh is already set....\n"));
+ ASSERT(!sii->osh);
+ }
+ sii->osh = osh;
+}
+
+/* register driver interrupt disabling and restoring callback functions */
+void
+si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
+ void *intrsenabled_fn, void *intr_arg)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ sii->intr_arg = intr_arg;
+ sii->intrsoff_fn = (si_intrsoff_t)intrsoff_fn;
+ sii->intrsrestore_fn = (si_intrsrestore_t)intrsrestore_fn;
+ sii->intrsenabled_fn = (si_intrsenabled_t)intrsenabled_fn;
+	/* Save the current core id. When this function is called, the current core
+	 * must be the core which provides the driver functions (il, et, wl, etc.).
+	 */
+ sii->dev_coreid = sii->coreid[sii->curidx];
+}
+
+void
+si_deregister_intr_callback(si_t *sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ sii->intrsoff_fn = NULL;
+}
+
+uint
+si_intflag(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_intflag(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return R_REG(sii->osh, ((uint32 *)(uintptr)
+ (sii->oob_router + OOB_STATUSA)));
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+uint
+si_flag(si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_flag(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_flag(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_flag(sih);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+void
+si_setint(si_t *sih, int siflag)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ sb_setint(sih, siflag);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ ai_setint(sih, siflag);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ ub_setint(sih, siflag);
+ else
+ ASSERT(0);
+}
+
+uint
+si_coreid(si_t *sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ return sii->coreid[sii->curidx];
+}
+
+uint
+si_coreidx(si_t *sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ return sii->curidx;
+}
+
+/* return the core-type instantiation # of the current core */
+uint
+si_coreunit(si_t *sih)
+{
+ si_info_t *sii;
+ uint idx;
+ uint coreid;
+ uint coreunit;
+ uint i;
+
+ sii = SI_INFO(sih);
+ coreunit = 0;
+
+ idx = sii->curidx;
+
+ ASSERT(GOODREGS(sii->curmap));
+ coreid = si_coreid(sih);
+
+ /* count the cores of our type */
+ for (i = 0; i < idx; i++)
+ if (sii->coreid[i] == coreid)
+ coreunit++;
+
+ return (coreunit);
+}
+
+uint
+si_corevendor(si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_corevendor(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_corevendor(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_corevendor(sih);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+bool
+si_backplane64(si_t *sih)
+{
+ return ((sih->cccaps & CC_CAP_BKPLN64) != 0);
+}
+
+uint
+si_corerev(si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_corerev(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_corerev(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_corerev(sih);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+/* return index of coreid or BADIDX if not found */
+uint
+si_findcoreidx(si_t *sih, uint coreid, uint coreunit)
+{
+ si_info_t *sii;
+ uint found;
+ uint i;
+
+ sii = SI_INFO(sih);
+
+ found = 0;
+
+ for (i = 0; i < sii->numcores; i++)
+ if (sii->coreid[i] == coreid) {
+ if (found == coreunit)
+ return (i);
+ found++;
+ }
+
+ return (BADIDX);
+}
+
+/* return list of found cores */
+uint
+si_corelist(si_t *sih, uint coreid[])
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ bcopy((uchar*)sii->coreid, (uchar*)coreid, (sii->numcores * sizeof(uint)));
+ return (sii->numcores);
+}
+
+/* return current register mapping */
+void *
+si_coreregs(si_t *sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ ASSERT(GOODREGS(sii->curmap));
+
+ return (sii->curmap);
+}
+
+/*
+ * This function changes logical "focus" to the indicated core;
+ * must be called with interrupts off.
+ * Moreover, callers should keep interrupts off during switching out of and back to d11 core
+ */
+void *
+si_setcore(si_t *sih, uint coreid, uint coreunit)
+{
+ uint idx;
+
+ idx = si_findcoreidx(sih, coreid, coreunit);
+ if (!GOODIDX(idx))
+ return (NULL);
+
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_setcoreidx(sih, idx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_setcoreidx(sih, idx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_setcoreidx(sih, idx);
+ else {
+ ASSERT(0);
+ return NULL;
+ }
+}
+
+void *
+si_setcoreidx(si_t *sih, uint coreidx)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_setcoreidx(sih, coreidx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_setcoreidx(sih, coreidx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_setcoreidx(sih, coreidx);
+ else {
+ ASSERT(0);
+ return NULL;
+ }
+}
+
+/* Turn off interrupt as required by sb_setcore, before switch core */
+void *
+si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val)
+{
+ void *cc;
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ if (SI_FAST(sii)) {
+ /* Overloading the origidx variable to remember the coreid,
+ * this works because the core ids cannot be confused with
+ * core indices.
+ */
+ *origidx = coreid;
+ if (coreid == CC_CORE_ID)
+ return (void *)CCREGS_FAST(sii);
+ else if (coreid == sih->buscoretype)
+ return (void *)PCIEREGS(sii);
+ }
+ INTR_OFF(sii, *intr_val);
+ *origidx = sii->curidx;
+ cc = si_setcore(sih, coreid, 0);
+ ASSERT(cc != NULL);
+
+ return cc;
+}
+
+/* restore coreidx and restore interrupt */
+void
+si_restore_core(si_t *sih, uint coreid, uint intr_val)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ if (SI_FAST(sii) && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype)))
+ return;
+
+ si_setcoreidx(sih, coreid);
+ INTR_RESTORE(sii, intr_val);
+}
+
+int
+si_numaddrspaces(si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_numaddrspaces(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_numaddrspaces(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_numaddrspaces(sih);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+uint32
+si_addrspace(si_t *sih, uint asidx)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_addrspace(sih, asidx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_addrspace(sih, asidx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_addrspace(sih, asidx);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+uint32
+si_addrspacesize(si_t *sih, uint asidx)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_addrspacesize(sih, asidx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_addrspacesize(sih, asidx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_addrspacesize(sih, asidx);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+void
+si_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
+{
+ /* Only supported for SOCI_AI */
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ ai_coreaddrspaceX(sih, asidx, addr, size);
+ else
+ *size = 0;
+}
+
+uint32
+si_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_core_cflags(sih, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_core_cflags(sih, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_core_cflags(sih, mask, val);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+void
+si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ sb_core_cflags_wo(sih, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ ai_core_cflags_wo(sih, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ ub_core_cflags_wo(sih, mask, val);
+ else
+ ASSERT(0);
+}
+
+uint32
+si_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_core_sflags(sih, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_core_sflags(sih, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_core_sflags(sih, mask, val);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+bool
+si_iscoreup(si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_iscoreup(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_iscoreup(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_iscoreup(sih);
+ else {
+ ASSERT(0);
+ return FALSE;
+ }
+}
+
+uint
+si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
+{
+	/* only for AI backplane chips */
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return (ai_wrap_reg(sih, offset, mask, val));
+ return 0;
+}
+
+uint
+si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_corereg(sih, coreidx, regoff, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_corereg(sih, coreidx, regoff, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_corereg(sih, coreidx, regoff, mask, val);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+void
+si_core_disable(si_t *sih, uint32 bits)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ sb_core_disable(sih, bits);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ ai_core_disable(sih, bits);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ ub_core_disable(sih, bits);
+}
+
+void
+si_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ sb_core_reset(sih, bits, resetbits);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ ai_core_reset(sih, bits, resetbits);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ ub_core_reset(sih, bits, resetbits);
+}
+
+/* Run bist on current core. Caller needs to take care of core-specific bist hazards */
+int
+si_corebist(si_t *sih)
+{
+ uint32 cflags;
+ int result = 0;
+
+ /* Read core control flags */
+ cflags = si_core_cflags(sih, 0, 0);
+
+ /* Set bist & fgc */
+ si_core_cflags(sih, ~0, (SICF_BIST_EN | SICF_FGC));
+
+ /* Wait for bist done */
+ SPINWAIT(((si_core_sflags(sih, 0, 0) & SISF_BIST_DONE) == 0), 100000);
+
+ if (si_core_sflags(sih, 0, 0) & SISF_BIST_ERROR)
+ result = BCME_ERROR;
+
+ /* Reset core control flags */
+ si_core_cflags(sih, 0xffff, cflags);
+
+ return result;
+}
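+
+/* Illustrative sketch only: a caller would typically switch focus to the core
+ * of interest before invoking si_corebist() and restore the previous core
+ * afterwards, following the save/restore pattern used throughout this file.
+ * The coreid argument and helper name below are hypothetical.
+ */
+static int
+example_run_bist_on_core(si_t *sih, uint coreid)
+{
+	uint origidx = si_coreidx(sih);
+	int result = BCME_ERROR;
+
+	if (si_setcore(sih, coreid, 0) != NULL)
+		result = si_corebist(sih);
+
+	si_setcoreidx(sih, origidx);
+	return result;
+}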
+
+static uint32
+factor6(uint32 x)
+{
+ switch (x) {
+ case CC_F6_2: return 2;
+ case CC_F6_3: return 3;
+ case CC_F6_4: return 4;
+ case CC_F6_5: return 5;
+ case CC_F6_6: return 6;
+ case CC_F6_7: return 7;
+ default: return 0;
+ }
+}
+
+/* calculate the speed the SI would run at given a set of clockcontrol values */
+uint32
+si_clock_rate(uint32 pll_type, uint32 n, uint32 m)
+{
+ uint32 n1, n2, clock, m1, m2, m3, mc;
+
+ n1 = n & CN_N1_MASK;
+ n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT;
+
+ if (pll_type == PLL_TYPE6) {
+ if (m & CC_T6_MMASK)
+ return CC_T6_M1;
+ else
+ return CC_T6_M0;
+ } else if ((pll_type == PLL_TYPE1) ||
+ (pll_type == PLL_TYPE3) ||
+ (pll_type == PLL_TYPE4) ||
+ (pll_type == PLL_TYPE7)) {
+ n1 = factor6(n1);
+ n2 += CC_F5_BIAS;
+ } else if (pll_type == PLL_TYPE2) {
+ n1 += CC_T2_BIAS;
+ n2 += CC_T2_BIAS;
+ ASSERT((n1 >= 2) && (n1 <= 7));
+ ASSERT((n2 >= 5) && (n2 <= 23));
+ } else if (pll_type == PLL_TYPE5) {
+ return (100000000);
+ } else
+ ASSERT(0);
+	/* PLL types 3 and 7 use BASE2 (25 MHz) */
+ if ((pll_type == PLL_TYPE3) ||
+ (pll_type == PLL_TYPE7)) {
+ clock = CC_CLOCK_BASE2 * n1 * n2;
+ } else
+ clock = CC_CLOCK_BASE1 * n1 * n2;
+
+ if (clock == 0)
+ return 0;
+
+ m1 = m & CC_M1_MASK;
+ m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT;
+ m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT;
+ mc = (m & CC_MC_MASK) >> CC_MC_SHIFT;
+
+ if ((pll_type == PLL_TYPE1) ||
+ (pll_type == PLL_TYPE3) ||
+ (pll_type == PLL_TYPE4) ||
+ (pll_type == PLL_TYPE7)) {
+ m1 = factor6(m1);
+ if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3))
+ m2 += CC_F5_BIAS;
+ else
+ m2 = factor6(m2);
+ m3 = factor6(m3);
+
+ switch (mc) {
+ case CC_MC_BYPASS: return (clock);
+ case CC_MC_M1: return (clock / m1);
+ case CC_MC_M1M2: return (clock / (m1 * m2));
+ case CC_MC_M1M2M3: return (clock / (m1 * m2 * m3));
+ case CC_MC_M1M3: return (clock / (m1 * m3));
+ default: return (0);
+ }
+ } else {
+ ASSERT(pll_type == PLL_TYPE2);
+
+ m1 += CC_T2_BIAS;
+ m2 += CC_T2M2_BIAS;
+ m3 += CC_T2_BIAS;
+ ASSERT((m1 >= 2) && (m1 <= 7));
+ ASSERT((m2 >= 3) && (m2 <= 10));
+ ASSERT((m3 >= 2) && (m3 <= 7));
+
+ if ((mc & CC_T2MC_M1BYP) == 0)
+ clock /= m1;
+ if ((mc & CC_T2MC_M2BYP) == 0)
+ clock /= m2;
+ if ((mc & CC_T2MC_M3BYP) == 0)
+ clock /= m3;
+
+ return (clock);
+ }
+}
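+
+/* Illustrative helper only: rounding the rate computed above to MHz for
+ * reporting. The n/m words would come from the chipcommon clockcontrol
+ * registers; the helper name is hypothetical.
+ */
+static uint32
+example_si_clock_mhz(uint32 pll_type, uint32 n, uint32 m)
+{
+	/* round to the nearest MHz, e.g. PLL_TYPE5 always yields 100 */
+	return (si_clock_rate(pll_type, n, m) + 500000) / 1000000;
+}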
+
+
+/* set chip watchdog reset timer to fire in 'ticks' */
+void
+si_watchdog(si_t *sih, uint ticks)
+{
+ uint nb, maxt;
+
+ if (PMUCTL_ENAB(sih)) {
+
+ if ((CHIPID(sih->chip) == BCM4319_CHIP_ID) &&
+ (CHIPREV(sih->chiprev) == 0) && (ticks != 0)) {
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), ~0, 0x2);
+ si_setcore(sih, USB20D_CORE_ID, 0);
+ si_core_disable(sih, 1);
+ si_setcore(sih, CC_CORE_ID, 0);
+ }
+
+ nb = (sih->ccrev < 26) ? 16 : ((sih->ccrev >= 37) ? 32 : 24);
+	/* The MIPS compiler uses the sllv instruction, which masks the
+	 * shift amount to 5 bits, so the 32-bit case is handled specially.
+	 */
+ if (nb == 32)
+ maxt = 0xffffffff;
+ else
+ maxt = ((1 << nb) - 1);
+
+ if (ticks == 1)
+ ticks = 2;
+ else if (ticks > maxt)
+ ticks = maxt;
+
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, pmuwatchdog), ~0, ticks);
+ } else {
+ maxt = (1 << 28) - 1;
+ if (ticks > maxt)
+ ticks = maxt;
+
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
+ }
+}
+
+/* trigger watchdog reset after ms milliseconds */
+void
+si_watchdog_ms(si_t *sih, uint32 ms)
+{
+ si_watchdog(sih, wd_msticks * ms);
+}
+
+uint32 si_watchdog_msticks(void)
+{
+ return wd_msticks;
+}
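+
+/* Sketch of typical use, assuming wd_msticks holds the number of watchdog
+ * ticks per millisecond; the 5-second timeout is illustrative.
+ */
+static void
+example_arm_watchdog_5s(si_t *sih)
+{
+	si_watchdog_ms(sih, 5000);	/* 5000 ms scaled by wd_msticks */
+	/* equivalently: si_watchdog(sih, si_watchdog_msticks() * 5000); */
+}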
+
+bool
+si_taclear(si_t *sih, bool details)
+{
+ return FALSE;
+}
+
+
+
+/* return the slow clock source - LPO, XTAL, or PCI */
+static uint
+si_slowclk_src(si_info_t *sii)
+{
+ chipcregs_t *cc;
+
+ ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID);
+
+ if (sii->pub.ccrev < 6) {
+ if ((BUSTYPE(sii->pub.bustype) == PCI_BUS) &&
+ (OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUT, sizeof(uint32)) &
+ PCI_CFG_GPIO_SCS))
+ return (SCC_SS_PCI);
+ else
+ return (SCC_SS_XTAL);
+ } else if (sii->pub.ccrev < 10) {
+ cc = (chipcregs_t *)si_setcoreidx(&sii->pub, sii->curidx);
+ return (R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_SS_MASK);
+ } else /* Insta-clock */
+ return (SCC_SS_XTAL);
+}
+
+/* return the ILP (slowclock) min or max frequency */
+static uint
+si_slowclk_freq(si_info_t *sii, bool max_freq, chipcregs_t *cc)
+{
+ uint32 slowclk;
+ uint div;
+
+ ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID);
+
+ /* shouldn't be here unless we've established the chip has dynamic clk control */
+ ASSERT(R_REG(sii->osh, &cc->capabilities) & CC_CAP_PWR_CTL);
+
+ slowclk = si_slowclk_src(sii);
+ if (sii->pub.ccrev < 6) {
+ if (slowclk == SCC_SS_PCI)
+ return (max_freq ? (PCIMAXFREQ / 64) : (PCIMINFREQ / 64));
+ else
+ return (max_freq ? (XTALMAXFREQ / 32) : (XTALMINFREQ / 32));
+ } else if (sii->pub.ccrev < 10) {
+ div = 4 *
+ (((R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_CD_MASK) >> SCC_CD_SHIFT) + 1);
+ if (slowclk == SCC_SS_LPO)
+ return (max_freq ? LPOMAXFREQ : LPOMINFREQ);
+ else if (slowclk == SCC_SS_XTAL)
+ return (max_freq ? (XTALMAXFREQ / div) : (XTALMINFREQ / div));
+ else if (slowclk == SCC_SS_PCI)
+ return (max_freq ? (PCIMAXFREQ / div) : (PCIMINFREQ / div));
+ else
+ ASSERT(0);
+ } else {
+ /* Chipc rev 10 is InstaClock */
+ div = R_REG(sii->osh, &cc->system_clk_ctl) >> SYCC_CD_SHIFT;
+ div = 4 * (div + 1);
+ return (max_freq ? XTALMAXFREQ : (XTALMINFREQ / div));
+ }
+ return (0);
+}
+
+static void
+si_clkctl_setdelay(si_info_t *sii, void *chipcregs)
+{
+ chipcregs_t *cc = (chipcregs_t *)chipcregs;
+ uint slowmaxfreq, pll_delay, slowclk;
+ uint pll_on_delay, fref_sel_delay;
+
+ pll_delay = PLL_DELAY;
+
+	/* If the slow clock is not sourced by the xtal, then add the xtal_on_delay,
+	 * since the xtal will also be powered down by the dynamic clock control logic.
+	 */
+
+ slowclk = si_slowclk_src(sii);
+ if (slowclk != SCC_SS_XTAL)
+ pll_delay += XTAL_ON_DELAY;
+
+	/* Starting with the 4318, the ILP clock is used for the delays */
+ slowmaxfreq = si_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? FALSE : TRUE, cc);
+
+ pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
+ fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
+
+ W_REG(sii->osh, &cc->pll_on_delay, pll_on_delay);
+ W_REG(sii->osh, &cc->fref_sel_delay, fref_sel_delay);
+}
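+
+/* The delay registers count slow-clock ticks, so the microsecond delays above
+ * are converted with a round-up divide. A standalone sketch of the same
+ * arithmetic (frequency and delay values are illustrative):
+ */
+static uint
+example_us_to_slowclk_ticks(uint slowclk_hz, uint delay_us)
+{
+	/* ceil(slowclk_hz * delay_us / 1000000): 43 kHz and 150 us give 7 ticks */
+	return ((slowclk_hz * delay_us) + 999999) / 1000000;
+}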
+
+/* initialize power control delay registers */
+void
+si_clkctl_init(si_t *sih)
+{
+ si_info_t *sii;
+ uint origidx = 0;
+ chipcregs_t *cc;
+ bool fast;
+
+ if (!CCCTL_ENAB(sih))
+ return;
+
+ sii = SI_INFO(sih);
+ fast = SI_FAST(sii);
+ if (!fast) {
+ origidx = sii->curidx;
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL)
+ return;
+ } else if ((cc = (chipcregs_t *)CCREGS_FAST(sii)) == NULL)
+ return;
+ ASSERT(cc != NULL);
+
+ /* set all Instaclk chip ILP to 1 MHz */
+ if (sih->ccrev >= 10)
+ SET_REG(sii->osh, &cc->system_clk_ctl, SYCC_CD_MASK,
+ (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
+
+ si_clkctl_setdelay(sii, (void *)(uintptr)cc);
+
+ if (!fast)
+ si_setcoreidx(sih, origidx);
+}
+
+
+/* change logical "focus" to the gpio core for optimized access */
+void *
+si_gpiosetcore(si_t *sih)
+{
+ return (si_setcoreidx(sih, SI_CC_IDX));
+}
+
+/*
+ * mask & set gpiocontrol bits.
+ * If a gpiocontrol bit is set to 0, chipcommon controls the corresponding GPIO pin.
+ * If a gpiocontrol bit is set to 1, the GPIO pin is no longer a GPIO and becomes dedicated
+ * to some chip-specific purpose.
+ */
+uint32
+si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ uint regoff;
+
+ regoff = 0;
+
+	/* GPIOs could be shared on router platforms;
+	 * ignore the reservation if the request is high priority (e.g., test apps).
+	 */
+ if ((priority != GPIO_HI_PRIORITY) &&
+ (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpiocontrol);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* mask&set gpio output enable bits */
+uint32
+si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ uint regoff;
+
+ regoff = 0;
+
+	/* GPIOs could be shared on router platforms;
+	 * ignore the reservation if the request is high priority (e.g., test apps).
+	 */
+ if ((priority != GPIO_HI_PRIORITY) &&
+ (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpioouten);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* mask&set gpio output bits */
+uint32
+si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ uint regoff;
+
+ regoff = 0;
+
+	/* GPIOs could be shared on router platforms;
+	 * ignore the reservation if the request is high priority (e.g., test apps).
+	 */
+ if ((priority != GPIO_HI_PRIORITY) &&
+ (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpioout);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* reserve one gpio */
+uint32
+si_gpioreserve(si_t *sih, uint32 gpio_bitmask, uint8 priority)
+{
+	/* Only cores on SI_BUS share GPIOs, and only application users need to
+	 * reserve/release GPIOs.
+	 */
+ if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
+ ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
+ return 0xffffffff;
+ }
+ /* make sure only one bit is set */
+ if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
+ ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
+ return 0xffffffff;
+ }
+
+ /* already reserved */
+ if (si_gpioreservation & gpio_bitmask)
+ return 0xffffffff;
+ /* set reservation */
+ si_gpioreservation |= gpio_bitmask;
+
+ return si_gpioreservation;
+}
+
+/* release one gpio */
+/*
+ * Releasing the GPIO does not change its current value; the last written value
+ * persists until someone overwrites it.
+ */
+
+uint32
+si_gpiorelease(si_t *sih, uint32 gpio_bitmask, uint8 priority)
+{
+	/* Only cores on SI_BUS share GPIOs, and only application users need to
+	 * reserve/release GPIOs.
+	 */
+ if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
+ ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
+ return 0xffffffff;
+ }
+ /* make sure only one bit is set */
+ if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
+ ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
+ return 0xffffffff;
+ }
+
+ /* already released */
+ if (!(si_gpioreservation & gpio_bitmask))
+ return 0xffffffff;
+
+ /* clear reservation */
+ si_gpioreservation &= ~gpio_bitmask;
+
+ return si_gpioreservation;
+}
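+
+/* Sketch of the reserve/configure/drive/release flow that the routines above
+ * support. The GPIO number is illustrative, and GPIO_APP_PRIORITY is assumed
+ * to be the non-driver priority level defined alongside GPIO_HI_PRIORITY.
+ */
+static void
+example_pulse_shared_gpio(si_t *sih, uint gpio)
+{
+	uint32 bit = 1 << gpio;
+
+	if (si_gpioreserve(sih, bit, GPIO_APP_PRIORITY) == 0xffffffff)
+		return;					/* already reserved */
+
+	si_gpiocontrol(sih, bit, 0, GPIO_APP_PRIORITY);	/* chipc owns the pin */
+	si_gpioouten(sih, bit, bit, GPIO_APP_PRIORITY);	/* enable output */
+	si_gpioout(sih, bit, bit, GPIO_APP_PRIORITY);	/* drive high */
+	si_gpioout(sih, bit, 0, GPIO_APP_PRIORITY);	/* drive low */
+
+	si_gpiorelease(sih, bit, GPIO_APP_PRIORITY);
+}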
+
+/* return the current gpioin register value */
+uint32
+si_gpioin(si_t *sih)
+{
+ uint regoff;
+
+ regoff = OFFSETOF(chipcregs_t, gpioin);
+ return (si_corereg(sih, SI_CC_IDX, regoff, 0, 0));
+}
+
+/* mask&set gpio interrupt polarity bits */
+uint32
+si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ uint regoff;
+
+ /* gpios could be shared on router platforms */
+ if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpiointpolarity);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* mask&set gpio interrupt mask bits */
+uint32
+si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ uint regoff;
+
+ /* gpios could be shared on router platforms */
+ if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpiointmask);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* assign the gpio to an led */
+uint32
+si_gpioled(si_t *sih, uint32 mask, uint32 val)
+{
+ if (sih->ccrev < 16)
+ return 0xffffffff;
+
+ /* gpio led powersave reg */
+ return (si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiotimeroutmask), mask, val));
+}
+
+/* mask&set gpio timer val */
+uint32
+si_gpiotimerval(si_t *sih, uint32 mask, uint32 gpiotimerval)
+{
+ if (sih->ccrev < 16)
+ return 0xffffffff;
+
+ return (si_corereg(sih, SI_CC_IDX,
+ OFFSETOF(chipcregs_t, gpiotimerval), mask, gpiotimerval));
+}
+
+uint32
+si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val)
+{
+ uint offs;
+
+ if (sih->ccrev < 20)
+ return 0xffffffff;
+
+ offs = (updown ? OFFSETOF(chipcregs_t, gpiopulldown) : OFFSETOF(chipcregs_t, gpiopullup));
+ return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
+}
+
+uint32
+si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val)
+{
+ uint offs;
+
+ if (sih->ccrev < 11)
+ return 0xffffffff;
+
+ if (regtype == GPIO_REGEVT)
+ offs = OFFSETOF(chipcregs_t, gpioevent);
+ else if (regtype == GPIO_REGEVT_INTMSK)
+ offs = OFFSETOF(chipcregs_t, gpioeventintmask);
+ else if (regtype == GPIO_REGEVT_INTPOL)
+ offs = OFFSETOF(chipcregs_t, gpioeventintpolarity);
+ else
+ return 0xffffffff;
+
+ return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
+}
+
+void *
+si_gpio_handler_register(si_t *sih, uint32 event,
+ bool level, gpio_handler_t cb, void *arg)
+{
+ si_info_t *sii;
+ gpioh_item_t *gi;
+
+ ASSERT(event);
+ ASSERT(cb != NULL);
+
+ sii = SI_INFO(sih);
+ if (sih->ccrev < 11)
+ return NULL;
+
+ if ((gi = MALLOC(sii->osh, sizeof(gpioh_item_t))) == NULL)
+ return NULL;
+
+ bzero(gi, sizeof(gpioh_item_t));
+ gi->event = event;
+ gi->handler = cb;
+ gi->arg = arg;
+ gi->level = level;
+
+ gi->next = sii->gpioh_head;
+ sii->gpioh_head = gi;
+
+ return (void *)(gi);
+}
+
+void
+si_gpio_handler_unregister(si_t *sih, void *gpioh)
+{
+ si_info_t *sii;
+ gpioh_item_t *p, *n;
+
+ sii = SI_INFO(sih);
+ if (sih->ccrev < 11)
+ return;
+
+ ASSERT(sii->gpioh_head != NULL);
+ if ((void*)sii->gpioh_head == gpioh) {
+ sii->gpioh_head = sii->gpioh_head->next;
+ MFREE(sii->osh, gpioh, sizeof(gpioh_item_t));
+ return;
+ } else {
+ p = sii->gpioh_head;
+ n = p->next;
+ while (n) {
+ if ((void*)n == gpioh) {
+ p->next = n->next;
+ MFREE(sii->osh, gpioh, sizeof(gpioh_item_t));
+ return;
+ }
+ p = n;
+ n = n->next;
+ }
+ }
+
+ ASSERT(0); /* Not found in list */
+}
+
+void
+si_gpio_handler_process(si_t *sih)
+{
+ si_info_t *sii;
+ gpioh_item_t *h;
+ uint32 level = si_gpioin(sih);
+ uint32 levelp = si_gpiointpolarity(sih, 0, 0, 0);
+ uint32 edge = si_gpioevent(sih, GPIO_REGEVT, 0, 0);
+ uint32 edgep = si_gpioevent(sih, GPIO_REGEVT_INTPOL, 0, 0);
+
+ sii = SI_INFO(sih);
+ for (h = sii->gpioh_head; h != NULL; h = h->next) {
+ if (h->handler) {
+ uint32 status = (h->level ? level : edge) & h->event;
+ uint32 polarity = (h->level ? levelp : edgep) & h->event;
+
+ /* polarity bitval is opposite of status bitval */
+ if (status ^ polarity)
+ h->handler(status, h->arg);
+ }
+ }
+
+ si_gpioevent(sih, GPIO_REGEVT, edge, edge); /* clear edge-trigger status */
+}
+
+uint32
+si_gpio_int_enable(si_t *sih, bool enable)
+{
+ uint offs;
+
+ if (sih->ccrev < 11)
+ return 0xffffffff;
+
+ offs = OFFSETOF(chipcregs_t, intmask);
+ return (si_corereg(sih, SI_CC_IDX, offs, CI_GPIO, (enable ? CI_GPIO : 0)));
+}
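+
+/* Sketch: registering a level-triggered handler for one GPIO event bit and
+ * enabling the chipcommon GPIO interrupt. The callback and event bit are
+ * illustrative; a real user would also program si_gpiointpolarity() and
+ * si_gpiointmask() as needed.
+ */
+static void
+example_gpio_event_cb(uint32 status, void *arg)
+{
+	/* 'status' carries the event bits that matched this handler */
+}
+
+static void *
+example_register_gpio_event(si_t *sih, uint32 event_bit)
+{
+	void *h = si_gpio_handler_register(sih, event_bit, TRUE,
+		example_gpio_event_cb, NULL);
+
+	if (h != NULL)
+		si_gpio_int_enable(sih, TRUE);
+	return h;
+}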
+
+
+/* Return the size of the specified SOCRAM bank */
+static uint
+socram_banksize(si_info_t *sii, sbsocramregs_t *regs, uint8 idx, uint8 mem_type)
+{
+ uint banksize, bankinfo;
+ uint bankidx = idx | (mem_type << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+
+ ASSERT(mem_type <= SOCRAM_MEMTYPE_DEVRAM);
+
+ W_REG(sii->osh, &regs->bankidx, bankidx);
+ bankinfo = R_REG(sii->osh, &regs->bankinfo);
+ banksize = SOCRAM_BANKINFO_SZBASE * ((bankinfo & SOCRAM_BANKINFO_SZMASK) + 1);
+ return banksize;
+}
+
+void
+si_socdevram(si_t *sih, bool set, uint8 *enable, uint8 *protect, uint8 *remap)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+ sbsocramregs_t *regs;
+ bool wasup;
+ uint corerev;
+
+ sii = SI_INFO(sih);
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ if (!set)
+ *enable = *protect = *remap = 0;
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+
+ corerev = si_corerev(sih);
+ if (corerev >= 10) {
+ uint32 extcinfo;
+ uint8 nb;
+ uint8 i;
+ uint32 bankidx, bankinfo;
+
+ extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+ nb = ((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT);
+ for (i = 0; i < nb; i++) {
+ bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+ W_REG(sii->osh, &regs->bankidx, bankidx);
+ bankinfo = R_REG(sii->osh, &regs->bankinfo);
+ if (set) {
+ bankinfo &= ~SOCRAM_BANKINFO_DEVRAMSEL_MASK;
+ bankinfo &= ~SOCRAM_BANKINFO_DEVRAMPRO_MASK;
+ bankinfo &= ~SOCRAM_BANKINFO_DEVRAMREMAP_MASK;
+ if (*enable) {
+ bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMSEL_SHIFT);
+ if (*protect)
+ bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMPRO_SHIFT);
+ if ((corerev >= 16) && *remap)
+ bankinfo |=
+ (1 << SOCRAM_BANKINFO_DEVRAMREMAP_SHIFT);
+ }
+ W_REG(sii->osh, &regs->bankinfo, bankinfo);
+ }
+ else if (i == 0) {
+ if (bankinfo & SOCRAM_BANKINFO_DEVRAMSEL_MASK) {
+ *enable = 1;
+ if (bankinfo & SOCRAM_BANKINFO_DEVRAMPRO_MASK)
+ *protect = 1;
+ if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK)
+ *remap = 1;
+ }
+ }
+ }
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, intr_val);
+}
+
+bool
+si_socdevram_remap_isenb(si_t *sih)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+ sbsocramregs_t *regs;
+ bool wasup, remap = FALSE;
+ uint corerev;
+ uint32 extcinfo;
+ uint8 nb;
+ uint8 i;
+ uint32 bankidx, bankinfo;
+
+ sii = SI_INFO(sih);
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+
+ corerev = si_corerev(sih);
+ if (corerev >= 16) {
+ extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+ nb = ((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT);
+ for (i = 0; i < nb; i++) {
+ bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+ W_REG(sii->osh, &regs->bankidx, bankidx);
+ bankinfo = R_REG(sii->osh, &regs->bankinfo);
+ if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK) {
+ remap = TRUE;
+ break;
+ }
+ }
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, intr_val);
+ return remap;
+}
+
+bool
+si_socdevram_pkg(si_t *sih)
+{
+ if (si_socdevram_size(sih) > 0)
+ return TRUE;
+ else
+ return FALSE;
+}
+
+uint32
+si_socdevram_size(si_t *sih)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+ uint32 memsize = 0;
+ sbsocramregs_t *regs;
+ bool wasup;
+ uint corerev;
+
+ sii = SI_INFO(sih);
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+
+ corerev = si_corerev(sih);
+ if (corerev >= 10) {
+ uint32 extcinfo;
+ uint8 nb;
+ uint8 i;
+
+ extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+ nb = (((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT));
+ for (i = 0; i < nb; i++)
+ memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_DEVRAM);
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, intr_val);
+
+ return memsize;
+}
+
+uint32
+si_socdevram_remap_size(si_t *sih)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+ uint32 memsize = 0, banksz;
+ sbsocramregs_t *regs;
+ bool wasup;
+ uint corerev;
+ uint32 extcinfo;
+ uint8 nb;
+ uint8 i;
+ uint32 bankidx, bankinfo;
+
+ sii = SI_INFO(sih);
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+
+ corerev = si_corerev(sih);
+ if (corerev >= 16) {
+ extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+ nb = (((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT));
+
+ /*
+		 * FIX: A0 issue: max addressable is 512KB instead of 640KB.
+		 * Only four banks are accessible to the ARM.
+ */
+ if ((corerev == 16) && (nb == 5))
+ nb = 4;
+
+ for (i = 0; i < nb; i++) {
+ bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+ W_REG(sii->osh, &regs->bankidx, bankidx);
+ bankinfo = R_REG(sii->osh, &regs->bankinfo);
+ if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK) {
+ banksz = socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_DEVRAM);
+ memsize += banksz;
+ } else {
+ /* Account only consecutive banks for now */
+ break;
+ }
+ }
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, intr_val);
+
+ return memsize;
+}
+
+/* Return the RAM size of the SOCRAM core */
+uint32
+si_socram_size(si_t *sih)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+
+ sbsocramregs_t *regs;
+ bool wasup;
+ uint corerev;
+ uint32 coreinfo;
+ uint memsize = 0;
+
+ sii = SI_INFO(sih);
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+ corerev = si_corerev(sih);
+ coreinfo = R_REG(sii->osh, &regs->coreinfo);
+
+ /* Calculate size from coreinfo based on rev */
+ if (corerev == 0)
+ memsize = 1 << (16 + (coreinfo & SRCI_MS0_MASK));
+ else if (corerev < 3) {
+ memsize = 1 << (SR_BSZ_BASE + (coreinfo & SRCI_SRBSZ_MASK));
+ memsize *= (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ } else if ((corerev <= 7) || (corerev == 12)) {
+ uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ uint bsz = (coreinfo & SRCI_SRBSZ_MASK);
+ uint lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
+ if (lss != 0)
+ nb --;
+ memsize = nb * (1 << (bsz + SR_BSZ_BASE));
+ if (lss != 0)
+ memsize += (1 << ((lss - 1) + SR_BSZ_BASE));
+ } else {
+ uint8 i;
+ uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ for (i = 0; i < nb; i++)
+ memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM);
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, intr_val);
+
+ return memsize;
+}
+
+uint32
+si_socram_srmem_size(si_t *sih)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+
+ sbsocramregs_t *regs;
+ bool wasup;
+ uint corerev;
+ uint32 coreinfo;
+ uint memsize = 0;
+
+ if ((CHIPID(sih->chip) == BCM4334_CHIP_ID) && (CHIPREV(sih->chiprev) < 2)) {
+ return (32 * 1024);
+ }
+
+ sii = SI_INFO(sih);
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+ corerev = si_corerev(sih);
+ coreinfo = R_REG(sii->osh, &regs->coreinfo);
+
+ /* Calculate size from coreinfo based on rev */
+ if (corerev >= 16) {
+ uint8 i;
+ uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ for (i = 0; i < nb; i++) {
+ W_REG(sii->osh, &regs->bankidx, i);
+ if (R_REG(sii->osh, &regs->bankinfo) & SOCRAM_BANKINFO_RETNTRAM_MASK)
+ memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM);
+ }
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, intr_val);
+
+ return memsize;
+}
+
+
+void
+si_btcgpiowar(si_t *sih)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+ chipcregs_t *cc;
+
+ sii = SI_INFO(sih);
+
+	/* Make sure a ChipCommon core is present and
+	 * UART_TX is strapped to 1.
+	 */
+ if (!(sih->cccaps & CC_CAP_UARTGPIO))
+ return;
+
+ /* si_corereg cannot be used as we have to guarantee 8-bit read/writes */
+ INTR_OFF(sii, intr_val);
+
+ origidx = si_coreidx(sih);
+
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ ASSERT(cc != NULL);
+
+ W_REG(sii->osh, &cc->uart0mcr, R_REG(sii->osh, &cc->uart0mcr) | 0x04);
+
+ /* restore the original index */
+ si_setcoreidx(sih, origidx);
+
+ INTR_RESTORE(sii, intr_val);
+}
+
+void
+si_chipcontrl_btshd0_4331(si_t *sih, bool on)
+{
+ si_info_t *sii;
+ chipcregs_t *cc;
+ uint origidx;
+ uint32 val;
+ uint intr_val = 0;
+
+ sii = SI_INFO(sih);
+
+ INTR_OFF(sii, intr_val);
+
+ origidx = si_coreidx(sih);
+
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+ val = R_REG(sii->osh, &cc->chipcontrol);
+
+	/* bt_shd0 controls are the same for 4331 chiprevs 0 and 1, packages 12x9 and 12x12 */
+ if (on) {
+ /* Enable bt_shd0 on gpio4: */
+ val |= (CCTRL4331_BT_SHD0_ON_GPIO4);
+ W_REG(sii->osh, &cc->chipcontrol, val);
+ } else {
+ val &= ~(CCTRL4331_BT_SHD0_ON_GPIO4);
+ W_REG(sii->osh, &cc->chipcontrol, val);
+ }
+
+ /* restore the original index */
+ si_setcoreidx(sih, origidx);
+
+ INTR_RESTORE(sii, intr_val);
+}
+
+void
+si_chipcontrl_restore(si_t *sih, uint32 val)
+{
+ si_info_t *sii;
+ chipcregs_t *cc;
+ uint origidx;
+
+ sii = SI_INFO(sih);
+ origidx = si_coreidx(sih);
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ W_REG(sii->osh, &cc->chipcontrol, val);
+ si_setcoreidx(sih, origidx);
+}
+
+uint32
+si_chipcontrl_read(si_t *sih)
+{
+ si_info_t *sii;
+ chipcregs_t *cc;
+ uint origidx;
+ uint32 val;
+
+ sii = SI_INFO(sih);
+ origidx = si_coreidx(sih);
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ val = R_REG(sii->osh, &cc->chipcontrol);
+ si_setcoreidx(sih, origidx);
+ return val;
+}
+
+void
+si_chipcontrl_epa4331(si_t *sih, bool on)
+{
+ si_info_t *sii;
+ chipcregs_t *cc;
+ uint origidx;
+ uint32 val;
+
+ sii = SI_INFO(sih);
+ origidx = si_coreidx(sih);
+
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+ val = R_REG(sii->osh, &cc->chipcontrol);
+
+ if (on) {
+ if (sih->chippkg == 9 || sih->chippkg == 0xb) {
+ val |= (CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
+ /* Ext PA Controls for 4331 12x9 Package */
+ W_REG(sii->osh, &cc->chipcontrol, val);
+ } else {
+ /* Ext PA Controls for 4331 12x12 Package */
+ if (sih->chiprev > 0) {
+ W_REG(sii->osh, &cc->chipcontrol, val |
+ (CCTRL4331_EXTPA_EN) | (CCTRL4331_EXTPA_EN2));
+ } else {
+ W_REG(sii->osh, &cc->chipcontrol, val | (CCTRL4331_EXTPA_EN));
+ }
+ }
+ } else {
+ val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_EN2 | CCTRL4331_EXTPA_ON_GPIO2_5);
+ W_REG(sii->osh, &cc->chipcontrol, val);
+ }
+
+ si_setcoreidx(sih, origidx);
+}
+
+/* switch muxed pins, on: SROM, off: FEMCTRL */
+void
+si_chipcontrl_srom4360(si_t *sih, bool on)
+{
+ si_info_t *sii;
+ chipcregs_t *cc;
+ uint origidx;
+ uint32 val;
+
+ sii = SI_INFO(sih);
+ origidx = si_coreidx(sih);
+
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+ val = R_REG(sii->osh, &cc->chipcontrol);
+
+ if (on) {
+ val &= ~(CCTRL4360_SECI_MODE |
+ CCTRL4360_BTSWCTRL_MODE |
+ CCTRL4360_EXTRA_FEMCTRL_MODE |
+ CCTRL4360_BT_LGCY_MODE |
+ CCTRL4360_CORE2FEMCTRL4_ON);
+
+ W_REG(sii->osh, &cc->chipcontrol, val);
+ } else {
+ }
+
+ si_setcoreidx(sih, origidx);
+}
+
+void
+si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl)
+{
+ si_info_t *sii;
+ chipcregs_t *cc;
+ uint origidx;
+ uint32 val;
+ bool sel_chip;
+
+ sel_chip = (CHIPID(sih->chip) == BCM4331_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43431_CHIP_ID);
+ sel_chip &= ((sih->chippkg == 9 || sih->chippkg == 0xb));
+
+ if (!sel_chip)
+ return;
+
+ sii = SI_INFO(sih);
+ origidx = si_coreidx(sih);
+
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+ val = R_REG(sii->osh, &cc->chipcontrol);
+
+ if (enter_wowl) {
+ val |= CCTRL4331_EXTPA_EN;
+ W_REG(sii->osh, &cc->chipcontrol, val);
+ } else {
+ val |= (CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
+ W_REG(sii->osh, &cc->chipcontrol, val);
+ }
+ si_setcoreidx(sih, origidx);
+}
+
+uint
+si_pll_reset(si_t *sih)
+{
+ uint err = 0;
+
+ return (err);
+}
+
+/* Enable BT-COEX & Ex-PA for 4313 */
+void
+si_epa_4313war(si_t *sih)
+{
+ si_info_t *sii;
+ chipcregs_t *cc;
+ uint origidx;
+
+ sii = SI_INFO(sih);
+ origidx = si_coreidx(sih);
+
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+ /* EPA Fix */
+ W_REG(sii->osh, &cc->gpiocontrol,
+ R_REG(sii->osh, &cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK);
+
+ si_setcoreidx(sih, origidx);
+}
+
+void
+si_clk_pmu_htavail_set(si_t *sih, bool set_clear)
+{
+}
+
+/* WL/BT control for 4313 btcombo boards >= P250 */
+void
+si_btcombo_p250_4313_war(si_t *sih)
+{
+ si_info_t *sii;
+ chipcregs_t *cc;
+ uint origidx;
+
+ sii = SI_INFO(sih);
+ origidx = si_coreidx(sih);
+
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ W_REG(sii->osh, &cc->gpiocontrol,
+ R_REG(sii->osh, &cc->gpiocontrol) | GPIO_CTRL_5_6_EN_MASK);
+
+ W_REG(sii->osh, &cc->gpioouten,
+ R_REG(sii->osh, &cc->gpioouten) | GPIO_CTRL_5_6_EN_MASK);
+
+ si_setcoreidx(sih, origidx);
+}
+
+void
+si_btc_enable_chipcontrol(si_t *sih)
+{
+ si_info_t *sii;
+ chipcregs_t *cc;
+ uint origidx;
+
+ sii = SI_INFO(sih);
+ origidx = si_coreidx(sih);
+
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+ /* BT fix */
+ W_REG(sii->osh, &cc->chipcontrol,
+ R_REG(sii->osh, &cc->chipcontrol) | CC_BTCOEX_EN_MASK);
+
+ si_setcoreidx(sih, origidx);
+}
+
+void
+si_btcombo_43228_war(si_t *sih)
+{
+ si_info_t *sii;
+ chipcregs_t *cc;
+ uint origidx;
+
+ sii = SI_INFO(sih);
+ origidx = si_coreidx(sih);
+
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+ W_REG(sii->osh, &cc->gpioouten, GPIO_CTRL_7_6_EN_MASK);
+ W_REG(sii->osh, &cc->gpioout, GPIO_OUT_7_EN_MASK);
+
+ si_setcoreidx(sih, origidx);
+}
+
+/* check if the device is removed */
+bool
+si_deviceremoved(si_t *sih)
+{
+ uint32 w;
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ switch (BUSTYPE(sih->bustype)) {
+ case PCI_BUS:
+ ASSERT(sii->osh != NULL);
+ w = OSL_PCI_READ_CONFIG(sii->osh, PCI_CFG_VID, sizeof(uint32));
+ if ((w & 0xFFFF) != VENDOR_BROADCOM)
+ return TRUE;
+ break;
+ }
+ return FALSE;
+}
+
+bool
+si_is_sprom_available(si_t *sih)
+{
+ if (sih->ccrev >= 31) {
+ si_info_t *sii;
+ uint origidx;
+ chipcregs_t *cc;
+ uint32 sromctrl;
+
+ if ((sih->cccaps & CC_CAP_SROM) == 0)
+ return FALSE;
+
+ sii = SI_INFO(sih);
+ origidx = sii->curidx;
+ cc = si_setcoreidx(sih, SI_CC_IDX);
+ sromctrl = R_REG(sii->osh, &cc->sromcontrol);
+ si_setcoreidx(sih, origidx);
+ return (sromctrl & SRC_PRESENT);
+ }
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4312_CHIP_ID:
+ return ((sih->chipst & CST4312_SPROM_OTP_SEL_MASK) != CST4312_OTP_SEL);
+ case BCM4325_CHIP_ID:
+ return (sih->chipst & CST4325_SPROM_SEL) != 0;
+ case BCM4322_CHIP_ID: case BCM43221_CHIP_ID: case BCM43231_CHIP_ID:
+ case BCM43222_CHIP_ID: case BCM43111_CHIP_ID: case BCM43112_CHIP_ID:
+ case BCM4342_CHIP_ID: {
+ uint32 spromotp;
+ spromotp = (sih->chipst & CST4322_SPROM_OTP_SEL_MASK) >>
+ CST4322_SPROM_OTP_SEL_SHIFT;
+ return (spromotp & CST4322_SPROM_PRESENT) != 0;
+ }
+ case BCM4329_CHIP_ID:
+ return (sih->chipst & CST4329_SPROM_SEL) != 0;
+ case BCM4315_CHIP_ID:
+ return (sih->chipst & CST4315_SPROM_SEL) != 0;
+ case BCM4319_CHIP_ID:
+ return (sih->chipst & CST4319_SPROM_SEL) != 0;
+ case BCM4336_CHIP_ID:
+ case BCM43362_CHIP_ID:
+ return (sih->chipst & CST4336_SPROM_PRESENT) != 0;
+ case BCM4330_CHIP_ID:
+ return (sih->chipst & CST4330_SPROM_PRESENT) != 0;
+ case BCM4313_CHIP_ID:
+ return (sih->chipst & CST4313_SPROM_PRESENT) != 0;
+ case BCM4331_CHIP_ID:
+ case BCM43431_CHIP_ID:
+ return (sih->chipst & CST4331_SPROM_PRESENT) != 0;
+ case BCM43239_CHIP_ID:
+ return ((sih->chipst & CST43239_SPROM_MASK) &&
+ !(sih->chipst & CST43239_SFLASH_MASK));
+ case BCM4324_CHIP_ID:
+ return ((sih->chipst & CST4324_SPROM_MASK) &&
+ !(sih->chipst & CST4324_SFLASH_MASK));
+ case BCM43131_CHIP_ID:
+ case BCM43217_CHIP_ID:
+ case BCM43227_CHIP_ID:
+ case BCM43228_CHIP_ID:
+ case BCM43428_CHIP_ID:
+ return (sih->chipst & CST43228_OTP_PRESENT) != CST43228_OTP_PRESENT;
+ default:
+ return TRUE;
+ }
+}
diff --git a/drivers/net/wireless/bcmdhd/siutils_priv.h b/drivers/net/wireless/bcmdhd/siutils_priv.h
new file mode 100644
index 000000000000..9a3270f74242
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/siutils_priv.h
@@ -0,0 +1,246 @@
+/*
+ * Include file private to the SOC Interconnect support files.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: siutils_priv.h 309193 2012-01-19 00:03:57Z $
+ */
+
+#ifndef _siutils_priv_h_
+#define _siutils_priv_h_
+
+#define SI_ERROR(args)
+
+#define SI_MSG(args)
+
+#ifdef BCMDBG_SI
+#define SI_VMSG(args) printf args
+#else
+#define SI_VMSG(args)
+#endif
+
+#define IS_SIM(chippkg) ((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
+
+typedef uint32 (*si_intrsoff_t)(void *intr_arg);
+typedef void (*si_intrsrestore_t)(void *intr_arg, uint32 arg);
+typedef bool (*si_intrsenabled_t)(void *intr_arg);
+
+typedef struct gpioh_item {
+ void *arg;
+ bool level;
+ gpio_handler_t handler;
+ uint32 event;
+ struct gpioh_item *next;
+} gpioh_item_t;
+
+/* misc si info needed by some of the routines */
+typedef struct si_info {
+ struct si_pub pub; /* back plane public state (must be first field) */
+
+ void *osh; /* osl os handle */
+ void *sdh; /* bcmsdh handle */
+
+	uint	dev_coreid;		/* the core that provides driver functions */
+ void *intr_arg; /* interrupt callback function arg */
+ si_intrsoff_t intrsoff_fn; /* turns chip interrupts off */
+ si_intrsrestore_t intrsrestore_fn; /* restore chip interrupts */
+ si_intrsenabled_t intrsenabled_fn; /* check if interrupts are enabled */
+
+ void *pch; /* PCI/E core handle */
+
+ gpioh_item_t *gpioh_head; /* GPIO event handlers list */
+
+ bool memseg; /* flag to toggle MEM_SEG register */
+
+ char *vars;
+ uint varsz;
+
+ void *curmap; /* current regs va */
+ void *regs[SI_MAXCORES]; /* other regs va */
+
+ uint curidx; /* current core index */
+ uint numcores; /* # discovered cores */
+ uint coreid[SI_MAXCORES]; /* id of each core */
+ uint32 coresba[SI_MAXCORES]; /* backplane address of each core */
+ void *regs2[SI_MAXCORES]; /* va of each core second register set (usbh20) */
+ uint32 coresba2[SI_MAXCORES]; /* address of each core second register set (usbh20) */
+ uint32 coresba_size[SI_MAXCORES]; /* backplane address space size */
+ uint32 coresba2_size[SI_MAXCORES]; /* second address space size */
+
+ void *curwrap; /* current wrapper va */
+ void *wrappers[SI_MAXCORES]; /* other cores wrapper va */
+ uint32 wrapba[SI_MAXCORES]; /* address of controlling wrapper */
+
+ uint32 cia[SI_MAXCORES]; /* erom cia entry for each core */
+	uint32	cib[SI_MAXCORES];	/* erom cib entry for each core */
+ uint32 oob_router; /* oob router registers for axi */
+} si_info_t;
+
+#define SI_INFO(sih) (si_info_t *)(uintptr)sih
+
+#define GOODCOREADDR(x, b) (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
+ ISALIGNED((x), SI_CORE_SIZE))
+#define GOODREGS(regs) ((regs) != NULL && ISALIGNED((uintptr)(regs), SI_CORE_SIZE))
+#define BADCOREADDR 0
+#define GOODIDX(idx) (((uint)idx) < SI_MAXCORES)
+#define NOREV -1 /* Invalid rev */
+
+#define PCI(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \
+ ((si)->pub.buscoretype == PCI_CORE_ID))
+
+#define PCIE_GEN1(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \
+ ((si)->pub.buscoretype == PCIE_CORE_ID))
+
+#define PCIE_GEN2(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \
+ ((si)->pub.buscoretype == PCIE2_CORE_ID))
+
+#define PCIE(si) (PCIE_GEN1(si) || PCIE_GEN2(si))
+
+#define PCMCIA(si) ((BUSTYPE((si)->pub.bustype) == PCMCIA_BUS) && ((si)->memseg == TRUE))
+
+/* Newer chips can access the PCI/PCIe and CC cores without requiring a change
+ * to the PCI BAR0 window.
+ */
+#define SI_FAST(si) (PCIE(si) || (PCI(si) && ((si)->pub.buscorerev >= 13)))
+
+#define PCIEREGS(si) (((char *)((si)->curmap) + PCI_16KB0_PCIREGS_OFFSET))
+#define CCREGS_FAST(si) (((char *)((si)->curmap) + PCI_16KB0_CCREGS_OFFSET))
+
+/*
+ * Macros to disable/restore function core (D11, ENET, ILINE20, etc.) interrupts
+ * before/after core switching, to avoid invalid register access inside the ISR.
+ */
+#define INTR_OFF(si, intr_val) \
+ if ((si)->intrsoff_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) { \
+ intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); }
+#define INTR_RESTORE(si, intr_val) \
+ if ((si)->intrsrestore_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) { \
+ (*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); }
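+
+/* Sketch of the pattern these macros support in siutils.c: block the function
+ * core's interrupts, save the current core, switch, do the register work,
+ * switch back and restore interrupts. Guarded out since this is an
+ * illustration only; the guard macro is hypothetical.
+ */
+#ifdef SIUTILS_PRIV_EXAMPLE
+static void
+example_touch_chipcommon(si_info_t *sii)
+{
+	uint intr_val = 0;
+	uint origidx;
+
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(&sii->pub);
+
+	if (si_setcore(&sii->pub, CC_CORE_ID, 0) != NULL) {
+		/* ... access chipcommon registers here ... */
+	}
+
+	si_setcoreidx(&sii->pub, origidx);
+	INTR_RESTORE(sii, intr_val);
+}
+#endif /* SIUTILS_PRIV_EXAMPLE */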
+
+/* dynamic clock control defines */
+#define LPOMINFREQ 25000 /* low power oscillator min */
+#define LPOMAXFREQ 43000 /* low power oscillator max */
+#define XTALMINFREQ 19800000 /* 20 MHz - 1% */
+#define XTALMAXFREQ 20200000 /* 20 MHz + 1% */
+#define PCIMINFREQ 25000000 /* 25 MHz */
+#define PCIMAXFREQ 34000000 /* 33 MHz + fudge */
+
+#define ILP_DIV_5MHZ 0 /* ILP = 5 MHz */
+#define ILP_DIV_1MHZ 4 /* ILP = 1 MHz */
+
+#define PCI_FORCEHT(si) \
+ (((PCIE_GEN1(si)) && (si->pub.chip == BCM4311_CHIP_ID) && ((si->pub.chiprev <= 1))) || \
+ ((PCI(si) || PCIE_GEN1(si)) && (si->pub.chip == BCM4321_CHIP_ID)) || \
+ (PCIE_GEN1(si) && (si->pub.chip == BCM4716_CHIP_ID)) || \
+ (PCIE_GEN1(si) && (si->pub.chip == BCM4748_CHIP_ID)))
+
+/* GPIO Based LED powersave defines */
+#define DEFAULT_GPIO_ONTIME 10 /* Default: 10% on */
+#define DEFAULT_GPIO_OFFTIME	90		/* Default: 90% off */
+
+#ifndef DEFAULT_GPIOTIMERVAL
+#define DEFAULT_GPIOTIMERVAL ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME)
+#endif
+
+/* Silicon Backplane externs */
+extern void sb_scan(si_t *sih, void *regs, uint devid);
+extern uint sb_coreid(si_t *sih);
+extern uint sb_intflag(si_t *sih);
+extern uint sb_flag(si_t *sih);
+extern void sb_setint(si_t *sih, int siflag);
+extern uint sb_corevendor(si_t *sih);
+extern uint sb_corerev(si_t *sih);
+extern uint sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern bool sb_iscoreup(si_t *sih);
+extern void *sb_setcoreidx(si_t *sih, uint coreidx);
+extern uint32 sb_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 sb_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern void sb_commit(si_t *sih);
+extern uint32 sb_base(uint32 admatch);
+extern uint32 sb_size(uint32 admatch);
+extern void sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void sb_core_disable(si_t *sih, uint32 bits);
+extern uint32 sb_addrspace(si_t *sih, uint asidx);
+extern uint32 sb_addrspacesize(si_t *sih, uint asidx);
+extern int sb_numaddrspaces(si_t *sih);
+
+extern uint32 sb_set_initiator_to(si_t *sih, uint32 to, uint idx);
+
+extern bool sb_taclear(si_t *sih, bool details);
+
+
+/* Wake-on-wireless-LAN (WOWL) */
+extern bool sb_pci_pmecap(si_t *sih);
+struct osl_info;
+extern bool sb_pci_fastpmecap(struct osl_info *osh);
+extern bool sb_pci_pmeclr(si_t *sih);
+extern void sb_pci_pmeen(si_t *sih);
+extern uint sb_pcie_readreg(void *sih, uint addrtype, uint offset);
+
+/* AMBA Interconnect exported externs */
+extern si_t *ai_attach(uint pcidev, osl_t *osh, void *regs, uint bustype,
+ void *sdh, char **vars, uint *varsz);
+extern si_t *ai_kattach(osl_t *osh);
+extern void ai_scan(si_t *sih, void *regs, uint devid);
+
+extern uint ai_flag(si_t *sih);
+extern void ai_setint(si_t *sih, int siflag);
+extern uint ai_coreidx(si_t *sih);
+extern uint ai_corevendor(si_t *sih);
+extern uint ai_corerev(si_t *sih);
+extern bool ai_iscoreup(si_t *sih);
+extern void *ai_setcoreidx(si_t *sih, uint coreidx);
+extern uint32 ai_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 ai_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern void ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void ai_core_disable(si_t *sih, uint32 bits);
+extern int ai_numaddrspaces(si_t *sih);
+extern uint32 ai_addrspace(si_t *sih, uint asidx);
+extern uint32 ai_addrspacesize(si_t *sih, uint asidx);
+extern void ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size);
+extern uint ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
+
+
+
+#define ub_scan(a, b, c) do {} while (0)
+#define ub_flag(a) (0)
+#define ub_setint(a, b) do {} while (0)
+#define ub_coreidx(a) (0)
+#define ub_corevendor(a) (0)
+#define ub_corerev(a) (0)
+#define ub_iscoreup(a) (0)
+#define ub_setcoreidx(a, b) (0)
+#define ub_core_cflags(a, b, c) (0)
+#define ub_core_cflags_wo(a, b, c) do {} while (0)
+#define ub_core_sflags(a, b, c) (0)
+#define ub_corereg(a, b, c, d, e) (0)
+#define ub_core_reset(a, b, c) do {} while (0)
+#define ub_core_disable(a, b) do {} while (0)
+#define ub_numaddrspaces(a) (0)
+#define ub_addrspace(a, b) (0)
+#define ub_addrspacesize(a, b) (0)
+#define ub_view(a, b) do {} while (0)
+#define ub_dumpregs(a, b) do {} while (0)
+
+#endif /* _siutils_priv_h_ */
diff --git a/drivers/net/wireless/bcmdhd/uamp_api.h b/drivers/net/wireless/bcmdhd/uamp_api.h
new file mode 100644
index 000000000000..673dce08aad7
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/uamp_api.h
@@ -0,0 +1,176 @@
+/*
+ * Name: uamp_api.h
+ *
+ * Description: Universal AMP API
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: uamp_api.h 294267 2011-11-04 23:41:52Z $
+ *
+ */
+#ifndef UAMP_API_H
+#define UAMP_API_H
+
+
+#include "typedefs.h"
+
+
+/*****************************************************************************
+** Constant and Type Definitions
+******************************************************************************
+*/
+
+#define BT_API
+
+/* Types. */
+typedef bool BOOLEAN;
+typedef uint8 UINT8;
+typedef uint16 UINT16;
+
+
+/* UAMP identifiers */
+#define UAMP_ID_1 1
+#define UAMP_ID_2 2
+typedef UINT8 tUAMP_ID;
+
+/* UAMP event ids (used by UAMP_CBACK) */
+#define UAMP_EVT_RX_READY 0 /* Data from AMP controller is ready to be read */
+#define UAMP_EVT_CTLR_REMOVED 1 /* Controller removed */
+#define UAMP_EVT_CTLR_READY 2 /* Controller added/ready */
+typedef UINT8 tUAMP_EVT;
+
+
+/* UAMP Channels */
+#define UAMP_CH_HCI_CMD 0 /* HCI Command channel */
+#define UAMP_CH_HCI_EVT 1 /* HCI Event channel */
+#define UAMP_CH_HCI_DATA 2 /* HCI ACL Data channel */
+typedef UINT8 tUAMP_CH;
+
+/* tUAMP_EVT_DATA: union for event-specific data, used by UAMP_CBACK */
+typedef union {
+	tUAMP_CH	channel;	/* UAMP_EVT_RX_READY: channel for which rx occurred */
+} tUAMP_EVT_DATA;
+
+
+/*****************************************************************************
+**
+** Function: UAMP_CBACK
+**
+** Description: Callback for events. Register callback using UAMP_Init.
+**
+** Parameters amp_id: AMP device identifier that generated the event
+** amp_evt: event id
+** p_amp_evt_data: pointer to event-specific data
+**
+******************************************************************************
+*/
+typedef void (*tUAMP_CBACK)(tUAMP_ID amp_id, tUAMP_EVT amp_evt, tUAMP_EVT_DATA *p_amp_evt_data);
+
+/*****************************************************************************
+** external function declarations
+******************************************************************************
+*/
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/*****************************************************************************
+**
+** Function: UAMP_Init
+**
+** Description: Initialize UAMP driver
+**
+** Parameters p_cback: Callback function for UAMP event notification
+**
+******************************************************************************
+*/
+BT_API BOOLEAN UAMP_Init(tUAMP_CBACK p_cback);
+
+
+/*****************************************************************************
+**
+** Function: UAMP_Open
+**
+** Description: Open connection to local AMP device.
+**
+** Parameters amp_id: Application specific AMP identifier. This value
+** will be included in AMP messages sent to the
+** BTU task, to identify the source of the message.
+**
+******************************************************************************
+*/
+BT_API BOOLEAN UAMP_Open(tUAMP_ID amp_id);
+
+/*****************************************************************************
+**
+** Function: UAMP_Close
+**
+** Description: Close connection to local AMP device.
+**
+** Parameters amp_id: Application specific AMP identifier.
+**
+******************************************************************************
+*/
+BT_API void UAMP_Close(tUAMP_ID amp_id);
+
+
+/*****************************************************************************
+**
+** Function: UAMP_Write
+**
+** Description: Send buffer to AMP device. Frees GKI buffer when done.
+**
+**
+** Parameters: amp_id: AMP identifier.
+** p_buf: pointer to buffer to write
+** num_bytes: number of bytes to write
+** channel: UAMP_CH_HCI_DATA (ACL data) or UAMP_CH_HCI_CMD
+**
+** Returns: number of bytes written
+**
+******************************************************************************
+*/
+BT_API UINT16 UAMP_Write(tUAMP_ID amp_id, UINT8 *p_buf, UINT16 num_bytes, tUAMP_CH channel);
+
+/*****************************************************************************
+**
+** Function: UAMP_Read
+**
+** Description: Read incoming data from AMP. Call after receiving a
+** UAMP_EVT_RX_READY callback event.
+**
+** Parameters: amp_id: AMP identifier.
+** p_buf: pointer to buffer for holding incoming AMP data
+** buf_size: size of p_buf
+** channel: UAMP_CH_HCI_DATA (ACL data) or UAMP_CH_HCI_EVT
+**
+** Returns: number of bytes read
+**
+******************************************************************************
+*/
+BT_API UINT16 UAMP_Read(tUAMP_ID amp_id, UINT8 *p_buf, UINT16 buf_size, tUAMP_CH channel);
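+
+/*****************************************************************************
+** Illustrative usage (sketch only): initialize with a callback, open AMP 1,
+** and drain data when UAMP_EVT_RX_READY fires. Guarded out from compilation;
+** the guard macro and buffer size are arbitrary.
+******************************************************************************
+*/
+#ifdef UAMP_API_EXAMPLE
+static void example_uamp_cback(tUAMP_ID amp_id, tUAMP_EVT amp_evt,
+	tUAMP_EVT_DATA *p_amp_evt_data)
+{
+	UINT8 buf[1024];
+
+	if (amp_evt == UAMP_EVT_RX_READY)
+		(void)UAMP_Read(amp_id, buf, sizeof(buf), p_amp_evt_data->channel);
+}
+
+static BOOLEAN example_uamp_start(void)
+{
+	if (!UAMP_Init(example_uamp_cback))
+		return FALSE;
+	return UAMP_Open(UAMP_ID_1);
+}
+#endif /* UAMP_API_EXAMPLE */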
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* UAMP_API_H */
diff --git a/drivers/net/wireless/bcmdhd/wl_android.c b/drivers/net/wireless/bcmdhd/wl_android.c
new file mode 100644
index 000000000000..850694f86f9f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_android.c
@@ -0,0 +1,845 @@
+/*
+ * Linux cfg80211 driver - Android related functions
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_android.c 323797 2012-03-27 01:27:20Z $
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+
+#include <wl_android.h>
+#include <wldev_common.h>
+#include <wlioctl.h>
+#include <bcmutils.h>
+#include <linux_osl.h>
+#include <dhd_dbg.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <bcmsdbus.h>
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+#include <linux/platform_device.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
+#include <linux/wlan_plat.h>
+#else
+#include <linux/wifi_tiwlan.h>
+#endif
+#endif /* CONFIG_WIFI_CONTROL_FUNC */
+
+/*
+ * Android private command strings. Please define new private commands here
+ * so they can be updated easily in the future (if needed).
+ */
+
+#define CMD_START "START"
+#define CMD_STOP "STOP"
+#define CMD_SCAN_ACTIVE "SCAN-ACTIVE"
+#define CMD_SCAN_PASSIVE "SCAN-PASSIVE"
+#define CMD_RSSI "RSSI"
+#define CMD_LINKSPEED "LINKSPEED"
+#define CMD_RXFILTER_START "RXFILTER-START"
+#define CMD_RXFILTER_STOP "RXFILTER-STOP"
+#define CMD_RXFILTER_ADD "RXFILTER-ADD"
+#define CMD_RXFILTER_REMOVE "RXFILTER-REMOVE"
+#define CMD_BTCOEXSCAN_START "BTCOEXSCAN-START"
+#define CMD_BTCOEXSCAN_STOP "BTCOEXSCAN-STOP"
+#define CMD_BTCOEXMODE "BTCOEXMODE"
+#define CMD_SETSUSPENDOPT "SETSUSPENDOPT"
+#define CMD_P2P_DEV_ADDR "P2P_DEV_ADDR"
+#define CMD_SETFWPATH "SETFWPATH"
+#define CMD_SETBAND "SETBAND"
+#define CMD_GETBAND "GETBAND"
+#define CMD_COUNTRY "COUNTRY"
+#define CMD_P2P_SET_NOA "P2P_SET_NOA"
+#define CMD_P2P_SET_PS "P2P_SET_PS"
+#define CMD_SET_AP_WPS_P2P_IE "SET_AP_WPS_P2P_IE"
+
+
+#ifdef PNO_SUPPORT
+#define CMD_PNOSSIDCLR_SET "PNOSSIDCLR"
+#define CMD_PNOSETUP_SET "PNOSETUP "
+#define CMD_PNOENABLE_SET "PNOFORCE"
+#define CMD_PNODEBUG_SET "PNODEBUG"
+
+#define PNO_TLV_PREFIX 'S'
+#define PNO_TLV_VERSION '1'
+#define PNO_TLV_SUBVERSION '2'
+#define PNO_TLV_RESERVED '0'
+#define PNO_TLV_TYPE_SSID_IE 'S'
+#define PNO_TLV_TYPE_TIME 'T'
+#define PNO_TLV_FREQ_REPEAT 'R'
+#define PNO_TLV_FREQ_EXPO_MAX 'M'
+
+typedef struct cmd_tlv {
+ char prefix;
+ char version;
+ char subver;
+ char reserved;
+} cmd_tlv_t;
+#endif /* PNO_SUPPORT */
+
+typedef struct android_wifi_priv_cmd {
+ char *buf;
+ int used_len;
+ int total_len;
+} android_wifi_priv_cmd;
+
+/**
+ * Extern function declarations (TODO: move them to dhd_linux.h)
+ */
+void dhd_customer_gpio_wlan_ctrl(int onoff);
+uint dhd_dev_reset(struct net_device *dev, uint8 flag);
+void dhd_dev_init_ioctl(struct net_device *dev);
+#ifdef WL_CFG80211
+int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr);
+int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, char *command);
+#else
+int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr)
+{ return 0; }
+int wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len)
+{ return 0; }
+int wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len)
+{ return 0; }
+int wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len)
+{ return 0; }
+#endif
+extern int dhd_os_check_if_up(void *dhdp);
+extern void *bcmsdh_get_drvdata(void);
+#ifdef PROP_TXSTATUS
+extern int dhd_wlfc_init(dhd_pub_t *dhd);
+extern void dhd_wlfc_deinit(dhd_pub_t *dhd);
+#endif
+
+extern bool ap_fw_loaded;
+#ifdef CUSTOMER_HW2
+extern char iface_name[IFNAMSIZ];
+#endif
+
+/**
+ * Local (static) functions and variables
+ */
+
+/* Initialize g_wifi_on to 1 so that dhd_bus_start will be called for the first
+ * time (only) in dhd_open; subsequent wifi-on requests are handled by
+ * wl_android_wifi_on.
+ */
+static int g_wifi_on = TRUE;
+
+/**
+ * Local (static) function definitions
+ */
+static int wl_android_get_link_speed(struct net_device *net, char *command, int total_len)
+{
+ int link_speed;
+ int bytes_written;
+ int error;
+
+ error = wldev_get_link_speed(net, &link_speed);
+ if (error)
+ return -1;
+
+ /* Convert Kbps to Android Mbps */
+ link_speed = link_speed / 1000;
+ bytes_written = snprintf(command, total_len, "LinkSpeed %d", link_speed);
+ DHD_INFO(("%s: command result is %s\n", __FUNCTION__, command));
+ return bytes_written;
+}
+
+static int wl_android_get_rssi(struct net_device *net, char *command, int total_len)
+{
+ wlc_ssid_t ssid = {0};
+ int rssi;
+ int bytes_written = 0;
+ int error;
+
+ error = wldev_get_rssi(net, &rssi);
+ if (error)
+ return -1;
+
+ error = wldev_get_ssid(net, &ssid);
+ if (error)
+ return -1;
+ if ((ssid.SSID_len == 0) || (ssid.SSID_len > DOT11_MAX_SSID_LEN)) {
+ DHD_ERROR(("%s: wldev_get_ssid failed\n", __FUNCTION__));
+ } else {
+ memcpy(command, ssid.SSID, ssid.SSID_len);
+ bytes_written = ssid.SSID_len;
+ }
+ bytes_written += snprintf(&command[bytes_written], total_len, " rssi %d", rssi);
+ DHD_INFO(("%s: command result is %s (%d)\n", __FUNCTION__, command, bytes_written));
+ return bytes_written;
+}
+
+static int wl_android_set_suspendopt(struct net_device *dev, char *command, int total_len)
+{
+ int suspend_flag;
+ int ret_now;
+ int ret = 0;
+
+ suspend_flag = *(command + strlen(CMD_SETSUSPENDOPT) + 1) - '0';
+
+ if (suspend_flag != 0)
+ suspend_flag = 1;
+ ret_now = net_os_set_suspend_disable(dev, suspend_flag);
+
+ if (ret_now != suspend_flag) {
+ if (!(ret = net_os_set_suspend(dev, ret_now)))
+ DHD_INFO(("%s: Suspend Flag %d -> %d\n",
+ __FUNCTION__, ret_now, suspend_flag));
+ else
+ DHD_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+ }
+ return ret;
+}
+
+static int wl_android_get_band(struct net_device *dev, char *command, int total_len)
+{
+ uint band;
+ int bytes_written;
+ int error;
+
+ error = wldev_get_band(dev, &band);
+ if (error)
+ return -1;
+ bytes_written = snprintf(command, total_len, "Band %d", band);
+ return bytes_written;
+}
+
+#ifdef PNO_SUPPORT
+static int wl_android_set_pno_setup(struct net_device *dev, char *command, int total_len)
+{
+ wlc_ssid_t ssids_local[MAX_PFN_LIST_COUNT];
+ int res = -1;
+ int nssid = 0;
+ cmd_tlv_t *cmd_tlv_temp;
+ char *str_ptr;
+ int tlv_size_left;
+ int pno_time = 0;
+ int pno_repeat = 0;
+ int pno_freq_expo_max = 0;
+
+#ifdef PNO_SET_DEBUG
+ int i;
+ char pno_in_example[] = {
+ 'P', 'N', 'O', 'S', 'E', 'T', 'U', 'P', ' ',
+ 'S', '1', '2', '0',
+ 'S',
+ 0x05,
+ 'd', 'l', 'i', 'n', 'k',
+ 'S',
+ 0x04,
+ 'G', 'O', 'O', 'G',
+ 'T',
+ '0', 'B',
+ 'R',
+ '2',
+ 'M',
+ '2',
+ 0x00
+ };
+#endif /* PNO_SET_DEBUG */
+
+ DHD_INFO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
+
+ if (total_len < (strlen(CMD_PNOSETUP_SET) + sizeof(cmd_tlv_t))) {
+ DHD_ERROR(("%s argument=%d less min size\n", __FUNCTION__, total_len));
+ goto exit_proc;
+ }
+
+
+#ifdef PNO_SET_DEBUG
+ memcpy(command, pno_in_example, sizeof(pno_in_example));
+ for (i = 0; i < sizeof(pno_in_example); i++)
+ printf("%02X ", command[i]);
+ printf("\n");
+ total_len = sizeof(pno_in_example);
+#endif
+
+ str_ptr = command + strlen(CMD_PNOSETUP_SET);
+ tlv_size_left = total_len - strlen(CMD_PNOSETUP_SET);
+
+ cmd_tlv_temp = (cmd_tlv_t *)str_ptr;
+ memset(ssids_local, 0, sizeof(ssids_local));
+
+ if ((cmd_tlv_temp->prefix == PNO_TLV_PREFIX) &&
+ (cmd_tlv_temp->version == PNO_TLV_VERSION) &&
+ (cmd_tlv_temp->subver == PNO_TLV_SUBVERSION)) {
+
+ str_ptr += sizeof(cmd_tlv_t);
+ tlv_size_left -= sizeof(cmd_tlv_t);
+
+ if ((nssid = wl_iw_parse_ssid_list_tlv(&str_ptr, ssids_local,
+ MAX_PFN_LIST_COUNT, &tlv_size_left)) <= 0) {
+ DHD_ERROR(("SSID is not presented or corrupted ret=%d\n", nssid));
+ goto exit_proc;
+ } else {
+ if ((str_ptr[0] != PNO_TLV_TYPE_TIME) || (tlv_size_left <= 1)) {
+ DHD_ERROR(("%s scan duration corrupted field size %d\n",
+ __FUNCTION__, tlv_size_left));
+ goto exit_proc;
+ }
+ str_ptr++;
+ pno_time = simple_strtoul(str_ptr, &str_ptr, 16);
+ DHD_INFO(("%s: pno_time=%d\n", __FUNCTION__, pno_time));
+
+ if (str_ptr[0] != 0) {
+ if ((str_ptr[0] != PNO_TLV_FREQ_REPEAT)) {
+ DHD_ERROR(("%s pno repeat : corrupted field\n",
+ __FUNCTION__));
+ goto exit_proc;
+ }
+ str_ptr++;
+ pno_repeat = simple_strtoul(str_ptr, &str_ptr, 16);
+ DHD_INFO(("%s :got pno_repeat=%d\n", __FUNCTION__, pno_repeat));
+ if (str_ptr[0] != PNO_TLV_FREQ_EXPO_MAX) {
+ DHD_ERROR(("%s FREQ_EXPO_MAX corrupted field size\n",
+ __FUNCTION__));
+ goto exit_proc;
+ }
+ str_ptr++;
+ pno_freq_expo_max = simple_strtoul(str_ptr, &str_ptr, 16);
+ DHD_INFO(("%s: pno_freq_expo_max=%d\n",
+ __FUNCTION__, pno_freq_expo_max));
+ }
+ }
+ } else {
+ DHD_ERROR(("%s get wrong TLV command\n", __FUNCTION__));
+ goto exit_proc;
+ }
+
+ res = dhd_dev_pno_set(dev, ssids_local, nssid, pno_time, pno_repeat, pno_freq_expo_max);
+
+exit_proc:
+ return res;
+}
+#endif /* PNO_SUPPORT */
+
+static int wl_android_get_p2p_dev_addr(struct net_device *ndev, char *command, int total_len)
+{
+ int ret;
+ int bytes_written = 0;
+
+ ret = wl_cfg80211_get_p2p_dev_addr(ndev, (struct ether_addr*)command);
+ if (ret)
+ return 0;
+ bytes_written = sizeof(struct ether_addr);
+ return bytes_written;
+}
+
+/**
+ * Global function definitions (declared in wl_android.h)
+ */
+
+int wl_android_wifi_on(struct net_device *dev)
+{
+ int ret = 0;
+ int retry = POWERUP_MAX_RETRY;
+
+ printk("%s in\n", __FUNCTION__);
+ if (!dev) {
+ DHD_ERROR(("%s: dev is null\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ dhd_net_if_lock(dev);
+ if (!g_wifi_on) {
+ do {
+ dhd_customer_gpio_wlan_ctrl(WLAN_RESET_ON);
+ ret = sdioh_start(NULL, 0);
+ if (ret == 0)
+ break;
+ DHD_ERROR(("\nfailed to power up wifi chip, retry again (%d left) **\n\n",
+ retry+1));
+ dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF);
+ } while (retry-- >= 0);
+ if (ret != 0) {
+ DHD_ERROR(("\nfailed to power up wifi chip, max retry reached **\n\n"));
+ goto exit;
+ }
+ ret = dhd_dev_reset(dev, FALSE);
+ sdioh_start(NULL, 1);
+ dhd_dev_init_ioctl(dev);
+#ifdef PROP_TXSTATUS
+ dhd_wlfc_init(bcmsdh_get_drvdata());
+#endif
+ g_wifi_on = TRUE;
+ }
+
+exit:
+ dhd_net_if_unlock(dev);
+
+ return ret;
+}
+
+int wl_android_wifi_off(struct net_device *dev)
+{
+ int ret = 0;
+
+ printk("%s in\n", __FUNCTION__);
+ if (!dev) {
+ DHD_TRACE(("%s: dev is null\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ dhd_net_if_lock(dev);
+ if (g_wifi_on) {
+#ifdef PROP_TXSTATUS
+ dhd_wlfc_deinit(bcmsdh_get_drvdata());
+#endif
+ dhd_dev_reset(dev, 1);
+ sdioh_stop(NULL);
+ dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF);
+ g_wifi_on = FALSE;
+ }
+ dhd_net_if_unlock(dev);
+
+ return ret;
+}
+
+static int wl_android_set_fwpath(struct net_device *net, char *command, int total_len)
+{
+ if ((strlen(command) - strlen(CMD_SETFWPATH)) > MOD_PARAM_PATHLEN)
+ return -1;
+ bcm_strncpy_s(fw_path, sizeof(fw_path),
+ command + strlen(CMD_SETFWPATH) + 1, MOD_PARAM_PATHLEN - 1);
+ if (strstr(fw_path, "apsta") != NULL) {
+ DHD_INFO(("GOT APSTA FIRMWARE\n"));
+ ap_fw_loaded = TRUE;
+ } else {
+ DHD_INFO(("GOT STA FIRMWARE\n"));
+ ap_fw_loaded = FALSE;
+ }
+ return 0;
+}
+
+int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+ int ret = 0;
+ char *command = NULL;
+ int bytes_written = 0;
+ android_wifi_priv_cmd priv_cmd;
+
+ net_os_wake_lock(net);
+
+ if (!ifr->ifr_data) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (copy_from_user(&priv_cmd, ifr->ifr_data, sizeof(android_wifi_priv_cmd))) {
+ ret = -EFAULT;
+ goto exit;
+ }
+ command = kmalloc(priv_cmd.total_len, GFP_KERNEL);
+ if (!command)
+ {
+ DHD_ERROR(("%s: failed to allocate memory\n", __FUNCTION__));
+ ret = -ENOMEM;
+ goto exit;
+ }
+ if (copy_from_user(command, priv_cmd.buf, priv_cmd.total_len)) {
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ DHD_INFO(("%s: Android private cmd \"%s\" on %s\n", __FUNCTION__, command, ifr->ifr_name));
+
+ if (strnicmp(command, CMD_START, strlen(CMD_START)) == 0) {
+ DHD_INFO(("%s, Received regular START command\n", __FUNCTION__));
+ bytes_written = wl_android_wifi_on(net);
+ }
+ else if (strnicmp(command, CMD_SETFWPATH, strlen(CMD_SETFWPATH)) == 0) {
+ bytes_written = wl_android_set_fwpath(net, command, priv_cmd.total_len);
+ }
+
+ if (!g_wifi_on) {
+ DHD_ERROR(("%s: Ignore private cmd \"%s\" - iface %s is down\n",
+ __FUNCTION__, command, ifr->ifr_name));
+ ret = 0;
+ goto exit;
+ }
+
+ if (strnicmp(command, CMD_STOP, strlen(CMD_STOP)) == 0) {
+ bytes_written = wl_android_wifi_off(net);
+ }
+ else if (strnicmp(command, CMD_SCAN_ACTIVE, strlen(CMD_SCAN_ACTIVE)) == 0) {
+ /* TBD: SCAN-ACTIVE */
+ }
+ else if (strnicmp(command, CMD_SCAN_PASSIVE, strlen(CMD_SCAN_PASSIVE)) == 0) {
+ /* TBD: SCAN-PASSIVE */
+ }
+ else if (strnicmp(command, CMD_RSSI, strlen(CMD_RSSI)) == 0) {
+ bytes_written = wl_android_get_rssi(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_LINKSPEED, strlen(CMD_LINKSPEED)) == 0) {
+ bytes_written = wl_android_get_link_speed(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_RXFILTER_START, strlen(CMD_RXFILTER_START)) == 0) {
+ bytes_written = net_os_set_packet_filter(net, 1);
+ }
+ else if (strnicmp(command, CMD_RXFILTER_STOP, strlen(CMD_RXFILTER_STOP)) == 0) {
+ bytes_written = net_os_set_packet_filter(net, 0);
+ }
+ else if (strnicmp(command, CMD_RXFILTER_ADD, strlen(CMD_RXFILTER_ADD)) == 0) {
+ int filter_num = *(command + strlen(CMD_RXFILTER_ADD) + 1) - '0';
+ bytes_written = net_os_rxfilter_add_remove(net, TRUE, filter_num);
+ }
+ else if (strnicmp(command, CMD_RXFILTER_REMOVE, strlen(CMD_RXFILTER_REMOVE)) == 0) {
+ int filter_num = *(command + strlen(CMD_RXFILTER_REMOVE) + 1) - '0';
+ bytes_written = net_os_rxfilter_add_remove(net, FALSE, filter_num);
+ }
+ else if (strnicmp(command, CMD_BTCOEXSCAN_START, strlen(CMD_BTCOEXSCAN_START)) == 0) {
+ /* TBD: BTCOEXSCAN-START */
+ }
+ else if (strnicmp(command, CMD_BTCOEXSCAN_STOP, strlen(CMD_BTCOEXSCAN_STOP)) == 0) {
+ /* TBD: BTCOEXSCAN-STOP */
+ }
+ else if (strnicmp(command, CMD_BTCOEXMODE, strlen(CMD_BTCOEXMODE)) == 0) {
+ uint mode = *(command + strlen(CMD_BTCOEXMODE) + 1) - '0';
+
+ if (mode == 1)
+ net_os_set_packet_filter(net, 0); /* DHCP starts */
+ else
+ net_os_set_packet_filter(net, 1); /* DHCP ends */
+#ifdef WL_CFG80211
+ bytes_written = wl_cfg80211_set_btcoex_dhcp(net, command);
+#endif
+ }
+ else if (strnicmp(command, CMD_SETSUSPENDOPT, strlen(CMD_SETSUSPENDOPT)) == 0) {
+ bytes_written = wl_android_set_suspendopt(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_SETBAND, strlen(CMD_SETBAND)) == 0) {
+ uint band = *(command + strlen(CMD_SETBAND) + 1) - '0';
+ bytes_written = wldev_set_band(net, band);
+ }
+ else if (strnicmp(command, CMD_GETBAND, strlen(CMD_GETBAND)) == 0) {
+ bytes_written = wl_android_get_band(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_COUNTRY, strlen(CMD_COUNTRY)) == 0) {
+ char *country_code = command + strlen(CMD_COUNTRY) + 1;
+ bytes_written = wldev_set_country(net, country_code);
+ }
+#ifdef PNO_SUPPORT
+ else if (strnicmp(command, CMD_PNOSSIDCLR_SET, strlen(CMD_PNOSSIDCLR_SET)) == 0) {
+ bytes_written = dhd_dev_pno_reset(net);
+ }
+ else if (strnicmp(command, CMD_PNOSETUP_SET, strlen(CMD_PNOSETUP_SET)) == 0) {
+ bytes_written = wl_android_set_pno_setup(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_PNOENABLE_SET, strlen(CMD_PNOENABLE_SET)) == 0) {
+ uint pfn_enabled = *(command + strlen(CMD_PNOENABLE_SET) + 1) - '0';
+ bytes_written = dhd_dev_pno_enable(net, pfn_enabled);
+ }
+#endif
+ else if (strnicmp(command, CMD_P2P_DEV_ADDR, strlen(CMD_P2P_DEV_ADDR)) == 0) {
+ bytes_written = wl_android_get_p2p_dev_addr(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_P2P_SET_NOA, strlen(CMD_P2P_SET_NOA)) == 0) {
+ int skip = strlen(CMD_P2P_SET_NOA) + 1;
+ bytes_written = wl_cfg80211_set_p2p_noa(net, command + skip,
+ priv_cmd.total_len - skip);
+ }
+ else if (strnicmp(command, CMD_P2P_SET_PS, strlen(CMD_P2P_SET_PS)) == 0) {
+ int skip = strlen(CMD_P2P_SET_PS) + 1;
+ bytes_written = wl_cfg80211_set_p2p_ps(net, command + skip,
+ priv_cmd.total_len - skip);
+ }
+#ifdef WL_CFG80211
+ else if (strnicmp(command, CMD_SET_AP_WPS_P2P_IE,
+ strlen(CMD_SET_AP_WPS_P2P_IE)) == 0) {
+ int skip = strlen(CMD_SET_AP_WPS_P2P_IE) + 3;
+ bytes_written = wl_cfg80211_set_wps_p2p_ie(net, command + skip,
+ priv_cmd.total_len - skip, *(command + skip - 2) - '0');
+ }
+#endif /* WL_CFG80211 */
+ else {
+ DHD_ERROR(("Unknown PRIVATE command %s - ignored\n", command));
+ snprintf(command, 3, "OK");
+ bytes_written = strlen("OK");
+ }
+
+ if (bytes_written >= 0) {
+ if ((bytes_written == 0) && (priv_cmd.total_len > 0))
+ command[0] = '\0';
+ if (bytes_written >= priv_cmd.total_len) {
+ DHD_ERROR(("%s: bytes_written = %d\n", __FUNCTION__, bytes_written));
+ bytes_written = priv_cmd.total_len;
+ } else {
+ bytes_written++;
+ }
+ priv_cmd.used_len = bytes_written;
+ if (copy_to_user(priv_cmd.buf, command, bytes_written)) {
+ DHD_ERROR(("%s: failed to copy data to user buffer\n", __FUNCTION__));
+ ret = -EFAULT;
+ }
+ }
+ else {
+ ret = bytes_written;
+ }
+
+exit:
+ net_os_wake_unlock(net);
+ if (command) {
+ kfree(command);
+ }
+
+ return ret;
+}
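+
+/* Usage sketch (illustrative only, not part of the driver): user space is
+ * expected to hand an android_wifi_priv_cmd to this handler through the
+ * net_device private ioctl path. The exact ioctl command number used by the
+ * Android Wi-Fi HAL is an assumption here:
+ *
+ *	int sock = socket(AF_INET, SOCK_DGRAM, 0);
+ *	char buf[64] = "RSSI";
+ *	android_wifi_priv_cmd priv_cmd = {
+ *		.buf = buf,
+ *		.used_len = 0,
+ *		.total_len = sizeof(buf),
+ *	};
+ *	struct ifreq ifr = { 0 };
+ *	strncpy(ifr.ifr_name, "wlan0", IFNAMSIZ - 1);
+ *	ifr.ifr_data = (void *)&priv_cmd;
+ *	ioctl(sock, SIOCDEVPRIVATE + 1, &ifr);	(command number is assumed)
+ *
+ * On success the reply string is copied back into buf.
+ */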
+
+int wl_android_init(void)
+{
+ int ret = 0;
+
+#ifdef ENABLE_INSMOD_NO_FW_LOAD
+ dhd_download_fw_on_driverload = FALSE;
+#endif /* ENABLE_INSMOD_NO_FW_LOAD */
+#ifdef CUSTOMER_HW2
+ if (!iface_name[0]) {
+ memset(iface_name, 0, IFNAMSIZ);
+ bcm_strncpy_s(iface_name, IFNAMSIZ, "wlan", IFNAMSIZ);
+ }
+#endif /* CUSTOMER_HW2 */
+ return ret;
+}
+
+int wl_android_exit(void)
+{
+ int ret = 0;
+
+ return ret;
+}
+
+void wl_android_post_init(void)
+{
+ if (!dhd_download_fw_on_driverload) {
+ /* Call customer gpio to turn off power with WL_REG_ON signal */
+ dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF);
+ g_wifi_on = 0;
+ }
+}
+
+/**
+ * Functions for Android WiFi card detection
+ */
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+
+static int g_wifidev_registered = 0;
+static struct semaphore wifi_control_sem;
+static struct wifi_platform_data *wifi_control_data = NULL;
+static struct resource *wifi_irqres = NULL;
+
+static int wifi_add_dev(void);
+static void wifi_del_dev(void);
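+
+/* Illustrative board-file sketch (not part of this driver): the callbacks in
+ * struct wifi_platform_data are expected to be supplied by the platform via a
+ * platform_device named "bcmdhd_wlan" (or the legacy "bcm4329_wlan"); the
+ * board_wifi_* names below are hypothetical helpers:
+ *
+ *	static struct wifi_platform_data board_wifi_control = {
+ *		.set_power       = board_wifi_power,
+ *		.set_carddetect  = board_wifi_carddetect,
+ *		.get_mac_addr    = board_wifi_get_mac_addr,
+ *	};
+ *
+ *	static struct platform_device board_wifi_device = {
+ *		.name = "bcmdhd_wlan",
+ *		.id   = 1,
+ *		.dev  = { .platform_data = &board_wifi_control },
+ *	};
+ *	platform_device_register(&board_wifi_device);
+ *
+ * wifi_probe() below then picks up platform_data and the optional
+ * "bcmdhd_wlan_irq" IRQ resource.
+ */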
+
+int wl_android_wifictrl_func_add(void)
+{
+ int ret = 0;
+ sema_init(&wifi_control_sem, 0);
+
+ ret = wifi_add_dev();
+ if (ret) {
+ DHD_ERROR(("%s: platform_driver_register failed\n", __FUNCTION__));
+ return ret;
+ }
+ g_wifidev_registered = 1;
+
+ /* Wait for the probe callback after platform_driver_register(), or exit with an error on timeout */
+ if (down_timeout(&wifi_control_sem, msecs_to_jiffies(1000)) != 0) {
+ ret = -EINVAL;
+ DHD_ERROR(("%s: platform_driver_register timeout\n", __FUNCTION__));
+ }
+
+ return ret;
+}
+
+void wl_android_wifictrl_func_del(void)
+{
+ if (g_wifidev_registered)
+ {
+ wifi_del_dev();
+ g_wifidev_registered = 0;
+ }
+}
+
+void* wl_android_prealloc(int section, unsigned long size)
+{
+ void *alloc_ptr = NULL;
+ if (wifi_control_data && wifi_control_data->mem_prealloc) {
+ alloc_ptr = wifi_control_data->mem_prealloc(section, size);
+ if (alloc_ptr) {
+ DHD_INFO(("success alloc section %d\n", section));
+ if (size != 0L)
+ bzero(alloc_ptr, size);
+ return alloc_ptr;
+ }
+ }
+
+ DHD_ERROR(("can't alloc section %d\n", section));
+ return NULL;
+}
+
+int wifi_get_irq_number(unsigned long *irq_flags_ptr)
+{
+ if (wifi_irqres) {
+ *irq_flags_ptr = wifi_irqres->flags & IRQF_TRIGGER_MASK;
+ return (int)wifi_irqres->start;
+ }
+#ifdef CUSTOM_OOB_GPIO_NUM
+ return CUSTOM_OOB_GPIO_NUM;
+#else
+ return -1;
+#endif
+}
+
+int wifi_set_power(int on, unsigned long msec)
+{
+ DHD_ERROR(("%s = %d\n", __FUNCTION__, on));
+ if (wifi_control_data && wifi_control_data->set_power) {
+ wifi_control_data->set_power(on);
+ }
+ if (msec)
+ msleep(msec);
+ return 0;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
+int wifi_get_mac_addr(unsigned char *buf)
+{
+ DHD_ERROR(("%s\n", __FUNCTION__));
+ if (!buf)
+ return -EINVAL;
+ if (wifi_control_data && wifi_control_data->get_mac_addr) {
+ return wifi_control_data->get_mac_addr(buf);
+ }
+ return -EOPNOTSUPP;
+}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+void *wifi_get_country_code(char *ccode)
+{
+ DHD_TRACE(("%s\n", __FUNCTION__));
+ if (!ccode)
+ return NULL;
+ if (wifi_control_data && wifi_control_data->get_country_code) {
+ return wifi_control_data->get_country_code(ccode);
+ }
+ return NULL;
+}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */
+
+static int wifi_set_carddetect(int on)
+{
+ DHD_ERROR(("%s = %d\n", __FUNCTION__, on));
+ if (wifi_control_data && wifi_control_data->set_carddetect) {
+ wifi_control_data->set_carddetect(on);
+ }
+ return 0;
+}
+
+static int wifi_probe(struct platform_device *pdev)
+{
+ struct wifi_platform_data *wifi_ctrl =
+ (struct wifi_platform_data *)(pdev->dev.platform_data);
+
+ wifi_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bcmdhd_wlan_irq");
+ if (wifi_irqres == NULL)
+ wifi_irqres = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "bcm4329_wlan_irq");
+ wifi_control_data = wifi_ctrl;
+ wifi_set_power(1, 0); /* Power On */
+ wifi_set_carddetect(1); /* CardDetect (0->1) */
+
+ up(&wifi_control_sem);
+ return 0;
+}
+
+static int wifi_remove(struct platform_device *pdev)
+{
+ struct wifi_platform_data *wifi_ctrl =
+ (struct wifi_platform_data *)(pdev->dev.platform_data);
+
+ DHD_ERROR(("## %s\n", __FUNCTION__));
+ wifi_control_data = wifi_ctrl;
+
+ wifi_set_power(0, 0); /* Power Off */
+ wifi_set_carddetect(0); /* CardDetect (1->0) */
+
+ up(&wifi_control_sem);
+ return 0;
+}
+
+static int wifi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ DHD_TRACE(("##> %s\n", __FUNCTION__));
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY) && 1
+ bcmsdh_oob_intr_set(0);
+#endif /* (OOB_INTR_ONLY) */
+ return 0;
+}
+
+static int wifi_resume(struct platform_device *pdev)
+{
+ DHD_TRACE(("##> %s\n", __FUNCTION__));
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY) && 1
+ if (dhd_os_check_if_up(bcmsdh_get_drvdata()))
+ bcmsdh_oob_intr_set(1);
+#endif /* (OOB_INTR_ONLY) */
+ return 0;
+}
+
+static struct platform_driver wifi_device = {
+ .probe = wifi_probe,
+ .remove = wifi_remove,
+ .suspend = wifi_suspend,
+ .resume = wifi_resume,
+ .driver = {
+ .name = "bcmdhd_wlan",
+ }
+};
+
+static struct platform_driver wifi_device_legacy = {
+ .probe = wifi_probe,
+ .remove = wifi_remove,
+ .suspend = wifi_suspend,
+ .resume = wifi_resume,
+ .driver = {
+ .name = "bcm4329_wlan",
+ }
+};
+
+static int wifi_add_dev(void)
+{
+ DHD_TRACE(("## Calling platform_driver_register\n"));
+ platform_driver_register(&wifi_device);
+ platform_driver_register(&wifi_device_legacy);
+ return 0;
+}
+
+static void wifi_del_dev(void)
+{
+ DHD_TRACE(("## Unregister platform_driver_register\n"));
+ platform_driver_unregister(&wifi_device);
+ platform_driver_unregister(&wifi_device_legacy);
+}
+#endif /* defined(CONFIG_WIFI_CONTROL_FUNC) */
diff --git a/drivers/net/wireless/bcmdhd/wl_android.h b/drivers/net/wireless/bcmdhd/wl_android.h
new file mode 100644
index 000000000000..583a16735ecd
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_android.h
@@ -0,0 +1,57 @@
+/*
+ * Linux cfg80211 driver - Android related functions
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_android.h 307885 2012-01-12 23:30:48Z $
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <wldev_common.h>
+
+/**
+ * Android platform dependent functions; feel free to add Android specific functions here
+ * (keep the macros in dhd). Please do NOT declare functions here that are NOT exposed to
+ * dhd or cfg; define them as static in wl_android.c instead.
+ */
+
+/**
+ * wl_android_init is called from the module init function (currently dhd_module_init);
+ * similarly, wl_android_exit is called from the module exit function (currently
+ * dhd_module_cleanup).
+ */
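+/* A rough sketch of the expected wiring on the dhd side (illustrative only;
+ * the real dhd_module_init/dhd_module_cleanup do considerably more):
+ *
+ *	static int __init dhd_module_init(void)
+ *	{
+ *		wl_android_init();
+ *		...
+ *	}
+ *	module_init(dhd_module_init);
+ *
+ *	static void __exit dhd_module_cleanup(void)
+ *	{
+ *		...
+ *		wl_android_exit();
+ *	}
+ *	module_exit(dhd_module_cleanup);
+ */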
+int wl_android_init(void);
+int wl_android_exit(void);
+void wl_android_post_init(void);
+int wl_android_wifi_on(struct net_device *dev);
+int wl_android_wifi_off(struct net_device *dev);
+int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd);
+
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+int wl_android_wifictrl_func_add(void);
+void wl_android_wifictrl_func_del(void);
+void* wl_android_prealloc(int section, unsigned long size);
+
+int wifi_get_irq_number(unsigned long *irq_flags_ptr);
+int wifi_set_power(int on, unsigned long msec);
+int wifi_get_mac_addr(unsigned char *buf);
+void *wifi_get_country_code(char *ccode);
+#endif /* CONFIG_WIFI_CONTROL_FUNC */
diff --git a/drivers/net/wireless/bcmdhd/wl_cfg80211.c b/drivers/net/wireless/bcmdhd/wl_cfg80211.c
new file mode 100644
index 000000000000..40f1b2ce4889
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfg80211.c
@@ -0,0 +1,7332 @@
+/*
+ * Linux cfg80211 driver
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfg80211.c 328984 2012-04-23 14:08:37Z $
+ */
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <linux/kernel.h>
+
+#include <bcmutils.h>
+#include <bcmwifi_channels.h>
+#include <bcmendian.h>
+#include <proto/ethernet.h>
+#include <proto/802.11.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#include <dhd_cfg80211.h>
+
+#include <proto/ethernet.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <linux/wait.h>
+#include <net/cfg80211.h>
+#include <net/rtnetlink.h>
+
+#include <wlioctl.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+
+#ifdef BCMWAPI_WPI
+/* these items should eventually go into wireless.h in the Linux system header directory */
+#ifndef IW_ENCODE_ALG_SM4
+#define IW_ENCODE_ALG_SM4 0x20
+#endif
+
+#ifndef IW_AUTH_WAPI_ENABLED
+#define IW_AUTH_WAPI_ENABLED 0x20
+#endif
+
+#ifndef IW_AUTH_WAPI_VERSION_1
+#define IW_AUTH_WAPI_VERSION_1 0x00000008
+#endif
+
+#ifndef IW_AUTH_CIPHER_SMS4
+#define IW_AUTH_CIPHER_SMS4 0x00000020
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_PSK
+#define IW_AUTH_KEY_MGMT_WAPI_PSK 4
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_CERT
+#define IW_AUTH_KEY_MGMT_WAPI_CERT 8
+#endif
+#endif /* BCMWAPI_WPI */
+
+#ifdef BCMWAPI_WPI
+#define IW_WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | SMS4_ENABLED))
+#else /* BCMWAPI_WPI */
+#define IW_WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))
+#endif /* BCMWAPI_WPI */
+
+
+static struct device *cfg80211_parent_dev = NULL;
+static int vsdb_supported = 0;
+struct wl_priv *wlcfg_drv_priv = NULL;
+
+u32 wl_dbg_level = WL_DBG_ERR;
+
+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
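+/* Usage example (illustrative): WL_DBG(("peer " MACSTR "\n", MAC2STR(bssid.octet)))
+ * prints a colon-separated MAC address; bssid here is just an example ether_addr.
+ */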
+#define MAX_WAIT_TIME 1500
+#define WL_SCAN_ACTIVE_TIME 40 /* ms : Embedded default Active setting from DHD Driver */
+#define WL_SCAN_PASSIVE_TIME 130 /* ms: Embedded default Passive setting from DHD Driver */
+#define WL_FRAME_LEN 300
+
+#define WL_CHANSPEC_CTL_SB_NONE WL_CHANSPEC_CTL_SB_LLL
+
+
+#define DNGL_FUNC(func, parameters) func parameters;
+#define COEX_DHCP
+
+/* This overrides the regulatory domain defined in the cfg80211 module (reg.c).
+ * By default the world regulatory domain defined in reg.c sets the flags
+ * NL80211_RRF_PASSIVE_SCAN and NL80211_RRF_NO_IBSS on the 5 GHz channels
+ * (36..48 and 149..165). Because of these flags, wpa_supplicant does not
+ * start P2P operations on 5 GHz channels.
+ * All changes to the world regulatory domain are to be made here.
+ */
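+/* For reference, each cfg80211 REG_RULE() entry below is
+ * (start freq MHz, end freq MHz, max bandwidth MHz, max antenna gain dBi,
+ *  max EIRP dBm, flags).
+ */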
+static const struct ieee80211_regdomain brcm_regdom = {
+ .n_reg_rules = 5,
+ .alpha2 = "99",
+ .reg_rules = {
+ /* IEEE 802.11b/g, channels 1..11 */
+ REG_RULE(2412-10, 2462+10, 40, 6, 20, 0),
+ /* IEEE 802.11b/g, channels 12..13. No HT40
+ * channel fits here.
+ */
+ REG_RULE(2467-10, 2472+10, 20, 6, 20,
+ NL80211_RRF_PASSIVE_SCAN |
+ NL80211_RRF_NO_IBSS),
+ /* IEEE 802.11 channel 14 - Only JP enables
+ * this and for 802.11b only
+ */
+ REG_RULE(2484-10, 2484+10, 20, 6, 20,
+ NL80211_RRF_PASSIVE_SCAN |
+ NL80211_RRF_NO_IBSS |
+ NL80211_RRF_NO_OFDM),
+ /* IEEE 802.11a, channel 36..64 */
+ REG_RULE(5150-10, 5350+10, 40, 6, 20, 0),
+ /* IEEE 802.11a, channel 100..165 */
+ REG_RULE(5470-10, 5850+10, 40, 6, 20, 0), }
+};
+
+
+/* Data Element Definitions */
+#define WPS_ID_CONFIG_METHODS 0x1008
+#define WPS_ID_REQ_TYPE 0x103A
+#define WPS_ID_DEVICE_NAME 0x1011
+#define WPS_ID_VERSION 0x104A
+#define WPS_ID_DEVICE_PWD_ID 0x1012
+#define WPS_ID_REQ_DEV_TYPE 0x106A
+#define WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS 0x1053
+#define WPS_ID_PRIM_DEV_TYPE 0x1054
+
+/* Device Password ID */
+#define DEV_PW_DEFAULT 0x0000
+#define DEV_PW_USER_SPECIFIED 0x0001
+#define DEV_PW_MACHINE_SPECIFIED 0x0002
+#define DEV_PW_REKEY 0x0003
+#define DEV_PW_PUSHBUTTON 0x0004
+#define DEV_PW_REGISTRAR_SPECIFIED 0x0005
+
+/* Config Methods */
+#define WPS_CONFIG_USBA 0x0001
+#define WPS_CONFIG_ETHERNET 0x0002
+#define WPS_CONFIG_LABEL 0x0004
+#define WPS_CONFIG_DISPLAY 0x0008
+#define WPS_CONFIG_EXT_NFC_TOKEN 0x0010
+#define WPS_CONFIG_INT_NFC_TOKEN 0x0020
+#define WPS_CONFIG_NFC_INTERFACE 0x0040
+#define WPS_CONFIG_PUSHBUTTON 0x0080
+#define WPS_CONFIG_KEYPAD 0x0100
+#define WPS_CONFIG_VIRT_PUSHBUTTON 0x0280
+#define WPS_CONFIG_PHY_PUSHBUTTON 0x0480
+#define WPS_CONFIG_VIRT_DISPLAY 0x2008
+#define WPS_CONFIG_PHY_DISPLAY 0x4008
+
+/*
+ * cfg80211_ops api/callback list
+ */
+static s32 wl_frame_get_mgmt(u16 fc, const struct ether_addr *da,
+ const struct ether_addr *sa, const struct ether_addr *bssid,
+ u8 **pheader, u32 *body_len, u8 *pbody);
+static s32 __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request,
+ struct cfg80211_ssid *this_ssid);
+static s32 wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request);
+static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed);
+static s32 wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ibss_params *params);
+static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy,
+ struct net_device *dev);
+static s32 wl_cfg80211_get_station(struct wiphy *wiphy,
+ struct net_device *dev, u8 *mac,
+ struct station_info *sinfo);
+static s32 wl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
+ struct net_device *dev, bool enabled,
+ s32 timeout);
+static int wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+static s32 wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
+ u16 reason_code);
+static s32 wl_cfg80211_set_tx_power(struct wiphy *wiphy,
+ enum nl80211_tx_power_setting type,
+ s32 dbm);
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm);
+static s32 wl_cfg80211_config_default_key(struct wiphy *wiphy,
+ struct net_device *dev,
+ u8 key_idx, bool unicast, bool multicast);
+static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr,
+ struct key_params *params);
+static s32 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr);
+static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr,
+ void *cookie, void (*callback) (void *cookie,
+ struct key_params *params));
+static s32 wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
+ struct net_device *dev, u8 key_idx);
+static s32 wl_cfg80211_resume(struct wiphy *wiphy);
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
+#else
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy);
+#endif
+static s32 wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_pmksa *pmksa);
+static s32 wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_pmksa *pmksa);
+static s32 wl_cfg80211_flush_pmksa(struct wiphy *wiphy,
+ struct net_device *dev);
+static s32 wl_notify_escan_complete(struct wl_priv *wl,
+ struct net_device *ndev, bool aborted, bool fw_abort);
+/*
+ * event & event Q handlers for cfg80211 interfaces
+ */
+static s32 wl_create_event_handler(struct wl_priv *wl);
+static void wl_destroy_event_handler(struct wl_priv *wl);
+static s32 wl_event_handler(void *data);
+static void wl_init_eq(struct wl_priv *wl);
+static void wl_flush_eq(struct wl_priv *wl);
+static unsigned long wl_lock_eq(struct wl_priv *wl);
+static void wl_unlock_eq(struct wl_priv *wl, unsigned long flags);
+static void wl_init_eq_lock(struct wl_priv *wl);
+static void wl_init_event_handler(struct wl_priv *wl);
+static struct wl_event_q *wl_deq_event(struct wl_priv *wl);
+static s32 wl_enq_event(struct wl_priv *wl, struct net_device *ndev, u32 type,
+ const wl_event_msg_t *msg, void *data);
+static void wl_put_event(struct wl_event_q *e);
+static void wl_wakeup_event(struct wl_priv *wl);
+static s32 wl_notify_connect_status_ap(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+static s32 wl_notify_connect_status(struct wl_priv *wl,
+ struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+static s32 wl_notify_roaming_status(struct wl_priv *wl,
+ struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+static s32 wl_notify_scan_status(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+static s32 wl_bss_connect_done(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data, bool completed);
+static s32 wl_bss_roaming_done(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+static s32 wl_notify_mic_status(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+/*
+ * register/deregister parent device
+ */
+static void wl_cfg80211_clear_parent_dev(void);
+
+/*
+ * ioctl utilities
+ */
+
+/*
+ * cfg80211 set_wiphy_params utilities
+ */
+static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold);
+static s32 wl_set_rts(struct net_device *dev, u32 frag_threshold);
+static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l);
+
+/*
+ * wl profile utilities
+ */
+static s32 wl_update_prof(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data, s32 item);
+static void *wl_read_prof(struct wl_priv *wl, struct net_device *ndev, s32 item);
+static void wl_init_prof(struct wl_priv *wl, struct net_device *ndev);
+
+/*
+ * cfg80211 connect utilities
+ */
+static s32 wl_set_wpa_version(struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+static s32 wl_set_auth_type(struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+static s32 wl_set_set_cipher(struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+static s32 wl_set_key_mgmt(struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+static s32 wl_set_set_sharedkey(struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+#ifdef BCMWAPI_WPI
+static s32 wl_set_set_wapi_ie(struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+#endif
+static s32 wl_get_assoc_ies(struct wl_priv *wl, struct net_device *ndev);
+static void wl_ch_to_chanspec(int ch,
+ struct wl_join_params *join_params, size_t *join_params_size);
+
+/*
+ * information element utilities
+ */
+static void wl_rst_ie(struct wl_priv *wl);
+static __used s32 wl_add_ie(struct wl_priv *wl, u8 t, u8 l, u8 *v);
+static s32 wl_mrg_ie(struct wl_priv *wl, u8 *ie_stream, u16 ie_size);
+static s32 wl_cp_ie(struct wl_priv *wl, u8 *dst, u16 dst_size);
+static u32 wl_get_ielen(struct wl_priv *wl);
+
+
+static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *dev);
+static void wl_free_wdev(struct wl_priv *wl);
+
+static s32 wl_inform_bss(struct wl_priv *wl);
+static s32 wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi);
+static s32 wl_update_bss_info(struct wl_priv *wl, struct net_device *ndev);
+static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy);
+
+static s32 wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, const u8 *mac_addr,
+ struct key_params *params);
+/*
+ * key endianness swap utilities
+ */
+static void swap_key_from_BE(struct wl_wsec_key *key);
+static void swap_key_to_BE(struct wl_wsec_key *key);
+
+/*
+ * wl_priv memory init/deinit utilities
+ */
+static s32 wl_init_priv_mem(struct wl_priv *wl);
+static void wl_deinit_priv_mem(struct wl_priv *wl);
+
+static void wl_delay(u32 ms);
+
+/*
+ * ibss mode utilities
+ */
+static bool wl_is_ibssmode(struct wl_priv *wl, struct net_device *ndev);
+static __used bool wl_is_ibssstarter(struct wl_priv *wl);
+
+/*
+ * link up/down , default configuration utilities
+ */
+static s32 __wl_cfg80211_up(struct wl_priv *wl);
+static s32 __wl_cfg80211_down(struct wl_priv *wl);
+static s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add);
+static bool wl_is_linkdown(struct wl_priv *wl, const wl_event_msg_t *e);
+static bool wl_is_linkup(struct wl_priv *wl, const wl_event_msg_t *e, struct net_device *ndev);
+static bool wl_is_nonetwork(struct wl_priv *wl, const wl_event_msg_t *e);
+static void wl_link_up(struct wl_priv *wl);
+static void wl_link_down(struct wl_priv *wl);
+static s32 wl_config_ifmode(struct wl_priv *wl, struct net_device *ndev, s32 iftype);
+static void wl_init_conf(struct wl_conf *conf);
+static s32 wl_update_wiphybands(struct wl_priv *wl);
+
+/*
+ * iscan handler
+ */
+static void wl_iscan_timer(unsigned long data);
+static void wl_term_iscan(struct wl_priv *wl);
+static s32 wl_init_scan(struct wl_priv *wl);
+static s32 wl_iscan_thread(void *data);
+static s32 wl_run_iscan(struct wl_iscan_ctrl *iscan, struct cfg80211_scan_request *request,
+ u16 action);
+static s32 wl_do_iscan(struct wl_priv *wl, struct cfg80211_scan_request *request);
+static s32 wl_wakeup_iscan(struct wl_iscan_ctrl *iscan);
+static s32 wl_invoke_iscan(struct wl_priv *wl);
+static s32 wl_get_iscan_results(struct wl_iscan_ctrl *iscan, u32 *status,
+ struct wl_scan_results **bss_list);
+static void wl_notify_iscan_complete(struct wl_iscan_ctrl *iscan, bool aborted);
+static void wl_init_iscan_handler(struct wl_iscan_ctrl *iscan);
+static s32 wl_iscan_done(struct wl_priv *wl);
+static s32 wl_iscan_pending(struct wl_priv *wl);
+static s32 wl_iscan_inprogress(struct wl_priv *wl);
+static s32 wl_iscan_aborted(struct wl_priv *wl);
+
+/*
+ * find most significant bit set
+ */
+static __used u32 wl_find_msb(u16 bit16);
+
+/*
+ * rfkill support
+ */
+static int wl_setup_rfkill(struct wl_priv *wl, bool setup);
+static int wl_rfkill_set(void *data, bool blocked);
+
+static wl_scan_params_t *wl_cfg80211_scan_alloc_params(int channel,
+ int nprobes, int *out_params_size);
+static void get_primary_mac(struct wl_priv *wl, struct ether_addr *mac);
+
+/*
+ * Some external functions, TODO: move them to dhd_linux.h
+ */
+int dhd_add_monitor(char *name, struct net_device **new_ndev);
+int dhd_del_monitor(struct net_device *ndev);
+int dhd_monitor_init(void *dhd_pub);
+int dhd_monitor_uninit(void);
+int dhd_start_xmit(struct sk_buff *skb, struct net_device *net);
+
+
+#define CHECK_SYS_UP(wlpriv) \
+do { \
+ struct net_device *ndev = wl_to_prmry_ndev(wlpriv); \
+ if (unlikely(!wl_get_drv_status(wlpriv, READY, ndev))) { \
+ WL_INFO(("device is not ready\n")); \
+ return -EIO; \
+ } \
+} while (0)
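+
+/* Typical use (illustrative): most cfg80211 ops in this file start with
+ *
+ *	struct wl_priv *wl = wiphy_priv(wiphy);
+ *	CHECK_SYS_UP(wl);
+ *
+ * so that requests arriving before the interface is READY fail with -EIO.
+ */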
+
+
+#define IS_WPA_AKM(akm) ((akm) == RSN_AKM_NONE || \
+ (akm) == RSN_AKM_UNSPECIFIED || \
+ (akm) == RSN_AKM_PSK)
+
+
+extern int dhd_wait_pend8021x(struct net_device *dev);
+
+#if (WL_DBG_LEVEL > 0)
+#define WL_DBG_ESTR_MAX 50
+static s8 wl_dbg_estr[][WL_DBG_ESTR_MAX] = {
+ "SET_SSID", "JOIN", "START", "AUTH", "AUTH_IND",
+ "DEAUTH", "DEAUTH_IND", "ASSOC", "ASSOC_IND", "REASSOC",
+ "REASSOC_IND", "DISASSOC", "DISASSOC_IND", "QUIET_START", "QUIET_END",
+ "BEACON_RX", "LINK", "MIC_ERROR", "NDIS_LINK", "ROAM",
+ "TXFAIL", "PMKID_CACHE", "RETROGRADE_TSF", "PRUNE", "AUTOAUTH",
+ "EAPOL_MSG", "SCAN_COMPLETE", "ADDTS_IND", "DELTS_IND", "BCNSENT_IND",
+ "BCNRX_MSG", "BCNLOST_MSG", "ROAM_PREP", "PFN_NET_FOUND",
+ "PFN_NET_LOST",
+ "RESET_COMPLETE", "JOIN_START", "ROAM_START", "ASSOC_START",
+ "IBSS_ASSOC",
+ "RADIO", "PSM_WATCHDOG", "WLC_E_CCX_ASSOC_START", "WLC_E_CCX_ASSOC_ABORT",
+ "PROBREQ_MSG",
+ "SCAN_CONFIRM_IND", "PSK_SUP", "COUNTRY_CODE_CHANGED",
+ "EXCEEDED_MEDIUM_TIME", "ICV_ERROR",
+ "UNICAST_DECODE_ERROR", "MULTICAST_DECODE_ERROR", "TRACE",
+ "WLC_E_BTA_HCI_EVENT", "IF", "WLC_E_P2P_DISC_LISTEN_COMPLETE",
+ "RSSI", "PFN_SCAN_COMPLETE", "WLC_E_EXTLOG_MSG",
+ "ACTION_FRAME", "ACTION_FRAME_COMPLETE", "WLC_E_PRE_ASSOC_IND",
+ "WLC_E_PRE_REASSOC_IND", "WLC_E_CHANNEL_ADOPTED", "WLC_E_AP_STARTED",
+ "WLC_E_DFS_AP_STOP", "WLC_E_DFS_AP_RESUME", "WLC_E_WAI_STA_EVENT",
+ "WLC_E_WAI_MSG", "WLC_E_ESCAN_RESULT", "WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE",
+ "WLC_E_PROBRESP_MSG", "WLC_E_P2P_PROBREQ_MSG", "WLC_E_DCS_REQUEST", "WLC_E_FIFO_CREDIT_MAP",
+ "WLC_E_ACTION_FRAME_RX", "WLC_E_WAKE_EVENT", "WLC_E_RM_COMPLETE"
+};
+#endif /* WL_DBG_LEVEL */
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_channel, _flags) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = 5000 + (5 * (_channel)), \
+ .hw_value = (_channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
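+/* Worked example: CHAN5G(36, 0) describes channel 36 with
+ * .center_freq = 5000 + 5 * 36 = 5180 MHz, while the 2.4 GHz table below
+ * passes the frequency explicitly, e.g. CHAN2G(1, 2412, 0).
+ */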
+
+#define RATE_TO_BASE100KBPS(rate) (((rate) * 10) / 2)
+#define RATETAB_ENT(_rateid, _flags) \
+ { \
+ .bitrate = RATE_TO_BASE100KBPS(_rateid), \
+ .hw_value = (_rateid), \
+ .flags = (_flags), \
+ }
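+/* Worked example: hardware rate codes are in 500 kb/s units, so
+ * RATE_TO_BASE100KBPS(WLC_RATE_11M) = (22 * 10) / 2 = 110, i.e. 11 Mb/s in
+ * the 100 kb/s units cfg80211 expects (WLC_RATE_11M == 22 is an assumption
+ * taken from wlioctl.h).
+ */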
+
+static struct ieee80211_rate __wl_rates[] = {
+ RATETAB_ENT(WLC_RATE_1M, 0),
+ RATETAB_ENT(WLC_RATE_2M, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATETAB_ENT(WLC_RATE_5M5, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATETAB_ENT(WLC_RATE_11M, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATETAB_ENT(WLC_RATE_6M, 0),
+ RATETAB_ENT(WLC_RATE_9M, 0),
+ RATETAB_ENT(WLC_RATE_12M, 0),
+ RATETAB_ENT(WLC_RATE_18M, 0),
+ RATETAB_ENT(WLC_RATE_24M, 0),
+ RATETAB_ENT(WLC_RATE_36M, 0),
+ RATETAB_ENT(WLC_RATE_48M, 0),
+ RATETAB_ENT(WLC_RATE_54M, 0)
+};
+
+#define wl_a_rates (__wl_rates + 4)
+#define wl_a_rates_size 8
+#define wl_g_rates (__wl_rates + 0)
+#define wl_g_rates_size 12
+
+static struct ieee80211_channel __wl_2ghz_channels[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0)
+};
+
+static struct ieee80211_channel __wl_5ghz_a_channels[] = {
+ CHAN5G(34, 0), CHAN5G(36, 0),
+ CHAN5G(38, 0), CHAN5G(40, 0),
+ CHAN5G(42, 0), CHAN5G(44, 0),
+ CHAN5G(46, 0), CHAN5G(48, 0),
+ CHAN5G(52, 0), CHAN5G(56, 0),
+ CHAN5G(60, 0), CHAN5G(64, 0),
+ CHAN5G(100, 0), CHAN5G(104, 0),
+ CHAN5G(108, 0), CHAN5G(112, 0),
+ CHAN5G(116, 0), CHAN5G(120, 0),
+ CHAN5G(124, 0), CHAN5G(128, 0),
+ CHAN5G(132, 0), CHAN5G(136, 0),
+ CHAN5G(140, 0), CHAN5G(149, 0),
+ CHAN5G(153, 0), CHAN5G(157, 0),
+ CHAN5G(161, 0), CHAN5G(165, 0)
+};
+
+static struct ieee80211_supported_band __wl_band_2ghz = {
+ .band = IEEE80211_BAND_2GHZ,
+ .channels = __wl_2ghz_channels,
+ .n_channels = ARRAY_SIZE(__wl_2ghz_channels),
+ .bitrates = wl_g_rates,
+ .n_bitrates = wl_g_rates_size
+};
+
+static struct ieee80211_supported_band __wl_band_5ghz_a = {
+ .band = IEEE80211_BAND_5GHZ,
+ .channels = __wl_5ghz_a_channels,
+ .n_channels = ARRAY_SIZE(__wl_5ghz_a_channels),
+ .bitrates = wl_a_rates,
+ .n_bitrates = wl_a_rates_size
+};
+
+static const u32 __wl_cipher_suites[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+ WLAN_CIPHER_SUITE_AES_CMAC,
+#ifdef BCMWAPI_WPI
+ WLAN_CIPHER_SUITE_SMS4
+#endif
+};
+
+/* IOCtl version read from targeted driver */
+static int ioctl_version;
+
+/* Return a new chanspec given a legacy chanspec
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_chspec_from_legacy(chanspec_t legacy_chspec)
+{
+ chanspec_t chspec;
+
+ /* get the channel number */
+ chspec = LCHSPEC_CHANNEL(legacy_chspec);
+
+ /* convert the band */
+ if (LCHSPEC_IS2G(legacy_chspec)) {
+ chspec |= WL_CHANSPEC_BAND_2G;
+ } else {
+ chspec |= WL_CHANSPEC_BAND_5G;
+ }
+
+ /* convert the bw and sideband */
+ if (LCHSPEC_IS20(legacy_chspec)) {
+ chspec |= WL_CHANSPEC_BW_20;
+ } else {
+ chspec |= WL_CHANSPEC_BW_40;
+ if (LCHSPEC_CTL_SB(legacy_chspec) == WL_LCHANSPEC_CTL_SB_LOWER) {
+ chspec |= WL_CHANSPEC_CTL_SB_L;
+ } else {
+ chspec |= WL_CHANSPEC_CTL_SB_U;
+ }
+ }
+
+ if (wf_chspec_malformed(chspec)) {
+ WL_ERR(("wl_chspec_from_legacy: output chanspec (0x%04X) malformed\n",
+ chspec));
+ return INVCHANSPEC;
+ }
+
+ return chspec;
+}
+
+/* Return a legacy chanspec given a new chanspec
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_chspec_to_legacy(chanspec_t chspec)
+{
+ chanspec_t lchspec;
+
+ if (wf_chspec_malformed(chspec)) {
+ WL_ERR(("wl_chspec_to_legacy: input chanspec (0x%04X) malformed\n",
+ chspec));
+ return INVCHANSPEC;
+ }
+
+ /* get the channel number */
+ lchspec = CHSPEC_CHANNEL(chspec);
+
+ /* convert the band */
+ if (CHSPEC_IS2G(chspec)) {
+ lchspec |= WL_LCHANSPEC_BAND_2G;
+ } else {
+ lchspec |= WL_LCHANSPEC_BAND_5G;
+ }
+
+ /* convert the bw and sideband */
+ if (CHSPEC_IS20(chspec)) {
+ lchspec |= WL_LCHANSPEC_BW_20;
+ lchspec |= WL_LCHANSPEC_CTL_SB_NONE;
+ } else if (CHSPEC_IS40(chspec)) {
+ lchspec |= WL_LCHANSPEC_BW_40;
+ if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_L) {
+ lchspec |= WL_LCHANSPEC_CTL_SB_LOWER;
+ } else {
+ lchspec |= WL_LCHANSPEC_CTL_SB_UPPER;
+ }
+ } else {
+ /* cannot express the bandwidth */
+ char chanbuf[CHANSPEC_STR_LEN];
+ WL_ERR((
+ "wl_chspec_to_legacy: unable to convert chanspec %s (0x%04X) "
+ "to pre-11ac format\n",
+ wf_chspec_ntoa(chspec, chanbuf), chspec));
+ return INVCHANSPEC;
+ }
+
+ return lchspec;
+}
+
+/* given a chanspec value, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_chspec_host_to_driver(chanspec_t chanspec)
+{
+ if (ioctl_version == 1) {
+ chanspec = wl_chspec_to_legacy(chanspec);
+ if (chanspec == INVCHANSPEC) {
+ return chanspec;
+ }
+ }
+ chanspec = htodchanspec(chanspec);
+
+ return chanspec;
+}
+
+/* given a channel value, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ */
+chanspec_t
+wl_ch_host_to_driver(u16 channel)
+{
+
+ chanspec_t chanspec;
+
+ chanspec = channel & WL_CHANSPEC_CHAN_MASK;
+
+ if (channel <= CH_MAX_2G_CHANNEL)
+ chanspec |= WL_CHANSPEC_BAND_2G;
+ else
+ chanspec |= WL_CHANSPEC_BAND_5G;
+
+ chanspec |= WL_CHANSPEC_BW_20;
+ chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+ return wl_chspec_host_to_driver(chanspec);
+}
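+
+/* Example (illustrative): wl_ch_host_to_driver(6) builds a 20 MHz, 2.4 GHz
+ * chanspec for channel 6 and then runs it through wl_chspec_host_to_driver()
+ * so it is in the chanspec version and byte order the dongle expects.
+ */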
+
+/* given a chanspec value from the driver, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_chspec_driver_to_host(chanspec_t chanspec)
+{
+ chanspec = dtohchanspec(chanspec);
+ if (ioctl_version == 1) {
+ chanspec = wl_chspec_from_legacy(chanspec);
+ }
+
+ return chanspec;
+}
+
+/* There isn't a lot of sense in it, but you can transmit anything you like */
+static const struct ieee80211_txrx_stypes
+wl_cfg80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+ [NL80211_IFTYPE_ADHOC] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4)
+ },
+ [NL80211_IFTYPE_STATION] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_AP] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+ BIT(IEEE80211_STYPE_ACTION >> 4)
+ },
+ [NL80211_IFTYPE_AP_VLAN] = {
+ /* copy AP */
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+ BIT(IEEE80211_STYPE_ACTION >> 4)
+ },
+ [NL80211_IFTYPE_P2P_CLIENT] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_GO] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+ BIT(IEEE80211_STYPE_ACTION >> 4)
+ }
+};
+
+static void swap_key_from_BE(struct wl_wsec_key *key)
+{
+ key->index = htod32(key->index);
+ key->len = htod32(key->len);
+ key->algo = htod32(key->algo);
+ key->flags = htod32(key->flags);
+ key->rxiv.hi = htod32(key->rxiv.hi);
+ key->rxiv.lo = htod16(key->rxiv.lo);
+ key->iv_initialized = htod32(key->iv_initialized);
+}
+
+static void swap_key_to_BE(struct wl_wsec_key *key)
+{
+ key->index = dtoh32(key->index);
+ key->len = dtoh32(key->len);
+ key->algo = dtoh32(key->algo);
+ key->flags = dtoh32(key->flags);
+ key->rxiv.hi = dtoh32(key->rxiv.hi);
+ key->rxiv.lo = dtoh16(key->rxiv.lo);
+ key->iv_initialized = dtoh32(key->iv_initialized);
+}
+
+/* For debug: Dump the contents of the encoded wps ie buffer */
+static void
+wl_validate_wps_ie(char *wps_ie, bool *pbc)
+{
+ #define WPS_IE_FIXED_LEN 6
+ u16 len = (u16) wps_ie[TLV_LEN_OFF];
+ u8 *subel = wps_ie + WPS_IE_FIXED_LEN;
+ u16 subelt_id;
+ u16 subelt_len;
+ u16 val;
+ u8 *valptr = (uint8*) &val;
+
+ WL_DBG(("wps_ie len=%d\n", len));
+
+ len -= 4; /* for the WPS IE's OUI, oui_type fields */
+
+ while (len >= 4) { /* must have attr id, attr len fields */
+ valptr[0] = *subel++;
+ valptr[1] = *subel++;
+ subelt_id = HTON16(val);
+
+ valptr[0] = *subel++;
+ valptr[1] = *subel++;
+ subelt_len = HTON16(val);
+
+ len -= 4; /* for the attr id, attr len fields */
+ len -= subelt_len; /* for the remaining fields in this attribute */
+ WL_DBG((" subel=%p, subelt_id=0x%x subelt_len=%u\n",
+ subel, subelt_id, subelt_len));
+
+ if (subelt_id == WPS_ID_VERSION) {
+ WL_DBG((" attr WPS_ID_VERSION: %u\n", *subel));
+ } else if (subelt_id == WPS_ID_REQ_TYPE) {
+ WL_DBG((" attr WPS_ID_REQ_TYPE: %u\n", *subel));
+ } else if (subelt_id == WPS_ID_CONFIG_METHODS) {
+ valptr[0] = *subel;
+ valptr[1] = *(subel + 1);
+ WL_DBG((" attr WPS_ID_CONFIG_METHODS: %x\n", HTON16(val)));
+ } else if (subelt_id == WPS_ID_DEVICE_NAME) {
+ char devname[100];
+ memcpy(devname, subel, subelt_len);
+ devname[subelt_len] = '\0';
+ WL_DBG((" attr WPS_ID_DEVICE_NAME: %s (len %u)\n",
+ devname, subelt_len));
+ } else if (subelt_id == WPS_ID_DEVICE_PWD_ID) {
+ valptr[0] = *subel;
+ valptr[1] = *(subel + 1);
+ WL_DBG((" attr WPS_ID_DEVICE_PWD_ID: %u\n", HTON16(val)));
+ *pbc = (HTON16(val) == DEV_PW_PUSHBUTTON) ? true : false;
+ } else if (subelt_id == WPS_ID_PRIM_DEV_TYPE) {
+ valptr[0] = *subel;
+ valptr[1] = *(subel + 1);
+ WL_DBG((" attr WPS_ID_PRIM_DEV_TYPE: cat=%u \n", HTON16(val)));
+ valptr[0] = *(subel + 6);
+ valptr[1] = *(subel + 7);
+ WL_DBG((" attr WPS_ID_PRIM_DEV_TYPE: subcat=%u\n", HTON16(val)));
+ } else if (subelt_id == WPS_ID_REQ_DEV_TYPE) {
+ valptr[0] = *subel;
+ valptr[1] = *(subel + 1);
+ WL_DBG((" attr WPS_ID_REQ_DEV_TYPE: cat=%u\n", HTON16(val)));
+ valptr[0] = *(subel + 6);
+ valptr[1] = *(subel + 7);
+ WL_DBG((" attr WPS_ID_REQ_DEV_TYPE: subcat=%u\n", HTON16(val)));
+ } else if (subelt_id == WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS) {
+ valptr[0] = *subel;
+ valptr[1] = *(subel + 1);
+ WL_DBG((" attr WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS"
+ ": cat=%u\n", HTON16(val)));
+ } else {
+ WL_DBG((" unknown attr 0x%x\n", subelt_id));
+ }
+
+ subel += subelt_len;
+ }
+}
+
+static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy)
+{
+ if (vsdb_supported) {
+ return wl_ch_host_to_driver(WL_P2P_TEMP_CHAN);
+ }
+ else {
+ chanspec_t chspec;
+ int err = 0;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct net_device *dev = wl_to_prmry_ndev(wl);
+ struct ether_addr bssid;
+ struct wl_bss_info *bss = NULL;
+
+ if ((err = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, sizeof(bssid), false))) {
+ /* The STA interface is not associated, so start the new interface on a
+ * temporary channel. The proper channel will be applied later by the
+ * framework via set_channel (cfg80211 API).
+ */
+ WL_DBG(("Not associated. Return a temp channel. \n"));
+ return wl_ch_host_to_driver(WL_P2P_TEMP_CHAN);
+ }
+
+
+ *(u32 *) wl->extra_buf = htod32(WL_EXTRA_BUF_MAX);
+ if ((err = wldev_ioctl(dev, WLC_GET_BSS_INFO, wl->extra_buf,
+ WL_EXTRA_BUF_MAX, false))) {
+ WL_ERR(("Failed to get associated bss info, use temp channel \n"));
+ chspec = wl_ch_host_to_driver(WL_P2P_TEMP_CHAN);
+ }
+ else {
+ bss = (struct wl_bss_info *) (wl->extra_buf + 4);
+ chspec = bss->chanspec;
+ WL_DBG(("Valid BSS Found. chanspec:%d \n", bss->chanspec));
+ }
+ return chspec;
+ }
+}
+
+static struct net_device* wl_cfg80211_add_monitor_if(char *name)
+{
+ struct net_device* ndev = NULL;
+
+ dhd_add_monitor(name, &ndev);
+ WL_INFO(("wl_cfg80211_add_monitor_if net device returned: 0x%p\n", ndev));
+ return ndev;
+}
+
+static struct net_device *
+wl_cfg80211_add_virtual_iface(struct wiphy *wiphy, char *name,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+ s32 err;
+ s32 timeout = -1;
+ s32 wlif_type = -1;
+ s32 mode = 0;
+#if defined(WL_ENABLE_P2P_IF)
+ s32 dhd_mode = 0;
+#endif /* (WL_ENABLE_P2P_IF) */
+ chanspec_t chspec;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct net_device *_ndev;
+ struct ether_addr primary_mac;
+ int (*net_attach)(void *dhdp, int ifidx);
+ bool rollback_lock = false;
+
+ /* Use primary I/F for sending cmds down to firmware */
+ _ndev = wl_to_prmry_ndev(wl);
+
+ WL_DBG(("if name: %s, type: %d\n", name, type));
+ switch (type) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_MESH_POINT:
+ WL_ERR(("Unsupported interface type\n"));
+ mode = WL_MODE_IBSS;
+ return NULL;
+ case NL80211_IFTYPE_MONITOR:
+ return wl_cfg80211_add_monitor_if(name);
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_STATION:
+ wlif_type = WL_P2P_IF_CLIENT;
+ mode = WL_MODE_BSS;
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_AP:
+ wlif_type = WL_P2P_IF_GO;
+ mode = WL_MODE_AP;
+ break;
+ default:
+ WL_ERR(("Unsupported interface type\n"));
+ return NULL;
+ break;
+ }
+
+ if (!name) {
+ WL_ERR(("name is NULL\n"));
+ return NULL;
+ }
+ if (wl->iface_cnt == IFACE_MAX_CNT)
+ return ERR_PTR(-ENOMEM);
+ if (wl->p2p_supported && (wlif_type != -1)) {
+ if (wl_get_p2p_status(wl, IF_DELETING)) {
+ /* Wait till IF_DEL is complete;
+ * release the lock so the unregister can proceed.
+ */
+ if (rtnl_is_locked()) {
+ rtnl_unlock();
+ rollback_lock = true;
+ }
+ WL_INFO(("%s: Released the lock and wait till IF_DEL is complete\n",
+ __func__));
+ timeout = wait_event_interruptible_timeout(wl->netif_change_event,
+ (wl_get_p2p_status(wl, IF_DELETING) == false),
+ msecs_to_jiffies(MAX_WAIT_TIME));
+
+ /* put back the rtnl_lock again */
+ if (rollback_lock) {
+ rtnl_lock();
+ rollback_lock = false;
+ }
+ if (timeout > 0) {
+ WL_ERR(("IF DEL is Success\n"));
+
+ } else {
+ WL_ERR(("timeount < 0, return -EAGAIN\n"));
+ return ERR_PTR(-EAGAIN);
+ }
+ }
+ if (wl->p2p && !wl->p2p->on && strstr(name, WL_P2P_INTERFACE_PREFIX)) {
+ p2p_on(wl) = true;
+ wl_cfgp2p_set_firm_p2p(wl);
+ wl_cfgp2p_init_discovery(wl);
+ get_primary_mac(wl, &primary_mac);
+ wl_cfgp2p_generate_bss_mac(&primary_mac,
+ &wl->p2p->dev_addr, &wl->p2p->int_addr);
+ }
+
+ memset(wl->p2p->vir_ifname, 0, IFNAMSIZ);
+ strncpy(wl->p2p->vir_ifname, name, IFNAMSIZ - 1);
+
+ wldev_iovar_setint(_ndev, "mpc", 0);
+ wl_notify_escan_complete(wl, _ndev, true, true);
+
+ /* In the concurrency case the STA may already be associated on a particular
+ * channel, so retrieve the current channel of the primary interface and
+ * start the virtual interface on that channel.
+ */
+ chspec = wl_cfg80211_get_shared_freq(wiphy);
+
+ /* For P2P mode, use P2P-specific driver features to create the
+ * bss: "wl p2p_ifadd"
+ */
+ wl_set_p2p_status(wl, IF_ADD);
+ err = wl_cfgp2p_ifadd(wl, &wl->p2p->int_addr, htod32(wlif_type), chspec);
+
+ if (unlikely(err)) {
+ WL_ERR((" virtual iface add failed (%d) \n", err));
+ return ERR_PTR(-ENOMEM);
+ }
+
+ timeout = wait_event_interruptible_timeout(wl->netif_change_event,
+ (wl_get_p2p_status(wl, IF_ADD) == false),
+ msecs_to_jiffies(MAX_WAIT_TIME));
+ if (timeout > 0 && (!wl_get_p2p_status(wl, IF_ADD))) {
+
+ struct wireless_dev *vwdev;
+ vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
+ if (unlikely(!vwdev)) {
+ WL_ERR(("Could not allocate wireless device\n"));
+ return ERR_PTR(-ENOMEM);
+ }
+ vwdev->wiphy = wl->wdev->wiphy;
+ WL_INFO((" virtual interface(%s) is created memalloc done \n",
+ wl->p2p->vir_ifname));
+ vwdev->iftype = type;
+ _ndev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION);
+ _ndev->ieee80211_ptr = vwdev;
+ SET_NETDEV_DEV(_ndev, wiphy_dev(vwdev->wiphy));
+ vwdev->netdev = _ndev;
+ wl_set_drv_status(wl, READY, _ndev);
+ wl->p2p->vif_created = true;
+ wl_set_mode_by_netdev(wl, _ndev, mode);
+ net_attach = wl_to_p2p_bss_private(wl, P2PAPI_BSSCFG_CONNECTION);
+ if (rtnl_is_locked()) {
+ rtnl_unlock();
+ rollback_lock = true;
+ }
+ if (net_attach && !net_attach(wl->pub, _ndev->ifindex)) {
+ wl_alloc_netinfo(wl, _ndev, vwdev, mode);
+ WL_ERR((" virtual interface(%s) is "
+ "created net attach done\n", wl->p2p->vir_ifname));
+#if defined(WL_ENABLE_P2P_IF)
+ if (type == NL80211_IFTYPE_P2P_CLIENT)
+ dhd_mode = P2P_GC_ENABLED;
+ else if (type == NL80211_IFTYPE_P2P_GO)
+ dhd_mode = P2P_GO_ENABLED;
+ DNGL_FUNC(dhd_cfg80211_set_p2p_info, (wl, dhd_mode));
+#endif /* (WL_ENABLE_P2P_IF) */
+ } else {
+ /* put back the rtnl_lock again */
+ if (rollback_lock)
+ rtnl_lock();
+ goto fail;
+ }
+ /* put back the rtnl_lock again */
+ if (rollback_lock)
+ rtnl_lock();
+ return _ndev;
+
+ } else {
+ wl_clr_p2p_status(wl, IF_ADD);
+ WL_ERR((" virtual interface(%s) is not created \n", wl->p2p->vir_ifname));
+ memset(wl->p2p->vir_ifname, '\0', IFNAMSIZ);
+ wl->p2p->vif_created = false;
+ }
+ }
+fail:
+ return ERR_PTR(-ENODEV);
+}
+
+static s32
+wl_cfg80211_del_virtual_iface(struct wiphy *wiphy, struct net_device *dev)
+{
+ struct ether_addr p2p_mac;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ s32 timeout = -1;
+ s32 ret = 0;
+ WL_DBG(("Enter\n"));
+
+ if (wl->p2p_net == dev) {
+ /* Since there is no ifidx corresponding to p2p0, cmds to
+ * firmware should be routed through primary I/F
+ */
+ dev = wl_to_prmry_ndev(wl);
+ }
+
+ if (wl->p2p_supported) {
+ memcpy(p2p_mac.octet, wl->p2p->int_addr.octet, ETHER_ADDR_LEN);
+ if (wl->p2p->vif_created) {
+ if (wl_get_drv_status(wl, SCANNING, dev)) {
+ wl_notify_escan_complete(wl, dev, true, true);
+ }
+ wldev_iovar_setint(dev, "mpc", 1);
+ wl_set_p2p_status(wl, IF_DELETING);
+ ret = wl_cfgp2p_ifdel(wl, &p2p_mac);
+ /* If the firmware could not delete the interface we will not get a WLC_E_IF
+ * event to clean up the dhd virtual network interface, so do it here.
+ * A failure from the firmware makes the application run an
+ * "ifconfig <interface> down" and "up" sequence, which reloads the firmware;
+ * we still have to clean up the Linux virtual network interfaces.
+ */
+ /* Request framework to RESET and clean up */
+ if (ret) {
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+ WL_ERR(("Firmware returned an error (%d) from p2p_ifdel"
+ "HANG Notification sent to %s\n", ret, ndev->name));
+ wl_cfg80211_hang(ndev, WLAN_REASON_UNSPECIFIED);
+ }
+
+ /* Wait for any pending scan req to get aborted from the sysioc context */
+ timeout = wait_event_interruptible_timeout(wl->netif_change_event,
+ (wl_get_p2p_status(wl, IF_DELETING) == false),
+ msecs_to_jiffies(MAX_WAIT_TIME));
+ if (timeout > 0 && !wl_get_p2p_status(wl, IF_DELETING)) {
+ WL_DBG(("IFDEL operation done\n"));
+#if defined(WL_ENABLE_P2P_IF)
+ DNGL_FUNC(dhd_cfg80211_clean_p2p_info, (wl));
+#endif /* (WL_ENABLE_P2P_IF)) */
+ } else {
+ WL_ERR(("IFDEL didn't complete properly\n"));
+ }
+ ret = dhd_del_monitor(dev);
+ }
+ }
+ return ret;
+}
+
+static s32
+wl_cfg80211_change_virtual_iface(struct wiphy *wiphy, struct net_device *ndev,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+ s32 ap = 0;
+ s32 infra = 0;
+ s32 wlif_type;
+ s32 mode = 0;
+ chanspec_t chspec;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+
+ WL_DBG(("Enter \n"));
+ switch (type) {
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_MESH_POINT:
+ ap = 1;
+ WL_ERR(("type (%d) : currently we do not support this type\n",
+ type));
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ mode = WL_MODE_IBSS;
+ break;
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ mode = WL_MODE_BSS;
+ infra = 1;
+ break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_P2P_GO:
+ mode = WL_MODE_AP;
+ ap = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ap) {
+ wl_set_mode_by_netdev(wl, ndev, mode);
+ if (wl->p2p_supported && wl->p2p->vif_created) {
+ WL_DBG(("p2p_vif_created (%d) p2p_on (%d)\n", wl->p2p->vif_created,
+ p2p_on(wl)));
+ wldev_iovar_setint(ndev, "mpc", 0);
+ wl_notify_escan_complete(wl, ndev, true, true);
+
+ /* In the concurrency case the STA may already be associated on a
+ * particular channel, so retrieve the current channel of the primary
+ * interface and start the virtual interface on that channel.
+ */
+ chspec = wl_cfg80211_get_shared_freq(wiphy);
+
+ wlif_type = ap ? WL_P2P_IF_GO : WL_P2P_IF_CLIENT;
+ WL_ERR(("%s : ap (%d), infra (%d), iftype: (%d)\n",
+ ndev->name, ap, infra, type));
+ wl_set_p2p_status(wl, IF_CHANGING);
+ wl_clr_p2p_status(wl, IF_CHANGED);
+ wl_cfgp2p_ifchange(wl, &wl->p2p->int_addr, htod32(wlif_type), chspec);
+ wait_event_interruptible_timeout(wl->netif_change_event,
+ (wl_get_p2p_status(wl, IF_CHANGED) == true),
+ msecs_to_jiffies(MAX_WAIT_TIME));
+ wl_set_mode_by_netdev(wl, ndev, mode);
+ wl_clr_p2p_status(wl, IF_CHANGING);
+ wl_clr_p2p_status(wl, IF_CHANGED);
+ } else if (ndev == wl_to_prmry_ndev(wl) &&
+ !wl_get_drv_status(wl, AP_CREATED, ndev)) {
+ wl_set_drv_status(wl, AP_CREATING, ndev);
+ if (!wl->ap_info &&
+ !(wl->ap_info = kzalloc(sizeof(struct ap_info), GFP_KERNEL))) {
+ WL_ERR(("struct ap_saved_ie allocation failed\n"));
+ return -ENOMEM;
+ }
+ } else {
+ WL_ERR(("Cannot change the interface for GO or SOFTAP\n"));
+ return -EINVAL;
+ }
+ }
+
+ ndev->ieee80211_ptr->iftype = type;
+ return 0;
+}
+
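+ /* Called when the dongle reports IF_ADD for the pending P2P virtual
+ * interface: bind the new net_device to the CONNECTION bsscfg slot and wake
+ * up the waiter blocked on netif_change_event.
+ */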
+s32
+wl_cfg80211_notify_ifadd(struct net_device *ndev, s32 idx, s32 bssidx,
+ void* _net_attach)
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+ s32 ret = BCME_OK;
+ WL_DBG(("Enter"));
+ if (!ndev) {
+ WL_ERR(("net is NULL\n"));
+ return 0;
+ }
+ if (wl->p2p_supported && wl_get_p2p_status(wl, IF_ADD)) {
+ WL_DBG(("IF_ADD event called from dongle, old interface name: %s,"
+ "new name: %s\n", ndev->name, wl->p2p->vir_ifname));
+ /* Assign the net device to CONNECT BSSCFG */
+ strncpy(ndev->name, wl->p2p->vir_ifname, IFNAMSIZ - 1);
+ wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION) = ndev;
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_CONNECTION) = bssidx;
+ wl_to_p2p_bss_private(wl, P2PAPI_BSSCFG_CONNECTION) = _net_attach;
+ ndev->ifindex = idx;
+ wl_clr_p2p_status(wl, IF_ADD);
+
+ wake_up_interruptible(&wl->netif_change_event);
+ } else {
+ ret = BCME_NOTREADY;
+ }
+ return ret;
+}
+
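+ /* Counterpart of the IF_ADD notification: on IF_DEL from the dongle, abort
+ * any escan owned by the vanishing interface, clear the CONNECTION bsscfg
+ * slot and its management IEs, and wake up the waiter in the delete path.
+ */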
+s32
+wl_cfg80211_notify_ifdel(struct net_device *ndev)
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+ bool rollback_lock = false;
+ s32 index = 0;
+ if (!ndev || !ndev->name) {
+ WL_ERR(("net is NULL\n"));
+ return 0;
+ }
+
+ if (p2p_is_on(wl) && wl->p2p->vif_created &&
+ wl_get_p2p_status(wl, IF_DELETING)) {
+ if (wl->scan_request &&
+ (wl->escan_info.ndev == ndev)) {
+ /* Abort any pending scan requests */
+ wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ if (!rtnl_is_locked()) {
+ rtnl_lock();
+ rollback_lock = true;
+ }
+ WL_DBG(("ESCAN COMPLETED\n"));
+ wl_notify_escan_complete(wl, ndev, true, false);
+ if (rollback_lock)
+ rtnl_unlock();
+ }
+ WL_ERR(("IF_DEL event called from dongle, net %x, vif name: %s\n",
+ (unsigned int)ndev, wl->p2p->vir_ifname));
+
+ memset(wl->p2p->vir_ifname, '\0', IFNAMSIZ);
+ index = wl_cfgp2p_find_idx(wl, ndev);
+ wl_to_p2p_bss_ndev(wl, index) = NULL;
+ wl_to_p2p_bss_bssidx(wl, index) = 0;
+ wl->p2p->vif_created = false;
+ wl_cfgp2p_clear_management_ie(wl,
+ index);
+ wl_clr_p2p_status(wl, IF_DELETING);
+ WL_DBG(("index : %d\n", index));
+
+ }
+ /* Wake up any waiting thread */
+ wake_up_interruptible(&wl->netif_change_event);
+
+ return 0;
+}
+
+s32
+wl_cfg80211_is_progress_ifadd(void)
+{
+ s32 is_progress = 0;
+ struct wl_priv *wl = wlcfg_drv_priv;
+ if (wl_get_p2p_status(wl, IF_ADD))
+ is_progress = 1;
+ return is_progress;
+}
+
+s32
+wl_cfg80211_is_progress_ifchange(void)
+{
+ s32 is_progress = 0;
+ struct wl_priv *wl = wlcfg_drv_priv;
+ if (wl_get_p2p_status(wl, IF_CHANGING))
+ is_progress = 1;
+ return is_progress;
+}
+
+
+s32
+wl_cfg80211_notify_ifchange(void)
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+ if (wl_get_p2p_status(wl, IF_CHANGING)) {
+ wl_set_p2p_status(wl, IF_CHANGED);
+ wake_up_interruptible(&wl->netif_change_event);
+ }
+ return 0;
+}
+
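+ /* Fill a wl_scan_params structure from a cfg80211 scan request: broadcast
+ * BSSID and driver defaults first, then the requested channel list (encoded
+ * as chanspecs) and SSID list. A NULL request leaves the defaults in place,
+ * which results in an all-channel broadcast scan.
+ */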
+static void wl_scan_prep(struct wl_scan_params *params, struct cfg80211_scan_request *request)
+{
+ u32 n_ssids;
+ u32 n_channels;
+ u16 channel;
+ chanspec_t chanspec;
+ s32 i, offset;
+ char *ptr;
+ wlc_ssid_t ssid;
+
+ memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+ params->bss_type = DOT11_BSSTYPE_ANY;
+ params->scan_type = 0;
+ params->nprobes = -1;
+ params->active_time = -1;
+ params->passive_time = -1;
+ params->home_time = -1;
+ params->channel_num = 0;
+ memset(&params->ssid, 0, sizeof(wlc_ssid_t));
+
+ WL_SCAN(("Preparing Scan request\n"));
+ WL_SCAN(("nprobes=%d\n", params->nprobes));
+ WL_SCAN(("active_time=%d\n", params->active_time));
+ WL_SCAN(("passive_time=%d\n", params->passive_time));
+ WL_SCAN(("home_time=%d\n", params->home_time));
+ WL_SCAN(("scan_type=%d\n", params->scan_type));
+
+ params->nprobes = htod32(params->nprobes);
+ params->active_time = htod32(params->active_time);
+ params->passive_time = htod32(params->passive_time);
+ params->home_time = htod32(params->home_time);
+
+ /* if request is null just exit so it will be all channel broadcast scan */
+ if (!request)
+ return;
+
+ n_ssids = request->n_ssids;
+ n_channels = request->n_channels;
+
+ /* Copy channel array if applicable */
+ WL_SCAN(("### List of channelspecs to scan ###\n"));
+ if (n_channels > 0) {
+ for (i = 0; i < n_channels; i++) {
+ chanspec = 0;
+ channel = ieee80211_frequency_to_channel(request->channels[i]->center_freq);
+ if (request->channels[i]->band == IEEE80211_BAND_2GHZ)
+ chanspec |= WL_CHANSPEC_BAND_2G;
+ else
+ chanspec |= WL_CHANSPEC_BAND_5G;
+
+ if (request->channels[i]->flags & IEEE80211_CHAN_NO_HT40) {
+ chanspec |= WL_CHANSPEC_BW_20;
+ chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+ } else {
+ chanspec |= WL_CHANSPEC_BW_40;
+ if (request->channels[i]->flags & IEEE80211_CHAN_NO_HT40PLUS)
+ chanspec |= WL_CHANSPEC_CTL_SB_LOWER;
+ else
+ chanspec |= WL_CHANSPEC_CTL_SB_UPPER;
+ }
+
+ params->channel_list[i] = channel;
+ params->channel_list[i] &= WL_CHANSPEC_CHAN_MASK;
+ params->channel_list[i] |= chanspec;
+ WL_SCAN(("Chan : %d, Channel spec: %x \n",
+ channel, params->channel_list[i]));
+ params->channel_list[i] = wl_chspec_host_to_driver(params->channel_list[i]);
+ }
+ } else {
+ WL_SCAN(("Scanning all channels\n"));
+ }
+
+ /* Copy ssid array if applicable */
+ WL_SCAN(("### List of SSIDs to scan ###\n"));
+ if (n_ssids > 0) {
+ offset = offsetof(wl_scan_params_t, channel_list) + n_channels * sizeof(u16);
+ offset = roundup(offset, sizeof(u32));
+ ptr = (char*)params + offset;
+ for (i = 0; i < n_ssids; i++) {
+ memset(&ssid, 0, sizeof(wlc_ssid_t));
+ ssid.SSID_len = request->ssids[i].ssid_len;
+ memcpy(ssid.SSID, request->ssids[i].ssid, ssid.SSID_len);
+ if (!ssid.SSID_len)
+ WL_SCAN(("%d: Broadcast scan\n", i));
+ else
+ WL_SCAN(("%d: scan for %s size =%d\n", i,
+ ssid.SSID, ssid.SSID_len));
+ memcpy(ptr, &ssid, sizeof(wlc_ssid_t));
+ ptr += sizeof(wlc_ssid_t);
+ }
+ } else {
+ WL_SCAN(("Broadcast scan\n"));
+ }
+ /* Adding mask to channel numbers */
+ params->channel_num =
+ htod32((n_ssids << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ (n_channels & WL_SCAN_PARAMS_COUNT_MASK));
+}
+
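+ /* Build an incremental-scan (iscan) request sized for the given channel and
+ * SSID counts and hand it to the firmware through the "iscan" iovar.
+ */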
+static s32
+wl_run_iscan(struct wl_iscan_ctrl *iscan, struct cfg80211_scan_request *request, u16 action)
+{
+ u32 n_channels;
+ u32 n_ssids;
+ s32 params_size =
+ (WL_SCAN_PARAMS_FIXED_SIZE + offsetof(wl_iscan_params_t, params));
+ struct wl_iscan_params *params;
+ s32 err = 0;
+
+ if (request != NULL) {
+ n_channels = request->n_channels;
+ n_ssids = request->n_ssids;
+ /* Allocate space for the channel list in the wl_iscan_params struct */
+ if (n_channels % 2)
+ /* If n_channels is odd, add a u16 of padding */
+ params_size += sizeof(u16) * (n_channels + 1);
+ else
+ params_size += sizeof(u16) * n_channels;
+
+ /* Allocate space for populating ssids in wl_iscan_params struct */
+ params_size += sizeof(struct wlc_ssid) * n_ssids;
+ }
+ params = (struct wl_iscan_params *)kzalloc(params_size, GFP_KERNEL);
+ if (!params) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ if (request != NULL)
+ wl_scan_prep(&params->params, request);
+
+ params->version = htod32(ISCAN_REQ_VERSION);
+ params->action = htod16(action);
+ params->scan_duration = htod16(0);
+
+ if (params_size + sizeof("iscan") >= WLC_IOCTL_MEDLEN) {
+ WL_ERR(("ioctl buffer length is not sufficient\n"));
+ err = -ENOMEM;
+ goto done;
+ }
+ err = wldev_iovar_setbuf(iscan->dev, "iscan", params, params_size,
+ iscan->ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+ if (unlikely(err)) {
+ if (err == -EBUSY) {
+ WL_ERR(("system busy : iscan canceled\n"));
+ } else {
+ WL_ERR(("error (%d)\n", err));
+ }
+ }
+ kfree(params);
+done:
+ return err;
+}
+
+static s32 wl_do_iscan(struct wl_priv *wl, struct cfg80211_scan_request *request)
+{
+ struct wl_iscan_ctrl *iscan = wl_to_iscan(wl);
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+ s32 passive_scan;
+ s32 err = 0;
+
+ iscan->state = WL_ISCAN_STATE_SCANING;
+
+ passive_scan = wl->active_scan ? 0 : 1;
+ err = wldev_ioctl(ndev, WLC_SET_PASSIVE_SCAN,
+ &passive_scan, sizeof(passive_scan), false);
+ if (unlikely(err)) {
+ WL_DBG(("error (%d)\n", err));
+ return err;
+ }
+ wl->iscan_kickstart = true;
+ wl_run_iscan(iscan, request, WL_SCAN_ACTION_START);
+ mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
+ iscan->timer_on = 1;
+
+ return err;
+}
+static s32
+wl_get_valid_channels(struct net_device *ndev, u8 *valid_chan_list, s32 size)
+{
+ wl_uint32_list_t *list;
+ s32 err = BCME_OK;
+ if (valid_chan_list == NULL || size <= 0)
+ return -ENOMEM;
+
+ memset(valid_chan_list, 0, size);
+ list = (wl_uint32_list_t *)(void *) valid_chan_list;
+ list->count = htod32(WL_NUMCHANNELS);
+ err = wldev_ioctl(ndev, WLC_GET_VALID_CHANNELS, valid_chan_list, size, false);
+ if (err != 0) {
+ WL_ERR(("get channels failed with %d\n", err));
+ }
+
+ return err;
+}
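+ /* Build and fire an escan request. Legacy (non-P2P) scans go straight to the
+ * "escan" iovar; P2P scans first prune DFS channels against the valid-channel
+ * list and pick between the SEARCH and SCAN discovery states before calling
+ * wl_cfgp2p_escan().
+ */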
+static s32
+wl_run_escan(struct wl_priv *wl, struct net_device *ndev,
+ struct cfg80211_scan_request *request, uint16 action)
+{
+ s32 err = BCME_OK;
+ u32 n_channels;
+ u32 n_ssids;
+ s32 params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_escan_params_t, params));
+ wl_escan_params_t *params = NULL;
+ struct cfg80211_scan_request *scan_request = wl->scan_request;
+ u8 chan_buf[sizeof(u32)*(WL_NUMCHANNELS + 1)];
+ u32 num_chans = 0;
+ s32 channel;
+ s32 n_valid_chan;
+ s32 search_state = WL_P2P_DISC_ST_SCAN;
+ u32 i, j, n_nodfs = 0;
+ u16 *default_chan_list = NULL;
+ wl_uint32_list_t *list;
+ struct net_device *dev = NULL;
+ WL_DBG(("Enter \n"));
+
+
+ if (!wl->p2p_supported || ((ndev == wl_to_prmry_ndev(wl)) &&
+ !p2p_scan(wl))) {
+ /* LEGACY SCAN TRIGGER */
+ WL_SCAN((" LEGACY E-SCAN START\n"));
+
+ if (request != NULL) {
+ n_channels = request->n_channels;
+ n_ssids = request->n_ssids;
+ /* Allocate space for the channel list in the wl_escan_params struct */
+ if (n_channels % 2)
+ /* If n_channels is odd, add a u16 of padding */
+ params_size += sizeof(u16) * (n_channels + 1);
+ else
+ params_size += sizeof(u16) * n_channels;
+
+ /* Allocate space for the SSID list in the wl_escan_params struct */
+ params_size += sizeof(struct wlc_ssid) * n_ssids;
+ }
+ params = (wl_escan_params_t *) kzalloc(params_size, GFP_KERNEL);
+ if (params == NULL) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ if (request != NULL)
+ wl_scan_prep(&params->params, request);
+ params->version = htod32(ESCAN_REQ_VERSION);
+ params->action = htod16(action);
+ params->sync_id = htod16(0x1234);
+ if (params_size + sizeof("escan") >= WLC_IOCTL_MEDLEN) {
+ WL_ERR(("ioctl buffer length not sufficient\n"));
+ kfree(params);
+ err = -ENOMEM;
+ goto exit;
+ }
+ err = wldev_iovar_setbuf(ndev, "escan", params, params_size,
+ wl->escan_ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+ if (unlikely(err))
+ WL_ERR((" Escan set error (%d)\n", err));
+ kfree(params);
+ }
+ else if (p2p_is_on(wl) && p2p_scan(wl)) {
+ /* P2P SCAN TRIGGER */
+ s32 _freq = 0;
+ n_nodfs = 0;
+ if (scan_request && scan_request->n_channels) {
+ num_chans = scan_request->n_channels;
+ WL_SCAN((" chann number : %d\n", num_chans));
+ default_chan_list = kzalloc(num_chans * sizeof(*default_chan_list),
+ GFP_KERNEL);
+ if (default_chan_list == NULL) {
+ WL_ERR(("channel list allocation failed \n"));
+ err = -ENOMEM;
+ goto exit;
+ }
+ if (!wl_get_valid_channels(ndev, chan_buf, sizeof(chan_buf))) {
+ list = (wl_uint32_list_t *) chan_buf;
+ n_valid_chan = dtoh32(list->count);
+ for (i = 0; i < num_chans; i++)
+ {
+ _freq = scan_request->channels[i]->center_freq;
+ channel = ieee80211_frequency_to_channel(_freq);
+ /* remove DFS channels */
+ if (channel < 52 || channel > 140) {
+ for (j = 0; j < n_valid_chan; j++) {
+ /* allow only channels supported by the
+ * current regulatory domain
+ */
+ if (channel == (dtoh32(list->element[j])))
+ default_chan_list[n_nodfs++] =
+ channel;
+ }
+ }
+
+ }
+ }
+ if (num_chans == 3 && (
+ (default_chan_list[0] == SOCIAL_CHAN_1) &&
+ (default_chan_list[1] == SOCIAL_CHAN_2) &&
+ (default_chan_list[2] == SOCIAL_CHAN_3))) {
+ /* SOCIAL CHANNELS 1, 6, 11 */
+ search_state = WL_P2P_DISC_ST_SEARCH;
+ WL_INFO(("P2P SEARCH PHASE START \n"));
+ } else if ((dev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION)) &&
+ (wl_get_mode_by_netdev(wl, dev) == WL_MODE_AP)) {
+ /* If you are already a GO, then do SEARCH only */
+ WL_INFO(("Already a GO. Do SEARCH Only"));
+ search_state = WL_P2P_DISC_ST_SEARCH;
+ num_chans = n_nodfs;
+
+ } else {
+ WL_INFO(("P2P SCAN STATE START \n"));
+ num_chans = n_nodfs;
+ }
+
+ }
+ err = wl_cfgp2p_escan(wl, ndev, wl->active_scan, num_chans, default_chan_list,
+ search_state, action,
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+ kfree(default_chan_list);
+ }
+exit:
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ }
+ return err;
+}
+
+
+static s32
+wl_do_escan(struct wl_priv *wl, struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request)
+{
+ s32 err = BCME_OK;
+ s32 passive_scan;
+ wl_scan_results_t *results;
+ WL_SCAN(("Enter \n"));
+ wl->escan_info.ndev = ndev;
+ wl->escan_info.wiphy = wiphy;
+ wl->escan_info.escan_state = WL_ESCAN_STATE_SCANING;
+ passive_scan = wl->active_scan ? 0 : 1;
+ err = wldev_ioctl(ndev, WLC_SET_PASSIVE_SCAN,
+ &passive_scan, sizeof(passive_scan), false);
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+ results = (wl_scan_results_t *) wl->escan_info.escan_buf;
+ results->version = 0;
+ results->count = 0;
+ results->buflen = WL_SCAN_RESULTS_FIXED_SIZE;
+
+ err = wl_run_escan(wl, ndev, request, WL_SCAN_ACTION_START);
+ return err;
+}
+
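+ /* Common scan entry point shared by the cfg80211 scan op and the IBSS join
+ * path: decides between iscan, escan and the legacy WLC_SCAN ioctl, arms the
+ * scan timeout timer, and flips the P2P discovery state according to the
+ * SSIDs being scanned for.
+ */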
+static s32
+__wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request,
+ struct cfg80211_ssid *this_ssid)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct cfg80211_ssid *ssids;
+ struct wl_scan_req *sr = wl_to_sr(wl);
+ struct ether_addr primary_mac;
+ wpa_ie_fixed_t *wps_ie;
+ s32 passive_scan;
+ bool iscan_req;
+ bool escan_req = false;
+ bool p2p_ssid;
+ s32 err = 0;
+ s32 i;
+ u32 wpsie_len = 0;
+ u8 wpsie[IE_MAX_LEN];
+
+ /* If scan req comes for p2p0, send it over primary I/F
+ * Scan results will be delivered corresponding to cfg80211_scan_request
+ */
+ if (ndev == wl->p2p_net) {
+ ndev = wl_to_prmry_ndev(wl);
+ }
+
+ WL_DBG(("Enter wiphy (%p)\n", wiphy));
+ if (wl_get_drv_status_all(wl, SCANNING)) {
+ WL_ERR(("Scanning already\n"));
+ return -EAGAIN;
+ }
+ if (wl_get_drv_status(wl, SCAN_ABORTING, ndev)) {
+ WL_ERR(("Scanning being aborted\n"));
+ return -EAGAIN;
+ }
+ if (request && request->n_ssids > WL_SCAN_PARAMS_SSID_MAX) {
+ WL_ERR(("request null or n_ssids > WL_SCAN_PARAMS_SSID_MAX\n"));
+ return -EOPNOTSUPP;
+ }
+
+ /* Arm scan timeout timer */
+ mod_timer(&wl->scan_timeout, jiffies + WL_SCAN_TIMER_INTERVAL_MS * HZ / 1000);
+ iscan_req = false;
+ if (request) { /* scan bss */
+ ssids = request->ssids;
+ if (wl->iscan_on && (!ssids || !ssids->ssid_len || request->n_ssids != 1)) {
+ iscan_req = true;
+ } else if (wl->escan_on) {
+ escan_req = true;
+ p2p_ssid = false;
+ for (i = 0; i < request->n_ssids; i++) {
+ if (ssids[i].ssid_len && IS_P2P_SSID(ssids[i].ssid)) {
+ p2p_ssid = true;
+ break;
+ }
+ }
+ if (p2p_ssid) {
+ if (wl->p2p_supported) {
+ /* p2p scan trigger */
+ if (p2p_on(wl) == false) {
+ /* p2p on at the first time */
+ p2p_on(wl) = true;
+ wl_cfgp2p_set_firm_p2p(wl);
+ get_primary_mac(wl, &primary_mac);
+ wl_cfgp2p_generate_bss_mac(&primary_mac,
+ &wl->p2p->dev_addr, &wl->p2p->int_addr);
+ }
+ p2p_scan(wl) = true;
+ }
+ } else {
+ /* legacy scan trigger:
+ * p2p discovery must be disabled if it is currently on
+ */
+ if (wl->p2p_supported) {
+ p2p_scan(wl) = false;
+ /* If the netdevice is not the primary one and p2p is on,
+ * do a p2p scan using P2PAPI_BSSCFG_DEVICE.
+ */
+ if (p2p_on(wl) && (ndev != wl_to_prmry_ndev(wl)))
+ p2p_scan(wl) = true;
+
+ if (p2p_scan(wl) == false) {
+ if (wl_get_p2p_status(wl, DISCOVERY_ON)) {
+ err = wl_cfgp2p_discover_enable_search(wl,
+ false);
+ if (unlikely(err)) {
+ goto scan_out;
+ }
+
+ }
+ }
+ }
+ if (!wl->p2p_supported || !p2p_scan(wl)) {
+ if (ndev == wl_to_prmry_ndev(wl)) {
+ /* find the WPSIE */
+ memset(wpsie, 0, sizeof(wpsie));
+ if ((wps_ie = wl_cfgp2p_find_wpsie(
+ (u8 *)request->ie,
+ request->ie_len)) != NULL) {
+ wpsie_len =
+ wps_ie->length + WPA_RSN_IE_TAG_FIXED_LEN;
+ memcpy(wpsie, wps_ie, wpsie_len);
+ } else {
+ wpsie_len = 0;
+ }
+ err = wl_cfgp2p_set_management_ie(wl, ndev, -1,
+ VNDR_IE_PRBREQ_FLAG, wpsie, wpsie_len);
+ if (unlikely(err)) {
+ goto scan_out;
+ }
+ }
+ }
+ }
+ }
+ } else { /* scan in ibss */
+ /* we don't do iscan in ibss */
+ ssids = this_ssid;
+ }
+ wl->scan_request = request;
+ wl_set_drv_status(wl, SCANNING, ndev);
+ if (iscan_req) {
+ err = wl_do_iscan(wl, request);
+ if (likely(!err))
+ return err;
+ else
+ goto scan_out;
+ } else if (escan_req) {
+ if (wl->p2p_supported) {
+ if (p2p_on(wl) && p2p_scan(wl)) {
+
+ err = wl_cfgp2p_enable_discovery(wl, ndev,
+ request->ie, request->ie_len);
+
+ if (unlikely(err)) {
+ goto scan_out;
+ }
+ }
+ }
+ err = wl_do_escan(wl, wiphy, ndev, request);
+ if (likely(!err))
+ return err;
+ else
+ goto scan_out;
+
+
+ } else {
+ memset(&sr->ssid, 0, sizeof(sr->ssid));
+ sr->ssid.SSID_len =
+ min_t(u8, sizeof(sr->ssid.SSID), ssids->ssid_len);
+ if (sr->ssid.SSID_len) {
+ memcpy(sr->ssid.SSID, ssids->ssid, sr->ssid.SSID_len);
+ sr->ssid.SSID_len = htod32(sr->ssid.SSID_len);
+ WL_SCAN(("Specific scan ssid=\"%s\" len=%d\n",
+ sr->ssid.SSID, sr->ssid.SSID_len));
+ } else {
+ WL_SCAN(("Broadcast scan\n"));
+ }
+ WL_SCAN(("sr->ssid.SSID_len (%d)\n", sr->ssid.SSID_len));
+ passive_scan = wl->active_scan ? 0 : 1;
+ err = wldev_ioctl(ndev, WLC_SET_PASSIVE_SCAN,
+ &passive_scan, sizeof(passive_scan), false);
+ if (unlikely(err)) {
+ WL_SCAN(("WLC_SET_PASSIVE_SCAN error (%d)\n", err));
+ goto scan_out;
+ }
+ err = wldev_ioctl(ndev, WLC_SCAN, &sr->ssid,
+ sizeof(sr->ssid), false);
+ if (err) {
+ if (err == -EBUSY) {
+ WL_ERR(("system busy : scan for \"%s\" "
+ "canceled\n", sr->ssid.SSID));
+ } else {
+ WL_ERR(("WLC_SCAN error (%d)\n", err));
+ }
+ goto scan_out;
+ }
+ }
+
+ return 0;
+
+scan_out:
+ wl_clr_drv_status(wl, SCANNING, ndev);
+ wl->scan_request = NULL;
+ return err;
+}
+
+static s32
+wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request)
+{
+ s32 err = 0;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+
+ WL_DBG(("Enter \n"));
+ CHECK_SYS_UP(wl);
+
+ err = __wl_cfg80211_scan(wiphy, ndev, request, NULL);
+ if (unlikely(err)) {
+ WL_ERR(("scan error (%d)\n", err));
+ return err;
+ }
+
+ return err;
+}
+
+static s32 wl_set_rts(struct net_device *dev, u32 rts_threshold)
+{
+ s32 err = 0;
+
+ err = wldev_iovar_setint(dev, "rtsthresh", rts_threshold);
+ if (unlikely(err)) {
+ WL_ERR(("Error (%d)\n", err));
+ return err;
+ }
+ return err;
+}
+
+static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold)
+{
+ s32 err = 0;
+
+ err = wldev_iovar_setint_bsscfg(dev, "fragthresh", frag_threshold, 0);
+ if (unlikely(err)) {
+ WL_ERR(("Error (%d)\n", err));
+ return err;
+ }
+ return err;
+}
+
+static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l)
+{
+ s32 err = 0;
+ u32 cmd = (l ? WLC_SET_LRL : WLC_SET_SRL);
+
+ retry = htod32(retry);
+ err = wldev_ioctl(dev, cmd, &retry, sizeof(retry), false);
+ if (unlikely(err)) {
+ WL_ERR(("cmd (%d) , error (%d)\n", cmd, err));
+ return err;
+ }
+ return err;
+}
+
+static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+ struct wl_priv *wl = (struct wl_priv *)wiphy_priv(wiphy);
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+ s32 err = 0;
+
+ CHECK_SYS_UP(wl);
+ WL_DBG(("Enter\n"));
+ if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
+ (wl->conf->rts_threshold != wiphy->rts_threshold)) {
+ wl->conf->rts_threshold = wiphy->rts_threshold;
+ err = wl_set_rts(ndev, wl->conf->rts_threshold);
+ if (!err)
+ return err;
+ }
+ if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
+ (wl->conf->frag_threshold != wiphy->frag_threshold)) {
+ wl->conf->frag_threshold = wiphy->frag_threshold;
+ err = wl_set_frag(ndev, wl->conf->frag_threshold);
+ if (!err)
+ return err;
+ }
+ if (changed & WIPHY_PARAM_RETRY_LONG &&
+ (wl->conf->retry_long != wiphy->retry_long)) {
+ wl->conf->retry_long = wiphy->retry_long;
+ err = wl_set_retry(ndev, wl->conf->retry_long, true);
+ if (!err)
+ return err;
+ }
+ if (changed & WIPHY_PARAM_RETRY_SHORT &&
+ (wl->conf->retry_short != wiphy->retry_short)) {
+ wl->conf->retry_short = wiphy->retry_short;
+ err = wl_set_retry(ndev, wl->conf->retry_short, false);
+ if (!err) {
+ return err;
+ }
+ }
+
+ return err;
+}
+
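+ /* IBSS join: if the IBSS is not already known to cfg80211, scan for the
+ * requested SSID (retrying on -EBUSY), then issue WLC_SET_SSID with the
+ * SSID/BSSID join parameters.
+ */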
+static s32
+wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ibss_params *params)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct cfg80211_bss *bss;
+ struct ieee80211_channel *chan;
+ struct wl_join_params join_params;
+ struct cfg80211_ssid ssid;
+ s32 scan_retry = 0;
+ s32 err = 0;
+ bool rollback_lock = false;
+
+ WL_TRACE(("In\n"));
+ CHECK_SYS_UP(wl);
+ if (params->bssid) {
+ WL_ERR(("Invalid bssid\n"));
+ return -EOPNOTSUPP;
+ }
+ bss = cfg80211_get_ibss(wiphy, NULL, params->ssid, params->ssid_len);
+ if (!bss) {
+ memcpy(ssid.ssid, params->ssid, params->ssid_len);
+ ssid.ssid_len = params->ssid_len;
+ do {
+ if (unlikely
+ (__wl_cfg80211_scan(wiphy, dev, NULL, &ssid) ==
+ -EBUSY)) {
+ wl_delay(150);
+ } else {
+ break;
+ }
+ } while (++scan_retry < WL_SCAN_RETRY_MAX);
+ /* to allow scan_inform to propagate to cfg80211 plane */
+ if (rtnl_is_locked()) {
+ rtnl_unlock();
+ rollback_lock = true;
+ }
+
+ /* wait up to 4 seconds for the scan to finish */
+ schedule_timeout_interruptible(4 * HZ);
+ if (rollback_lock)
+ rtnl_lock();
+ bss = cfg80211_get_ibss(wiphy, NULL,
+ params->ssid, params->ssid_len);
+ }
+ if (bss) {
+ wl->ibss_starter = false;
+ WL_DBG(("Found IBSS\n"));
+ } else {
+ wl->ibss_starter = true;
+ }
+ chan = params->channel;
+ if (chan)
+ wl->channel = ieee80211_frequency_to_channel(chan->center_freq);
+ /*
+ * Join with a specific BSSID and the cached SSID.
+ * If the SSID is zero, join based on the BSSID only.
+ */
+ memset(&join_params, 0, sizeof(join_params));
+ memcpy((void *)join_params.ssid.SSID, (void *)params->ssid,
+ params->ssid_len);
+ join_params.ssid.SSID_len = htod32(params->ssid_len);
+ if (params->bssid)
+ memcpy(&join_params.params.bssid, params->bssid,
+ ETHER_ADDR_LEN);
+ else
+ memset(&join_params.params.bssid, 0, ETHER_ADDR_LEN);
+
+ err = wldev_ioctl(dev, WLC_SET_SSID, &join_params,
+ sizeof(join_params), false);
+ if (unlikely(err)) {
+ WL_ERR(("Error (%d)\n", err));
+ return err;
+ }
+ return err;
+}
+
+static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ s32 err = 0;
+
+ CHECK_SYS_UP(wl);
+ wl_link_down(wl);
+
+ return err;
+}
+
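+ /* The helpers below translate the cfg80211 connect parameters (WPA version,
+ * auth type, pairwise/group ciphers, AKM suites, optional WEP/WAPI key) into
+ * the corresponding per-bsscfg firmware iovars before the join is issued.
+ */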
+static s32
+wl_set_wpa_version(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+ struct wl_security *sec;
+ s32 val = 0;
+ s32 err = 0;
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+ if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+ val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
+ else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)
+ val = WPA2_AUTH_PSK| WPA2_AUTH_UNSPECIFIED;
+ else
+ val = WPA_AUTH_DISABLED;
+
+ if (is_wps_conn(sme))
+ val = WPA_AUTH_DISABLED;
+
+#ifdef BCMWAPI_WPI
+ if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1) {
+ WL_DBG((" * wl_set_wpa_version, set wpa_auth"
+ " to WPA_AUTH_WAPI 0x400"));
+ val = WAPI_AUTH_PSK | WAPI_AUTH_UNSPECIFIED;
+ }
+#endif
+ WL_DBG(("setting wpa_auth to 0x%0x\n", val));
+ err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("set wpa_auth failed (%d)\n", err));
+ return err;
+ }
+ sec = wl_read_prof(wl, dev, WL_PROF_SEC);
+ sec->wpa_versions = sme->crypto.wpa_versions;
+ return err;
+}
+
+#ifdef BCMWAPI_WPI
+static s32
+wl_set_set_wapi_ie(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+ s32 err = 0;
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+ WL_DBG((" %s \n", __FUNCTION__));
+
+ if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1) {
+ err = wldev_iovar_setbuf_bsscfg(dev, "wapiie", sme->ie,
+ sme->ie_len, wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+
+ if (unlikely(err)) {
+ WL_ERR(("===> set_wapi_ie Error (%d)\n", err));
+ return err;
+ }
+ } else
+ WL_DBG((" * skip \n"));
+ return err;
+}
+#endif /* BCMWAPI_WPI */
+
+static s32
+wl_set_auth_type(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+ struct wl_security *sec;
+ s32 val = 0;
+ s32 err = 0;
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+ switch (sme->auth_type) {
+ case NL80211_AUTHTYPE_OPEN_SYSTEM:
+ val = 0;
+ WL_DBG(("open system\n"));
+ break;
+ case NL80211_AUTHTYPE_SHARED_KEY:
+ val = 1;
+ WL_DBG(("shared key\n"));
+ break;
+ case NL80211_AUTHTYPE_AUTOMATIC:
+ val = 2;
+ WL_DBG(("automatic\n"));
+ break;
+ case NL80211_AUTHTYPE_NETWORK_EAP:
+ WL_DBG(("network eap\n"));
+ default:
+ val = 2;
+ WL_ERR(("invalid auth type (%d)\n", sme->auth_type));
+ break;
+ }
+
+ err = wldev_iovar_setint_bsscfg(dev, "auth", val, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("set auth failed (%d)\n", err));
+ return err;
+ }
+ sec = wl_read_prof(wl, dev, WL_PROF_SEC);
+ sec->auth_type = sme->auth_type;
+ return err;
+}
+
+static s32
+wl_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+ struct wl_security *sec;
+ s32 pval = 0;
+ s32 gval = 0;
+ s32 err = 0;
+#ifdef BCMWAPI_WPI
+ s32 val = 0;
+#endif
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+ if (sme->crypto.n_ciphers_pairwise) {
+ switch (sme->crypto.ciphers_pairwise[0]) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ pval = WEP_ENABLED;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ pval = TKIP_ENABLED;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ pval = AES_ENABLED;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ pval = AES_ENABLED;
+ break;
+#ifdef BCMWAPI_WPI
+ case WLAN_CIPHER_SUITE_SMS4:
+ val = SMS4_ENABLED;
+ pval = SMS4_ENABLED;
+ break;
+#endif
+ default:
+ WL_ERR(("invalid cipher pairwise (%d)\n",
+ sme->crypto.ciphers_pairwise[0]));
+ return -EINVAL;
+ }
+ }
+ if (sme->crypto.cipher_group) {
+ switch (sme->crypto.cipher_group) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ gval = WEP_ENABLED;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ gval = TKIP_ENABLED;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ gval = AES_ENABLED;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ gval = AES_ENABLED;
+ break;
+#ifdef BCMWAPI_WPI
+ case WLAN_CIPHER_SUITE_SMS4:
+ val = SMS4_ENABLED;
+ gval = SMS4_ENABLED;
+ break;
+#endif
+ default:
+ WL_ERR(("invalid cipher group (%d)\n",
+ sme->crypto.cipher_group));
+ return -EINVAL;
+ }
+ }
+
+ WL_DBG(("pval (%d) gval (%d)\n", pval, gval));
+
+ if (is_wps_conn(sme)) {
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", 4, bssidx);
+ } else {
+#ifdef BCMWAPI_WPI
+ if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_SMS4) {
+ WL_DBG((" NO, is_wps_conn, WAPI set to SMS4_ENABLED"));
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", val, bssidx);
+ } else {
+#endif
+ WL_DBG((" NO, is_wps_conn, Set pval | gval to WSEC"));
+ err = wldev_iovar_setint_bsscfg(dev, "wsec",
+ pval | gval, bssidx);
+#ifdef BCMWAPI_WPI
+ }
+#endif
+ }
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+
+ sec = wl_read_prof(wl, dev, WL_PROF_SEC);
+ sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0];
+ sec->cipher_group = sme->crypto.cipher_group;
+
+ return err;
+}
+
+static s32
+wl_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+ struct wl_security *sec;
+ s32 val = 0;
+ s32 err = 0;
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+ if (sme->crypto.n_akm_suites) {
+ err = wldev_iovar_getint(dev, "wpa_auth", &val);
+ if (unlikely(err)) {
+ WL_ERR(("could not get wpa_auth (%d)\n", err));
+ return err;
+ }
+ if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
+ switch (sme->crypto.akm_suites[0]) {
+ case WLAN_AKM_SUITE_8021X:
+ val = WPA_AUTH_UNSPECIFIED;
+ break;
+ case WLAN_AKM_SUITE_PSK:
+ val = WPA_AUTH_PSK;
+ break;
+ default:
+ WL_ERR(("invalid cipher group (%d)\n",
+ sme->crypto.cipher_group));
+ return -EINVAL;
+ }
+ } else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
+ switch (sme->crypto.akm_suites[0]) {
+ case WLAN_AKM_SUITE_8021X:
+ val = WPA2_AUTH_UNSPECIFIED;
+ break;
+ case WLAN_AKM_SUITE_PSK:
+ val = WPA2_AUTH_PSK;
+ break;
+ default:
+ WL_ERR(("invalid cipher group (%d)\n",
+ sme->crypto.cipher_group));
+ return -EINVAL;
+ }
+ }
+#ifdef BCMWAPI_WPI
+ else if (val & (WAPI_AUTH_PSK | WAPI_AUTH_UNSPECIFIED)) {
+ switch (sme->crypto.akm_suites[0]) {
+ case WLAN_AKM_SUITE_WAPI_CERT:
+ val = WAPI_AUTH_UNSPECIFIED;
+ break;
+ case WLAN_AKM_SUITE_WAPI_PSK:
+ val = WAPI_AUTH_PSK;
+ break;
+ default:
+ WL_ERR(("invalid cipher group (%d)\n",
+ sme->crypto.cipher_group));
+ return -EINVAL;
+ }
+ }
+#endif
+ WL_DBG(("setting wpa_auth to %d\n", val));
+
+ err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("could not set wpa_auth (%d)\n", err));
+ return err;
+ }
+ }
+ sec = wl_read_prof(wl, dev, WL_PROF_SEC);
+ sec->wpa_auth = sme->crypto.akm_suites[0];
+
+ return err;
+}
+
+static s32
+wl_set_set_sharedkey(struct net_device *dev,
+ struct cfg80211_connect_params *sme)
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+ struct wl_security *sec;
+ struct wl_wsec_key key;
+ s32 val;
+ s32 err = 0;
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+ WL_DBG(("key len (%d)\n", sme->key_len));
+ if (sme->key_len) {
+ sec = wl_read_prof(wl, dev, WL_PROF_SEC);
+ WL_DBG(("wpa_versions 0x%x cipher_pairwise 0x%x\n",
+ sec->wpa_versions, sec->cipher_pairwise));
+ if (!(sec->wpa_versions & (NL80211_WPA_VERSION_1 |
+#ifdef BCMWAPI_WPI
+ NL80211_WPA_VERSION_2 | NL80211_WAPI_VERSION_1)) &&
+#else
+ NL80211_WPA_VERSION_2)) &&
+#endif
+ (sec->cipher_pairwise & (WLAN_CIPHER_SUITE_WEP40 |
+#ifdef BCMWAPI_WPI
+ WLAN_CIPHER_SUITE_WEP104 | WLAN_CIPHER_SUITE_SMS4)))
+#else
+ WLAN_CIPHER_SUITE_WEP104)))
+#endif
+ {
+ memset(&key, 0, sizeof(key));
+ key.len = (u32) sme->key_len;
+ key.index = (u32) sme->key_idx;
+ if (unlikely(key.len > sizeof(key.data))) {
+ WL_ERR(("Too long key length (%u)\n", key.len));
+ return -EINVAL;
+ }
+ memcpy(key.data, sme->key, key.len);
+ key.flags = WL_PRIMARY_KEY;
+ switch (sec->cipher_pairwise) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ key.algo = CRYPTO_ALGO_WEP1;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ key.algo = CRYPTO_ALGO_WEP128;
+ break;
+#ifdef BCMWAPI_WPI
+ case WLAN_CIPHER_SUITE_SMS4:
+ key.algo = CRYPTO_ALGO_SMS4;
+ break;
+#endif
+ default:
+ WL_ERR(("Invalid algorithm (%d)\n",
+ sme->crypto.ciphers_pairwise[0]));
+ return -EINVAL;
+ }
+ /* Set the new key/index */
+ WL_DBG(("key length (%d) key index (%d) algo (%d)\n",
+ key.len, key.index, key.algo));
+ WL_DBG(("key \"%s\"\n", key.data));
+ swap_key_from_BE(&key);
+ err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
+ wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+ return err;
+ }
+ if (sec->auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM) {
+ WL_DBG(("set auth_type to shared key\n"));
+ val = 1; /* shared key */
+ err = wldev_iovar_setint_bsscfg(dev, "auth", val, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("set auth failed (%d)\n", err));
+ return err;
+ }
+ }
+ }
+ }
+ return err;
+}
+
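+ /* Connect handler: abort any pending scan, install the WPA/RSN/WPS (and P2P)
+ * IEs for the association request, program the security iovars via the
+ * helpers above, then try the "join" iovar with a targeted scan and fall back
+ * to the plain WLC_SET_SSID ioctl if "join" is unsupported.
+ */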
+static s32
+wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_connect_params *sme)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct ieee80211_channel *chan = sme->channel;
+ wl_extjoin_params_t *ext_join_params;
+ struct wl_join_params join_params;
+ size_t join_params_size;
+ s32 err = 0;
+ wpa_ie_fixed_t *wpa_ie;
+ wpa_ie_fixed_t *wps_ie;
+ bcm_tlv_t *wpa2_ie;
+ u8* wpaie = 0;
+ u32 wpaie_len = 0;
+ u32 wpsie_len = 0;
+ u32 chan_cnt = 0;
+ u8 wpsie[IE_MAX_LEN];
+ struct ether_addr bssid;
+ WL_DBG(("In\n"));
+ CHECK_SYS_UP(wl);
+
+ /*
+ * Cancel ongoing scan to sync up with sme state machine of cfg80211.
+ */
+ if (wl->scan_request) {
+ wl_notify_escan_complete(wl, dev, true, true);
+ }
+ /* Clean BSSID */
+ bzero(&bssid, sizeof(bssid));
+ wl_update_prof(wl, dev, NULL, (void *)&bssid, WL_PROF_BSSID);
+
+ if (IS_P2P_SSID(sme->ssid) && (dev != wl_to_prmry_ndev(wl))) {
+ /* for P2P we only allow connecting through the virtual interface */
+ if (p2p_is_on(wl) && is_wps_conn(sme)) {
+ WL_DBG(("ASSOC1 p2p index : %d sme->ie_len %d\n",
+ wl_cfgp2p_find_idx(wl, dev), sme->ie_len));
+ /* Have to apply WPS IE + P2P IE in assoc req frame */
+ wl_cfgp2p_set_management_ie(wl, dev,
+ wl_cfgp2p_find_idx(wl, dev), VNDR_IE_PRBREQ_FLAG,
+ wl_to_p2p_bss_saved_ie(wl, P2PAPI_BSSCFG_DEVICE).p2p_probe_req_ie,
+ wl_to_p2p_bss_saved_ie(wl,
+ P2PAPI_BSSCFG_DEVICE).p2p_probe_req_ie_len);
+ wl_cfgp2p_set_management_ie(wl, dev, wl_cfgp2p_find_idx(wl, dev),
+ VNDR_IE_ASSOCREQ_FLAG, sme->ie, sme->ie_len);
+ } else if (p2p_is_on(wl) && (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)) {
+ /* This is the connect request issued after WPS is done (credentials
+ * exchanged), currently identified by WPA_VERSION_2.
+ * Update the previously set IEs with the ones newly received from the
+ * supplicant; this removes the WPS IE from the assoc request.
+ */
+ WL_DBG(("ASSOC2 p2p index : %d sme->ie_len %d\n",
+ wl_cfgp2p_find_idx(wl, dev), sme->ie_len));
+ wl_cfgp2p_set_management_ie(wl, dev, wl_cfgp2p_find_idx(wl, dev),
+ VNDR_IE_ASSOCREQ_FLAG, sme->ie, sme->ie_len);
+ }
+
+ } else if (dev == wl_to_prmry_ndev(wl)) {
+ /* find the RSN_IE */
+ if ((wpa2_ie = bcm_parse_tlvs((u8 *)sme->ie, sme->ie_len,
+ DOT11_MNG_RSN_ID)) != NULL) {
+ WL_DBG((" WPA2 IE is found\n"));
+ }
+ /* find the WPA_IE */
+ if ((wpa_ie = wl_cfgp2p_find_wpaie((u8 *)sme->ie,
+ sme->ie_len)) != NULL) {
+ WL_DBG((" WPA IE is found\n"));
+ }
+ if (wpa_ie != NULL || wpa2_ie != NULL) {
+ wpaie = (wpa_ie != NULL) ? (u8 *)wpa_ie : (u8 *)wpa2_ie;
+ wpaie_len = (wpa_ie != NULL) ? wpa_ie->length : wpa2_ie->len;
+ wpaie_len += WPA_RSN_IE_TAG_FIXED_LEN;
+ wldev_iovar_setbuf(dev, "wpaie", wpaie, wpaie_len,
+ wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
+ } else {
+ wldev_iovar_setbuf(dev, "wpaie", NULL, 0,
+ wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
+ }
+
+ /* find the WPSIE */
+ memset(wpsie, 0, sizeof(wpsie));
+ if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)sme->ie,
+ sme->ie_len)) != NULL) {
+ wpsie_len = wps_ie->length +WPA_RSN_IE_TAG_FIXED_LEN;
+ memcpy(wpsie, wps_ie, wpsie_len);
+ } else {
+ wpsie_len = 0;
+ }
+ err = wl_cfgp2p_set_management_ie(wl, dev, -1,
+ VNDR_IE_ASSOCREQ_FLAG, wpsie, wpsie_len);
+ if (unlikely(err)) {
+ return err;
+ }
+ }
+ if (unlikely(!sme->ssid)) {
+ WL_ERR(("Invalid ssid\n"));
+ return -EOPNOTSUPP;
+ }
+ if (chan) {
+ wl->channel = ieee80211_frequency_to_channel(chan->center_freq);
+ chan_cnt = 1;
+ WL_DBG(("channel (%d), center_req (%d)\n", wl->channel,
+ chan->center_freq));
+ } else
+ wl->channel = 0;
+#ifdef BCMWAPI_WPI
+ WL_DBG(("1. enable wapi auth\n"));
+ if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1) {
+ WL_DBG(("2. set wapi ie \n"));
+ err = wl_set_set_wapi_ie(dev, sme);
+ if (unlikely(err))
+ return err;
+ } else
+ WL_DBG(("2. Not wapi ie \n"));
+#endif
+ WL_DBG(("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len));
+ WL_DBG(("3. set wapi version \n"));
+ err = wl_set_wpa_version(dev, sme);
+ if (unlikely(err)) {
+ WL_ERR(("Invalid wpa_version\n"));
+ return err;
+ }
+#ifdef BCMWAPI_WPI
+ if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1)
+ WL_DBG(("4. WAPI Dont Set wl_set_auth_type\n"));
+ else {
+ WL_DBG(("4. wl_set_auth_type\n"));
+#endif
+ err = wl_set_auth_type(dev, sme);
+ if (unlikely(err)) {
+ WL_ERR(("Invalid auth type\n"));
+ return err;
+ }
+#ifdef BCMWAPI_WPI
+ }
+#endif
+
+ err = wl_set_set_cipher(dev, sme);
+ if (unlikely(err)) {
+ WL_ERR(("Invalid ciper\n"));
+ return err;
+ }
+
+ err = wl_set_key_mgmt(dev, sme);
+ if (unlikely(err)) {
+ WL_ERR(("Invalid key mgmt\n"));
+ return err;
+ }
+
+ err = wl_set_set_sharedkey(dev, sme);
+ if (unlikely(err)) {
+ WL_ERR(("Invalid shared key\n"));
+ return err;
+ }
+
+ /*
+ * Join with a specific BSSID and the cached SSID.
+ * If the SSID is zero, join based on the BSSID only.
+ */
+ join_params_size = WL_EXTJOIN_PARAMS_FIXED_SIZE +
+ chan_cnt * sizeof(chanspec_t);
+ ext_join_params = (wl_extjoin_params_t*)kzalloc(join_params_size, GFP_KERNEL);
+ if (ext_join_params == NULL) {
+ err = -ENOMEM;
+ wl_clr_drv_status(wl, CONNECTING, dev);
+ goto exit;
+ }
+ ext_join_params->ssid.SSID_len = min(sizeof(ext_join_params->ssid.SSID), sme->ssid_len);
+ memcpy(&ext_join_params->ssid.SSID, sme->ssid, ext_join_params->ssid.SSID_len);
+ ext_join_params->ssid.SSID_len = htod32(ext_join_params->ssid.SSID_len);
+ /* Set up join scan parameters */
+ ext_join_params->scan.scan_type = -1;
+ ext_join_params->scan.nprobes = 2;
+ /* increase dwell time to receive probe responses or detect beacons
+ * from the target AP in noisy air, only during the connect command
+ */
+ ext_join_params->scan.active_time = WL_SCAN_ACTIVE_TIME*3;
+ ext_join_params->scan.passive_time = WL_SCAN_PASSIVE_TIME*3;
+ ext_join_params->scan.home_time = -1;
+
+ if (sme->bssid)
+ memcpy(&ext_join_params->assoc.bssid, sme->bssid, ETH_ALEN);
+ else
+ memcpy(&ext_join_params->assoc.bssid, &ether_bcast, ETH_ALEN);
+ ext_join_params->assoc.chanspec_num = chan_cnt;
+ if (chan_cnt) {
+ u16 channel, band, bw, ctl_sb;
+ chanspec_t chspec;
+ channel = wl->channel;
+ band = (channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G
+ : WL_CHANSPEC_BAND_5G;
+ bw = WL_CHANSPEC_BW_20;
+ ctl_sb = WL_CHANSPEC_CTL_SB_NONE;
+ chspec = (channel | band | bw | ctl_sb);
+ ext_join_params->assoc.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK;
+ ext_join_params->assoc.chanspec_list[0] |= chspec;
+ ext_join_params->assoc.chanspec_list[0] =
+ wl_chspec_host_to_driver(ext_join_params->assoc.chanspec_list[0]);
+ }
+ ext_join_params->assoc.chanspec_num = htod32(ext_join_params->assoc.chanspec_num);
+ if (ext_join_params->ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+ WL_INFO(("ssid \"%s\", len (%d)\n", ext_join_params->ssid.SSID,
+ ext_join_params->ssid.SSID_len));
+ }
+ wl_set_drv_status(wl, CONNECTING, dev);
+ err = wldev_iovar_setbuf_bsscfg(dev, "join", ext_join_params, join_params_size,
+ wl->ioctl_buf, WLC_IOCTL_MAXLEN, wl_cfgp2p_find_idx(wl, dev), &wl->ioctl_buf_sync);
+ kfree(ext_join_params);
+ if (err) {
+ wl_clr_drv_status(wl, CONNECTING, dev);
+ if (err == BCME_UNSUPPORTED) {
+ WL_DBG(("join iovar is not supported\n"));
+ goto set_ssid;
+ } else
+ WL_ERR(("error (%d)\n", err));
+ } else
+ goto exit;
+
+set_ssid:
+ memset(&join_params, 0, sizeof(join_params));
+ join_params_size = sizeof(join_params.ssid);
+
+ join_params.ssid.SSID_len = min(sizeof(join_params.ssid.SSID), sme->ssid_len);
+ memcpy(&join_params.ssid.SSID, sme->ssid, join_params.ssid.SSID_len);
+ join_params.ssid.SSID_len = htod32(join_params.ssid.SSID_len);
+ wl_update_prof(wl, dev, NULL, &join_params.ssid, WL_PROF_SSID);
+ if (sme->bssid)
+ memcpy(&join_params.params.bssid, sme->bssid, ETH_ALEN);
+ else
+ memcpy(&join_params.params.bssid, &ether_bcast, ETH_ALEN);
+
+ wl_ch_to_chanspec(wl->channel, &join_params, &join_params_size);
+ WL_DBG(("join_param_size %d\n", join_params_size));
+
+ if (join_params.ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+ WL_INFO(("ssid \"%s\", len (%d)\n", join_params.ssid.SSID,
+ join_params.ssid.SSID_len));
+ }
+ wl_set_drv_status(wl, CONNECTING, dev);
+ err = wldev_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size, true);
+ if (err) {
+ WL_ERR(("error (%d)\n", err));
+ wl_clr_drv_status(wl, CONNECTING, dev);
+ }
+exit:
+ return err;
+}
+
+static s32
+wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
+ u16 reason_code)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ scb_val_t scbval;
+ bool act = false;
+ s32 err = 0;
+ u8 *curbssid;
+ WL_ERR(("Reason %d\n", reason_code));
+ CHECK_SYS_UP(wl);
+ act = *(bool *) wl_read_prof(wl, dev, WL_PROF_ACT);
+ curbssid = wl_read_prof(wl, dev, WL_PROF_BSSID);
+ if (act) {
+ /*
+ * Cancel ongoing scan to sync up with sme state machine of cfg80211.
+ */
+ if (wl->scan_request) {
+ wl_notify_escan_complete(wl, dev, true, true);
+ }
+ wl_set_drv_status(wl, DISCONNECTING, dev);
+ scbval.val = reason_code;
+ memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
+ scbval.val = htod32(scbval.val);
+ err = wldev_ioctl(dev, WLC_DISASSOC, &scbval,
+ sizeof(scb_val_t), true);
+ if (unlikely(err)) {
+ wl_clr_drv_status(wl, DISCONNECTING, dev);
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+ }
+
+ return err;
+}
+
+static s32
+wl_cfg80211_set_tx_power(struct wiphy *wiphy,
+ enum nl80211_tx_power_setting type, s32 dbm)
+{
+
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+ u16 txpwrmw;
+ s32 err = 0;
+ s32 disable = 0;
+
+ CHECK_SYS_UP(wl);
+ switch (type) {
+ case NL80211_TX_POWER_AUTOMATIC:
+ break;
+ case NL80211_TX_POWER_LIMITED:
+ if (dbm < 0) {
+ WL_ERR(("TX_POWER_LIMITTED - dbm is negative\n"));
+ return -EINVAL;
+ }
+ break;
+ case NL80211_TX_POWER_FIXED:
+ if (dbm < 0) {
+ WL_ERR(("TX_POWER_FIXED - dbm is negative..\n"));
+ return -EINVAL;
+ }
+ break;
+ }
+ /* Make sure radio is off or on as far as software is concerned */
+ disable = WL_RADIO_SW_DISABLE << 16;
+ disable = htod32(disable);
+ err = wldev_ioctl(ndev, WLC_SET_RADIO, &disable, sizeof(disable), true);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SET_RADIO error (%d)\n", err));
+ return err;
+ }
+
+ if (dbm > 0xffff)
+ txpwrmw = 0xffff;
+ else
+ txpwrmw = (u16) dbm;
+ err = wldev_iovar_setint(ndev, "qtxpower",
+ (s32) (bcm_mw_to_qdbm(txpwrmw)));
+ if (unlikely(err)) {
+ WL_ERR(("qtxpower error (%d)\n", err));
+ return err;
+ }
+ wl->conf->tx_power = dbm;
+
+ return err;
+}
+
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+ s32 txpwrdbm;
+ u8 result;
+ s32 err = 0;
+
+ CHECK_SYS_UP(wl);
+ err = wldev_iovar_getint(ndev, "qtxpower", &txpwrdbm);
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+ result = (u8) (txpwrdbm & ~WL_TXPWR_OVERRIDE);
+ *dbm = (s32) bcm_qdbm_to_mw(result);
+
+ return err;
+}
+
+static s32
+wl_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool unicast, bool multicast)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ u32 index;
+ s32 wsec;
+ s32 err = 0;
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+ WL_DBG(("key index (%d)\n", key_idx));
+ CHECK_SYS_UP(wl);
+ err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_GET_WSEC error (%d)\n", err));
+ return err;
+ }
+ if (wsec & WEP_ENABLED) {
+ /* Just select a new current key */
+ index = (u32) key_idx;
+ index = htod32(index);
+ err = wldev_ioctl(dev, WLC_SET_KEY_PRIMARY, &index,
+ sizeof(index), true);
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ }
+ }
+ return err;
+}
+
+static s32
+wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, const u8 *mac_addr, struct key_params *params)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct wl_wsec_key key;
+ s32 err = 0;
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+ s32 mode = wl_get_mode_by_netdev(wl, dev);
+ memset(&key, 0, sizeof(key));
+ key.index = (u32) key_idx;
+
+ if (!ETHER_ISMULTI(mac_addr))
+ memcpy((char *)&key.ea, (void *)mac_addr, ETHER_ADDR_LEN);
+ key.len = (u32) params->key_len;
+
+ /* check for key index change */
+ if (key.len == 0) {
+ /* key delete */
+ swap_key_from_BE(&key);
+ err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
+ wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("key delete error (%d)\n", err));
+ return err;
+ }
+ } else {
+ if (key.len > sizeof(key.data)) {
+ WL_ERR(("Invalid key length (%d)\n", key.len));
+ return -EINVAL;
+ }
+ WL_DBG(("Setting the key index %d\n", key.index));
+ memcpy(key.data, params->key, key.len);
+
+ if ((mode == WL_MODE_BSS) &&
+ (params->cipher == WLAN_CIPHER_SUITE_TKIP)) {
+ u8 keybuf[8];
+ memcpy(keybuf, &key.data[24], sizeof(keybuf));
+ memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
+ memcpy(&key.data[16], keybuf, sizeof(keybuf));
+ }
+
+ /* if IW_ENCODE_EXT_RX_SEQ_VALID set */
+ if (params->seq && params->seq_len == 6) {
+ /* rx iv */
+ u8 *ivptr;
+ ivptr = (u8 *) params->seq;
+ key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
+ (ivptr[3] << 8) | ivptr[2];
+ key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
+ key.iv_initialized = true;
+ }
+
+ switch (params->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ key.algo = CRYPTO_ALGO_WEP1;
+ WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ key.algo = CRYPTO_ALGO_WEP128;
+ WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ key.algo = CRYPTO_ALGO_TKIP;
+ WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
+ break;
+#ifdef BCMWAPI_WPI
+ case WLAN_CIPHER_SUITE_SMS4:
+ key.algo = CRYPTO_ALGO_SMS4;
+ WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n"));
+ break;
+#endif
+ default:
+ WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
+ return -EINVAL;
+ }
+ swap_key_from_BE(&key);
+#if defined(CONFIG_WIRELESS_EXT)
+ dhd_wait_pend8021x(dev);
+#endif
+ err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
+ wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+ return err;
+ }
+ }
+ return err;
+}
+
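+ /* cfg80211 add_key handler: per-peer keys are routed through wl_add_keyext(),
+ * group/default keys are installed directly via the "wsec_key" iovar, and the
+ * interface's "wsec" bitmap is updated to enable the matching cipher.
+ */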
+static s32
+wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr,
+ struct key_params *params)
+{
+ struct wl_wsec_key key;
+ s32 val = 0;
+ s32 wsec = 0;
+ s32 err = 0;
+ u8 keybuf[8];
+ s32 bssidx = 0;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ s32 mode = wl_get_mode_by_netdev(wl, dev);
+ WL_DBG(("key index (%d)\n", key_idx));
+ CHECK_SYS_UP(wl);
+
+ bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+ if (mac_addr) {
+ wl_add_keyext(wiphy, dev, key_idx, mac_addr, params);
+ goto exit;
+ }
+ memset(&key, 0, sizeof(key));
+
+ key.len = (u32) params->key_len;
+ key.index = (u32) key_idx;
+
+ if (unlikely(key.len > sizeof(key.data))) {
+ WL_ERR(("Too long key length (%u)\n", key.len));
+ return -EINVAL;
+ }
+ memcpy(key.data, params->key, key.len);
+
+ key.flags = WL_PRIMARY_KEY;
+ switch (params->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ key.algo = CRYPTO_ALGO_WEP1;
+ val = WEP_ENABLED;
+ WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ key.algo = CRYPTO_ALGO_WEP128;
+ val = WEP_ENABLED;
+ WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ key.algo = CRYPTO_ALGO_TKIP;
+ val = TKIP_ENABLED;
+ /* wpa_supplicant switches the third and fourth quarters of the TKIP key */
+ if (mode == WL_MODE_BSS) {
+ bcopy(&key.data[24], keybuf, sizeof(keybuf));
+ bcopy(&key.data[16], &key.data[24], sizeof(keybuf));
+ bcopy(keybuf, &key.data[16], sizeof(keybuf));
+ }
+ WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ val = AES_ENABLED;
+ WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ val = AES_ENABLED;
+ WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
+ break;
+#ifdef BCMWAPI_WPI
+ case WLAN_CIPHER_SUITE_SMS4:
+ key.algo = CRYPTO_ALGO_SMS4;
+ WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n"));
+ val = SMS4_ENABLED;
+ break;
+#endif /* BCMWAPI_WPI */
+ default:
+ WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
+ return -EINVAL;
+ }
+
+ /* Set the new key/index */
+ swap_key_from_BE(&key);
+ err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), wl->ioctl_buf,
+ WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+ return err;
+ }
+
+exit:
+ err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("get wsec error (%d)\n", err));
+ return err;
+ }
+
+ wsec |= val;
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("set wsec error (%d)\n", err));
+ return err;
+ }
+
+ return err;
+}
+
+static s32
+wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr)
+{
+ struct wl_wsec_key key;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ s32 err = 0;
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+ WL_DBG(("Enter\n"));
+ CHECK_SYS_UP(wl);
+ memset(&key, 0, sizeof(key));
+
+ key.flags = WL_PRIMARY_KEY;
+ key.algo = CRYPTO_ALGO_OFF;
+ key.index = (u32) key_idx;
+
+ WL_DBG(("key index (%d)\n", key_idx));
+ /* Set the new key/index */
+ swap_key_from_BE(&key);
+ err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), wl->ioctl_buf,
+ WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+ if (unlikely(err)) {
+ if (err == -EINVAL) {
+ if (key.index >= DOT11_MAX_DEFAULT_KEYS) {
+ /* we ignore this key index in this case */
+ WL_DBG(("invalid key index (%d)\n", key_idx));
+ }
+ } else {
+ WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+ }
+ return err;
+ }
+ return err;
+}
+
+static s32
+wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie,
+ void (*callback) (void *cookie, struct key_params * params))
+{
+ struct key_params params;
+ struct wl_wsec_key key;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct wl_security *sec;
+ s32 wsec;
+ s32 err = 0;
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+ WL_DBG(("key index (%d)\n", key_idx));
+ CHECK_SYS_UP(wl);
+ memset(&key, 0, sizeof(key));
+ key.index = key_idx;
+ swap_key_to_BE(&key);
+ memset(&params, 0, sizeof(params));
+ params.key_len = (u8) min_t(u8, DOT11_MAX_KEY_SIZE, key.len);
+ memcpy(params.key, key.data, params.key_len);
+
+ err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_GET_WSEC error (%d)\n", err));
+ return err;
+ }
+ switch (wsec & ~SES_OW_ENABLED) {
+ case WEP_ENABLED:
+ sec = wl_read_prof(wl, dev, WL_PROF_SEC);
+ if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
+ params.cipher = WLAN_CIPHER_SUITE_WEP40;
+ WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+ } else if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP104) {
+ params.cipher = WLAN_CIPHER_SUITE_WEP104;
+ WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+ }
+ break;
+ case TKIP_ENABLED:
+ params.cipher = WLAN_CIPHER_SUITE_TKIP;
+ WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+ break;
+ case AES_ENABLED:
+ params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
+ WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+ break;
+#ifdef BCMWAPI_WPI
+ case WLAN_CIPHER_SUITE_SMS4:
+ key.algo = CRYPTO_ALGO_SMS4;
+ WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n"));
+ break;
+#endif
+ default:
+ WL_ERR(("Invalid algo (0x%x)\n", wsec));
+ return -EINVAL;
+ }
+
+ callback(cookie, &params);
+ return err;
+}
+
+static s32
+wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
+ struct net_device *dev, u8 key_idx)
+{
+ WL_INFO(("Not supported\n"));
+ return -EOPNOTSUPP;
+}
+
+static s32
+wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
+ u8 *mac, struct station_info *sinfo)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ scb_val_t scb_val;
+ s32 rssi;
+ s32 rate;
+ s32 err = 0;
+ sta_info_t *sta;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
+ s8 eabuf[ETHER_ADDR_STR_LEN];
+#endif
+ dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub);
+ CHECK_SYS_UP(wl);
+ if (wl_get_mode_by_netdev(wl, dev) == WL_MODE_AP) {
+ err = wldev_iovar_getbuf(dev, "sta_info", (struct ether_addr *)mac,
+ ETHER_ADDR_LEN, wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
+ if (err < 0) {
+ WL_ERR(("GET STA INFO failed, %d\n", err));
+ return err;
+ }
+ sinfo->filled = STATION_INFO_INACTIVE_TIME;
+ sta = (sta_info_t *)wl->ioctl_buf;
+ sta->len = dtoh16(sta->len);
+ sta->cap = dtoh16(sta->cap);
+ sta->flags = dtoh32(sta->flags);
+ sta->idle = dtoh32(sta->idle);
+ sta->in = dtoh32(sta->in);
+ sinfo->inactive_time = sta->idle * 1000;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
+ if (sta->flags & WL_STA_ASSOC) {
+ sinfo->filled |= STATION_INFO_CONNECTED_TIME;
+ sinfo->connected_time = sta->in;
+ }
+ WL_INFO(("STA %s : idle time : %d sec, connected time :%d ms\n",
+ bcm_ether_ntoa((const struct ether_addr *)mac, eabuf), sinfo->inactive_time,
+ sta->idle * 1000));
+#endif
+ } else if (wl_get_mode_by_netdev(wl, dev) == WL_MODE_BSS) {
+ u8 *curmacp = wl_read_prof(wl, dev, WL_PROF_BSSID);
+ if (!wl_get_drv_status(wl, CONNECTED, dev) ||
+ (dhd_is_associated(dhd, NULL) == FALSE)) {
+ WL_ERR(("NOT assoc\n"));
+ err = -ENODEV;
+ goto get_station_err;
+ }
+ if (memcmp(mac, curmacp, ETHER_ADDR_LEN)) {
+ WL_ERR(("Wrong Mac address: "MACSTR" != "MACSTR"\n",
+ MAC2STR(mac), MAC2STR(curmacp)));
+ }
+
+ /* Report the current tx rate */
+ err = wldev_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate), false);
+ if (err) {
+ WL_ERR(("Could not get rate (%d)\n", err));
+ } else {
+ rate = dtoh32(rate);
+ sinfo->filled |= STATION_INFO_TX_BITRATE;
+ sinfo->txrate.legacy = rate * 5;
+ WL_DBG(("Rate %d Mbps\n", (rate / 2)));
+ }
+
+ memset(&scb_val, 0, sizeof(scb_val));
+ scb_val.val = 0;
+ err = wldev_ioctl(dev, WLC_GET_RSSI, &scb_val,
+ sizeof(scb_val_t), false);
+ if (err) {
+ WL_ERR(("Could not get rssi (%d)\n", err));
+ goto get_station_err;
+ }
+ rssi = dtoh32(scb_val.val);
+ sinfo->filled |= STATION_INFO_SIGNAL;
+ sinfo->signal = rssi;
+ WL_DBG(("RSSI %d dBm\n", rssi));
+
+get_station_err:
+ if (err) {
+ /* Disconnect due to a zero BSSID or a failure to get the RSSI */
+ WL_ERR(("force cfg80211_disconnected\n"));
+ wl_clr_drv_status(wl, CONNECTED, dev);
+ cfg80211_disconnected(dev, 0, NULL, 0, GFP_KERNEL);
+ wl_link_down(wl);
+ }
+ }
+
+ return err;
+}
+
+static s32
+wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ bool enabled, s32 timeout)
+{
+ s32 pm;
+ s32 err = 0;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+
+ CHECK_SYS_UP(wl);
+
+ if (wl->p2p_net == dev) {
+ return err;
+ }
+
+ pm = PM_OFF; /* enabled ? PM_FAST : PM_OFF; */
+ /* Do not enable the power save after assoc if it is p2p interface */
+ if (wl->p2p && wl->p2p->vif_created) {
+ WL_DBG(("Do not enable the power save for p2p interfaces even after assoc\n"));
+ pm = PM_OFF;
+ }
+ pm = htod32(pm);
+ WL_DBG(("power save %s\n", (pm ? "enabled" : "disabled")));
+ err = wldev_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), true);
+ if (unlikely(err)) {
+ if (err == -ENODEV)
+ WL_DBG(("net_device is not ready yet\n"));
+ else
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+ return err;
+}
+
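+ /* Return the 1-based position of the most significant set bit of a 16-bit
+ * value (0 if no bit is set), found by successive halving.
+ */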
+static __used u32 wl_find_msb(u16 bit16)
+{
+ u32 ret = 0;
+
+ if (bit16 & 0xff00) {
+ ret += 8;
+ bit16 >>= 8;
+ }
+
+ if (bit16 & 0xf0) {
+ ret += 4;
+ bit16 >>= 4;
+ }
+
+ if (bit16 & 0xc) {
+ ret += 2;
+ bit16 >>= 2;
+ }
+
+ if (bit16 & 2)
+ ret += bit16 & 2;
+ else if (bit16)
+ ret += bit16;
+
+ return ret;
+}
+
+static s32 wl_cfg80211_resume(struct wiphy *wiphy)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+ s32 err = 0;
+
+ if (unlikely(!wl_get_drv_status(wl, READY, ndev))) {
+ WL_INFO(("device is not ready\n"));
+ return 0;
+ }
+
+ wl_invoke_iscan(wl);
+
+ return err;
+}
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow)
+#else
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy)
+#endif
+{
+#ifdef DHD_CLEAR_ON_SUSPEND
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct net_info *iter, *next;
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+ unsigned long flags;
+ if (unlikely(!wl_get_drv_status(wl, READY, ndev))) {
+ WL_INFO(("device is not ready : status (%d)\n",
+ (int)wl->status));
+ return 0;
+ }
+ for_each_ndev(wl, iter, next)
+ wl_set_drv_status(wl, SCAN_ABORTING, iter->ndev);
+ wl_term_iscan(wl);
+ spin_lock_irqsave(&wl->cfgdrv_lock, flags);
+ if (wl->scan_request) {
+ cfg80211_scan_done(wl->scan_request, true);
+ wl->scan_request = NULL;
+ }
+ for_each_ndev(wl, iter, next) {
+ wl_clr_drv_status(wl, SCANNING, iter->ndev);
+ wl_clr_drv_status(wl, SCAN_ABORTING, iter->ndev);
+ }
+ spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
+ for_each_ndev(wl, iter, next) {
+ if (wl_get_drv_status(wl, CONNECTING, iter->ndev)) {
+ wl_bss_connect_done(wl, iter->ndev, NULL, NULL, false);
+ }
+ }
+#endif /* DHD_CLEAR_ON_SUSPEND */
+ return 0;
+}
+
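+/* Push the driver's cached PMKID list to the firmware via the "pmkid_info"
+ * iovar. Only the primary (STA) interface is supported; calls for virtual
+ * interfaces return without flushing anything.
+ */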
+static s32
+wl_update_pmklist(struct net_device *dev, struct wl_pmk_list *pmk_list,
+ s32 err)
+{
+ int i, j;
+ struct wl_priv *wl = wlcfg_drv_priv;
+ struct net_device *primary_dev = wl_to_prmry_ndev(wl);
+
+ if (!pmk_list) {
+ printk("pmk_list is NULL\n");
+ return -EINVAL;
+ }
+	/* The PMK list is supported only on the STA (primary) interface;
+	 * see wlc_bsscfg.c:wlc_bsscfg_sta_init().
+	 */
+ if (primary_dev != dev) {
+		WL_INFO(("Flushing the PMK list is not supported on virtual"
+			" interfaces, only on the primary interface\n"));
+ return err;
+ }
+
+ WL_DBG(("No of elements %d\n", pmk_list->pmkids.npmkid));
+ for (i = 0; i < pmk_list->pmkids.npmkid; i++) {
+ WL_DBG(("PMKID[%d]: %pM =\n", i,
+ &pmk_list->pmkids.pmkid[i].BSSID));
+ for (j = 0; j < WPA2_PMKID_LEN; j++) {
+ WL_DBG(("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j]));
+ }
+ }
+ if (likely(!err)) {
+ err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmk_list,
+ sizeof(*pmk_list), wl->ioctl_buf, WLC_IOCTL_MAXLEN, NULL);
+ }
+
+ return err;
+}
+
+static s32
+wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ s32 err = 0;
+ int i;
+
+ CHECK_SYS_UP(wl);
+ for (i = 0; i < wl->pmk_list->pmkids.npmkid; i++)
+ if (!memcmp(pmksa->bssid, &wl->pmk_list->pmkids.pmkid[i].BSSID,
+ ETHER_ADDR_LEN))
+ break;
+ if (i < WL_NUM_PMKIDS_MAX) {
+ memcpy(&wl->pmk_list->pmkids.pmkid[i].BSSID, pmksa->bssid,
+ ETHER_ADDR_LEN);
+ memcpy(&wl->pmk_list->pmkids.pmkid[i].PMKID, pmksa->pmkid,
+ WPA2_PMKID_LEN);
+ if (i == wl->pmk_list->pmkids.npmkid)
+ wl->pmk_list->pmkids.npmkid++;
+ } else {
+ err = -EINVAL;
+ }
+ WL_DBG(("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
+ &wl->pmk_list->pmkids.pmkid[wl->pmk_list->pmkids.npmkid - 1].BSSID));
+ for (i = 0; i < WPA2_PMKID_LEN; i++) {
+ WL_DBG(("%02x\n",
+ wl->pmk_list->pmkids.pmkid[wl->pmk_list->pmkids.npmkid - 1].
+ PMKID[i]));
+ }
+
+ err = wl_update_pmklist(dev, wl->pmk_list, err);
+
+ return err;
+}
+
+static s32
+wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct _pmkid_list pmkid;
+ s32 err = 0;
+ int i;
+
+ CHECK_SYS_UP(wl);
+ memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETHER_ADDR_LEN);
+ memcpy(&pmkid.pmkid[0].PMKID, pmksa->pmkid, WPA2_PMKID_LEN);
+
+ WL_DBG(("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
+ &pmkid.pmkid[0].BSSID));
+ for (i = 0; i < WPA2_PMKID_LEN; i++) {
+ WL_DBG(("%02x\n", pmkid.pmkid[0].PMKID[i]));
+ }
+
+ for (i = 0; i < wl->pmk_list->pmkids.npmkid; i++)
+ if (!memcmp
+ (pmksa->bssid, &wl->pmk_list->pmkids.pmkid[i].BSSID,
+ ETHER_ADDR_LEN))
+ break;
+
+ if ((wl->pmk_list->pmkids.npmkid > 0) &&
+ (i < wl->pmk_list->pmkids.npmkid)) {
+ memset(&wl->pmk_list->pmkids.pmkid[i], 0, sizeof(pmkid_t));
+ for (; i < (wl->pmk_list->pmkids.npmkid - 1); i++) {
+ memcpy(&wl->pmk_list->pmkids.pmkid[i].BSSID,
+ &wl->pmk_list->pmkids.pmkid[i + 1].BSSID,
+ ETHER_ADDR_LEN);
+ memcpy(&wl->pmk_list->pmkids.pmkid[i].PMKID,
+ &wl->pmk_list->pmkids.pmkid[i + 1].PMKID,
+ WPA2_PMKID_LEN);
+ }
+ wl->pmk_list->pmkids.npmkid--;
+ } else {
+ err = -EINVAL;
+ }
+
+ err = wl_update_pmklist(dev, wl->pmk_list, err);
+
+ return err;
+
+}
+
+static s32
+wl_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *dev)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ s32 err = 0;
+ CHECK_SYS_UP(wl);
+ memset(wl->pmk_list, 0, sizeof(*wl->pmk_list));
+ err = wl_update_pmklist(dev, wl->pmk_list, err);
+ return err;
+
+}
+
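+/* Allocate a minimal wl_scan_params_t describing an active scan of a single
+ * channel (or no specific channel when 'channel' is 0) with zero SSIDs.
+ * The allocated size is returned through *out_params_size.
+ */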
+static wl_scan_params_t *
+wl_cfg80211_scan_alloc_params(int channel, int nprobes, int *out_params_size)
+{
+ wl_scan_params_t *params;
+ int params_size;
+ int num_chans;
+
+ *out_params_size = 0;
+
+ /* Our scan params only need space for 1 channel and 0 ssids */
+ params_size = WL_SCAN_PARAMS_FIXED_SIZE + 1 * sizeof(uint16);
+ params = (wl_scan_params_t*) kzalloc(params_size, GFP_KERNEL);
+ if (params == NULL) {
+ WL_ERR(("%s: mem alloc failed (%d bytes)\n", __func__, params_size));
+ return params;
+ }
+	params->nprobes = nprobes;
+
+ num_chans = (channel == 0) ? 0 : 1;
+
+ memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+ params->bss_type = DOT11_BSSTYPE_ANY;
+ params->scan_type = DOT11_SCANTYPE_ACTIVE;
+ params->nprobes = htod32(1);
+ params->active_time = htod32(-1);
+ params->passive_time = htod32(-1);
+ params->home_time = htod32(10);
+ if (channel == -1)
+ params->channel_list[0] = htodchanspec(channel);
+ else
+ params->channel_list[0] = wl_ch_host_to_driver(channel);
+
+ /* Our scan params have 1 channel and 0 ssids */
+ params->channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+
+ *out_params_size = params_size; /* rtn size to the caller */
+ return params;
+}
+
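+/* cfg80211 remain_on_channel handler: abort any scan in progress, remember
+ * the requested channel, report ready-on-channel immediately and, when P2P
+ * is enabled, start a discovery listen on the target channel.
+ */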
+static s32
+wl_cfg80211_remain_on_channel(struct wiphy *wiphy, struct net_device *dev,
+ struct ieee80211_channel * channel,
+ enum nl80211_channel_type channel_type,
+ unsigned int duration, u64 *cookie)
+{
+ s32 target_channel;
+ u32 id;
+ struct ether_addr primary_mac;
+ struct net_device *ndev = NULL;
+
+ s32 err = BCME_OK;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ WL_DBG(("Enter, netdev_ifidx: %d \n", dev->ifindex));
+
+ if (wl->p2p_net == dev) {
+ ndev = wl_to_prmry_ndev(wl);
+ } else {
+ ndev = dev;
+ }
+
+ if (wl_get_drv_status(wl, SCANNING, ndev)) {
+ wl_notify_escan_complete(wl, ndev, true, true);
+ }
+
+ target_channel = ieee80211_frequency_to_channel(channel->center_freq);
+ memcpy(&wl->remain_on_chan, channel, sizeof(struct ieee80211_channel));
+ wl->remain_on_chan_type = channel_type;
+ id = ++wl->last_roc_id;
+ if (id == 0)
+ id = ++wl->last_roc_id;
+ *cookie = id;
+ cfg80211_ready_on_channel(dev, *cookie, channel,
+ channel_type, duration, GFP_KERNEL);
+ if (wl->p2p && !wl->p2p->on) {
+ get_primary_mac(wl, &primary_mac);
+ wl_cfgp2p_generate_bss_mac(&primary_mac, &wl->p2p->dev_addr, &wl->p2p->int_addr);
+
+		/* For the p2p_listen command the supplicant sends remain_on_channel
+		 * without turning on P2P first
+		 */
+
+ p2p_on(wl) = true;
+ err = wl_cfgp2p_enable_discovery(wl, ndev, NULL, 0);
+
+ if (unlikely(err)) {
+ goto exit;
+ }
+ }
+ if (p2p_is_on(wl))
+ wl_cfgp2p_discover_listen(wl, target_channel, duration);
+
+
+exit:
+ return err;
+}
+
+static s32
+wl_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy, struct net_device *dev,
+ u64 cookie)
+{
+ s32 err = 0;
+	WL_DBG((" enter, netdev_ifidx: %d\n", dev->ifindex));
+ return err;
+}
+static s32
+wl_cfg80211_send_pending_tx_act_frm(struct wl_priv *wl)
+{
+ wl_af_params_t *tx_act_frm;
+ struct net_device *dev = wl->afx_hdl->dev;
+ if (!p2p_is_on(wl))
+ return -1;
+
+ if (dev == wl->p2p_net) {
+ dev = wl_to_prmry_ndev(wl);
+ }
+
+ tx_act_frm = wl->afx_hdl->pending_tx_act_frm;
+ WL_DBG(("Sending the action frame\n"));
+ wl->afx_hdl->pending_tx_act_frm = NULL;
+ if (tx_act_frm != NULL) {
+ /* Suspend P2P discovery's search-listen to prevent it from
+ * starting a scan or changing the channel.
+ */
+ wl_clr_drv_status(wl, SENDING_ACT_FRM, wl->afx_hdl->dev);
+ wl_clr_drv_status(wl, SCANNING, wl->afx_hdl->dev);
+ wl_notify_escan_complete(wl, dev, true, true);
+ wl_cfgp2p_discover_enable_search(wl, false);
+ tx_act_frm->channel = wl->afx_hdl->peer_chan;
+ wl->afx_hdl->ack_recv = (wl_cfgp2p_tx_action_frame(wl, dev,
+ tx_act_frm, wl->afx_hdl->bssidx)) ? false : true;
+ }
+ return 0;
+}
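+/* Work handler for the action-frame peer search: run
+ * wl_cfgp2p_act_frm_search() on the device/bsscfg recorded in wl->afx_hdl.
+ */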
+static void
+wl_cfg80211_afx_handler(struct work_struct *work)
+{
+ struct afx_hdl *afx_instance;
+ struct wl_priv *wl = wlcfg_drv_priv;
+ afx_instance = container_of(work, struct afx_hdl, work);
+ if (afx_instance != NULL) {
+ wl_cfgp2p_act_frm_search(wl, wl->afx_hdl->dev,
+ wl->afx_hdl->bssidx, 0);
+ }
+}
+
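+/* Search for the peer's listen channel by repeatedly scheduling the
+ * action-frame search work, then transmit the pending action frame on the
+ * channel that was found. Returns true if the frame was ACKed.
+ */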
+static bool
+wl_cfg80211_send_at_common_channel(struct wl_priv *wl,
+ struct net_device *dev,
+ wl_af_params_t *af_params)
+{
+	WL_DBG((" enter\n"));
+ /* initialize afx_hdl */
+ wl->afx_hdl->pending_tx_act_frm = af_params;
+ wl->afx_hdl->bssidx = wl_cfgp2p_find_idx(wl, dev);
+ wl->afx_hdl->dev = dev;
+ wl->afx_hdl->retry = 0;
+ wl->afx_hdl->peer_chan = WL_INVALID;
+ wl->afx_hdl->ack_recv = false;
+ memcpy(wl->afx_hdl->pending_tx_dst_addr.octet,
+ af_params->action_frame.da.octet,
+ sizeof(wl->afx_hdl->pending_tx_dst_addr.octet));
+ /* Loop to wait until we have sent the pending tx action frame or the
+ * pending action frame tx is cancelled.
+ */
+ while ((wl->afx_hdl->retry < WL_CHANNEL_SYNC_RETRY) &&
+ (wl->afx_hdl->peer_chan == WL_INVALID)) {
+ wl_set_drv_status(wl, SENDING_ACT_FRM, dev);
+ wl_set_drv_status(wl, SCANNING, dev);
+ WL_DBG(("Scheduling the action frame for sending.. retry %d\n",
+ wl->afx_hdl->retry));
+ /* Do find_peer_for_action */
+ schedule_work(&wl->afx_hdl->work);
+ wait_for_completion(&wl->act_frm_scan);
+ wl->afx_hdl->retry++;
+ }
+ if (wl->afx_hdl->peer_chan != WL_INVALID)
+ wl_cfg80211_send_pending_tx_act_frm(wl);
+ else {
+ WL_ERR(("Couldn't find the peer after %d retries\n",
+ wl->afx_hdl->retry));
+ }
+ wl->afx_hdl->dev = NULL;
+ wl->afx_hdl->bssidx = WL_INVALID;
+ wl_clr_drv_status(wl, SENDING_ACT_FRM, dev);
+ if (wl->afx_hdl->ack_recv)
+ return true; /* ACK */
+ else
+ return false; /* NO ACK */
+}
+
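+/* cfg80211 mgmt_tx handler. Probe responses only refresh the saved vendor
+ * IEs, deauth/disassoc frames are turned into a
+ * WLC_SCB_DEAUTHENTICATE_FOR_REASON ioctl, and action frames are wrapped in
+ * a wl_af_params_t and sent either through the common-channel search or
+ * directly with wl_cfgp2p_tx_action_frame().
+ */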
+static s32
+wl_cfg80211_mgmt_tx(struct wiphy *wiphy, struct net_device *ndev,
+ struct ieee80211_channel *channel, bool offchan,
+ enum nl80211_channel_type channel_type,
+ bool channel_type_valid, unsigned int wait,
+ const u8* buf, size_t len,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+ bool no_cck,
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)
+ bool dont_wait_for_ack,
+#endif
+ u64 *cookie)
+{
+ wl_action_frame_t *action_frame;
+ wl_af_params_t *af_params;
+ wifi_p2p_ie_t *p2p_ie;
+ wpa_ie_fixed_t *wps_ie;
+ scb_val_t scb_val;
+ wifi_wfd_ie_t *wfd_ie;
+ const struct ieee80211_mgmt *mgmt;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct net_device *dev = NULL;
+ s32 err = BCME_OK;
+ s32 bssidx = 0;
+ u32 p2pie_len = 0;
+ u32 wpsie_len = 0;
+ u32 wfdie_len = 0;
+ u32 id;
+ u32 retry = 0;
+ bool ack = false;
+ wifi_p2p_pub_act_frame_t *act_frm = NULL;
+ wifi_p2p_action_frame_t *p2p_act_frm = NULL;
+ wifi_p2psd_gas_pub_act_frame_t *sd_act_frm = NULL;
+ s8 eabuf[ETHER_ADDR_STR_LEN];
+
+ WL_DBG(("Enter \n"));
+
+ if (ndev == wl->p2p_net) {
+ dev = wl_to_prmry_ndev(wl);
+ } else {
+		/* The TX request is for a valid ifidx; use it as is */
+ dev = ndev;
+ }
+
+ /* find bssidx based on ndev */
+ bssidx = wl_cfgp2p_find_idx(wl, dev);
+ if (bssidx == -1) {
+		WL_ERR(("Cannot find the bssidx for dev (%p)\n", dev));
+ return -ENODEV;
+ }
+ if (p2p_is_on(wl)) {
+ /* Suspend P2P discovery search-listen to prevent it from changing the
+ * channel.
+ */
+ if ((err = wl_cfgp2p_discover_enable_search(wl, false)) < 0) {
+ WL_ERR(("Can not disable discovery mode\n"));
+ return -EFAULT;
+ }
+ }
+ *cookie = 0;
+ id = wl->send_action_id++;
+ if (id == 0)
+ id = wl->send_action_id++;
+ *cookie = id;
+ mgmt = (const struct ieee80211_mgmt *)buf;
+ if (ieee80211_is_mgmt(mgmt->frame_control)) {
+ if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+ s32 ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
+ s32 ie_len = len - ie_offset;
+ if ((p2p_ie = wl_cfgp2p_find_p2pie((u8 *)(buf + ie_offset), ie_len))
+ != NULL) {
+ /* Total length of P2P Information Element */
+ p2pie_len = p2p_ie->len + sizeof(p2p_ie->len) + sizeof(p2p_ie->id);
+ }
+ if ((wfd_ie = wl_cfgp2p_find_wfdie((u8 *)(buf + ie_offset), ie_len))
+ != NULL) {
+ /* Total length of WFD Information Element */
+ wfdie_len = wfd_ie->len + sizeof(wfd_ie->len) + sizeof(wfd_ie->id);
+ }
+ if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)(buf + ie_offset), ie_len))
+ != NULL) {
+				/* The supplicant orders the vendor IEs as 1) WPS IE,
+				 * then 2) P2P IE, so it is safe to start saving IEs
+				 * from the start address of the WPS IE
+				 */
+ wpsie_len = wps_ie->length + sizeof(wps_ie->length) +
+ sizeof(wps_ie->tag);
+ wl_cfgp2p_set_management_ie(wl, dev, bssidx,
+ VNDR_IE_PRBRSP_FLAG,
+ (u8 *)wps_ie, wpsie_len + p2pie_len + wfdie_len);
+ }
+ cfg80211_mgmt_tx_status(ndev, *cookie, buf, len, true, GFP_KERNEL);
+ goto exit;
+ } else if (ieee80211_is_disassoc(mgmt->frame_control) ||
+ ieee80211_is_deauth(mgmt->frame_control)) {
+ memcpy(scb_val.ea.octet, mgmt->da, ETH_ALEN);
+ scb_val.val = mgmt->u.disassoc.reason_code;
+ wldev_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scb_val,
+ sizeof(scb_val_t), true);
+ WL_DBG(("Disconnect STA : %s\n",
+ bcm_ether_ntoa((const struct ether_addr *)mgmt->da, eabuf)));
+ cfg80211_mgmt_tx_status(ndev, *cookie, buf, len, true, GFP_KERNEL);
+ goto exit;
+
+ } else if (ieee80211_is_action(mgmt->frame_control)) {
+ /* Abort the dwell time of any previous off-channel
+ * action frame that may be still in effect. Sending
+ * off-channel action frames relies on the driver's
+ * scan engine. If a previous off-channel action frame
+ * tx is still in progress (including the dwell time),
+ * then this new action frame will not be sent out.
+ */
+ wl_notify_escan_complete(wl, dev, true, true);
+
+ }
+
+ } else {
+ WL_ERR(("Driver only allows MGMT packet type\n"));
+ goto exit;
+ }
+
+ af_params = (wl_af_params_t *) kzalloc(WL_WIFI_AF_PARAMS_SIZE, GFP_KERNEL);
+
+ if (af_params == NULL)
+ {
+ WL_ERR(("unable to allocate frame\n"));
+ return -ENOMEM;
+ }
+
+ action_frame = &af_params->action_frame;
+
+ /* Add the packet Id */
+ action_frame->packetId = *cookie;
+ WL_DBG(("action frame %d\n", action_frame->packetId));
+ /* Add BSSID */
+ memcpy(&action_frame->da, &mgmt->da[0], ETHER_ADDR_LEN);
+ memcpy(&af_params->BSSID, &mgmt->bssid[0], ETHER_ADDR_LEN);
+
+	/* Add the length, excluding the 802.11 management header */
+ action_frame->len = len - DOT11_MGMT_HDR_LEN;
+ WL_DBG(("action_frame->len: %d\n", action_frame->len));
+
+ /* Add the channel */
+ af_params->channel =
+ ieee80211_frequency_to_channel(channel->center_freq);
+
+ if (channel->band == IEEE80211_BAND_5GHZ) {
+ WL_DBG(("5GHz channel %d", af_params->channel));
+ err = wldev_ioctl(dev, WLC_SET_CHANNEL,
+ &af_params->channel, sizeof(af_params->channel), true);
+ if (err < 0) {
+ WL_ERR(("WLC_SET_CHANNEL error %d\n", err));
+ }
+ }
+
+	/* Add the dwell time:
+	 * time to stay off-channel waiting for a response action frame
+	 * after transmitting a GO Negotiation action frame
+	 */
+ af_params->dwell_time = WL_DWELL_TIME;
+
+ memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN], action_frame->len);
+ if (wl_cfgp2p_is_pub_action(action_frame->data, action_frame->len)) {
+ act_frm = (wifi_p2p_pub_act_frame_t *) (action_frame->data);
+ WL_DBG(("P2P PUB action_frame->len: %d chan %d category %d subtype %d\n",
+ action_frame->len, af_params->channel,
+ act_frm->category, act_frm->subtype));
+ } else if (wl_cfgp2p_is_p2p_action(action_frame->data, action_frame->len)) {
+ p2p_act_frm = (wifi_p2p_action_frame_t *) (action_frame->data);
+ WL_DBG(("P2P action_frame->len: %d chan %d category %d subtype %d\n",
+ action_frame->len, af_params->channel,
+ p2p_act_frm->category, p2p_act_frm->subtype));
+ } else if (wl_cfgp2p_is_gas_action(action_frame->data, action_frame->len)) {
+ sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *) (action_frame->data);
+ WL_DBG(("Service Discovery action_frame->len: %d chan %d category %d action %d\n",
+ action_frame->len, af_params->channel,
+ sd_act_frm->category, sd_act_frm->action));
+
+ }
+ wl_cfgp2p_print_actframe(true, action_frame->data, action_frame->len);
+	/*
+	 * To make sure the action frame is sent successfully, turn off MPC
+	 */
+
+ if (act_frm && ((act_frm->subtype == P2P_PAF_GON_REQ) ||
+ (act_frm->subtype == P2P_PAF_GON_RSP) ||
+ (act_frm->subtype == P2P_PAF_GON_CONF) ||
+ (act_frm->subtype == P2P_PAF_PROVDIS_REQ))) {
+ wldev_iovar_setint(dev, "mpc", 0);
+ }
+
+ if (act_frm && act_frm->subtype == P2P_PAF_DEVDIS_REQ) {
+ af_params->dwell_time = WL_LONG_DWELL_TIME;
+ } else if (act_frm &&
+ (act_frm->subtype == P2P_PAF_PROVDIS_REQ ||
+ act_frm->subtype == P2P_PAF_PROVDIS_RSP ||
+ act_frm->subtype == P2P_PAF_GON_RSP)) {
+ af_params->dwell_time = WL_MED_DWELL_TIME;
+ }
+
+ if (IS_P2P_SOCIAL(af_params->channel) &&
+ (IS_P2P_PUB_ACT_REQ(act_frm, action_frame->len) ||
+ IS_GAS_REQ(sd_act_frm, action_frame->len)) &&
+ wl_to_p2p_bss_saved_ie(wl, P2PAPI_BSSCFG_DEVICE).p2p_probe_req_ie_len) {
+		/* Channel offload requires a P2P IE in the probe request;
+		 * otherwise wl_cfgp2p_tx_action_frame() is used directly.
+		 * Channel offload for the action request frame:
+		 */
+ ack = wl_cfg80211_send_at_common_channel(wl, dev, af_params);
+ } else {
+ ack = (wl_cfgp2p_tx_action_frame(wl, dev, af_params, bssidx)) ? false : true;
+ if (!ack) {
+ if (wl_to_p2p_bss_saved_ie(wl, P2PAPI_BSSCFG_DEVICE).p2p_probe_req_ie_len) {
+				/* If no ACK was received, the peer is probably on its
+				 * listen channel, so we have to find the peer and send
+				 * the action frame on that channel.
+				 */
+ ack = wl_cfg80211_send_at_common_channel(wl, dev, af_params);
+ } else {
+ for (retry = 0; retry < WL_CHANNEL_SYNC_RETRY; retry++) {
+ ack = (wl_cfgp2p_tx_action_frame(wl, dev,
+ af_params, bssidx)) ? false : true;
+ if (ack)
+ break;
+ }
+
+ }
+
+ }
+
+ }
+ cfg80211_mgmt_tx_status(ndev, *cookie, buf, len, ack, GFP_KERNEL);
+ if (act_frm && act_frm->subtype == P2P_PAF_GON_CONF) {
+ wldev_iovar_setint(dev, "mpc", 1);
+ }
+ kfree(af_params);
+exit:
+ return err;
+}
+
+
+static void
+wl_cfg80211_mgmt_frame_register(struct wiphy *wiphy, struct net_device *dev,
+ u16 frame_type, bool reg)
+{
+
+ WL_DBG(("%s: frame_type: %x, reg: %d\n", __func__, frame_type, reg));
+
+ if (frame_type != (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ))
+ return;
+
+ return;
+}
+
+
+static s32
+wl_cfg80211_change_bss(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct bss_parameters *params)
+{
+ if (params->use_cts_prot >= 0) {
+ }
+
+ if (params->use_short_preamble >= 0) {
+ }
+
+ if (params->use_short_slot_time >= 0) {
+ }
+
+ if (params->basic_rates) {
+ }
+
+ if (params->ap_isolate >= 0) {
+ }
+
+ if (params->ht_opmode >= 0) {
+ }
+
+ return 0;
+}
+
+static s32
+wl_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type)
+{
+ s32 channel;
+ s32 err = BCME_OK;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+
+ if (wl->p2p_net == dev) {
+ dev = wl_to_prmry_ndev(wl);
+ }
+ channel = ieee80211_frequency_to_channel(chan->center_freq);
+ WL_DBG(("netdev_ifidx(%d), chan_type(%d) target channel(%d) \n",
+ dev->ifindex, channel_type, channel));
+ err = wldev_ioctl(dev, WLC_SET_CHANNEL, &channel, sizeof(channel), true);
+ if (err < 0) {
+		WL_ERR(("WLC_SET_CHANNEL error %d: the chip may not support this channel\n", err));
+ }
+ return err;
+}
+
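+/* Program open (no) security on the given bsscfg: open authentication,
+ * wsec 0 and WPA_AUTH_NONE.
+ */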
+static s32
+wl_validate_opensecurity(struct net_device *dev, s32 bssidx)
+{
+ s32 err = BCME_OK;
+
+ /* set auth */
+ err = wldev_iovar_setint_bsscfg(dev, "auth", 0, bssidx);
+ if (err < 0) {
+ WL_ERR(("auth error %d\n", err));
+ return BCME_ERROR;
+ }
+ /* set wsec */
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", 0, bssidx);
+ if (err < 0) {
+ WL_ERR(("wsec error %d\n", err));
+ return BCME_ERROR;
+ }
+ /* set upper-layer auth */
+ err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", WPA_AUTH_NONE, bssidx);
+ if (err < 0) {
+ WL_ERR(("wpa_auth error %d\n", err));
+ return BCME_ERROR;
+ }
+
+ return 0;
+}
+
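+/* Parse an RSN (WPA2) IE supplied for AP/GO mode and program the matching
+ * auth, wsec and wpa_auth settings on the bsscfg.
+ */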
+static s32
+wl_validate_wpa2ie(struct net_device *dev, bcm_tlv_t *wpa2ie, s32 bssidx)
+{
+ s32 len = 0;
+ s32 err = BCME_OK;
+ u16 auth = 0; /* d11 open authentication */
+ u32 wsec;
+ u32 pval = 0;
+ u32 gval = 0;
+ u32 wpa_auth = 0;
+ u8* tmp;
+ wpa_suite_mcast_t *mcast;
+ wpa_suite_ucast_t *ucast;
+ wpa_suite_auth_key_mgmt_t *mgmt;
+ if (wpa2ie == NULL)
+ goto exit;
+
+ WL_DBG(("Enter \n"));
+ len = wpa2ie->len;
+ /* check the mcast cipher */
+ mcast = (wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
+ tmp = mcast->oui;
+ switch (tmp[DOT11_OUI_LEN]) {
+ case WPA_CIPHER_NONE:
+ gval = 0;
+ break;
+ case WPA_CIPHER_WEP_40:
+ case WPA_CIPHER_WEP_104:
+ gval = WEP_ENABLED;
+ break;
+ case WPA_CIPHER_TKIP:
+ gval = TKIP_ENABLED;
+ break;
+ case WPA_CIPHER_AES_CCM:
+ gval = AES_ENABLED;
+ break;
+#ifdef BCMWAPI_WPI
+ case WAPI_CIPHER_SMS4:
+ gval = SMS4_ENABLED;
+ break;
+#endif
+ default:
+ WL_ERR(("No Security Info\n"));
+ break;
+ }
+ len -= WPA_SUITE_LEN;
+ /* check the unicast cipher */
+ ucast = (wpa_suite_ucast_t *)&mcast[1];
+ ltoh16_ua(&ucast->count);
+ tmp = ucast->list[0].oui;
+ switch (tmp[DOT11_OUI_LEN]) {
+ case WPA_CIPHER_NONE:
+ pval = 0;
+ break;
+ case WPA_CIPHER_WEP_40:
+ case WPA_CIPHER_WEP_104:
+ pval = WEP_ENABLED;
+ break;
+ case WPA_CIPHER_TKIP:
+ pval = TKIP_ENABLED;
+ break;
+ case WPA_CIPHER_AES_CCM:
+ pval = AES_ENABLED;
+ break;
+#ifdef BCMWAPI_WPI
+ case WAPI_CIPHER_SMS4:
+ pval = SMS4_ENABLED;
+ break;
+#endif
+ default:
+ WL_ERR(("No Security Info\n"));
+ }
+	/* For WPS, set SES_OW_ENABLED */
+ wsec = (pval | gval | SES_OW_ENABLED);
+ /* check the AKM */
+ mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[1];
+ ltoh16_ua(&mgmt->count);
+ tmp = (u8 *)&mgmt->list[0];
+ switch (tmp[DOT11_OUI_LEN]) {
+ case RSN_AKM_NONE:
+ wpa_auth = WPA_AUTH_NONE;
+ break;
+ case RSN_AKM_UNSPECIFIED:
+ wpa_auth = WPA2_AUTH_UNSPECIFIED;
+ break;
+ case RSN_AKM_PSK:
+ wpa_auth = WPA2_AUTH_PSK;
+ break;
+ default:
+ WL_ERR(("No Key Mgmt Info\n"));
+ }
+ /* set auth */
+ err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
+ if (err < 0) {
+ WL_ERR(("auth error %d\n", err));
+ return BCME_ERROR;
+ }
+ /* set wsec */
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+ if (err < 0) {
+ WL_ERR(("wsec error %d\n", err));
+ return BCME_ERROR;
+ }
+ /* set upper-layer auth */
+ err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx);
+ if (err < 0) {
+ WL_ERR(("wpa_auth error %d\n", err));
+ return BCME_ERROR;
+ }
+exit:
+ return 0;
+}
+
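+/* Parse a WPA IE, walking the multicast, unicast and AKM suite lists, and
+ * program the resulting auth, wsec and wpa_auth settings on the bsscfg.
+ */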
+static s32
+wl_validate_wpaie(struct net_device *dev, wpa_ie_fixed_t *wpaie, s32 bssidx)
+{
+ wpa_suite_mcast_t *mcast;
+ wpa_suite_ucast_t *ucast;
+ wpa_suite_auth_key_mgmt_t *mgmt;
+ u16 auth = 0; /* d11 open authentication */
+ u16 count;
+ s32 err = BCME_OK;
+ s32 len = 0;
+ u32 i;
+ u32 wsec;
+ u32 pval = 0;
+ u32 gval = 0;
+ u32 wpa_auth = 0;
+ u32 tmp = 0;
+
+ if (wpaie == NULL)
+ goto exit;
+ WL_DBG(("Enter \n"));
+ len = wpaie->length; /* value length */
+ len -= WPA_IE_TAG_FIXED_LEN;
+ /* check for multicast cipher suite */
+ if (len < WPA_SUITE_LEN) {
+ WL_INFO(("no multicast cipher suite\n"));
+ goto exit;
+ }
+
+ /* pick up multicast cipher */
+ mcast = (wpa_suite_mcast_t *)&wpaie[1];
+ len -= WPA_SUITE_LEN;
+ if (!bcmp(mcast->oui, WPA_OUI, WPA_OUI_LEN)) {
+ if (IS_WPA_CIPHER(mcast->type)) {
+ tmp = 0;
+ switch (mcast->type) {
+ case WPA_CIPHER_NONE:
+ tmp = 0;
+ break;
+ case WPA_CIPHER_WEP_40:
+ case WPA_CIPHER_WEP_104:
+ tmp = WEP_ENABLED;
+ break;
+ case WPA_CIPHER_TKIP:
+ tmp = TKIP_ENABLED;
+ break;
+ case WPA_CIPHER_AES_CCM:
+ tmp = AES_ENABLED;
+ break;
+ default:
+ WL_ERR(("No Security Info\n"));
+ }
+ gval |= tmp;
+ }
+ }
+ /* Check for unicast suite(s) */
+ if (len < WPA_IE_SUITE_COUNT_LEN) {
+ WL_INFO(("no unicast suite\n"));
+ goto exit;
+ }
+ /* walk thru unicast cipher list and pick up what we recognize */
+ ucast = (wpa_suite_ucast_t *)&mcast[1];
+ count = ltoh16_ua(&ucast->count);
+ len -= WPA_IE_SUITE_COUNT_LEN;
+ for (i = 0; i < count && len >= WPA_SUITE_LEN;
+ i++, len -= WPA_SUITE_LEN) {
+ if (!bcmp(ucast->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
+ if (IS_WPA_CIPHER(ucast->list[i].type)) {
+ tmp = 0;
+ switch (ucast->list[i].type) {
+ case WPA_CIPHER_NONE:
+ tmp = 0;
+ break;
+ case WPA_CIPHER_WEP_40:
+ case WPA_CIPHER_WEP_104:
+ tmp = WEP_ENABLED;
+ break;
+ case WPA_CIPHER_TKIP:
+ tmp = TKIP_ENABLED;
+ break;
+ case WPA_CIPHER_AES_CCM:
+ tmp = AES_ENABLED;
+ break;
+ default:
+ WL_ERR(("No Security Info\n"));
+ }
+ pval |= tmp;
+ }
+ }
+ }
+ len -= (count - i) * WPA_SUITE_LEN;
+ /* Check for auth key management suite(s) */
+ if (len < WPA_IE_SUITE_COUNT_LEN) {
+ WL_INFO((" no auth key mgmt suite\n"));
+ goto exit;
+ }
+ /* walk thru auth management suite list and pick up what we recognize */
+ mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[count];
+ count = ltoh16_ua(&mgmt->count);
+ len -= WPA_IE_SUITE_COUNT_LEN;
+ for (i = 0; i < count && len >= WPA_SUITE_LEN;
+ i++, len -= WPA_SUITE_LEN) {
+ if (!bcmp(mgmt->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
+ if (IS_WPA_AKM(mgmt->list[i].type)) {
+ tmp = 0;
+ switch (mgmt->list[i].type) {
+ case RSN_AKM_NONE:
+ tmp = WPA_AUTH_NONE;
+ break;
+ case RSN_AKM_UNSPECIFIED:
+ tmp = WPA_AUTH_UNSPECIFIED;
+ break;
+ case RSN_AKM_PSK:
+ tmp = WPA_AUTH_PSK;
+ break;
+ default:
+ WL_ERR(("No Key Mgmt Info\n"));
+ }
+ wpa_auth |= tmp;
+ }
+ }
+
+ }
+	/* For WPS, set SES_OW_ENABLED */
+ wsec = (pval | gval | SES_OW_ENABLED);
+ /* set auth */
+ err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
+ if (err < 0) {
+ WL_ERR(("auth error %d\n", err));
+ return BCME_ERROR;
+ }
+ /* set wsec */
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+ if (err < 0) {
+ WL_ERR(("wsec error %d\n", err));
+ return BCME_ERROR;
+ }
+ /* set upper-layer auth */
+ err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx);
+ if (err < 0) {
+ WL_ERR(("wpa_auth error %d\n", err));
+ return BCME_ERROR;
+ }
+exit:
+ return 0;
+}
+
+#if 0
+static s32
+wl_cfg80211_add_set_beacon(struct wiphy *wiphy, struct net_device *dev,
+ struct beacon_parameters *info)
+{
+ s32 err = BCME_OK;
+ bcm_tlv_t *ssid_ie;
+ wlc_ssid_t ssid;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct wl_join_params join_params;
+ wpa_ie_fixed_t *wps_ie;
+ wpa_ie_fixed_t *wpa_ie;
+ bcm_tlv_t *wpa2_ie;
+ wifi_p2p_ie_t *p2p_ie;
+ wifi_wfd_ie_t *wfd_ie;
+ bool is_bssup = false;
+ bool update_bss = false;
+ bool pbc = false;
+ u16 wpsie_len = 0;
+ u16 p2pie_len = 0;
+ u32 wfdie_len = 0;
+ u8 beacon_ie[IE_MAX_LEN];
+ s32 ie_offset = 0;
+ s32 bssidx = 0;
+ s32 infra = 1;
+ s32 join_params_size = 0;
+ s32 ap = 0;
+ WL_DBG(("interval (%d) dtim_period (%d) head_len (%d) tail_len (%d)\n",
+ info->interval, info->dtim_period, info->head_len, info->tail_len));
+
+ if (wl->p2p_net == dev) {
+ dev = wl_to_prmry_ndev(wl);
+ }
+
+ bssidx = wl_cfgp2p_find_idx(wl, dev);
+ if (p2p_is_on(wl) &&
+ (bssidx == wl_to_p2p_bss_bssidx(wl,
+ P2PAPI_BSSCFG_CONNECTION))) {
+ memset(beacon_ie, 0, sizeof(beacon_ie));
+		/* We don't need to set a beacon for the P2P GO,
+		 * but we do need to parse the SSID from beacon_parameters
+		 * because there is no other way to set it
+		 */
+ ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
+ /* find the SSID */
+ if ((ssid_ie = bcm_parse_tlvs((u8 *)&info->head[ie_offset],
+ info->head_len - ie_offset,
+ DOT11_MNG_SSID_ID)) != NULL) {
+ memcpy(wl->p2p->ssid.SSID, ssid_ie->data, ssid_ie->len);
+ wl->p2p->ssid.SSID_len = ssid_ie->len;
+ WL_DBG(("SSID (%s) in Head \n", ssid_ie->data));
+
+ } else {
+ WL_ERR(("No SSID in beacon \n"));
+ }
+
+ /* find the WPSIE */
+ if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)info->tail, info->tail_len)) != NULL) {
+ wpsie_len = wps_ie->length + WPA_RSN_IE_TAG_FIXED_LEN;
+ /*
+ * Should be compared with saved ie before saving it
+ */
+ wl_validate_wps_ie((char *) wps_ie, &pbc);
+ memcpy(beacon_ie, wps_ie, wpsie_len);
+ } else {
+ WL_ERR(("No WPSIE in beacon \n"));
+ }
+
+
+ /* find the P2PIE */
+ if ((p2p_ie = wl_cfgp2p_find_p2pie((u8 *)info->tail, info->tail_len)) != NULL) {
+ /* Total length of P2P Information Element */
+ p2pie_len = p2p_ie->len + sizeof(p2p_ie->len) + sizeof(p2p_ie->id);
+ memcpy(&beacon_ie[wpsie_len], p2p_ie, p2pie_len);
+
+ } else {
+ WL_ERR(("No P2PIE in beacon \n"));
+ }
+
+ /* find the WFD IEs */
+ if ((wfd_ie = wl_cfgp2p_find_wfdie((u8 *)info->tail, info->tail_len)) != NULL) {
+ /* Total length of P2P Information Element */
+ wfdie_len = wfd_ie->len + sizeof(wfd_ie->len) + sizeof(wfd_ie->id);
+ if ((wpsie_len + p2pie_len + wfdie_len) < IE_MAX_LEN) {
+ memcpy(&beacon_ie[wpsie_len + p2pie_len], wfd_ie, wfdie_len);
+ } else {
+ WL_ERR(("Found WFD IE but there is no space, (%d)(%d)(%d)\n",
+ wpsie_len, p2pie_len, wfdie_len));
+ wfdie_len = 0;
+ }
+ } else {
+ WL_ERR(("No WFDIE in beacon \n"));
+ }
+		/* enable the WLC_E_PROBREQ_MSG event to respond to probe requests from STAs */
+ wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, pbc);
+ wl_cfgp2p_set_management_ie(wl, dev, bssidx, VNDR_IE_BEACON_FLAG,
+ beacon_ie, wpsie_len + p2pie_len + wfdie_len);
+
+ /* find the RSN_IE */
+ if ((wpa2_ie = bcm_parse_tlvs((u8 *)info->tail, info->tail_len,
+ DOT11_MNG_RSN_ID)) != NULL) {
+ WL_DBG((" WPA2 IE is found\n"));
+ }
+ is_bssup = wl_cfgp2p_bss_isup(dev, bssidx);
+
+ if (!is_bssup && (wpa2_ie != NULL)) {
+ wldev_iovar_setint(dev, "mpc", 0);
+ if ((err = wl_validate_wpa2ie(dev, wpa2_ie, bssidx)) < 0) {
+ WL_ERR(("WPA2 IE parsing error"));
+ goto exit;
+ }
+ err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true);
+ if (err < 0) {
+ WL_ERR(("SET INFRA error %d\n", err));
+ goto exit;
+ }
+ err = wldev_iovar_setbuf_bsscfg(dev, "ssid", &wl->p2p->ssid,
+ sizeof(wl->p2p->ssid), wl->ioctl_buf, WLC_IOCTL_MAXLEN,
+ bssidx, &wl->ioctl_buf_sync);
+ if (err < 0) {
+ WL_ERR(("GO SSID setting error %d\n", err));
+ goto exit;
+ }
+ if ((err = wl_cfgp2p_bss(wl, dev, bssidx, 1)) < 0) {
+ WL_ERR(("GO Bring up error %d\n", err));
+ goto exit;
+ }
+ }
+ } else if (wl_get_drv_status(wl, AP_CREATING, dev)) {
+ ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
+ ap = 1;
+ /* find the SSID */
+ if ((ssid_ie = bcm_parse_tlvs((u8 *)&info->head[ie_offset],
+ info->head_len - ie_offset,
+ DOT11_MNG_SSID_ID)) != NULL) {
+ memset(&ssid, 0, sizeof(wlc_ssid_t));
+ memcpy(ssid.SSID, ssid_ie->data, ssid_ie->len);
+ WL_DBG(("SSID is (%s) in Head \n", ssid.SSID));
+ ssid.SSID_len = ssid_ie->len;
+ wldev_iovar_setint(dev, "mpc", 0);
+ wldev_ioctl(dev, WLC_DOWN, &ap, sizeof(s32), true);
+ wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true);
+ if ((err = wldev_ioctl(dev, WLC_SET_AP, &ap, sizeof(s32), true)) < 0) {
+ WL_ERR(("setting AP mode failed %d \n", err));
+ return err;
+ }
+ /* find the RSN_IE */
+ if ((wpa2_ie = bcm_parse_tlvs((u8 *)info->tail, info->tail_len,
+ DOT11_MNG_RSN_ID)) != NULL) {
+ WL_DBG((" WPA2 IE is found\n"));
+ }
+ /* find the WPA_IE */
+ if ((wpa_ie = wl_cfgp2p_find_wpaie((u8 *)info->tail,
+ info->tail_len)) != NULL) {
+ WL_DBG((" WPA IE is found\n"));
+ }
+ if ((wpa_ie != NULL || wpa2_ie != NULL)) {
+ if (wl_validate_wpa2ie(dev, wpa2_ie, bssidx) < 0 ||
+ wl_validate_wpaie(dev, wpa_ie, bssidx) < 0) {
+ wl->ap_info->security_mode = false;
+ return BCME_ERROR;
+ }
+ wl->ap_info->security_mode = true;
+ if (wl->ap_info->rsn_ie) {
+ kfree(wl->ap_info->rsn_ie);
+ wl->ap_info->rsn_ie = NULL;
+ }
+ if (wl->ap_info->wpa_ie) {
+ kfree(wl->ap_info->wpa_ie);
+ wl->ap_info->wpa_ie = NULL;
+ }
+ if (wl->ap_info->wps_ie) {
+ kfree(wl->ap_info->wps_ie);
+ wl->ap_info->wps_ie = NULL;
+ }
+ if (wpa_ie != NULL) {
+ /* WPAIE */
+ wl->ap_info->rsn_ie = NULL;
+ wl->ap_info->wpa_ie = kmemdup(wpa_ie,
+ wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ } else {
+ /* RSNIE */
+ wl->ap_info->wpa_ie = NULL;
+ wl->ap_info->rsn_ie = kmemdup(wpa2_ie,
+ wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ }
+ } else {
+ wl_validate_opensecurity(dev, bssidx);
+ wl->ap_info->security_mode = false;
+ }
+ /* find the WPSIE */
+ if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)info->tail,
+ info->tail_len)) != NULL) {
+				wpsie_len = wps_ie->length + WPA_RSN_IE_TAG_FIXED_LEN;
+ /*
+ * Should be compared with saved ie before saving it
+ */
+ wl_validate_wps_ie((char *) wps_ie, &pbc);
+ memcpy(beacon_ie, wps_ie, wpsie_len);
+ wl_cfgp2p_set_management_ie(wl, dev, bssidx, VNDR_IE_BEACON_FLAG,
+ beacon_ie, wpsie_len);
+ wl->ap_info->wps_ie = kmemdup(wps_ie, wpsie_len, GFP_KERNEL);
+				/* enable the WLC_E_PROBREQ_MSG event to respond to probe requests from STAs */
+ wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, pbc);
+ } else {
+ WL_DBG(("No WPSIE in beacon \n"));
+ }
+ if (info->interval) {
+ if ((err = wldev_ioctl(dev, WLC_SET_BCNPRD,
+ &info->interval, sizeof(s32), true)) < 0) {
+ WL_ERR(("Beacon Interval Set Error, %d\n", err));
+ return err;
+ }
+ }
+ if (info->dtim_period) {
+ if ((err = wldev_ioctl(dev, WLC_SET_DTIMPRD,
+ &info->dtim_period, sizeof(s32), true)) < 0) {
+ WL_ERR(("DTIM Interval Set Error, %d\n", err));
+ return err;
+ }
+ }
+ err = wldev_ioctl(dev, WLC_UP, &ap, sizeof(s32), true);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_UP error (%d)\n", err));
+ return err;
+ }
+ memset(&join_params, 0, sizeof(join_params));
+ /* join parameters starts with ssid */
+ join_params_size = sizeof(join_params.ssid);
+ memcpy(join_params.ssid.SSID, ssid.SSID, ssid.SSID_len);
+ join_params.ssid.SSID_len = htod32(ssid.SSID_len);
+ /* create softap */
+ if ((err = wldev_ioctl(dev, WLC_SET_SSID, &join_params,
+ join_params_size, true)) == 0) {
+ wl_clr_drv_status(wl, AP_CREATING, dev);
+ wl_set_drv_status(wl, AP_CREATED, dev);
+ }
+ }
+ } else if (wl_get_drv_status(wl, AP_CREATED, dev)) {
+ ap = 1;
+ /* find the WPSIE */
+ if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)info->tail, info->tail_len)) != NULL) {
+ wpsie_len = wps_ie->length + WPA_RSN_IE_TAG_FIXED_LEN;
+ /*
+ * Should be compared with saved ie before saving it
+ */
+ wl_validate_wps_ie((char *) wps_ie, &pbc);
+ memcpy(beacon_ie, wps_ie, wpsie_len);
+ wl_cfgp2p_set_management_ie(wl, dev, bssidx, VNDR_IE_BEACON_FLAG,
+ beacon_ie, wpsie_len);
+ if (wl->ap_info->wps_ie &&
+ memcmp(wl->ap_info->wps_ie, wps_ie, wpsie_len)) {
+ WL_DBG((" WPS IE is changed\n"));
+ kfree(wl->ap_info->wps_ie);
+ wl->ap_info->wps_ie = kmemdup(wps_ie, wpsie_len, GFP_KERNEL);
+				/* enable the WLC_E_PROBREQ_MSG event to respond to probe requests from STAs */
+ wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, pbc);
+ } else if (wl->ap_info->wps_ie == NULL) {
+ WL_DBG((" WPS IE is added\n"));
+ wl->ap_info->wps_ie = kmemdup(wps_ie, wpsie_len, GFP_KERNEL);
+				/* enable the WLC_E_PROBREQ_MSG event to respond to probe requests from STAs */
+ wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, pbc);
+ }
+ /* find the RSN_IE */
+ if ((wpa2_ie = bcm_parse_tlvs((u8 *)info->tail, info->tail_len,
+ DOT11_MNG_RSN_ID)) != NULL) {
+ WL_DBG((" WPA2 IE is found\n"));
+ }
+ /* find the WPA_IE */
+ if ((wpa_ie = wl_cfgp2p_find_wpaie((u8 *)info->tail,
+ info->tail_len)) != NULL) {
+ WL_DBG((" WPA IE is found\n"));
+ }
+ if ((wpa_ie != NULL || wpa2_ie != NULL)) {
+ if (!wl->ap_info->security_mode) {
+ /* change from open mode to security mode */
+ update_bss = true;
+ if (wpa_ie != NULL) {
+ wl->ap_info->wpa_ie = kmemdup(wpa_ie,
+ wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ } else {
+ wl->ap_info->rsn_ie = kmemdup(wpa2_ie,
+ wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ }
+ } else if (wl->ap_info->wpa_ie) {
+ /* change from WPA mode to WPA2 mode */
+ if (wpa2_ie != NULL) {
+ update_bss = true;
+ kfree(wl->ap_info->wpa_ie);
+ wl->ap_info->rsn_ie = kmemdup(wpa2_ie,
+ wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ wl->ap_info->wpa_ie = NULL;
+ }
+ else if (memcmp(wl->ap_info->wpa_ie,
+ wpa_ie, wpa_ie->length +
+ WPA_RSN_IE_TAG_FIXED_LEN)) {
+ kfree(wl->ap_info->wpa_ie);
+ update_bss = true;
+ wl->ap_info->wpa_ie = kmemdup(wpa_ie,
+ wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ wl->ap_info->rsn_ie = NULL;
+ }
+ } else {
+ /* change from WPA2 mode to WPA mode */
+ if (wpa_ie != NULL) {
+ update_bss = true;
+ kfree(wl->ap_info->rsn_ie);
+ wl->ap_info->rsn_ie = NULL;
+ wl->ap_info->wpa_ie = kmemdup(wpa_ie,
+ wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ } else if (memcmp(wl->ap_info->rsn_ie,
+ wpa2_ie, wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN)) {
+ update_bss = true;
+ kfree(wl->ap_info->rsn_ie);
+ wl->ap_info->rsn_ie = kmemdup(wpa2_ie,
+ wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ wl->ap_info->wpa_ie = NULL;
+ }
+ }
+ if (update_bss) {
+ wl->ap_info->security_mode = true;
+ wl_cfgp2p_bss(wl, dev, bssidx, 0);
+ if (wl_validate_wpa2ie(dev, wpa2_ie, bssidx) < 0 ||
+ wl_validate_wpaie(dev, wpa_ie, bssidx) < 0) {
+ return BCME_ERROR;
+ }
+ wl_cfgp2p_bss(wl, dev, bssidx, 1);
+ }
+ }
+ } else {
+ WL_ERR(("No WPSIE in beacon \n"));
+ }
+ }
+exit:
+ if (err)
+ wldev_iovar_setint(dev, "mpc", 1);
+ return err;
+}
+#endif
+
+static struct cfg80211_ops wl_cfg80211_ops = {
+ .add_virtual_intf = wl_cfg80211_add_virtual_iface,
+ .del_virtual_intf = wl_cfg80211_del_virtual_iface,
+ .change_virtual_intf = wl_cfg80211_change_virtual_iface,
+ .scan = wl_cfg80211_scan,
+ .set_wiphy_params = wl_cfg80211_set_wiphy_params,
+ .join_ibss = wl_cfg80211_join_ibss,
+ .leave_ibss = wl_cfg80211_leave_ibss,
+ .get_station = wl_cfg80211_get_station,
+ .set_tx_power = wl_cfg80211_set_tx_power,
+ .get_tx_power = wl_cfg80211_get_tx_power,
+ .add_key = wl_cfg80211_add_key,
+ .del_key = wl_cfg80211_del_key,
+ .get_key = wl_cfg80211_get_key,
+ .set_default_key = wl_cfg80211_config_default_key,
+ .set_default_mgmt_key = wl_cfg80211_config_default_mgmt_key,
+ .set_power_mgmt = wl_cfg80211_set_power_mgmt,
+ .connect = wl_cfg80211_connect,
+ .disconnect = wl_cfg80211_disconnect,
+ .suspend = wl_cfg80211_suspend,
+ .resume = wl_cfg80211_resume,
+ .set_pmksa = wl_cfg80211_set_pmksa,
+ .del_pmksa = wl_cfg80211_del_pmksa,
+ .flush_pmksa = wl_cfg80211_flush_pmksa,
+ .remain_on_channel = wl_cfg80211_remain_on_channel,
+ .cancel_remain_on_channel = wl_cfg80211_cancel_remain_on_channel,
+ .mgmt_tx = wl_cfg80211_mgmt_tx,
+ .mgmt_frame_register = wl_cfg80211_mgmt_frame_register,
+ .change_bss = wl_cfg80211_change_bss,
+ .set_channel = wl_cfg80211_set_channel,
+#if 0
+ .set_beacon = wl_cfg80211_add_set_beacon,
+ .add_beacon = wl_cfg80211_add_set_beacon,
+#endif
+};
+
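+/* Map the driver's internal WL_MODE_* values onto nl80211 interface types. */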
+s32 wl_mode_to_nl80211_iftype(s32 mode)
+{
+ s32 err = 0;
+
+ switch (mode) {
+ case WL_MODE_BSS:
+ return NL80211_IFTYPE_STATION;
+ case WL_MODE_IBSS:
+ return NL80211_IFTYPE_ADHOC;
+ case WL_MODE_AP:
+ return NL80211_IFTYPE_AP;
+ default:
+ return NL80211_IFTYPE_UNSPECIFIED;
+ }
+
+ return err;
+}
+
+static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *sdiofunc_dev)
+{
+ s32 err = 0;
+ wdev->wiphy =
+ wiphy_new(&wl_cfg80211_ops, sizeof(struct wl_priv));
+ if (unlikely(!wdev->wiphy)) {
+		WL_ERR(("Could not allocate wiphy device\n"));
+ err = -ENOMEM;
+ return err;
+ }
+ set_wiphy_dev(wdev->wiphy, sdiofunc_dev);
+ wdev->wiphy->max_scan_ie_len = WL_SCAN_IE_LEN_MAX;
+ /* Report how many SSIDs Driver can support per Scan request */
+ wdev->wiphy->max_scan_ssids = WL_SCAN_PARAMS_SSID_MAX;
+ wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
+ wdev->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION)
+ | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MONITOR);
+
+ wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
+
+ wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+ wdev->wiphy->cipher_suites = __wl_cipher_suites;
+ wdev->wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
+ wdev->wiphy->max_remain_on_channel_duration = 5000;
+ wdev->wiphy->mgmt_stypes = wl_cfg80211_default_mgmt_stypes;
+#ifndef WL_POWERSAVE_DISABLED
+ wdev->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+#else
+ wdev->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+#endif /* !WL_POWERSAVE_DISABLED */
+ wdev->wiphy->flags |= WIPHY_FLAG_NETNS_OK |
+ WIPHY_FLAG_4ADDR_AP |
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)
+ WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS |
+#endif
+ WIPHY_FLAG_4ADDR_STATION;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+ wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
+#endif
+	WL_DBG(("Registering custom regulatory\n"));
+ wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
+ wiphy_apply_custom_regulatory(wdev->wiphy, &brcm_regdom);
+ /* Now we can register wiphy with cfg80211 module */
+ err = wiphy_register(wdev->wiphy);
+ if (unlikely(err < 0)) {
+		WL_ERR(("Could not register wiphy device (%d)\n", err));
+ wiphy_free(wdev->wiphy);
+ }
+ return err;
+}
+
+static void wl_free_wdev(struct wl_priv *wl)
+{
+ struct wireless_dev *wdev = wl->wdev;
+ struct wiphy *wiphy;
+ if (!wdev) {
+ WL_ERR(("wdev is invalid\n"));
+ return;
+ }
+ wiphy = wdev->wiphy;
+ wiphy_unregister(wdev->wiphy);
+ wdev->wiphy->dev.parent = NULL;
+
+ wl_delete_all_netinfo(wl);
+ wiphy_free(wiphy);
+	/* Do NOT call any function after wiphy_free(): the driver's private
+	 * structure "wl", which is the private part of the wiphy, has just been
+	 * freed by wiphy_free().
+	 */
+}
+
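+/* Report every BSS in the firmware scan result list to cfg80211 via
+ * wl_inform_single_bss().
+ */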
+static s32 wl_inform_bss(struct wl_priv *wl)
+{
+ struct wl_scan_results *bss_list;
+ struct wl_bss_info *bi = NULL; /* must be initialized */
+ s32 err = 0;
+ s32 i;
+
+ bss_list = wl->bss_list;
+ WL_DBG(("scanned AP count (%d)\n", bss_list->count));
+ bi = next_bss(bss_list, bi);
+ for_each_bss(bss_list, bi, i) {
+ err = wl_inform_single_bss(wl, bi);
+ if (unlikely(err))
+ break;
+ }
+ return err;
+}
+
+static s32 wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi)
+{
+ struct wiphy *wiphy = wiphy_from_scan(wl);
+ struct ieee80211_mgmt *mgmt;
+ struct ieee80211_channel *channel;
+ struct ieee80211_supported_band *band;
+ struct wl_cfg80211_bss_info *notif_bss_info;
+ struct wl_scan_req *sr = wl_to_sr(wl);
+ struct beacon_proberesp *beacon_proberesp;
+ struct cfg80211_bss *cbss = NULL;
+ s32 mgmt_type;
+ s32 signal;
+ u32 freq;
+ s32 err = 0;
+
+ if (unlikely(dtoh32(bi->length) > WL_BSS_INFO_MAX)) {
+ WL_DBG(("Beacon is larger than buffer. Discarding\n"));
+ return err;
+ }
+ notif_bss_info = kzalloc(sizeof(*notif_bss_info) + sizeof(*mgmt)
+ - sizeof(u8) + WL_BSS_INFO_MAX, GFP_KERNEL);
+ if (unlikely(!notif_bss_info)) {
+ WL_ERR(("notif_bss_info alloc failed\n"));
+ return -ENOMEM;
+ }
+ mgmt = (struct ieee80211_mgmt *)notif_bss_info->frame_buf;
+ notif_bss_info->channel =
+ bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(wl_chspec_driver_to_host(bi->chanspec));
+
+ if (notif_bss_info->channel <= CH_MAX_2G_CHANNEL)
+ band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ else
+ band = wiphy->bands[IEEE80211_BAND_5GHZ];
+ if (!band) {
+ WL_ERR(("No valid band"));
+ kfree(notif_bss_info);
+ return -EINVAL;
+ }
+ notif_bss_info->rssi = dtoh16(bi->RSSI);
+ memcpy(mgmt->bssid, &bi->BSSID, ETHER_ADDR_LEN);
+ mgmt_type = wl->active_scan ?
+ IEEE80211_STYPE_PROBE_RESP : IEEE80211_STYPE_BEACON;
+ if (!memcmp(bi->SSID, sr->ssid.SSID, bi->SSID_len)) {
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | mgmt_type);
+ }
+ beacon_proberesp = wl->active_scan ?
+ (struct beacon_proberesp *)&mgmt->u.probe_resp :
+ (struct beacon_proberesp *)&mgmt->u.beacon;
+ beacon_proberesp->timestamp = 0;
+ beacon_proberesp->beacon_int = cpu_to_le16(bi->beacon_period);
+ beacon_proberesp->capab_info = cpu_to_le16(bi->capability);
+ wl_rst_ie(wl);
+
+ wl_mrg_ie(wl, ((u8 *) bi) + bi->ie_offset, bi->ie_length);
+ wl_cp_ie(wl, beacon_proberesp->variable, WL_BSS_INFO_MAX -
+ offsetof(struct wl_cfg80211_bss_info, frame_buf));
+ notif_bss_info->frame_len = offsetof(struct ieee80211_mgmt,
+ u.beacon.variable) + wl_get_ielen(wl);
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
+ freq = ieee80211_channel_to_frequency(notif_bss_info->channel);
+ (void)band->band;
+#else
+ freq = ieee80211_channel_to_frequency(notif_bss_info->channel, band->band);
+#endif
+ channel = ieee80211_get_channel(wiphy, freq);
+
+	WL_DBG(("SSID : \"%s\", rssi %d, channel %d, capability : 0x%04x, bssid %pM "
+		"mgmt_type %d frame_len %d\n", bi->SSID,
+ notif_bss_info->rssi, notif_bss_info->channel,
+ mgmt->u.beacon.capab_info, &bi->BSSID, mgmt_type,
+ notif_bss_info->frame_len));
+
+ signal = notif_bss_info->rssi * 100;
+
+#if defined(WLP2P) && defined(WL_ENABLE_P2P_IF)
+ if (wl->p2p_net && wl->scan_request &&
+ ((wl->scan_request->dev == wl->p2p_net) ||
+ (wl->scan_request->dev == wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION)))) {
+#else
+ if (p2p_is_on(wl) && (p2p_scan(wl) ||
+ (wl->scan_request->dev == wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION)))) {
+#endif
+ /* find the P2PIE, if we do not find it, we will discard this frame */
+ wifi_p2p_ie_t * p2p_ie;
+ if ((p2p_ie = wl_cfgp2p_find_p2pie((u8 *)beacon_proberesp->variable,
+ wl_get_ielen(wl))) == NULL) {
+ WL_ERR(("Couldn't find P2PIE in probe response/beacon\n"));
+ kfree(notif_bss_info);
+ return err;
+ }
+ }
+
+ cbss = cfg80211_inform_bss_frame(wiphy, channel, mgmt,
+ le16_to_cpu(notif_bss_info->frame_len), signal, GFP_KERNEL);
+ if (unlikely(!cbss)) {
+ WL_ERR(("cfg80211_inform_bss_frame error\n"));
+ kfree(notif_bss_info);
+ return -EINVAL;
+ }
+
+ cfg80211_put_bss(cbss);
+ kfree(notif_bss_info);
+
+ return err;
+}
+
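+/* A link-up event is either a successful WLC_E_SET_SSID in non-IBSS mode or
+ * a WLC_E_LINK event with the LINK flag set.
+ */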
+static bool wl_is_linkup(struct wl_priv *wl, const wl_event_msg_t *e, struct net_device *ndev)
+{
+ u32 event = ntoh32(e->event_type);
+ u32 status = ntoh32(e->status);
+ u16 flags = ntoh16(e->flags);
+
+ WL_DBG(("event %d, status %d flags %x\n", event, status, flags));
+ if (event == WLC_E_SET_SSID) {
+ if (status == WLC_E_STATUS_SUCCESS) {
+ if (!wl_is_ibssmode(wl, ndev))
+ return true;
+ }
+ } else if (event == WLC_E_LINK) {
+ if (flags & WLC_EVENT_MSG_LINK)
+ return true;
+ }
+
+ WL_DBG(("wl_is_linkup false\n"));
+ return false;
+}
+
+static bool wl_is_linkdown(struct wl_priv *wl, const wl_event_msg_t *e)
+{
+ u32 event = ntoh32(e->event_type);
+ u16 flags = ntoh16(e->flags);
+
+ if (event == WLC_E_DEAUTH_IND ||
+ event == WLC_E_DISASSOC_IND ||
+ event == WLC_E_DISASSOC ||
+ event == WLC_E_DEAUTH) {
+ return true;
+ } else if (event == WLC_E_LINK) {
+ if (!(flags & WLC_EVENT_MSG_LINK))
+ return true;
+ }
+
+ return false;
+}
+
+static bool wl_is_nonetwork(struct wl_priv *wl, const wl_event_msg_t *e)
+{
+ u32 event = ntoh32(e->event_type);
+ u32 status = ntoh32(e->status);
+
+ if (event == WLC_E_LINK && status == WLC_E_STATUS_NO_NETWORKS)
+ return true;
+ if (event == WLC_E_SET_SSID && status != WLC_E_STATUS_SUCCESS)
+ return true;
+
+ return false;
+}
+
+/* Mainline kernels >= 3.2.0 support indicating a new/deleted station to an
+ * AP/P2P GO via events. If this change has been backported to the kernel this
+ * driver is being built for, define WL_CFG80211_STA_EVENT. This new/del sta
+ * event mechanism should be used with BRCM supplicant >= 22.
+ */
+static s32
+wl_notify_connect_status_ap(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = 0;
+ u32 event = ntoh32(e->event_type);
+ u32 reason = ntoh32(e->reason);
+ u32 len = ntoh32(e->datalen);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT)
+ bool isfree = false;
+ u8 *mgmt_frame;
+ u8 bsscfgidx = e->bsscfgidx;
+ s32 freq;
+ s32 channel;
+ u8 body[WL_FRAME_LEN];
+ u16 fc = 0;
+
+ struct ieee80211_supported_band *band;
+ struct ether_addr da;
+ struct ether_addr bssid;
+ struct wiphy *wiphy = wl_to_wiphy(wl);
+ channel_info_t ci;
+#else
+ struct station_info sinfo;
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !WL_CFG80211_STA_EVENT */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT)
+ memset(body, 0, sizeof(body));
+ memset(&bssid, 0, ETHER_ADDR_LEN);
+ WL_DBG(("Enter event %d ndev %p\n", event, ndev));
+ if (wl_get_mode_by_netdev(wl, ndev) == WL_INVALID)
+ return WL_INVALID;
+
+ if (len > WL_FRAME_LEN) {
+ WL_ERR(("Received frame length %d from dongle is greater than"
+ " allocated body buffer len %d", len, WL_FRAME_LEN));
+ goto exit;
+ }
+ memcpy(body, data, len);
+ wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
+ NULL, 0, wl->ioctl_buf, WLC_IOCTL_MAXLEN, bsscfgidx, &wl->ioctl_buf_sync);
+ memcpy(da.octet, wl->ioctl_buf, ETHER_ADDR_LEN);
+ err = wldev_ioctl(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false);
+ switch (event) {
+ case WLC_E_ASSOC_IND:
+ fc = FC_ASSOC_REQ;
+ break;
+ case WLC_E_REASSOC_IND:
+ fc = FC_REASSOC_REQ;
+ break;
+ case WLC_E_DISASSOC_IND:
+ fc = FC_DISASSOC;
+ break;
+ case WLC_E_DEAUTH_IND:
+ fc = FC_DISASSOC;
+ break;
+ case WLC_E_DEAUTH:
+ fc = FC_DISASSOC;
+ break;
+ default:
+ fc = 0;
+ goto exit;
+ }
+ if ((err = wldev_ioctl(ndev, WLC_GET_CHANNEL, &ci, sizeof(ci), false)))
+ return err;
+
+ channel = dtoh32(ci.hw_channel);
+ if (channel <= CH_MAX_2G_CHANNEL)
+ band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ else
+ band = wiphy->bands[IEEE80211_BAND_5GHZ];
+ if (!band) {
+ WL_ERR(("No valid band"));
+ return -EINVAL;
+ }
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
+ freq = ieee80211_channel_to_frequency(channel);
+ (void)band->band;
+#else
+ freq = ieee80211_channel_to_frequency(channel, band->band);
+#endif
+
+ err = wl_frame_get_mgmt(fc, &da, &e->addr, &bssid,
+ &mgmt_frame, &len, body);
+ if (err < 0)
+ goto exit;
+ isfree = true;
+
+ if (event == WLC_E_ASSOC_IND && reason == DOT11_SC_SUCCESS) {
+ cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
+ } else if (event == WLC_E_DISASSOC_IND) {
+ cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
+ } else if ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH)) {
+ cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
+ }
+
+exit:
+ if (isfree)
+ kfree(mgmt_frame);
+ return err;
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0) && !WL_CFG80211_STA_EVENT */
+ sinfo.filled = 0;
+ if (((event == WLC_E_ASSOC_IND) || (event == WLC_E_REASSOC_IND)) &&
+ reason == DOT11_SC_SUCCESS) {
+ sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
+ if (!data) {
+ WL_ERR(("No IEs present in ASSOC/REASSOC_IND"));
+ return -EINVAL;
+ }
+ sinfo.assoc_req_ies = data;
+ sinfo.assoc_req_ies_len = len;
+ cfg80211_new_sta(ndev, e->addr.octet, &sinfo, GFP_ATOMIC);
+ } else if (event == WLC_E_DISASSOC_IND) {
+ cfg80211_del_sta(ndev, e->addr.octet, GFP_ATOMIC);
+ } else if ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH)) {
+ cfg80211_del_sta(ndev, e->addr.octet, GFP_ATOMIC);
+ }
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0) && !WL_CFG80211_STA_EVENT */
+ return err;
+}
+
+static s32
+wl_notify_connect_status(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ bool act;
+ s32 err = 0;
+ u32 event = ntoh32(e->event_type);
+
+ if (wl_get_mode_by_netdev(wl, ndev) == WL_MODE_AP) {
+ wl_notify_connect_status_ap(wl, ndev, e, data);
+ } else {
+ WL_DBG(("wl_notify_connect_status : event %d status : %d ndev %p\n",
+ ntoh32(e->event_type), ntoh32(e->status), ndev));
+ if (wl_is_linkup(wl, e, ndev)) {
+ wl_link_up(wl);
+ act = true;
+ wl_update_prof(wl, ndev, e, &act, WL_PROF_ACT);
+ wl_update_prof(wl, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
+ if (wl_is_ibssmode(wl, ndev)) {
+ printk("cfg80211_ibss_joined\n");
+ cfg80211_ibss_joined(ndev, (s8 *)&e->addr,
+ GFP_KERNEL);
+ WL_DBG(("joined in IBSS network\n"));
+ } else {
+ if (!wl_get_drv_status(wl, DISCONNECTING, ndev)) {
+ printk("wl_bss_connect_done succeeded\n");
+ wl_bss_connect_done(wl, ndev, e, data, true);
+ WL_DBG(("joined in BSS network \"%s\"\n",
+ ((struct wlc_ssid *)
+ wl_read_prof(wl, ndev, WL_PROF_SSID))->SSID));
+ }
+ }
+
+ } else if (wl_is_linkdown(wl, e)) {
+ if (wl->scan_request) {
+ if (wl->escan_on) {
+ wl_notify_escan_complete(wl, ndev, true, true);
+ } else {
+ del_timer_sync(&wl->scan_timeout);
+ wl_iscan_aborted(wl);
+ }
+ }
+ if (wl_get_drv_status(wl, CONNECTED, ndev)) {
+ scb_val_t scbval;
+ u8 *curbssid = wl_read_prof(wl, ndev, WL_PROF_BSSID);
+ printk("link down, call cfg80211_disconnected\n");
+ wl_clr_drv_status(wl, CONNECTED, ndev);
+ if (! wl_get_drv_status(wl, DISCONNECTING, ndev)) {
+					/* To make sure the disconnect completes, explicitly send
+					 * a disassoc (works around the all-zero BSSID issue)
+					 */
+ scbval.val = WLAN_REASON_DEAUTH_LEAVING;
+
+ memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
+ scbval.val = htod32(scbval.val);
+ wldev_ioctl(ndev, WLC_DISASSOC, &scbval,
+ sizeof(scb_val_t), true);
+ cfg80211_disconnected(ndev, 0, NULL, 0, GFP_KERNEL);
+ wl_link_down(wl);
+ wl_init_prof(wl, ndev);
+ }
+ }
+ else if (wl_get_drv_status(wl, CONNECTING, ndev)) {
+ printk("link down, during connecting\n");
+ wl_bss_connect_done(wl, ndev, e, data, false);
+ }
+ wl_clr_drv_status(wl, DISCONNECTING, ndev);
+
+ } else if (wl_is_nonetwork(wl, e)) {
+ printk("connect failed event=%d e->status 0x%x\n",
+ event, (int)ntoh32(e->status));
+ /* Clean up any pending scan request */
+ if (wl->scan_request) {
+ if (wl->escan_on) {
+ wl_notify_escan_complete(wl, ndev, true, true);
+ } else {
+ del_timer_sync(&wl->scan_timeout);
+ wl_iscan_aborted(wl);
+ }
+ }
+ if (wl_get_drv_status(wl, CONNECTING, ndev))
+ wl_bss_connect_done(wl, ndev, e, data, false);
+ } else {
+ printk("%s nothing\n", __FUNCTION__);
+ }
+ }
+ return err;
+}
+
+static s32
+wl_notify_roaming_status(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ bool act;
+ s32 err = 0;
+ u32 event = be32_to_cpu(e->event_type);
+ u32 status = be32_to_cpu(e->status);
+ WL_DBG(("Enter \n"));
+ if (event == WLC_E_ROAM && status == WLC_E_STATUS_SUCCESS) {
+ if (wl_get_drv_status(wl, CONNECTED, ndev))
+ wl_bss_roaming_done(wl, ndev, e, data);
+ else
+ wl_bss_connect_done(wl, ndev, e, data, true);
+ act = true;
+ wl_update_prof(wl, ndev, e, &act, WL_PROF_ACT);
+ wl_update_prof(wl, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
+ }
+ return err;
+}
+
+static s32 wl_get_assoc_ies(struct wl_priv *wl, struct net_device *ndev)
+{
+ wl_assoc_info_t assoc_info;
+ struct wl_connect_info *conn_info = wl_to_conn(wl);
+ s32 err = 0;
+
+ WL_DBG(("Enter \n"));
+ err = wldev_iovar_getbuf(ndev, "assoc_info", NULL, 0, wl->extra_buf,
+ WL_ASSOC_INFO_MAX, NULL);
+ if (unlikely(err)) {
+ WL_ERR(("could not get assoc info (%d)\n", err));
+ return err;
+ }
+ memcpy(&assoc_info, wl->extra_buf, sizeof(wl_assoc_info_t));
+ assoc_info.req_len = htod32(assoc_info.req_len);
+ assoc_info.resp_len = htod32(assoc_info.resp_len);
+ assoc_info.flags = htod32(assoc_info.flags);
+ if (conn_info->req_ie_len) {
+ conn_info->req_ie_len = 0;
+ bzero(conn_info->req_ie, sizeof(conn_info->req_ie));
+ }
+ if (conn_info->resp_ie_len) {
+ conn_info->resp_ie_len = 0;
+ bzero(conn_info->resp_ie, sizeof(conn_info->resp_ie));
+ }
+ if (assoc_info.req_len) {
+ err = wldev_iovar_getbuf(ndev, "assoc_req_ies", NULL, 0, wl->extra_buf,
+ WL_ASSOC_INFO_MAX, NULL);
+ if (unlikely(err)) {
+ WL_ERR(("could not get assoc req (%d)\n", err));
+ return err;
+ }
+ conn_info->req_ie_len = assoc_info.req_len - sizeof(struct dot11_assoc_req);
+ if (assoc_info.flags & WLC_ASSOC_REQ_IS_REASSOC) {
+ conn_info->req_ie_len -= ETHER_ADDR_LEN;
+ }
+ if (conn_info->req_ie_len <= MAX_REQ_LINE)
+ memcpy(conn_info->req_ie, wl->extra_buf, conn_info->req_ie_len);
+ else {
+			WL_ERR(("%s IE size %d exceeds max %d\n",
+ __FUNCTION__, conn_info->req_ie_len, MAX_REQ_LINE));
+ return err;
+ }
+ } else {
+ conn_info->req_ie_len = 0;
+ }
+ if (assoc_info.resp_len) {
+ err = wldev_iovar_getbuf(ndev, "assoc_resp_ies", NULL, 0, wl->extra_buf,
+ WL_ASSOC_INFO_MAX, NULL);
+ if (unlikely(err)) {
+ WL_ERR(("could not get assoc resp (%d)\n", err));
+ return err;
+ }
+		conn_info->resp_ie_len = assoc_info.resp_len - sizeof(struct dot11_assoc_resp);
+ if (conn_info->resp_ie_len <= MAX_REQ_LINE)
+ memcpy(conn_info->resp_ie, wl->extra_buf, conn_info->resp_ie_len);
+ else {
+			WL_ERR(("%s IE size %d exceeds max %d\n",
+ __FUNCTION__, conn_info->resp_ie_len, MAX_REQ_LINE));
+ return err;
+ }
+ } else {
+ conn_info->resp_ie_len = 0;
+ }
+ WL_DBG(("req len (%d) resp len (%d)\n", conn_info->req_ie_len,
+ conn_info->resp_ie_len));
+
+ return err;
+}
+
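+/* Build a 20 MHz chanspec for the given channel, store it in the join
+ * parameters and grow *join_params_size accordingly.
+ */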
+static void wl_ch_to_chanspec(int ch, struct wl_join_params *join_params,
+ size_t *join_params_size)
+{
+ chanspec_t chanspec = 0;
+
+ if (ch != 0) {
+ join_params->params.chanspec_num = 1;
+ join_params->params.chanspec_list[0] = ch;
+
+ if (join_params->params.chanspec_list[0] <= CH_MAX_2G_CHANNEL)
+ chanspec |= WL_CHANSPEC_BAND_2G;
+ else
+ chanspec |= WL_CHANSPEC_BAND_5G;
+
+ chanspec |= WL_CHANSPEC_BW_20;
+ chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+ *join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE +
+ join_params->params.chanspec_num * sizeof(chanspec_t);
+
+ join_params->params.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK;
+ join_params->params.chanspec_list[0] |= chanspec;
+ join_params->params.chanspec_list[0] =
+ wl_chspec_host_to_driver(join_params->params.chanspec_list[0]);
+
+ join_params->params.chanspec_num =
+ htod32(join_params->params.chanspec_num);
+
+ WL_DBG(("%s join_params->params.chanspec_list[0]= %X\n",
+ __FUNCTION__, join_params->params.chanspec_list[0]));
+
+ }
+}
+
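+/*
+ * Refresh the cached beacon interval and DTIM period for the current BSS,
+ * using the cfg80211 BSS entry when available (WLC_GET_BSS_INFO otherwise)
+ * and querying WLC_GET_DTIMPRD when no TIM IE is found.
+ */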
+static s32 wl_update_bss_info(struct wl_priv *wl, struct net_device *ndev)
+{
+ struct cfg80211_bss *bss;
+ struct wl_bss_info *bi;
+ struct wlc_ssid *ssid;
+ struct bcm_tlv *tim;
+ s32 beacon_interval;
+ s32 dtim_period;
+ size_t ie_len;
+ u8 *ie;
+ u8 *curbssid;
+ s32 err = 0;
+ struct wiphy *wiphy;
+
+ wiphy = wl_to_wiphy(wl);
+
+ if (wl_is_ibssmode(wl, ndev))
+ return err;
+
+ ssid = (struct wlc_ssid *)wl_read_prof(wl, ndev, WL_PROF_SSID);
+ curbssid = wl_read_prof(wl, ndev, WL_PROF_BSSID);
+ bss = cfg80211_get_bss(wiphy, NULL, curbssid,
+ ssid->SSID, ssid->SSID_len, WLAN_CAPABILITY_ESS,
+ WLAN_CAPABILITY_ESS);
+
+ mutex_lock(&wl->usr_sync);
+ if (!bss) {
+ WL_DBG(("Could not find the AP\n"));
+ *(u32 *) wl->extra_buf = htod32(WL_EXTRA_BUF_MAX);
+ err = wldev_ioctl(ndev, WLC_GET_BSS_INFO,
+ wl->extra_buf, WL_EXTRA_BUF_MAX, false);
+ if (unlikely(err)) {
+ WL_ERR(("Could not get bss info %d\n", err));
+ goto update_bss_info_out;
+ }
+ bi = (struct wl_bss_info *)(wl->extra_buf + 4);
+ if (memcmp(bi->BSSID.octet, curbssid, ETHER_ADDR_LEN)) {
+ err = -EIO;
+ goto update_bss_info_out;
+ }
+ err = wl_inform_single_bss(wl, bi);
+ if (unlikely(err))
+ goto update_bss_info_out;
+
+ ie = ((u8 *)bi) + bi->ie_offset;
+ ie_len = bi->ie_length;
+ beacon_interval = cpu_to_le16(bi->beacon_period);
+ } else {
+ WL_DBG(("Found the AP in the list - BSSID %pM\n", bss->bssid));
+ ie = bss->information_elements;
+ ie_len = bss->len_information_elements;
+ beacon_interval = bss->beacon_interval;
+ cfg80211_put_bss(bss);
+ }
+
+ tim = bcm_parse_tlvs(ie, ie_len, WLAN_EID_TIM);
+ if (tim) {
+ dtim_period = tim->data[1];
+ } else {
+		/*
+		 * An active scan was done, so the DTIM information could not be
+		 * taken from the probe response; query it explicitly instead.
+		 */
+ err = wldev_ioctl(ndev, WLC_GET_DTIMPRD,
+ &dtim_period, sizeof(dtim_period), false);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_GET_DTIMPRD error (%d)\n", err));
+ goto update_bss_info_out;
+ }
+ }
+
+ wl_update_prof(wl, ndev, NULL, &beacon_interval, WL_PROF_BEACONINT);
+ wl_update_prof(wl, ndev, NULL, &dtim_period, WL_PROF_DTIMPERIOD);
+
+update_bss_info_out:
+ mutex_unlock(&wl->usr_sync);
+ return err;
+}
+
+static s32
+wl_bss_roaming_done(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct wl_connect_info *conn_info = wl_to_conn(wl);
+ s32 err = 0;
+ u8 *curbssid;
+
+ wl_get_assoc_ies(wl, ndev);
+ wl_update_prof(wl, ndev, NULL, (void *)(e->addr.octet), WL_PROF_BSSID);
+ curbssid = wl_read_prof(wl, ndev, WL_PROF_BSSID);
+ wl_update_bss_info(wl, ndev);
+ wl_update_pmklist(ndev, wl->pmk_list, err);
+ cfg80211_roamed(ndev,
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)
+ NULL,
+#endif
+ curbssid,
+ conn_info->req_ie, conn_info->req_ie_len,
+ conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
+ WL_DBG(("Report roaming result\n"));
+
+ wl_set_drv_status(wl, CONNECTED, ndev);
+
+ return err;
+}
+
+static s32
+wl_bss_connect_done(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data, bool completed)
+{
+ struct wl_connect_info *conn_info = wl_to_conn(wl);
+ s32 err = 0;
+ u8 *curbssid = wl_read_prof(wl, ndev, WL_PROF_BSSID);
+
+ WL_DBG((" enter\n"));
+ if (wl->scan_request) {
+ wl_notify_escan_complete(wl, ndev, true, true);
+ }
+ if (wl_get_drv_status(wl, CONNECTING, ndev)) {
+ wl_clr_drv_status(wl, CONNECTING, ndev);
+ if (completed) {
+ wl_get_assoc_ies(wl, ndev);
+ wl_update_prof(wl, ndev, NULL, (void *)(e->addr.octet), WL_PROF_BSSID);
+ curbssid = wl_read_prof(wl, ndev, WL_PROF_BSSID);
+ wl_update_bss_info(wl, ndev);
+ wl_update_pmklist(ndev, wl->pmk_list, err);
+ wl_set_drv_status(wl, CONNECTED, ndev);
+ }
+ cfg80211_connect_result(ndev,
+ curbssid,
+ conn_info->req_ie,
+ conn_info->req_ie_len,
+ conn_info->resp_ie,
+ conn_info->resp_ie_len,
+ completed ? WLAN_STATUS_SUCCESS : WLAN_STATUS_AUTH_TIMEOUT,
+ GFP_KERNEL);
+ if (completed)
+ WL_INFO(("Report connect result - connection succeeded\n"));
+ else
+ WL_ERR(("Report connect result - connection failed\n"));
+ }
+ return err;
+}
+
+static s32
+wl_notify_mic_status(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ u16 flags = ntoh16(e->flags);
+ enum nl80211_key_type key_type;
+
+ mutex_lock(&wl->usr_sync);
+ if (flags & WLC_EVENT_MSG_GROUP)
+ key_type = NL80211_KEYTYPE_GROUP;
+ else
+ key_type = NL80211_KEYTYPE_PAIRWISE;
+
+ cfg80211_michael_mic_failure(ndev, (u8 *)&e->addr, key_type, -1,
+ NULL, GFP_KERNEL);
+ mutex_unlock(&wl->usr_sync);
+
+ return 0;
+}
+
+static s32
+wl_notify_scan_status(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct channel_info channel_inform;
+ struct wl_scan_results *bss_list;
+ u32 len = WL_SCAN_BUF_MAX;
+ s32 err = 0;
+ unsigned long flags;
+
+ WL_DBG(("Enter \n"));
+ if (!wl_get_drv_status(wl, SCANNING, ndev)) {
+ WL_ERR(("scan is not ready \n"));
+ return err;
+ }
+ if (wl->iscan_on && wl->iscan_kickstart)
+ return wl_wakeup_iscan(wl_to_iscan(wl));
+
+ mutex_lock(&wl->usr_sync);
+ wl_clr_drv_status(wl, SCANNING, ndev);
+ err = wldev_ioctl(ndev, WLC_GET_CHANNEL, &channel_inform,
+ sizeof(channel_inform), false);
+ if (unlikely(err)) {
+ WL_ERR(("scan busy (%d)\n", err));
+ goto scan_done_out;
+ }
+ channel_inform.scan_channel = dtoh32(channel_inform.scan_channel);
+ if (unlikely(channel_inform.scan_channel)) {
+
+ WL_DBG(("channel_inform.scan_channel (%d)\n",
+ channel_inform.scan_channel));
+ }
+ wl->bss_list = wl->scan_results;
+ bss_list = wl->bss_list;
+ memset(bss_list, 0, len);
+ bss_list->buflen = htod32(len);
+ err = wldev_ioctl(ndev, WLC_SCAN_RESULTS, bss_list, len, false);
+ if (unlikely(err)) {
+ WL_ERR(("%s Scan_results error (%d)\n", ndev->name, err));
+ err = -EINVAL;
+ goto scan_done_out;
+ }
+ bss_list->buflen = dtoh32(bss_list->buflen);
+ bss_list->version = dtoh32(bss_list->version);
+ bss_list->count = dtoh32(bss_list->count);
+
+ err = wl_inform_bss(wl);
+
+scan_done_out:
+ del_timer_sync(&wl->scan_timeout);
+ spin_lock_irqsave(&wl->cfgdrv_lock, flags);
+ if (wl->scan_request) {
+ WL_DBG(("cfg80211_scan_done\n"));
+ cfg80211_scan_done(wl->scan_request, false);
+ wl->scan_request = NULL;
+ }
+ spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
+ mutex_unlock(&wl->usr_sync);
+ return err;
+}
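+
+/*
+ * Build an 802.11 management frame (header plus the supplied body) in a
+ * freshly kzalloc'd buffer pointed to by *pheader and update *body_len to
+ * the total frame length.  The caller is responsible for freeing *pheader.
+ */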
+static s32
+wl_frame_get_mgmt(u16 fc, const struct ether_addr *da,
+ const struct ether_addr *sa, const struct ether_addr *bssid,
+ u8 **pheader, u32 *body_len, u8 *pbody)
+{
+ struct dot11_management_header *hdr;
+ u32 totlen = 0;
+ s32 err = 0;
+ u8 *offset;
+ u32 prebody_len = *body_len;
+ switch (fc) {
+ case FC_ASSOC_REQ:
+		/* capability, listen interval */
+ totlen = DOT11_ASSOC_REQ_FIXED_LEN;
+ *body_len += DOT11_ASSOC_REQ_FIXED_LEN;
+ break;
+
+ case FC_REASSOC_REQ:
+		/* capability, listen interval, AP address */
+ totlen = DOT11_REASSOC_REQ_FIXED_LEN;
+ *body_len += DOT11_REASSOC_REQ_FIXED_LEN;
+ break;
+ }
+ totlen += DOT11_MGMT_HDR_LEN + prebody_len;
+ *pheader = kzalloc(totlen, GFP_KERNEL);
+ if (*pheader == NULL) {
+ WL_ERR(("memory alloc failed \n"));
+ return -ENOMEM;
+ }
+ hdr = (struct dot11_management_header *) (*pheader);
+ hdr->fc = htol16(fc);
+ hdr->durid = 0;
+ hdr->seq = 0;
+ offset = (u8*)(hdr + 1) + (totlen - DOT11_MGMT_HDR_LEN - prebody_len);
+ bcopy((const char*)da, (u8*)&hdr->da, ETHER_ADDR_LEN);
+ bcopy((const char*)sa, (u8*)&hdr->sa, ETHER_ADDR_LEN);
+ bcopy((const char*)bssid, (u8*)&hdr->bssid, ETHER_ADDR_LEN);
+ bcopy((const char*)pbody, offset, prebody_len);
+ *body_len = totlen;
+ return err;
+}
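+
+/*
+ * Handle a received management/action frame event: rebuild the full frame,
+ * classify P2P public/P2P/GAS action frames for logging, and forward the
+ * frame to cfg80211 via cfg80211_rx_mgmt().
+ */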
+static s32
+wl_notify_rx_mgmt_frame(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct ieee80211_supported_band *band;
+ struct wiphy *wiphy = wl_to_wiphy(wl);
+ struct ether_addr da;
+ struct ether_addr bssid;
+ bool isfree = false;
+ s32 err = 0;
+ s32 freq;
+ struct net_device *dev = NULL;
+ wifi_p2p_pub_act_frame_t *act_frm = NULL;
+ wifi_p2p_action_frame_t *p2p_act_frm = NULL;
+ wifi_p2psd_gas_pub_act_frame_t *sd_act_frm = NULL;
+ wl_event_rx_frame_data_t *rxframe =
+ (wl_event_rx_frame_data_t*)data;
+ u32 event = ntoh32(e->event_type);
+ u8 *mgmt_frame;
+ u8 bsscfgidx = e->bsscfgidx;
+ u32 mgmt_frame_len = ntoh32(e->datalen) - sizeof(wl_event_rx_frame_data_t);
+ u16 channel = ((ntoh16(rxframe->channel) & WL_CHANSPEC_CHAN_MASK));
+
+ memset(&bssid, 0, ETHER_ADDR_LEN);
+
+ if (wl->p2p_net == ndev) {
+ dev = wl_to_prmry_ndev(wl);
+ } else {
+ dev = ndev;
+ }
+
+ if (channel <= CH_MAX_2G_CHANNEL)
+ band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ else
+ band = wiphy->bands[IEEE80211_BAND_5GHZ];
+ if (!band) {
+ WL_ERR(("No valid band"));
+ return -EINVAL;
+ }
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
+ freq = ieee80211_channel_to_frequency(channel);
+ (void)band->band;
+#else
+ freq = ieee80211_channel_to_frequency(channel, band->band);
+#endif
+ if (event == WLC_E_ACTION_FRAME_RX) {
+ wldev_iovar_getbuf_bsscfg(dev, "cur_etheraddr",
+ NULL, 0, wl->ioctl_buf, WLC_IOCTL_MAXLEN, bsscfgidx, &wl->ioctl_buf_sync);
+
+ wldev_ioctl(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false);
+ memcpy(da.octet, wl->ioctl_buf, ETHER_ADDR_LEN);
+ err = wl_frame_get_mgmt(FC_ACTION, &da, &e->addr, &bssid,
+ &mgmt_frame, &mgmt_frame_len,
+ (u8 *)((wl_event_rx_frame_data_t *)rxframe + 1));
+ if (err < 0) {
+ WL_ERR(("%s: Error in receiving action frame len %d channel %d freq %d\n",
+ __func__, mgmt_frame_len, channel, freq));
+ goto exit;
+ }
+ isfree = true;
+ if (wl_cfgp2p_is_pub_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+ mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
+ act_frm = (wifi_p2p_pub_act_frame_t *)
+ (&mgmt_frame[DOT11_MGMT_HDR_LEN]);
+ } else if (wl_cfgp2p_is_p2p_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+ mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
+ p2p_act_frm = (wifi_p2p_action_frame_t *)
+ (&mgmt_frame[DOT11_MGMT_HDR_LEN]);
+ (void) p2p_act_frm;
+ } else if (wl_cfgp2p_is_gas_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+ mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
+ sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)
+ (&mgmt_frame[DOT11_MGMT_HDR_LEN]);
+ (void) sd_act_frm;
+ }
+ wl_cfgp2p_print_actframe(false, &mgmt_frame[DOT11_MGMT_HDR_LEN],
+ mgmt_frame_len - DOT11_MGMT_HDR_LEN);
+ /*
+ * After complete GO Negotiation, roll back to mpc mode
+ */
+ if (act_frm && ((act_frm->subtype == P2P_PAF_GON_CONF) ||
+ (act_frm->subtype == P2P_PAF_PROVDIS_RSP))) {
+ wldev_iovar_setint(dev, "mpc", 1);
+ }
+ } else {
+ mgmt_frame = (u8 *)((wl_event_rx_frame_data_t *)rxframe + 1);
+ }
+
+ cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, mgmt_frame_len, GFP_ATOMIC);
+
+ WL_DBG(("%s: mgmt_frame_len (%d) , e->datalen (%d), channel (%d), freq (%d)\n", __func__,
+ mgmt_frame_len, ntoh32(e->datalen), channel, freq));
+
+ if (isfree)
+ kfree(mgmt_frame);
+exit:
+ return 0;
+}
+
+static void wl_init_conf(struct wl_conf *conf)
+{
+ WL_DBG(("Enter \n"));
+ conf->frag_threshold = (u32)-1;
+ conf->rts_threshold = (u32)-1;
+ conf->retry_short = (u32)-1;
+ conf->retry_long = (u32)-1;
+ conf->tx_power = -1;
+}
+
+static void wl_init_prof(struct wl_priv *wl, struct net_device *ndev)
+{
+ unsigned long flags;
+ struct wl_profile *profile = wl_get_profile_by_netdev(wl, ndev);
+
+ spin_lock_irqsave(&wl->cfgdrv_lock, flags);
+ memset(profile, 0, sizeof(struct wl_profile));
+ spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
+}
+
+static void wl_init_event_handler(struct wl_priv *wl)
+{
+ memset(wl->evt_handler, 0, sizeof(wl->evt_handler));
+
+ wl->evt_handler[WLC_E_SCAN_COMPLETE] = wl_notify_scan_status;
+ wl->evt_handler[WLC_E_LINK] = wl_notify_connect_status;
+ wl->evt_handler[WLC_E_DEAUTH_IND] = wl_notify_connect_status;
+ wl->evt_handler[WLC_E_DEAUTH] = wl_notify_connect_status;
+ wl->evt_handler[WLC_E_DISASSOC_IND] = wl_notify_connect_status;
+ wl->evt_handler[WLC_E_ASSOC_IND] = wl_notify_connect_status;
+ wl->evt_handler[WLC_E_REASSOC_IND] = wl_notify_connect_status;
+ wl->evt_handler[WLC_E_ROAM] = wl_notify_roaming_status;
+ wl->evt_handler[WLC_E_MIC_ERROR] = wl_notify_mic_status;
+ wl->evt_handler[WLC_E_SET_SSID] = wl_notify_connect_status;
+ wl->evt_handler[WLC_E_ACTION_FRAME_RX] = wl_notify_rx_mgmt_frame;
+ wl->evt_handler[WLC_E_PROBREQ_MSG] = wl_notify_rx_mgmt_frame;
+ wl->evt_handler[WLC_E_P2P_PROBREQ_MSG] = wl_notify_rx_mgmt_frame;
+ wl->evt_handler[WLC_E_P2P_DISC_LISTEN_COMPLETE] = wl_cfgp2p_listen_complete;
+ wl->evt_handler[WLC_E_ACTION_FRAME_COMPLETE] = wl_cfgp2p_action_tx_complete;
+ wl->evt_handler[WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE] = wl_cfgp2p_action_tx_complete;
+
+}
+
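+/*
+ * Allocate the driver's private buffers (scan results, ioctl/extra buffers,
+ * iscan and P2P helpers, PMK list, station info); any failure frees whatever
+ * was already allocated via wl_deinit_priv_mem().
+ */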
+static s32 wl_init_priv_mem(struct wl_priv *wl)
+{
+ WL_DBG(("Enter \n"));
+ wl->scan_results = (void *)kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
+ if (unlikely(!wl->scan_results)) {
+ WL_ERR(("Scan results alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ wl->conf = (void *)kzalloc(sizeof(*wl->conf), GFP_KERNEL);
+ if (unlikely(!wl->conf)) {
+ WL_ERR(("wl_conf alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ wl->scan_req_int =
+ (void *)kzalloc(sizeof(*wl->scan_req_int), GFP_KERNEL);
+ if (unlikely(!wl->scan_req_int)) {
+ WL_ERR(("Scan req alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ wl->ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+ if (unlikely(!wl->ioctl_buf)) {
+ WL_ERR(("Ioctl buf alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ wl->escan_ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+ if (unlikely(!wl->escan_ioctl_buf)) {
+ WL_ERR(("Ioctl buf alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ wl->extra_buf = (void *)kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
+ if (unlikely(!wl->extra_buf)) {
+ WL_ERR(("Extra buf alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ wl->iscan = (void *)kzalloc(sizeof(*wl->iscan), GFP_KERNEL);
+ if (unlikely(!wl->iscan)) {
+ WL_ERR(("Iscan buf alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ wl->pmk_list = (void *)kzalloc(sizeof(*wl->pmk_list), GFP_KERNEL);
+ if (unlikely(!wl->pmk_list)) {
+ WL_ERR(("pmk list alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ wl->sta_info = (void *)kzalloc(sizeof(*wl->sta_info), GFP_KERNEL);
+ if (unlikely(!wl->sta_info)) {
+ WL_ERR(("sta info alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ wl->afx_hdl = (void *)kzalloc(sizeof(*wl->afx_hdl), GFP_KERNEL);
+ if (unlikely(!wl->afx_hdl)) {
+ WL_ERR(("afx hdl alloc failed\n"));
+ goto init_priv_mem_out;
+ } else {
+ init_completion(&wl->act_frm_scan);
+ INIT_WORK(&wl->afx_hdl->work, wl_cfg80211_afx_handler);
+ }
+ return 0;
+
+init_priv_mem_out:
+ wl_deinit_priv_mem(wl);
+
+ return -ENOMEM;
+}
+
+static void wl_deinit_priv_mem(struct wl_priv *wl)
+{
+ kfree(wl->scan_results);
+ wl->scan_results = NULL;
+ kfree(wl->conf);
+ wl->conf = NULL;
+ kfree(wl->scan_req_int);
+ wl->scan_req_int = NULL;
+ kfree(wl->ioctl_buf);
+ wl->ioctl_buf = NULL;
+ kfree(wl->escan_ioctl_buf);
+ wl->escan_ioctl_buf = NULL;
+ kfree(wl->extra_buf);
+ wl->extra_buf = NULL;
+ kfree(wl->iscan);
+ wl->iscan = NULL;
+ kfree(wl->pmk_list);
+ wl->pmk_list = NULL;
+ kfree(wl->sta_info);
+ wl->sta_info = NULL;
+ if (wl->afx_hdl) {
+ cancel_work_sync(&wl->afx_hdl->work);
+ kfree(wl->afx_hdl);
+ wl->afx_hdl = NULL;
+ }
+
+ if (wl->ap_info) {
+ kfree(wl->ap_info->wpa_ie);
+ kfree(wl->ap_info->rsn_ie);
+ kfree(wl->ap_info->wps_ie);
+ kfree(wl->ap_info);
+ wl->ap_info = NULL;
+ }
+}
+
+static s32 wl_create_event_handler(struct wl_priv *wl)
+{
+ int ret = 0;
+ WL_DBG(("Enter \n"));
+
+ /* Do not use DHD in cfg driver */
+ wl->event_tsk.thr_pid = -1;
+ PROC_START(wl_event_handler, wl, &wl->event_tsk, 0);
+ if (wl->event_tsk.thr_pid < 0)
+ ret = -ENOMEM;
+ return ret;
+}
+
+static void wl_destroy_event_handler(struct wl_priv *wl)
+{
+ if (wl->event_tsk.thr_pid >= 0)
+ PROC_STOP(&wl->event_tsk);
+}
+
+static void wl_term_iscan(struct wl_priv *wl)
+{
+ struct wl_iscan_ctrl *iscan = wl_to_iscan(wl);
+ WL_TRACE(("In\n"));
+ if (wl->iscan_on && iscan->tsk) {
+ iscan->state = WL_ISCAN_STATE_IDLE;
+ WL_INFO(("SIGTERM\n"));
+ send_sig(SIGTERM, iscan->tsk, 1);
+ WL_DBG(("kthread_stop\n"));
+ kthread_stop(iscan->tsk);
+ iscan->tsk = NULL;
+ }
+}
+
+static void wl_notify_iscan_complete(struct wl_iscan_ctrl *iscan, bool aborted)
+{
+ struct wl_priv *wl = iscan_to_wl(iscan);
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+ unsigned long flags;
+
+ WL_DBG(("Enter \n"));
+ if (!wl_get_drv_status(wl, SCANNING, ndev)) {
+ wl_clr_drv_status(wl, SCANNING, ndev);
+ WL_ERR(("Scan complete while device not scanning\n"));
+ return;
+ }
+ spin_lock_irqsave(&wl->cfgdrv_lock, flags);
+ wl_clr_drv_status(wl, SCANNING, ndev);
+ if (likely(wl->scan_request)) {
+ cfg80211_scan_done(wl->scan_request, aborted);
+ wl->scan_request = NULL;
+ }
+ spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
+ wl->iscan_kickstart = false;
+}
+
+static s32 wl_wakeup_iscan(struct wl_iscan_ctrl *iscan)
+{
+ if (likely(iscan->state != WL_ISCAN_STATE_IDLE)) {
+ WL_DBG(("wake up iscan\n"));
+ up(&iscan->sync);
+ return 0;
+ }
+
+ return -EIO;
+}
+
+static s32
+wl_get_iscan_results(struct wl_iscan_ctrl *iscan, u32 *status,
+ struct wl_scan_results **bss_list)
+{
+ struct wl_iscan_results list;
+ struct wl_scan_results *results;
+ struct wl_iscan_results *list_buf;
+ s32 err = 0;
+
+ WL_DBG(("Enter \n"));
+ memset(iscan->scan_buf, 0, WL_ISCAN_BUF_MAX);
+ list_buf = (struct wl_iscan_results *)iscan->scan_buf;
+ results = &list_buf->results;
+ results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
+ results->version = 0;
+ results->count = 0;
+
+ memset(&list, 0, sizeof(list));
+ list.results.buflen = htod32(WL_ISCAN_BUF_MAX);
+ err = wldev_iovar_getbuf(iscan->dev, "iscanresults", &list,
+ WL_ISCAN_RESULTS_FIXED_SIZE, iscan->scan_buf,
+ WL_ISCAN_BUF_MAX, NULL);
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+ results->buflen = dtoh32(results->buflen);
+ results->version = dtoh32(results->version);
+ results->count = dtoh32(results->count);
+ WL_DBG(("results->count = %d\n", results->count));
+ WL_DBG(("results->buflen = %d\n", results->buflen));
+ *status = dtoh32(list_buf->status);
+ *bss_list = results;
+
+ return err;
+}
+
+static s32 wl_iscan_done(struct wl_priv *wl)
+{
+ struct wl_iscan_ctrl *iscan = wl->iscan;
+ s32 err = 0;
+
+ iscan->state = WL_ISCAN_STATE_IDLE;
+ mutex_lock(&wl->usr_sync);
+ wl_inform_bss(wl);
+ wl_notify_iscan_complete(iscan, false);
+ mutex_unlock(&wl->usr_sync);
+
+ return err;
+}
+
+static s32 wl_iscan_pending(struct wl_priv *wl)
+{
+ struct wl_iscan_ctrl *iscan = wl->iscan;
+ s32 err = 0;
+
+ /* Reschedule the timer */
+ mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
+ iscan->timer_on = 1;
+
+ return err;
+}
+
+static s32 wl_iscan_inprogress(struct wl_priv *wl)
+{
+ struct wl_iscan_ctrl *iscan = wl->iscan;
+ s32 err = 0;
+
+ mutex_lock(&wl->usr_sync);
+ wl_inform_bss(wl);
+ wl_run_iscan(iscan, NULL, WL_SCAN_ACTION_CONTINUE);
+ mutex_unlock(&wl->usr_sync);
+ /* Reschedule the timer */
+ mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
+ iscan->timer_on = 1;
+
+ return err;
+}
+
+static s32 wl_iscan_aborted(struct wl_priv *wl)
+{
+ struct wl_iscan_ctrl *iscan = wl->iscan;
+ s32 err = 0;
+
+ iscan->state = WL_ISCAN_STATE_IDLE;
+ mutex_lock(&wl->usr_sync);
+ wl_notify_iscan_complete(iscan, true);
+ mutex_unlock(&wl->usr_sync);
+
+ return err;
+}
+
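+/*
+ * iscan worker thread: woken through the iscan semaphore, it fetches the
+ * latest partial results from the firmware and dispatches to the handler
+ * registered for the returned scan status.
+ */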
+static s32 wl_iscan_thread(void *data)
+{
+ struct wl_iscan_ctrl *iscan = (struct wl_iscan_ctrl *)data;
+ struct wl_priv *wl = iscan_to_wl(iscan);
+ u32 status;
+ int err = 0;
+
+ allow_signal(SIGTERM);
+ status = WL_SCAN_RESULTS_PARTIAL;
+ while (likely(!down_interruptible(&iscan->sync))) {
+ if (kthread_should_stop())
+ break;
+ if (iscan->timer_on) {
+ del_timer_sync(&iscan->timer);
+ iscan->timer_on = 0;
+ }
+ mutex_lock(&wl->usr_sync);
+ err = wl_get_iscan_results(iscan, &status, &wl->bss_list);
+ if (unlikely(err)) {
+ status = WL_SCAN_RESULTS_ABORTED;
+ WL_ERR(("Abort iscan\n"));
+ }
+ mutex_unlock(&wl->usr_sync);
+ iscan->iscan_handler[status] (wl);
+ }
+ if (iscan->timer_on) {
+ del_timer_sync(&iscan->timer);
+ iscan->timer_on = 0;
+ }
+ WL_DBG(("%s was terminated\n", __func__));
+
+ return 0;
+}
+
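+/*
+ * Scan watchdog: if a scan request is still outstanding when the timer
+ * expires, force-complete it as aborted (escan or iscan as appropriate).
+ */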
+static void wl_scan_timeout(unsigned long data)
+{
+ struct wl_priv *wl = (struct wl_priv *)data;
+
+ if (wl->scan_request) {
+ WL_ERR(("timer expired\n"));
+ if (wl->escan_on)
+ wl_notify_escan_complete(wl, wl->escan_info.ndev, true, true);
+ else
+ wl_notify_iscan_complete(wl_to_iscan(wl), true);
+ }
+}
+static void wl_iscan_timer(unsigned long data)
+{
+ struct wl_iscan_ctrl *iscan = (struct wl_iscan_ctrl *)data;
+
+ if (iscan) {
+ iscan->timer_on = 0;
+ WL_DBG(("timer expired\n"));
+ wl_wakeup_iscan(iscan);
+ }
+}
+
+static s32 wl_invoke_iscan(struct wl_priv *wl)
+{
+ struct wl_iscan_ctrl *iscan = wl_to_iscan(wl);
+ int err = 0;
+
+ if (wl->iscan_on && !iscan->tsk) {
+ iscan->state = WL_ISCAN_STATE_IDLE;
+ sema_init(&iscan->sync, 0);
+ iscan->tsk = kthread_run(wl_iscan_thread, iscan, "wl_iscan");
+ if (IS_ERR(iscan->tsk)) {
+ WL_ERR(("Could not create iscan thread\n"));
+ iscan->tsk = NULL;
+ return -ENOMEM;
+ }
+ }
+
+ return err;
+}
+
+static void wl_init_iscan_handler(struct wl_iscan_ctrl *iscan)
+{
+ memset(iscan->iscan_handler, 0, sizeof(iscan->iscan_handler));
+ iscan->iscan_handler[WL_SCAN_RESULTS_SUCCESS] = wl_iscan_done;
+ iscan->iscan_handler[WL_SCAN_RESULTS_PARTIAL] = wl_iscan_inprogress;
+ iscan->iscan_handler[WL_SCAN_RESULTS_PENDING] = wl_iscan_pending;
+ iscan->iscan_handler[WL_SCAN_RESULTS_ABORTED] = wl_iscan_aborted;
+ iscan->iscan_handler[WL_SCAN_RESULTS_NO_MEM] = wl_iscan_aborted;
+}
+
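+/*
+ * netdev notifier: release per-interface state when a virtual interface is
+ * unregistered and abort any in-progress escan before an interface goes down.
+ */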
+static s32
+wl_cfg80211_netdev_notifier_call(struct notifier_block * nb,
+ unsigned long state,
+ void *ndev)
+{
+ struct net_device *dev = ndev;
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct wl_priv *wl = wlcfg_drv_priv;
+
+ WL_DBG(("Enter \n"));
+ if (!wdev || !wl || dev == wl_to_prmry_ndev(wl))
+ return NOTIFY_DONE;
+ switch (state) {
+ case NETDEV_UNREGISTER:
+ /* after calling list_del_rcu(&wdev->list) */
+ wl_dealloc_netinfo(wl, ndev);
+ break;
+ case NETDEV_GOING_DOWN:
+		/* At NETDEV_DOWN, wdev_cleanup_work will be called.  Before that
+		 * happens, check whether a scan is still in progress; if it is,
+		 * wdev_cleanup_work would WARN_ON and force the scan to complete,
+		 * so abort it here first.
+		 */
+ if (wl_get_drv_status(wl, SCANNING, dev)) {
+ if (wl->escan_on) {
+ wl_notify_escan_complete(wl, dev, true, true);
+ }
+ }
+ break;
+ }
+ return NOTIFY_DONE;
+}
+static struct notifier_block wl_cfg80211_netdev_notifier = {
+ .notifier_call = wl_cfg80211_netdev_notifier_call,
+};
+
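+/*
+ * Finish an escan: optionally abort the firmware scan engine, stop the scan
+ * timeout timer, report completion to cfg80211 and clear the SCANNING state.
+ */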
+static s32 wl_notify_escan_complete(struct wl_priv *wl,
+ struct net_device *ndev,
+ bool aborted, bool fw_abort)
+{
+ wl_scan_params_t *params = NULL;
+ s32 params_size = 0;
+ s32 err = BCME_OK;
+ unsigned long flags;
+ struct net_device *dev;
+
+ WL_DBG(("Enter \n"));
+
+ if (wl->scan_request) {
+ if (wl->scan_request->dev == wl->p2p_net)
+ dev = wl_to_prmry_ndev(wl);
+ else
+ dev = wl->scan_request->dev;
+ }
+ else {
+		WL_ERR(("wl->scan_request is NULL, may be an internal scan. "
+			"Doing scan_abort for ndev %p primary %p p2p_net %p",
+			ndev, wl_to_prmry_ndev(wl), wl->p2p_net));
+ dev = ndev;
+ }
+ if (fw_abort) {
+ /* Our scan params only need space for 1 channel and 0 ssids */
+ params = wl_cfg80211_scan_alloc_params(-1, 0, &params_size);
+ if (params == NULL) {
+ WL_ERR(("scan params allocation failed \n"));
+ err = -ENOMEM;
+ } else if (!in_atomic()) {
+ /* Do a scan abort to stop the driver's scan engine */
+ err = wldev_ioctl(dev, WLC_SCAN, params, params_size, true);
+ if (err < 0) {
+ WL_ERR(("scan abort failed \n"));
+ }
+ }
+ }
+ if (timer_pending(&wl->scan_timeout))
+ del_timer_sync(&wl->scan_timeout);
+ spin_lock_irqsave(&wl->cfgdrv_lock, flags);
+ if (likely(wl->scan_request)) {
+ cfg80211_scan_done(wl->scan_request, aborted);
+ wl->scan_request = NULL;
+ }
+ if (p2p_is_on(wl))
+ wl_clr_p2p_status(wl, SCANNING);
+ wl_clr_drv_status(wl, SCANNING, dev);
+ spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
+ if (params)
+ kfree(params);
+
+ return err;
+}
+
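+/*
+ * Firmware escan event handler.  Partial results are validated and merged
+ * into the accumulated escan buffer (keeping the strongest on-channel RSSI
+ * for duplicate BSSes); success/abort events hand the accumulated list to
+ * cfg80211 or complete an action-frame peer search.
+ */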
+static s32 wl_escan_handler(struct wl_priv *wl,
+ struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = BCME_OK;
+ s32 status = ntoh32(e->status);
+ wl_bss_info_t *bi;
+ wl_escan_result_t *escan_result;
+ wl_bss_info_t *bss = NULL;
+ wl_scan_results_t *list;
+ u32 bi_length;
+ u32 i;
+ u8 *p2p_dev_addr = NULL;
+
+ WL_DBG((" enter event type : %d, status : %d \n",
+ ntoh32(e->event_type), ntoh32(e->status)));
+ /* P2P SCAN is coming from primary interface */
+ if (wl_get_p2p_status(wl, SCANNING)) {
+ if (wl_get_drv_status_all(wl, SENDING_ACT_FRM))
+ ndev = wl->afx_hdl->dev;
+ else
+ ndev = wl->escan_info.ndev;
+
+ }
+ if (!ndev || !wl->escan_on ||
+ !wl_get_drv_status(wl, SCANNING, ndev)) {
+ WL_ERR(("escan is not ready ndev %p wl->escan_on %d drv_status 0x%x\n",
+ ndev, wl->escan_on, wl_get_drv_status(wl, SCANNING, ndev)));
+ return err;
+ }
+
+ if (status == WLC_E_STATUS_PARTIAL) {
+ WL_INFO(("WLC_E_STATUS_PARTIAL \n"));
+ escan_result = (wl_escan_result_t *) data;
+ if (!escan_result) {
+ WL_ERR(("Invalid escan result (NULL pointer)\n"));
+ goto exit;
+ }
+ if (dtoh16(escan_result->bss_count) != 1) {
+ WL_ERR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count));
+ goto exit;
+ }
+ bi = escan_result->bss_info;
+ if (!bi) {
+ WL_ERR(("Invalid escan bss info (NULL pointer)\n"));
+ goto exit;
+ }
+ bi_length = dtoh32(bi->length);
+ if (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) {
+ WL_ERR(("Invalid bss_info length %d: ignoring\n", bi_length));
+ goto exit;
+ }
+
+ if (!(wl_to_wiphy(wl)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) {
+ if (dtoh16(bi->capability) & DOT11_CAP_IBSS) {
+ WL_ERR(("Ignoring IBSS result\n"));
+ goto exit;
+ }
+ }
+
+ if (wl_get_drv_status_all(wl, SENDING_ACT_FRM)) {
+ p2p_dev_addr = wl_cfgp2p_retreive_p2p_dev_addr(bi, bi_length);
+ if (p2p_dev_addr && !memcmp(p2p_dev_addr,
+ wl->afx_hdl->pending_tx_dst_addr.octet, ETHER_ADDR_LEN)) {
+ s32 channel = CHSPEC_CHANNEL(
+ wl_chspec_driver_to_host(bi->chanspec));
+ WL_DBG(("ACTION FRAME SCAN : Peer found, channel : %d\n", channel));
+ wl_clr_p2p_status(wl, SCANNING);
+ wl->afx_hdl->peer_chan = channel;
+ complete(&wl->act_frm_scan);
+ goto exit;
+ }
+
+ } else {
+ list = (wl_scan_results_t *)wl->escan_info.escan_buf;
+ if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
+ WL_ERR(("Buffer is too small: ignoring\n"));
+ goto exit;
+ }
+#define WLC_BSS_RSSI_ON_CHANNEL 0x0002
+ for (i = 0; i < list->count; i++) {
+ bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length))
+ : list->bss_info;
+
+ if (!bcmp(&bi->BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
+ (CHSPEC_BAND(wl_chspec_driver_to_host(bi->chanspec))
+ == CHSPEC_BAND(wl_chspec_driver_to_host(bss->chanspec))) &&
+ bi->SSID_len == bss->SSID_len &&
+ !bcmp(bi->SSID, bss->SSID, bi->SSID_len)) {
+ if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) ==
+ (bi->flags & WLC_BSS_RSSI_ON_CHANNEL)) {
+ /* preserve max RSSI if the measurements are
+ * both on-channel or both off-channel
+ */
+ bss->RSSI = MAX(bss->RSSI, bi->RSSI);
+ } else if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) &&
+ (bi->flags & WLC_BSS_RSSI_ON_CHANNEL) == 0) {
+ /* preserve the on-channel rssi measurement
+ * if the new measurement is off channel
+ */
+ bss->RSSI = bi->RSSI;
+ bss->flags |= WLC_BSS_RSSI_ON_CHANNEL;
+ }
+
+ goto exit;
+ }
+ }
+ memcpy(&(wl->escan_info.escan_buf[list->buflen]), bi, bi_length);
+ list->version = dtoh32(bi->version);
+ list->buflen += bi_length;
+ list->count++;
+
+ }
+
+ }
+ else if (status == WLC_E_STATUS_SUCCESS) {
+ wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ if (wl_get_drv_status_all(wl, SENDING_ACT_FRM)) {
+ WL_INFO(("ACTION FRAME SCAN DONE\n"));
+ wl_clr_p2p_status(wl, SCANNING);
+ wl_clr_drv_status(wl, SCANNING, wl->afx_hdl->dev);
+ if (wl->afx_hdl->peer_chan == WL_INVALID)
+ complete(&wl->act_frm_scan);
+ } else if (likely(wl->scan_request)) {
+ mutex_lock(&wl->usr_sync);
+ WL_INFO(("ESCAN COMPLETED\n"));
+ wl->bss_list = (wl_scan_results_t *)wl->escan_info.escan_buf;
+ wl_inform_bss(wl);
+ wl_notify_escan_complete(wl, ndev, false, false);
+ mutex_unlock(&wl->usr_sync);
+ }
+ }
+ else if (status == WLC_E_STATUS_ABORT) {
+ wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ if (wl_get_drv_status_all(wl, SENDING_ACT_FRM)) {
+ WL_INFO(("ACTION FRAME SCAN DONE\n"));
+ wl_clr_drv_status(wl, SCANNING, wl->afx_hdl->dev);
+ wl_clr_p2p_status(wl, SCANNING);
+ if (wl->afx_hdl->peer_chan == WL_INVALID)
+ complete(&wl->act_frm_scan);
+ } else if (likely(wl->scan_request)) {
+ mutex_lock(&wl->usr_sync);
+ WL_INFO(("ESCAN ABORTED\n"));
+ wl->bss_list = (wl_scan_results_t *)wl->escan_info.escan_buf;
+ wl_inform_bss(wl);
+ wl_notify_escan_complete(wl, ndev, true, false);
+ mutex_unlock(&wl->usr_sync);
+ }
+ }
+ else {
+ WL_ERR(("unexpected Escan Event %d : abort\n", status));
+ wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ if (wl_get_drv_status_all(wl, SENDING_ACT_FRM)) {
+ WL_INFO(("ACTION FRAME SCAN DONE\n"));
+ wl_clr_p2p_status(wl, SCANNING);
+ wl_clr_drv_status(wl, SCANNING, wl->afx_hdl->dev);
+ if (wl->afx_hdl->peer_chan == WL_INVALID)
+ complete(&wl->act_frm_scan);
+ } else if (likely(wl->scan_request)) {
+ mutex_lock(&wl->usr_sync);
+ wl->bss_list = (wl_scan_results_t *)wl->escan_info.escan_buf;
+ wl_inform_bss(wl);
+ wl_notify_escan_complete(wl, ndev, true, false);
+ mutex_unlock(&wl->usr_sync);
+ }
+ }
+exit:
+ return err;
+}
+
+static s32 wl_init_scan(struct wl_priv *wl)
+{
+ struct wl_iscan_ctrl *iscan = wl_to_iscan(wl);
+ int err = 0;
+
+ if (wl->iscan_on) {
+ iscan->dev = wl_to_prmry_ndev(wl);
+ iscan->state = WL_ISCAN_STATE_IDLE;
+ wl_init_iscan_handler(iscan);
+ iscan->timer_ms = WL_ISCAN_TIMER_INTERVAL_MS;
+ init_timer(&iscan->timer);
+ iscan->timer.data = (unsigned long) iscan;
+ iscan->timer.function = wl_iscan_timer;
+ sema_init(&iscan->sync, 0);
+ iscan->tsk = kthread_run(wl_iscan_thread, iscan, "wl_iscan");
+ if (IS_ERR(iscan->tsk)) {
+ WL_ERR(("Could not create iscan thread\n"));
+ iscan->tsk = NULL;
+ return -ENOMEM;
+ }
+ iscan->data = wl;
+ } else if (wl->escan_on) {
+ wl->evt_handler[WLC_E_ESCAN_RESULT] = wl_escan_handler;
+ wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ }
+ /* Init scan_timeout timer */
+ init_timer(&wl->scan_timeout);
+ wl->scan_timeout.data = (unsigned long) wl;
+ wl->scan_timeout.function = wl_scan_timeout;
+
+ return err;
+}
+
+static s32 wl_init_priv(struct wl_priv *wl)
+{
+ struct wiphy *wiphy = wl_to_wiphy(wl);
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+ s32 err = 0;
+
+ wl->scan_request = NULL;
+ wl->pwr_save = !!(wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT);
+ wl->iscan_on = false;
+ wl->escan_on = true;
+ wl->roam_on = false;
+ wl->iscan_kickstart = false;
+ wl->active_scan = true;
+ wl->rf_blocked = false;
+ spin_lock_init(&wl->cfgdrv_lock);
+ mutex_init(&wl->ioctl_buf_sync);
+ init_waitqueue_head(&wl->netif_change_event);
+ wl_init_eq(wl);
+ err = wl_init_priv_mem(wl);
+ if (err)
+ return err;
+ if (wl_create_event_handler(wl))
+ return -ENOMEM;
+ wl_init_event_handler(wl);
+ mutex_init(&wl->usr_sync);
+ err = wl_init_scan(wl);
+ if (err)
+ return err;
+ wl_init_conf(wl->conf);
+ wl_init_prof(wl, ndev);
+ wl_link_down(wl);
+ DNGL_FUNC(dhd_cfg80211_init, (wl));
+
+ return err;
+}
+
+static void wl_deinit_priv(struct wl_priv *wl)
+{
+ DNGL_FUNC(dhd_cfg80211_deinit, (wl));
+ wl_destroy_event_handler(wl);
+ wl_flush_eq(wl);
+ wl_link_down(wl);
+ del_timer_sync(&wl->scan_timeout);
+ wl_term_iscan(wl);
+ wl_deinit_priv_mem(wl);
+ unregister_netdevice_notifier(&wl_cfg80211_netdev_notifier);
+}
+
+#if defined(WLP2P) && defined(WL_ENABLE_P2P_IF)
+static s32 wl_cfg80211_attach_p2p(void)
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+
+ WL_TRACE(("Enter \n"));
+
+ if (wl_cfgp2p_register_ndev(wl) < 0) {
+ WL_ERR(("%s: P2P attach failed. \n", __func__));
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static s32 wl_cfg80211_detach_p2p(void)
+{
+	struct wl_priv *wl = wlcfg_drv_priv;
+	struct wireless_dev *wdev;
+
+	WL_DBG(("Enter \n"));
+	if (!wl || !wl->p2p_wdev) {
+		WL_ERR(("Invalid Ptr\n"));
+		return -EINVAL;
+	}
+	wdev = wl->p2p_wdev;
+
+ wl_cfgp2p_unregister_ndev(wl);
+
+ wl->p2p_wdev = NULL;
+ wl->p2p_net = NULL;
+	WL_DBG(("Freeing %p \n", wdev));
+ kfree(wdev);
+
+ return 0;
+}
+#endif /* defined(WLP2P) && defined(WL_ENABLE_P2P_IF) */
+
+s32 wl_cfg80211_attach_post(struct net_device *ndev)
+{
+ struct wl_priv * wl = NULL;
+ s32 err = 0;
+ WL_TRACE(("In\n"));
+ if (unlikely(!ndev)) {
+		WL_ERR(("ndev is invalid\n"));
+ return -ENODEV;
+ }
+ wl = wlcfg_drv_priv;
+ if (wl && !wl_get_drv_status(wl, READY, ndev)) {
+ if (wl->wdev &&
+ wl_cfgp2p_supported(wl, ndev)) {
+#if !defined(WL_ENABLE_P2P_IF)
+ wl->wdev->wiphy->interface_modes |=
+ (BIT(NL80211_IFTYPE_P2P_CLIENT)|
+ BIT(NL80211_IFTYPE_P2P_GO));
+#endif
+ if ((err = wl_cfgp2p_init_priv(wl)) != 0)
+ goto fail;
+
+#if defined(WLP2P) && defined(WL_ENABLE_P2P_IF)
+ if (wl->p2p_net) {
+ /* Update MAC addr for p2p0 interface here. */
+ memcpy(wl->p2p_net->dev_addr, ndev->dev_addr, ETH_ALEN);
+ wl->p2p_net->dev_addr[0] |= 0x02;
+ printk("%s: p2p_dev_addr="MACSTR "\n",
+ wl->p2p_net->name, MAC2STR(wl->p2p_net->dev_addr));
+ } else {
+ WL_ERR(("p2p_net not yet populated."
+ " Couldn't update the MAC Address for p2p0 \n"));
+ return -ENODEV;
+ }
+#endif /* defined(WLP2P) && (WL_ENABLE_P2P_IF) */
+
+ wl->p2p_supported = true;
+ }
+ } else
+ return -ENODEV;
+ wl_set_drv_status(wl, READY, ndev);
+fail:
+ return err;
+}
+
+s32 wl_cfg80211_attach(struct net_device *ndev, void *data)
+{
+ struct wireless_dev *wdev;
+ struct wl_priv *wl;
+ s32 err = 0;
+ struct device *dev;
+
+ WL_TRACE(("In\n"));
+ if (!ndev) {
+		WL_ERR(("ndev is invalid\n"));
+ return -ENODEV;
+ }
+ WL_DBG(("func %p\n", wl_cfg80211_get_parent_dev()));
+ dev = wl_cfg80211_get_parent_dev();
+
+ wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+ if (unlikely(!wdev)) {
+ WL_ERR(("Could not allocate wireless device\n"));
+ return -ENOMEM;
+ }
+ err = wl_setup_wiphy(wdev, dev);
+ if (unlikely(err)) {
+ kfree(wdev);
+ return -ENOMEM;
+ }
+ wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS);
+ wl = (struct wl_priv *)wiphy_priv(wdev->wiphy);
+ wl->wdev = wdev;
+ wl->pub = data;
+ INIT_LIST_HEAD(&wl->net_list);
+ ndev->ieee80211_ptr = wdev;
+ SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
+ wdev->netdev = ndev;
+ err = wl_alloc_netinfo(wl, ndev, wdev, WL_MODE_BSS);
+ if (err) {
+ WL_ERR(("Failed to alloc net_info (%d)\n", err));
+ goto cfg80211_attach_out;
+ }
+ err = wl_init_priv(wl);
+ if (err) {
+		WL_ERR(("Failed to init wl_priv (%d)\n", err));
+ goto cfg80211_attach_out;
+ }
+
+ err = wl_setup_rfkill(wl, TRUE);
+ if (err) {
+ WL_ERR(("Failed to setup rfkill %d\n", err));
+ goto cfg80211_attach_out;
+ }
+ err = register_netdevice_notifier(&wl_cfg80211_netdev_notifier);
+ if (err) {
+		WL_ERR(("Failed to register netdev notifier %d\n", err));
+ goto cfg80211_attach_out;
+ }
+#if defined(COEX_DHCP)
+ if (wl_cfg80211_btcoex_init(wl))
+ goto cfg80211_attach_out;
+#endif
+
+ wlcfg_drv_priv = wl;
+
+#if defined(WLP2P) && defined(WL_ENABLE_P2P_IF)
+ err = wl_cfg80211_attach_p2p();
+ if (err)
+ goto cfg80211_attach_out;
+#endif
+
+ return err;
+
+cfg80211_attach_out:
+ err = wl_setup_rfkill(wl, FALSE);
+ wl_free_wdev(wl);
+ return err;
+}
+
+void wl_cfg80211_detach(void *para)
+{
+ struct wl_priv *wl;
+
+ (void)para;
+ wl = wlcfg_drv_priv;
+
+ WL_TRACE(("In\n"));
+
+#if defined(COEX_DHCP)
+ wl_cfg80211_btcoex_deinit(wl);
+#endif
+
+#if defined(WLP2P) && defined(WL_ENABLE_P2P_IF)
+ wl_cfg80211_detach_p2p();
+#endif
+ wl_setup_rfkill(wl, FALSE);
+ if (wl->p2p_supported)
+ wl_cfgp2p_deinit_priv(wl);
+ wl_deinit_priv(wl);
+ wlcfg_drv_priv = NULL;
+ wl_cfg80211_clear_parent_dev();
+ wl_free_wdev(wl);
+	/* PLEASE do NOT call any function after wl_free_wdev(): the driver's private
+	 * structure "wl", which is the private part of the wiphy, has been freed by
+	 * wl_free_wdev().
+	 */
+}
+
+static void wl_wakeup_event(struct wl_priv *wl)
+{
+ if (wl->event_tsk.thr_pid >= 0) {
+ DHD_OS_WAKE_LOCK(wl->pub);
+ up(&wl->event_tsk.sema);
+ }
+}
+
+static int wl_is_p2p_event(struct wl_event_q *e)
+{
+ switch (e->etype) {
+	/* We have to separate out the P2P events received
+	 * on the primary interface so that they can be sent up
+	 * via the p2p0 interface.
+	 */
+ case WLC_E_P2P_PROBREQ_MSG:
+ case WLC_E_P2P_DISC_LISTEN_COMPLETE:
+ case WLC_E_ACTION_FRAME_RX:
+ case WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE:
+ case WLC_E_ACTION_FRAME_COMPLETE:
+
+ if (e->emsg.ifidx != 0) {
+ WL_TRACE(("P2P Event on Virtual I/F (ifidx:%d) \n",
+ e->emsg.ifidx));
+			/* We only care about P2P events received on the
+			 * primary interface. For the rest, return false so
+			 * that they are sent over the interface corresponding
+			 * to the ifidx.
+			 */
+ return FALSE;
+ } else {
+			WL_TRACE(("P2P Event on Primary I/F (ifidx:%d)."
+				" Sending it to p2p0 \n", e->emsg.ifidx));
+ return TRUE;
+ }
+ break;
+
+ default:
+ WL_TRACE(("NON-P2P Event %d on ifidx (ifidx:%d) \n",
+ e->etype, e->emsg.ifidx));
+ return FALSE;
+ }
+}
+
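+/*
+ * Event dispatch thread: dequeues firmware events, maps each to the correct
+ * net_device (p2p0 for P2P device events on the primary interface) and
+ * invokes the registered per-event handler.
+ */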
+static s32 wl_event_handler(void *data)
+{
+ struct net_device *netdev;
+ struct wl_priv *wl = NULL;
+ struct wl_event_q *e;
+ tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+
+ wl = (struct wl_priv *)tsk->parent;
+ DAEMONIZE("dhd_cfg80211_event");
+ complete(&tsk->completed);
+
+ while (down_interruptible (&tsk->sema) == 0) {
+ SMP_RD_BARRIER_DEPENDS();
+ if (tsk->terminated)
+ break;
+ while ((e = wl_deq_event(wl))) {
+ WL_DBG(("event type (%d), if idx: %d\n", e->etype, e->emsg.ifidx));
+			/* All P2P device address related events come on the primary
+			 * interface since there is no corresponding bsscfg for the P2P
+			 * interface. Map them to the p2p0 interface.
+			 */
+ if ((wl_is_p2p_event(e) == TRUE) && (wl->p2p_net)) {
+ netdev = wl->p2p_net;
+ } else {
+ netdev = dhd_idx2net((struct dhd_pub *)(wl->pub), e->emsg.ifidx);
+ }
+ if (!netdev)
+ netdev = wl_to_prmry_ndev(wl);
+ if (e->etype < WLC_E_LAST && wl->evt_handler[e->etype]) {
+ wl->evt_handler[e->etype] (wl, netdev, &e->emsg, e->edata);
+ } else {
+ WL_DBG(("Unknown Event (%d): ignoring\n", e->etype));
+ }
+ wl_put_event(e);
+ }
+ DHD_OS_WAKE_UNLOCK(wl->pub);
+ }
+ WL_ERR(("%s was terminated\n", __func__));
+ complete_and_exit(&tsk->completed, 0);
+ return 0;
+}
+
+void
+wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t * e, void *data)
+{
+ u32 event_type = ntoh32(e->event_type);
+ struct wl_priv *wl = wlcfg_drv_priv;
+
+#if (WL_DBG_LEVEL > 0)
+ s8 *estr = (event_type <= sizeof(wl_dbg_estr) / WL_DBG_ESTR_MAX - 1) ?
+ wl_dbg_estr[event_type] : (s8 *) "Unknown";
+ WL_DBG(("event_type (%d):" "WLC_E_" "%s\n", event_type, estr));
+#endif /* (WL_DBG_LEVEL > 0) */
+
+ if (event_type == WLC_E_PFN_NET_FOUND) {
+ WL_DBG((" PNOEVENT: PNO_NET_FOUND\n"));
+ }
+ else if (event_type == WLC_E_PFN_NET_LOST) {
+ WL_DBG((" PNOEVENT: PNO_NET_LOST\n"));
+ }
+
+ if (likely(!wl_enq_event(wl, ndev, event_type, e, data)))
+ wl_wakeup_event(wl);
+}
+
+static void wl_init_eq(struct wl_priv *wl)
+{
+ wl_init_eq_lock(wl);
+ INIT_LIST_HEAD(&wl->eq_list);
+}
+
+static void wl_flush_eq(struct wl_priv *wl)
+{
+ struct wl_event_q *e;
+ unsigned long flags;
+
+ flags = wl_lock_eq(wl);
+ while (!list_empty(&wl->eq_list)) {
+ e = list_first_entry(&wl->eq_list, struct wl_event_q, eq_list);
+ list_del(&e->eq_list);
+ kfree(e);
+ }
+ wl_unlock_eq(wl, flags);
+}
+
+/*
+* retrieve first queued event from head
+*/
+
+static struct wl_event_q *wl_deq_event(struct wl_priv *wl)
+{
+ struct wl_event_q *e = NULL;
+ unsigned long flags;
+
+ flags = wl_lock_eq(wl);
+ if (likely(!list_empty(&wl->eq_list))) {
+ e = list_first_entry(&wl->eq_list, struct wl_event_q, eq_list);
+ list_del(&e->eq_list);
+ }
+ wl_unlock_eq(wl, flags);
+
+ return e;
+}
+
+/*
+ * push event to tail of the queue
+ */
+
+static s32
+wl_enq_event(struct wl_priv *wl, struct net_device *ndev, u32 event, const wl_event_msg_t *msg,
+ void *data)
+{
+ struct wl_event_q *e;
+ s32 err = 0;
+ uint32 evtq_size;
+ uint32 data_len;
+ unsigned long flags;
+ gfp_t aflags;
+
+ data_len = 0;
+ if (data)
+ data_len = ntoh32(msg->datalen);
+ evtq_size = sizeof(struct wl_event_q) + data_len;
+ aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+ e = kzalloc(evtq_size, aflags);
+ if (unlikely(!e)) {
+ WL_ERR(("event alloc failed\n"));
+ return -ENOMEM;
+ }
+ e->etype = event;
+ memcpy(&e->emsg, msg, sizeof(wl_event_msg_t));
+ if (data)
+ memcpy(e->edata, data, data_len);
+ flags = wl_lock_eq(wl);
+ list_add_tail(&e->eq_list, &wl->eq_list);
+ wl_unlock_eq(wl, flags);
+
+ return err;
+}
+
+static void wl_put_event(struct wl_event_q *e)
+{
+ kfree(e);
+}
+
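+/*
+ * Map an nl80211 interface type to the driver mode and program the matching
+ * infrastructure setting into the firmware via WLC_SET_INFRA.
+ */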
+static s32 wl_config_ifmode(struct wl_priv *wl, struct net_device *ndev, s32 iftype)
+{
+ s32 infra = 0;
+ s32 err = 0;
+ s32 mode = 0;
+ switch (iftype) {
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_WDS:
+ WL_ERR(("type (%d) : currently we do not support this mode\n",
+ iftype));
+ err = -EINVAL;
+ return err;
+ case NL80211_IFTYPE_ADHOC:
+ mode = WL_MODE_IBSS;
+ break;
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ mode = WL_MODE_BSS;
+ infra = 1;
+ break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ mode = WL_MODE_AP;
+ infra = 1;
+ break;
+ default:
+ err = -EINVAL;
+ WL_ERR(("invalid type (%d)\n", iftype));
+ return err;
+ }
+ infra = htod32(infra);
+ err = wldev_ioctl(ndev, WLC_SET_INFRA, &infra, sizeof(infra), true);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SET_INFRA error (%d)\n", err));
+ return err;
+ }
+
+ wl_set_mode_by_netdev(wl, ndev, mode);
+
+ return 0;
+}
+
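+/*
+ * Enable (add) or disable a single firmware event in the "event_msgs"
+ * bitmask using a read-modify-write of the iovar.
+ */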
+static s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add)
+{
+ s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
+
+ s8 eventmask[WL_EVENTING_MASK_LEN];
+ s32 err = 0;
+
+ /* Setup event_msgs */
+ bcm_mkiovar("event_msgs", NULL, 0, iovbuf,
+ sizeof(iovbuf));
+ err = wldev_ioctl(ndev, WLC_GET_VAR, iovbuf, sizeof(iovbuf), false);
+ if (unlikely(err)) {
+ WL_ERR(("Get event_msgs error (%d)\n", err));
+ goto eventmsg_out;
+ }
+ memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN);
+ if (add) {
+ setbit(eventmask, event);
+ } else {
+ clrbit(eventmask, event);
+ }
+ bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
+ sizeof(iovbuf));
+ err = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), true);
+ if (unlikely(err)) {
+ WL_ERR(("Set event_msgs error (%d)\n", err));
+ goto eventmsg_out;
+ }
+
+eventmsg_out:
+ return err;
+
+}
+
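+/*
+ * Query the firmware band list and 802.11n capabilities (nmode/mimo_bw_cap)
+ * and populate the wiphy band and HT capability structures accordingly.
+ */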
+s32 wl_update_wiphybands(struct wl_priv *wl)
+{
+ struct wiphy *wiphy;
+ u32 bandlist[3];
+ u32 nband = 0;
+ u32 i = 0;
+ s32 err = 0;
+ int nmode = 0;
+ int bw_40 = 0;
+ int index = 0;
+
+ WL_DBG(("Entry"));
+ memset(bandlist, 0, sizeof(bandlist));
+ err = wldev_ioctl(wl_to_prmry_ndev(wl), WLC_GET_BANDLIST, bandlist,
+ sizeof(bandlist), false);
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+ wiphy = wl_to_wiphy(wl);
+ nband = bandlist[0];
+ wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
+ wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
+
+ err = wldev_iovar_getint(wl_to_prmry_ndev(wl), "nmode", &nmode);
+ if (unlikely(err)) {
+ WL_ERR(("error reading nmode (%d)\n", err));
+ }
+ else {
+		/* For nmode only, check the bandwidth capability */
+ err = wldev_iovar_getint(wl_to_prmry_ndev(wl), "mimo_bw_cap", &bw_40);
+ if (unlikely(err)) {
+ WL_ERR(("error get mimo_bw_cap (%d)\n", err));
+ }
+ }
+
+ for (i = 1; i <= nband && i < sizeof(bandlist)/sizeof(u32); i++) {
+ index = -1;
+ if (bandlist[i] == WLC_BAND_5G) {
+ wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &__wl_band_5ghz_a;
+ index = IEEE80211_BAND_5GHZ;
+ } else if (bandlist[i] == WLC_BAND_2G) {
+ wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &__wl_band_2ghz;
+ index = IEEE80211_BAND_2GHZ;
+ }
+
+ if ((index >= 0) && nmode) {
+ wiphy->bands[index]->ht_cap.cap =
+ IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_DSSSCCK40
+ | IEEE80211_HT_CAP_MAX_AMSDU;
+ wiphy->bands[index]->ht_cap.ht_supported = TRUE;
+ wiphy->bands[index]->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ wiphy->bands[index]->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+ }
+
+ if ((index >= 0) && bw_40) {
+ wiphy->bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+ }
+ }
+
+ wiphy_apply_custom_regulatory(wiphy, &brcm_regdom);
+ return err;
+}
+
+static s32 __wl_cfg80211_up(struct wl_priv *wl)
+{
+ s32 err = 0;
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+
+ WL_DBG(("In\n"));
+
+ err = dhd_config_dongle(wl, false);
+ if (unlikely(err))
+ return err;
+
+ err = wl_config_ifmode(wl, ndev, wdev->iftype);
+ if (unlikely(err && err != -EINPROGRESS)) {
+ WL_ERR(("wl_config_ifmode failed\n"));
+ }
+ err = wl_update_wiphybands(wl);
+ if (unlikely(err)) {
+ WL_ERR(("wl_update_wiphybands failed\n"));
+ }
+
+ err = dhd_monitor_init(wl->pub);
+ err = wl_invoke_iscan(wl);
+ wl_set_drv_status(wl, READY, ndev);
+ return err;
+}
+
+static s32 __wl_cfg80211_down(struct wl_priv *wl)
+{
+ s32 err = 0;
+ unsigned long flags;
+ struct net_info *iter, *next;
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+
+ WL_DBG(("In\n"));
+ /* Check if cfg80211 interface is already down */
+ if (!wl_get_drv_status(wl, READY, ndev))
+ return err; /* it is even not ready */
+ for_each_ndev(wl, iter, next)
+ wl_set_drv_status(wl, SCAN_ABORTING, iter->ndev);
+
+ wl_term_iscan(wl);
+ spin_lock_irqsave(&wl->cfgdrv_lock, flags);
+ if (wl->scan_request) {
+ cfg80211_scan_done(wl->scan_request, true);
+ wl->scan_request = NULL;
+ }
+ for_each_ndev(wl, iter, next) {
+ wl_clr_drv_status(wl, READY, iter->ndev);
+ wl_clr_drv_status(wl, SCANNING, iter->ndev);
+ wl_clr_drv_status(wl, SCAN_ABORTING, iter->ndev);
+ wl_clr_drv_status(wl, CONNECTING, iter->ndev);
+ wl_clr_drv_status(wl, CONNECTED, iter->ndev);
+ wl_clr_drv_status(wl, DISCONNECTING, iter->ndev);
+ wl_clr_drv_status(wl, AP_CREATED, iter->ndev);
+ wl_clr_drv_status(wl, AP_CREATING, iter->ndev);
+ }
+ wl_to_prmry_ndev(wl)->ieee80211_ptr->iftype =
+ NL80211_IFTYPE_STATION;
+ spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
+
+ DNGL_FUNC(dhd_cfg80211_down, (wl));
+ wl_flush_eq(wl);
+ wl_link_down(wl);
+ if (wl->p2p_supported)
+ wl_cfgp2p_down(wl);
+ dhd_monitor_uninit();
+
+ return err;
+}
+
+s32 wl_cfg80211_up(void *para)
+{
+ struct wl_priv *wl;
+ s32 err = 0;
+ int val = 1;
+
+ (void)para;
+ WL_DBG(("In\n"));
+ wl = wlcfg_drv_priv;
+
+	if ((err = wldev_ioctl(wl_to_prmry_ndev(wl), WLC_GET_VERSION, &val,
+		sizeof(int), false)) < 0) {
+ WL_ERR(("WLC_GET_VERSION failed, err=%d\n", err));
+ return err;
+ }
+ val = dtoh32(val);
+ if (val != WLC_IOCTL_VERSION && val != 1) {
+ WL_ERR(("Version mismatch, please upgrade. Got %d, expected %d or 1\n",
+ val, WLC_IOCTL_VERSION));
+ return BCME_VERSION;
+ }
+ ioctl_version = val;
+ WL_ERR(("WLC_GET_VERSION=%d\n", ioctl_version));
+
+ mutex_lock(&wl->usr_sync);
+ wl_cfg80211_attach_post(wl_to_prmry_ndev(wl));
+ err = __wl_cfg80211_up(wl);
+ if (err)
+ WL_ERR(("__wl_cfg80211_up failed\n"));
+ mutex_unlock(&wl->usr_sync);
+
+ return err;
+}
+
+/* Private Event to Supplicant with indication that chip hangs */
+int wl_cfg80211_hang(struct net_device *dev, u16 reason)
+{
+ struct wl_priv *wl;
+ wl = wlcfg_drv_priv;
+
+ WL_ERR(("In : chip crash eventing\n"));
+ cfg80211_disconnected(dev, reason, NULL, 0, GFP_KERNEL);
+ if (wl != NULL) {
+ wl_link_down(wl);
+ }
+ return 0;
+}
+
+s32 wl_cfg80211_down(void *para)
+{
+ struct wl_priv *wl;
+ s32 err = 0;
+
+ (void)para;
+ WL_DBG(("In\n"));
+ wl = wlcfg_drv_priv;
+ mutex_lock(&wl->usr_sync);
+ err = __wl_cfg80211_down(wl);
+ mutex_unlock(&wl->usr_sync);
+
+ return err;
+}
+
+static void *wl_read_prof(struct wl_priv *wl, struct net_device *ndev, s32 item)
+{
+ unsigned long flags;
+ void *rptr = NULL;
+ struct wl_profile *profile = wl_get_profile_by_netdev(wl, ndev);
+
+ if (!profile)
+ return NULL;
+ spin_lock_irqsave(&wl->cfgdrv_lock, flags);
+ switch (item) {
+ case WL_PROF_SEC:
+ rptr = &profile->sec;
+ break;
+ case WL_PROF_ACT:
+ rptr = &profile->active;
+ break;
+ case WL_PROF_BSSID:
+ rptr = profile->bssid;
+ break;
+ case WL_PROF_SSID:
+ rptr = &profile->ssid;
+ break;
+ }
+ spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
+ if (!rptr)
+ WL_ERR(("invalid item (%d)\n", item));
+ return rptr;
+}
+
+static s32
+wl_update_prof(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data, s32 item)
+{
+ s32 err = 0;
+ struct wlc_ssid *ssid;
+ unsigned long flags;
+ struct wl_profile *profile = wl_get_profile_by_netdev(wl, ndev);
+
+ if (!profile)
+ return WL_INVALID;
+ spin_lock_irqsave(&wl->cfgdrv_lock, flags);
+ switch (item) {
+ case WL_PROF_SSID:
+ ssid = (wlc_ssid_t *) data;
+ memset(profile->ssid.SSID, 0,
+ sizeof(profile->ssid.SSID));
+ memcpy(profile->ssid.SSID, ssid->SSID, ssid->SSID_len);
+ profile->ssid.SSID_len = ssid->SSID_len;
+ break;
+ case WL_PROF_BSSID:
+ if (data)
+ memcpy(profile->bssid, data, ETHER_ADDR_LEN);
+ else
+ memset(profile->bssid, 0, ETHER_ADDR_LEN);
+ break;
+ case WL_PROF_SEC:
+ memcpy(&profile->sec, data, sizeof(profile->sec));
+ break;
+ case WL_PROF_ACT:
+ profile->active = *(bool *)data;
+ break;
+ case WL_PROF_BEACONINT:
+ profile->beacon_interval = *(u16 *)data;
+ break;
+ case WL_PROF_DTIMPERIOD:
+ profile->dtim_period = *(u8 *)data;
+ break;
+ default:
+ WL_ERR(("unsupported item (%d)\n", item));
+ err = -EOPNOTSUPP;
+ break;
+ }
+ spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
+ return err;
+}
+
+void wl_cfg80211_dbg_level(u32 level)
+{
+	/*
+	 * Changing the debug level via an insmod parameter is prohibited;
+	 * eventually the debug level will be configured at compile time
+	 * via CONFIG_XXX.
+	 */
+ /* wl_dbg_level = level; */
+}
+
+static bool wl_is_ibssmode(struct wl_priv *wl, struct net_device *ndev)
+{
+ return wl_get_mode_by_netdev(wl, ndev) == WL_MODE_IBSS;
+}
+
+static __used bool wl_is_ibssstarter(struct wl_priv *wl)
+{
+ return wl->ibss_starter;
+}
+
+static void wl_rst_ie(struct wl_priv *wl)
+{
+ struct wl_ie *ie = wl_to_ie(wl);
+
+ ie->offset = 0;
+}
+
+static __used s32 wl_add_ie(struct wl_priv *wl, u8 t, u8 l, u8 *v)
+{
+ struct wl_ie *ie = wl_to_ie(wl);
+ s32 err = 0;
+
+ if (unlikely(ie->offset + l + 2 > WL_TLV_INFO_MAX)) {
+		WL_ERR(("ie crosses buffer boundary\n"));
+ return -ENOSPC;
+ }
+ ie->buf[ie->offset] = t;
+ ie->buf[ie->offset + 1] = l;
+ memcpy(&ie->buf[ie->offset + 2], v, l);
+ ie->offset += l + 2;
+
+ return err;
+}
+
+static s32 wl_mrg_ie(struct wl_priv *wl, u8 *ie_stream, u16 ie_size)
+{
+ struct wl_ie *ie = wl_to_ie(wl);
+ s32 err = 0;
+
+ if (unlikely(ie->offset + ie_size > WL_TLV_INFO_MAX)) {
+		WL_ERR(("ie_stream crosses buffer boundary\n"));
+ return -ENOSPC;
+ }
+ memcpy(&ie->buf[ie->offset], ie_stream, ie_size);
+ ie->offset += ie_size;
+
+ return err;
+}
+
+static s32 wl_cp_ie(struct wl_priv *wl, u8 *dst, u16 dst_size)
+{
+ struct wl_ie *ie = wl_to_ie(wl);
+ s32 err = 0;
+
+ if (unlikely(ie->offset > dst_size)) {
+ WL_ERR(("dst_size is not enough\n"));
+ return -ENOSPC;
+ }
+ memcpy(dst, &ie->buf[0], ie->offset);
+
+ return err;
+}
+
+static u32 wl_get_ielen(struct wl_priv *wl)
+{
+ struct wl_ie *ie = wl_to_ie(wl);
+
+ return ie->offset;
+}
+
+static void wl_link_up(struct wl_priv *wl)
+{
+ wl->link_up = true;
+}
+
+static void wl_link_down(struct wl_priv *wl)
+{
+ struct wl_connect_info *conn_info = wl_to_conn(wl);
+
+ WL_DBG(("In\n"));
+ wl->link_up = false;
+ conn_info->req_ie_len = 0;
+ conn_info->resp_ie_len = 0;
+}
+
+static unsigned long wl_lock_eq(struct wl_priv *wl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wl->eq_lock, flags);
+ return flags;
+}
+
+static void wl_unlock_eq(struct wl_priv *wl, unsigned long flags)
+{
+ spin_unlock_irqrestore(&wl->eq_lock, flags);
+}
+
+static void wl_init_eq_lock(struct wl_priv *wl)
+{
+ spin_lock_init(&wl->eq_lock);
+}
+
+static void wl_delay(u32 ms)
+{
+ if (ms < 1000 / HZ) {
+ cond_resched();
+ mdelay(ms);
+ } else {
+ msleep(ms);
+ }
+}
+
+s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr)
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+ struct ether_addr p2pif_addr;
+ struct ether_addr primary_mac;
+ if (!wl->p2p)
+ return -1;
+ if (!p2p_is_on(wl)) {
+ get_primary_mac(wl, &primary_mac);
+ wl_cfgp2p_generate_bss_mac(&primary_mac, p2pdev_addr, &p2pif_addr);
+ } else {
+ memcpy(p2pdev_addr->octet,
+ wl->p2p->dev_addr.octet, ETHER_ADDR_LEN);
+ }
+
+
+ return 0;
+}
+s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len)
+{
+ struct wl_priv *wl;
+
+ wl = wlcfg_drv_priv;
+
+ return wl_cfgp2p_set_p2p_noa(wl, net, buf, len);
+}
+
+s32 wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len)
+{
+ struct wl_priv *wl;
+ wl = wlcfg_drv_priv;
+
+ return wl_cfgp2p_get_p2p_noa(wl, net, buf, len);
+}
+
+s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len)
+{
+ struct wl_priv *wl;
+ wl = wlcfg_drv_priv;
+
+ return wl_cfgp2p_set_p2p_ps(wl, net, buf, len);
+}
+
+s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *net, char *buf, int len,
+ enum wl_management_type type)
+{
+ struct wl_priv *wl;
+ struct net_device *ndev = NULL;
+ s32 ret = 0;
+ s32 bssidx = 0;
+ s32 pktflag = 0;
+ wl = wlcfg_drv_priv;
+ if (wl->p2p && wl->p2p->vif_created) {
+ ndev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION);
+ bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_CONNECTION);
+ } else if (wl_get_drv_status(wl, AP_CREATING, net) ||
+ wl_get_drv_status(wl, AP_CREATED, net)) {
+ ndev = net;
+ bssidx = 0;
+ }
+ if (ndev != NULL) {
+ switch (type) {
+ case WL_BEACON:
+ pktflag = VNDR_IE_BEACON_FLAG;
+ break;
+ case WL_PROBE_RESP:
+ pktflag = VNDR_IE_PRBRSP_FLAG;
+ break;
+ case WL_ASSOC_RESP:
+ pktflag = VNDR_IE_ASSOCRSP_FLAG;
+ break;
+ }
+ if (pktflag)
+ ret = wl_cfgp2p_set_management_ie(wl, ndev, bssidx, pktflag, buf, len);
+ }
+
+ return ret;
+}
+
+static const struct rfkill_ops wl_rfkill_ops = {
+ .set_block = wl_rfkill_set
+};
+
+static int wl_rfkill_set(void *data, bool blocked)
+{
+ struct wl_priv *wl = (struct wl_priv *)data;
+
+ WL_DBG(("Enter \n"));
+ WL_DBG(("RF %s\n", blocked ? "blocked" : "unblocked"));
+
+ if (!wl)
+ return -EINVAL;
+
+ wl->rf_blocked = blocked;
+
+ return 0;
+}
+
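+/*
+ * Register (setup == true) or unregister and destroy (setup == false) the
+ * rfkill switch for this adapter.
+ */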
+static int wl_setup_rfkill(struct wl_priv *wl, bool setup)
+{
+ s32 err = 0;
+
+ WL_DBG(("Enter \n"));
+ if (!wl)
+ return -EINVAL;
+ if (setup) {
+ wl->rfkill = rfkill_alloc("brcmfmac-wifi",
+ wl_cfg80211_get_parent_dev(),
+ RFKILL_TYPE_WLAN, &wl_rfkill_ops, (void *)wl);
+
+ if (!wl->rfkill) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ err = rfkill_register(wl->rfkill);
+
+ if (err)
+ rfkill_destroy(wl->rfkill);
+ } else {
+ if (!wl->rfkill) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ rfkill_unregister(wl->rfkill);
+ rfkill_destroy(wl->rfkill);
+ }
+
+err_out:
+ return err;
+}
+
+struct device *wl_cfg80211_get_parent_dev(void)
+{
+ return cfg80211_parent_dev;
+}
+
+void wl_cfg80211_set_parent_dev(void *dev)
+{
+ cfg80211_parent_dev = dev;
+}
+
+static void wl_cfg80211_clear_parent_dev(void)
+{
+ cfg80211_parent_dev = NULL;
+}
+
+static void get_primary_mac(struct wl_priv *wl, struct ether_addr *mac)
+{
+ wldev_iovar_getbuf_bsscfg(wl_to_prmry_ndev(wl), "cur_etheraddr", NULL,
+ 0, wl->ioctl_buf, WLC_IOCTL_MAXLEN, 0, &wl->ioctl_buf_sync);
+ memcpy(mac->octet, wl->ioctl_buf, ETHER_ADDR_LEN);
+}
+
+int wl_cfg80211_do_driver_init(struct net_device *net)
+{
+ struct wl_priv *wl = *(struct wl_priv **)netdev_priv(net);
+
+ if (!wl || !wl->wdev)
+ return -EINVAL;
+
+ if (dhd_do_driver_init(wl->wdev->netdev) < 0)
+ return -1;
+
+ return 0;
+}
+
+void wl_cfg80211_enable_trace(int level)
+{
+ wl_dbg_level |= WL_DBG_DBG;
+}
diff --git a/drivers/net/wireless/bcmdhd/wl_cfg80211.h b/drivers/net/wireless/bcmdhd/wl_cfg80211.h
new file mode 100644
index 000000000000..edf8a300daad
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfg80211.h
@@ -0,0 +1,658 @@
+/*
+ * Linux cfg80211 driver
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfg80211.h 316895 2012-02-24 00:05:41Z $
+ */
+
+#ifndef _wl_cfg80211_h_
+#define _wl_cfg80211_h_
+
+#include <linux/wireless.h>
+#include <typedefs.h>
+#include <proto/ethernet.h>
+#include <wlioctl.h>
+#include <linux/wireless.h>
+#include <net/cfg80211.h>
+#include <linux/rfkill.h>
+
+#include <wl_cfgp2p.h>
+
+struct wl_conf;
+struct wl_iface;
+struct wl_priv;
+struct wl_security;
+struct wl_ibss;
+
+
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+#define htodchanspec(i) i
+#define dtohchanspec(i) i
+
+#define WL_DBG_NONE 0
+#define WL_DBG_TRACE (1 << 4)
+#define WL_DBG_SCAN (1 << 3)
+#define WL_DBG_DBG (1 << 2)
+#define WL_DBG_INFO (1 << 1)
+#define WL_DBG_ERR (1 << 0)
+
+/* 0 compiles out WL_DBG() entirely; any non-zero value keeps it available
+ * (runtime filtering is still controlled by wl_dbg_level)
+ */
+#define WL_DBG_LEVEL 0xFF
+
+#define WL_ERR(args) \
+do { \
+ if (wl_dbg_level & WL_DBG_ERR) { \
+ printk(KERN_ERR "CFG80211-ERROR) %s : ", __func__); \
+ printk args; \
+ } \
+} while (0)
+#ifdef WL_INFO
+#undef WL_INFO
+#endif
+#define WL_INFO(args) \
+do { \
+ if (wl_dbg_level & WL_DBG_INFO) { \
+ printk(KERN_ERR "CFG80211-INFO) %s : ", __func__); \
+ printk args; \
+ } \
+} while (0)
+#ifdef WL_SCAN
+#undef WL_SCAN
+#endif
+#define WL_SCAN(args) \
+do { \
+ if (wl_dbg_level & WL_DBG_SCAN) { \
+ printk(KERN_ERR "CFG80211-SCAN) %s :", __func__); \
+ printk args; \
+ } \
+} while (0)
+#ifdef WL_TRACE
+#undef WL_TRACE
+#endif
+#define WL_TRACE(args) \
+do { \
+ if (wl_dbg_level & WL_DBG_TRACE) { \
+ printk(KERN_ERR "CFG80211-TRACE) %s :", __func__); \
+ printk args; \
+ } \
+} while (0)
+#if (WL_DBG_LEVEL > 0)
+#define WL_DBG(args) \
+do { \
+ if (wl_dbg_level & WL_DBG_DBG) { \
+ printk(KERN_ERR "CFG80211-DEBUG) %s :", __func__); \
+ printk args; \
+ } \
+} while (0)
+#else /* !(WL_DBG_LEVEL > 0) */
+#define WL_DBG(args)
+#endif /* (WL_DBG_LEVEL > 0) */
+
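+/* Example usage of the logging macros above (illustrative only; "err" and
+ * "bssidx" stand in for whatever locals are in scope).  The double set of
+ * parentheses lets "args" expand to a complete printk argument list:
+ *
+ *	WL_ERR(("scan failed, err %d\n", err));
+ *	WL_DBG(("enter, bssidx %d\n", bssidx));
+ */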
+
+#define WL_SCAN_RETRY_MAX 3
+#define WL_NUM_PMKIDS_MAX MAXPMKID
+#define WL_SCAN_BUF_MAX (1024 * 8)
+#define WL_TLV_INFO_MAX 1024
+#define WL_SCAN_IE_LEN_MAX 2048
+#define WL_BSS_INFO_MAX 2048
+#define WL_ASSOC_INFO_MAX 512
+#define WL_IOCTL_LEN_MAX 1024
+#define WL_EXTRA_BUF_MAX 2048
+#define WL_ISCAN_BUF_MAX 2048
+#define WL_ISCAN_TIMER_INTERVAL_MS 3000
+#define WL_SCAN_ERSULTS_LAST (WL_SCAN_RESULTS_NO_MEM+1)
+#define WL_AP_MAX 256
+#define WL_FILE_NAME_MAX 256
+#define WL_DWELL_TIME 200
+#define WL_MED_DWELL_TIME 400
+#define WL_LONG_DWELL_TIME 1000
+#define IFACE_MAX_CNT 2
+
+#define WL_SCAN_TIMER_INTERVAL_MS 8000 /* Scan timeout */
+#define WL_CHANNEL_SYNC_RETRY 5
+#define WL_INVALID -1
+
+/* driver status */
+enum wl_status {
+ WL_STATUS_READY = 0,
+ WL_STATUS_SCANNING,
+ WL_STATUS_SCAN_ABORTING,
+ WL_STATUS_CONNECTING,
+ WL_STATUS_CONNECTED,
+ WL_STATUS_DISCONNECTING,
+ WL_STATUS_AP_CREATING,
+ WL_STATUS_AP_CREATED,
+ WL_STATUS_SENDING_ACT_FRM
+};
+
+/* wi-fi mode */
+enum wl_mode {
+ WL_MODE_BSS,
+ WL_MODE_IBSS,
+ WL_MODE_AP
+};
+
+/* driver profile list */
+enum wl_prof_list {
+ WL_PROF_MODE,
+ WL_PROF_SSID,
+ WL_PROF_SEC,
+ WL_PROF_IBSS,
+ WL_PROF_BAND,
+ WL_PROF_BSSID,
+ WL_PROF_ACT,
+ WL_PROF_BEACONINT,
+ WL_PROF_DTIMPERIOD
+};
+
+/* driver iscan state */
+enum wl_iscan_state {
+ WL_ISCAN_STATE_IDLE,
+ WL_ISCAN_STATE_SCANING
+};
+
+/* dongle escan state */
+enum wl_escan_state {
+ WL_ESCAN_STATE_IDLE,
+ WL_ESCAN_STATE_SCANING
+};
+/* fw downloading status */
+enum wl_fw_status {
+ WL_FW_LOADING_DONE,
+ WL_NVRAM_LOADING_DONE
+};
+
+enum wl_management_type {
+ WL_BEACON = 0x1,
+ WL_PROBE_RESP = 0x2,
+ WL_ASSOC_RESP = 0x4
+};
+/* beacon / probe_response */
+struct beacon_proberesp {
+ __le64 timestamp;
+ __le16 beacon_int;
+ __le16 capab_info;
+ u8 variable[0];
+} __attribute__ ((packed));
+
+/* driver configuration */
+struct wl_conf {
+ u32 frag_threshold;
+ u32 rts_threshold;
+ u32 retry_short;
+ u32 retry_long;
+ s32 tx_power;
+ struct ieee80211_channel channel;
+};
+
+typedef s32(*EVENT_HANDLER) (struct wl_priv *wl,
+ struct net_device *ndev, const wl_event_msg_t *e, void *data);
+
+/* bss inform structure for cfg80211 interface */
+struct wl_cfg80211_bss_info {
+ u16 band;
+ u16 channel;
+ s16 rssi;
+ u16 frame_len;
+ u8 frame_buf[1];
+};
+
+/* basic structure of scan request */
+struct wl_scan_req {
+ struct wlc_ssid ssid;
+};
+
+/* basic structure of information element */
+struct wl_ie {
+ u16 offset;
+ u8 buf[WL_TLV_INFO_MAX];
+};
+
+/* event queue for cfg80211 main event */
+struct wl_event_q {
+ struct list_head eq_list;
+ u32 etype;
+ wl_event_msg_t emsg;
+ s8 edata[1];
+};
+
+/* security information with currently associated ap */
+struct wl_security {
+ u32 wpa_versions;
+ u32 auth_type;
+ u32 cipher_pairwise;
+ u32 cipher_group;
+ u32 wpa_auth;
+};
+
+/* ibss information for currently joined ibss network */
+struct wl_ibss {
+ u8 beacon_interval; /* in millisecond */
+ u8 atim; /* in millisecond */
+ s8 join_only;
+ u8 band;
+ u8 channel;
+};
+
+/* wl driver profile */
+struct wl_profile {
+ u32 mode;
+ s32 band;
+ struct wlc_ssid ssid;
+ struct wl_security sec;
+ struct wl_ibss ibss;
+ u8 bssid[ETHER_ADDR_LEN];
+ u16 beacon_interval;
+ u8 dtim_period;
+ bool active;
+};
+
+struct net_info {
+ struct net_device *ndev;
+ struct wireless_dev *wdev;
+ struct wl_profile profile;
+ s32 mode;
+ unsigned long sme_state;
+ struct list_head list; /* list of all net_info structure */
+};
+typedef s32(*ISCAN_HANDLER) (struct wl_priv *wl);
+
+/* iscan controller */
+struct wl_iscan_ctrl {
+ struct net_device *dev;
+ struct timer_list timer;
+ u32 timer_ms;
+ u32 timer_on;
+ s32 state;
+ struct task_struct *tsk;
+ struct semaphore sync;
+ ISCAN_HANDLER iscan_handler[WL_SCAN_ERSULTS_LAST];
+ void *data;
+ s8 ioctl_buf[WLC_IOCTL_SMLEN];
+ s8 scan_buf[WL_ISCAN_BUF_MAX];
+};
+
+/* association information */
+#define MAX_REQ_LINE 1024
+struct wl_connect_info {
+ u8 req_ie[MAX_REQ_LINE];
+ s32 req_ie_len;
+ u8 resp_ie[MAX_REQ_LINE];
+ s32 resp_ie_len;
+};
+
+/* firmware /nvram downloading controller */
+struct wl_fw_ctrl {
+ const struct firmware *fw_entry;
+ unsigned long status;
+ u32 ptr;
+ s8 fw_name[WL_FILE_NAME_MAX];
+ s8 nvram_name[WL_FILE_NAME_MAX];
+};
+
+/* assoc ie length */
+struct wl_assoc_ielen {
+ u32 req_len;
+ u32 resp_len;
+};
+
+/* wpa2 pmk list */
+struct wl_pmk_list {
+ pmkid_list_t pmkids;
+ pmkid_t foo[MAXPMKID - 1];
+};
+
+
+#define ESCAN_BUF_SIZE (64 * 1024)
+
+struct escan_info {
+ u32 escan_state;
+ u8 escan_buf[ESCAN_BUF_SIZE];
+ struct wiphy *wiphy;
+ struct net_device *ndev;
+};
+
+struct ap_info {
+/* Structure to hold WPS and WPA IEs for an AP */
+ u8 probe_res_ie[IE_MAX_LEN];
+ u8 beacon_ie[IE_MAX_LEN];
+ u32 probe_res_ie_len;
+ u32 beacon_ie_len;
+ u8 *wpa_ie;
+ u8 *rsn_ie;
+ u8 *wps_ie;
+ bool security_mode;
+};
+struct btcoex_info {
+ struct timer_list timer;
+ u32 timer_ms;
+ u32 timer_on;
+	u32 ts_dhcp_start;	/* timestamp (ms) recorded at DHCP start */
+	u32 ts_dhcp_ok;		/* timestamp (ms) recorded at DHCP completion */
+	bool	dhcp_done;	/* flag, indicates the host finished DHCP
+					 * before T1/T2 expiration
+					 */
+ s32 bt_state;
+ struct work_struct work;
+ struct net_device *dev;
+};
+
+struct sta_info {
+ /* Structure to hold WPS IE for a STA */
+ u8 probe_req_ie[IE_MAX_LEN];
+ u8 assoc_req_ie[IE_MAX_LEN];
+ u32 probe_req_ie_len;
+ u32 assoc_req_ie_len;
+};
+
+struct afx_hdl {
+ wl_af_params_t *pending_tx_act_frm;
+ struct ether_addr pending_tx_dst_addr;
+ struct net_device *dev;
+ struct work_struct work;
+ u32 bssidx;
+ u32 retry;
+ s32 peer_chan;
+ bool ack_recv;
+};
+
+/* private data of cfg80211 interface */
+struct wl_priv {
+ struct wireless_dev *wdev; /* representing wl cfg80211 device */
+
+ struct wireless_dev *p2p_wdev; /* representing wl cfg80211 device for P2P */
+ struct net_device *p2p_net; /* reference to p2p0 interface */
+
+ struct wl_conf *conf;
+ struct cfg80211_scan_request *scan_request; /* scan request object */
+ EVENT_HANDLER evt_handler[WLC_E_LAST];
+ struct list_head eq_list; /* used for event queue */
+ struct list_head net_list; /* used for struct net_info */
+ spinlock_t eq_lock; /* for event queue synchronization */
+ spinlock_t cfgdrv_lock; /* to protect scan status (and others if needed) */
+ struct completion act_frm_scan;
+	struct mutex usr_sync;	/* mainly for up/down synchronization */
+ struct wl_scan_results *bss_list;
+ struct wl_scan_results *scan_results;
+
+ /* scan request object for internal purpose */
+ struct wl_scan_req *scan_req_int;
+ /* information element object for internal purpose */
+ struct wl_ie ie;
+ struct wl_iscan_ctrl *iscan; /* iscan controller */
+
+ /* association information container */
+ struct wl_connect_info conn_info;
+
+ struct wl_pmk_list *pmk_list; /* wpa2 pmk list */
+ tsk_ctl_t event_tsk; /* task of main event handler thread */
+ void *pub;
+ u32 iface_cnt;
+ u32 channel; /* current channel */
+ bool iscan_on; /* iscan on/off switch */
+ bool iscan_kickstart; /* indicate iscan already started */
+ bool escan_on; /* escan on/off switch */
+ struct escan_info escan_info; /* escan information */
+ bool active_scan; /* current scan mode */
+ bool ibss_starter; /* indicates this sta is ibss starter */
+ bool link_up; /* link/connection up flag */
+
+	/* indicates whether the chip supports power save mode */
+ bool pwr_save;
+ bool roam_on; /* on/off switch for self-roaming */
+ bool scan_tried; /* indicates if first scan attempted */
+ u8 *ioctl_buf; /* ioctl buffer */
+ struct mutex ioctl_buf_sync;
+ u8 *escan_ioctl_buf;
+	u8 *extra_buf;	/* mainly to grab assoc information */
+ struct dentry *debugfsdir;
+ struct rfkill *rfkill;
+ bool rf_blocked;
+ struct ieee80211_channel remain_on_chan;
+ enum nl80211_channel_type remain_on_chan_type;
+ u64 send_action_id;
+ u64 last_roc_id;
+ wait_queue_head_t netif_change_event;
+ struct afx_hdl *afx_hdl;
+ struct ap_info *ap_info;
+ struct sta_info *sta_info;
+ struct p2p_info *p2p;
+ bool p2p_supported;
+ struct btcoex_info *btcoex_info;
+	struct timer_list scan_timeout;   /* timer to catch scan event timeouts */
+};
+
+
+static inline struct wl_bss_info *next_bss(struct wl_scan_results *list, struct wl_bss_info *bss)
+{
+ return bss = bss ?
+ (struct wl_bss_info *)((uintptr) bss + dtoh32(bss->length)) : list->bss_info;
+}
+static inline s32
+wl_alloc_netinfo(struct wl_priv *wl, struct net_device *ndev,
+ struct wireless_dev * wdev, s32 mode)
+{
+ struct net_info *_net_info;
+ s32 err = 0;
+ if (wl->iface_cnt == IFACE_MAX_CNT)
+ return -ENOMEM;
+ _net_info = kzalloc(sizeof(struct net_info), GFP_KERNEL);
+ if (!_net_info)
+ err = -ENOMEM;
+ else {
+ _net_info->mode = mode;
+ _net_info->ndev = ndev;
+ _net_info->wdev = wdev;
+ wl->iface_cnt++;
+ list_add(&_net_info->list, &wl->net_list);
+ }
+ return err;
+}
+static inline void
+wl_dealloc_netinfo(struct wl_priv *wl, struct net_device *ndev)
+{
+ struct net_info *_net_info, *next;
+
+ list_for_each_entry_safe(_net_info, next, &wl->net_list, list) {
+ if (ndev && (_net_info->ndev == ndev)) {
+ list_del(&_net_info->list);
+ wl->iface_cnt--;
+ if (_net_info->wdev) {
+ kfree(_net_info->wdev);
+ ndev->ieee80211_ptr = NULL;
+ }
+ kfree(_net_info);
+ }
+ }
+
+}
+static inline void
+wl_delete_all_netinfo(struct wl_priv *wl)
+{
+ struct net_info *_net_info, *next;
+
+ list_for_each_entry_safe(_net_info, next, &wl->net_list, list) {
+ list_del(&_net_info->list);
+ if (_net_info->wdev)
+ kfree(_net_info->wdev);
+ kfree(_net_info);
+ }
+ wl->iface_cnt = 0;
+}
+static inline bool
+wl_get_status_all(struct wl_priv *wl, s32 status)
+{
+ struct net_info *_net_info, *next;
+ u32 cnt = 0;
+ list_for_each_entry_safe(_net_info, next, &wl->net_list, list) {
+ if (_net_info->ndev &&
+ test_bit(status, &_net_info->sme_state))
+ cnt++;
+ }
+ return cnt? true: false;
+}
+static inline void
+wl_set_status_by_netdev(struct wl_priv *wl, s32 status,
+ struct net_device *ndev, u32 op)
+{
+
+ struct net_info *_net_info, *next;
+
+ list_for_each_entry_safe(_net_info, next, &wl->net_list, list) {
+ if (ndev && (_net_info->ndev == ndev)) {
+ switch (op) {
+ case 1:
+ set_bit(status, &_net_info->sme_state);
+ break;
+ case 2:
+ clear_bit(status, &_net_info->sme_state);
+ break;
+ case 4:
+ change_bit(status, &_net_info->sme_state);
+ break;
+ }
+ }
+
+ }
+
+}
+
+static inline u32
+wl_get_status_by_netdev(struct wl_priv *wl, s32 status,
+ struct net_device *ndev)
+{
+ struct net_info *_net_info, *next;
+
+ list_for_each_entry_safe(_net_info, next, &wl->net_list, list) {
+ if (ndev && (_net_info->ndev == ndev))
+ return test_bit(status, &_net_info->sme_state);
+ }
+ return 0;
+}
+
+static inline s32
+wl_get_mode_by_netdev(struct wl_priv *wl, struct net_device *ndev)
+{
+ struct net_info *_net_info, *next;
+
+ list_for_each_entry_safe(_net_info, next, &wl->net_list, list) {
+ if (ndev && (_net_info->ndev == ndev))
+ return _net_info->mode;
+ }
+ return -1;
+}
+
+
+static inline void
+wl_set_mode_by_netdev(struct wl_priv *wl, struct net_device *ndev,
+ s32 mode)
+{
+ struct net_info *_net_info, *next;
+
+ list_for_each_entry_safe(_net_info, next, &wl->net_list, list) {
+ if (ndev && (_net_info->ndev == ndev))
+ _net_info->mode = mode;
+ }
+}
+static inline struct wl_profile *
+wl_get_profile_by_netdev(struct wl_priv *wl, struct net_device *ndev)
+{
+ struct net_info *_net_info, *next;
+
+ list_for_each_entry_safe(_net_info, next, &wl->net_list, list) {
+ if (ndev && (_net_info->ndev == ndev))
+ return &_net_info->profile;
+ }
+ return NULL;
+}
+#define wl_to_wiphy(w) (w->wdev->wiphy)
+#define wl_to_prmry_ndev(w) (w->wdev->netdev)
+#define ndev_to_wl(n) (wdev_to_wl(n->ieee80211_ptr))
+#define wl_to_sr(w) (w->scan_req_int)
+#define wl_to_ie(w) (&w->ie)
+#define iscan_to_wl(i) ((struct wl_priv *)(i->data))
+#define wl_to_iscan(w) (w->iscan)
+#define wl_to_conn(w) (&w->conn_info)
+#define wiphy_from_scan(w) (w->escan_info.wiphy)
+#define wl_get_drv_status_all(wl, stat) \
+ (wl_get_status_all(wl, WL_STATUS_ ## stat))
+#define wl_get_drv_status(wl, stat, ndev) \
+ (wl_get_status_by_netdev(wl, WL_STATUS_ ## stat, ndev))
+#define wl_set_drv_status(wl, stat, ndev) \
+ (wl_set_status_by_netdev(wl, WL_STATUS_ ## stat, ndev, 1))
+#define wl_clr_drv_status(wl, stat, ndev) \
+ (wl_set_status_by_netdev(wl, WL_STATUS_ ## stat, ndev, 2))
+#define wl_chg_drv_status(wl, stat, ndev) \
+ (wl_set_status_by_netdev(wl, WL_STATUS_ ## stat, ndev, 4))
+
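+/* Example usage of the status helpers above (illustrative only; "ndev" is
+ * whichever net_device the caller holds).  The "stat" argument is pasted
+ * onto WL_STATUS_, so it must be one of the enum wl_status suffixes:
+ *
+ *	wl_set_drv_status(wl, SCANNING, ndev);
+ *	if (wl_get_drv_status(wl, CONNECTED, ndev))
+ *		wl_clr_drv_status(wl, CONNECTED, ndev);
+ */
+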
+#define for_each_bss(list, bss, __i) \
+ for (__i = 0; __i < list->count && __i < WL_AP_MAX; __i++, bss = next_bss(list, bss))
+
+#define for_each_ndev(wl, iter, next) \
+ list_for_each_entry_safe(iter, next, &wl->net_list, list)
+
+
+/* In case of WPS from wpa_supplicant, the pairwise suite and group suite are 0.
+ * In addition to that, wpa_version is WPA_VERSION_1.
+ */
+#define is_wps_conn(_sme) \
+ ((wl_cfgp2p_find_wpsie((u8 *)_sme->ie, _sme->ie_len) != NULL) && \
+ (!_sme->crypto.n_ciphers_pairwise) && \
+ (!_sme->crypto.cipher_group))
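+
+/* Illustrative use of is_wps_conn() ("sme" stands for the
+ * cfg80211_connect_params handed to the .connect callback):
+ *
+ *	bool wps = is_wps_conn(sme);	(true when the request is a WPS handshake)
+ */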
+extern s32 wl_cfg80211_attach(struct net_device *ndev, void *data);
+extern s32 wl_cfg80211_attach_post(struct net_device *ndev);
+extern void wl_cfg80211_detach(void *para);
+
+extern void wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t *e,
+ void *data);
+void wl_cfg80211_set_parent_dev(void *dev);
+struct device *wl_cfg80211_get_parent_dev(void);
+
+extern s32 wl_cfg80211_up(void *para);
+extern s32 wl_cfg80211_down(void *para);
+extern s32 wl_cfg80211_notify_ifadd(struct net_device *ndev, s32 idx, s32 bssidx,
+ void* _net_attach);
+extern s32 wl_cfg80211_ifdel_ops(struct net_device *net);
+extern s32 wl_cfg80211_notify_ifdel(struct net_device *ndev);
+extern s32 wl_cfg80211_is_progress_ifadd(void);
+extern s32 wl_cfg80211_is_progress_ifchange(void);
+extern s32 wl_cfg80211_is_progress_ifadd(void);
+extern s32 wl_cfg80211_notify_ifchange(void);
+extern void wl_cfg80211_dbg_level(u32 level);
+extern s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr);
+extern s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len);
+extern s32 wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len);
+extern s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *net, char *buf, int len,
+ enum wl_management_type type);
+extern s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len);
+extern int wl_cfg80211_hang(struct net_device *dev, u16 reason);
+extern s32 wl_mode_to_nl80211_iftype(s32 mode);
+int wl_cfg80211_do_driver_init(struct net_device *net);
+void wl_cfg80211_enable_trace(int level);
+extern s32 wl_cfg80211_if_is_group_owner(void);
+extern chanspec_t wl_ch_host_to_driver(u16 channel);
+
+#endif /* _wl_cfg80211_h_ */
diff --git a/drivers/net/wireless/bcmdhd/wl_cfgp2p.c b/drivers/net/wireless/bcmdhd/wl_cfgp2p.c
new file mode 100644
index 000000000000..eb76fd0078d3
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfgp2p.c
@@ -0,0 +1,1967 @@
+/*
+ * Linux cfgp2p driver
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfgp2p.c 321498 2012-03-15 12:54:13Z $
+ *
+ */
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <proto/ethernet.h>
+
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+#include <wldev_common.h>
+#include <wl_android.h>
+
+static s8 scanparambuf[WLC_IOCTL_SMLEN];
+
+static bool
+wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type);
+
+static s32
+wl_cfgp2p_vndr_ie(struct wl_priv *wl, struct net_device *ndev, s32 bssidx, s32 pktflag,
+ s8 *oui, s32 ie_id, s8 *data, s32 data_len, s32 delete);
+
+static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd);
+static int wl_cfgp2p_if_open(struct net_device *net);
+static int wl_cfgp2p_if_stop(struct net_device *net);
+
+static const struct net_device_ops wl_cfgp2p_if_ops = {
+ .ndo_open = wl_cfgp2p_if_open,
+ .ndo_stop = wl_cfgp2p_if_stop,
+ .ndo_do_ioctl = wl_cfgp2p_do_ioctl,
+ .ndo_start_xmit = wl_cfgp2p_start_xmit,
+};
+
+bool wl_cfgp2p_is_pub_action(void *frame, u32 frame_len)
+{
+ wifi_p2p_pub_act_frame_t *pact_frm;
+
+ if (frame == NULL)
+ return false;
+ pact_frm = (wifi_p2p_pub_act_frame_t *)frame;
+ if (frame_len < sizeof(wifi_p2p_pub_act_frame_t) -1)
+ return false;
+
+ if (pact_frm->category == P2P_PUB_AF_CATEGORY &&
+ pact_frm->action == P2P_PUB_AF_ACTION &&
+ pact_frm->oui_type == P2P_VER &&
+ memcmp(pact_frm->oui, P2P_OUI, sizeof(pact_frm->oui)) == 0) {
+ return true;
+ }
+
+ return false;
+}
+
+bool wl_cfgp2p_is_p2p_action(void *frame, u32 frame_len)
+{
+ wifi_p2p_action_frame_t *act_frm;
+
+ if (frame == NULL)
+ return false;
+ act_frm = (wifi_p2p_action_frame_t *)frame;
+ if (frame_len < sizeof(wifi_p2p_action_frame_t) -1)
+ return false;
+
+ if (act_frm->category == P2P_AF_CATEGORY &&
+ act_frm->type == P2P_VER &&
+ memcmp(act_frm->OUI, P2P_OUI, DOT11_OUI_LEN) == 0) {
+ return true;
+ }
+
+ return false;
+}
+bool wl_cfgp2p_is_gas_action(void *frame, u32 frame_len)
+{
+
+ wifi_p2psd_gas_pub_act_frame_t *sd_act_frm;
+
+ if (frame == NULL)
+ return false;
+
+ sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame;
+ if (frame_len < sizeof(wifi_p2psd_gas_pub_act_frame_t) - 1)
+ return false;
+ if (sd_act_frm->category != P2PSD_ACTION_CATEGORY)
+ return false;
+
+ if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ ||
+ sd_act_frm->action == P2PSD_ACTION_ID_GAS_IRESP ||
+ sd_act_frm->action == P2PSD_ACTION_ID_GAS_CREQ ||
+ sd_act_frm->action == P2PSD_ACTION_ID_GAS_CRESP)
+ return true;
+ else
+ return false;
+
+}
+void wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len)
+{
+ wifi_p2p_pub_act_frame_t *pact_frm;
+ wifi_p2p_action_frame_t *act_frm;
+ wifi_p2psd_gas_pub_act_frame_t *sd_act_frm;
+ if (!frame || frame_len <= 2)
+ return;
+
+ if (wl_cfgp2p_is_pub_action(frame, frame_len)) {
+ pact_frm = (wifi_p2p_pub_act_frame_t *)frame;
+ switch (pact_frm->subtype) {
+ case P2P_PAF_GON_REQ:
+ CFGP2P_DBG(("%s P2P Group Owner Negotiation Req Frame\n",
+ (tx)? "TX": "RX"));
+ break;
+ case P2P_PAF_GON_RSP:
+ CFGP2P_DBG(("%s P2P Group Owner Negotiation Rsp Frame\n",
+ (tx)? "TX": "RX"));
+ break;
+ case P2P_PAF_GON_CONF:
+ CFGP2P_DBG(("%s P2P Group Owner Negotiation Confirm Frame\n",
+ (tx)? "TX": "RX"));
+ break;
+ case P2P_PAF_INVITE_REQ:
+ CFGP2P_DBG(("%s P2P Invitation Request Frame\n",
+ (tx)? "TX": "RX"));
+ break;
+ case P2P_PAF_INVITE_RSP:
+ CFGP2P_DBG(("%s P2P Invitation Response Frame\n",
+ (tx)? "TX": "RX"));
+ break;
+ case P2P_PAF_DEVDIS_REQ:
+ CFGP2P_DBG(("%s P2P Device Discoverability Request Frame\n",
+ (tx)? "TX": "RX"));
+ break;
+ case P2P_PAF_DEVDIS_RSP:
+ CFGP2P_DBG(("%s P2P Device Discoverability Response Frame\n",
+ (tx)? "TX": "RX"));
+ break;
+ case P2P_PAF_PROVDIS_REQ:
+ CFGP2P_DBG(("%s P2P Provision Discovery Request Frame\n",
+ (tx)? "TX": "RX"));
+ break;
+ case P2P_PAF_PROVDIS_RSP:
+ CFGP2P_DBG(("%s P2P Provision Discovery Response Frame\n",
+ (tx)? "TX": "RX"));
+ break;
+ default:
+ CFGP2P_DBG(("%s Unknown P2P Public Action Frame\n",
+ (tx)? "TX": "RX"));
+
+ }
+
+ } else if (wl_cfgp2p_is_p2p_action(frame, frame_len)) {
+ act_frm = (wifi_p2p_action_frame_t *)frame;
+ switch (act_frm->subtype) {
+ case P2P_AF_NOTICE_OF_ABSENCE:
+ CFGP2P_DBG(("%s P2P Notice of Absence Frame\n",
+ (tx)? "TX": "RX"));
+ break;
+ case P2P_AF_PRESENCE_REQ:
+ CFGP2P_DBG(("%s P2P Presence Request Frame\n",
+ (tx)? "TX": "RX"));
+ break;
+ case P2P_AF_PRESENCE_RSP:
+ CFGP2P_DBG(("%s P2P Presence Response Frame\n",
+ (tx)? "TX": "RX"));
+ break;
+ case P2P_AF_GO_DISC_REQ:
+ CFGP2P_DBG(("%s P2P Discoverability Request Frame\n",
+ (tx)? "TX": "RX"));
+ break;
+ default:
+ CFGP2P_DBG(("%s Unknown P2P Action Frame\n",
+ (tx)? "TX": "RX"));
+ }
+
+ } else if (wl_cfgp2p_is_gas_action(frame, frame_len)) {
+ sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame;
+ switch (sd_act_frm->action) {
+ case P2PSD_ACTION_ID_GAS_IREQ:
+ CFGP2P_DBG(("%s P2P GAS Initial Request\n",
+ (tx)? "TX" : "RX"));
+ break;
+ case P2PSD_ACTION_ID_GAS_IRESP:
+ CFGP2P_DBG(("%s P2P GAS Initial Response\n",
+ (tx)? "TX" : "RX"));
+ break;
+ case P2PSD_ACTION_ID_GAS_CREQ:
+ CFGP2P_DBG(("%s P2P GAS Comback Request\n",
+ (tx)? "TX" : "RX"));
+ break;
+ case P2PSD_ACTION_ID_GAS_CRESP:
+ CFGP2P_DBG(("%s P2P GAS Comback Response\n",
+ (tx)? "TX" : "RX"));
+ break;
+ default:
+ CFGP2P_DBG(("%s Unknown P2P GAS Frame\n",
+ (tx)? "TX" : "RX"));
+ }
+
+
+ }
+
+}
+
+/*
+ * Initialize variables related to P2P
+ *
+ */
+s32
+wl_cfgp2p_init_priv(struct wl_priv *wl)
+{
+ if (!(wl->p2p = kzalloc(sizeof(struct p2p_info), GFP_KERNEL))) {
+ CFGP2P_ERR(("struct p2p_info allocation failed\n"));
+ return -ENOMEM;
+ }
+#define INIT_IE(IE_TYPE, BSS_TYPE) \
+ do { \
+ memset(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \
+ sizeof(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \
+ wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \
+ } while (0);
+
+ INIT_IE(probe_req, P2PAPI_BSSCFG_PRIMARY);
+ INIT_IE(probe_res, P2PAPI_BSSCFG_PRIMARY);
+ INIT_IE(assoc_req, P2PAPI_BSSCFG_PRIMARY);
+ INIT_IE(assoc_res, P2PAPI_BSSCFG_PRIMARY);
+ INIT_IE(beacon, P2PAPI_BSSCFG_PRIMARY);
+ INIT_IE(probe_req, P2PAPI_BSSCFG_DEVICE);
+ INIT_IE(probe_res, P2PAPI_BSSCFG_DEVICE);
+ INIT_IE(assoc_req, P2PAPI_BSSCFG_DEVICE);
+ INIT_IE(assoc_res, P2PAPI_BSSCFG_DEVICE);
+ INIT_IE(beacon, P2PAPI_BSSCFG_DEVICE);
+ INIT_IE(probe_req, P2PAPI_BSSCFG_CONNECTION);
+ INIT_IE(probe_res, P2PAPI_BSSCFG_CONNECTION);
+ INIT_IE(assoc_req, P2PAPI_BSSCFG_CONNECTION);
+ INIT_IE(assoc_res, P2PAPI_BSSCFG_CONNECTION);
+ INIT_IE(beacon, P2PAPI_BSSCFG_CONNECTION);
+#undef INIT_IE
+ wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY) = wl_to_prmry_ndev(wl);
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_PRIMARY) = 0;
+ wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) = NULL;
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = 0;
+ wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION) = NULL;
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_CONNECTION) = 0;
+ spin_lock_init(&wl->p2p->timer_lock);
+ return BCME_OK;
+
+}
+/*
+ * Deinitialize variables related to P2P
+ *
+ */
+void
+wl_cfgp2p_deinit_priv(struct wl_priv *wl)
+{
+ CFGP2P_DBG(("In\n"));
+ if (wl->p2p) {
+ kfree(wl->p2p);
+ wl->p2p = NULL;
+ }
+ wl->p2p_supported = 0;
+}
+/*
+ * Configure the firmware for P2P operation
+ */
+s32
+wl_cfgp2p_set_firm_p2p(struct wl_priv *wl)
+{
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+ struct ether_addr null_eth_addr = { { 0, 0, 0, 0, 0, 0 } };
+ s32 ret = BCME_OK;
+ s32 val = 0;
+ /* Do we have to check whether APSTA is enabled or not ? */
+ wldev_iovar_getint(ndev, "apsta", &val);
+ if (val == 0) {
+ val = 1;
+ wldev_ioctl(ndev, WLC_DOWN, &val, sizeof(s32), true);
+ wldev_iovar_setint(ndev, "apsta", val);
+ wldev_ioctl(ndev, WLC_UP, &val, sizeof(s32), true);
+ }
+ val = 1;
+ /* Disable firmware roaming for P2P */
+ wldev_iovar_setint(ndev, "roam_off", val);
+	/* In the COB case the firmware comes up with a default MAC address,
+	 * so after initializing the firmware we must push the current MAC
+	 * address down to it as the P2P device address.
+ */
+ ret = wldev_iovar_setbuf_bsscfg(ndev, "p2p_da_override", &null_eth_addr,
+ sizeof(null_eth_addr), wl->ioctl_buf, WLC_IOCTL_MAXLEN, 0, &wl->ioctl_buf_sync);
+ if (ret && ret != BCME_UNSUPPORTED) {
+ CFGP2P_ERR(("failed to update device address ret %d\n", ret));
+ }
+ return ret;
+}
+
+/* Create a new P2P BSS.
+ * Parameters:
+ * @mac : MAC address of the BSS to create
+ * @if_type : interface type: WL_P2P_IF_GO or WL_P2P_IF_CLIENT
+ * @chspec : chspec to use if creating a GO BSS.
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifadd(struct wl_priv *wl, struct ether_addr *mac, u8 if_type,
+ chanspec_t chspec)
+{
+ wl_p2p_if_t ifreq;
+ s32 err;
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+
+ ifreq.type = if_type;
+ ifreq.chspec = chspec;
+ memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet));
+
+ CFGP2P_DBG(("---wl p2p_ifadd %02x:%02x:%02x:%02x:%02x:%02x %s %u\n",
+ ifreq.addr.octet[0], ifreq.addr.octet[1], ifreq.addr.octet[2],
+ ifreq.addr.octet[3], ifreq.addr.octet[4], ifreq.addr.octet[5],
+ (if_type == WL_P2P_IF_GO) ? "go" : "client",
+ (chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT));
+
+ err = wldev_iovar_setbuf(ndev, "p2p_ifadd", &ifreq, sizeof(ifreq),
+ wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
+ return err;
+}
+
+/* Delete a P2P BSS.
+ * Parameters:
+ * @mac      : MAC address of the BSS to delete
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifdel(struct wl_priv *wl, struct ether_addr *mac)
+{
+ s32 ret;
+ struct net_device *netdev = wl_to_prmry_ndev(wl);
+
+ CFGP2P_INFO(("------primary idx %d : wl p2p_ifdel %02x:%02x:%02x:%02x:%02x:%02x\n",
+ netdev->ifindex, mac->octet[0], mac->octet[1], mac->octet[2],
+ mac->octet[3], mac->octet[4], mac->octet[5]));
+ ret = wldev_iovar_setbuf(netdev, "p2p_ifdel", mac, sizeof(*mac),
+ wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
+ if (unlikely(ret < 0)) {
+ printk("'wl p2p_ifdel' error %d\n", ret);
+ }
+ return ret;
+}
+
+/* Change a P2P Role.
+ * Parameters:
+ * @mac      : MAC address of the BSS whose role is to be changed
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifchange(struct wl_priv *wl, struct ether_addr *mac, u8 if_type,
+ chanspec_t chspec)
+{
+ wl_p2p_if_t ifreq;
+ s32 err;
+ struct net_device *netdev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION);
+
+ ifreq.type = if_type;
+ ifreq.chspec = chspec;
+ memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet));
+
+ CFGP2P_INFO(("---wl p2p_ifchange %02x:%02x:%02x:%02x:%02x:%02x %s %u\n",
+ ifreq.addr.octet[0], ifreq.addr.octet[1], ifreq.addr.octet[2],
+ ifreq.addr.octet[3], ifreq.addr.octet[4], ifreq.addr.octet[5],
+ (if_type == WL_P2P_IF_GO) ? "go" : "client",
+ (chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT));
+
+ err = wldev_iovar_setbuf(netdev, "p2p_ifupd", &ifreq, sizeof(ifreq),
+ wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
+
+ if (unlikely(err < 0)) {
+ printk("'wl p2p_ifupd' error %d\n", err);
+ }
+ return err;
+}
+
+
+/* Get the index of a created P2P BSS.
+ * Parameters:
+ * @mac : MAC address of the created BSS
+ * @index : output: index of created BSS
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifidx(struct wl_priv *wl, struct ether_addr *mac, s32 *index)
+{
+ s32 ret;
+ u8 getbuf[64];
+ struct net_device *dev = wl_to_prmry_ndev(wl);
+
+ CFGP2P_INFO(("---wl p2p_if %02x:%02x:%02x:%02x:%02x:%02x\n",
+ mac->octet[0], mac->octet[1], mac->octet[2],
+ mac->octet[3], mac->octet[4], mac->octet[5]));
+
+ ret = wldev_iovar_getbuf_bsscfg(dev, "p2p_if", mac, sizeof(*mac), getbuf,
+ sizeof(getbuf), wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_PRIMARY), NULL);
+
+ if (ret == 0) {
+ memcpy(index, getbuf, sizeof(index));
+ CFGP2P_INFO(("---wl p2p_if ==> %d\n", *index));
+ }
+
+ return ret;
+}
+
+static s32
+wl_cfgp2p_set_discovery(struct wl_priv *wl, s32 on)
+{
+ s32 ret = BCME_OK;
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+ CFGP2P_DBG(("enter\n"));
+
+ ret = wldev_iovar_setint(ndev, "p2p_disc", on);
+
+ if (unlikely(ret < 0)) {
+ CFGP2P_ERR(("p2p_disc %d error %d\n", on, ret));
+ }
+
+ return ret;
+}
+
+/* Set the WL driver's P2P mode.
+ * Parameters :
+ * @mode : is one of WL_P2P_DISC_ST_{SCAN,LISTEN,SEARCH}.
+ * @channel   : the channel to listen on
+ * @listen_ms : the time (in milliseconds) to wait
+ * @bssidx : bss index for BSSCFG
+ * Returns 0 if success
+ */
+
+s32
+wl_cfgp2p_set_p2p_mode(struct wl_priv *wl, u8 mode, u32 channel, u16 listen_ms, int bssidx)
+{
+ wl_p2p_disc_st_t discovery_mode;
+ s32 ret;
+ struct net_device *dev;
+ CFGP2P_DBG(("enter\n"));
+
+ if (unlikely(bssidx >= P2PAPI_BSSCFG_MAX)) {
+ CFGP2P_ERR((" %d index out of range\n", bssidx));
+ return -1;
+ }
+
+ dev = wl_to_p2p_bss_ndev(wl, bssidx);
+ if (unlikely(dev == NULL)) {
+ CFGP2P_ERR(("bssidx %d is not assigned\n", bssidx));
+ return BCME_NOTFOUND;
+ }
+
+ /* Put the WL driver into P2P Listen Mode to respond to P2P probe reqs */
+ discovery_mode.state = mode;
+ discovery_mode.chspec = wl_ch_host_to_driver(channel);
+ discovery_mode.dwell = listen_ms;
+ ret = wldev_iovar_setbuf_bsscfg(dev, "p2p_state", &discovery_mode,
+ sizeof(discovery_mode), wl->ioctl_buf, WLC_IOCTL_MAXLEN,
+ bssidx, &wl->ioctl_buf_sync);
+
+ return ret;
+}
+
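+/* Illustrative call (this mirrors wl_cfgp2p_discover_listen() further below):
+ * put the discovery bsscfg into LISTEN state on channel 11 for 200 ms:
+ *
+ *	wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_LISTEN, 11, 200,
+ *		wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+ */
+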
+/* Get the index of the P2P Discovery BSS */
+static s32
+wl_cfgp2p_get_disc_idx(struct wl_priv *wl, s32 *index)
+{
+ s32 ret;
+ struct net_device *dev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY);
+
+ ret = wldev_iovar_getint(dev, "p2p_dev", index);
+ CFGP2P_INFO(("p2p_dev bsscfg_idx=%d ret=%d\n", *index, ret));
+
+ if (unlikely(ret < 0)) {
+ CFGP2P_ERR(("'p2p_dev' error %d\n", ret));
+ return ret;
+ }
+ return ret;
+}
+
+s32
+wl_cfgp2p_init_discovery(struct wl_priv *wl)
+{
+
+ s32 index = 0;
+ s32 ret = BCME_OK;
+
+ CFGP2P_DBG(("enter\n"));
+
+ if (wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) != 0) {
+ CFGP2P_ERR(("do nothing, already initialized\n"));
+ return ret;
+ }
+
+ ret = wl_cfgp2p_set_discovery(wl, 1);
+ if (ret < 0) {
+ CFGP2P_ERR(("set discover error\n"));
+ return ret;
+ }
+ /* Enable P2P Discovery in the WL Driver */
+ ret = wl_cfgp2p_get_disc_idx(wl, &index);
+
+ if (ret < 0) {
+ return ret;
+ }
+ wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) =
+ wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY);
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = index;
+
+ /* Set the initial discovery state to SCAN */
+ ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0,
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+
+ if (unlikely(ret != 0)) {
+ CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n"));
+ wl_cfgp2p_set_discovery(wl, 0);
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = 0;
+ wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) = NULL;
+ return 0;
+ }
+ return ret;
+}
+
+/* Deinitialize P2P Discovery
+ * Parameters :
+ * @wl : wl_private data
+ * Returns 0 if success
+ */
+static s32
+wl_cfgp2p_deinit_discovery(struct wl_priv *wl)
+{
+ s32 ret = BCME_OK;
+ CFGP2P_DBG(("enter\n"));
+
+ if (wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) == 0) {
+ CFGP2P_ERR(("do nothing, not initialized\n"));
+ return -1;
+ }
+ /* Set the discovery state to SCAN */
+ ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0,
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+ /* Disable P2P discovery in the WL driver (deletes the discovery BSSCFG) */
+ ret = wl_cfgp2p_set_discovery(wl, 0);
+
+ /* Clear our saved WPS and P2P IEs for the discovery BSS. The driver
+ * deleted these IEs when wl_cfgp2p_set_discovery() deleted the discovery
+ * BSS.
+ */
+
+ /* Clear the saved bsscfg index of the discovery BSSCFG to indicate we
+ * have no discovery BSS.
+ */
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = 0;
+ wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) = NULL;
+
+ return ret;
+
+}
+/* Enable P2P Discovery
+ * Parameters:
+ * @wl : wl_private data
+ * @ie : probe request ie (WPS IE + P2P IE)
+ * @ie_len : probe request ie length
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_enable_discovery(struct wl_priv *wl, struct net_device *dev,
+ const u8 *ie, u32 ie_len)
+{
+ s32 ret = BCME_OK;
+ if (wl_get_p2p_status(wl, DISCOVERY_ON)) {
+ CFGP2P_INFO((" DISCOVERY is already initialized, we have nothing to do\n"));
+ goto set_ie;
+ }
+
+ wl_set_p2p_status(wl, DISCOVERY_ON);
+
+ CFGP2P_DBG(("enter\n"));
+
+ ret = wl_cfgp2p_init_discovery(wl);
+ if (unlikely(ret < 0)) {
+ CFGP2P_ERR((" init discovery error %d\n", ret));
+ goto exit;
+ }
+ /* Set wsec to any non-zero value in the discovery bsscfg to ensure our
+ * P2P probe responses have the privacy bit set in the 802.11 WPA IE.
+ * Some peer devices may not initiate WPS with us if this bit is not set.
+ */
+ ret = wldev_iovar_setint_bsscfg(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE),
+ "wsec", AES_ENABLED, wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+ if (unlikely(ret < 0)) {
+ CFGP2P_ERR((" wsec error %d\n", ret));
+ }
+set_ie:
+ ret = wl_cfgp2p_set_management_ie(wl, dev,
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE),
+ VNDR_IE_PRBREQ_FLAG, ie, ie_len);
+
+ if (unlikely(ret < 0)) {
+ CFGP2P_ERR(("set probreq ie occurs error %d\n", ret));
+ goto exit;
+ }
+exit:
+ return ret;
+}
+
+/* Disable P2P Discovery
+ * Parameters:
+ * @wl : wl_private_data
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_disable_discovery(struct wl_priv *wl)
+{
+ s32 ret = BCME_OK;
+ CFGP2P_DBG((" enter\n"));
+ wl_clr_p2p_status(wl, DISCOVERY_ON);
+
+ if (wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) == 0) {
+ CFGP2P_ERR((" do nothing, not initialized\n"));
+ goto exit;
+ }
+
+ ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0,
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+
+ if (unlikely(ret < 0)) {
+
+ CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n"));
+ }
+ /* Do a scan abort to stop the driver's scan engine in case it is still
+ * waiting out an action frame tx dwell time.
+ */
+#ifdef NOT_YET
+ if (wl_get_p2p_status(wl, SCANNING)) {
+ p2pwlu_scan_abort(hdl, FALSE);
+ }
+#endif
+ wl_clr_p2p_status(wl, DISCOVERY_ON);
+ ret = wl_cfgp2p_deinit_discovery(wl);
+
+exit:
+ return ret;
+}
+
+s32
+wl_cfgp2p_escan(struct wl_priv *wl, struct net_device *dev, u16 active,
+ u32 num_chans, u16 *channels,
+ s32 search_state, u16 action, u32 bssidx)
+{
+ s32 ret = BCME_OK;
+ s32 memsize;
+ s32 eparams_size;
+ u32 i;
+ s8 *memblk;
+ wl_p2p_scan_t *p2p_params;
+ wl_escan_params_t *eparams;
+ wlc_ssid_t ssid;
+ /* Scan parameters */
+#define P2PAPI_SCAN_NPROBES 1
+#define P2PAPI_SCAN_DWELL_TIME_MS 50
+#define P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS 40
+#define P2PAPI_SCAN_HOME_TIME_MS 60
+ struct net_device *pri_dev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY);
+ wl_set_p2p_status(wl, SCANNING);
+	/* Allocate scan params with space for num_chans channels and 0 SSIDs */
+ eparams_size = (WL_SCAN_PARAMS_FIXED_SIZE +
+ OFFSETOF(wl_escan_params_t, params)) +
+ num_chans * sizeof(eparams->params.channel_list[0]);
+
+ memsize = sizeof(wl_p2p_scan_t) + eparams_size;
+ memblk = scanparambuf;
+ if (memsize > sizeof(scanparambuf)) {
+ CFGP2P_ERR((" scanpar buf too small (%u > %u)\n",
+ memsize, sizeof(scanparambuf)));
+ return -1;
+ }
+ memset(memblk, 0, memsize);
+ memset(wl->ioctl_buf, 0, WLC_IOCTL_MAXLEN);
+ if (search_state == WL_P2P_DISC_ST_SEARCH) {
+ /*
+		 * In SEARCH state we don't need to set the SSID explicitly,
+		 * because the dongle uses the P2P wildcard SSID internally by default.
+ */
+ wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SEARCH, 0, 0, bssidx);
+ ssid.SSID_len = htod32(0);
+
+ } else if (search_state == WL_P2P_DISC_ST_SCAN) {
+		/* SCAN state: a normal 802.11 scan.
+		 * The WFD supplicant issues p2p_find with type=progressive or type=full;
+		 * for a progressive find we have to set the SSID to the P2P wildcard,
+		 * because without an SSID this would just be a broadcast scan.
+ */
+ strcpy(ssid.SSID, WL_P2P_WILDCARD_SSID);
+ ssid.SSID_len = htod32(WL_P2P_WILDCARD_SSID_LEN);
+ wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0, bssidx);
+ }
+
+
+ /* Fill in the P2P scan structure at the start of the iovar param block */
+ p2p_params = (wl_p2p_scan_t*) memblk;
+ p2p_params->type = 'E';
+ /* Fill in the Scan structure that follows the P2P scan structure */
+ eparams = (wl_escan_params_t*) (p2p_params + 1);
+ eparams->params.bss_type = DOT11_BSSTYPE_ANY;
+ if (active)
+ eparams->params.scan_type = DOT11_SCANTYPE_ACTIVE;
+ else
+ eparams->params.scan_type = DOT11_SCANTYPE_PASSIVE;
+
+ memcpy(&eparams->params.bssid, &ether_bcast, ETHER_ADDR_LEN);
+ if (ssid.SSID_len)
+ memcpy(&eparams->params.ssid, &ssid, sizeof(wlc_ssid_t));
+
+ eparams->params.nprobes = htod32(P2PAPI_SCAN_NPROBES);
+ eparams->params.home_time = htod32(P2PAPI_SCAN_HOME_TIME_MS);
+ if (wl_get_drv_status_all(wl, CONNECTED))
+ eparams->params.active_time = htod32(-1);
+ else if (num_chans == 3)
+ eparams->params.active_time = htod32(P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS);
+ else
+ eparams->params.active_time = htod32(P2PAPI_SCAN_DWELL_TIME_MS);
+ eparams->params.passive_time = htod32(-1);
+ eparams->params.channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+
+ for (i = 0; i < num_chans; i++) {
+ eparams->params.channel_list[i] = wl_ch_host_to_driver(channels[i]);
+ }
+ eparams->version = htod32(ESCAN_REQ_VERSION);
+ eparams->action = htod16(action);
+ eparams->sync_id = htod16(0x1234);
+ CFGP2P_INFO(("SCAN CHANNELS : "));
+
+ for (i = 0; i < num_chans; i++) {
+ if (i == 0) CFGP2P_INFO(("%d", channels[i]));
+ else CFGP2P_INFO((",%d", channels[i]));
+ }
+
+ CFGP2P_INFO(("\n"));
+
+ ret = wldev_iovar_setbuf_bsscfg(pri_dev, "p2p_scan",
+ memblk, memsize, wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+ return ret;
+}
+
+/* Search for a common channel on which to send an action frame
+ * Parameters:
+ * @wl : wl_private data
+ * @ndev : net device for bssidx
+ * @bssidx : bssidx for BSS
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_act_frm_search(struct wl_priv *wl, struct net_device *ndev,
+ s32 bssidx, s32 channel)
+{
+ s32 ret = 0;
+ u32 chan_cnt = 0;
+ u16 *default_chan_list = NULL;
+ if (!p2p_is_on(wl))
+ return -BCME_ERROR;
+ CFGP2P_ERR((" Enter\n"));
+ if (bssidx == P2PAPI_BSSCFG_PRIMARY)
+ bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE);
+ if (channel)
+ chan_cnt = 1;
+ else
+ chan_cnt = SOCIAL_CHAN_CNT;
+ default_chan_list = kzalloc(chan_cnt * sizeof(*default_chan_list), GFP_KERNEL);
+ if (default_chan_list == NULL) {
+ CFGP2P_ERR(("channel list allocation failed \n"));
+ ret = -ENOMEM;
+ goto exit;
+ }
+ if (channel) {
+ default_chan_list[0] = channel;
+ } else {
+ default_chan_list[0] = SOCIAL_CHAN_1;
+ default_chan_list[1] = SOCIAL_CHAN_2;
+ default_chan_list[2] = SOCIAL_CHAN_3;
+ }
+ ret = wl_cfgp2p_escan(wl, ndev, true, SOCIAL_CHAN_CNT,
+ default_chan_list, WL_P2P_DISC_ST_SEARCH,
+ WL_SCAN_ACTION_START, bssidx);
+ kfree(default_chan_list);
+exit:
+ return ret;
+}
+
+/* Check whether pointed-to IE looks like WPA. */
+#define wl_cfgp2p_is_wpa_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \
+ (const uint8 *)WPS_OUI, WPS_OUI_LEN, WPA_OUI_TYPE)
+/* Check whether pointed-to IE looks like WPS. */
+#define wl_cfgp2p_is_wps_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \
+ (const uint8 *)WPS_OUI, WPS_OUI_LEN, WPS_OUI_TYPE)
+/* Check whether the given IE looks like WFA P2P IE. */
+#define wl_cfgp2p_is_p2p_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \
+ (const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_P2P)
+/* Check whether the given IE looks like WFA WFDisplay IE. */
+#define WFA_OUI_TYPE_WFD 0x0a /* WiFi Display OUI TYPE */
+#define wl_cfgp2p_is_wfd_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \
+ (const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_WFD)
+/* Delete and set a management vendor IE in the firmware
+ * Parameters:
+ * @wl : wl_private data
+ * @ndev : net device for bssidx
+ * @bssidx : bssidx for BSS
+ * @pktflag : packet flag for IE (VNDR_IE_PRBREQ_FLAG,VNDR_IE_PRBRSP_FLAG, VNDR_IE_ASSOCRSP_FLAG,
+ * VNDR_IE_ASSOCREQ_FLAG)
+ * @ie : VNDR IE (such as P2P IE , WPS IE)
+ * @ie_len : VNDR IE Length
+ * Returns 0 if success.
+ */
+
+s32
+wl_cfgp2p_set_management_ie(struct wl_priv *wl, struct net_device *ndev, s32 bssidx,
+ s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len)
+{
+ /* Vendor-specific Information Element ID */
+#define VNDR_SPEC_ELEMENT_ID 0xdd
+ s32 ret = BCME_OK;
+ u32 pos;
+ u8 *ie_buf;
+ u8 *mgmt_ie_buf = NULL;
+ u32 mgmt_ie_buf_len = 0;
+ u32 *mgmt_ie_len = 0;
+ u8 ie_id, ie_len;
+ u8 delete = 0;
+#define IE_TYPE(type, bsstype) (wl_to_p2p_bss_saved_ie(wl, bsstype).p2p_ ## type ## _ie)
+#define IE_TYPE_LEN(type, bsstype) (wl_to_p2p_bss_saved_ie(wl, bsstype).p2p_ ## type ## _ie_len)
+ if (p2p_is_on(wl) && bssidx != -1) {
+ if (bssidx == P2PAPI_BSSCFG_PRIMARY)
+ bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE);
+ switch (pktflag) {
+ case VNDR_IE_PRBREQ_FLAG :
+ mgmt_ie_buf = IE_TYPE(probe_req, bssidx);
+ mgmt_ie_len = &IE_TYPE_LEN(probe_req, bssidx);
+ mgmt_ie_buf_len = sizeof(IE_TYPE(probe_req, bssidx));
+ break;
+ case VNDR_IE_PRBRSP_FLAG :
+ mgmt_ie_buf = IE_TYPE(probe_res, bssidx);
+ mgmt_ie_len = &IE_TYPE_LEN(probe_res, bssidx);
+ mgmt_ie_buf_len = sizeof(IE_TYPE(probe_res, bssidx));
+ break;
+ case VNDR_IE_ASSOCREQ_FLAG :
+ mgmt_ie_buf = IE_TYPE(assoc_req, bssidx);
+ mgmt_ie_len = &IE_TYPE_LEN(assoc_req, bssidx);
+ mgmt_ie_buf_len = sizeof(IE_TYPE(assoc_req, bssidx));
+ break;
+ case VNDR_IE_ASSOCRSP_FLAG :
+ mgmt_ie_buf = IE_TYPE(assoc_res, bssidx);
+ mgmt_ie_len = &IE_TYPE_LEN(assoc_res, bssidx);
+ mgmt_ie_buf_len = sizeof(IE_TYPE(assoc_res, bssidx));
+ break;
+ case VNDR_IE_BEACON_FLAG :
+ mgmt_ie_buf = IE_TYPE(beacon, bssidx);
+ mgmt_ie_len = &IE_TYPE_LEN(beacon, bssidx);
+ mgmt_ie_buf_len = sizeof(IE_TYPE(beacon, bssidx));
+ break;
+ default:
+ mgmt_ie_buf = NULL;
+ mgmt_ie_len = NULL;
+ CFGP2P_ERR(("not suitable type\n"));
+ return -1;
+ }
+ } else if (wl_get_mode_by_netdev(wl, ndev) == WL_MODE_AP) {
+ switch (pktflag) {
+ case VNDR_IE_PRBRSP_FLAG :
+ mgmt_ie_buf = wl->ap_info->probe_res_ie;
+ mgmt_ie_len = &wl->ap_info->probe_res_ie_len;
+ mgmt_ie_buf_len = sizeof(wl->ap_info->probe_res_ie);
+ break;
+ case VNDR_IE_BEACON_FLAG :
+ mgmt_ie_buf = wl->ap_info->beacon_ie;
+ mgmt_ie_len = &wl->ap_info->beacon_ie_len;
+ mgmt_ie_buf_len = sizeof(wl->ap_info->beacon_ie);
+ break;
+ default:
+ mgmt_ie_buf = NULL;
+ mgmt_ie_len = NULL;
+ CFGP2P_ERR(("not suitable type\n"));
+ return -1;
+ }
+ bssidx = 0;
+ } else if (bssidx == -1 && wl_get_mode_by_netdev(wl, ndev) == WL_MODE_BSS) {
+ switch (pktflag) {
+ case VNDR_IE_PRBREQ_FLAG :
+ mgmt_ie_buf = wl->sta_info->probe_req_ie;
+ mgmt_ie_len = &wl->sta_info->probe_req_ie_len;
+ mgmt_ie_buf_len = sizeof(wl->sta_info->probe_req_ie);
+ break;
+ case VNDR_IE_ASSOCREQ_FLAG :
+ mgmt_ie_buf = wl->sta_info->assoc_req_ie;
+ mgmt_ie_len = &wl->sta_info->assoc_req_ie_len;
+ mgmt_ie_buf_len = sizeof(wl->sta_info->assoc_req_ie);
+ break;
+ default:
+ mgmt_ie_buf = NULL;
+ mgmt_ie_len = NULL;
+ CFGP2P_ERR(("not suitable type\n"));
+ return -1;
+ }
+ bssidx = 0;
+ } else {
+ CFGP2P_ERR(("not suitable type\n"));
+ return -1;
+ }
+
+ if (vndr_ie_len > mgmt_ie_buf_len) {
+ CFGP2P_ERR(("extra IE size too big\n"));
+ ret = -ENOMEM;
+ } else {
+ if (mgmt_ie_buf != NULL) {
+ if (vndr_ie_len && (vndr_ie_len == *mgmt_ie_len) &&
+ (memcmp(mgmt_ie_buf, vndr_ie, vndr_ie_len) == 0)) {
+ CFGP2P_INFO(("Previous mgmt IE is equals to current IE"));
+ goto exit;
+ }
+ pos = 0;
+ delete = 1;
+ ie_buf = (u8 *) mgmt_ie_buf;
+ while (pos < *mgmt_ie_len) {
+ ie_id = ie_buf[pos++];
+ ie_len = ie_buf[pos++];
+ if ((ie_id == DOT11_MNG_VS_ID) &&
+ (wl_cfgp2p_is_wps_ie(&ie_buf[pos-2], NULL, 0) ||
+ wl_cfgp2p_is_p2p_ie(&ie_buf[pos-2], NULL, 0) ||
+ wl_cfgp2p_is_wfd_ie(&ie_buf[pos-2], NULL, 0))) {
+ CFGP2P_INFO(("DELELED ID : %d, Len : %d , OUI :"
+ "%02x:%02x:%02x\n", ie_id, ie_len, ie_buf[pos],
+ ie_buf[pos+1], ie_buf[pos+2]));
+ ret = wl_cfgp2p_vndr_ie(wl, ndev, bssidx, pktflag,
+ ie_buf+pos, VNDR_SPEC_ELEMENT_ID, ie_buf+pos+3,
+ ie_len-3, delete);
+ }
+ pos += ie_len;
+ }
+
+ }
+ *mgmt_ie_len = 0;
+ /* Add if there is any extra IE */
+ if (vndr_ie && vndr_ie_len) {
+ /* save the current IE in wl struct */
+ memcpy(mgmt_ie_buf, vndr_ie, vndr_ie_len);
+ *mgmt_ie_len = vndr_ie_len;
+ pos = 0;
+ ie_buf = (u8 *) vndr_ie;
+ delete = 0;
+ while (pos < vndr_ie_len) {
+ ie_id = ie_buf[pos++];
+ ie_len = ie_buf[pos++];
+ if ((ie_id == DOT11_MNG_VS_ID) &&
+ (wl_cfgp2p_is_wps_ie(&ie_buf[pos-2], NULL, 0) ||
+ wl_cfgp2p_is_p2p_ie(&ie_buf[pos-2], NULL, 0) ||
+ wl_cfgp2p_is_wfd_ie(&ie_buf[pos-2], NULL, 0))) {
+ CFGP2P_INFO(("ADDED ID : %d, Len : %d , OUI :"
+ "%02x:%02x:%02x\n", ie_id, ie_len, ie_buf[pos],
+ ie_buf[pos+1], ie_buf[pos+2]));
+ ret = wl_cfgp2p_vndr_ie(wl, ndev, bssidx, pktflag,
+ ie_buf+pos, VNDR_SPEC_ELEMENT_ID, ie_buf+pos+3,
+ ie_len-3, delete);
+ }
+ pos += ie_len;
+ }
+ }
+ }
+#undef IE_TYPE
+#undef IE_TYPE_LEN
+exit:
+ return ret;
+}
+
+/* Clear the management IE buffer of the BSSCFG
+ * Parameters:
+ * @wl : wl_private data
+ * @bssidx : bssidx for BSS
+ *
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_clear_management_ie(struct wl_priv *wl, s32 bssidx)
+{
+#define INIT_IE(IE_TYPE, BSS_TYPE) \
+ do { \
+ memset(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \
+ sizeof(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \
+ wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \
+ } while (0);
+ if (bssidx < 0) {
+ CFGP2P_ERR(("invalid bssidx\n"));
+ return BCME_BADARG;
+ }
+ INIT_IE(probe_req, bssidx);
+ INIT_IE(probe_res, bssidx);
+ INIT_IE(assoc_req, bssidx);
+ INIT_IE(assoc_res, bssidx);
+ INIT_IE(beacon, bssidx);
+ return BCME_OK;
+}
+
+
+/* Is any of the tlvs the expected entry? If not,
+ * update the tlvs buffer pointer/length.
+ */
+static bool
+wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type)
+{
+ /* If the contents match the OUI and the type */
+ if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
+ !bcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
+ type == ie[TLV_BODY_OFF + oui_len]) {
+ return TRUE;
+ }
+
+ if (tlvs == NULL)
+ return FALSE;
+ /* point to the next ie */
+ ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
+ /* calculate the length of the rest of the buffer */
+ *tlvs_len -= (int)(ie - *tlvs);
+ /* update the pointer to the start of the buffer */
+ *tlvs = ie;
+
+ return FALSE;
+}
+
+wpa_ie_fixed_t *
+wl_cfgp2p_find_wpaie(u8 *parse, u32 len)
+{
+ bcm_tlv_t *ie;
+
+ while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID))) {
+ if (wl_cfgp2p_is_wpa_ie((u8*)ie, &parse, &len)) {
+ return (wpa_ie_fixed_t *)ie;
+ }
+ }
+ return NULL;
+}
+
+wpa_ie_fixed_t *
+wl_cfgp2p_find_wpsie(u8 *parse, u32 len)
+{
+ bcm_tlv_t *ie;
+
+ while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID))) {
+ if (wl_cfgp2p_is_wps_ie((u8*)ie, &parse, &len)) {
+ return (wpa_ie_fixed_t *)ie;
+ }
+ }
+ return NULL;
+}
+
+wifi_p2p_ie_t *
+wl_cfgp2p_find_p2pie(u8 *parse, u32 len)
+{
+ bcm_tlv_t *ie;
+
+ while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) {
+ if (wl_cfgp2p_is_p2p_ie((uint8*)ie, &parse, &len)) {
+ return (wifi_p2p_ie_t *)ie;
+ }
+ }
+ return NULL;
+}
+
+wifi_wfd_ie_t *
+wl_cfgp2p_find_wfdie(u8 *parse, u32 len)
+{
+ bcm_tlv_t *ie;
+
+ while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) {
+ if (wl_cfgp2p_is_wfd_ie((uint8*)ie, &parse, &len)) {
+ return (wifi_wfd_ie_t *)ie;
+ }
+ }
+ return NULL;
+}
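+
+/* Illustrative use of the find helpers above ("ies"/"ies_len" stand for a
+ * buffer of 802.11 tagged IEs, e.g. the tail of a probe response).  Each
+ * helper walks the vendor-specific TLVs and returns the first match:
+ *
+ *	wpa_ie_fixed_t *wps = wl_cfgp2p_find_wpsie(ies, ies_len);
+ *	wifi_p2p_ie_t *p2p = wl_cfgp2p_find_p2pie(ies, ies_len);
+ *	if (p2p)
+ *		parse the P2P attributes starting at p2p->subelts
+ */
+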
+static s32
+wl_cfgp2p_vndr_ie(struct wl_priv *wl, struct net_device *ndev, s32 bssidx, s32 pktflag,
+ s8 *oui, s32 ie_id, s8 *data, s32 data_len, s32 delete)
+{
+ s32 err = BCME_OK;
+ s32 buf_len;
+ s32 iecount;
+
+ vndr_ie_setbuf_t *ie_setbuf;
+
+ /* Validate the pktflag parameter */
+ if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG |
+ VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG |
+ VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG))) {
+ CFGP2P_ERR(("p2pwl_vndr_ie: Invalid packet flag 0x%x\n", pktflag));
+ return -1;
+ }
+
+ buf_len = sizeof(vndr_ie_setbuf_t) + data_len - 1;
+ ie_setbuf = (vndr_ie_setbuf_t *) kzalloc(buf_len, GFP_KERNEL);
+
+ CFGP2P_INFO((" ie_id : %02x, data length : %d\n", ie_id, data_len));
+ if (!ie_setbuf) {
+
+ CFGP2P_ERR(("Error allocating buffer for IE\n"));
+ return -ENOMEM;
+ }
+ if (delete)
+ strcpy(ie_setbuf->cmd, "del");
+ else
+ strcpy(ie_setbuf->cmd, "add");
+ /* Buffer contains only 1 IE */
+ iecount = htod32(1);
+ memcpy((void *)&ie_setbuf->vndr_ie_buffer.iecount, &iecount, sizeof(int));
+ pktflag = htod32(pktflag);
+ memcpy((void *)&ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].pktflag,
+ &pktflag, sizeof(uint32));
+ ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = ie_id;
+ ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len
+ = (uchar)(data_len + VNDR_IE_MIN_LEN);
+ memcpy(ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui, oui, 3);
+ memcpy(ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.data, data, data_len);
+ err = wldev_iovar_setbuf_bsscfg(ndev, "vndr_ie", ie_setbuf, buf_len,
+ wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+
+ CFGP2P_INFO(("vndr_ie iovar returns %d\n", err));
+ kfree(ie_setbuf);
+ return err;
+}
+
+/*
+ * Look up the bssidx based on the ndev argument
+ * Parameters:
+ * @wl       : wl_private data
+ * @ndev     : net device whose bssidx to look up
+ * Returns bssidx for ndev
+ */
+s32
+wl_cfgp2p_find_idx(struct wl_priv *wl, struct net_device *ndev)
+{
+ u32 i;
+ s32 index = -1;
+
+ if (ndev == NULL) {
+ CFGP2P_ERR((" ndev is NULL\n"));
+ goto exit;
+ }
+ if (!wl->p2p_supported) {
+ return P2PAPI_BSSCFG_PRIMARY;
+ }
+ for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
+ if (ndev == wl_to_p2p_bss_ndev(wl, i)) {
+ index = wl_to_p2p_bss_bssidx(wl, i);
+ break;
+ }
+ }
+ if (index == -1)
+ return P2PAPI_BSSCFG_PRIMARY;
+exit:
+ return index;
+}
+/*
+ * Callback function for WLC_E_P2P_DISC_LISTEN_COMPLETE
+ */
+s32
+wl_cfgp2p_listen_complete(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 ret = BCME_OK;
+
+ CFGP2P_DBG((" Enter\n"));
+ if (wl_get_p2p_status(wl, LISTEN_EXPIRED) == 0) {
+ wl_set_p2p_status(wl, LISTEN_EXPIRED);
+ if (timer_pending(&wl->p2p->listen_timer)) {
+ spin_lock_bh(&wl->p2p->timer_lock);
+ del_timer_sync(&wl->p2p->listen_timer);
+ spin_unlock_bh(&wl->p2p->timer_lock);
+ }
+ cfg80211_remain_on_channel_expired(ndev, wl->last_roc_id, &wl->remain_on_chan,
+ wl->remain_on_chan_type, GFP_KERNEL);
+ } else
+ wl_clr_p2p_status(wl, LISTEN_EXPIRED);
+
+ return ret;
+
+}
+
+/*
+ * Timer expire callback function for LISTEN
+ * We can't report cfg80211_remain_on_channel_expired from Timer ISR context,
+ * so let's do it from thread context.
+ */
+static void
+wl_cfgp2p_listen_expired(unsigned long data)
+{
+ wl_event_msg_t msg;
+ struct wl_priv *wl = (struct wl_priv *) data;
+
+ CFGP2P_DBG((" Enter\n"));
+ msg.event_type = hton32(WLC_E_P2P_DISC_LISTEN_COMPLETE);
+ wl_cfg80211_event(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE), &msg, NULL);
+}
+
+/*
+ * Do a P2P Listen on the given channel for the given duration.
+ * A listen consists of sitting idle and responding to P2P probe requests
+ * with a P2P probe response.
+ *
+ * This fn assumes dongle p2p device discovery is already enabled.
+ * Parameters :
+ * @wl : wl_private data
+ * @channel : channel to listen on
+ * @duration_ms : the time (in milliseconds) to wait
+ */
+s32
+wl_cfgp2p_discover_listen(struct wl_priv *wl, s32 channel, u32 duration_ms)
+{
+#define INIT_TIMER(timer, func, duration, extra_delay) \
+ do { \
+ init_timer(timer); \
+ timer->function = func; \
+ timer->expires = jiffies + msecs_to_jiffies(duration + extra_delay); \
+ timer->data = (unsigned long) wl; \
+ add_timer(timer); \
+ } while (0);
+
+ s32 ret = BCME_OK;
+ struct timer_list *_timer;
+ CFGP2P_DBG((" Enter Channel : %d, Duration : %d\n", channel, duration_ms));
+ if (unlikely(wl_get_p2p_status(wl, DISCOVERY_ON) == 0)) {
+
+		CFGP2P_ERR((" Discovery is not set, so we have nothing to do\n"));
+
+ ret = BCME_NOTREADY;
+ goto exit;
+ }
+ if (timer_pending(&wl->p2p->listen_timer)) {
+ CFGP2P_DBG(("previous LISTEN is not completed yet\n"));
+ goto exit;
+
+ } else
+ wl_clr_p2p_status(wl, LISTEN_EXPIRED);
+
+ wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_LISTEN, channel, (u16) duration_ms,
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+ _timer = &wl->p2p->listen_timer;
+
+	/* We will wait to receive WLC_E_P2P_DISC_LISTEN_COMPLETE from the dongle;
+	 * otherwise we wait up to duration_ms + 200 ms
+ */
+ INIT_TIMER(_timer, wl_cfgp2p_listen_expired, duration_ms, 200);
+
+#undef INIT_TIMER
+exit:
+ return ret;
+}
+
+
+s32
+wl_cfgp2p_discover_enable_search(struct wl_priv *wl, u8 enable)
+{
+ s32 ret = BCME_OK;
+ CFGP2P_DBG((" Enter\n"));
+ if (!wl_get_p2p_status(wl, DISCOVERY_ON)) {
+
+ CFGP2P_DBG((" do nothing, discovery is off\n"));
+ return ret;
+ }
+ if (wl_get_p2p_status(wl, SEARCH_ENABLED) == enable) {
+ CFGP2P_DBG(("already : %d\n", enable));
+ return ret;
+ }
+
+ wl_chg_p2p_status(wl, SEARCH_ENABLED);
+ /* When disabling Search, reset the WL driver's p2p discovery state to
+ * WL_P2P_DISC_ST_SCAN.
+ */
+ if (!enable) {
+ wl_clr_p2p_status(wl, SCANNING);
+ ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0,
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+ }
+
+ return ret;
+}
+
+/*
+ * Callback function for WLC_E_ACTION_FRAME_COMPLETE, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE
+ */
+s32
+wl_cfgp2p_action_tx_complete(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 ret = BCME_OK;
+ u32 event_type = ntoh32(e->event_type);
+ u32 status = ntoh32(e->status);
+ CFGP2P_DBG((" Enter\n"));
+ if (event_type == WLC_E_ACTION_FRAME_COMPLETE) {
+
+ CFGP2P_INFO((" WLC_E_ACTION_FRAME_COMPLETE is received : %d\n", status));
+ if (status == WLC_E_STATUS_SUCCESS) {
+ wl_set_p2p_status(wl, ACTION_TX_COMPLETED);
+ }
+ else {
+ wl_set_p2p_status(wl, ACTION_TX_NOACK);
+ CFGP2P_ERR(("WLC_E_ACTION_FRAME_COMPLETE : NO ACK\n"));
+ }
+ } else {
+ CFGP2P_INFO((" WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE is received,"
+ "status : %d\n", status));
+ wake_up_interruptible(&wl->netif_change_event);
+ }
+ return ret;
+}
+/* Send an action frame immediately, without doing channel synchronization.
+ *
+ * This function waits (up to MAX_WAIT_TIME ms) for the tx-complete status
+ * before returning.
+ * The WLC_E_ACTION_FRAME_COMPLETE event is received once the action frame
+ * has been transmitted, with a status indicating whether an 802.11 ack
+ * was received.
+ * The WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE event is received when the
+ * off-channel dwell for the action frame has completed.
+ */
+s32
+wl_cfgp2p_tx_action_frame(struct wl_priv *wl, struct net_device *dev,
+ wl_af_params_t *af_params, s32 bssidx)
+{
+ s32 ret = BCME_OK;
+ s32 timeout = 0;
+
+
+ CFGP2P_INFO(("\n"));
+ CFGP2P_INFO(("channel : %u , dwell time : %u\n",
+ af_params->channel, af_params->dwell_time));
+
+ wl_clr_p2p_status(wl, ACTION_TX_COMPLETED);
+ wl_clr_p2p_status(wl, ACTION_TX_NOACK);
+#define MAX_WAIT_TIME 2000
+ if (bssidx == P2PAPI_BSSCFG_PRIMARY)
+ bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE);
+
+ ret = wldev_iovar_setbuf_bsscfg(dev, "actframe", af_params, sizeof(*af_params),
+ wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+
+ if (ret < 0) {
+
+		CFGP2P_ERR((" failed to send action frame\n"));
+ goto exit;
+ }
+ timeout = wait_event_interruptible_timeout(wl->netif_change_event,
+ (wl_get_p2p_status(wl, ACTION_TX_COMPLETED) || wl_get_p2p_status(wl, ACTION_TX_NOACK)),
+ msecs_to_jiffies(MAX_WAIT_TIME));
+
+ if (timeout > 0 && wl_get_p2p_status(wl, ACTION_TX_COMPLETED)) {
+ CFGP2P_INFO(("tx action frame operation is completed\n"));
+ ret = BCME_OK;
+ } else {
+ ret = BCME_ERROR;
+		CFGP2P_INFO(("tx action frame operation failed\n"));
+ }
+exit:
+ CFGP2P_INFO((" via act frame iovar : status = %d\n", ret));
+#undef MAX_WAIT_TIME
+ return ret;
+}
+
+/* Generate our P2P Device Address and P2P Interface Address from our primary
+ * MAC address.
+ */
+void
+wl_cfgp2p_generate_bss_mac(struct ether_addr *primary_addr,
+ struct ether_addr *out_dev_addr, struct ether_addr *out_int_addr)
+{
+ memset(out_dev_addr, 0, sizeof(*out_dev_addr));
+ memset(out_int_addr, 0, sizeof(*out_int_addr));
+
+ /* Generate the P2P Device Address. This consists of the device's
+ * primary MAC address with the locally administered bit set.
+ */
+ memcpy(out_dev_addr, primary_addr, sizeof(*out_dev_addr));
+ out_dev_addr->octet[0] |= 0x02;
+
+ /* Generate the P2P Interface Address. If the discovery and connection
+ * BSSCFGs need to simultaneously co-exist, then this address must be
+ * different from the P2P Device Address.
+ */
+ memcpy(out_int_addr, out_dev_addr, sizeof(*out_int_addr));
+ out_int_addr->octet[4] ^= 0x80;
+
+}
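+
+/* Worked example (illustrative): with primary MAC 00:90:4c:11:22:33 the code
+ * above produces
+ *   P2P Device Address    02:90:4c:11:22:33  (octet[0] |= 0x02, locally administered)
+ *   P2P Interface Address 02:90:4c:11:a2:33  (octet[4] ^= 0x80)
+ */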
+
+/* P2P IF Address change to Virtual Interface MAC Address */
+void
+wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id)
+{
+ wifi_p2p_ie_t *ie = (wifi_p2p_ie_t*) buf;
+ u16 len = ie->len;
+ u8 *subel;
+ u8 subelt_id;
+ u16 subelt_len;
+ CFGP2P_DBG((" Enter\n"));
+
+	/* Point subel at the P2P IE's subelement field.
+	 * ie->len covers everything after the length byte, so subtract the
+	 * OUI (3 bytes) and OUI type (1 byte) before walking the attributes.
+	 */
+ subel = ie->subelts;
+ len -= 4; /* exclude OUI + OUI_TYPE */
+
+ while (len >= 3) {
+ /* attribute id */
+ subelt_id = *subel;
+ subel += 1;
+ len -= 1;
+
+ /* 2-byte little endian */
+ subelt_len = *subel++;
+ subelt_len |= *subel++ << 8;
+
+ len -= 2;
+ len -= subelt_len; /* for the remaining subelt fields */
+
+ if (subelt_id == element_id) {
+ if (subelt_id == P2P_SEID_INTINTADDR) {
+ memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+ CFGP2P_INFO(("Intended P2P Interface Address ATTR FOUND\n"));
+ } else if (subelt_id == P2P_SEID_DEV_ID) {
+ memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+ CFGP2P_INFO(("Device ID ATTR FOUND\n"));
+ } else if (subelt_id == P2P_SEID_DEV_INFO) {
+ memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+ CFGP2P_INFO(("Device INFO ATTR FOUND\n"));
+ } else if (subelt_id == P2P_SEID_GROUP_ID) {
+ memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+ CFGP2P_INFO(("GROUP ID ATTR FOUND\n"));
+			}
+			return;
+ } else {
+ CFGP2P_DBG(("OTHER id : %d\n", subelt_id));
+ }
+ subel += subelt_len;
+ }
+}
+/*
+ * Check if a BSS is up.
+ * This is a common implementation called by most OSL implementations of
+ * p2posl_bss_isup(). DO NOT call this function directly from the
+ * common code -- call p2posl_bss_isup() instead to allow the OSL to
+ * override the common implementation if necessary.
+ */
+bool
+wl_cfgp2p_bss_isup(struct net_device *ndev, int bsscfg_idx)
+{
+ s32 result, val;
+ bool isup = false;
+ s8 getbuf[64];
+
+ /* Check if the BSS is up */
+ *(int*)getbuf = -1;
+ result = wldev_iovar_getbuf_bsscfg(ndev, "bss", &bsscfg_idx,
+ sizeof(bsscfg_idx), getbuf, sizeof(getbuf), 0, NULL);
+ if (result != 0) {
+ CFGP2P_ERR(("'wl bss -C %d' failed: %d\n", bsscfg_idx, result));
+ CFGP2P_ERR(("NOTE: this ioctl error is normal "
+ "when the BSS has not been created yet.\n"));
+ } else {
+ val = *(int*)getbuf;
+ val = dtoh32(val);
+ CFGP2P_INFO(("---wl bss -C %d ==> %d\n", bsscfg_idx, val));
+ isup = (val ? TRUE : FALSE);
+ }
+ return isup;
+}
+
+
+/* Bring up or down a BSS */
+s32
+wl_cfgp2p_bss(struct wl_priv *wl, struct net_device *ndev, s32 bsscfg_idx, s32 up)
+{
+ s32 ret = BCME_OK;
+ s32 val = up ? 1 : 0;
+
+ struct {
+ s32 cfg;
+ s32 val;
+ } bss_setbuf;
+
+ bss_setbuf.cfg = htod32(bsscfg_idx);
+ bss_setbuf.val = htod32(val);
+ CFGP2P_INFO(("---wl bss -C %d %s\n", bsscfg_idx, up ? "up" : "down"));
+ ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+ wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
+
+ if (ret != 0) {
+ CFGP2P_ERR(("'bss %d' failed with %d\n", up, ret));
+ }
+
+ return ret;
+}
+
+/* Check if 'p2p' is supported in the driver */
+s32
+wl_cfgp2p_supported(struct wl_priv *wl, struct net_device *ndev)
+{
+ s32 ret = BCME_OK;
+ s32 p2p_supported = 0;
+ ret = wldev_iovar_getint(ndev, "p2p",
+ &p2p_supported);
+ if (ret < 0) {
+ CFGP2P_ERR(("wl p2p error %d\n", ret));
+ return 0;
+ }
+ if (p2p_supported == 1) {
+ CFGP2P_INFO(("p2p is supported\n"));
+ } else {
+ CFGP2P_INFO(("p2p is unsupported\n"));
+ p2p_supported = 0;
+ }
+ return p2p_supported;
+}
+/* Cleanup P2P resources */
+s32
+wl_cfgp2p_down(struct wl_priv *wl)
+{
+ if (timer_pending(&wl->p2p->listen_timer))
+ del_timer_sync(&wl->p2p->listen_timer);
+ wl_cfgp2p_deinit_priv(wl);
+ return 0;
+}
+s32
+wl_cfgp2p_set_p2p_noa(struct wl_priv *wl, struct net_device *ndev, char* buf, int len)
+{
+ s32 ret = -1;
+ int count, start, duration;
+ wl_p2p_sched_t dongle_noa;
+
+ CFGP2P_DBG((" Enter\n"));
+
+ memset(&dongle_noa, 0, sizeof(dongle_noa));
+
+ if (wl->p2p && wl->p2p->vif_created) {
+
+ wl->p2p->noa.desc[0].start = 0;
+
+ sscanf(buf, "%d %d %d", &count, &start, &duration);
+ CFGP2P_DBG(("set_p2p_noa count %d start %d duration %d\n",
+ count, start, duration));
+ if (count != -1)
+ wl->p2p->noa.desc[0].count = count;
+
+ /* supplicant gives interval as start */
+ if (start != -1)
+ wl->p2p->noa.desc[0].interval = start;
+
+ if (duration != -1)
+ wl->p2p->noa.desc[0].duration = duration;
+
+ if (wl->p2p->noa.desc[0].count != 255) {
+ wl->p2p->noa.desc[0].start = 200;
+ dongle_noa.type = WL_P2P_SCHED_TYPE_REQ_ABS;
+ dongle_noa.action = WL_P2P_SCHED_ACTION_GOOFF;
+ dongle_noa.option = WL_P2P_SCHED_OPTION_TSFOFS;
+ }
+ else {
+ /* Continuous NoA interval. */
+ dongle_noa.action = WL_P2P_SCHED_ACTION_NONE;
+ dongle_noa.type = WL_P2P_SCHED_TYPE_ABS;
+ if ((wl->p2p->noa.desc[0].interval == 102) ||
+ (wl->p2p->noa.desc[0].interval == 100)) {
+ wl->p2p->noa.desc[0].start = 100 -
+ wl->p2p->noa.desc[0].duration;
+ dongle_noa.option = WL_P2P_SCHED_OPTION_BCNPCT;
+ }
+ else {
+ dongle_noa.option = WL_P2P_SCHED_OPTION_NORMAL;
+ }
+ }
+ /* Put the noa descriptor in dongle format for dongle */
+ dongle_noa.desc[0].count = htod32(wl->p2p->noa.desc[0].count);
+ if (dongle_noa.option == WL_P2P_SCHED_OPTION_BCNPCT) {
+ dongle_noa.desc[0].start = htod32(wl->p2p->noa.desc[0].start);
+ dongle_noa.desc[0].duration = htod32(wl->p2p->noa.desc[0].duration);
+ }
+ else {
+ dongle_noa.desc[0].start = htod32(wl->p2p->noa.desc[0].start*1000);
+ dongle_noa.desc[0].duration = htod32(wl->p2p->noa.desc[0].duration*1000);
+ }
+ dongle_noa.desc[0].interval = htod32(wl->p2p->noa.desc[0].interval*1000);
+
+ ret = wldev_iovar_setbuf(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION),
+ "p2p_noa", &dongle_noa, sizeof(dongle_noa), wl->ioctl_buf, WLC_IOCTL_MAXLEN,
+ &wl->ioctl_buf_sync);
+
+ if (ret < 0) {
+ CFGP2P_ERR(("fw set p2p_noa failed %d\n", ret));
+ }
+ }
+ else {
+ CFGP2P_ERR(("ERROR: set_noa in non-p2p mode\n"));
+ }
+ return ret;
+}
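+
+/* Example input (illustrative): the supplicant passes NoA parameters as a
+ * "<count> <interval> <duration>" string. buf = "255 100 50" takes the
+ * continuous-NoA branch above; with interval 100 the absence is programmed as
+ * a beacon percentage (WL_P2P_SCHED_OPTION_BCNPCT, start = 100 - 50 = 50).
+ * Any other count, e.g. "1 100 50", uses WL_P2P_SCHED_TYPE_REQ_ABS with a
+ * fixed start of 200 and start/duration scaled by 1000 before being handed
+ * to the "p2p_noa" iovar.
+ */
+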
+s32
+wl_cfgp2p_get_p2p_noa(struct wl_priv *wl, struct net_device *ndev, char* buf, int buf_len)
+{
+
+ wifi_p2p_noa_desc_t *noa_desc;
+ int len = 0, i;
+ char _buf[200];
+
+ CFGP2P_DBG((" Enter\n"));
+ buf[0] = '\0';
+ if (wl->p2p && wl->p2p->vif_created) {
+ if (wl->p2p->noa.desc[0].count || wl->p2p->ops.ops) {
+ _buf[0] = 1; /* noa index */
+ _buf[1] = (wl->p2p->ops.ops ? 0x80: 0) |
+ (wl->p2p->ops.ctw & 0x7f); /* ops + ctw */
+ len += 2;
+ if (wl->p2p->noa.desc[0].count) {
+ noa_desc = (wifi_p2p_noa_desc_t*)&_buf[len];
+ noa_desc->cnt_type = wl->p2p->noa.desc[0].count;
+ noa_desc->duration = wl->p2p->noa.desc[0].duration;
+ noa_desc->interval = wl->p2p->noa.desc[0].interval;
+ noa_desc->start = wl->p2p->noa.desc[0].start;
+ len += sizeof(wifi_p2p_noa_desc_t);
+ }
+ if (buf_len <= len * 2) {
+				CFGP2P_ERR(("ERROR: buf_len %d is not enough for "
+					"returning NoA in string format\n", buf_len));
+ return -1;
+ }
+ /* We have to convert the buffer data into ASCII strings */
+ for (i = 0; i < len; i++) {
+ sprintf(buf, "%02x", _buf[i]);
+ buf += 2;
+ }
+			*buf = '\0';	/* buf already points just past the last hex pair */
+ }
+ }
+ else {
+ CFGP2P_ERR(("ERROR: get_noa in non-p2p mode\n"));
+ return -1;
+ }
+ return len * 2;
+}
+s32
+wl_cfgp2p_set_p2p_ps(struct wl_priv *wl, struct net_device *ndev, char* buf, int len)
+{
+ int ps, ctw;
+ int ret = -1;
+ s32 legacy_ps;
+
+ CFGP2P_DBG((" Enter\n"));
+ if (wl->p2p && wl->p2p->vif_created) {
+ sscanf(buf, "%d %d %d", &legacy_ps, &ps, &ctw);
+ CFGP2P_DBG((" Enter legacy_ps %d ps %d ctw %d\n", legacy_ps, ps, ctw));
+ if (ctw != -1) {
+ wl->p2p->ops.ctw = ctw;
+ ret = 0;
+ }
+ if (ps != -1) {
+ wl->p2p->ops.ops = ps;
+ ret = wldev_iovar_setbuf(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION),
+ "p2p_ops", &wl->p2p->ops, sizeof(wl->p2p->ops),
+ wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
+ if (ret < 0) {
+ CFGP2P_ERR(("fw set p2p_ops failed %d\n", ret));
+ }
+ }
+
+ if ((legacy_ps != -1) && ((legacy_ps == PM_MAX) || (legacy_ps == PM_OFF))) {
+ ret = wldev_ioctl(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION),
+ WLC_SET_PM, &legacy_ps, sizeof(legacy_ps), true);
+ if (unlikely(ret)) {
+ CFGP2P_ERR(("error (%d)\n", ret));
+ }
+ }
+ else
+			CFGP2P_ERR(("illegal setting\n"));
+ }
+ else {
+ CFGP2P_ERR(("ERROR: set_p2p_ps in non-p2p mode\n"));
+ ret = -1;
+ }
+ return ret;
+}
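+
+/* Example input (illustrative): buf is parsed as "<legacy_ps> <ops> <ctw>",
+ * e.g. "1 1 10" sets opportunistic power save (ops = 1) with a CTWindow of 10
+ * via the "p2p_ops" iovar; the first field is applied as the legacy PM mode on
+ * the connection interface only when it equals PM_MAX or PM_OFF.
+ */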
+
+u8 *
+wl_cfgp2p_retreive_p2pattrib(void *buf, u8 element_id)
+{
+ wifi_p2p_ie_t *ie = NULL;
+ u16 len = 0;
+ u8 *subel;
+ u8 subelt_id;
+ u16 subelt_len;
+
+ if (!buf) {
+ WL_ERR(("P2P IE not present"));
+ return 0;
+ }
+
+ ie = (wifi_p2p_ie_t*) buf;
+ len = ie->len;
+
+	/* Point subel at the P2P IE's subelement field.
+	 * ie->len covers everything after the length byte, so subtract the
+	 * OUI (3 bytes) and OUI type (1 byte) before walking the attributes.
+	 */
+ subel = ie->subelts;
+ len -= 4; /* exclude OUI + OUI_TYPE */
+
+ while (len >= 3) {
+ /* attribute id */
+ subelt_id = *subel;
+ subel += 1;
+ len -= 1;
+
+ /* 2-byte little endian */
+ subelt_len = *subel++;
+ subelt_len |= *subel++ << 8;
+
+ len -= 2;
+ len -= subelt_len; /* for the remaining subelt fields */
+
+ if (subelt_id == element_id) {
+ /* This will point to start of subelement attrib after
+ * attribute id & len
+ */
+ return subel;
+ }
+
+ /* Go to next subelement */
+ subel += subelt_len;
+ }
+
+ /* Not Found */
+ return NULL;
+}
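+
+/* P2P attribute layout assumed by the walker above (illustrative):
+ *
+ *   +----+---------------+------------------+
+ *   | ID | Length (LE16) | Body ...         |
+ *   +----+---------------+------------------+
+ *    1 B       2 B          Length bytes
+ *
+ * The returned pointer addresses the first body byte of the matching
+ * attribute, i.e. the ID and the two length bytes have already been consumed.
+ */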
+
+#define P2P_GROUP_CAPAB_GO_BIT 0x01
+u8 *
+wl_cfgp2p_retreive_p2p_dev_addr(wl_bss_info_t *bi, u32 bi_length)
+{
+ wifi_p2p_ie_t * p2p_ie = NULL;
+ u8 *capability = NULL;
+ bool p2p_go = 0;
+ u8 *ptr = NULL;
+
+ if (!(p2p_ie = wl_cfgp2p_find_p2pie(((u8 *) bi) + bi->ie_offset, bi->ie_length))) {
+ WL_ERR(("P2P IE not found"));
+ return NULL;
+ }
+
+ if (!(capability = wl_cfgp2p_retreive_p2pattrib(p2p_ie, P2P_SEID_P2P_INFO))) {
+ WL_ERR(("P2P Capability attribute not found"));
+ return NULL;
+ }
+
+ /* Check Group capability for Group Owner bit */
+ p2p_go = capability[1] & P2P_GROUP_CAPAB_GO_BIT;
+ if (!p2p_go) {
+ return bi->BSSID.octet;
+ }
+
+ /* In probe responses, DEVICE INFO attribute will be present */
+ if (!(ptr = wl_cfgp2p_retreive_p2pattrib(p2p_ie, P2P_SEID_DEV_INFO))) {
+ /* If DEVICE_INFO is not found, this might be a beacon frame.
+ * check for DEVICE_ID in the beacon frame.
+ */
+ ptr = wl_cfgp2p_retreive_p2pattrib(p2p_ie, P2P_SEID_DEV_ID);
+ }
+
+ if (!ptr)
+		WL_ERR(("Neither DEVICE_ID nor DEVICE_INFO attribute present in P2P IE\n"));
+
+ return ptr;
+}
+
+s32
+wl_cfgp2p_register_ndev(struct wl_priv *wl)
+{
+ int ret = 0;
+ struct net_device* net = NULL;
+ struct wireless_dev *wdev;
+ uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x33, 0x22, 0x11 };
+
+	/* Allocate etherdev with room for a pointer to our private structure */
+ if (!(net = alloc_etherdev(sizeof(wl)))) {
+ CFGP2P_ERR(("%s: OOM - alloc_etherdev\n", __FUNCTION__));
+ goto fail;
+ }
+
+ strcpy(net->name, "p2p%d");
+ net->name[IFNAMSIZ - 1] = '\0';
+
+ /* Copy the reference to wl_priv */
+ memcpy((void *)netdev_priv(net), &wl, sizeof(wl));
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+ ASSERT(!net->open);
+ net->do_ioctl = wl_cfgp2p_do_ioctl;
+ net->hard_start_xmit = wl_cfgp2p_start_xmit;
+ net->open = wl_cfgp2p_if_open;
+ net->stop = wl_cfgp2p_if_stop;
+#else
+ ASSERT(!net->netdev_ops);
+ net->netdev_ops = &wl_cfgp2p_if_ops;
+#endif
+
+ /* Register with a dummy MAC addr */
+ memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
+
+ wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+ if (unlikely(!wdev)) {
+ WL_ERR(("Could not allocate wireless device\n"));
+ return -ENOMEM;
+ }
+
+ wdev->wiphy = wl->wdev->wiphy;
+
+ wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS);
+
+ net->ieee80211_ptr = wdev;
+
+ SET_NETDEV_DEV(net, wiphy_dev(wdev->wiphy));
+
+ /* Associate p2p0 network interface with new wdev */
+ wdev->netdev = net;
+
+	/* Store the p2p net pointer for later reference. Note that iflist won't
+	 * have this entry, as its corresponding firmware interface is a "hidden"
+	 * interface.
+	 */
+ if (wl->p2p_net) {
+ CFGP2P_ERR(("p2p_net defined already.\n"));
+ return -EINVAL;
+ } else {
+ wl->p2p_wdev = wdev;
+ wl->p2p_net = net;
+ }
+
+ ret = register_netdev(net);
+ if (ret) {
+		CFGP2P_ERR((" register_netdev failed (%d)\n", ret));
+ goto fail;
+ }
+
+ printk("%s: P2P Interface Registered\n", net->name);
+
+ return ret;
+fail:
+	if (net) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+		net->open = NULL;
+#else
+		net->netdev_ops = NULL;
+#endif
+		/* register_netdev() has not succeeded on this path, so only free */
+		free_netdev(net);
+	}
+
+ return -ENODEV;
+}
+
+s32
+wl_cfgp2p_unregister_ndev(struct wl_priv *wl)
+{
+
+ if (!wl || !wl->p2p_net) {
+ CFGP2P_ERR(("Invalid Ptr\n"));
+ return -EINVAL;
+ }
+
+ unregister_netdev(wl->p2p_net);
+ free_netdev(wl->p2p_net);
+
+ return 0;
+}
+static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	CFGP2P_DBG(("(%s) is not used for data operations. Dropping the packet.\n", ndev->name));
+	/* We claimed the skb by returning NETDEV_TX_OK, so free it here */
+	dev_kfree_skb_any(skb);
+	return 0;
+}
+
+static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+ int ret = 0;
+ struct wl_priv *wl = *(struct wl_priv **)netdev_priv(net);
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+
+	/* There is no ifidx corresponding to p2p0 in our firmware, so we should
+	 * not handle any ioctl commands on p2p0 other than Android private
+	 * commands. Android private commands are mapped to the primary interface.
+	 */
+ if (cmd == SIOCDEVPRIVATE+1) {
+ ret = wl_android_priv_cmd(ndev, ifr, cmd);
+
+ } else {
+ CFGP2P_ERR(("%s: IOCTL req 0x%x on p2p0 I/F. Ignoring. \n",
+ __FUNCTION__, cmd));
+ return -1;
+ }
+
+ return ret;
+}
+
+static int wl_cfgp2p_if_open(struct net_device *net)
+{
+ struct wireless_dev *wdev = net->ieee80211_ptr;
+
+ if (!wdev)
+ return -EINVAL;
+
+	/* If the firmware download (normally done via "ifconfig wlan0 up") hasn't
+	 * happened yet, do it here. This ensures that in concurrent mode the
+	 * supplicant does not depend on a particular interface initialization
+	 * order, i.e. both "wpa_supplicant -iwlan0 -N -ip2p0" and
+	 * "wpa_supplicant -ip2p0 -N -iwlan0" work.
+	 */
+ wl_cfg80211_do_driver_init(net);
+
+ wdev->wiphy->interface_modes |= (BIT(NL80211_IFTYPE_P2P_CLIENT)
+ | BIT(NL80211_IFTYPE_P2P_GO));
+
+ return 0;
+}
+
+static int wl_cfgp2p_if_stop(struct net_device *net)
+{
+ struct wireless_dev *wdev = net->ieee80211_ptr;
+
+ if (!wdev)
+ return -EINVAL;
+
+ wdev->wiphy->interface_modes = (wdev->wiphy->interface_modes)
+ & (~(BIT(NL80211_IFTYPE_P2P_CLIENT)|
+ BIT(NL80211_IFTYPE_P2P_GO)));
+ return 0;
+}
diff --git a/drivers/net/wireless/bcmdhd/wl_cfgp2p.h b/drivers/net/wireless/bcmdhd/wl_cfgp2p.h
new file mode 100644
index 000000000000..427cb4a11bd1
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfgp2p.h
@@ -0,0 +1,284 @@
+/*
+ * Linux cfgp2p driver
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfgp2p.h 316197 2012-02-21 13:16:39Z $
+ */
+#ifndef _wl_cfgp2p_h_
+#define _wl_cfgp2p_h_
+#include <proto/802.11.h>
+#include <proto/p2p.h>
+
+struct wl_priv;
+extern u32 wl_dbg_level;
+
+typedef struct wifi_p2p_ie wifi_wfd_ie_t;
+/* Enumeration of the usages of the BSSCFGs used by the P2P Library. Do not
+ * confuse this with a bsscfg index. This value is an index into the
+ * saved_ie[] array of structures which in turn contains a bsscfg index field.
+ */
+typedef enum {
+ P2PAPI_BSSCFG_PRIMARY, /* maps to driver's primary bsscfg */
+ P2PAPI_BSSCFG_DEVICE, /* maps to driver's P2P device discovery bsscfg */
+ P2PAPI_BSSCFG_CONNECTION, /* maps to driver's P2P connection bsscfg */
+ P2PAPI_BSSCFG_MAX
+} p2p_bsscfg_type_t;
+
+#define IE_MAX_LEN 300
+/* Structure to hold all saved P2P and WPS IEs for a BSSCFG */
+struct p2p_saved_ie {
+ u8 p2p_probe_req_ie[IE_MAX_LEN];
+ u8 p2p_probe_res_ie[IE_MAX_LEN];
+ u8 p2p_assoc_req_ie[IE_MAX_LEN];
+ u8 p2p_assoc_res_ie[IE_MAX_LEN];
+ u8 p2p_beacon_ie[IE_MAX_LEN];
+ u32 p2p_probe_req_ie_len;
+ u32 p2p_probe_res_ie_len;
+ u32 p2p_assoc_req_ie_len;
+ u32 p2p_assoc_res_ie_len;
+ u32 p2p_beacon_ie_len;
+};
+
+struct p2p_bss {
+ u32 bssidx;
+ struct net_device *dev;
+ struct p2p_saved_ie saved_ie;
+ void *private_data;
+};
+
+struct p2p_info {
+ bool on; /* p2p on/off switch */
+ bool scan;
+ bool vif_created;
+ s8 vir_ifname[IFNAMSIZ];
+ unsigned long status;
+ struct ether_addr dev_addr;
+ struct ether_addr int_addr;
+ struct p2p_bss bss_idx[P2PAPI_BSSCFG_MAX];
+ struct timer_list listen_timer;
+ wl_p2p_sched_t noa;
+ wl_p2p_ops_t ops;
+ wlc_ssid_t ssid;
+ spinlock_t timer_lock;
+};
+
+/* dongle status */
+enum wl_cfgp2p_status {
+ WLP2P_STATUS_DISCOVERY_ON = 0,
+ WLP2P_STATUS_SEARCH_ENABLED,
+ WLP2P_STATUS_IF_ADD,
+ WLP2P_STATUS_IF_DEL,
+ WLP2P_STATUS_IF_DELETING,
+ WLP2P_STATUS_IF_CHANGING,
+ WLP2P_STATUS_IF_CHANGED,
+ WLP2P_STATUS_LISTEN_EXPIRED,
+ WLP2P_STATUS_ACTION_TX_COMPLETED,
+ WLP2P_STATUS_ACTION_TX_NOACK,
+ WLP2P_STATUS_SCANNING
+};
+
+
+#define wl_to_p2p_bss_ndev(wl, type) ((wl)->p2p->bss_idx[type].dev)
+#define wl_to_p2p_bss_bssidx(wl, type) ((wl)->p2p->bss_idx[type].bssidx)
+#define wl_to_p2p_bss_saved_ie(wl, type) ((wl)->p2p->bss_idx[type].saved_ie)
+#define wl_to_p2p_bss_private(wl, type) ((wl)->p2p->bss_idx[type].private_data)
+#define wl_to_p2p_bss(wl, type) ((wl)->p2p->bss_idx[type])
+#define wl_get_p2p_status(wl, stat) ((!(wl)->p2p_supported) ? 0 : test_bit(WLP2P_STATUS_ ## stat, \
+ &(wl)->p2p->status))
+#define wl_set_p2p_status(wl, stat) ((!(wl)->p2p_supported) ? 0 : set_bit(WLP2P_STATUS_ ## stat, \
+ &(wl)->p2p->status))
+#define wl_clr_p2p_status(wl, stat) ((!(wl)->p2p_supported) ? 0 : clear_bit(WLP2P_STATUS_ ## stat, \
+ &(wl)->p2p->status))
+#define wl_chg_p2p_status(wl, stat) ((!(wl)->p2p_supported) ? 0:change_bit(WLP2P_STATUS_ ## stat, \
+ &(wl)->p2p->status))
+#define p2p_on(wl) ((wl)->p2p->on)
+#define p2p_scan(wl) ((wl)->p2p->scan)
+#define p2p_is_on(wl) ((wl)->p2p && (wl)->p2p->on)
+
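+/* Usage sketch (illustrative): the status helpers hide the bitops on
+ * wl->p2p->status, so
+ *
+ *	wl_set_p2p_status(wl, LISTEN_EXPIRED);
+ *
+ * expands to set_bit(WLP2P_STATUS_LISTEN_EXPIRED, &wl->p2p->status) when p2p
+ * is supported, and evaluates to 0 (a no-op) otherwise.
+ */
+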
+/* dword align allocation */
+#define WLC_IOCTL_MAXLEN 8192
+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
+
+#define CFGP2P_ERR(args) \
+ do { \
+ if (wl_dbg_level & WL_DBG_ERR) { \
+ printk(KERN_ERR "CFGP2P-ERROR) %s : ", __func__); \
+ printk args; \
+ } \
+ } while (0)
+#define CFGP2P_INFO(args) \
+ do { \
+ if (wl_dbg_level & WL_DBG_INFO) { \
+ printk(KERN_ERR "CFGP2P-INFO) %s : ", __func__); \
+ printk args; \
+ } \
+ } while (0)
+#define CFGP2P_DBG(args) \
+ do { \
+ if (wl_dbg_level & WL_DBG_DBG) { \
+ printk(KERN_ERR "CFGP2P-DEBUG) %s :", __func__); \
+ printk args; \
+ } \
+ } while (0)
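+
+/* Usage note (illustrative): args is forwarded to printk unchanged, so every
+ * call site needs double parentheses, e.g.
+ *
+ *	CFGP2P_ERR(("bss %d failed with %d\n", bsscfg_idx, ret));
+ *
+ * and the message is emitted only when the matching WL_DBG_* bit is set in
+ * wl_dbg_level.
+ */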
+
+extern bool
+wl_cfgp2p_is_pub_action(void *frame, u32 frame_len);
+extern bool
+wl_cfgp2p_is_p2p_action(void *frame, u32 frame_len);
+extern bool
+wl_cfgp2p_is_gas_action(void *frame, u32 frame_len);
+extern void
+wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len);
+extern s32
+wl_cfgp2p_init_priv(struct wl_priv *wl);
+extern void
+wl_cfgp2p_deinit_priv(struct wl_priv *wl);
+extern s32
+wl_cfgp2p_set_firm_p2p(struct wl_priv *wl);
+extern s32
+wl_cfgp2p_set_p2p_mode(struct wl_priv *wl, u8 mode,
+ u32 channel, u16 listen_ms, int bssidx);
+extern s32
+wl_cfgp2p_ifadd(struct wl_priv *wl, struct ether_addr *mac, u8 if_type,
+ chanspec_t chspec);
+extern s32
+wl_cfgp2p_ifdel(struct wl_priv *wl, struct ether_addr *mac);
+extern s32
+wl_cfgp2p_ifchange(struct wl_priv *wl, struct ether_addr *mac, u8 if_type, chanspec_t chspec);
+
+extern s32
+wl_cfgp2p_ifidx(struct wl_priv *wl, struct ether_addr *mac, s32 *index);
+
+extern s32
+wl_cfgp2p_init_discovery(struct wl_priv *wl);
+extern s32
+wl_cfgp2p_enable_discovery(struct wl_priv *wl, struct net_device *dev, const u8 *ie, u32 ie_len);
+extern s32
+wl_cfgp2p_disable_discovery(struct wl_priv *wl);
+extern s32
+wl_cfgp2p_escan(struct wl_priv *wl, struct net_device *dev, u16 active, u32 num_chans,
+ u16 *channels,
+ s32 search_state, u16 action, u32 bssidx);
+
+extern s32
+wl_cfgp2p_act_frm_search(struct wl_priv *wl, struct net_device *ndev,
+ s32 bssidx, s32 channel);
+
+extern wpa_ie_fixed_t *
+wl_cfgp2p_find_wpaie(u8 *parse, u32 len);
+
+extern wpa_ie_fixed_t *
+wl_cfgp2p_find_wpsie(u8 *parse, u32 len);
+
+extern wifi_p2p_ie_t *
+wl_cfgp2p_find_p2pie(u8 *parse, u32 len);
+
+extern wifi_wfd_ie_t *
+wl_cfgp2p_find_wfdie(u8 *parse, u32 len);
+extern s32
+wl_cfgp2p_set_management_ie(struct wl_priv *wl, struct net_device *ndev, s32 bssidx,
+ s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len);
+extern s32
+wl_cfgp2p_clear_management_ie(struct wl_priv *wl, s32 bssidx);
+
+extern s32
+wl_cfgp2p_find_idx(struct wl_priv *wl, struct net_device *ndev);
+
+
+extern s32
+wl_cfgp2p_listen_complete(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+extern s32
+wl_cfgp2p_discover_listen(struct wl_priv *wl, s32 channel, u32 duration_ms);
+
+extern s32
+wl_cfgp2p_discover_enable_search(struct wl_priv *wl, u8 enable);
+
+extern s32
+wl_cfgp2p_action_tx_complete(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+extern s32
+wl_cfgp2p_tx_action_frame(struct wl_priv *wl, struct net_device *dev,
+ wl_af_params_t *af_params, s32 bssidx);
+
+extern void
+wl_cfgp2p_generate_bss_mac(struct ether_addr *primary_addr, struct ether_addr *out_dev_addr,
+ struct ether_addr *out_int_addr);
+
+extern void
+wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id);
+extern bool
+wl_cfgp2p_bss_isup(struct net_device *ndev, int bsscfg_idx);
+
+extern s32
+wl_cfgp2p_bss(struct wl_priv *wl, struct net_device *ndev, s32 bsscfg_idx, s32 up);
+
+
+extern s32
+wl_cfgp2p_supported(struct wl_priv *wl, struct net_device *ndev);
+
+extern s32
+wl_cfgp2p_down(struct wl_priv *wl);
+
+extern s32
+wl_cfgp2p_set_p2p_noa(struct wl_priv *wl, struct net_device *ndev, char* buf, int len);
+
+extern s32
+wl_cfgp2p_get_p2p_noa(struct wl_priv *wl, struct net_device *ndev, char* buf, int len);
+
+extern s32
+wl_cfgp2p_set_p2p_ps(struct wl_priv *wl, struct net_device *ndev, char* buf, int len);
+
+extern u8 *
+wl_cfgp2p_retreive_p2pattrib(void *buf, u8 element_id);
+
+extern u8 *
+wl_cfgp2p_retreive_p2p_dev_addr(wl_bss_info_t *bi, u32 bi_length);
+
+extern s32
+wl_cfgp2p_register_ndev(struct wl_priv *wl);
+
+extern s32
+wl_cfgp2p_unregister_ndev(struct wl_priv *wl);
+
+/* WiFi Direct */
+#define SOCIAL_CHAN_1 1
+#define SOCIAL_CHAN_2 6
+#define SOCIAL_CHAN_3 11
+#define SOCIAL_CHAN_CNT 3
+#define WL_P2P_WILDCARD_SSID "DIRECT-"
+#define WL_P2P_WILDCARD_SSID_LEN 7
+#define WL_P2P_INTERFACE_PREFIX "p2p"
+#define WL_P2P_TEMP_CHAN 11
+
+
+#define IS_GAS_REQ(frame, len) (wl_cfgp2p_is_gas_action(frame, len) && \
+ ((frame->action == P2PSD_ACTION_ID_GAS_IREQ) || \
+ (frame->action == P2PSD_ACTION_ID_GAS_CREQ)))
+#define IS_P2P_PUB_ACT_REQ(frame, len) (wl_cfgp2p_is_pub_action(frame, len) && \
+ ((frame->subtype == P2P_PAF_GON_REQ) || \
+ (frame->subtype == P2P_PAF_INVITE_REQ) || \
+ (frame->subtype == P2P_PAF_PROVDIS_REQ)))
+#define IS_P2P_SOCIAL(ch) ((ch == SOCIAL_CHAN_1) || (ch == SOCIAL_CHAN_2) || (ch == SOCIAL_CHAN_3))
+#define IS_P2P_SSID(ssid) (memcmp(ssid, WL_P2P_WILDCARD_SSID, WL_P2P_WILDCARD_SSID_LEN) == 0)
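+
+/* Illustrative examples of the helpers above: IS_P2P_SOCIAL(6) is true since
+ * 6 is one of the three social channels (1, 6, 11), and IS_P2P_SSID("DIRECT-ab")
+ * is true because the SSID begins with the WL_P2P_WILDCARD_SSID prefix "DIRECT-".
+ */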
+#endif /* _wl_cfgp2p_h_ */
diff --git a/drivers/net/wireless/bcmdhd/wl_dbg.h b/drivers/net/wireless/bcmdhd/wl_dbg.h
new file mode 100644
index 000000000000..b5e708075b83
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_dbg.h
@@ -0,0 +1,63 @@
+/*
+ * Minimal debug/trace/assert driver definitions for
+ * Broadcom 802.11 Networking Adapter.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_dbg.h 326635 2012-04-10 03:15:29Z $
+ */
+
+
+#ifndef _wl_dbg_h_
+#define _wl_dbg_h_
+
+
+extern uint32 wl_msg_level;
+extern uint32 wl_msg_level2;
+
+#define WL_TIMESTAMP()
+
+#if 0 && (VERSION_MAJOR > 9)
+#include <IOKit/apple80211/IO8Log.h>
+#define WL_PRINT(args) do { printf args; IO8Log args; } while (0)
+#else
+#define WL_PRINT(args) do { WL_TIMESTAMP(); printf args; } while (0)
+#endif
+
+
+
+#define WL_NONE(args)
+
+#define WL_ERROR(args)
+#define WL_TRACE(args)
+#define WL_APSTA_UPDN(args)
+#define WL_APSTA_RX(args)
+#ifdef WLMSG_WSEC
+#define WL_WSEC(args) WL_PRINT(args)
+#define WL_WSEC_DUMP(args) WL_PRINT(args)
+#else
+#define WL_WSEC(args)
+#define WL_WSEC_DUMP(args)
+#endif
+
+extern uint32 wl_msg_level;
+extern uint32 wl_msg_level2;
+#endif
diff --git a/drivers/net/wireless/bcmdhd/wl_iw.c b/drivers/net/wireless/bcmdhd/wl_iw.c
new file mode 100644
index 000000000000..be080250ce5b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_iw.c
@@ -0,0 +1,3737 @@
+/*
+ * Linux Wireless Extensions support
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_iw.c 312290 2012-02-02 02:52:18Z $
+ */
+
+#if defined(USE_IW)
+#define LINUX_PORT
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <proto/ethernet.h>
+
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+
+typedef const struct si_pub si_t;
+#include <wlioctl.h>
+
+
+#include <wl_dbg.h>
+#include <wl_iw.h>
+
+#ifdef BCMWAPI_WPI
+
+#ifndef IW_ENCODE_ALG_SM4
+#define IW_ENCODE_ALG_SM4 0x20
+#endif
+
+#ifndef IW_AUTH_WAPI_ENABLED
+#define IW_AUTH_WAPI_ENABLED 0x20
+#endif
+
+#ifndef IW_AUTH_WAPI_VERSION_1
+#define IW_AUTH_WAPI_VERSION_1 0x00000008
+#endif
+
+#ifndef IW_AUTH_CIPHER_SMS4
+#define IW_AUTH_CIPHER_SMS4 0x00000020
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_PSK
+#define IW_AUTH_KEY_MGMT_WAPI_PSK 4
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_CERT
+#define IW_AUTH_KEY_MGMT_WAPI_CERT 8
+#endif
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+#include <linux/rtnetlink.h>
+#endif
+#if defined(SOFTAP)
+struct net_device *ap_net_dev = NULL;
+tsk_ctl_t ap_eth_ctl;
+#endif
+
+extern bool wl_iw_conn_status_str(uint32 event_type, uint32 status,
+ uint32 reason, char* stringBuf, uint buflen);
+
+uint wl_msg_level = WL_ERROR_VAL;
+
+#define MAX_WLIW_IOCTL_LEN 1024
+
+
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+#define htodchanspec(i) i
+#define dtohchanspec(i) i
+
+extern struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
+extern int dhd_wait_pend8021x(struct net_device *dev);
+
+#if WIRELESS_EXT < 19
+#define IW_IOCTL_IDX(cmd) ((cmd) - SIOCIWFIRST)
+#define IW_EVENT_IDX(cmd) ((cmd) - IWEVFIRST)
+#endif
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define DAEMONIZE(a) daemonize(a); \
+ allow_signal(SIGKILL); \
+ allow_signal(SIGTERM);
+#else
+#define RAISE_RX_SOFTIRQ() \
+ cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
+#define DAEMONIZE(a) daemonize(); \
+ do { if (a) \
+ strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a) + 1))); \
+ } while (0);
+#endif
+
+#define ISCAN_STATE_IDLE 0
+#define ISCAN_STATE_SCANING 1
+
+
+#define WLC_IW_ISCAN_MAXLEN 2048
+typedef struct iscan_buf {
+ struct iscan_buf * next;
+ char iscan_buf[WLC_IW_ISCAN_MAXLEN];
+} iscan_buf_t;
+
+typedef struct iscan_info {
+ struct net_device *dev;
+ struct timer_list timer;
+ uint32 timer_ms;
+ uint32 timer_on;
+ int iscan_state;
+ iscan_buf_t * list_hdr;
+ iscan_buf_t * list_cur;
+
+
+ long sysioc_pid;
+ struct semaphore sysioc_sem;
+ struct completion sysioc_exited;
+
+
+ char ioctlbuf[WLC_IOCTL_SMLEN];
+} iscan_info_t;
+iscan_info_t *g_iscan = NULL;
+static void wl_iw_timerfunc(ulong data);
+static void wl_iw_set_event_mask(struct net_device *dev);
+static int wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action);
+
+
+typedef struct priv_link {
+ wl_iw_t *wliw;
+} priv_link_t;
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#define WL_DEV_LINK(dev) (priv_link_t*)(dev->priv)
+#else
+#define WL_DEV_LINK(dev) (priv_link_t*)netdev_priv(dev)
+#endif
+
+
+#define IW_DEV_IF(dev) ((wl_iw_t*)(WL_DEV_LINK(dev))->wliw)
+
+static void swap_key_from_BE(
+ wl_wsec_key_t *key
+)
+{
+ key->index = htod32(key->index);
+ key->len = htod32(key->len);
+ key->algo = htod32(key->algo);
+ key->flags = htod32(key->flags);
+ key->rxiv.hi = htod32(key->rxiv.hi);
+ key->rxiv.lo = htod16(key->rxiv.lo);
+ key->iv_initialized = htod32(key->iv_initialized);
+}
+
+static void swap_key_to_BE(
+ wl_wsec_key_t *key
+)
+{
+ key->index = dtoh32(key->index);
+ key->len = dtoh32(key->len);
+ key->algo = dtoh32(key->algo);
+ key->flags = dtoh32(key->flags);
+ key->rxiv.hi = dtoh32(key->rxiv.hi);
+ key->rxiv.lo = dtoh16(key->rxiv.lo);
+ key->iv_initialized = dtoh32(key->iv_initialized);
+}
+
+static int
+dev_wlc_ioctl(
+ struct net_device *dev,
+ int cmd,
+ void *arg,
+ int len
+)
+{
+ struct ifreq ifr;
+ wl_ioctl_t ioc;
+ mm_segment_t fs;
+ int ret;
+
+ memset(&ioc, 0, sizeof(ioc));
+ ioc.cmd = cmd;
+ ioc.buf = arg;
+ ioc.len = len;
+
+ strcpy(ifr.ifr_name, dev->name);
+ ifr.ifr_data = (caddr_t) &ioc;
+
+#ifndef LINUX_HYBRID
+
+ dev_open(dev);
+#endif
+
+ fs = get_fs();
+ set_fs(get_ds());
+#if defined(WL_USE_NETDEV_OPS)
+ ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
+#else
+ ret = dev->do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
+#endif
+ set_fs(fs);
+
+ return ret;
+}
+
+
+
+static int
+dev_wlc_intvar_set(
+ struct net_device *dev,
+ char *name,
+ int val)
+{
+ char buf[WLC_IOCTL_SMLEN];
+ uint len;
+
+ val = htod32(val);
+ len = bcm_mkiovar(name, (char *)(&val), sizeof(val), buf, sizeof(buf));
+ ASSERT(len);
+
+ return (dev_wlc_ioctl(dev, WLC_SET_VAR, buf, len));
+}
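+
+/* Illustrative call (mirrors wl_iw_set_leddc below): an integer iovar is set
+ * by packing "name" plus the 32-bit value with bcm_mkiovar and issuing
+ * WLC_SET_VAR, e.g.
+ *
+ *	error = dev_wlc_intvar_set(dev, "leddc", dc);
+ */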
+
+static int
+dev_iw_iovar_setbuf(
+ struct net_device *dev,
+ char *iovar,
+ void *param,
+ int paramlen,
+ void *bufptr,
+ int buflen)
+{
+ int iolen;
+
+ iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen);
+ ASSERT(iolen);
+
+ return (dev_wlc_ioctl(dev, WLC_SET_VAR, bufptr, iolen));
+}
+
+static int
+dev_iw_iovar_getbuf(
+ struct net_device *dev,
+ char *iovar,
+ void *param,
+ int paramlen,
+ void *bufptr,
+ int buflen)
+{
+ int iolen;
+
+ iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen);
+ ASSERT(iolen);
+ BCM_REFERENCE(iolen);
+
+ return (dev_wlc_ioctl(dev, WLC_GET_VAR, bufptr, buflen));
+}
+
+#if WIRELESS_EXT > 17
+static int
+dev_wlc_bufvar_set(
+ struct net_device *dev,
+ char *name,
+ char *buf, int len)
+{
+ char *ioctlbuf;
+ uint buflen;
+ int error;
+
+ ioctlbuf = kmalloc(MAX_WLIW_IOCTL_LEN, GFP_KERNEL);
+ if (!ioctlbuf)
+ return -ENOMEM;
+
+ buflen = bcm_mkiovar(name, buf, len, ioctlbuf, MAX_WLIW_IOCTL_LEN);
+ ASSERT(buflen);
+ error = dev_wlc_ioctl(dev, WLC_SET_VAR, ioctlbuf, buflen);
+
+ kfree(ioctlbuf);
+ return error;
+}
+#endif
+
+
+
+static int
+dev_wlc_bufvar_get(
+ struct net_device *dev,
+ char *name,
+ char *buf, int buflen)
+{
+ char *ioctlbuf;
+ int error;
+
+ uint len;
+
+ ioctlbuf = kmalloc(MAX_WLIW_IOCTL_LEN, GFP_KERNEL);
+ if (!ioctlbuf)
+ return -ENOMEM;
+ len = bcm_mkiovar(name, NULL, 0, ioctlbuf, MAX_WLIW_IOCTL_LEN);
+ ASSERT(len);
+ BCM_REFERENCE(len);
+ error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)ioctlbuf, MAX_WLIW_IOCTL_LEN);
+ if (!error)
+ bcopy(ioctlbuf, buf, buflen);
+
+ kfree(ioctlbuf);
+ return (error);
+}
+
+
+
+static int
+dev_wlc_intvar_get(
+ struct net_device *dev,
+ char *name,
+ int *retval)
+{
+ union {
+ char buf[WLC_IOCTL_SMLEN];
+ int val;
+ } var;
+ int error;
+
+ uint len;
+ uint data_null;
+
+ len = bcm_mkiovar(name, (char *)(&data_null), 0, (char *)(&var), sizeof(var.buf));
+ ASSERT(len);
+ error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)&var, len);
+
+ *retval = dtoh32(var.val);
+
+ return (error);
+}
+
+
+#if WIRELESS_EXT < 13
+struct iw_request_info
+{
+ __u16 cmd;
+ __u16 flags;
+};
+
+typedef int (*iw_handler)(struct net_device *dev, struct iw_request_info *info,
+ void *wrqu, char *extra);
+#endif
+
+#if WIRELESS_EXT > 12
+static int
+wl_iw_set_leddc(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int dc = *(int *)extra;
+ int error;
+
+ error = dev_wlc_intvar_set(dev, "leddc", dc);
+ return error;
+}
+
+static int
+wl_iw_set_vlanmode(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int mode = *(int *)extra;
+ int error;
+
+ mode = htod32(mode);
+ error = dev_wlc_intvar_set(dev, "vlan_mode", mode);
+ return error;
+}
+
+static int
+wl_iw_set_pm(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int pm = *(int *)extra;
+ int error;
+
+ pm = htod32(pm);
+ error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm));
+ return error;
+}
+#endif
+
+int
+wl_iw_send_priv_event(
+ struct net_device *dev,
+ char *flag
+)
+{
+ union iwreq_data wrqu;
+ char extra[IW_CUSTOM_MAX + 1];
+ int cmd;
+
+ cmd = IWEVCUSTOM;
+ memset(&wrqu, 0, sizeof(wrqu));
+	if (strlen(flag) >= sizeof(extra))
+ return -1;
+
+ strcpy(extra, flag);
+ wrqu.data.length = strlen(extra);
+ wireless_send_event(dev, cmd, &wrqu, extra);
+ WL_TRACE(("Send IWEVCUSTOM Event as %s\n", extra));
+
+ return 0;
+}
+
+static int
+wl_iw_config_commit(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ void *zwrq,
+ char *extra
+)
+{
+ wlc_ssid_t ssid;
+ int error;
+ struct sockaddr bssid;
+
+ WL_TRACE(("%s: SIOCSIWCOMMIT\n", dev->name));
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid))))
+ return error;
+
+ ssid.SSID_len = dtoh32(ssid.SSID_len);
+
+ if (!ssid.SSID_len)
+ return 0;
+
+ bzero(&bssid, sizeof(struct sockaddr));
+ if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, &bssid, ETHER_ADDR_LEN))) {
+ WL_ERROR(("%s: WLC_REASSOC failed (%d)\n", __FUNCTION__, error));
+ return error;
+ }
+
+ return 0;
+}
+
+static int
+wl_iw_get_name(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *cwrq,
+ char *extra
+)
+{
+ int phytype, err;
+ uint band[3];
+ char cap[5];
+
+ WL_TRACE(("%s: SIOCGIWNAME\n", dev->name));
+
+ cap[0] = 0;
+ if ((err = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype))) < 0)
+ goto done;
+ if ((err = dev_wlc_ioctl(dev, WLC_GET_BANDLIST, band, sizeof(band))) < 0)
+ goto done;
+
+ band[0] = dtoh32(band[0]);
+ switch (phytype) {
+ case WLC_PHY_TYPE_A:
+ strcpy(cap, "a");
+ break;
+ case WLC_PHY_TYPE_B:
+ strcpy(cap, "b");
+ break;
+ case WLC_PHY_TYPE_LP:
+ case WLC_PHY_TYPE_G:
+ if (band[0] >= 2)
+ strcpy(cap, "abg");
+ else
+ strcpy(cap, "bg");
+ break;
+ case WLC_PHY_TYPE_N:
+ if (band[0] >= 2)
+ strcpy(cap, "abgn");
+ else
+ strcpy(cap, "bgn");
+ break;
+ }
+done:
+ snprintf(cwrq->name, IFNAMSIZ, "IEEE 802.11%s", cap);
+ return 0;
+}
+
+static int
+wl_iw_set_freq(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *fwrq,
+ char *extra
+)
+{
+ int error, chan;
+ uint sf = 0;
+
+ WL_TRACE(("%s: SIOCSIWFREQ\n", dev->name));
+
+
+ if (fwrq->e == 0 && fwrq->m < MAXCHANNEL) {
+ chan = fwrq->m;
+ }
+
+
+ else {
+
+ if (fwrq->e >= 6) {
+ fwrq->e -= 6;
+ while (fwrq->e--)
+ fwrq->m *= 10;
+ } else if (fwrq->e < 6) {
+ while (fwrq->e++ < 6)
+ fwrq->m /= 10;
+ }
+
+ if (fwrq->m > 4000 && fwrq->m < 5000)
+ sf = WF_CHAN_FACTOR_4_G;
+
+ chan = wf_mhz2channel(fwrq->m, sf);
+ }
+ chan = htod32(chan);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_CHANNEL, &chan, sizeof(chan))))
+ return error;
+
+
+ return -EINPROGRESS;
+}
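+
+/* Worked example (illustrative): SIOCSIWFREQ with m = 2412, e = 6 is already
+ * in MHz after the normalization above, and 2412 MHz corresponds to channel 1
+ * in the 2.4 GHz band; with e = 0 and m = 11 the value is taken directly as
+ * channel 11.
+ */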
+
+static int
+wl_iw_get_freq(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *fwrq,
+ char *extra
+)
+{
+ channel_info_t ci;
+ int error;
+
+ WL_TRACE(("%s: SIOCGIWFREQ\n", dev->name));
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci))))
+ return error;
+
+
+ fwrq->m = dtoh32(ci.hw_channel);
+ fwrq->e = dtoh32(0);
+ return 0;
+}
+
+static int
+wl_iw_set_mode(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ __u32 *uwrq,
+ char *extra
+)
+{
+ int infra = 0, ap = 0, error = 0;
+
+ WL_TRACE(("%s: SIOCSIWMODE\n", dev->name));
+
+ switch (*uwrq) {
+ case IW_MODE_MASTER:
+ infra = ap = 1;
+ break;
+ case IW_MODE_ADHOC:
+ case IW_MODE_AUTO:
+ break;
+ case IW_MODE_INFRA:
+ infra = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ infra = htod32(infra);
+ ap = htod32(ap);
+
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(infra))) ||
+ (error = dev_wlc_ioctl(dev, WLC_SET_AP, &ap, sizeof(ap))))
+ return error;
+
+
+ return -EINPROGRESS;
+}
+
+static int
+wl_iw_get_mode(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ __u32 *uwrq,
+ char *extra
+)
+{
+ int error, infra = 0, ap = 0;
+
+ WL_TRACE(("%s: SIOCGIWMODE\n", dev->name));
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_INFRA, &infra, sizeof(infra))) ||
+ (error = dev_wlc_ioctl(dev, WLC_GET_AP, &ap, sizeof(ap))))
+ return error;
+
+ infra = dtoh32(infra);
+ ap = dtoh32(ap);
+ *uwrq = infra ? ap ? IW_MODE_MASTER : IW_MODE_INFRA : IW_MODE_ADHOC;
+
+ return 0;
+}
+
+static int
+wl_iw_get_range(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ struct iw_range *range = (struct iw_range *) extra;
+ static int channels[MAXCHANNEL+1];
+ wl_uint32_list_t *list = (wl_uint32_list_t *) channels;
+ wl_rateset_t rateset;
+ int error, i, k;
+ uint sf, ch;
+
+ int phytype;
+ int bw_cap = 0, sgi_tx = 0, nmode = 0;
+ channel_info_t ci;
+ uint8 nrate_list2copy = 0;
+ uint16 nrate_list[4][8] = { {13, 26, 39, 52, 78, 104, 117, 130},
+ {14, 29, 43, 58, 87, 116, 130, 144},
+ {27, 54, 81, 108, 162, 216, 243, 270},
+ {30, 60, 90, 120, 180, 240, 270, 300}};
+
+ WL_TRACE(("%s: SIOCGIWRANGE\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ dwrq->length = sizeof(struct iw_range);
+ memset(range, 0, sizeof(*range));
+
+
+ range->min_nwid = range->max_nwid = 0;
+
+
+ list->count = htod32(MAXCHANNEL);
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_VALID_CHANNELS, channels, sizeof(channels))))
+ return error;
+ for (i = 0; i < dtoh32(list->count) && i < IW_MAX_FREQUENCIES; i++) {
+ range->freq[i].i = dtoh32(list->element[i]);
+
+ ch = dtoh32(list->element[i]);
+ if (ch <= CH_MAX_2G_CHANNEL)
+ sf = WF_CHAN_FACTOR_2_4_G;
+ else
+ sf = WF_CHAN_FACTOR_5_G;
+
+ range->freq[i].m = wf_channel2mhz(ch, sf);
+ range->freq[i].e = 6;
+ }
+ range->num_frequency = range->num_channels = i;
+
+
+ range->max_qual.qual = 5;
+
+ range->max_qual.level = 0x100 - 200;
+
+ range->max_qual.noise = 0x100 - 200;
+
+ range->sensitivity = 65535;
+
+#if WIRELESS_EXT > 11
+
+ range->avg_qual.qual = 3;
+
+ range->avg_qual.level = 0x100 + WL_IW_RSSI_GOOD;
+
+ range->avg_qual.noise = 0x100 - 75;
+#endif
+
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset))))
+ return error;
+ rateset.count = dtoh32(rateset.count);
+ range->num_bitrates = rateset.count;
+ for (i = 0; i < rateset.count && i < IW_MAX_BITRATES; i++)
+ range->bitrate[i] = (rateset.rates[i] & 0x7f) * 500000;
+ dev_wlc_intvar_get(dev, "nmode", &nmode);
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype))))
+ return error;
+
+ if (nmode == 1 && ((phytype == WLC_PHY_TYPE_SSN) || (phytype == WLC_PHY_TYPE_LCN) ||
+ (phytype == WLC_PHY_TYPE_LCN40))) {
+ dev_wlc_intvar_get(dev, "mimo_bw_cap", &bw_cap);
+ dev_wlc_intvar_get(dev, "sgi_tx", &sgi_tx);
+ dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(channel_info_t));
+ ci.hw_channel = dtoh32(ci.hw_channel);
+
+ if (bw_cap == 0 ||
+ (bw_cap == 2 && ci.hw_channel <= 14)) {
+ if (sgi_tx == 0)
+ nrate_list2copy = 0;
+ else
+ nrate_list2copy = 1;
+ }
+ if (bw_cap == 1 ||
+ (bw_cap == 2 && ci.hw_channel >= 36)) {
+ if (sgi_tx == 0)
+ nrate_list2copy = 2;
+ else
+ nrate_list2copy = 3;
+ }
+ range->num_bitrates += 8;
+ for (k = 0; i < range->num_bitrates; k++, i++) {
+
+ range->bitrate[i] = (nrate_list[nrate_list2copy][k]) * 500000;
+ }
+ }
+
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &i, sizeof(i))))
+ return error;
+ i = dtoh32(i);
+ if (i == WLC_PHY_TYPE_A)
+ range->throughput = 24000000;
+ else
+ range->throughput = 1500000;
+
+
+ range->min_rts = 0;
+ range->max_rts = 2347;
+ range->min_frag = 256;
+ range->max_frag = 2346;
+
+ range->max_encoding_tokens = DOT11_MAX_DEFAULT_KEYS;
+ range->num_encoding_sizes = 4;
+ range->encoding_size[0] = WEP1_KEY_SIZE;
+ range->encoding_size[1] = WEP128_KEY_SIZE;
+#if WIRELESS_EXT > 17
+ range->encoding_size[2] = TKIP_KEY_SIZE;
+#else
+ range->encoding_size[2] = 0;
+#endif
+ range->encoding_size[3] = AES_KEY_SIZE;
+
+
+ range->min_pmp = 0;
+ range->max_pmp = 0;
+ range->min_pmt = 0;
+ range->max_pmt = 0;
+ range->pmp_flags = 0;
+ range->pm_capa = 0;
+
+
+ range->num_txpower = 2;
+ range->txpower[0] = 1;
+ range->txpower[1] = 255;
+ range->txpower_capa = IW_TXPOW_MWATT;
+
+#if WIRELESS_EXT > 10
+ range->we_version_compiled = WIRELESS_EXT;
+ range->we_version_source = 19;
+
+
+ range->retry_capa = IW_RETRY_LIMIT;
+ range->retry_flags = IW_RETRY_LIMIT;
+ range->r_time_flags = 0;
+
+ range->min_retry = 1;
+ range->max_retry = 255;
+
+ range->min_r_time = 0;
+ range->max_r_time = 0;
+#endif
+
+#if WIRELESS_EXT > 17
+ range->enc_capa = IW_ENC_CAPA_WPA;
+ range->enc_capa |= IW_ENC_CAPA_CIPHER_TKIP;
+ range->enc_capa |= IW_ENC_CAPA_CIPHER_CCMP;
+ range->enc_capa |= IW_ENC_CAPA_WPA2;
+#if (defined(BCMSUP_PSK) && defined(WLFBT))
+
+ range->enc_capa |= IW_ENC_CAPA_4WAY_HANDSHAKE;
+#endif
+
+
+ IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
+
+ IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
+ IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
+ IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP);
+ IW_EVENT_CAPA_SET(range->event_capa, IWEVMICHAELMICFAILURE);
+ IW_EVENT_CAPA_SET(range->event_capa, IWEVASSOCREQIE);
+ IW_EVENT_CAPA_SET(range->event_capa, IWEVASSOCRESPIE);
+ IW_EVENT_CAPA_SET(range->event_capa, IWEVPMKIDCAND);
+
+#if WIRELESS_EXT >= 22 && defined(IW_SCAN_CAPA_ESSID)
+
+ range->scan_capa = IW_SCAN_CAPA_ESSID;
+#endif
+#endif
+
+ return 0;
+}
+
+static int
+rssi_to_qual(int rssi)
+{
+ if (rssi <= WL_IW_RSSI_NO_SIGNAL)
+ return 0;
+ else if (rssi <= WL_IW_RSSI_VERY_LOW)
+ return 1;
+ else if (rssi <= WL_IW_RSSI_LOW)
+ return 2;
+ else if (rssi <= WL_IW_RSSI_GOOD)
+ return 3;
+ else if (rssi <= WL_IW_RSSI_VERY_GOOD)
+ return 4;
+ else
+ return 5;
+}
+
+static int
+wl_iw_set_spy(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_iw_t *iw = IW_DEV_IF(dev);
+ struct sockaddr *addr = (struct sockaddr *) extra;
+ int i;
+
+ WL_TRACE(("%s: SIOCSIWSPY\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ iw->spy_num = MIN(ARRAYSIZE(iw->spy_addr), dwrq->length);
+ for (i = 0; i < iw->spy_num; i++)
+ memcpy(&iw->spy_addr[i], addr[i].sa_data, ETHER_ADDR_LEN);
+ memset(iw->spy_qual, 0, sizeof(iw->spy_qual));
+
+ return 0;
+}
+
+static int
+wl_iw_get_spy(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_iw_t *iw = IW_DEV_IF(dev);
+ struct sockaddr *addr = (struct sockaddr *) extra;
+ struct iw_quality *qual = (struct iw_quality *) &addr[iw->spy_num];
+ int i;
+
+ WL_TRACE(("%s: SIOCGIWSPY\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ dwrq->length = iw->spy_num;
+ for (i = 0; i < iw->spy_num; i++) {
+ memcpy(addr[i].sa_data, &iw->spy_addr[i], ETHER_ADDR_LEN);
+ addr[i].sa_family = AF_UNIX;
+ memcpy(&qual[i], &iw->spy_qual[i], sizeof(struct iw_quality));
+ iw->spy_qual[i].updated = 0;
+ }
+
+ return 0;
+}
+
+static int
+wl_iw_set_wap(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *awrq,
+ char *extra
+)
+{
+ int error = -EINVAL;
+
+ WL_TRACE(("%s: SIOCSIWAP\n", dev->name));
+
+ if (awrq->sa_family != ARPHRD_ETHER) {
+ WL_ERROR(("%s: Invalid Header...sa_family\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+
+ if (ETHER_ISBCAST(awrq->sa_data) || ETHER_ISNULLADDR(awrq->sa_data)) {
+ scb_val_t scbval;
+ bzero(&scbval, sizeof(scb_val_t));
+ if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)))) {
+ WL_ERROR(("%s: WLC_DISASSOC failed (%d).\n", __FUNCTION__, error));
+ }
+ return 0;
+ }
+
+
+ if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, awrq->sa_data, ETHER_ADDR_LEN))) {
+ WL_ERROR(("%s: WLC_REASSOC failed (%d).\n", __FUNCTION__, error));
+ return error;
+ }
+
+ return 0;
+}
+
+static int
+wl_iw_get_wap(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *awrq,
+ char *extra
+)
+{
+ WL_TRACE(("%s: SIOCGIWAP\n", dev->name));
+
+ awrq->sa_family = ARPHRD_ETHER;
+ memset(awrq->sa_data, 0, ETHER_ADDR_LEN);
+
+
+ (void) dev_wlc_ioctl(dev, WLC_GET_BSSID, awrq->sa_data, ETHER_ADDR_LEN);
+
+ return 0;
+}
+
+#if WIRELESS_EXT > 17
+static int
+wl_iw_mlme(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *awrq,
+ char *extra
+)
+{
+ struct iw_mlme *mlme;
+ scb_val_t scbval;
+ int error = -EINVAL;
+
+ WL_TRACE(("%s: SIOCSIWMLME\n", dev->name));
+
+ mlme = (struct iw_mlme *)extra;
+ if (mlme == NULL) {
+ WL_ERROR(("Invalid ioctl data.\n"));
+ return error;
+ }
+
+ scbval.val = mlme->reason_code;
+ bcopy(&mlme->addr.sa_data, &scbval.ea, ETHER_ADDR_LEN);
+
+ if (mlme->cmd == IW_MLME_DISASSOC) {
+ scbval.val = htod32(scbval.val);
+ error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
+ }
+ else if (mlme->cmd == IW_MLME_DEAUTH) {
+ scbval.val = htod32(scbval.val);
+ error = dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scbval,
+ sizeof(scb_val_t));
+ }
+ else {
+ WL_ERROR(("%s: Invalid ioctl data.\n", __FUNCTION__));
+ return error;
+ }
+
+ return error;
+}
+#endif
+
+static int
+wl_iw_get_aplist(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_scan_results_t *list;
+ struct sockaddr *addr = (struct sockaddr *) extra;
+ struct iw_quality qual[IW_MAX_AP];
+ wl_bss_info_t *bi = NULL;
+ int error, i;
+ uint buflen = dwrq->length;
+
+ WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+
+ list = kmalloc(buflen, GFP_KERNEL);
+ if (!list)
+ return -ENOMEM;
+ memset(list, 0, buflen);
+ list->buflen = htod32(buflen);
+ if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) {
+ WL_ERROR(("%d: Scan results error %d\n", __LINE__, error));
+ kfree(list);
+ return error;
+ }
+ list->buflen = dtoh32(list->buflen);
+ list->version = dtoh32(list->version);
+ list->count = dtoh32(list->count);
+ ASSERT(list->version == WL_BSS_INFO_VERSION);
+
+ for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) {
+ bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+ ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+ buflen));
+
+
+ if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
+ continue;
+
+
+ memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+ addr[dwrq->length].sa_family = ARPHRD_ETHER;
+ qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI));
+ qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI);
+ qual[dwrq->length].noise = 0x100 + bi->phy_noise;
+
+
+#if WIRELESS_EXT > 18
+ qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+#else
+ qual[dwrq->length].updated = 7;
+#endif
+
+ dwrq->length++;
+ }
+
+ kfree(list);
+
+ if (dwrq->length) {
+ memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length);
+
+ dwrq->flags = 1;
+ }
+
+ return 0;
+}
+
+static int
+wl_iw_iscan_get_aplist(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_scan_results_t *list;
+ iscan_buf_t * buf;
+ iscan_info_t *iscan = g_iscan;
+
+ struct sockaddr *addr = (struct sockaddr *) extra;
+ struct iw_quality qual[IW_MAX_AP];
+ wl_bss_info_t *bi = NULL;
+ int i;
+
+ WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ if ((!iscan) || (iscan->sysioc_pid < 0)) {
+ return wl_iw_get_aplist(dev, info, dwrq, extra);
+ }
+
+ buf = iscan->list_hdr;
+
+ while (buf) {
+ list = &((wl_iscan_results_t*)buf->iscan_buf)->results;
+ ASSERT(list->version == WL_BSS_INFO_VERSION);
+
+ bi = NULL;
+ for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) {
+ bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+ ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+ WLC_IW_ISCAN_MAXLEN));
+
+
+ if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
+ continue;
+
+
+ memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+ addr[dwrq->length].sa_family = ARPHRD_ETHER;
+ qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI));
+ qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI);
+ qual[dwrq->length].noise = 0x100 + bi->phy_noise;
+
+
+#if WIRELESS_EXT > 18
+ qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+#else
+ qual[dwrq->length].updated = 7;
+#endif
+
+ dwrq->length++;
+ }
+ buf = buf->next;
+ }
+ if (dwrq->length) {
+ memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length);
+
+ dwrq->flags = 1;
+ }
+
+ return 0;
+}
+
+#if WIRELESS_EXT > 13
+static int
+wl_iw_set_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ wlc_ssid_t ssid;
+
+ WL_TRACE(("%s: SIOCSIWSCAN\n", dev->name));
+
+
+ memset(&ssid, 0, sizeof(ssid));
+
+#if WIRELESS_EXT > 17
+
+ if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+ if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+ struct iw_scan_req *req = (struct iw_scan_req *)extra;
+ ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len);
+ memcpy(ssid.SSID, req->essid, ssid.SSID_len);
+ ssid.SSID_len = htod32(ssid.SSID_len);
+ }
+ }
+#endif
+
+ (void) dev_wlc_ioctl(dev, WLC_SCAN, &ssid, sizeof(ssid));
+
+ return 0;
+}
+
+static int
+wl_iw_iscan_set_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ wlc_ssid_t ssid;
+ iscan_info_t *iscan = g_iscan;
+
+ WL_TRACE(("%s: SIOCSIWSCAN\n", dev->name));
+
+
+ if ((!iscan) || (iscan->sysioc_pid < 0)) {
+ return wl_iw_set_scan(dev, info, wrqu, extra);
+ }
+ if (iscan->iscan_state == ISCAN_STATE_SCANING) {
+ return 0;
+ }
+
+
+ memset(&ssid, 0, sizeof(ssid));
+
+#if WIRELESS_EXT > 17
+
+ if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+ if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+ struct iw_scan_req *req = (struct iw_scan_req *)extra;
+ ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len);
+ memcpy(ssid.SSID, req->essid, ssid.SSID_len);
+ ssid.SSID_len = htod32(ssid.SSID_len);
+ }
+ }
+#endif
+
+ iscan->list_cur = iscan->list_hdr;
+ iscan->iscan_state = ISCAN_STATE_SCANING;
+
+
+ wl_iw_set_event_mask(dev);
+ wl_iw_iscan(iscan, &ssid, WL_SCAN_ACTION_START);
+
+ iscan->timer.expires = jiffies + iscan->timer_ms*HZ/1000;
+ add_timer(&iscan->timer);
+ iscan->timer_on = 1;
+
+ return 0;
+}
+
+#if WIRELESS_EXT > 17
+static bool
+ie_is_wpa_ie(uint8 **wpaie, uint8 **tlvs, int *tlvs_len)
+{
+
+
+ uint8 *ie = *wpaie;
+
+
+ if ((ie[1] >= 6) &&
+ !bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x01"), 4)) {
+ return TRUE;
+ }
+
+
+ ie += ie[1] + 2;
+
+ *tlvs_len -= (int)(ie - *tlvs);
+
+ *tlvs = ie;
+ return FALSE;
+}
+
+static bool
+ie_is_wps_ie(uint8 **wpsie, uint8 **tlvs, int *tlvs_len)
+{
+
+
+ uint8 *ie = *wpsie;
+
+
+ if ((ie[1] >= 4) &&
+ !bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x04"), 4)) {
+ return TRUE;
+ }
+
+
+ ie += ie[1] + 2;
+
+ *tlvs_len -= (int)(ie - *tlvs);
+
+ *tlvs = ie;
+ return FALSE;
+}
+#endif
+
+#ifdef BCMWAPI_WPI
+static inline int _wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data,
+ size_t len, int uppercase)
+{
+ size_t i;
+ char *pos = buf, *end = buf + buf_size;
+ int ret;
+ if (buf_size == 0)
+ return 0;
+ for (i = 0; i < len; i++) {
+ ret = snprintf(pos, end - pos, uppercase ? "%02X" : "%02x",
+ data[i]);
+ if (ret < 0 || ret >= end - pos) {
+ end[-1] = '\0';
+ return pos - buf;
+ }
+ pos += ret;
+ }
+ end[-1] = '\0';
+ return pos - buf;
+}
+
+
+static int
+wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data, size_t len)
+{
+ return _wpa_snprintf_hex(buf, buf_size, data, len, 0);
+}
+#endif
+
+static int
+wl_iw_handle_scanresults_ies(char **event_p, char *end,
+ struct iw_request_info *info, wl_bss_info_t *bi)
+{
+#if WIRELESS_EXT > 17
+ struct iw_event iwe;
+ char *event;
+#ifdef BCMWAPI_WPI
+ char *buf;
+ int custom_event_len;
+#endif
+
+ event = *event_p;
+ if (bi->ie_length) {
+
+ bcm_tlv_t *ie;
+ uint8 *ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+ int ptr_len = bi->ie_length;
+
+ if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_RSN_ID))) {
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = ie->len + 2;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+ }
+ ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+
+#if defined(WLFBT)
+ if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_MDIE_ID))) {
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = ie->len + 2;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+ }
+ ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+#endif
+
+ while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
+
+ if (ie_is_wps_ie(((uint8 **)&ie), &ptr, &ptr_len)) {
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = ie->len + 2;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+ break;
+ }
+ }
+
+ ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+ ptr_len = bi->ie_length;
+ while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
+ if (ie_is_wpa_ie(((uint8 **)&ie), &ptr, &ptr_len)) {
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = ie->len + 2;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+ break;
+ }
+ }
+
+#ifdef BCMWAPI_WPI
+ ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+ ptr_len = bi->ie_length;
+
+ while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WAPI_ID))) {
+ WL_TRACE(("%s: found a WAPI IE...\n", __FUNCTION__));
+#ifdef WAPI_IE_USE_GENIE
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = ie->len + 2;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+#else
+ iwe.cmd = IWEVCUSTOM;
+ custom_event_len = strlen("wapi_ie=") + 2*(ie->len + 2);
+ iwe.u.data.length = custom_event_len;
+
+ buf = kmalloc(custom_event_len+1, GFP_KERNEL);
+ if (buf == NULL)
+ {
+ WL_ERROR(("malloc(%d) returned NULL...\n", custom_event_len));
+ break;
+ }
+
+ memcpy(buf, "wapi_ie=", 8);
+ wpa_snprintf_hex(buf + 8, 2+1, &(ie->id), 1);
+ wpa_snprintf_hex(buf + 10, 2+1, &(ie->len), 1);
+ wpa_snprintf_hex(buf + 12, 2*ie->len+1, ie->data, ie->len);
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, buf);
+#endif
+ break;
+ }
+#endif
+ *event_p = event;
+ }
+
+#endif
+ return 0;
+}
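+
+/*
+ * SIOCGIWSCAN handler: pull WLC_SCAN_RESULTS from the firmware and translate
+ * each BSS into the iw_event stream expected by user space (BSSID, ESSID,
+ * mode, frequency, quality, IEs, encryption flag and supported rates).
+ */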
+static int
+wl_iw_get_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ channel_info_t ci;
+ wl_scan_results_t *list;
+ struct iw_event iwe;
+ wl_bss_info_t *bi = NULL;
+ int error, i, j;
+ char *event = extra, *end = extra + dwrq->length, *value;
+ uint buflen = dwrq->length;
+
+ WL_TRACE(("%s: SIOCGIWSCAN\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci))))
+ return error;
+ ci.scan_channel = dtoh32(ci.scan_channel);
+ if (ci.scan_channel)
+ return -EAGAIN;
+
+
+ list = kmalloc(buflen, GFP_KERNEL);
+ if (!list)
+ return -ENOMEM;
+ memset(list, 0, buflen);
+ list->buflen = htod32(buflen);
+ if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) {
+ kfree(list);
+ return error;
+ }
+ list->buflen = dtoh32(list->buflen);
+ list->version = dtoh32(list->version);
+ list->count = dtoh32(list->count);
+
+ ASSERT(list->version == WL_BSS_INFO_VERSION);
+
+ for (i = 0; i < list->count && i < IW_MAX_AP; i++) {
+ bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+ ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+ buflen));
+
+
+ iwe.cmd = SIOCGIWAP;
+ iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+ memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+
+
+ iwe.u.data.length = dtoh32(bi->SSID_len);
+ iwe.cmd = SIOCGIWESSID;
+ iwe.u.data.flags = 1;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
+
+
+ if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
+ iwe.cmd = SIOCGIWMODE;
+ if (dtoh16(bi->capability) & DOT11_CAP_ESS)
+ iwe.u.mode = IW_MODE_INFRA;
+ else
+ iwe.u.mode = IW_MODE_ADHOC;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
+ }
+
+
+ iwe.cmd = SIOCGIWFREQ;
+ iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec),
+ CHSPEC_CHANNEL(bi->chanspec) <= CH_MAX_2G_CHANNEL ?
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+ iwe.u.freq.e = 6;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
+
+
+ iwe.cmd = IWEVQUAL;
+ iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI));
+ iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI);
+ iwe.u.qual.noise = 0x100 + bi->phy_noise;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
+
+
+ wl_iw_handle_scanresults_ies(&event, end, info, bi);
+
+
+ iwe.cmd = SIOCGIWENCODE;
+ if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
+ iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+ else
+ iwe.u.data.flags = IW_ENCODE_DISABLED;
+ iwe.u.data.length = 0;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
+
+
+ if (bi->rateset.count) {
+ value = event + IW_EV_LCP_LEN;
+ iwe.cmd = SIOCGIWRATE;
+
+ iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+ for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
+ iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000;
+ value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
+ IW_EV_PARAM_LEN);
+ }
+ event = value;
+ }
+ }
+
+ kfree(list);
+
+ dwrq->length = event - extra;
+ dwrq->flags = 0;
+
+ return 0;
+}
+
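+/*
+ * Incremental-scan variant of SIOCGIWSCAN: walk the buffered iscan result
+ * chain instead of issuing WLC_SCAN_RESULTS. Returns -EAGAIN while a scan is
+ * still running and -E2BIG when the remaining user buffer is too small.
+ */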
+static int
+wl_iw_iscan_get_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_scan_results_t *list;
+ struct iw_event iwe;
+ wl_bss_info_t *bi = NULL;
+ int ii, j;
+ int apcnt;
+ char *event = extra, *end = extra + dwrq->length, *value;
+ iscan_info_t *iscan = g_iscan;
+ iscan_buf_t * p_buf;
+
+ WL_TRACE(("%s: SIOCGIWSCAN\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+
+ if ((!iscan) || (iscan->sysioc_pid < 0)) {
+ return wl_iw_get_scan(dev, info, dwrq, extra);
+ }
+
+
+ if (iscan->iscan_state == ISCAN_STATE_SCANING)
+ return -EAGAIN;
+
+ apcnt = 0;
+ p_buf = iscan->list_hdr;
+
+ while (p_buf != iscan->list_cur) {
+ list = &((wl_iscan_results_t*)p_buf->iscan_buf)->results;
+
+ if (list->version != WL_BSS_INFO_VERSION) {
+ WL_ERROR(("list->version %d != WL_BSS_INFO_VERSION\n", list->version));
+ }
+
+ bi = NULL;
+ for (ii = 0; ii < list->count && apcnt < IW_MAX_AP; apcnt++, ii++) {
+ bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+ ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+ WLC_IW_ISCAN_MAXLEN));
+
+
+ if (event + ETHER_ADDR_LEN + bi->SSID_len + IW_EV_UINT_LEN + IW_EV_FREQ_LEN +
+ IW_EV_QUAL_LEN >= end)
+ return -E2BIG;
+
+ iwe.cmd = SIOCGIWAP;
+ iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+ memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+
+
+ iwe.u.data.length = dtoh32(bi->SSID_len);
+ iwe.cmd = SIOCGIWESSID;
+ iwe.u.data.flags = 1;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
+
+
+ if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
+ iwe.cmd = SIOCGIWMODE;
+ if (dtoh16(bi->capability) & DOT11_CAP_ESS)
+ iwe.u.mode = IW_MODE_INFRA;
+ else
+ iwe.u.mode = IW_MODE_ADHOC;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
+ }
+
+
+ iwe.cmd = SIOCGIWFREQ;
+ iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec),
+ CHSPEC_CHANNEL(bi->chanspec) <= CH_MAX_2G_CHANNEL ?
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+ iwe.u.freq.e = 6;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
+
+
+ iwe.cmd = IWEVQUAL;
+ iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI));
+ iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI);
+ iwe.u.qual.noise = 0x100 + bi->phy_noise;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
+
+
+ wl_iw_handle_scanresults_ies(&event, end, info, bi);
+
+
+ iwe.cmd = SIOCGIWENCODE;
+ if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
+ iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+ else
+ iwe.u.data.flags = IW_ENCODE_DISABLED;
+ iwe.u.data.length = 0;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
+
+
+ if (bi->rateset.count) {
+ if (event + IW_MAX_BITRATES*IW_EV_PARAM_LEN >= end)
+ return -E2BIG;
+
+ value = event + IW_EV_LCP_LEN;
+ iwe.cmd = SIOCGIWRATE;
+
+ iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+ for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
+ iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000;
+ value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
+ IW_EV_PARAM_LEN);
+ }
+ event = value;
+ }
+ }
+ p_buf = p_buf->next;
+ }
+
+ dwrq->length = event - extra;
+ dwrq->flags = 0;
+
+ return 0;
+}
+
+#endif
+
+
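+/*
+ * SIOCSIWESSID: a non-empty SSID is programmed with WLC_SET_SSID (join);
+ * an empty request disassociates via WLC_DISASSOC.
+ */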
+static int
+wl_iw_set_essid(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wlc_ssid_t ssid;
+ int error;
+
+ WL_TRACE(("%s: SIOCSIWESSID\n", dev->name));
+
+
+ memset(&ssid, 0, sizeof(ssid));
+ if (dwrq->length && extra) {
+#if WIRELESS_EXT > 20
+ ssid.SSID_len = MIN(sizeof(ssid.SSID), dwrq->length);
+#else
+ ssid.SSID_len = MIN(sizeof(ssid.SSID), dwrq->length-1);
+#endif
+ memcpy(ssid.SSID, extra, ssid.SSID_len);
+ ssid.SSID_len = htod32(ssid.SSID_len);
+
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID, &ssid, sizeof(ssid))))
+ return error;
+ }
+
+ else {
+ scb_val_t scbval;
+ bzero(&scbval, sizeof(scb_val_t));
+ if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t))))
+ return error;
+ }
+ return 0;
+}
+
+static int
+wl_iw_get_essid(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wlc_ssid_t ssid;
+ int error;
+
+ WL_TRACE(("%s: SIOCGIWESSID\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid)))) {
+ WL_ERROR(("Error getting the SSID\n"));
+ return error;
+ }
+
+ ssid.SSID_len = dtoh32(ssid.SSID_len);
+
+
+ memcpy(extra, ssid.SSID, ssid.SSID_len);
+
+ dwrq->length = ssid.SSID_len;
+
+ dwrq->flags = 1;
+
+ return 0;
+}
+
+static int
+wl_iw_set_nick(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_iw_t *iw = IW_DEV_IF(dev);
+ WL_TRACE(("%s: SIOCSIWNICKN\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+
+ if (dwrq->length > sizeof(iw->nickname))
+ return -E2BIG;
+
+ memcpy(iw->nickname, extra, dwrq->length);
+ iw->nickname[dwrq->length - 1] = '\0';
+
+ return 0;
+}
+
+static int
+wl_iw_get_nick(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_iw_t *iw = IW_DEV_IF(dev);
+ WL_TRACE(("%s: SIOCGIWNICKN\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ strcpy(extra, iw->nickname);
+ dwrq->length = strlen(extra) + 1;
+
+ return 0;
+}
+
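+/*
+ * SIOCSIWRATE: a negative value selects the highest rate in the current
+ * rateset, a small value is treated as an index into it, anything else is a
+ * bit rate in bps. With 'fixed' set, bg_rate/a_rate are pinned to that rate;
+ * otherwise the rateset is truncated to rates at or below it.
+ */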
+static int wl_iw_set_rate(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ wl_rateset_t rateset;
+ int error, rate, i, error_bg, error_a;
+
+ WL_TRACE(("%s: SIOCSIWRATE\n", dev->name));
+
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset))))
+ return error;
+
+ rateset.count = dtoh32(rateset.count);
+
+ if (vwrq->value < 0) {
+
+ rate = rateset.rates[rateset.count - 1] & 0x7f;
+ } else if (vwrq->value < rateset.count) {
+
+ rate = rateset.rates[vwrq->value] & 0x7f;
+ } else {
+
+ rate = vwrq->value / 500000;
+ }
+
+ if (vwrq->fixed) {
+
+ error_bg = dev_wlc_intvar_set(dev, "bg_rate", rate);
+ error_a = dev_wlc_intvar_set(dev, "a_rate", rate);
+
+ if (error_bg && error_a)
+ return (error_bg | error_a);
+ } else {
+
+
+ error_bg = dev_wlc_intvar_set(dev, "bg_rate", 0);
+
+ error_a = dev_wlc_intvar_set(dev, "a_rate", 0);
+
+ if (error_bg && error_a)
+ return (error_bg | error_a);
+
+
+ for (i = 0; i < rateset.count; i++)
+ if ((rateset.rates[i] & 0x7f) > rate)
+ break;
+ rateset.count = htod32(i);
+
+
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_RATESET, &rateset, sizeof(rateset))))
+ return error;
+ }
+
+ return 0;
+}
+
+static int wl_iw_get_rate(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, rate;
+
+ WL_TRACE(("%s: SIOCGIWRATE\n", dev->name));
+
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate))))
+ return error;
+ rate = dtoh32(rate);
+ vwrq->value = rate * 500000;
+
+ return 0;
+}
+
+static int
+wl_iw_set_rts(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, rts;
+
+ WL_TRACE(("%s: SIOCSIWRTS\n", dev->name));
+
+ if (vwrq->disabled)
+ rts = DOT11_DEFAULT_RTS_LEN;
+ else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_RTS_LEN)
+ return -EINVAL;
+ else
+ rts = vwrq->value;
+
+ if ((error = dev_wlc_intvar_set(dev, "rtsthresh", rts)))
+ return error;
+
+ return 0;
+}
+
+static int
+wl_iw_get_rts(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, rts;
+
+ WL_TRACE(("%s: SIOCGIWRTS\n", dev->name));
+
+ if ((error = dev_wlc_intvar_get(dev, "rtsthresh", &rts)))
+ return error;
+
+ vwrq->value = rts;
+ vwrq->disabled = (rts >= DOT11_DEFAULT_RTS_LEN);
+ vwrq->fixed = 1;
+
+ return 0;
+}
+
+static int
+wl_iw_set_frag(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, frag;
+
+ WL_TRACE(("%s: SIOCSIWFRAG\n", dev->name));
+
+ if (vwrq->disabled)
+ frag = DOT11_DEFAULT_FRAG_LEN;
+ else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_FRAG_LEN)
+ return -EINVAL;
+ else
+ frag = vwrq->value;
+
+ if ((error = dev_wlc_intvar_set(dev, "fragthresh", frag)))
+ return error;
+
+ return 0;
+}
+
+static int
+wl_iw_get_frag(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, fragthreshold;
+
+ WL_TRACE(("%s: SIOCGIWFRAG\n", dev->name));
+
+ if ((error = dev_wlc_intvar_get(dev, "fragthresh", &fragthreshold)))
+ return error;
+
+ vwrq->value = fragthreshold;
+ vwrq->disabled = (fragthreshold >= DOT11_DEFAULT_FRAG_LEN);
+ vwrq->fixed = 1;
+
+ return 0;
+}
+
+static int
+wl_iw_set_txpow(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, disable;
+ uint16 txpwrmw;
+ WL_TRACE(("%s: SIOCSIWTXPOW\n", dev->name));
+
+
+ disable = vwrq->disabled ? WL_RADIO_SW_DISABLE : 0;
+ disable += WL_RADIO_SW_DISABLE << 16;
+
+ disable = htod32(disable);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_RADIO, &disable, sizeof(disable))))
+ return error;
+
+
+ if (disable & WL_RADIO_SW_DISABLE)
+ return 0;
+
+
+ if (!(vwrq->flags & IW_TXPOW_MWATT))
+ return -EINVAL;
+
+
+ if (vwrq->value < 0)
+ return 0;
+
+ if (vwrq->value > 0xffff) txpwrmw = 0xffff;
+ else txpwrmw = (uint16)vwrq->value;
+
+
+ error = dev_wlc_intvar_set(dev, "qtxpower", (int)(bcm_mw_to_qdbm(txpwrmw)));
+ return error;
+}
+
+static int
+wl_iw_get_txpow(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, disable, txpwrdbm;
+ uint8 result;
+
+ WL_TRACE(("%s: SIOCGIWTXPOW\n", dev->name));
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_RADIO, &disable, sizeof(disable))) ||
+ (error = dev_wlc_intvar_get(dev, "qtxpower", &txpwrdbm)))
+ return error;
+
+ disable = dtoh32(disable);
+ result = (uint8)(txpwrdbm & ~WL_TXPWR_OVERRIDE);
+ vwrq->value = (int32)bcm_qdbm_to_mw(result);
+ vwrq->fixed = 0;
+ vwrq->disabled = (disable & (WL_RADIO_SW_DISABLE | WL_RADIO_HW_DISABLE)) ? 1 : 0;
+ vwrq->flags = IW_TXPOW_MWATT;
+
+ return 0;
+}
+
+#if WIRELESS_EXT > 10
+static int
+wl_iw_set_retry(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, lrl, srl;
+
+ WL_TRACE(("%s: SIOCSIWRETRY\n", dev->name));
+
+
+ if (vwrq->disabled || (vwrq->flags & IW_RETRY_LIFETIME))
+ return -EINVAL;
+
+
+ if (vwrq->flags & IW_RETRY_LIMIT) {
+
+#if WIRELESS_EXT > 20
+ if ((vwrq->flags & IW_RETRY_LONG) ||(vwrq->flags & IW_RETRY_MAX) ||
+ !((vwrq->flags & IW_RETRY_SHORT) || (vwrq->flags & IW_RETRY_MIN))) {
+#else
+ if ((vwrq->flags & IW_RETRY_MAX) || !(vwrq->flags & IW_RETRY_MIN)) {
+#endif
+
+ lrl = htod32(vwrq->value);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_LRL, &lrl, sizeof(lrl))))
+ return error;
+ }
+
+#if WIRELESS_EXT > 20
+ if ((vwrq->flags & IW_RETRY_SHORT) ||(vwrq->flags & IW_RETRY_MIN) ||
+ !((vwrq->flags & IW_RETRY_LONG) || (vwrq->flags & IW_RETRY_MAX))) {
+#else
+ if ((vwrq->flags & IW_RETRY_MIN) || !(vwrq->flags & IW_RETRY_MAX)) {
+#endif
+
+ srl = htod32(vwrq->value);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_SRL, &srl, sizeof(srl))))
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+static int
+wl_iw_get_retry(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, lrl, srl;
+
+ WL_TRACE(("%s: SIOCGIWRETRY\n", dev->name));
+
+ vwrq->disabled = 0;
+
+
+ if ((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME)
+ return -EINVAL;
+
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_LRL, &lrl, sizeof(lrl))) ||
+ (error = dev_wlc_ioctl(dev, WLC_GET_SRL, &srl, sizeof(srl))))
+ return error;
+
+ lrl = dtoh32(lrl);
+ srl = dtoh32(srl);
+
+
+ if (vwrq->flags & IW_RETRY_MAX) {
+ vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
+ vwrq->value = lrl;
+ } else {
+ vwrq->flags = IW_RETRY_LIMIT;
+ vwrq->value = srl;
+ if (srl != lrl)
+ vwrq->flags |= IW_RETRY_MIN;
+ }
+
+ return 0;
+}
+#endif
+
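+/*
+ * SIOCSIWENCODE: pick the WEP key index (or the current primary key when
+ * none is given), enable or disable wsec, program the key material according
+ * to its length (WEP-40/WEP-128/TKIP/AES) and select open vs. shared-key
+ * authentication.
+ */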
+static int
+wl_iw_set_encode(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_wsec_key_t key;
+ int error, val, wsec;
+
+ WL_TRACE(("%s: SIOCSIWENCODE\n", dev->name));
+
+ memset(&key, 0, sizeof(key));
+
+ if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
+
+ for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) {
+ val = htod32(key.index);
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val))))
+ return error;
+ val = dtoh32(val);
+ if (val)
+ break;
+ }
+
+ if (key.index == DOT11_MAX_DEFAULT_KEYS)
+ key.index = 0;
+ } else {
+ key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+ if (key.index >= DOT11_MAX_DEFAULT_KEYS)
+ return -EINVAL;
+ }
+
+
+ wsec = (dwrq->flags & IW_ENCODE_DISABLED) ? 0 : WEP_ENABLED;
+
+ if ((error = dev_wlc_intvar_set(dev, "wsec", wsec)))
+ return error;
+
+
+ if (!extra || !dwrq->length || (dwrq->flags & IW_ENCODE_NOKEY)) {
+
+ val = htod32(key.index);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY, &val, sizeof(val))))
+ return error;
+ } else {
+ key.len = dwrq->length;
+
+ if (dwrq->length > sizeof(key.data))
+ return -EINVAL;
+
+ memcpy(key.data, extra, dwrq->length);
+
+ key.flags = WL_PRIMARY_KEY;
+ switch (key.len) {
+ case WEP1_KEY_SIZE:
+ key.algo = CRYPTO_ALGO_WEP1;
+ break;
+ case WEP128_KEY_SIZE:
+ key.algo = CRYPTO_ALGO_WEP128;
+ break;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+ case TKIP_KEY_SIZE:
+ key.algo = CRYPTO_ALGO_TKIP;
+ break;
+#endif
+ case AES_KEY_SIZE:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+
+ swap_key_from_BE(&key);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key))))
+ return error;
+ }
+
+
+ val = (dwrq->flags & IW_ENCODE_RESTRICTED) ? 1 : 0;
+ val = htod32(val);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_AUTH, &val, sizeof(val))))
+ return error;
+
+ return 0;
+}
+
+static int
+wl_iw_get_encode(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_wsec_key_t key;
+ int error, val, wsec, auth;
+
+ WL_TRACE(("%s: SIOCGIWENCODE\n", dev->name));
+
+
+ bzero(&key, sizeof(wl_wsec_key_t));
+
+ if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
+
+ for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) {
+ val = key.index;
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val))))
+ return error;
+ val = dtoh32(val);
+ if (val)
+ break;
+ }
+ } else
+ key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+ if (key.index >= DOT11_MAX_DEFAULT_KEYS)
+ key.index = 0;
+
+
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_WSEC, &wsec, sizeof(wsec))) ||
+ (error = dev_wlc_ioctl(dev, WLC_GET_AUTH, &auth, sizeof(auth))))
+ return error;
+
+ swap_key_to_BE(&key);
+
+ wsec = dtoh32(wsec);
+ auth = dtoh32(auth);
+
+ dwrq->length = MIN(IW_ENCODING_TOKEN_MAX, key.len);
+
+
+ dwrq->flags = key.index + 1;
+ if (!(wsec & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))) {
+
+ dwrq->flags |= IW_ENCODE_DISABLED;
+ }
+ if (auth) {
+
+ dwrq->flags |= IW_ENCODE_RESTRICTED;
+ }
+
+
+ if (dwrq->length && extra)
+ memcpy(extra, key.data, dwrq->length);
+
+ return 0;
+}
+
+static int
+wl_iw_set_power(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, pm;
+
+ WL_TRACE(("%s: SIOCSIWPOWER\n", dev->name));
+
+ pm = vwrq->disabled ? PM_OFF : PM_MAX;
+
+ pm = htod32(pm);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm))))
+ return error;
+
+ return 0;
+}
+
+static int
+wl_iw_get_power(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, pm;
+
+ WL_TRACE(("%s: SIOCGIWPOWER\n", dev->name));
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm))))
+ return error;
+
+ pm = dtoh32(pm);
+ vwrq->disabled = pm ? 0 : 1;
+ vwrq->flags = IW_POWER_ALL_R;
+
+ return 0;
+}
+
+#if WIRELESS_EXT > 17
+static int
+wl_iw_set_wpaie(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *iwp,
+ char *extra
+)
+{
+#if defined(BCMWAPI_WPI)
+ uchar buf[WLC_IOCTL_SMLEN] = {0};
+ uchar *p = buf;
+ int wapi_ie_size;
+
+ WL_TRACE(("%s: SIOCSIWGENIE\n", dev->name));
+
+ if (extra[0] == DOT11_MNG_WAPI_ID)
+ {
+ wapi_ie_size = iwp->length;
+ memcpy(p, extra, iwp->length);
+ dev_wlc_bufvar_set(dev, "wapiie", buf, wapi_ie_size);
+ }
+ else
+#endif
+ dev_wlc_bufvar_set(dev, "wpaie", extra, iwp->length);
+
+ return 0;
+}
+
+static int
+wl_iw_get_wpaie(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *iwp,
+ char *extra
+)
+{
+ WL_TRACE(("%s: SIOCGIWGENIE\n", dev->name));
+ iwp->length = 64;
+ dev_wlc_bufvar_get(dev, "wpaie", extra, iwp->length);
+ return 0;
+}
+
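+/*
+ * SIOCSIWENCODEEXT: install a pairwise or group key. A zero-length request
+ * either switches the primary key index (IW_ENCODE_EXT_SET_TX_KEY) or is
+ * passed straight to WLC_SET_KEY; TKIP key material has its MIC halves
+ * swapped to the firmware ordering, and any supplied RX sequence counter is
+ * loaded before the key is programmed.
+ */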
+static int
+wl_iw_set_encodeext(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_wsec_key_t key;
+ int error;
+ struct iw_encode_ext *iwe;
+
+ WL_TRACE(("%s: SIOCSIWENCODEEXT\n", dev->name));
+
+ memset(&key, 0, sizeof(key));
+ iwe = (struct iw_encode_ext *)extra;
+
+
+ if (dwrq->flags & IW_ENCODE_DISABLED) {
+
+ }
+
+
+ key.index = 0;
+ if (dwrq->flags & IW_ENCODE_INDEX)
+ key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+ key.len = iwe->key_len;
+
+
+ if (!ETHER_ISMULTI(iwe->addr.sa_data))
+ bcopy((void *)&iwe->addr.sa_data, (char *)&key.ea, ETHER_ADDR_LEN);
+
+
+ if (key.len == 0) {
+ if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+ WL_WSEC(("Changing the the primary Key to %d\n", key.index));
+
+ key.index = htod32(key.index);
+ error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY,
+ &key.index, sizeof(key.index));
+ if (error)
+ return error;
+ }
+
+ else {
+ swap_key_from_BE(&key);
+ error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+ if (error)
+ return error;
+ }
+ }
+#if (defined(BCMSUP_PSK) && defined(WLFBT))
+
+ else if (iwe->alg == IW_ENCODE_ALG_PMK) {
+ int j;
+ wsec_pmk_t pmk;
+ char keystring[WSEC_MAX_PSK_LEN + 1];
+ char* charptr = keystring;
+ uint len;
+
+
+ for (j = 0; j < (WSEC_MAX_PSK_LEN / 2); j++) {
+ sprintf(charptr, "%02x", iwe->key[j]);
+ charptr += 2;
+ }
+ len = strlen(keystring);
+ pmk.key_len = htod16(len);
+ bcopy(keystring, pmk.key, len);
+ pmk.flags = htod16(WSEC_PASSPHRASE);
+
+ error = dev_wlc_ioctl(dev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
+ if (error)
+ return error;
+ }
+#endif
+
+ else {
+ if (iwe->key_len > sizeof(key.data))
+ return -EINVAL;
+
+ WL_WSEC(("Setting the key index %d\n", key.index));
+ if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+ WL_WSEC(("key is a Primary Key\n"));
+ key.flags = WL_PRIMARY_KEY;
+ }
+
+ bcopy((void *)iwe->key, key.data, iwe->key_len);
+
+ if (iwe->alg == IW_ENCODE_ALG_TKIP) {
+ uint8 keybuf[8];
+ bcopy(&key.data[24], keybuf, sizeof(keybuf));
+ bcopy(&key.data[16], &key.data[24], sizeof(keybuf));
+ bcopy(keybuf, &key.data[16], sizeof(keybuf));
+ }
+
+
+ if (iwe->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
+ uchar *ivptr;
+ ivptr = (uchar *)iwe->rx_seq;
+ key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
+ (ivptr[3] << 8) | ivptr[2];
+ key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
+ key.iv_initialized = TRUE;
+ }
+
+ switch (iwe->alg) {
+ case IW_ENCODE_ALG_NONE:
+ key.algo = CRYPTO_ALGO_OFF;
+ break;
+ case IW_ENCODE_ALG_WEP:
+ if (iwe->key_len == WEP1_KEY_SIZE)
+ key.algo = CRYPTO_ALGO_WEP1;
+ else
+ key.algo = CRYPTO_ALGO_WEP128;
+ break;
+ case IW_ENCODE_ALG_TKIP:
+ key.algo = CRYPTO_ALGO_TKIP;
+ break;
+ case IW_ENCODE_ALG_CCMP:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ break;
+#ifdef BCMWAPI_WPI
+ case IW_ENCODE_ALG_SM4:
+ key.algo = CRYPTO_ALGO_SMS4;
+ if (iwe->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
+ key.flags &= ~WL_PRIMARY_KEY;
+ }
+ break;
+#endif
+ default:
+ break;
+ }
+ swap_key_from_BE(&key);
+
+ dhd_wait_pend8021x(dev);
+
+ error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+ if (error)
+ return error;
+ }
+ return 0;
+}
+
+
+#if WIRELESS_EXT > 17
+struct {
+ pmkid_list_t pmkids;
+ pmkid_t foo[MAXPMKID-1];
+} pmkid_list;
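+
+/*
+ * SIOCSIWPMKSA: maintain the driver-local PMKID cache above (flush, remove
+ * or add an entry) and push the whole list to the firmware through the
+ * "pmkid_info" iovar.
+ */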
+static int
+wl_iw_set_pmksa(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ struct iw_pmksa *iwpmksa;
+ uint i;
+ char eabuf[ETHER_ADDR_STR_LEN];
+ pmkid_t * pmkid_array = pmkid_list.pmkids.pmkid;
+
+ WL_TRACE(("%s: SIOCSIWPMKSA\n", dev->name));
+ iwpmksa = (struct iw_pmksa *)extra;
+ bzero((char *)eabuf, ETHER_ADDR_STR_LEN);
+ if (iwpmksa->cmd == IW_PMKSA_FLUSH) {
+ WL_TRACE(("wl_iw_set_pmksa - IW_PMKSA_FLUSH\n"));
+ bzero((char *)&pmkid_list, sizeof(pmkid_list));
+ }
+ if (iwpmksa->cmd == IW_PMKSA_REMOVE) {
+ pmkid_list_t pmkid, *pmkidptr;
+ pmkidptr = &pmkid;
+ bcopy(&iwpmksa->bssid.sa_data[0], &pmkidptr->pmkid[0].BSSID, ETHER_ADDR_LEN);
+ bcopy(&iwpmksa->pmkid[0], &pmkidptr->pmkid[0].PMKID, WPA2_PMKID_LEN);
+ {
+ uint j;
+ WL_TRACE(("wl_iw_set_pmksa,IW_PMKSA_REMOVE - PMKID: %s = ",
+ bcm_ether_ntoa(&pmkidptr->pmkid[0].BSSID,
+ eabuf)));
+ for (j = 0; j < WPA2_PMKID_LEN; j++)
+ WL_TRACE(("%02x ", pmkidptr->pmkid[0].PMKID[j]));
+ WL_TRACE(("\n"));
+ }
+ for (i = 0; i < pmkid_list.pmkids.npmkid; i++)
+ if (!bcmp(&iwpmksa->bssid.sa_data[0], &pmkid_array[i].BSSID,
+ ETHER_ADDR_LEN))
+ break;
+ for (; i < pmkid_list.pmkids.npmkid; i++) {
+ bcopy(&pmkid_array[i+1].BSSID,
+ &pmkid_array[i].BSSID,
+ ETHER_ADDR_LEN);
+ bcopy(&pmkid_array[i+1].PMKID,
+ &pmkid_array[i].PMKID,
+ WPA2_PMKID_LEN);
+ }
+ pmkid_list.pmkids.npmkid--;
+ }
+ if (iwpmksa->cmd == IW_PMKSA_ADD) {
+ bcopy(&iwpmksa->bssid.sa_data[0],
+ &pmkid_array[pmkid_list.pmkids.npmkid].BSSID,
+ ETHER_ADDR_LEN);
+ bcopy(&iwpmksa->pmkid[0], &pmkid_array[pmkid_list.pmkids.npmkid].PMKID,
+ WPA2_PMKID_LEN);
+ {
+ uint j;
+ uint k;
+ k = pmkid_list.pmkids.npmkid;
+ BCM_REFERENCE(k);
+ WL_TRACE(("wl_iw_set_pmksa,IW_PMKSA_ADD - PMKID: %s = ",
+ bcm_ether_ntoa(&pmkid_array[k].BSSID,
+ eabuf)));
+ for (j = 0; j < WPA2_PMKID_LEN; j++)
+ WL_TRACE(("%02x ", pmkid_array[k].PMKID[j]));
+ WL_TRACE(("\n"));
+ }
+ pmkid_list.pmkids.npmkid++;
+ }
+ WL_TRACE(("PRINTING pmkid LIST - No of elements %d\n", pmkid_list.pmkids.npmkid));
+ for (i = 0; i < pmkid_list.pmkids.npmkid; i++) {
+ uint j;
+ WL_TRACE(("PMKID[%d]: %s = ", i,
+ bcm_ether_ntoa(&pmkid_array[i].BSSID,
+ eabuf)));
+ for (j = 0; j < WPA2_PMKID_LEN; j++)
+ WL_TRACE(("%02x ", pmkid_array[i].PMKID[j]));
+ printf("\n");
+ }
+ WL_TRACE(("\n"));
+ dev_wlc_bufvar_set(dev, "pmkid_info", (char *)&pmkid_list, sizeof(pmkid_list));
+ return 0;
+}
+#endif
+
+static int
+wl_iw_get_encodeext(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ WL_TRACE(("%s: SIOCGIWENCODEEXT\n", dev->name));
+ return 0;
+}
+
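+/*
+ * SIOCSIWAUTH: translate the wireless-extensions auth parameters (WPA
+ * version, pairwise/group ciphers, key management, TKIP countermeasures,
+ * 802.11 auth algorithm, privacy invoked, ...) into the corresponding
+ * "wpa_auth", "wsec" and "auth" firmware iovars.
+ */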
+static int
+wl_iw_set_wpaauth(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error = 0;
+ int paramid;
+ int paramval;
+ uint32 cipher_combined;
+ int val = 0;
+ wl_iw_t *iw = IW_DEV_IF(dev);
+
+ WL_TRACE(("%s: SIOCSIWAUTH\n", dev->name));
+
+ paramid = vwrq->flags & IW_AUTH_INDEX;
+ paramval = vwrq->value;
+
+ WL_TRACE(("%s: SIOCSIWAUTH, paramid = 0x%0x, paramval = 0x%0x\n",
+ dev->name, paramid, paramval));
+
+ switch (paramid) {
+
+ case IW_AUTH_WPA_VERSION:
+
+ if (paramval & IW_AUTH_WPA_VERSION_DISABLED)
+ val = WPA_AUTH_DISABLED;
+ else if (paramval & (IW_AUTH_WPA_VERSION_WPA))
+ val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
+ else if (paramval & IW_AUTH_WPA_VERSION_WPA2)
+ val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
+#ifdef BCMWAPI_WPI
+ else if (paramval & IW_AUTH_WAPI_VERSION_1)
+ val = WAPI_AUTH_UNSPECIFIED;
+#endif
+ WL_TRACE(("%s: %d: setting wpa_auth to 0x%0x\n", __FUNCTION__, __LINE__, val));
+ if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
+ return error;
+ break;
+
+ case IW_AUTH_CIPHER_PAIRWISE:
+ case IW_AUTH_CIPHER_GROUP:
+
+ if (paramid == IW_AUTH_CIPHER_PAIRWISE) {
+ iw->pwsec = paramval;
+ }
+ else {
+ iw->gwsec = paramval;
+ }
+
+ if ((error = dev_wlc_intvar_get(dev, "wsec", &val)))
+ return error;
+
+ cipher_combined = iw->gwsec | iw->pwsec;
+ val &= ~(WEP_ENABLED | TKIP_ENABLED | AES_ENABLED);
+ if (cipher_combined & (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104))
+ val |= WEP_ENABLED;
+ if (cipher_combined & IW_AUTH_CIPHER_TKIP)
+ val |= TKIP_ENABLED;
+ if (cipher_combined & IW_AUTH_CIPHER_CCMP)
+ val |= AES_ENABLED;
+#ifdef BCMWAPI_WPI
+ val &= ~SMS4_ENABLED;
+ if (cipher_combined & IW_AUTH_CIPHER_SMS4)
+ val |= SMS4_ENABLED;
+#endif
+
+ if (iw->privacy_invoked && !val) {
+ WL_WSEC(("%s: %s: 'Privacy invoked' TRUE but clearing wsec, assuming "
+ "we're a WPS enrollee\n", dev->name, __FUNCTION__));
+ if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) {
+ WL_WSEC(("Failed to set iovar is_WPS_enrollee\n"));
+ return error;
+ }
+ } else if (val) {
+ if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+ WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+ return error;
+ }
+ }
+
+ if ((error = dev_wlc_intvar_set(dev, "wsec", val)))
+ return error;
+#ifdef WLFBT
+ if ((paramid == IW_AUTH_CIPHER_PAIRWISE) && (val | AES_ENABLED)) {
+ if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 1)))
+ return error;
+ }
+ else if (val == 0) {
+ if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 0)))
+ return error;
+ }
+#endif
+ break;
+
+ case IW_AUTH_KEY_MGMT:
+ if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+ return error;
+
+ if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
+ if (paramval & IW_AUTH_KEY_MGMT_PSK)
+ val = WPA_AUTH_PSK;
+ else
+ val = WPA_AUTH_UNSPECIFIED;
+ }
+ else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
+ if (paramval & IW_AUTH_KEY_MGMT_PSK)
+ val = WPA2_AUTH_PSK;
+ else
+ val = WPA2_AUTH_UNSPECIFIED;
+ }
+#ifdef BCMWAPI_WPI
+ if (paramval & (IW_AUTH_KEY_MGMT_WAPI_PSK | IW_AUTH_KEY_MGMT_WAPI_CERT))
+ val = WAPI_AUTH_UNSPECIFIED;
+#endif
+ WL_TRACE(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val));
+ if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
+ return error;
+ break;
+
+ case IW_AUTH_TKIP_COUNTERMEASURES:
+ dev_wlc_bufvar_set(dev, "tkip_countermeasures", (char *)&paramval, 1);
+ break;
+
+ case IW_AUTH_80211_AUTH_ALG:
+
+ WL_ERROR(("Setting the D11auth %d\n", paramval));
+ if (paramval & IW_AUTH_ALG_OPEN_SYSTEM)
+ val = 0;
+ else if (paramval & IW_AUTH_ALG_SHARED_KEY)
+ val = 1;
+ else
+ error = 1;
+ if (!error && (error = dev_wlc_intvar_set(dev, "auth", val)))
+ return error;
+ break;
+
+ case IW_AUTH_WPA_ENABLED:
+ if (paramval == 0) {
+ val = 0;
+ WL_TRACE(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val));
+ error = dev_wlc_intvar_set(dev, "wpa_auth", val);
+ return error;
+ }
+ else {
+
+ }
+ break;
+
+ case IW_AUTH_DROP_UNENCRYPTED:
+ dev_wlc_bufvar_set(dev, "wsec_restrict", (char *)&paramval, 1);
+ break;
+
+ case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+ dev_wlc_bufvar_set(dev, "rx_unencrypted_eapol", (char *)&paramval, 1);
+ break;
+
+#if WIRELESS_EXT > 17
+
+ case IW_AUTH_ROAMING_CONTROL:
+ WL_TRACE(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
+
+ break;
+
+ case IW_AUTH_PRIVACY_INVOKED: {
+ int wsec;
+
+ if (paramval == 0) {
+ iw->privacy_invoked = FALSE;
+ if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+ WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+ return error;
+ }
+ } else {
+ iw->privacy_invoked = TRUE;
+ if ((error = dev_wlc_intvar_get(dev, "wsec", &wsec)))
+ return error;
+
+ if (!WSEC_ENABLED(wsec)) {
+
+ if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) {
+ WL_WSEC(("Failed to set iovar is_WPS_enrollee\n"));
+ return error;
+ }
+ } else {
+ if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+ WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+ return error;
+ }
+ }
+ }
+ break;
+ }
+
+
+#endif
+
+#ifdef BCMWAPI_WPI
+
+ case IW_AUTH_WAPI_ENABLED:
+ if ((error = dev_wlc_intvar_get(dev, "wsec", &val)))
+ return error;
+ if (paramval) {
+ val |= SMS4_ENABLED;
+ if ((error = dev_wlc_intvar_set(dev, "wsec", val))) {
+ WL_ERROR(("%s: setting wsec to 0x%0x returned error %d\n",
+ __FUNCTION__, val, error));
+ return error;
+ }
+ if ((error = dev_wlc_intvar_set(dev, "wpa_auth", WAPI_AUTH_UNSPECIFIED))) {
+ WL_ERROR(("%s: setting wpa_auth(%d) returned %d\n",
+ __FUNCTION__, WAPI_AUTH_UNSPECIFIED,
+ error));
+ return error;
+ }
+ }
+
+ break;
+
+#endif
+
+ default:
+ break;
+ }
+ return 0;
+}
+#define VAL_PSK(_val) (((_val) & WPA_AUTH_PSK) || ((_val) & WPA2_AUTH_PSK))
+
+static int
+wl_iw_get_wpaauth(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error;
+ int paramid;
+ int paramval = 0;
+ int val;
+ wl_iw_t *iw = IW_DEV_IF(dev);
+
+ WL_TRACE(("%s: SIOCGIWAUTH\n", dev->name));
+
+ paramid = vwrq->flags & IW_AUTH_INDEX;
+
+ switch (paramid) {
+ case IW_AUTH_WPA_VERSION:
+
+ if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+ return error;
+ if (val & (WPA_AUTH_NONE | WPA_AUTH_DISABLED))
+ paramval = IW_AUTH_WPA_VERSION_DISABLED;
+ else if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED))
+ paramval = IW_AUTH_WPA_VERSION_WPA;
+ else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED))
+ paramval = IW_AUTH_WPA_VERSION_WPA2;
+ break;
+
+ case IW_AUTH_CIPHER_PAIRWISE:
+ paramval = iw->pwsec;
+ break;
+
+ case IW_AUTH_CIPHER_GROUP:
+ paramval = iw->gwsec;
+ break;
+
+ case IW_AUTH_KEY_MGMT:
+
+ if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+ return error;
+ if (VAL_PSK(val))
+ paramval = IW_AUTH_KEY_MGMT_PSK;
+ else
+ paramval = IW_AUTH_KEY_MGMT_802_1X;
+
+ break;
+ case IW_AUTH_TKIP_COUNTERMEASURES:
+ dev_wlc_bufvar_get(dev, "tkip_countermeasures", (char *)&paramval, 1);
+ break;
+
+ case IW_AUTH_DROP_UNENCRYPTED:
+ dev_wlc_bufvar_get(dev, "wsec_restrict", (char *)&paramval, 1);
+ break;
+
+ case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+ dev_wlc_bufvar_get(dev, "rx_unencrypted_eapol", (char *)&paramval, 1);
+ break;
+
+ case IW_AUTH_80211_AUTH_ALG:
+
+ if ((error = dev_wlc_intvar_get(dev, "auth", &val)))
+ return error;
+ if (!val)
+ paramval = IW_AUTH_ALG_OPEN_SYSTEM;
+ else
+ paramval = IW_AUTH_ALG_SHARED_KEY;
+ break;
+ case IW_AUTH_WPA_ENABLED:
+ if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+ return error;
+ if (val)
+ paramval = TRUE;
+ else
+ paramval = FALSE;
+ break;
+
+#if WIRELESS_EXT > 17
+
+ case IW_AUTH_ROAMING_CONTROL:
+ WL_ERROR(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
+
+ break;
+
+ case IW_AUTH_PRIVACY_INVOKED:
+ paramval = iw->privacy_invoked;
+ break;
+
+#endif
+ }
+ vwrq->value = paramval;
+ return 0;
+}
+#endif
+
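+/*
+ * Standard wireless-extensions handler table, indexed by IW_IOCTL_IDX(cmd);
+ * NULL entries are unsupported commands.
+ */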
+static const iw_handler wl_iw_handler[] =
+{
+ (iw_handler) wl_iw_config_commit,
+ (iw_handler) wl_iw_get_name,
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+ (iw_handler) wl_iw_set_freq,
+ (iw_handler) wl_iw_get_freq,
+ (iw_handler) wl_iw_set_mode,
+ (iw_handler) wl_iw_get_mode,
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+ (iw_handler) wl_iw_get_range,
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+ (iw_handler) wl_iw_set_spy,
+ (iw_handler) wl_iw_get_spy,
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+ (iw_handler) wl_iw_set_wap,
+ (iw_handler) wl_iw_get_wap,
+#if WIRELESS_EXT > 17
+ (iw_handler) wl_iw_mlme,
+#else
+ (iw_handler) NULL,
+#endif
+ (iw_handler) wl_iw_iscan_get_aplist,
+#if WIRELESS_EXT > 13
+ (iw_handler) wl_iw_iscan_set_scan,
+ (iw_handler) wl_iw_iscan_get_scan,
+#else
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+#endif
+ (iw_handler) wl_iw_set_essid,
+ (iw_handler) wl_iw_get_essid,
+ (iw_handler) wl_iw_set_nick,
+ (iw_handler) wl_iw_get_nick,
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+ (iw_handler) wl_iw_set_rate,
+ (iw_handler) wl_iw_get_rate,
+ (iw_handler) wl_iw_set_rts,
+ (iw_handler) wl_iw_get_rts,
+ (iw_handler) wl_iw_set_frag,
+ (iw_handler) wl_iw_get_frag,
+ (iw_handler) wl_iw_set_txpow,
+ (iw_handler) wl_iw_get_txpow,
+#if WIRELESS_EXT > 10
+ (iw_handler) wl_iw_set_retry,
+ (iw_handler) wl_iw_get_retry,
+#endif
+ (iw_handler) wl_iw_set_encode,
+ (iw_handler) wl_iw_get_encode,
+ (iw_handler) wl_iw_set_power,
+ (iw_handler) wl_iw_get_power,
+#if WIRELESS_EXT > 17
+ (iw_handler) NULL,
+ (iw_handler) NULL,
+ (iw_handler) wl_iw_set_wpaie,
+ (iw_handler) wl_iw_get_wpaie,
+ (iw_handler) wl_iw_set_wpaauth,
+ (iw_handler) wl_iw_get_wpaauth,
+ (iw_handler) wl_iw_set_encodeext,
+ (iw_handler) wl_iw_get_encodeext,
+ (iw_handler) wl_iw_set_pmksa,
+#endif
+};
+
+#if WIRELESS_EXT > 12
+enum {
+ WL_IW_SET_LEDDC = SIOCIWFIRSTPRIV,
+ WL_IW_SET_VLANMODE,
+ WL_IW_SET_PM
+};
+
+static iw_handler wl_iw_priv_handler[] = {
+ wl_iw_set_leddc,
+ wl_iw_set_vlanmode,
+ wl_iw_set_pm
+};
+
+static struct iw_priv_args wl_iw_priv_args[] = {
+ {
+ WL_IW_SET_LEDDC,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ 0,
+ "set_leddc"
+ },
+ {
+ WL_IW_SET_VLANMODE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ 0,
+ "set_vlanmode"
+ },
+ {
+ WL_IW_SET_PM,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ 0,
+ "set_pm"
+ }
+};
+
+const struct iw_handler_def wl_iw_handler_def =
+{
+ .num_standard = ARRAYSIZE(wl_iw_handler),
+ .num_private = ARRAY_SIZE(wl_iw_priv_handler),
+ .num_private_args = ARRAY_SIZE(wl_iw_priv_args),
+ .standard = (iw_handler *) wl_iw_handler,
+ .private = wl_iw_priv_handler,
+ .private_args = wl_iw_priv_args,
+#if WIRELESS_EXT >= 19
+ .get_wireless_stats = dhd_get_wireless_stats,
+#endif
+ };
+#endif
+
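+/*
+ * Dispatch a wireless-extensions ioctl issued directly on the net_device:
+ * size-check and copy in any user buffer, call the handler from
+ * wl_iw_handler[], then copy the result back to user space.
+ */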
+int
+wl_iw_ioctl(
+ struct net_device *dev,
+ struct ifreq *rq,
+ int cmd
+)
+{
+ struct iwreq *wrq = (struct iwreq *) rq;
+ struct iw_request_info info;
+ iw_handler handler;
+ char *extra = NULL;
+ size_t token_size = 1;
+ int max_tokens = 0, ret = 0;
+
+ if (cmd < SIOCIWFIRST ||
+ IW_IOCTL_IDX(cmd) >= ARRAYSIZE(wl_iw_handler) ||
+ !(handler = wl_iw_handler[IW_IOCTL_IDX(cmd)]))
+ return -EOPNOTSUPP;
+
+ switch (cmd) {
+
+ case SIOCSIWESSID:
+ case SIOCGIWESSID:
+ case SIOCSIWNICKN:
+ case SIOCGIWNICKN:
+ max_tokens = IW_ESSID_MAX_SIZE + 1;
+ break;
+
+ case SIOCSIWENCODE:
+ case SIOCGIWENCODE:
+#if WIRELESS_EXT > 17
+ case SIOCSIWENCODEEXT:
+ case SIOCGIWENCODEEXT:
+#endif
+ max_tokens = IW_ENCODING_TOKEN_MAX;
+ break;
+
+ case SIOCGIWRANGE:
+ max_tokens = sizeof(struct iw_range);
+ break;
+
+ case SIOCGIWAPLIST:
+ token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality);
+ max_tokens = IW_MAX_AP;
+ break;
+
+#if WIRELESS_EXT > 13
+ case SIOCGIWSCAN:
+ if (g_iscan)
+ max_tokens = wrq->u.data.length;
+ else
+ max_tokens = IW_SCAN_MAX_DATA;
+ break;
+#endif
+
+ case SIOCSIWSPY:
+ token_size = sizeof(struct sockaddr);
+ max_tokens = IW_MAX_SPY;
+ break;
+
+ case SIOCGIWSPY:
+ token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality);
+ max_tokens = IW_MAX_SPY;
+ break;
+ default:
+ break;
+ }
+
+ if (max_tokens && wrq->u.data.pointer) {
+ if (wrq->u.data.length > max_tokens)
+ return -E2BIG;
+
+ if (!(extra = kmalloc(max_tokens * token_size, GFP_KERNEL)))
+ return -ENOMEM;
+
+ if (copy_from_user(extra, wrq->u.data.pointer, wrq->u.data.length * token_size)) {
+ kfree(extra);
+ return -EFAULT;
+ }
+ }
+
+ info.cmd = cmd;
+ info.flags = 0;
+
+ ret = handler(dev, &info, &wrq->u, extra);
+
+ if (extra) {
+ if (copy_to_user(wrq->u.data.pointer, extra, wrq->u.data.length * token_size)) {
+ kfree(extra);
+ return -EFAULT;
+ }
+
+ kfree(extra);
+ }
+
+ return ret;
+}
+
+
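+/*
+ * Map an (event, status, reason) triple to a short human-readable
+ * connection-status string; returns TRUE only when the event matches one of
+ * the known connection outcomes in the table below.
+ */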
+bool
+wl_iw_conn_status_str(uint32 event_type, uint32 status, uint32 reason,
+ char* stringBuf, uint buflen)
+{
+ typedef struct conn_fail_event_map_t {
+ uint32 inEvent;
+ uint32 inStatus;
+ uint32 inReason;
+ const char* outName;
+ const char* outCause;
+ } conn_fail_event_map_t;
+
+
+# define WL_IW_DONT_CARE 9999
+ const conn_fail_event_map_t event_map [] = {
+
+
+ {WLC_E_SET_SSID, WLC_E_STATUS_SUCCESS, WL_IW_DONT_CARE,
+ "Conn", "Success"},
+ {WLC_E_SET_SSID, WLC_E_STATUS_NO_NETWORKS, WL_IW_DONT_CARE,
+ "Conn", "NoNetworks"},
+ {WLC_E_SET_SSID, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE,
+ "Conn", "ConfigMismatch"},
+ {WLC_E_PRUNE, WL_IW_DONT_CARE, WLC_E_PRUNE_ENCR_MISMATCH,
+ "Conn", "EncrypMismatch"},
+ {WLC_E_PRUNE, WL_IW_DONT_CARE, WLC_E_RSN_MISMATCH,
+ "Conn", "RsnMismatch"},
+ {WLC_E_AUTH, WLC_E_STATUS_TIMEOUT, WL_IW_DONT_CARE,
+ "Conn", "AuthTimeout"},
+ {WLC_E_AUTH, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE,
+ "Conn", "AuthFail"},
+ {WLC_E_AUTH, WLC_E_STATUS_NO_ACK, WL_IW_DONT_CARE,
+ "Conn", "AuthNoAck"},
+ {WLC_E_REASSOC, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE,
+ "Conn", "ReassocFail"},
+ {WLC_E_REASSOC, WLC_E_STATUS_TIMEOUT, WL_IW_DONT_CARE,
+ "Conn", "ReassocTimeout"},
+ {WLC_E_REASSOC, WLC_E_STATUS_ABORT, WL_IW_DONT_CARE,
+ "Conn", "ReassocAbort"},
+ {WLC_E_PSK_SUP, WLC_SUP_KEYED, WL_IW_DONT_CARE,
+ "Sup", "ConnSuccess"},
+ {WLC_E_PSK_SUP, WL_IW_DONT_CARE, WL_IW_DONT_CARE,
+ "Sup", "WpaHandshakeFail"},
+ {WLC_E_DEAUTH_IND, WL_IW_DONT_CARE, WL_IW_DONT_CARE,
+ "Conn", "Deauth"},
+ {WLC_E_DISASSOC_IND, WL_IW_DONT_CARE, WL_IW_DONT_CARE,
+ "Conn", "DisassocInd"},
+ {WLC_E_DISASSOC, WL_IW_DONT_CARE, WL_IW_DONT_CARE,
+ "Conn", "Disassoc"}
+ };
+
+ const char* name = "";
+ const char* cause = NULL;
+ int i;
+
+
+ for (i = 0; i < sizeof(event_map)/sizeof(event_map[0]); i++) {
+ const conn_fail_event_map_t* row = &event_map[i];
+ if (row->inEvent == event_type &&
+ (row->inStatus == status || row->inStatus == WL_IW_DONT_CARE) &&
+ (row->inReason == reason || row->inReason == WL_IW_DONT_CARE)) {
+ name = row->outName;
+ cause = row->outCause;
+ break;
+ }
+ }
+
+
+ if (cause) {
+ memset(stringBuf, 0, buflen);
+ snprintf(stringBuf, buflen, "%s %s %02d %02d",
+ name, cause, status, reason);
+ WL_TRACE(("Connection status: %s\n", stringBuf));
+ return TRUE;
+ } else {
+ return FALSE;
+ }
+}
+
+#if (WIRELESS_EXT > 14)
+
+static bool
+wl_iw_check_conn_fail(wl_event_msg_t *e, char* stringBuf, uint buflen)
+{
+ uint32 event = ntoh32(e->event_type);
+ uint32 status = ntoh32(e->status);
+ uint32 reason = ntoh32(e->reason);
+
+ if (wl_iw_conn_status_str(event, status, reason, stringBuf, buflen)) {
+ return TRUE;
+ } else
+ {
+ return FALSE;
+ }
+}
+#endif
+
+#ifndef IW_CUSTOM_MAX
+#define IW_CUSTOM_MAX 256
+#endif
+
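+/*
+ * Translate a firmware event (wl_event_msg_t) into the matching
+ * wireless-extensions event and deliver it with wireless_send_event().
+ * WLC_E_SCAN_COMPLETE additionally wakes the iscan thread, and connection
+ * failures are reported as IWEVCUSTOM strings.
+ */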
+void
+wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data)
+{
+#if WIRELESS_EXT > 13
+ union iwreq_data wrqu;
+ char extra[IW_CUSTOM_MAX + 1];
+ int cmd = 0;
+ uint32 event_type = ntoh32(e->event_type);
+ uint16 flags = ntoh16(e->flags);
+ uint32 datalen = ntoh32(e->datalen);
+ uint32 status = ntoh32(e->status);
+
+ memset(&wrqu, 0, sizeof(wrqu));
+ memset(extra, 0, sizeof(extra));
+
+ memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+ wrqu.addr.sa_family = ARPHRD_ETHER;
+
+ switch (event_type) {
+ case WLC_E_TXFAIL:
+ cmd = IWEVTXDROP;
+ break;
+#if WIRELESS_EXT > 14
+ case WLC_E_JOIN:
+ case WLC_E_ASSOC_IND:
+ case WLC_E_REASSOC_IND:
+ cmd = IWEVREGISTERED;
+ break;
+ case WLC_E_DEAUTH_IND:
+ case WLC_E_DISASSOC_IND:
+ cmd = SIOCGIWAP;
+ wrqu.data.length = strlen(extra);
+ bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
+ bzero(&extra, ETHER_ADDR_LEN);
+ break;
+
+ case WLC_E_LINK:
+ case WLC_E_NDIS_LINK:
+ cmd = SIOCGIWAP;
+ wrqu.data.length = strlen(extra);
+ if (!(flags & WLC_EVENT_MSG_LINK)) {
+ bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
+ bzero(&extra, ETHER_ADDR_LEN);
+ }
+ break;
+ case WLC_E_ACTION_FRAME:
+ cmd = IWEVCUSTOM;
+ if (datalen + 1 <= sizeof(extra)) {
+ wrqu.data.length = datalen + 1;
+ extra[0] = WLC_E_ACTION_FRAME;
+ memcpy(&extra[1], data, datalen);
+ WL_TRACE(("WLC_E_ACTION_FRAME len %d \n", wrqu.data.length));
+ }
+ break;
+
+ case WLC_E_ACTION_FRAME_COMPLETE:
+ cmd = IWEVCUSTOM;
+ if (sizeof(status) + 1 <= sizeof(extra)) {
+ wrqu.data.length = sizeof(status) + 1;
+ extra[0] = WLC_E_ACTION_FRAME_COMPLETE;
+ memcpy(&extra[1], &status, sizeof(status));
+ WL_TRACE(("wl_iw_event status %d \n", status));
+ }
+ break;
+#endif
+#if WIRELESS_EXT > 17
+ case WLC_E_MIC_ERROR: {
+ struct iw_michaelmicfailure *micerrevt = (struct iw_michaelmicfailure *)&extra;
+ cmd = IWEVMICHAELMICFAILURE;
+ wrqu.data.length = sizeof(struct iw_michaelmicfailure);
+ if (flags & WLC_EVENT_MSG_GROUP)
+ micerrevt->flags |= IW_MICFAILURE_GROUP;
+ else
+ micerrevt->flags |= IW_MICFAILURE_PAIRWISE;
+ memcpy(micerrevt->src_addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+ micerrevt->src_addr.sa_family = ARPHRD_ETHER;
+
+ break;
+ }
+
+ case WLC_E_ASSOC_REQ_IE:
+ cmd = IWEVASSOCREQIE;
+ wrqu.data.length = datalen;
+ if (datalen < sizeof(extra))
+ memcpy(extra, data, datalen);
+ break;
+
+ case WLC_E_ASSOC_RESP_IE:
+ cmd = IWEVASSOCRESPIE;
+ wrqu.data.length = datalen;
+ if (datalen < sizeof(extra))
+ memcpy(extra, data, datalen);
+ break;
+
+ case WLC_E_PMKID_CACHE: {
+ struct iw_pmkid_cand *iwpmkidcand = (struct iw_pmkid_cand *)&extra;
+ pmkid_cand_list_t *pmkcandlist;
+ pmkid_cand_t *pmkidcand;
+ int count;
+
+ if (data == NULL)
+ break;
+
+ cmd = IWEVPMKIDCAND;
+ pmkcandlist = data;
+ count = ntoh32_ua((uint8 *)&pmkcandlist->npmkid_cand);
+ wrqu.data.length = sizeof(struct iw_pmkid_cand);
+ pmkidcand = pmkcandlist->pmkid_cand;
+ while (count) {
+ bzero(iwpmkidcand, sizeof(struct iw_pmkid_cand));
+ if (pmkidcand->preauth)
+ iwpmkidcand->flags |= IW_PMKID_CAND_PREAUTH;
+ bcopy(&pmkidcand->BSSID, &iwpmkidcand->bssid.sa_data,
+ ETHER_ADDR_LEN);
+ wireless_send_event(dev, cmd, &wrqu, extra);
+ pmkidcand++;
+ count--;
+ }
+ break;
+ }
+#endif
+
+ case WLC_E_SCAN_COMPLETE:
+#if WIRELESS_EXT > 14
+ cmd = SIOCGIWSCAN;
+#endif
+ WL_TRACE(("event WLC_E_SCAN_COMPLETE\n"));
+ if ((g_iscan) && (g_iscan->sysioc_pid >= 0) &&
+ (g_iscan->iscan_state != ISCAN_STATE_IDLE))
+ up(&g_iscan->sysioc_sem);
+ break;
+
+ default:
+
+ break;
+ }
+
+ if (cmd) {
+ if (cmd == SIOCGIWSCAN)
+ wireless_send_event(dev, cmd, &wrqu, NULL);
+ else
+ wireless_send_event(dev, cmd, &wrqu, extra);
+ }
+
+#if WIRELESS_EXT > 14
+
+ memset(extra, 0, sizeof(extra));
+ if (wl_iw_check_conn_fail(e, extra, sizeof(extra))) {
+ cmd = IWEVCUSTOM;
+ wrqu.data.length = strlen(extra);
+ wireless_send_event(dev, cmd, &wrqu, extra);
+ }
+#endif
+
+#endif
+}
+
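+/*
+ * Fill struct iw_statistics for /proc/net/wireless: quality is derived from
+ * RSSI thresholds, level/noise come from WLC_GET_RSSI/WLC_GET_PHY_NOISE,
+ * and the discard counters are taken from the "counters" iovar.
+ */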
+int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats)
+{
+ int res = 0;
+ wl_cnt_t cnt;
+ int phy_noise;
+ int rssi;
+ scb_val_t scb_val;
+
+ phy_noise = 0;
+ if ((res = dev_wlc_ioctl(dev, WLC_GET_PHY_NOISE, &phy_noise, sizeof(phy_noise))))
+ goto done;
+
+ phy_noise = dtoh32(phy_noise);
+ WL_TRACE(("wl_iw_get_wireless_stats phy noise=%d\n *****", phy_noise));
+
+ scb_val.val = 0;
+ if ((res = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t))))
+ goto done;
+
+ rssi = dtoh32(scb_val.val);
+ WL_TRACE(("wl_iw_get_wireless_stats rssi=%d ****** \n", rssi));
+ if (rssi <= WL_IW_RSSI_NO_SIGNAL)
+ wstats->qual.qual = 0;
+ else if (rssi <= WL_IW_RSSI_VERY_LOW)
+ wstats->qual.qual = 1;
+ else if (rssi <= WL_IW_RSSI_LOW)
+ wstats->qual.qual = 2;
+ else if (rssi <= WL_IW_RSSI_GOOD)
+ wstats->qual.qual = 3;
+ else if (rssi <= WL_IW_RSSI_VERY_GOOD)
+ wstats->qual.qual = 4;
+ else
+ wstats->qual.qual = 5;
+
+
+ wstats->qual.level = 0x100 + rssi;
+ wstats->qual.noise = 0x100 + phy_noise;
+#if WIRELESS_EXT > 18
+ wstats->qual.updated |= (IW_QUAL_ALL_UPDATED | IW_QUAL_DBM);
+#else
+ wstats->qual.updated |= 7;
+#endif
+
+#if WIRELESS_EXT > 11
+ WL_TRACE(("wl_iw_get_wireless_stats counters=%d\n *****", (int)sizeof(wl_cnt_t)));
+
+ memset(&cnt, 0, sizeof(wl_cnt_t));
+ res = dev_wlc_bufvar_get(dev, "counters", (char *)&cnt, sizeof(wl_cnt_t));
+ if (res)
+ {
+ WL_ERROR(("wl_iw_get_wireless_stats counters failed error=%d ****** \n", res));
+ goto done;
+ }
+
+ cnt.version = dtoh16(cnt.version);
+ if (cnt.version != WL_CNT_T_VERSION) {
+ WL_TRACE(("\tIncorrect version of counters struct: expected %d; got %d\n",
+ WL_CNT_T_VERSION, cnt.version));
+ goto done;
+ }
+
+ wstats->discard.nwid = 0;
+ wstats->discard.code = dtoh32(cnt.rxundec);
+ wstats->discard.fragment = dtoh32(cnt.rxfragerr);
+ wstats->discard.retries = dtoh32(cnt.txfail);
+ wstats->discard.misc = dtoh32(cnt.rxrunt) + dtoh32(cnt.rxgiant);
+ wstats->miss.beacon = 0;
+
+ WL_TRACE(("wl_iw_get_wireless_stats counters txframe=%d txbyte=%d\n",
+ dtoh32(cnt.txframe), dtoh32(cnt.txbyte)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxfrmtoolong=%d\n", dtoh32(cnt.rxfrmtoolong)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxbadplcp=%d\n", dtoh32(cnt.rxbadplcp)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxundec=%d\n", dtoh32(cnt.rxundec)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxfragerr=%d\n", dtoh32(cnt.rxfragerr)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters txfail=%d\n", dtoh32(cnt.txfail)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxrunt=%d\n", dtoh32(cnt.rxrunt)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxgiant=%d\n", dtoh32(cnt.rxgiant)));
+
+#endif
+
+done:
+ return res;
+}
+
+static void
+wl_iw_timerfunc(ulong data)
+{
+ iscan_info_t *iscan = (iscan_info_t *)data;
+ iscan->timer_on = 0;
+ if (iscan->iscan_state != ISCAN_STATE_IDLE) {
+ WL_TRACE(("timer trigger\n"));
+ up(&iscan->sysioc_sem);
+ }
+}
+
+static void
+wl_iw_set_event_mask(struct net_device *dev)
+{
+ char eventmask[WL_EVENTING_MASK_LEN];
+ char iovbuf[WL_EVENTING_MASK_LEN + 12];
+
+ dev_iw_iovar_getbuf(dev, "event_msgs", "", 0, iovbuf, sizeof(iovbuf));
+ bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
+ setbit(eventmask, WLC_E_SCAN_COMPLETE);
+ dev_iw_iovar_setbuf(dev, "event_msgs", eventmask, WL_EVENTING_MASK_LEN,
+ iovbuf, sizeof(iovbuf));
+
+}
+
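+/*
+ * Fill a wl_scan_params_t with defaults (broadcast BSSID, any BSS type,
+ * firmware-default dwell times) and the optional directed SSID for an
+ * incremental scan request.
+ */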
+static int
+wl_iw_iscan_prep(wl_scan_params_t *params, wlc_ssid_t *ssid)
+{
+ int err = 0;
+
+ memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+ params->bss_type = DOT11_BSSTYPE_ANY;
+ params->scan_type = 0;
+ params->nprobes = -1;
+ params->active_time = -1;
+ params->passive_time = -1;
+ params->home_time = -1;
+ params->channel_num = 0;
+
+ params->nprobes = htod32(params->nprobes);
+ params->active_time = htod32(params->active_time);
+ params->passive_time = htod32(params->passive_time);
+ params->home_time = htod32(params->home_time);
+ if (ssid && ssid->SSID_len)
+ memcpy(&params->ssid, ssid, sizeof(wlc_ssid_t));
+
+ return err;
+}
+
+static int
+wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action)
+{
+ int params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_iscan_params_t, params));
+ wl_iscan_params_t *params;
+ int err = 0;
+
+ if (ssid && ssid->SSID_len) {
+ params_size += sizeof(wlc_ssid_t);
+ }
+ params = (wl_iscan_params_t*)kmalloc(params_size, GFP_KERNEL);
+ if (params == NULL) {
+ return -ENOMEM;
+ }
+ memset(params, 0, params_size);
+ ASSERT(params_size < WLC_IOCTL_SMLEN);
+
+ err = wl_iw_iscan_prep(&params->params, ssid);
+
+ if (!err) {
+ params->version = htod32(ISCAN_REQ_VERSION);
+ params->action = htod16(action);
+ params->scan_duration = htod16(0);
+
+
+ (void) dev_iw_iovar_setbuf(iscan->dev, "iscan", params, params_size,
+ iscan->ioctlbuf, WLC_IOCTL_SMLEN);
+ }
+
+ kfree(params);
+ return err;
+}
+
+static uint32
+wl_iw_iscan_get(iscan_info_t *iscan)
+{
+ iscan_buf_t * buf;
+ iscan_buf_t * ptr;
+ wl_iscan_results_t * list_buf;
+ wl_iscan_results_t list;
+ wl_scan_results_t *results;
+ uint32 status;
+
+
+ if (iscan->list_cur) {
+ buf = iscan->list_cur;
+ iscan->list_cur = buf->next;
+ }
+ else {
+ buf = kmalloc(sizeof(iscan_buf_t), GFP_KERNEL);
+ if (!buf)
+ return WL_SCAN_RESULTS_ABORTED;
+ buf->next = NULL;
+ if (!iscan->list_hdr)
+ iscan->list_hdr = buf;
+ else {
+ ptr = iscan->list_hdr;
+ while (ptr->next) {
+ ptr = ptr->next;
+ }
+ ptr->next = buf;
+ }
+ }
+ memset(buf->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
+ list_buf = (wl_iscan_results_t*)buf->iscan_buf;
+ results = &list_buf->results;
+ results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
+ results->version = 0;
+ results->count = 0;
+
+ memset(&list, 0, sizeof(list));
+ list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
+ (void) dev_iw_iovar_getbuf(
+ iscan->dev,
+ "iscanresults",
+ &list,
+ WL_ISCAN_RESULTS_FIXED_SIZE,
+ buf->iscan_buf,
+ WLC_IW_ISCAN_MAXLEN);
+ results->buflen = dtoh32(results->buflen);
+ results->version = dtoh32(results->version);
+ results->count = dtoh32(results->count);
+ WL_TRACE(("results->count = %d\n", results->count));
+
+ WL_TRACE(("results->buflen = %d\n", results->buflen));
+ status = dtoh32(list_buf->status);
+ return status;
+}
+
+static void wl_iw_send_scan_complete(iscan_info_t *iscan)
+{
+ union iwreq_data wrqu;
+ char extra[IW_CUSTOM_MAX + 1];
+
+ memset(&wrqu, 0, sizeof(wrqu));
+ memset(extra, 0, sizeof(extra));
+ wireless_send_event(iscan->dev, SIOCGIWSCAN, &wrqu, extra);
+}
+
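+/*
+ * Kernel thread driving the incremental scan: each wakeup pulls partial
+ * results with wl_iw_iscan_get(), continues the scan or re-arms the timer
+ * while results are partial/pending, and signals SIOCGIWSCAN completion to
+ * user space once the scan finishes or is aborted.
+ */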
+static int
+_iscan_sysioc_thread(void *data)
+{
+ uint32 status;
+ iscan_info_t *iscan = (iscan_info_t *)data;
+
+ DAEMONIZE("iscan_sysioc");
+
+ status = WL_SCAN_RESULTS_PARTIAL;
+ while (down_interruptible(&iscan->sysioc_sem) == 0) {
+ if (iscan->timer_on) {
+ del_timer(&iscan->timer);
+ iscan->timer_on = 0;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ rtnl_lock();
+#endif
+ status = wl_iw_iscan_get(iscan);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ rtnl_unlock();
+#endif
+
+ switch (status) {
+ case WL_SCAN_RESULTS_PARTIAL:
+ WL_TRACE(("iscanresults incomplete\n"));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ rtnl_lock();
+#endif
+
+ wl_iw_iscan(iscan, NULL, WL_SCAN_ACTION_CONTINUE);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ rtnl_unlock();
+#endif
+
+ iscan->timer.expires = jiffies + iscan->timer_ms*HZ/1000;
+ add_timer(&iscan->timer);
+ iscan->timer_on = 1;
+ break;
+ case WL_SCAN_RESULTS_SUCCESS:
+ WL_TRACE(("iscanresults complete\n"));
+ iscan->iscan_state = ISCAN_STATE_IDLE;
+ wl_iw_send_scan_complete(iscan);
+ break;
+ case WL_SCAN_RESULTS_PENDING:
+ WL_TRACE(("iscanresults pending\n"));
+
+ iscan->timer.expires = jiffies + iscan->timer_ms*HZ/1000;
+ add_timer(&iscan->timer);
+ iscan->timer_on = 1;
+ break;
+ case WL_SCAN_RESULTS_ABORTED:
+ WL_TRACE(("iscanresults aborted\n"));
+ iscan->iscan_state = ISCAN_STATE_IDLE;
+ wl_iw_send_scan_complete(iscan);
+ break;
+ default:
+ WL_TRACE(("iscanresults returned unknown status %d\n", status));
+ break;
+ }
+ }
+ complete_and_exit(&iscan->sysioc_exited, 0);
+}
+
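+/*
+ * Allocate the global iscan state, set up its result-polling timer and
+ * start the iscan kernel thread.
+ */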
+int
+wl_iw_attach(struct net_device *dev, void * dhdp)
+{
+ iscan_info_t *iscan = NULL;
+
+ if (!dev)
+ return 0;
+
+ iscan = kmalloc(sizeof(iscan_info_t), GFP_KERNEL);
+ if (!iscan)
+ return -ENOMEM;
+ memset(iscan, 0, sizeof(iscan_info_t));
+ iscan->sysioc_pid = -1;
+
+ g_iscan = iscan;
+ iscan->dev = dev;
+ iscan->iscan_state = ISCAN_STATE_IDLE;
+
+
+
+ iscan->timer_ms = 2000;
+ init_timer(&iscan->timer);
+ iscan->timer.data = (ulong)iscan;
+ iscan->timer.function = wl_iw_timerfunc;
+
+ sema_init(&iscan->sysioc_sem, 0);
+ init_completion(&iscan->sysioc_exited);
+ iscan->sysioc_pid = kernel_thread(_iscan_sysioc_thread, iscan, 0);
+ if (iscan->sysioc_pid < 0)
+ return -ENOMEM;
+ return 0;
+}
+
+void wl_iw_detach(void)
+{
+ iscan_buf_t *buf;
+ iscan_info_t *iscan = g_iscan;
+ if (!iscan)
+ return;
+ if (iscan->sysioc_pid >= 0) {
+ KILL_PROC(iscan->sysioc_pid, SIGTERM);
+ wait_for_completion(&iscan->sysioc_exited);
+ }
+
+ while (iscan->list_hdr) {
+ buf = iscan->list_hdr->next;
+ kfree(iscan->list_hdr);
+ iscan->list_hdr = buf;
+ }
+ kfree(iscan);
+ g_iscan = NULL;
+}
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/wl_iw.h b/drivers/net/wireless/bcmdhd/wl_iw.h
new file mode 100644
index 000000000000..2afb5a683bbf
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_iw.h
@@ -0,0 +1,161 @@
+/*
+ * Linux Wireless Extensions support
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_iw.h 291086 2011-10-21 01:17:24Z $
+ */
+
+#ifndef _wl_iw_h_
+#define _wl_iw_h_
+
+#include <linux/wireless.h>
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+#include <wlioctl.h>
+
+#define WL_SCAN_PARAMS_SSID_MAX 10
+#define GET_SSID "SSID="
+#define GET_CHANNEL "CH="
+#define GET_NPROBE "NPROBE="
+#define GET_ACTIVE_ASSOC_DWELL "ACTIVE="
+#define GET_PASSIVE_ASSOC_DWELL "PASSIVE="
+#define GET_HOME_DWELL "HOME="
+#define GET_SCAN_TYPE "TYPE="
+
+#define BAND_GET_CMD "GETBAND"
+#define BAND_SET_CMD "SETBAND"
+#define DTIM_SKIP_GET_CMD "DTIMSKIPGET"
+#define DTIM_SKIP_SET_CMD "DTIMSKIPSET"
+#define SETSUSPEND_CMD "SETSUSPENDOPT"
+#define PNOSSIDCLR_SET_CMD "PNOSSIDCLR"
+
+#define PNOSETUP_SET_CMD "PNOSETUP "
+#define PNOENABLE_SET_CMD "PNOFORCE"
+#define PNODEBUG_SET_CMD "PNODEBUG"
+#define TXPOWER_SET_CMD "TXPOWER"
+
+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
+
+
+typedef struct wl_iw_extra_params {
+ int target_channel;
+} wl_iw_extra_params_t;
+
+struct cntry_locales_custom {
+ char iso_abbrev[WLC_CNTRY_BUF_SZ];
+ char custom_locale[WLC_CNTRY_BUF_SZ];
+ int32 custom_locale_rev;
+};
+
+
+#define WL_IW_RSSI_MINVAL -200
+#define WL_IW_RSSI_NO_SIGNAL -91
+#define WL_IW_RSSI_VERY_LOW -80
+#define WL_IW_RSSI_LOW -70
+#define WL_IW_RSSI_GOOD -68
+#define WL_IW_RSSI_VERY_GOOD -58
+#define WL_IW_RSSI_EXCELLENT -57
+#define WL_IW_RSSI_INVALID 0
+#define MAX_WX_STRING 80
+#define SSID_FMT_BUF_LEN ((4 * 32) + 1)
+#define isprint(c) bcm_isprint(c)
+#define WL_IW_SET_ACTIVE_SCAN (SIOCIWFIRSTPRIV+1)
+#define WL_IW_GET_RSSI (SIOCIWFIRSTPRIV+3)
+#define WL_IW_SET_PASSIVE_SCAN (SIOCIWFIRSTPRIV+5)
+#define WL_IW_GET_LINK_SPEED (SIOCIWFIRSTPRIV+7)
+#define WL_IW_GET_CURR_MACADDR (SIOCIWFIRSTPRIV+9)
+#define WL_IW_SET_STOP (SIOCIWFIRSTPRIV+11)
+#define WL_IW_SET_START (SIOCIWFIRSTPRIV+13)
+
+#define G_SCAN_RESULTS 8*1024
+#define WE_ADD_EVENT_FIX 0x80
+#define G_WLAN_SET_ON 0
+#define G_WLAN_SET_OFF 1
+
+
+typedef struct wl_iw {
+ char nickname[IW_ESSID_MAX_SIZE];
+
+ struct iw_statistics wstats;
+
+ int spy_num;
+ uint32 pwsec;
+ uint32 gwsec;
+ bool privacy_invoked;
+ struct ether_addr spy_addr[IW_MAX_SPY];
+ struct iw_quality spy_qual[IW_MAX_SPY];
+ void *wlinfo;
+} wl_iw_t;
+
+struct wl_ctrl {
+ struct timer_list *timer;
+ struct net_device *dev;
+ long sysioc_pid;
+ struct semaphore sysioc_sem;
+ struct completion sysioc_exited;
+};
+
+
+#if WIRELESS_EXT > 12
+#include <net/iw_handler.h>
+extern const struct iw_handler_def wl_iw_handler_def;
+#endif
+
+extern int wl_iw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+extern void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data);
+extern int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats);
+int wl_iw_attach(struct net_device *dev, void * dhdp);
+int wl_iw_send_priv_event(struct net_device *dev, char *flag);
+
+void wl_iw_detach(void);
+
+#define CSCAN_COMMAND "CSCAN "
+#define CSCAN_TLV_PREFIX 'S'
+#define CSCAN_TLV_VERSION 1
+#define CSCAN_TLV_SUBVERSION 0
+#define CSCAN_TLV_TYPE_SSID_IE 'S'
+#define CSCAN_TLV_TYPE_CHANNEL_IE 'C'
+#define CSCAN_TLV_TYPE_NPROBE_IE 'N'
+#define CSCAN_TLV_TYPE_ACTIVE_IE 'A'
+#define CSCAN_TLV_TYPE_PASSIVE_IE 'P'
+#define CSCAN_TLV_TYPE_HOME_IE 'H'
+#define CSCAN_TLV_TYPE_STYPE_IE 'T'
+
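+/* Compatibility wrappers: kernels 2.6.27 and later pass a struct
+ * iw_request_info pointer to the iwe_stream_add_* helpers, older
+ * kernels do not.
+ */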
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \
+ iwe_stream_add_event(info, stream, ends, iwe, extra)
+#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \
+ iwe_stream_add_value(info, event, value, ends, iwe, event_len)
+#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \
+ iwe_stream_add_point(info, stream, ends, iwe, extra)
+#else
+#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \
+ iwe_stream_add_event(stream, ends, iwe, extra)
+#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \
+ iwe_stream_add_value(event, value, ends, iwe, event_len)
+#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \
+ iwe_stream_add_point(stream, ends, iwe, extra)
+#endif
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/wl_linux_mon.c b/drivers/net/wireless/bcmdhd/wl_linux_mon.c
new file mode 100644
index 000000000000..af2586326c20
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_linux_mon.c
@@ -0,0 +1,422 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Linux monitor network interface
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_linux_mon.c 280623 2011-08-30 14:49:39Z $
+ */
+
+#include <osl.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/ieee80211.h>
+#include <linux/rtnetlink.h>
+#include <net/ieee80211_radiotap.h>
+
+#include <wlioctl.h>
+#include <bcmutils.h>
+#include <dhd_dbg.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+
+typedef enum monitor_states
+{
+ MONITOR_STATE_DEINIT = 0x0,
+ MONITOR_STATE_INIT = 0x1,
+ MONITOR_STATE_INTERFACE_ADDED = 0x2,
+ MONITOR_STATE_INTERFACE_DELETED = 0x4
+} monitor_states_t;
+int dhd_add_monitor(char *name, struct net_device **new_ndev);
+extern int dhd_start_xmit(struct sk_buff *skb, struct net_device *net);
+int dhd_del_monitor(struct net_device *ndev);
+int dhd_monitor_init(void *dhd_pub);
+int dhd_monitor_uninit(void);
+
+/**
+ * Local declarations and definitions (not exposed)
+ */
+#ifndef DHD_MAX_IFS
+#define DHD_MAX_IFS 16
+#endif
+#define MON_PRINT(format, ...) printk("DHD-MON: %s " format, __func__, ##__VA_ARGS__)
+#define MON_TRACE MON_PRINT
+
+typedef struct monitor_interface {
+ int radiotap_enabled;
+ struct net_device* real_ndev; /* The real interface that the monitor is on */
+ struct net_device* mon_ndev;
+} monitor_interface;
+
+typedef struct dhd_linux_monitor {
+ void *dhd_pub;
+ monitor_states_t monitor_state;
+ monitor_interface mon_if[DHD_MAX_IFS];
+ struct mutex lock; /* lock to protect mon_if */
+} dhd_linux_monitor_t;
+
+static dhd_linux_monitor_t g_monitor;
+
+static struct net_device* lookup_real_netdev(char *name);
+static monitor_interface* ndev_to_monif(struct net_device *ndev);
+static int dhd_mon_if_open(struct net_device *ndev);
+static int dhd_mon_if_stop(struct net_device *ndev);
+static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+static void dhd_mon_if_set_multicast_list(struct net_device *ndev);
+static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr);
+
+static const struct net_device_ops dhd_mon_if_ops = {
+ .ndo_open = dhd_mon_if_open,
+ .ndo_stop = dhd_mon_if_stop,
+ .ndo_start_xmit = dhd_mon_if_subif_start_xmit,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+ .ndo_set_rx_mode = dhd_mon_if_set_multicast_list,
+#else
+ .ndo_set_multicast_list = dhd_mon_if_set_multicast_list,
+#endif
+ .ndo_set_mac_address = dhd_mon_if_change_mac,
+};
+
+/**
+ * Local static function definitions
+ */
+
+/* Look up dhd's net device table to find a match (e.g. interface "eth0" is a match for "mon.eth0",
+ * and "p2p-eth0-0" is a match for "mon.p2p-eth0-0")
+ */
+static struct net_device* lookup_real_netdev(char *name)
+{
+ struct net_device *ndev_found = NULL;
+
+ int i;
+ int len = 0;
+ int last_name_len = 0;
+ struct net_device *ndev;
+
+ /* We need to find the interface "p2p-p2p-0" corresponding to the monitor interface "mon-p2p-0".
+ * Once the mon iface name reaches IFNAMSIZ, it is reset to "p2p0-0" and the corresponding mon
+ * iface would be "mon-p2p0-0".
+ */
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ ndev = dhd_idx2net(g_monitor.dhd_pub, i);
+
+ /* Skip "p2p" and look for "-p2p0-x" in monitor interface name. If it
+ * it matches, then this netdev is the corresponding real_netdev.
+ */
+ if (ndev && strstr(ndev->name, "p2p-p2p0")) {
+ len = strlen("p2p");
+ } else {
+ /* if p2p- is not present, then IFNAMSIZ has been reached and the name
+ * would have been reset. In this case, look for p2p0-x in mon-p2p0-x
+ */
+ len = 0;
+ }
+ if (ndev && strstr(name, (ndev->name + len))) {
+ if (strlen(ndev->name) > last_name_len) {
+ ndev_found = ndev;
+ last_name_len = strlen(ndev->name);
+ }
+ }
+ }
+
+ return ndev_found;
+}
+
+static monitor_interface* ndev_to_monif(struct net_device *ndev)
+{
+ int i;
+
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ if (g_monitor.mon_if[i].mon_ndev == ndev)
+ return &g_monitor.mon_if[i];
+ }
+
+ return NULL;
+}
+
+static int dhd_mon_if_open(struct net_device *ndev)
+{
+ int ret = 0;
+
+ MON_PRINT("enter\n");
+ return ret;
+}
+
+static int dhd_mon_if_stop(struct net_device *ndev)
+{
+ int ret = 0;
+
+ MON_PRINT("enter\n");
+ return ret;
+}
+
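+/* Transmit path for the monitor interface: strip the radiotap header,
+ * convert the 802.11 data frame to an Ethernet-style frame (two MAC
+ * addresses in front of the payload) and hand it to the real interface.
+ */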
+static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ int ret = 0;
+ int rtap_len;
+ int qos_len = 0;
+ int dot11_hdr_len = 24;
+ int snap_len = 6;
+ unsigned char *pdata;
+ unsigned short frame_ctl;
+ unsigned char src_mac_addr[6];
+ unsigned char dst_mac_addr[6];
+ struct ieee80211_hdr *dot11_hdr;
+ struct ieee80211_radiotap_header *rtap_hdr;
+ monitor_interface* mon_if;
+
+ MON_PRINT("enter\n");
+
+ mon_if = ndev_to_monif(ndev);
+ if (mon_if == NULL || mon_if->real_ndev == NULL) {
+ MON_PRINT(" cannot find matched net dev, skip the packet\n");
+ goto fail;
+ }
+
+ if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
+ goto fail;
+
+ rtap_hdr = (struct ieee80211_radiotap_header *)skb->data;
+ if (unlikely(rtap_hdr->it_version))
+ goto fail;
+
+ rtap_len = ieee80211_get_radiotap_len(skb->data);
+ if (unlikely(skb->len < rtap_len))
+ goto fail;
+
+ MON_PRINT("radiotap len (should be 14): %d\n", rtap_len);
+
+ /* Skip the radiotap header */
+ skb_pull(skb, rtap_len);
+
+ dot11_hdr = (struct ieee80211_hdr *)skb->data;
+ frame_ctl = le16_to_cpu(dot11_hdr->frame_control);
+ /* Process only 802.11 data frames */
+ if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) {
+ /* Account for a QoS control field when the QoS bit is set, and for a
+ * Wireless Distribution System (WDS) frame, which has 4 MAC addresses
+ */
+ if (dot11_hdr->frame_control & 0x0080)
+ qos_len = 2;
+ if ((dot11_hdr->frame_control & 0x0300) == 0x0300)
+ dot11_hdr_len += 6;
+
+ memcpy(dst_mac_addr, dot11_hdr->addr1, sizeof(dst_mac_addr));
+ memcpy(src_mac_addr, dot11_hdr->addr2, sizeof(src_mac_addr));
+
+ /* Skip the 802.11 header, QoS (if any) and SNAP, but leave space
+ * for two MAC addresses
+ */
+ skb_pull(skb, dot11_hdr_len + qos_len + snap_len - sizeof(src_mac_addr) * 2);
+ pdata = (unsigned char*)skb->data;
+ memcpy(pdata, dst_mac_addr, sizeof(dst_mac_addr));
+ memcpy(pdata + sizeof(dst_mac_addr), src_mac_addr, sizeof(src_mac_addr));
+ PKTSETPRIO(skb, 0);
+
+ MON_PRINT("if name: %s, matched if name %s\n", ndev->name, mon_if->real_ndev->name);
+
+ /* Use the real net device to transmit the packet */
+ ret = dhd_start_xmit(skb, mon_if->real_ndev);
+
+ return ret;
+ }
+fail:
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+static void dhd_mon_if_set_multicast_list(struct net_device *ndev)
+{
+ monitor_interface* mon_if;
+
+ mon_if = ndev_to_monif(ndev);
+ if (mon_if == NULL || mon_if->real_ndev == NULL) {
+ MON_PRINT(" cannot find matched net dev, skip the packet\n");
+ } else {
+ MON_PRINT("enter, if name: %s, matched if name %s\n",
+ ndev->name, mon_if->real_ndev->name);
+ }
+}
+
+static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr)
+{
+ int ret = 0;
+ monitor_interface* mon_if;
+
+ mon_if = ndev_to_monif(ndev);
+ if (mon_if == NULL || mon_if->real_ndev == NULL) {
+ MON_PRINT(" cannot find matched net dev, skip the packet\n");
+ } else {
+ MON_PRINT("enter, if name: %s, matched if name %s\n",
+ ndev->name, mon_if->real_ndev->name);
+ }
+ return ret;
+}
+
+/**
+ * Global function definitions (declared in dhd_linux_mon.h)
+ */
+
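+/* Create a monitor interface: allocate a radiotap netdev, register it
+ * and bind it to the matching real interface found via
+ * lookup_real_netdev().
+ */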
+int dhd_add_monitor(char *name, struct net_device **new_ndev)
+{
+ int i;
+ int idx = -1;
+ int ret = 0;
+ struct net_device* ndev = NULL;
+ dhd_linux_monitor_t **dhd_mon;
+
+ mutex_lock(&g_monitor.lock);
+
+ MON_TRACE("enter, if name: %s\n", name);
+ if (!name || !new_ndev) {
+ MON_PRINT("invalid parameters\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Find a vacancy
+ */
+ for (i = 0; i < DHD_MAX_IFS; i++)
+ if (g_monitor.mon_if[i].mon_ndev == NULL) {
+ idx = i;
+ break;
+ }
+ if (idx == -1) {
+ MON_PRINT("exceeds maximum interfaces\n");
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ndev = alloc_etherdev(sizeof(dhd_linux_monitor_t*));
+ if (!ndev) {
+ MON_PRINT("failed to allocate memory\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ndev->type = ARPHRD_IEEE80211_RADIOTAP;
+ strncpy(ndev->name, name, IFNAMSIZ);
+ ndev->name[IFNAMSIZ - 1] = 0;
+ ndev->netdev_ops = &dhd_mon_if_ops;
+
+ ret = register_netdevice(ndev);
+ if (ret) {
+ MON_PRINT(" register_netdevice failed (%d)\n", ret);
+ goto out;
+ }
+
+ *new_ndev = ndev;
+ g_monitor.mon_if[idx].radiotap_enabled = TRUE;
+ g_monitor.mon_if[idx].mon_ndev = ndev;
+ g_monitor.mon_if[idx].real_ndev = lookup_real_netdev(name);
+ dhd_mon = (dhd_linux_monitor_t **)netdev_priv(ndev);
+ *dhd_mon = &g_monitor;
+ g_monitor.monitor_state = MONITOR_STATE_INTERFACE_ADDED;
+ MON_PRINT("net device returned: 0x%p\n", ndev);
+ MON_PRINT("found a matched net device, name %s\n", g_monitor.mon_if[idx].real_ndev->name);
+
+out:
+ if (ret && ndev)
+ free_netdev(ndev);
+
+ mutex_unlock(&g_monitor.lock);
+ return ret;
+
+}
+
+int dhd_del_monitor(struct net_device *ndev)
+{
+ int i;
+ bool rollback_lock = false;
+ if (!ndev)
+ return -EINVAL;
+ mutex_lock(&g_monitor.lock);
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ if (g_monitor.mon_if[i].mon_ndev == ndev ||
+ g_monitor.mon_if[i].real_ndev == ndev) {
+ g_monitor.mon_if[i].real_ndev = NULL;
+ if (rtnl_is_locked()) {
+ rtnl_unlock();
+ rollback_lock = true;
+ }
+ unregister_netdev(g_monitor.mon_if[i].mon_ndev);
+ free_netdev(g_monitor.mon_if[i].mon_ndev);
+ g_monitor.mon_if[i].mon_ndev = NULL;
+ g_monitor.monitor_state = MONITOR_STATE_INTERFACE_DELETED;
+ break;
+ }
+ }
+ if (rollback_lock) {
+ rtnl_lock();
+ rollback_lock = false;
+ }
+
+ if (g_monitor.monitor_state !=
+ MONITOR_STATE_INTERFACE_DELETED)
+ MON_PRINT("interface not found in monitor IF array, is this a monitor IF? 0x%p\n",
+ ndev);
+ mutex_unlock(&g_monitor.lock);
+
+ return 0;
+}
+
+int dhd_monitor_init(void *dhd_pub)
+{
+ if (g_monitor.monitor_state == MONITOR_STATE_DEINIT) {
+ g_monitor.dhd_pub = dhd_pub;
+ mutex_init(&g_monitor.lock);
+ g_monitor.monitor_state = MONITOR_STATE_INIT;
+ }
+ return 0;
+}
+
+int dhd_monitor_uninit(void)
+{
+ int i;
+ struct net_device *ndev;
+ bool rollback_lock = false;
+ mutex_lock(&g_monitor.lock);
+ if (g_monitor.monitor_state != MONITOR_STATE_DEINIT) {
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ ndev = g_monitor.mon_if[i].mon_ndev;
+ if (ndev) {
+ if (rtnl_is_locked()) {
+ rtnl_unlock();
+ rollback_lock = true;
+ }
+ unregister_netdev(ndev);
+ free_netdev(ndev);
+ g_monitor.mon_if[i].real_ndev = NULL;
+ g_monitor.mon_if[i].mon_ndev = NULL;
+ if (rollback_lock) {
+ rtnl_lock();
+ rollback_lock = false;
+ }
+ }
+ }
+ g_monitor.monitor_state = MONITOR_STATE_DEINIT;
+ }
+ mutex_unlock(&g_monitor.lock);
+ return 0;
+}
diff --git a/drivers/net/wireless/bcmdhd/wldev_common.c b/drivers/net/wireless/bcmdhd/wldev_common.c
new file mode 100644
index 000000000000..f83df791e133
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wldev_common.c
@@ -0,0 +1,368 @@
+/*
+ * Common functions shared by Linux WEXT, cfg80211 and p2p drivers
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wldev_common.c,v 1.1.4.1.2.14 2011-02-09 01:40:07 $
+ */
+
+#include <osl.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+
+#include <wldev_common.h>
+#include <bcmutils.h>
+
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+#define htodchanspec(i) i
+#define dtohchanspec(i) i
+
+#define WLDEV_ERROR(args) \
+ do { \
+ printk(KERN_ERR "WLDEV-ERROR) %s : ", __func__); \
+ printk args; \
+ } while (0)
+
+extern int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd);
+
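+/* Issue a get/set IOCTL to the dongle through the DHD driver's local
+ * ioctl entry point.
+ */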
+s32 wldev_ioctl(
+ struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set)
+{
+ s32 ret = 0;
+ struct wl_ioctl ioc;
+
+
+ memset(&ioc, 0, sizeof(ioc));
+ ioc.cmd = cmd;
+ ioc.buf = arg;
+ ioc.len = len;
+ ioc.set = set;
+
+ ret = dhd_ioctl_entry_local(dev, &ioc, cmd);
+
+ return ret;
+}
+
+/* Format an iovar buffer, not bsscfg indexed. The bsscfg index will be
+ * taken care of in dhd_ioctl_entry. Internal use only, not exposed to
+ * wl_iw, wl_cfg80211 and wl_cfgp2p
+ */
+static s32 wldev_mkiovar(
+ s8 *iovar_name, s8 *param, s32 paramlen,
+ s8 *iovar_buf, u32 buflen)
+{
+ s32 iolen = 0;
+
+ iolen = bcm_mkiovar(iovar_name, param, paramlen, iovar_buf, buflen);
+ return iolen;
+}
+
+s32 wldev_iovar_getbuf(
+ struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
+{
+ s32 ret = 0;
+ if (buf_sync) {
+ mutex_lock(buf_sync);
+ }
+ wldev_mkiovar(iovar_name, param, paramlen, buf, buflen);
+ ret = wldev_ioctl(dev, WLC_GET_VAR, buf, buflen, FALSE);
+ if (buf_sync)
+ mutex_unlock(buf_sync);
+ return ret;
+}
+
+
+s32 wldev_iovar_setbuf(
+ struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
+{
+ s32 ret = 0;
+ s32 iovar_len;
+ if (buf_sync) {
+ mutex_lock(buf_sync);
+ }
+ iovar_len = wldev_mkiovar(iovar_name, param, paramlen, buf, buflen);
+ ret = wldev_ioctl(dev, WLC_SET_VAR, buf, iovar_len, TRUE);
+ if (buf_sync)
+ mutex_unlock(buf_sync);
+ return ret;
+}
+
+s32 wldev_iovar_setint(
+ struct net_device *dev, s8 *iovar, s32 val)
+{
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+
+ val = htod32(val);
+ memset(iovar_buf, 0, sizeof(iovar_buf));
+ return wldev_iovar_setbuf(dev, iovar, &val, sizeof(val), iovar_buf,
+ sizeof(iovar_buf), NULL);
+}
+
+
+s32 wldev_iovar_getint(
+ struct net_device *dev, s8 *iovar, s32 *pval)
+{
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ s32 err;
+
+ memset(iovar_buf, 0, sizeof(iovar_buf));
+ err = wldev_iovar_getbuf(dev, iovar, pval, sizeof(*pval), iovar_buf,
+ sizeof(iovar_buf), NULL);
+ if (err == 0)
+ {
+ memcpy(pval, iovar_buf, sizeof(*pval));
+ *pval = dtoh32(*pval);
+ }
+ return err;
+}
+
+/** Format a bsscfg indexed iovar buffer. The bsscfg index will be
+ * taken care of in dhd_ioctl_entry. Internal use only, not exposed to
+ * wl_iw, wl_cfg80211 and wl_cfgp2p
+ */
+s32 wldev_mkiovar_bsscfg(
+ const s8 *iovar_name, s8 *param, s32 paramlen,
+ s8 *iovar_buf, s32 buflen, s32 bssidx)
+{
+ const s8 *prefix = "bsscfg:";
+ s8 *p;
+ u32 prefixlen;
+ u32 namelen;
+ u32 iolen;
+
+ if (bssidx == 0) {
+ return wldev_mkiovar((s8*)iovar_name, (s8 *)param, paramlen,
+ (s8 *) iovar_buf, buflen);
+ }
+
+ prefixlen = (u32) strlen(prefix); /* length of bsscfg prefix */
+ namelen = (u32) strlen(iovar_name) + 1; /* length of iovar name + null */
+ iolen = prefixlen + namelen + sizeof(u32) + paramlen;
+
+ if (buflen < 0 || iolen > (u32)buflen)
+ {
+ WLDEV_ERROR(("%s: buffer is too short\n", __FUNCTION__));
+ return BCME_BUFTOOSHORT;
+ }
+
+ p = (s8 *)iovar_buf;
+
+ /* copy prefix, no null */
+ memcpy(p, prefix, prefixlen);
+ p += prefixlen;
+
+ /* copy iovar name including null */
+ memcpy(p, iovar_name, namelen);
+ p += namelen;
+
+ /* bss config index as first param */
+ bssidx = htod32(bssidx);
+ memcpy(p, &bssidx, sizeof(u32));
+ p += sizeof(u32);
+
+ /* parameter buffer follows */
+ if (paramlen)
+ memcpy(p, param, paramlen);
+
+ return iolen;
+
+}
+
+s32 wldev_iovar_getbuf_bsscfg(
+ struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync)
+{
+ s32 ret = 0;
+ if (buf_sync) {
+ mutex_lock(buf_sync);
+ }
+
+ wldev_mkiovar_bsscfg(iovar_name, param, paramlen, buf, buflen, bsscfg_idx);
+ ret = wldev_ioctl(dev, WLC_GET_VAR, buf, buflen, FALSE);
+ if (buf_sync) {
+ mutex_unlock(buf_sync);
+ }
+ return ret;
+
+}
+
+s32 wldev_iovar_setbuf_bsscfg(
+ struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync)
+{
+ s32 ret = 0;
+ s32 iovar_len;
+ if (buf_sync) {
+ mutex_lock(buf_sync);
+ }
+ iovar_len = wldev_mkiovar_bsscfg(iovar_name, param, paramlen, buf, buflen, bsscfg_idx);
+ ret = wldev_ioctl(dev, WLC_SET_VAR, buf, iovar_len, TRUE);
+ if (buf_sync) {
+ mutex_unlock(buf_sync);
+ }
+ return ret;
+}
+
+s32 wldev_iovar_setint_bsscfg(
+ struct net_device *dev, s8 *iovar, s32 val, s32 bssidx)
+{
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+
+ val = htod32(val);
+ memset(iovar_buf, 0, sizeof(iovar_buf));
+ return wldev_iovar_setbuf_bsscfg(dev, iovar, &val, sizeof(val), iovar_buf,
+ sizeof(iovar_buf), bssidx, NULL);
+}
+
+
+s32 wldev_iovar_getint_bsscfg(
+ struct net_device *dev, s8 *iovar, s32 *pval, s32 bssidx)
+{
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ s32 err;
+
+ memset(iovar_buf, 0, sizeof(iovar_buf));
+ err = wldev_iovar_getbuf_bsscfg(dev, iovar, pval, sizeof(*pval), iovar_buf,
+ sizeof(iovar_buf), bssidx, NULL);
+ if (err == 0)
+ {
+ memcpy(pval, iovar_buf, sizeof(*pval));
+ *pval = dtoh32(*pval);
+ }
+ return err;
+}
+
+int wldev_get_link_speed(
+ struct net_device *dev, int *plink_speed)
+{
+ int error;
+
+ if (!plink_speed)
+ return -ENOMEM;
+ error = wldev_ioctl(dev, WLC_GET_RATE, plink_speed, sizeof(int), 0);
+ if (unlikely(error))
+ return error;
+
+ /* Convert from internal units of 500 kbps to kbps */
+ *plink_speed *= 500;
+ return error;
+}
+
+int wldev_get_rssi(
+ struct net_device *dev, int *prssi)
+{
+ scb_val_t scb_val;
+ int error;
+
+ if (!prssi)
+ return -ENOMEM;
+ bzero(&scb_val, sizeof(scb_val_t));
+
+ error = wldev_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t), 0);
+ if (unlikely(error))
+ return error;
+
+ *prssi = dtoh32(scb_val.val);
+ return error;
+}
+
+int wldev_get_ssid(
+ struct net_device *dev, wlc_ssid_t *pssid)
+{
+ int error;
+
+ if (!pssid)
+ return -ENOMEM;
+ error = wldev_ioctl(dev, WLC_GET_SSID, pssid, sizeof(wlc_ssid_t), 0);
+ if (unlikely(error))
+ return error;
+ pssid->SSID_len = dtoh32(pssid->SSID_len);
+ return error;
+}
+
+int wldev_get_band(
+ struct net_device *dev, uint *pband)
+{
+ int error;
+
+ error = wldev_ioctl(dev, WLC_GET_BAND, pband, sizeof(uint), 0);
+ return error;
+}
+
+int wldev_set_band(
+ struct net_device *dev, uint band)
+{
+ int error = -1;
+
+ if ((band == WLC_BAND_AUTO) || (band == WLC_BAND_5G) || (band == WLC_BAND_2G)) {
+ error = wldev_ioctl(dev, WLC_SET_BAND, &band, sizeof(band), 1);
+ }
+ return error;
+}
+
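+/* Set the country code: read the current "country" iovar, disassociate
+ * if the code is changing, map the ISO code to a customized locale and
+ * write the new country spec back to the dongle.
+ */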
+int wldev_set_country(
+ struct net_device *dev, char *country_code)
+{
+ int error = -1;
+ wl_country_t cspec = {{0}, 0, {0}};
+ scb_val_t scbval;
+ char smbuf[WLC_IOCTL_SMLEN];
+
+ if (!country_code)
+ return error;
+
+ error = wldev_iovar_getbuf(dev, "country", &cspec, sizeof(cspec),
+ smbuf, sizeof(smbuf), NULL);
+ if (error < 0)
+ WLDEV_ERROR(("%s: get country failed = %d\n", __FUNCTION__, error));
+
+ if ((error < 0) ||
+ (strncmp(country_code, smbuf, WLC_CNTRY_BUF_SZ) != 0)) {
+ bzero(&scbval, sizeof(scb_val_t));
+ error = wldev_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t), 1);
+ if (error < 0) {
+ WLDEV_ERROR(("%s: set country failed due to Disassoc error %d\n",
+ __FUNCTION__, error));
+ return error;
+ }
+ }
+ cspec.rev = -1;
+ memcpy(cspec.country_abbrev, country_code, WLC_CNTRY_BUF_SZ);
+ memcpy(cspec.ccode, country_code, WLC_CNTRY_BUF_SZ);
+ get_customized_country_code((char *)&cspec.country_abbrev, &cspec);
+ error = wldev_iovar_setbuf(dev, "country", &cspec, sizeof(cspec),
+ smbuf, sizeof(smbuf), NULL);
+ if (error < 0) {
+ WLDEV_ERROR(("%s: set country for %s as %s rev %d failed\n",
+ __FUNCTION__, country_code, cspec.ccode, cspec.rev));
+ return error;
+ }
+ dhd_bus_country_set(dev, &cspec);
+ WLDEV_ERROR(("%s: set country for %s as %s rev %d\n",
+ __FUNCTION__, country_code, cspec.ccode, cspec.rev));
+ return 0;
+}
diff --git a/drivers/net/wireless/bcmdhd/wldev_common.h b/drivers/net/wireless/bcmdhd/wldev_common.h
new file mode 100644
index 000000000000..16a071f5e5a6
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wldev_common.h
@@ -0,0 +1,110 @@
+/*
+ * Common functions shared by Linux WEXT, cfg80211 and p2p drivers
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wldev_common.h,v 1.1.4.1.2.14 2011-02-09 01:40:07 $
+ */
+#ifndef __WLDEV_COMMON_H__
+#define __WLDEV_COMMON_H__
+
+#include <wlioctl.h>
+
+/* wldev_ioctl - get/set IOCTLs, will call net_device's do_ioctl (or
+ * netdev_ops->ndo_do_ioctl in new kernels)
+ * @dev: the net_device handle
+ */
+s32 wldev_ioctl(
+ struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set);
+
+/** Retrieve named IOVARs; this function calls wldev_ioctl with the
+ * WLC_GET_VAR IOCTL code
+ */
+s32 wldev_iovar_getbuf(
+ struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync);
+
+/** Set named IOVARs; this function calls wldev_ioctl with the
+ * WLC_SET_VAR IOCTL code
+ */
+s32 wldev_iovar_setbuf(
+ struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync);
+
+s32 wldev_iovar_setint(
+ struct net_device *dev, s8 *iovar, s32 val);
+
+s32 wldev_iovar_getint(
+ struct net_device *dev, s8 *iovar, s32 *pval);
+
+/** The following functions can be used if there is a need for bsscfg
+ * indexed IOVARs
+ */
+
+s32 wldev_mkiovar_bsscfg(
+ const s8 *iovar_name, s8 *param, s32 paramlen,
+ s8 *iovar_buf, s32 buflen, s32 bssidx);
+
+/** Retrieve named and bsscfg indexed IOVARs; this function calls wldev_ioctl with the
+ * WLC_GET_VAR IOCTL code
+ */
+s32 wldev_iovar_getbuf_bsscfg(
+ struct net_device *dev, s8 *iovar_name, void *param, s32 paramlen,
+ void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync);
+
+/** Set named and bsscfg indexed IOVARs; this function calls wldev_ioctl with the
+ * WLC_SET_VAR IOCTL code
+ */
+s32 wldev_iovar_setbuf_bsscfg(
+ struct net_device *dev, s8 *iovar_name, void *param, s32 paramlen,
+ void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync);
+
+s32 wldev_iovar_getint_bsscfg(
+ struct net_device *dev, s8 *iovar, s32 *pval, s32 bssidx);
+
+s32 wldev_iovar_setint_bsscfg(
+ struct net_device *dev, s8 *iovar, s32 val, s32 bssidx);
+
+extern void get_customized_country_code(char *country_iso_code, wl_country_t *cspec);
+extern void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec);
+extern int wldev_set_country(struct net_device *dev, char *country_code);
+extern int net_os_wake_lock(struct net_device *dev);
+extern int net_os_wake_unlock(struct net_device *dev);
+extern int net_os_wake_lock_timeout(struct net_device *dev);
+extern int net_os_wake_lock_timeout_enable(struct net_device *dev, int val);
+extern int net_os_set_dtim_skip(struct net_device *dev, int val);
+extern int net_os_set_suspend_disable(struct net_device *dev, int val);
+extern int net_os_set_suspend(struct net_device *dev, int val);
+extern int wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid,
+ int max, int *bytes_left);
+
+/* Get the link speed from the dongle; speed is in kbps */
+int wldev_get_link_speed(struct net_device *dev, int *plink_speed);
+
+int wldev_get_rssi(struct net_device *dev, int *prssi);
+
+int wldev_get_ssid(struct net_device *dev, wlc_ssid_t *pssid);
+
+int wldev_get_band(struct net_device *dev, uint *pband);
+
+int wldev_set_band(struct net_device *dev, uint band);
+
+#endif /* __WLDEV_COMMON_H__ */
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index d604b4036a76..3726cd6fcd75 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -3273,7 +3273,7 @@ il3945_store_measurement(struct device *d, struct device_attribute *attr,
if (count) {
char *p = buffer;
- strncpy(buffer, buf, min(sizeof(buffer), count));
+ strlcpy(buffer, buf, sizeof(buffer));
channel = simple_strtoul(p, NULL, 0);
if (channel)
params.channel = channel;
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index da21328ca8ed..a790599fe2c2 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -1151,13 +1151,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
next_reclaimed = ssn;
}
- if (tid != IWL_TID_NON_QOS) {
- priv->tid_data[sta_id][tid].next_reclaimed =
- next_reclaimed;
- IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
- next_reclaimed);
- }
-
iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
iwlagn_check_ratid_empty(priv, sta_id, tid);
@@ -1208,11 +1201,28 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
if (!is_agg)
iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
+ /*
+ * W/A for FW bug - the seq_ctl isn't updated when the
+ * queues are flushed. Fetch it from the packet itself
+ */
+ if (!is_agg && status == TX_STATUS_FAIL_FIFO_FLUSHED) {
+ next_reclaimed = le16_to_cpu(hdr->seq_ctrl);
+ next_reclaimed =
+ SEQ_TO_SN(next_reclaimed + 0x10);
+ }
+
is_offchannel_skb =
(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
freed++;
}
+ if (tid != IWL_TID_NON_QOS) {
+ priv->tid_data[sta_id][tid].next_reclaimed =
+ next_reclaimed;
+ IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
+ next_reclaimed);
+ }
+
WARN_ON(!is_agg && freed != 1);
/*
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index dad4c4aad91f..8389cd38338b 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -1166,6 +1166,7 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
!trans_pcie->inta)
iwl_enable_interrupts(trans);
+ return IRQ_HANDLED;
none:
/* re-enable interrupts here since we don't have anything to service. */
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index a875499f8945..efe525be27dd 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -1709,7 +1709,7 @@ static int mwifiex_set_ibss_params(struct mwifiex_private *priv,
NL80211_CHAN_NO_HT)
config_bands |= BAND_GN;
} else {
- if (cfg80211_get_chandef_type(&params->chandef) !=
+ if (cfg80211_get_chandef_type(&params->chandef) ==
NL80211_CHAN_NO_HT)
config_bands = BAND_A;
else
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index cb682561c438..60e88b58039d 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -56,7 +56,6 @@ int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist,
*/
int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
{
- bool cancel_flag = false;
int status;
struct cmd_ctrl_node *cmd_queued;
@@ -70,14 +69,11 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
atomic_inc(&adapter->cmd_pending);
/* Wait for completion */
- wait_event_interruptible(adapter->cmd_wait_q.wait,
- *(cmd_queued->condition));
- if (!*(cmd_queued->condition))
- cancel_flag = true;
-
- if (cancel_flag) {
- mwifiex_cancel_pending_ioctl(adapter);
- dev_dbg(adapter->dev, "cmd cancel\n");
+ status = wait_event_interruptible(adapter->cmd_wait_q.wait,
+ *(cmd_queued->condition));
+ if (status) {
+ dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status);
+ return status;
}
status = adapter->cmd_wait_q.status;
@@ -496,8 +492,11 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
return false;
}
- wait_event_interruptible(adapter->hs_activate_wait_q,
- adapter->hs_activate_wait_q_woken);
+ if (wait_event_interruptible(adapter->hs_activate_wait_q,
+ adapter->hs_activate_wait_q_woken)) {
+ dev_err(adapter->dev, "hs_activate_wait_q terminated\n");
+ return false;
+ }
return true;
}
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index f221b95b90b3..83564d36e801 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -4250,9 +4250,11 @@ static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw,
p->amsdu_enabled = 0;
rc = mwl8k_post_cmd(hw, &cmd->header);
+ if (!rc)
+ rc = p->station_id;
kfree(cmd);
- return rc ? rc : p->station_id;
+ return rc;
}
static int mwl8k_cmd_update_stadb_del(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
index 1d5d3604e3e0..246e5352f2e1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -692,7 +692,7 @@ u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw)
if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl92c_phy_sw_chnl_callback(hw);
RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false schdule workitem\n");
+ "sw_chnl_inprogress false schedule workitem\n");
rtlphy->sw_chnl_inprogress = false;
} else {
RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
index 39cc7938eedf..3d8536bb0d2b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
@@ -1106,7 +1106,7 @@ u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw)
if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl8723ae_phy_sw_chnl_callback(hw);
RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false schdule workitem\n");
+ "sw_chnl_inprogress false schedule workitem\n");
rtlphy->sw_chnl_inprogress = false;
} else {
RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index bafd2bbcaf65..c18e5bf444fa 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -739,7 +739,7 @@ EXPORT_SYMBOL_GPL(pci_num_vf);
/**
* pci_sriov_set_totalvfs -- reduce the TotalVFs available
* @dev: the PCI PF device
- * numvfs: number that should be used for TotalVFs supported
+ * @numvfs: number that should be used for TotalVFs supported
*
* Should be called from PF driver's probe routine with
* device's mutex held.
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 06f4eb7ab87e..afed7018a2b5 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -125,8 +125,11 @@ static const struct key_entry acer_wmi_keymap[] = {
{KE_IGNORE, 0x63, {KEY_BRIGHTNESSDOWN} },
{KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */
{KE_IGNORE, 0x81, {KEY_SLEEP} },
- {KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} }, /* Touch Pad On/Off */
+ {KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} }, /* Touch Pad Toggle */
+ {KE_KEY, KEY_TOUCHPAD_ON, {KEY_TOUCHPAD_ON} },
+ {KE_KEY, KEY_TOUCHPAD_OFF, {KEY_TOUCHPAD_OFF} },
{KE_IGNORE, 0x83, {KEY_TOUCHPAD_TOGGLE} },
+ {KE_KEY, 0x85, {KEY_TOUCHPAD_TOGGLE} },
{KE_END, 0}
};
@@ -147,6 +150,7 @@ struct event_return_value {
#define ACER_WMID3_GDS_THREEG (1<<6) /* 3G */
#define ACER_WMID3_GDS_WIMAX (1<<7) /* WiMAX */
#define ACER_WMID3_GDS_BLUETOOTH (1<<11) /* BT */
+#define ACER_WMID3_GDS_TOUCHPAD (1<<1) /* Touchpad */
struct lm_input_params {
u8 function_num; /* Function Number */
@@ -875,7 +879,7 @@ WMI_execute_u32(u32 method_id, u32 in, u32 *out)
struct acpi_buffer input = { (acpi_size) sizeof(u32), (void *)(&in) };
struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
- u32 tmp;
+ u32 tmp = 0;
acpi_status status;
status = wmi_evaluate_method(WMID_GUID1, 1, method_id, &input, &result);
@@ -884,14 +888,14 @@ WMI_execute_u32(u32 method_id, u32 in, u32 *out)
return status;
obj = (union acpi_object *) result.pointer;
- if (obj && obj->type == ACPI_TYPE_BUFFER &&
- (obj->buffer.length == sizeof(u32) ||
- obj->buffer.length == sizeof(u64))) {
- tmp = *((u32 *) obj->buffer.pointer);
- } else if (obj->type == ACPI_TYPE_INTEGER) {
- tmp = (u32) obj->integer.value;
- } else {
- tmp = 0;
+ if (obj) {
+ if (obj->type == ACPI_TYPE_BUFFER &&
+ (obj->buffer.length == sizeof(u32) ||
+ obj->buffer.length == sizeof(u64))) {
+ tmp = *((u32 *) obj->buffer.pointer);
+ } else if (obj->type == ACPI_TYPE_INTEGER) {
+ tmp = (u32) obj->integer.value;
+ }
}
if (out)
@@ -1193,12 +1197,14 @@ static acpi_status WMID_set_capabilities(void)
return status;
obj = (union acpi_object *) out.pointer;
- if (obj && obj->type == ACPI_TYPE_BUFFER &&
- (obj->buffer.length == sizeof(u32) ||
- obj->buffer.length == sizeof(u64))) {
- devices = *((u32 *) obj->buffer.pointer);
- } else if (obj->type == ACPI_TYPE_INTEGER) {
- devices = (u32) obj->integer.value;
+ if (obj) {
+ if (obj->type == ACPI_TYPE_BUFFER &&
+ (obj->buffer.length == sizeof(u32) ||
+ obj->buffer.length == sizeof(u64))) {
+ devices = *((u32 *) obj->buffer.pointer);
+ } else if (obj->type == ACPI_TYPE_INTEGER) {
+ devices = (u32) obj->integer.value;
+ }
} else {
kfree(out.pointer);
return AE_ERROR;
@@ -1676,6 +1682,7 @@ static void acer_wmi_notify(u32 value, void *context)
acpi_status status;
u16 device_state;
const struct key_entry *key;
+ u32 scancode;
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
@@ -1712,6 +1719,7 @@ static void acer_wmi_notify(u32 value, void *context)
pr_warn("Unknown key number - 0x%x\n",
return_value.key_num);
} else {
+ scancode = return_value.key_num;
switch (key->keycode) {
case KEY_WLAN:
case KEY_BLUETOOTH:
@@ -1725,9 +1733,11 @@ static void acer_wmi_notify(u32 value, void *context)
rfkill_set_sw_state(bluetooth_rfkill,
!(device_state & ACER_WMID3_GDS_BLUETOOTH));
break;
+ case KEY_TOUCHPAD_TOGGLE:
+ scancode = (device_state & ACER_WMID3_GDS_TOUCHPAD) ?
+ KEY_TOUCHPAD_ON : KEY_TOUCHPAD_OFF;
}
- sparse_keymap_report_entry(acer_wmi_input_dev, key,
- 1, true);
+ sparse_keymap_report_event(acer_wmi_input_dev, scancode, 1, true);
}
break;
case WMID_ACCEL_EVENT:
@@ -1946,12 +1956,14 @@ static u32 get_wmid_devices(void)
return 0;
obj = (union acpi_object *) out.pointer;
- if (obj && obj->type == ACPI_TYPE_BUFFER &&
- (obj->buffer.length == sizeof(u32) ||
- obj->buffer.length == sizeof(u64))) {
- devices = *((u32 *) obj->buffer.pointer);
- } else if (obj->type == ACPI_TYPE_INTEGER) {
- devices = (u32) obj->integer.value;
+ if (obj) {
+ if (obj->type == ACPI_TYPE_BUFFER &&
+ (obj->buffer.length == sizeof(u32) ||
+ obj->buffer.length == sizeof(u64))) {
+ devices = *((u32 *) obj->buffer.pointer);
+ } else if (obj->type == ACPI_TYPE_INTEGER) {
+ devices = (u32) obj->integer.value;
+ }
}
kfree(out.pointer);
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index ec1d3bc2dbe2..fcde4e528819 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -860,8 +860,10 @@ static ssize_t show_infos(struct device *dev,
/*
* The HWRS method return informations about the hardware.
* 0x80 bit is for WLAN, 0x100 for Bluetooth.
+ * 0x40 for WWAN, 0x10 for WIMAX.
* The significance of others is yet to be found.
- * If we don't find the method, we assume the device are present.
+ * We don't currently use this for device detection, and it
+ * takes several seconds to run on some systems.
*/
rv = acpi_evaluate_integer(asus->handle, "HWRS", NULL, &temp);
if (!ACPI_FAILURE(rv))
@@ -1682,7 +1684,7 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *model = NULL;
- unsigned long long bsts_result, hwrs_result;
+ unsigned long long bsts_result;
char *string = NULL;
acpi_status status;
@@ -1741,20 +1743,9 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
return -ENOMEM;
}
- if (*string)
+ if (string)
pr_notice(" %s model detected\n", string);
- /*
- * The HWRS method return informations about the hardware.
- * 0x80 bit is for WLAN, 0x100 for Bluetooth,
- * 0x40 for WWAN, 0x10 for WIMAX.
- * The significance of others is yet to be found.
- */
- status =
- acpi_evaluate_integer(asus->handle, "HWRS", NULL, &hwrs_result);
- if (!ACPI_FAILURE(status))
- pr_notice(" HWRS returned %x", (int)hwrs_result);
-
if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL))
asus->have_rsts = true;
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index dd90d15f5210..71623a2ff3e8 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -1523,6 +1523,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
},
.driver_data = &samsung_broken_acpi_video,
},
+ {
+ .callback = samsung_dmi_matched,
+ .ident = "N250P",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N250P"),
+ DMI_MATCH(DMI_BOARD_NAME, "N250P"),
+ },
+ .driver_data = &samsung_broken_acpi_video,
+ },
{ },
};
MODULE_DEVICE_TABLE(dmi, samsung_dmi_table);
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index daaddec68def..b8ad71f7863f 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -786,28 +786,29 @@ static int sony_nc_int_call(acpi_handle handle, char *name, int *value,
static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value,
void *buffer, size_t buflen)
{
+ int ret = 0;
size_t len = len;
union acpi_object *object = __call_snc_method(handle, name, value);
if (!object)
return -EINVAL;
- if (object->type == ACPI_TYPE_BUFFER)
+ if (object->type == ACPI_TYPE_BUFFER) {
len = MIN(buflen, object->buffer.length);
+ memcpy(buffer, object->buffer.pointer, len);
- else if (object->type == ACPI_TYPE_INTEGER)
+ } else if (object->type == ACPI_TYPE_INTEGER) {
len = MIN(buflen, sizeof(object->integer.value));
+ memcpy(buffer, &object->integer.value, len);
- else {
+ } else {
pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n",
ACPI_TYPE_BUFFER, object->type);
- kfree(object);
- return -EINVAL;
+ ret = -EINVAL;
}
- memcpy(buffer, object->buffer.pointer, len);
kfree(object);
- return 0;
+ return ret;
}
struct sony_nc_handles {
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 9f45e2f77d53..85364b0e6688 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -203,6 +203,16 @@ config BATTERY_MAX17042
with MAX17042. This driver also supports max17047/50 chips which are
improved version of max17042.
+config BATTERY_ANDROID
+ tristate "Battery driver for Android"
+ help
+ Say Y to enable generic support for battery charging according
+ to common Android policies.
+ This driver adds periodic battery level and health monitoring,
+ kernel log reporting and other debugging features, common board
+ battery file glue logic for battery/case temperature sensors,
+ etc.
+
config BATTERY_Z2
tristate "Z2 battery driver"
depends on I2C && MACH_ZIPIT2
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 22c8913382c0..0bfa588d4c22 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -51,4 +51,6 @@ obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
obj-$(CONFIG_CHARGER_BQ2415X) += bq2415x_charger.o
obj-$(CONFIG_POWER_AVS) += avs/
obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o
+obj-$(CONFIG_BATTERY_ANDROID) += android_battery.o
obj-$(CONFIG_POWER_RESET) += reset/
+
diff --git a/drivers/power/android_battery.c b/drivers/power/android_battery.c
new file mode 100644
index 000000000000..dbba090a51e1
--- /dev/null
+++ b/drivers/power/android_battery.c
@@ -0,0 +1,767 @@
+/*
+ * android_battery.c
+ * Android Battery Driver
+ *
+ * Copyright (C) 2012 Google, Inc.
+ * Copyright (C) 2012 Samsung Electronics
+ *
+ * Based on work by himihee.seo@samsung.com, ms925.kim@samsung.com, and
+ * joshua.chang@samsung.com.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+#include <linux/wakelock.h>
+#include <linux/workqueue.h>
+#include <linux/alarmtimer.h>
+#include <linux/timer.h>
+#include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include <linux/platform_data/android_battery.h>
+
+#define FAST_POLL (1 * 60)
+#define SLOW_POLL (10 * 60)
+
+struct android_bat_data {
+ struct android_bat_platform_data *pdata;
+ struct android_bat_callbacks callbacks;
+
+ struct device *dev;
+
+ struct power_supply psy_bat;
+ struct power_supply psy_usb;
+ struct power_supply psy_ac;
+
+ struct wake_lock monitor_wake_lock;
+ struct wake_lock charger_wake_lock;
+
+ int charge_source;
+
+ int batt_temp;
+ int batt_current;
+ unsigned int batt_health;
+ unsigned int batt_vcell;
+ unsigned int batt_soc;
+ unsigned int charging_status;
+ bool recharging;
+ unsigned long charging_start_time;
+
+ struct workqueue_struct *monitor_wqueue;
+ struct work_struct monitor_work;
+ struct work_struct charger_work;
+
+ struct alarm monitor_alarm;
+ ktime_t last_poll;
+
+ struct dentry *debugfs_entry;
+};
+
+static char *supply_list[] = {
+ "android-battery",
+};
+
+static enum power_supply_property android_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+};
+
+static enum power_supply_property android_power_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static DEFINE_MUTEX(android_bat_state_lock);
+
+static void android_bat_update_data(struct android_bat_data *battery);
+static int android_bat_enable_charging(struct android_bat_data *battery,
+ bool enable);
+
+static char *charge_source_str(int charge_source)
+{
+ switch (charge_source) {
+ case CHARGE_SOURCE_NONE:
+ return "none";
+ case CHARGE_SOURCE_AC:
+ return "ac";
+ case CHARGE_SOURCE_USB:
+ return "usb";
+ default:
+ break;
+ }
+
+ return "?";
+}
+
+static int android_bat_get_property(struct power_supply *ps,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct android_bat_data *battery =
+ container_of(ps, struct android_bat_data, psy_bat);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = battery->charging_status;
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = battery->batt_health;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = battery->batt_temp;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ android_bat_update_data(battery);
+ val->intval = battery->batt_vcell;
+ if (val->intval == -1)
+ return -EINVAL;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = battery->batt_soc;
+ if (val->intval == -1)
+ return -EINVAL;
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ android_bat_update_data(battery);
+ val->intval = battery->batt_current;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int android_usb_get_property(struct power_supply *ps,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct android_bat_data *battery = container_of(ps,
+ struct android_bat_data, psy_usb);
+
+ if (psp != POWER_SUPPLY_PROP_ONLINE)
+ return -EINVAL;
+
+ val->intval = (battery->charge_source == CHARGE_SOURCE_USB);
+
+ return 0;
+}
+
+static int android_ac_get_property(struct power_supply *ps,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct android_bat_data *battery = container_of(ps,
+ struct android_bat_data, psy_ac);
+
+ if (psp != POWER_SUPPLY_PROP_ONLINE)
+ return -EINVAL;
+
+ val->intval = (battery->charge_source == CHARGE_SOURCE_AC);
+
+ return 0;
+}
+
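+/* Read the battery temperature and, while a charger is connected, move
+ * battery health between GOOD, OVERHEAT and COLD based on the platform
+ * threshold and recovery limits.
+ */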
+static void android_bat_get_temp(struct android_bat_data *battery)
+{
+ int batt_temp = 42; /* 4.2C */
+ int health = battery->batt_health;
+
+ if (battery->pdata->get_temperature)
+ battery->pdata->get_temperature(&batt_temp);
+
+ if (battery->charge_source != CHARGE_SOURCE_NONE) {
+ if (batt_temp >= battery->pdata->temp_high_threshold) {
+ if (health != POWER_SUPPLY_HEALTH_OVERHEAT &&
+ health != POWER_SUPPLY_HEALTH_UNSPEC_FAILURE) {
+ pr_info("battery overheat (%d>=%d), " \
+ "charging unavailable\n",
+ batt_temp,
+ battery->pdata->temp_high_threshold);
+ battery->batt_health =
+ POWER_SUPPLY_HEALTH_OVERHEAT;
+ }
+ } else if (batt_temp <= battery->pdata->temp_high_recovery &&
+ batt_temp >= battery->pdata->temp_low_recovery) {
+ if (health == POWER_SUPPLY_HEALTH_OVERHEAT ||
+ health == POWER_SUPPLY_HEALTH_COLD) {
+ pr_info("battery recovery (%d,%d~%d)," \
+ "charging available\n",
+ batt_temp,
+ battery->pdata->temp_low_recovery,
+ battery->pdata->temp_high_recovery);
+ battery->batt_health =
+ POWER_SUPPLY_HEALTH_GOOD;
+ }
+ } else if (batt_temp <= battery->pdata->temp_low_threshold) {
+ if (health != POWER_SUPPLY_HEALTH_COLD &&
+ health != POWER_SUPPLY_HEALTH_UNSPEC_FAILURE) {
+ pr_info("battery cold (%d <= %d)," \
+ "charging unavailable\n",
+ batt_temp,
+ battery->pdata->temp_low_threshold);
+ battery->batt_health =
+ POWER_SUPPLY_HEALTH_COLD;
+ }
+ }
+ }
+
+ battery->batt_temp = batt_temp;
+}
+
+/*
+ * android_bat_state_lock not held, may call back into
+ * android_bat_charge_source_changed. Gathering data here can be
+ * non-atomic; updating our state based on the data may need to be
+ * atomic.
+ */
+
+static void android_bat_update_data(struct android_bat_data *battery)
+{
+ int ret;
+ int v;
+
+ if (battery->pdata->poll_charge_source)
+ battery->charge_source = battery->pdata->poll_charge_source();
+
+ if (battery->pdata->get_voltage_now) {
+ ret = battery->pdata->get_voltage_now();
+ battery->batt_vcell = ret >= 0 ? ret : 4242000;
+ }
+
+ if (battery->pdata->get_capacity) {
+ ret = battery->pdata->get_capacity();
+ battery->batt_soc = ret >= 0 ? ret : 42;
+ }
+
+ if (battery->pdata->get_current_now) {
+ ret = battery->pdata->get_current_now(&v);
+
+ if (!ret)
+ battery->batt_current = v;
+ }
+
+ android_bat_get_temp(battery);
+}
+
+static void android_bat_set_charge_time(struct android_bat_data *battery,
+ bool enable)
+{
+ if (enable && !battery->charging_start_time) {
+ struct timespec cur_time;
+
+ get_monotonic_boottime(&cur_time);
+ /* record start time for charge timeout timer */
+ battery->charging_start_time = cur_time.tv_sec;
+ } else if (!enable) {
+ /* clear charge timeout timer */
+ battery->charging_start_time = 0;
+ }
+}
+
+static int android_bat_enable_charging(struct android_bat_data *battery,
+ bool enable)
+{
+ if (enable && (battery->batt_health != POWER_SUPPLY_HEALTH_GOOD)) {
+ battery->charging_status =
+ POWER_SUPPLY_STATUS_NOT_CHARGING;
+ return -EPERM;
+ }
+
+ if (enable) {
+ if (battery->pdata && battery->pdata->set_charging_current)
+ battery->pdata->set_charging_current
+ (battery->charge_source);
+ }
+
+ if (battery->pdata && battery->pdata->set_charging_enable)
+ battery->pdata->set_charging_enable(enable);
+
+ android_bat_set_charge_time(battery, enable);
+ pr_info("battery: enable=%d charger: %s\n", enable,
+ charge_source_str(battery->charge_source));
+ return 0;
+}
+
+static bool android_bat_charge_timeout(struct android_bat_data *battery,
+ unsigned long timeout)
+{
+ struct timespec cur_time;
+
+ if (!battery->charging_start_time)
+ return 0;
+
+ get_monotonic_boottime(&cur_time);
+ pr_debug("%s: Start time: %ld, End time: %ld, current time: %ld\n",
+ __func__, battery->charging_start_time,
+ battery->charging_start_time + timeout,
+ cur_time.tv_sec);
+ return cur_time.tv_sec >= battery->charging_start_time + timeout;
+}
+
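+/* Enforce the charge timeout: once full_charging_time (or
+ * recharging_time when topping off) has elapsed, stop charging and mark
+ * the battery full if it is above the recharge voltage at 100%.
+ */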
+static void android_bat_charging_timer(struct android_bat_data *battery)
+{
+ if (!battery->charging_start_time &&
+ battery->charging_status == POWER_SUPPLY_STATUS_CHARGING) {
+ android_bat_enable_charging(battery, true);
+ battery->recharging = true;
+ pr_debug("%s: charge status charging but timer is expired\n",
+ __func__);
+ } else if (battery->charging_start_time == 0) {
+ pr_debug("%s: charging_start_time never initialized\n",
+ __func__);
+ return;
+ }
+
+ if (android_bat_charge_timeout(
+ battery,
+ battery->recharging ? battery->pdata->recharging_time :
+ battery->pdata->full_charging_time)) {
+ android_bat_enable_charging(battery, false);
+ if (battery->batt_vcell >
+ battery->pdata->recharging_voltage &&
+ battery->batt_soc == 100)
+ battery->charging_status =
+ POWER_SUPPLY_STATUS_FULL;
+ battery->recharging = false;
+ battery->charging_start_time = 0;
+ pr_info("battery: charging timer expired\n");
+ }
+
+ return;
+}
+
+static void android_bat_charge_source_changed(struct android_bat_callbacks *ptr,
+ int charge_source)
+{
+ struct android_bat_data *battery =
+ container_of(ptr, struct android_bat_data, callbacks);
+
+ wake_lock(&battery->charger_wake_lock);
+ mutex_lock(&android_bat_state_lock);
+ battery->charge_source = charge_source;
+
+ pr_info("battery: charge source type was changed: %s\n",
+ charge_source_str(battery->charge_source));
+
+ mutex_unlock(&android_bat_state_lock);
+ queue_work(battery->monitor_wqueue, &battery->charger_work);
+}
+
+static void android_bat_set_full_status(struct android_bat_callbacks *ptr)
+{
+ struct android_bat_data *battery =
+ container_of(ptr, struct android_bat_data, callbacks);
+
+ mutex_lock(&android_bat_state_lock);
+ pr_info("battery: battery full\n");
+ battery->charging_status = POWER_SUPPLY_STATUS_FULL;
+ android_bat_enable_charging(battery, false);
+ battery->recharging = false;
+ mutex_unlock(&android_bat_state_lock);
+ power_supply_changed(&battery->psy_bat);
+}
+
+static void android_bat_charger_work(struct work_struct *work)
+{
+ struct android_bat_data *battery =
+ container_of(work, struct android_bat_data, charger_work);
+
+ mutex_lock(&android_bat_state_lock);
+
+ switch (battery->charge_source) {
+ case CHARGE_SOURCE_NONE:
+ battery->charging_status = POWER_SUPPLY_STATUS_DISCHARGING;
+ android_bat_enable_charging(battery, false);
+ battery->batt_health = POWER_SUPPLY_HEALTH_GOOD;
+ battery->recharging = false;
+ battery->charging_start_time = 0;
+ break;
+ case CHARGE_SOURCE_USB:
+ case CHARGE_SOURCE_AC:
+ /*
+ * If charging status indicates a charger was already
+ * connected prior to this and the status is something
+ * other than charging ("full" or "not-charging"), leave
+ * the status alone.
+ */
+ if (battery->charging_status ==
+ POWER_SUPPLY_STATUS_DISCHARGING ||
+ battery->charging_status == POWER_SUPPLY_STATUS_UNKNOWN)
+ battery->charging_status = POWER_SUPPLY_STATUS_CHARGING;
+
+ /*
+ * Don't re-enable charging if the battery is full and we
+ * are not actively re-charging it, or if "not-charging"
+ * status is set.
+ */
+ if (!((battery->charging_status == POWER_SUPPLY_STATUS_FULL
+ && !battery->recharging) || battery->charging_status ==
+ POWER_SUPPLY_STATUS_NOT_CHARGING))
+ android_bat_enable_charging(battery, true);
+
+ break;
+ default:
+ pr_err("%s: Invalid charger type\n", __func__);
+ break;
+ }
+
+ mutex_unlock(&android_bat_state_lock);
+ wake_lock_timeout(&battery->charger_wake_lock, HZ * 2);
+ power_supply_changed(&battery->psy_ac);
+ power_supply_changed(&battery->psy_usb);
+}
+
+static void android_bat_monitor_set_alarm(struct android_bat_data *battery,
+ int seconds)
+{
+ alarm_start(&battery->monitor_alarm,
+ ktime_add(battery->last_poll, ktime_set(seconds, 0)));
+}
+
+static void android_bat_monitor_work(struct work_struct *work)
+{
+ struct android_bat_data *battery =
+ container_of(work, struct android_bat_data, monitor_work);
+ struct timespec cur_time;
+
+ wake_lock(&battery->monitor_wake_lock);
+ android_bat_update_data(battery);
+ mutex_lock(&android_bat_state_lock);
+
+ switch (battery->charging_status) {
+ case POWER_SUPPLY_STATUS_FULL:
+ if (battery->batt_vcell < battery->pdata->recharging_voltage &&
+ !battery->recharging) {
+ battery->recharging = true;
+ android_bat_enable_charging(battery, true);
+ pr_info("battery: start recharging, v=%d\n",
+ battery->batt_vcell/1000);
+ }
+ break;
+ case POWER_SUPPLY_STATUS_DISCHARGING:
+ break;
+ case POWER_SUPPLY_STATUS_CHARGING:
+ switch (battery->batt_health) {
+ case POWER_SUPPLY_HEALTH_OVERHEAT:
+ case POWER_SUPPLY_HEALTH_COLD:
+ case POWER_SUPPLY_HEALTH_OVERVOLTAGE:
+ case POWER_SUPPLY_HEALTH_DEAD:
+ case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE:
+ battery->charging_status =
+ POWER_SUPPLY_STATUS_NOT_CHARGING;
+ android_bat_enable_charging(battery, false);
+
+ pr_info("battery: Not charging, health=%d\n",
+ battery->batt_health);
+ break;
+ default:
+ break;
+ }
+ break;
+ case POWER_SUPPLY_STATUS_NOT_CHARGING:
+ if (battery->batt_health == POWER_SUPPLY_HEALTH_GOOD) {
+ pr_info("battery: battery health recovered\n");
+ if (battery->charge_source != CHARGE_SOURCE_NONE) {
+ android_bat_enable_charging(battery, true);
+ battery->charging_status
+ = POWER_SUPPLY_STATUS_CHARGING;
+ } else {
+ battery->charging_status
+ = POWER_SUPPLY_STATUS_DISCHARGING;
+ }
+ }
+ break;
+ default:
+ pr_err("%s: Undefined battery status: %d\n", __func__,
+ battery->charging_status);
+ break;
+ }
+
+ android_bat_charging_timer(battery);
+ get_monotonic_boottime(&cur_time);
+ pr_info("battery: l=%d v=%d c=%d temp=%s%ld.%ld h=%d st=%d%s ct=%lu type=%s\n",
+ battery->batt_soc, battery->batt_vcell/1000,
+ battery->batt_current, battery->batt_temp < 0 ? "-" : "",
+ abs(battery->batt_temp / 10), abs(battery->batt_temp % 10),
+ battery->batt_health, battery->charging_status,
+ battery->recharging ? "r" : "",
+ battery->charging_start_time ?
+ cur_time.tv_sec - battery->charging_start_time : 0,
+ charge_source_str(battery->charge_source));
+ mutex_unlock(&android_bat_state_lock);
+ power_supply_changed(&battery->psy_bat);
+ battery->last_poll = ktime_get_boottime();
+ android_bat_monitor_set_alarm(battery, FAST_POLL);
+ wake_unlock(&battery->monitor_wake_lock);
+ return;
+}
+
+static enum alarmtimer_restart android_bat_monitor_alarm(
+ struct alarm *alarm, ktime_t now)
+{
+ struct android_bat_data *battery =
+ container_of(alarm, struct android_bat_data, monitor_alarm);
+
+ wake_lock(&battery->monitor_wake_lock);
+ queue_work(battery->monitor_wqueue, &battery->monitor_work);
+ return ALARMTIMER_NORESTART;
+}
+
+static int android_power_debug_dump(struct seq_file *s, void *unused)
+{
+ struct android_bat_data *battery = s->private;
+ struct timespec cur_time;
+
+ android_bat_update_data(battery);
+ get_monotonic_boottime(&cur_time);
+ mutex_lock(&android_bat_state_lock);
+ seq_printf(s, "l=%d v=%d c=%d temp=%s%ld.%ld h=%d st=%d%s ct=%lu type=%s\n",
+ battery->batt_soc, battery->batt_vcell/1000,
+ battery->batt_current, battery->batt_temp < 0 ? "-" : "",
+ abs(battery->batt_temp / 10), abs(battery->batt_temp % 10),
+ battery->batt_health, battery->charging_status,
+ battery->recharging ? "r" : "",
+ battery->charging_start_time ?
+ cur_time.tv_sec - battery->charging_start_time : 0,
+ charge_source_str(battery->charge_source));
+ mutex_unlock(&android_bat_state_lock);
+ return 0;
+}
+
+static int android_power_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, android_power_debug_dump, inode->i_private);
+}
+
+static const struct file_operations android_power_debug_fops = {
+ .open = android_power_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int android_bat_probe(struct platform_device *pdev)
+{
+ struct android_bat_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct android_bat_data *battery;
+ int ret = 0;
+
+ dev_info(&pdev->dev, "Android Battery Driver\n");
+ battery = kzalloc(sizeof(*battery), GFP_KERNEL);
+ if (!battery)
+ return -ENOMEM;
+
+ battery->pdata = pdata;
+ if (!battery->pdata) {
+ pr_err("%s : No platform data\n", __func__);
+ ret = -EINVAL;
+ goto err_pdata;
+ }
+
+ battery->dev = &pdev->dev;
+ platform_set_drvdata(pdev, battery);
+ battery->batt_health = POWER_SUPPLY_HEALTH_GOOD;
+
+ battery->psy_bat.name = "android-battery";
+ battery->psy_bat.type = POWER_SUPPLY_TYPE_BATTERY;
+ battery->psy_bat.properties = android_battery_props;
+ battery->psy_bat.num_properties = ARRAY_SIZE(android_battery_props);
+ battery->psy_bat.get_property = android_bat_get_property;
+
+ battery->psy_usb.name = "android-usb";
+ battery->psy_usb.type = POWER_SUPPLY_TYPE_USB;
+ battery->psy_usb.supplied_to = supply_list;
+ battery->psy_usb.num_supplicants = ARRAY_SIZE(supply_list);
+ battery->psy_usb.properties = android_power_props;
+ battery->psy_usb.num_properties = ARRAY_SIZE(android_power_props);
+ battery->psy_usb.get_property = android_usb_get_property;
+
+ battery->psy_ac.name = "android-ac";
+ battery->psy_ac.type = POWER_SUPPLY_TYPE_MAINS;
+ battery->psy_ac.supplied_to = supply_list;
+ battery->psy_ac.num_supplicants = ARRAY_SIZE(supply_list);
+ battery->psy_ac.properties = android_power_props;
+ battery->psy_ac.num_properties = ARRAY_SIZE(android_power_props);
+ battery->psy_ac.get_property = android_ac_get_property;
+
+ battery->batt_vcell = -1;
+ battery->batt_soc = -1;
+
+ wake_lock_init(&battery->monitor_wake_lock, WAKE_LOCK_SUSPEND,
+ "android-battery-monitor");
+ wake_lock_init(&battery->charger_wake_lock, WAKE_LOCK_SUSPEND,
+ "android-chargerdetect");
+
+ ret = power_supply_register(&pdev->dev, &battery->psy_bat);
+ if (ret) {
+ dev_err(battery->dev, "%s: failed to register psy_bat\n",
+ __func__);
+ goto err_psy_bat_reg;
+ }
+
+ ret = power_supply_register(&pdev->dev, &battery->psy_usb);
+ if (ret) {
+ dev_err(battery->dev, "%s: failed to register psy_usb\n",
+ __func__);
+ goto err_psy_usb_reg;
+ }
+
+ ret = power_supply_register(&pdev->dev, &battery->psy_ac);
+ if (ret) {
+ dev_err(battery->dev, "%s: failed to register psy_ac\n",
+ __func__);
+ goto err_psy_ac_reg;
+ }
+
+ battery->monitor_wqueue =
+ alloc_workqueue(dev_name(&pdev->dev), WQ_FREEZABLE, 1);
+ if (!battery->monitor_wqueue) {
+ dev_err(battery->dev, "%s: failed to create workqueue\n",
+ __func__);
+ ret = -ENOMEM;
+ goto err_wq;
+ }
+
+ INIT_WORK(&battery->monitor_work, android_bat_monitor_work);
+ INIT_WORK(&battery->charger_work, android_bat_charger_work);
+
+ battery->callbacks.charge_source_changed =
+ android_bat_charge_source_changed;
+ battery->callbacks.battery_set_full =
+ android_bat_set_full_status;
+ if (battery->pdata && battery->pdata->register_callbacks)
+ battery->pdata->register_callbacks(&battery->callbacks);
+
+ /* get initial charger status */
+ if (battery->pdata->poll_charge_source)
+ battery->charge_source = battery->pdata->poll_charge_source();
+
+ wake_lock(&battery->charger_wake_lock);
+ queue_work(battery->monitor_wqueue, &battery->charger_work);
+
+ wake_lock(&battery->monitor_wake_lock);
+ battery->last_poll = ktime_get_boottime();
+ alarm_init(&battery->monitor_alarm, ALARM_BOOTTIME,
+ android_bat_monitor_alarm);
+ queue_work(battery->monitor_wqueue, &battery->monitor_work);
+
+ battery->debugfs_entry =
+ debugfs_create_file("android-power", S_IRUGO, NULL,
+ battery, &android_power_debug_fops);
+ if (!battery->debugfs_entry)
+ pr_err("failed to create android-power debugfs entry\n");
+
+ return 0;
+
+err_wq:
+ power_supply_unregister(&battery->psy_ac);
+err_psy_ac_reg:
+ power_supply_unregister(&battery->psy_usb);
+err_psy_usb_reg:
+ power_supply_unregister(&battery->psy_bat);
+err_psy_bat_reg:
+ wake_lock_destroy(&battery->monitor_wake_lock);
+ wake_lock_destroy(&battery->charger_wake_lock);
+err_pdata:
+ kfree(battery);
+
+ return ret;
+}
+
+static int android_bat_remove(struct platform_device *pdev)
+{
+ struct android_bat_data *battery = platform_get_drvdata(pdev);
+
+ alarm_cancel(&battery->monitor_alarm);
+ flush_workqueue(battery->monitor_wqueue);
+ destroy_workqueue(battery->monitor_wqueue);
+ power_supply_unregister(&battery->psy_ac);
+ power_supply_unregister(&battery->psy_usb);
+ power_supply_unregister(&battery->psy_bat);
+ wake_lock_destroy(&battery->monitor_wake_lock);
+ wake_lock_destroy(&battery->charger_wake_lock);
+ debugfs_remove(battery->debugfs_entry);
+ kfree(battery);
+ return 0;
+}
+
+static int android_bat_suspend(struct device *dev)
+{
+ struct android_bat_data *battery = dev_get_drvdata(dev);
+
+ cancel_work_sync(&battery->monitor_work);
+ android_bat_monitor_set_alarm(
+ battery,
+ battery->charge_source == CHARGE_SOURCE_NONE ?
+ SLOW_POLL : FAST_POLL);
+ return 0;
+}
+
+static void android_bat_resume(struct device *dev)
+{
+ struct android_bat_data *battery = dev_get_drvdata(dev);
+
+ android_bat_monitor_set_alarm(battery, FAST_POLL);
+ return;
+}
+
+static const struct dev_pm_ops android_bat_pm_ops = {
+ .prepare = android_bat_suspend,
+ .complete = android_bat_resume,
+};
+
+static struct platform_driver android_bat_driver = {
+ .driver = {
+ .name = "android-battery",
+ .owner = THIS_MODULE,
+ .pm = &android_bat_pm_ops,
+ },
+ .probe = android_bat_probe,
+ .remove = android_bat_remove,
+};
+
+static int __init android_bat_init(void)
+{
+ return platform_driver_register(&android_bat_driver);
+}
+
+static void __exit android_bat_exit(void)
+{
+ platform_driver_unregister(&android_bat_driver);
+}
+
+late_initcall(android_bat_init);
+module_exit(android_bat_exit);
+
+MODULE_DESCRIPTION("Android battery driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 8a7cfb3cc166..e84315295034 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -42,23 +42,40 @@ static int __power_supply_changed_work(struct device *dev, void *data)
static void power_supply_changed_work(struct work_struct *work)
{
+ unsigned long flags;
struct power_supply *psy = container_of(work, struct power_supply,
changed_work);
dev_dbg(psy->dev, "%s\n", __func__);
- class_for_each_device(power_supply_class, NULL, psy,
- __power_supply_changed_work);
+ spin_lock_irqsave(&psy->changed_lock, flags);
+ if (psy->changed) {
+ psy->changed = false;
+ spin_unlock_irqrestore(&psy->changed_lock, flags);
- power_supply_update_leds(psy);
+ class_for_each_device(power_supply_class, NULL, psy,
+ __power_supply_changed_work);
- kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE);
+ power_supply_update_leds(psy);
+
+ kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE);
+ spin_lock_irqsave(&psy->changed_lock, flags);
+ }
+ if (!psy->changed)
+ wake_unlock(&psy->work_wake_lock);
+ spin_unlock_irqrestore(&psy->changed_lock, flags);
}
void power_supply_changed(struct power_supply *psy)
{
+ unsigned long flags;
+
dev_dbg(psy->dev, "%s\n", __func__);
+ spin_lock_irqsave(&psy->changed_lock, flags);
+ psy->changed = true;
+ wake_lock(&psy->work_wake_lock);
+ spin_unlock_irqrestore(&psy->changed_lock, flags);
schedule_work(&psy->changed_work);
}
EXPORT_SYMBOL_GPL(power_supply_changed);
@@ -348,6 +365,9 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
if (rc)
goto register_thermal_failed;
+ spin_lock_init(&psy->changed_lock);
+ wake_lock_init(&psy->work_wake_lock, WAKE_LOCK_SUSPEND, "power-supply");
+
rc = psy_register_cooler(psy);
if (rc)
goto register_cooler_failed;
@@ -364,6 +384,7 @@ create_triggers_failed:
psy_unregister_cooler(psy);
register_cooler_failed:
psy_unregister_thermal(psy);
+ wake_lock_destroy(&psy->work_wake_lock);
register_thermal_failed:
device_del(dev);
kobject_set_name_failed:
@@ -381,6 +402,7 @@ void power_supply_unregister(struct power_supply *psy)
power_supply_remove_triggers(psy);
psy_unregister_cooler(psy);
psy_unregister_thermal(psy);
+ wake_lock_destroy(&psy->work_wake_lock);
device_unregister(psy->dev);
}
EXPORT_SYMBOL_GPL(power_supply_unregister);
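The power_supply_core change above latches a changed flag under changed_lock and holds work_wake_lock from power_supply_changed() until changed_work has delivered the uevent, so an event raised just before suspend is not lost. A hedged caller sketch follows; the example_charger names are made up, and the handler only illustrates that power_supply_changed() may be called from interrupt context.

#include <linux/interrupt.h>
#include <linux/power_supply.h>

struct example_charger {
	struct power_supply psy;
};

/* Sketch only: report a charger state change straight from the IRQ handler. */
static irqreturn_t example_charger_irq(int irq, void *data)
{
	struct example_charger *chg = data;

	/* ... read and cache the new charger status here ... */
	power_supply_changed(&chg->psy);
	return IRQ_HANDLED;
}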
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 0f65b246cc0c..278584302f2d 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1885,9 +1885,15 @@ int regulator_can_change_voltage(struct regulator *regulator)
struct regulator_dev *rdev = regulator->rdev;
if (rdev->constraints &&
- rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE &&
- (rdev->desc->n_voltages - rdev->desc->linear_min_sel) > 1)
- return 1;
+ (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
+ if (rdev->desc->n_voltages - rdev->desc->linear_min_sel > 1)
+ return 1;
+
+ if (rdev->desc->continuous_voltage_range &&
+ rdev->constraints->min_uV && rdev->constraints->max_uV &&
+ rdev->constraints->min_uV != rdev->constraints->max_uV)
+ return 1;
+ }
return 0;
}
@@ -3315,7 +3321,8 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
* @config: runtime configuration for regulator
*
* Called by regulator drivers to register a regulator.
- * Returns 0 on success.
+ * Returns a valid pointer to struct regulator_dev on success
+ * or an ERR_PTR() on error.
*/
struct regulator_dev *
regulator_register(const struct regulator_desc *regulator_desc,
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index df0eafb0dc7e..02be7fcae32f 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -71,26 +71,26 @@ struct voltage_map_desc {
int step;
};
-/* Voltage maps in mV */
+/* Voltage maps in uV */
static const struct voltage_map_desc ldo_voltage_map_desc = {
- .min = 800, .max = 3950, .step = 50,
+ .min = 800000, .max = 3950000, .step = 50000,
}; /* LDO1 ~ 18, 21 all */
static const struct voltage_map_desc buck1245_voltage_map_desc = {
- .min = 650, .max = 2225, .step = 25,
+ .min = 650000, .max = 2225000, .step = 25000,
}; /* Buck1, 2, 4, 5 */
static const struct voltage_map_desc buck37_voltage_map_desc = {
- .min = 750, .max = 3900, .step = 50,
+ .min = 750000, .max = 3900000, .step = 50000,
}; /* Buck3, 7 */
-/* current map in mA */
+/* current map in uA */
static const struct voltage_map_desc charger_current_map_desc = {
- .min = 200, .max = 950, .step = 50,
+ .min = 200000, .max = 950000, .step = 50000,
};
static const struct voltage_map_desc topoff_current_map_desc = {
- .min = 50, .max = 200, .step = 10,
+ .min = 50000, .max = 200000, .step = 10000,
};
static const struct voltage_map_desc *reg_voltage_map[] = {
@@ -194,7 +194,7 @@ static int max8997_list_voltage(struct regulator_dev *rdev,
if (val > desc->max)
return -EINVAL;
- return val * 1000;
+ return val;
}
static int max8997_get_enable_register(struct regulator_dev *rdev,
@@ -485,7 +485,6 @@ static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
{
struct max8997_data *max8997 = rdev_get_drvdata(rdev);
struct i2c_client *i2c = max8997->iodev->i2c;
- int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
const struct voltage_map_desc *desc;
int rid = rdev_get_id(rdev);
int i, reg, shift, mask, ret;
@@ -509,7 +508,7 @@ static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
desc = reg_voltage_map[rid];
- i = max8997_get_voltage_proper_val(desc, min_vol, max_vol);
+ i = max8997_get_voltage_proper_val(desc, min_uV, max_uV);
if (i < 0)
return i;
@@ -557,7 +556,7 @@ static int max8997_set_voltage_ldobuck_time_sel(struct regulator_dev *rdev,
case MAX8997_BUCK4:
case MAX8997_BUCK5:
return DIV_ROUND_UP(desc->step * (new_selector - old_selector),
- max8997->ramp_delay);
+ max8997->ramp_delay * 1000);
}
return 0;
@@ -656,7 +655,6 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
const struct voltage_map_desc *desc;
int new_val, new_idx, damage, tmp_val, tmp_idx, tmp_dmg;
bool gpio_dvs_mode = false;
- int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
if (rid < MAX8997_BUCK1 || rid > MAX8997_BUCK7)
return -EINVAL;
@@ -681,7 +679,7 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
selector);
desc = reg_voltage_map[rid];
- new_val = max8997_get_voltage_proper_val(desc, min_vol, max_vol);
+ new_val = max8997_get_voltage_proper_val(desc, min_uV, max_uV);
if (new_val < 0)
return new_val;
@@ -1123,8 +1121,8 @@ static int max8997_pmic_probe(struct platform_device *pdev)
max8997->buck1_vol[i] = ret =
max8997_get_voltage_proper_val(
&buck1245_voltage_map_desc,
- pdata->buck1_voltage[i] / 1000,
- pdata->buck1_voltage[i] / 1000 +
+ pdata->buck1_voltage[i],
+ pdata->buck1_voltage[i] +
buck1245_voltage_map_desc.step);
if (ret < 0)
goto err_out;
@@ -1132,8 +1130,8 @@ static int max8997_pmic_probe(struct platform_device *pdev)
max8997->buck2_vol[i] = ret =
max8997_get_voltage_proper_val(
&buck1245_voltage_map_desc,
- pdata->buck2_voltage[i] / 1000,
- pdata->buck2_voltage[i] / 1000 +
+ pdata->buck2_voltage[i],
+ pdata->buck2_voltage[i] +
buck1245_voltage_map_desc.step);
if (ret < 0)
goto err_out;
@@ -1141,8 +1139,8 @@ static int max8997_pmic_probe(struct platform_device *pdev)
max8997->buck5_vol[i] = ret =
max8997_get_voltage_proper_val(
&buck1245_voltage_map_desc,
- pdata->buck5_voltage[i] / 1000,
- pdata->buck5_voltage[i] / 1000 +
+ pdata->buck5_voltage[i],
+ pdata->buck5_voltage[i] +
buck1245_voltage_map_desc.step);
if (ret < 0)
goto err_out;
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index b821d08eb64a..1f0df4046b86 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -51,39 +51,39 @@ struct voltage_map_desc {
int step;
};
-/* Voltage maps */
+/* Voltage maps in uV */
static const struct voltage_map_desc ldo23_voltage_map_desc = {
- .min = 800, .step = 50, .max = 1300,
+ .min = 800000, .step = 50000, .max = 1300000,
};
static const struct voltage_map_desc ldo456711_voltage_map_desc = {
- .min = 1600, .step = 100, .max = 3600,
+ .min = 1600000, .step = 100000, .max = 3600000,
};
static const struct voltage_map_desc ldo8_voltage_map_desc = {
- .min = 3000, .step = 100, .max = 3600,
+ .min = 3000000, .step = 100000, .max = 3600000,
};
static const struct voltage_map_desc ldo9_voltage_map_desc = {
- .min = 2800, .step = 100, .max = 3100,
+ .min = 2800000, .step = 100000, .max = 3100000,
};
static const struct voltage_map_desc ldo10_voltage_map_desc = {
- .min = 950, .step = 50, .max = 1300,
+ .min = 950000, .step = 50000, .max = 1300000,
};
static const struct voltage_map_desc ldo1213_voltage_map_desc = {
- .min = 800, .step = 100, .max = 3300,
+ .min = 800000, .step = 100000, .max = 3300000,
};
static const struct voltage_map_desc ldo1415_voltage_map_desc = {
- .min = 1200, .step = 100, .max = 3300,
+ .min = 1200000, .step = 100000, .max = 3300000,
};
static const struct voltage_map_desc ldo1617_voltage_map_desc = {
- .min = 1600, .step = 100, .max = 3600,
+ .min = 1600000, .step = 100000, .max = 3600000,
};
static const struct voltage_map_desc buck12_voltage_map_desc = {
- .min = 750, .step = 25, .max = 1525,
+ .min = 750000, .step = 25000, .max = 1525000,
};
static const struct voltage_map_desc buck3_voltage_map_desc = {
- .min = 1600, .step = 100, .max = 3600,
+ .min = 1600000, .step = 100000, .max = 3600000,
};
static const struct voltage_map_desc buck4_voltage_map_desc = {
- .min = 800, .step = 100, .max = 2300,
+ .min = 800000, .step = 100000, .max = 2300000,
};
static const struct voltage_map_desc *ldo_voltage_map[] = {
@@ -445,9 +445,9 @@ static int max8998_set_voltage_buck_time_sel(struct regulator_dev *rdev,
if (max8998->iodev->type == TYPE_MAX8998 && !(val & MAX8998_ENRAMP))
return 0;
- difference = (new_selector - old_selector) * desc->step;
+ difference = (new_selector - old_selector) * desc->step / 1000;
if (difference > 0)
- return difference / ((val & 0x0f) + 1);
+ return DIV_ROUND_UP(difference, (val & 0x0f) + 1);
return 0;
}
@@ -702,7 +702,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
i = 0;
while (buck12_voltage_map_desc.min +
buck12_voltage_map_desc.step*i
- < (pdata->buck1_voltage1 / 1000))
+ < pdata->buck1_voltage1)
i++;
max8998->buck1_vol[0] = i;
ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE1, i);
@@ -713,7 +713,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
i = 0;
while (buck12_voltage_map_desc.min +
buck12_voltage_map_desc.step*i
- < (pdata->buck1_voltage2 / 1000))
+ < pdata->buck1_voltage2)
i++;
max8998->buck1_vol[1] = i;
@@ -725,7 +725,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
i = 0;
while (buck12_voltage_map_desc.min +
buck12_voltage_map_desc.step*i
- < (pdata->buck1_voltage3 / 1000))
+ < pdata->buck1_voltage3)
i++;
max8998->buck1_vol[2] = i;
@@ -737,7 +737,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
i = 0;
while (buck12_voltage_map_desc.min +
buck12_voltage_map_desc.step*i
- < (pdata->buck1_voltage4 / 1000))
+ < pdata->buck1_voltage4)
i++;
max8998->buck1_vol[3] = i;
@@ -763,7 +763,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
i = 0;
while (buck12_voltage_map_desc.min +
buck12_voltage_map_desc.step*i
- < (pdata->buck2_voltage1 / 1000))
+ < pdata->buck2_voltage1)
i++;
max8998->buck2_vol[0] = i;
ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE1, i);
@@ -774,7 +774,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
i = 0;
while (buck12_voltage_map_desc.min +
buck12_voltage_map_desc.step*i
- < (pdata->buck2_voltage2 / 1000))
+ < pdata->buck2_voltage2)
i++;
max8998->buck2_vol[1] = i;
ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE2, i);
@@ -792,8 +792,8 @@ static int max8998_pmic_probe(struct platform_device *pdev)
int count = (desc->max - desc->min) / desc->step + 1;
regulators[index].n_voltages = count;
- regulators[index].min_uV = desc->min * 1000;
- regulators[index].uV_step = desc->step * 1000;
+ regulators[index].min_uV = desc->min;
+ regulators[index].uV_step = desc->step;
}
config.dev = max8998->dev;
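With the max8997/max8998 voltage maps above expressed directly in microvolts, the selector arithmetic loses its "* 1000" and "/ 1000" adjustments. A small worked example using the buck12_voltage_map_desc values (min 750000 uV, step 25000 uV) is sketched below; the helper name is illustrative only.

/* Selector-to-voltage with the uV-based map: uV = min + step * selector. */
static int example_buck12_selector_to_uv(int selector)
{
	return 750000 + 25000 * selector;	/* selector 10 -> 1000000 uV (1.0 V) */
}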
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 9f991f2c525a..33b65c9ad5d5 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -214,7 +214,7 @@ static int s5m8767_reg_is_enabled(struct regulator_dev *rdev)
struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
int ret, reg;
int mask = 0xc0, enable_ctrl;
- u8 val;
+ unsigned int val;
ret = s5m8767_get_register(rdev, &reg, &enable_ctrl);
if (ret == -EINVAL)
@@ -306,7 +306,7 @@ static int s5m8767_get_voltage_sel(struct regulator_dev *rdev)
struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
int reg, mask, ret;
int reg_id = rdev_get_id(rdev);
- u8 val;
+ unsigned int val;
ret = s5m8767_get_voltage_register(rdev, &reg);
if (ret)
diff --git a/drivers/rtc/rtc-da9055.c b/drivers/rtc/rtc-da9055.c
index 96bafc5c3bf8..8f0dcfedb83c 100644
--- a/drivers/rtc/rtc-da9055.c
+++ b/drivers/rtc/rtc-da9055.c
@@ -227,7 +227,7 @@ static const struct rtc_class_ops da9055_rtc_ops = {
.alarm_irq_enable = da9055_rtc_alarm_irq_enable,
};
-static int __init da9055_rtc_device_init(struct da9055 *da9055,
+static int da9055_rtc_device_init(struct da9055 *da9055,
struct da9055_pdata *pdata)
{
int ret;
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 9bd5da36f99e..704488d0f819 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -248,7 +248,7 @@ static void dasd_ext_handler(struct ext_code ext_code,
default:
return;
}
- kstat_cpu(smp_processor_id()).irqs[EXTINT_DSD]++;
+ inc_irq_stat(IRQEXT_DSD);
if (!ip) { /* no intparm: unsolicited interrupt */
DBF_EVENT(DBF_NOTICE, "%s", "caught unsolicited "
"interrupt");
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 806fe912d6e7..e37bc1620d14 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -4274,7 +4274,7 @@ static struct ccw_driver dasd_eckd_driver = {
.thaw = dasd_generic_restore_device,
.restore = dasd_generic_restore_device,
.uc_handler = dasd_generic_uc_handler,
- .int_class = IOINT_DAS,
+ .int_class = IRQIO_DAS,
};
/*
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index eb748507c7fa..414698584344 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -78,7 +78,7 @@ static struct ccw_driver dasd_fba_driver = {
.freeze = dasd_generic_pm_freeze,
.thaw = dasd_generic_restore_device,
.restore = dasd_generic_restore_device,
- .int_class = IOINT_DAS,
+ .int_class = IRQIO_DAS,
};
static void
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 40084501c31b..33b7141a182f 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -44,6 +44,7 @@
#define RAW3215_NR_CCWS 3
#define RAW3215_TIMEOUT HZ/10 /* time for delayed output */
+#define RAW3215_FIXED 1 /* 3215 console device must not be freed */
#define RAW3215_WORKING 4 /* set if a request is being worked on */
#define RAW3215_THROTTLED 8 /* set if reading is disabled */
#define RAW3215_STOPPED 16 /* set if writing is disabled */
@@ -630,7 +631,8 @@ static void raw3215_shutdown(struct raw3215_info *raw)
DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
- if (!(raw->port.flags & ASYNC_INITIALIZED))
+ if (!(raw->port.flags & ASYNC_INITIALIZED) ||
+ (raw->flags & RAW3215_FIXED))
return;
/* Wait for outstanding requests, then free irq */
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
@@ -805,7 +807,7 @@ static struct ccw_driver raw3215_ccw_driver = {
.freeze = &raw3215_pm_stop,
.thaw = &raw3215_pm_start,
.restore = &raw3215_pm_start,
- .int_class = IOINT_C15,
+ .int_class = IRQIO_C15,
};
#ifdef CONFIG_TN3215_CONSOLE
@@ -927,6 +929,8 @@ static int __init con3215_init(void)
dev_set_drvdata(&cdev->dev, raw);
cdev->handler = raw3215_irq;
+ raw->flags |= RAW3215_FIXED;
+
/* Request the console irq */
if (raw3215_startup(raw) != 0) {
raw3215_free_info(raw);
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index f3b8bb84faf2..9a6c140c5f07 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -1396,7 +1396,7 @@ static struct ccw_driver raw3270_ccw_driver = {
.freeze = &raw3270_pm_stop,
.thaw = &raw3270_pm_start,
.restore = &raw3270_pm_start,
- .int_class = IOINT_C70,
+ .int_class = IRQIO_C70,
};
static int
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 4fa21f7e2308..12c16a65dd25 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -400,7 +400,7 @@ static void sclp_interrupt_handler(struct ext_code ext_code,
u32 finished_sccb;
u32 evbuf_pending;
- kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
+ inc_irq_stat(IRQEXT_SCP);
spin_lock(&sclp_lock);
finished_sccb = param32 & 0xfffffff8;
evbuf_pending = param32 & 0x3;
@@ -813,7 +813,7 @@ static void sclp_check_handler(struct ext_code ext_code,
{
u32 finished_sccb;
- kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
+ inc_irq_stat(IRQEXT_SCP);
finished_sccb = param32 & 0xfffffff8;
/* Is this the interrupt we are waiting for? */
if (finished_sccb == 0)
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 6ae929c024ae..9aa79702b370 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -1193,7 +1193,7 @@ static struct ccw_driver tape_34xx_driver = {
.set_online = tape_34xx_online,
.set_offline = tape_generic_offline,
.freeze = tape_generic_pm_suspend,
- .int_class = IOINT_TAP,
+ .int_class = IRQIO_TAP,
};
static int
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 1b0eb49f739c..327cb19ad0b0 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -1656,7 +1656,7 @@ static struct ccw_driver tape_3590_driver = {
.set_offline = tape_generic_offline,
.set_online = tape_3590_online,
.freeze = tape_generic_pm_suspend,
- .int_class = IOINT_TAP,
+ .int_class = IRQIO_TAP,
};
/*
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 73bef0bd394c..483f72ba030d 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -74,7 +74,7 @@ static struct ccw_driver ur_driver = {
.set_online = ur_set_online,
.set_offline = ur_set_offline,
.freeze = ur_pm_suspend,
- .int_class = IOINT_VMR,
+ .int_class = IRQIO_VMR,
};
static DEFINE_MUTEX(vmur_mutex);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 68e80e2734a4..10729bbceced 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -283,7 +283,7 @@ struct chsc_sei_nt2_area {
u8 ccdf[PAGE_SIZE - 24 - 56]; /* content-code dependent field */
} __packed;
-#define CHSC_SEI_NT0 0ULL
+#define CHSC_SEI_NT0 (1ULL << 63)
#define CHSC_SEI_NT2 (1ULL << 61)
struct chsc_sei {
@@ -291,7 +291,8 @@ struct chsc_sei {
u32 reserved1;
u64 ntsm; /* notification type mask */
struct chsc_header response;
- u32 reserved2;
+ u32 :24;
+ u8 nt;
union {
struct chsc_sei_nt0_area nt0_area;
struct chsc_sei_nt2_area nt2_area;
@@ -496,17 +497,17 @@ static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm)
css_schedule_eval_all();
}
- switch (sei->ntsm) {
- case CHSC_SEI_NT0:
+ switch (sei->nt) {
+ case 0:
chsc_process_sei_nt0(&sei->u.nt0_area);
- return 1;
- case CHSC_SEI_NT2:
+ break;
+ case 2:
chsc_process_sei_nt2(&sei->u.nt2_area);
- return 1;
+ break;
default:
- CIO_CRW_EVENT(2, "chsc: unhandled nt (nt=%08Lx)\n",
- sei->ntsm);
- return 0;
+ CIO_CRW_EVENT(2, "chsc: unhandled nt=%d\n",
+ sei->nt);
+ break;
}
} else {
CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
@@ -537,15 +538,7 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
sei = sei_page;
CIO_TRACE_EVENT(2, "prcss");
-
- /*
- * The ntsm does not allow to select NT0 and NT2 together. We need to
- * first check for NT2, than additionally for NT0...
- */
-#ifdef CONFIG_PCI
- if (!__chsc_process_crw(sei, CHSC_SEI_NT2))
-#endif
- __chsc_process_crw(sei, CHSC_SEI_NT0);
+ __chsc_process_crw(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
}
void chsc_chp_online(struct chp_id chpid)
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 8f9a1a384496..facdf809113f 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -58,7 +58,7 @@ static void chsc_subchannel_irq(struct subchannel *sch)
CHSC_LOG(4, "irb");
CHSC_LOG_HEX(4, irb, sizeof(*irb));
- kstat_cpu(smp_processor_id()).irqs[IOINT_CSC]++;
+ inc_irq_stat(IRQIO_CSC);
/* Copy irb to provided request and set done. */
if (!request) {
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 8e927b9f285f..c8faf6230b0f 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -611,7 +611,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
irb = (struct irb *)&S390_lowcore.irb;
do {
- kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
+ kstat_incr_irqs_this_cpu(IO_INTERRUPT, NULL);
if (tpi_info->adapter_IO) {
do_adapter_IO(tpi_info->isc);
continue;
@@ -619,7 +619,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
if (!sch) {
/* Clear pending interrupt condition. */
- kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+ inc_irq_stat(IRQIO_CIO);
tsch(tpi_info->schid, irb);
continue;
}
@@ -633,9 +633,9 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
if (sch->driver && sch->driver->irq)
sch->driver->irq(sch);
else
- kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+ inc_irq_stat(IRQIO_CIO);
} else
- kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+ inc_irq_stat(IRQIO_CIO);
spin_unlock(sch->lock);
/*
* Are more interrupts pending?
@@ -678,7 +678,7 @@ static void cio_tsch(struct subchannel *sch)
if (sch->driver && sch->driver->irq)
sch->driver->irq(sch);
else
- kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+ inc_irq_stat(IRQIO_CIO);
if (!irq_context) {
irq_exit();
_local_bh_enable();
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 6995cff44636..7cd5c6812ac7 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -758,7 +758,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
struct ccw_device *cdev)
{
cdev->private->cdev = cdev;
- cdev->private->int_class = IOINT_CIO;
+ cdev->private->int_class = IRQIO_CIO;
atomic_set(&cdev->private->onoff, 0);
cdev->dev.parent = &sch->dev;
cdev->dev.release = ccw_device_release;
@@ -1023,7 +1023,7 @@ static void io_subchannel_irq(struct subchannel *sch)
if (cdev)
dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
else
- kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+ inc_irq_stat(IRQIO_CIO);
}
void io_subchannel_init_config(struct subchannel *sch)
@@ -1634,7 +1634,7 @@ ccw_device_probe_console(void)
memset(&console_private, 0, sizeof(struct ccw_device_private));
console_cdev.private = &console_private;
console_private.cdev = &console_cdev;
- console_private.int_class = IOINT_CIO;
+ console_private.int_class = IRQIO_CIO;
ret = ccw_device_console_enable(&console_cdev, sch);
if (ret) {
cio_release_console();
@@ -1715,13 +1715,13 @@ ccw_device_probe (struct device *dev)
if (cdrv->int_class != 0)
cdev->private->int_class = cdrv->int_class;
else
- cdev->private->int_class = IOINT_CIO;
+ cdev->private->int_class = IRQIO_CIO;
ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
if (ret) {
cdev->drv = NULL;
- cdev->private->int_class = IOINT_CIO;
+ cdev->private->int_class = IRQIO_CIO;
return ret;
}
@@ -1755,7 +1755,7 @@ ccw_device_remove (struct device *dev)
}
ccw_device_set_timeout(cdev, 0);
cdev->drv = NULL;
- cdev->private->int_class = IOINT_CIO;
+ cdev->private->int_class = IRQIO_CIO;
return 0;
}
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 2e575cff9845..7d4ecb65db00 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -61,11 +61,10 @@ dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event)
if (dev_event == DEV_EVENT_INTERRUPT) {
if (state == DEV_STATE_ONLINE)
- kstat_cpu(smp_processor_id()).
- irqs[cdev->private->int_class]++;
+ inc_irq_stat(cdev->private->int_class);
else if (state != DEV_STATE_CMFCHANGE &&
state != DEV_STATE_CMFUPDATE)
- kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+ inc_irq_stat(IRQIO_CIO);
}
dev_jumptable[state][dev_event](cdev, dev_event);
}
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index 6c9673400464..d9eddcba7e88 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -139,7 +139,7 @@ static void eadm_subchannel_irq(struct subchannel *sch)
EADM_LOG(6, "irq");
EADM_LOG_HEX(6, irb, sizeof(*irb));
- kstat_cpu(smp_processor_id()).irqs[IOINT_ADM]++;
+ inc_irq_stat(IRQIO_ADM);
if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))
&& scsw->eswf == 1 && irb->esw.eadm.erw.r)
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index bdb394b066fc..bde5255200dc 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -182,7 +182,7 @@ static void tiqdio_thinint_handler(void *alsi, void *data)
struct qdio_q *q;
last_ai_time = S390_lowcore.int_clock;
- kstat_cpu(smp_processor_id()).irqs[IOINT_QAI]++;
+ inc_irq_stat(IRQIO_QAI);
/* protect tiq_list entries, only changed in activate or shutdown */
rcu_read_lock();
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 7b865a7300e6..b8b340ac5332 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1272,7 +1272,7 @@ out:
static void ap_interrupt_handler(void *unused1, void *unused2)
{
- kstat_cpu(smp_processor_id()).irqs[IOINT_APB]++;
+ inc_irq_stat(IRQIO_APB);
tasklet_schedule(&ap_tasklet);
}
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 7dabef624da3..8491111aec12 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -392,7 +392,7 @@ static void kvm_extint_handler(struct ext_code ext_code,
if ((ext_code.subcode & 0xff00) != VIRTIO_SUBCODE_64)
return;
- kstat_cpu(smp_processor_id()).irqs[EXTINT_VRT]++;
+ inc_irq_stat(IRQEXT_VRT);
/* The LSB might be overloaded, we have to mask it */
vq = (struct virtqueue *)(param64 & ~1UL);
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 5c70a6599578..83bc9c5fa0c1 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -282,7 +282,7 @@ static struct ccw_driver claw_ccw_driver = {
.ids = claw_ids,
.probe = ccwgroup_probe_ccwdev,
.remove = ccwgroup_remove_ccwdev,
- .int_class = IOINT_CLW,
+ .int_class = IRQIO_CLW,
};
static ssize_t claw_driver_group_store(struct device_driver *ddrv,
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 817b68925ddd..676f12049a36 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1755,7 +1755,7 @@ static struct ccw_driver ctcm_ccw_driver = {
.ids = ctcm_ids,
.probe = ccwgroup_probe_ccwdev,
.remove = ccwgroup_remove_ccwdev,
- .int_class = IOINT_CTC,
+ .int_class = IRQIO_CTC,
};
static struct ccwgroup_driver ctcm_group_driver = {
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 2ca0f1dd7a00..c645dc9e98af 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -2384,7 +2384,7 @@ static struct ccw_driver lcs_ccw_driver = {
.ids = lcs_ids,
.probe = ccwgroup_probe_ccwdev,
.remove = ccwgroup_remove_ccwdev,
- .int_class = IOINT_LCS,
+ .int_class = IRQIO_LCS,
};
/**
diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c
index 5aedcdf4ac5c..1ebe67cd1833 100644
--- a/drivers/sh/clk/cpg.c
+++ b/drivers/sh/clk/cpg.c
@@ -126,6 +126,12 @@ static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
static int sh_clk_div_enable(struct clk *clk)
{
+ if (clk->div_mask == SH_CLK_DIV6_MSK) {
+ int ret = sh_clk_div_set_rate(clk, clk->rate);
+ if (ret < 0)
+ return ret;
+ }
+
sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
return 0;
}
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 0ce50d12c30f..f92cb54328f4 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -40,6 +40,15 @@ config ANDROID_LOW_MEMORY_KILLER
---help---
Register processes to be killed when memory is low
+config ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
+ bool "Android Low Memory Killer: detect oom_adj values"
+ depends on ANDROID_LOW_MEMORY_KILLER
+ default y
+ ---help---
+ Detect oom_adj values written to
+ /sys/module/lowmemorykiller/parameters/adj and convert them
+ to oom_score_adj values.
+
config ANDROID_INTF_ALARM_DEV
bool "Android alarm driver"
depends on RTC_CLASS
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
deleted file mode 100644
index b15fb0d6b152..000000000000
--- a/drivers/staging/android/TODO
+++ /dev/null
@@ -1,10 +0,0 @@
-TODO:
- - checkpatch.pl cleanups
- - sparse fixes
- - rename files to be not so "generic"
- - make sure things build as modules properly
- - add proper arch dependencies as needed
- - audit userspace interfaces to make sure they are sane
-
-Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
-Brian Swetland <swetland@google.com>
diff --git a/drivers/staging/android/alarm-dev.c b/drivers/staging/android/alarm-dev.c
index a9b293ff3cc8..7f677d6596dd 100644
--- a/drivers/staging/android/alarm-dev.c
+++ b/drivers/staging/android/alarm-dev.c
@@ -23,6 +23,7 @@
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/alarmtimer.h>
+#include <linux/wakelock.h>
#include "android_alarm.h"
#define ANDROID_ALARM_PRINT_INFO (1U << 0)
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 634b9ae713e0..72064fcdb990 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -317,22 +317,13 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
}
get_file(asma->file);
- /*
- * XXX - Reworked to use shmem_zero_setup() instead of
- * shmem_set_file while we're in staging. -jstultz
- */
- if (vma->vm_flags & VM_SHARED) {
- ret = shmem_zero_setup(vma);
- if (ret) {
- fput(asma->file);
- goto out;
- }
+ if (vma->vm_flags & VM_SHARED)
+ shmem_set_file(vma, asma->file);
+ else {
+ if (vma->vm_file)
+ fput(vma->vm_file);
+ vma->vm_file = asma->file;
}
-
- if (vma->vm_file)
- fput(vma->vm_file);
- vma->vm_file = asma->file;
-
out:
mutex_unlock(&ashmem_mutex);
return ret;
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
index dbc63cbb4d3a..bb7396d6aa45 100644
--- a/drivers/staging/android/logger.c
+++ b/drivers/staging/android/logger.c
@@ -68,6 +68,8 @@ static LIST_HEAD(log_list);
* @log: The associated log
* @list: The associated entry in @logger_log's list
* @r_off: The current read head offset.
+ * @r_all: True if the reader may read all entries, not just its own
+ * @r_ver: The logger_entry ABI version requested by this reader
*
* This object lives from open to release, so we don't need additional
* reference counting. The structure is protected by log->mutex.
@@ -76,6 +78,8 @@ struct logger_reader {
struct logger_log *log;
struct list_head list;
size_t r_off;
+ bool r_all;
+ int r_ver;
};
/* logger_offset - returns index 'n' into the log via (optimized) modulus */
@@ -109,8 +113,29 @@ static inline struct logger_log *file_get_log(struct file *file)
}
/*
- * get_entry_len - Grabs the length of the payload of the next entry starting
- * from 'off'.
+ * get_entry_header - returns a pointer to the logger_entry header within
+ * 'log' starting at offset 'off'. A temporary logger_entry 'scratch' must
+ * be provided. Typically the return value will be a pointer within
+ * 'logger->buf'. However, a pointer to 'scratch' may be returned if
+ * the log entry spans the end and beginning of the circular buffer.
+ */
+static struct logger_entry *get_entry_header(struct logger_log *log,
+ size_t off, struct logger_entry *scratch)
+{
+ size_t len = min(sizeof(struct logger_entry), log->size - off);
+ if (len != sizeof(struct logger_entry)) {
+ memcpy(((void *) scratch), log->buffer + off, len);
+ memcpy(((void *) scratch) + len, log->buffer,
+ sizeof(struct logger_entry) - len);
+ return scratch;
+ }
+
+ return (struct logger_entry *) (log->buffer + off);
+}
+
+/*
+ * get_entry_msg_len - Grabs the length of the message of the entry
+ * starting from 'off'.
*
* An entry length is 2 bytes (16 bits) in host endian order.
* In the log, the length does not include the size of the log entry structure.
@@ -118,20 +143,45 @@ static inline struct logger_log *file_get_log(struct file *file)
*
* Caller needs to hold log->mutex.
*/
-static __u32 get_entry_len(struct logger_log *log, size_t off)
+static __u32 get_entry_msg_len(struct logger_log *log, size_t off)
{
- __u16 val;
+ struct logger_entry scratch;
+ struct logger_entry *entry;
- /* copy 2 bytes from buffer, in memcpy order, */
- /* handling possible wrap at end of buffer */
+ entry = get_entry_header(log, off, &scratch);
+ return entry->len;
+}
- ((__u8 *)&val)[0] = log->buffer[off];
- if (likely(off+1 < log->size))
- ((__u8 *)&val)[1] = log->buffer[off+1];
+static size_t get_user_hdr_len(int ver)
+{
+ if (ver < 2)
+ return sizeof(struct user_logger_entry_compat);
else
- ((__u8 *)&val)[1] = log->buffer[0];
+ return sizeof(struct logger_entry);
+}
- return sizeof(struct logger_entry) + val;
+static ssize_t copy_header_to_user(int ver, struct logger_entry *entry,
+ char __user *buf)
+{
+ void *hdr;
+ size_t hdr_len;
+ struct user_logger_entry_compat v1;
+
+ if (ver < 2) {
+ v1.len = entry->len;
+ v1.__pad = 0;
+ v1.pid = entry->pid;
+ v1.tid = entry->tid;
+ v1.sec = entry->sec;
+ v1.nsec = entry->nsec;
+ hdr = &v1;
+ hdr_len = sizeof(struct user_logger_entry_compat);
+ } else {
+ hdr = entry;
+ hdr_len = sizeof(struct logger_entry);
+ }
+
+ return copy_to_user(buf, hdr, hdr_len);
}
/*
@@ -145,15 +195,31 @@ static ssize_t do_read_log_to_user(struct logger_log *log,
char __user *buf,
size_t count)
{
+ struct logger_entry scratch;
+ struct logger_entry *entry;
size_t len;
+ size_t msg_start;
/*
- * We read from the log in two disjoint operations. First, we read from
- * the current read head offset up to 'count' bytes or to the end of
+ * First, copy the header to userspace, using the version of
+ * the header requested
+ */
+ entry = get_entry_header(log, reader->r_off, &scratch);
+ if (copy_header_to_user(reader->r_ver, entry, buf))
+ return -EFAULT;
+
+ count -= get_user_hdr_len(reader->r_ver);
+ buf += get_user_hdr_len(reader->r_ver);
+ msg_start = logger_offset(log,
+ reader->r_off + sizeof(struct logger_entry));
+
+ /*
+ * We read from the msg in two disjoint operations. First, we read from
+ * the current msg head offset up to 'count' bytes or to the end of
* the log, whichever comes first.
*/
- len = min(count, log->size - reader->r_off);
- if (copy_to_user(buf, log->buffer + reader->r_off, len))
+ len = min(count, log->size - msg_start);
+ if (copy_to_user(buf, log->buffer + msg_start, len))
return -EFAULT;
/*
@@ -164,9 +230,34 @@ static ssize_t do_read_log_to_user(struct logger_log *log,
if (copy_to_user(buf + len, log->buffer, count - len))
return -EFAULT;
- reader->r_off = logger_offset(log, reader->r_off + count);
+ reader->r_off = logger_offset(log, reader->r_off +
+ sizeof(struct logger_entry) + count);
- return count;
+ return count + get_user_hdr_len(reader->r_ver);
+}
+
+/*
+ * get_next_entry_by_uid - Starting at 'off', returns an offset into
+ * 'log->buffer' which contains the first entry readable by 'euid'
+ */
+static size_t get_next_entry_by_uid(struct logger_log *log,
+ size_t off, uid_t euid)
+{
+ while (off != log->w_off) {
+ struct logger_entry *entry;
+ struct logger_entry scratch;
+ size_t next_len;
+
+ entry = get_entry_header(log, off, &scratch);
+
+ if (entry->euid == euid)
+ return off;
+
+ next_len = sizeof(struct logger_entry) + entry->len;
+ off = logger_offset(log, off + next_len);
+ }
+
+ return off;
}
/*
@@ -178,7 +269,7 @@ static ssize_t do_read_log_to_user(struct logger_log *log,
* - If there are no log entries to read, blocks until log is written to
* - Atomically reads exactly one log entry
*
- * Optimal read size is LOGGER_ENTRY_MAX_LEN. Will set errno to EINVAL if read
+ * Will set errno to EINVAL if read
* buffer is insufficient to hold next entry.
*/
static ssize_t logger_read(struct file *file, char __user *buf,
@@ -219,6 +310,10 @@ start:
mutex_lock(&log->mutex);
+ if (!reader->r_all)
+ reader->r_off = get_next_entry_by_uid(log,
+ reader->r_off, current_euid());
+
/* is there still something to read or did we race? */
if (unlikely(log->w_off == reader->r_off)) {
mutex_unlock(&log->mutex);
@@ -226,7 +321,8 @@ start:
}
/* get the size of the next entry */
- ret = get_entry_len(log, reader->r_off);
+ ret = get_user_hdr_len(reader->r_ver) +
+ get_entry_msg_len(log, reader->r_off);
if (count < ret) {
ret = -EINVAL;
goto out;
@@ -252,7 +348,8 @@ static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
size_t count = 0;
do {
- size_t nr = get_entry_len(log, off);
+ size_t nr = sizeof(struct logger_entry) +
+ get_entry_msg_len(log, off);
off = logger_offset(log, off + nr);
count += nr;
} while (count < len);
@@ -382,7 +479,9 @@ static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
header.tid = current->pid;
header.sec = now.tv_sec;
header.nsec = now.tv_nsec;
+ header.euid = current_euid();
header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD);
+ header.hdr_size = sizeof(struct logger_entry);
/* null writes succeed, return zero */
if (unlikely(!header.len))
@@ -463,6 +562,10 @@ static int logger_open(struct inode *inode, struct file *file)
return -ENOMEM;
reader->log = log;
+ reader->r_ver = 1;
+ reader->r_all = in_egroup_p(inode->i_gid) ||
+ capable(CAP_SYSLOG);
+
INIT_LIST_HEAD(&reader->list);
mutex_lock(&log->mutex);
@@ -522,6 +625,10 @@ static unsigned int logger_poll(struct file *file, poll_table *wait)
poll_wait(file, &log->wq, wait);
mutex_lock(&log->mutex);
+ if (!reader->r_all)
+ reader->r_off = get_next_entry_by_uid(log,
+ reader->r_off, current_euid());
+
if (log->w_off != reader->r_off)
ret |= POLLIN | POLLRDNORM;
mutex_unlock(&log->mutex);
@@ -529,11 +636,25 @@ static unsigned int logger_poll(struct file *file, poll_table *wait)
return ret;
}
+static long logger_set_version(struct logger_reader *reader, void __user *arg)
+{
+ int version;
+ if (copy_from_user(&version, arg, sizeof(int)))
+ return -EFAULT;
+
+ if ((version < 1) || (version > 2))
+ return -EINVAL;
+
+ reader->r_ver = version;
+ return 0;
+}
+
static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct logger_log *log = file_get_log(file);
struct logger_reader *reader;
- long ret = -ENOTTY;
+ long ret = -EINVAL;
+ void __user *argp = (void __user *) arg;
mutex_lock(&log->mutex);
@@ -558,8 +679,14 @@ static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
}
reader = file->private_data;
+
+ if (!reader->r_all)
+ reader->r_off = get_next_entry_by_uid(log,
+ reader->r_off, current_euid());
+
if (log->w_off != reader->r_off)
- ret = get_entry_len(log, reader->r_off);
+ ret = get_user_hdr_len(reader->r_ver) +
+ get_entry_msg_len(log, reader->r_off);
else
ret = 0;
break;
@@ -568,11 +695,32 @@ static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ret = -EBADF;
break;
}
+ if (!(in_egroup_p(file->f_dentry->d_inode->i_gid) ||
+ capable(CAP_SYSLOG))) {
+ ret = -EPERM;
+ break;
+ }
list_for_each_entry(reader, &log->readers, list)
reader->r_off = log->w_off;
log->head = log->w_off;
ret = 0;
break;
+ case LOGGER_GET_VERSION:
+ if (!(file->f_mode & FMODE_READ)) {
+ ret = -EBADF;
+ break;
+ }
+ reader = file->private_data;
+ ret = reader->r_ver;
+ break;
+ case LOGGER_SET_VERSION:
+ if (!(file->f_mode & FMODE_READ)) {
+ ret = -EBADF;
+ break;
+ }
+ reader = file->private_data;
+ ret = logger_set_version(reader, argp);
+ break;
}
mutex_unlock(&log->mutex);
diff --git a/drivers/staging/android/logger.h b/drivers/staging/android/logger.h
index 9b929a8c7468..1eff9e631494 100644
--- a/drivers/staging/android/logger.h
+++ b/drivers/staging/android/logger.h
@@ -21,7 +21,7 @@
#include <linux/ioctl.h>
/**
- * struct logger_entry - defines a single entry that is given to a logger
+ * struct user_logger_entry_compat - defines a single entry that is given to a logger
* @len: The length of the payload
* @__pad: Two bytes of padding that appear to be required
* @pid: The generating process' process ID
@@ -29,8 +29,12 @@
* @sec: The number of seconds that have elapsed since the Epoch
* @nsec: The number of nanoseconds that have elapsed since @sec
* @msg: The message that is to be logged
+ *
+ * The userspace structure for version 1 of the logger_entry ABI.
+ * This structure is returned to userspace unless the caller requests
+ * an upgrade to a newer ABI version.
*/
-struct logger_entry {
+struct user_logger_entry_compat {
__u16 len;
__u16 __pad;
__s32 pid;
@@ -40,14 +44,38 @@ struct logger_entry {
char msg[0];
};
+/**
+ * struct logger_entry - defines a single entry that is given to a logger
+ * @len: The length of the payload
+ * @__pad: Two bytes of padding that appear to be required
+ * @pid: The generating process' process ID
+ * @tid: The generating process' thread ID
+ * @sec: The number of seconds that have elapsed since the Epoch
+ * @nsec: The number of nanoseconds that have elapsed since @sec
+ * @msg: The message that is to be logged
+ * @euid: The effective UID of logger
+ *
+ * The structure for version 2 of the logger_entry ABI.
+ * This structure is returned to userspace if ioctl(LOGGER_SET_VERSION)
+ * is called with version >= 2
+ */
+struct logger_entry {
+ __u16 len;
+ __u16 hdr_size;
+ __s32 pid;
+ __s32 tid;
+ __s32 sec;
+ __s32 nsec;
+ uid_t euid;
+ char msg[0];
+};
+
#define LOGGER_LOG_RADIO "log_radio" /* radio-related messages */
#define LOGGER_LOG_EVENTS "log_events" /* system/hardware events */
#define LOGGER_LOG_SYSTEM "log_system" /* system/framework messages */
#define LOGGER_LOG_MAIN "log_main" /* everything else */
-#define LOGGER_ENTRY_MAX_LEN (4*1024)
-#define LOGGER_ENTRY_MAX_PAYLOAD \
- (LOGGER_ENTRY_MAX_LEN - sizeof(struct logger_entry))
+#define LOGGER_ENTRY_MAX_PAYLOAD 4076
#define __LOGGERIO 0xAE
@@ -55,5 +83,7 @@ struct logger_entry {
#define LOGGER_GET_LOG_LEN _IO(__LOGGERIO, 2) /* used log len */
#define LOGGER_GET_NEXT_ENTRY_LEN _IO(__LOGGERIO, 3) /* next entry len */
#define LOGGER_FLUSH_LOG _IO(__LOGGERIO, 4) /* flush log */
+#define LOGGER_GET_VERSION _IO(__LOGGERIO, 5) /* abi version */
+#define LOGGER_SET_VERSION _IO(__LOGGERIO, 6) /* abi version */
#endif /* _LINUX_LOGGER_H */
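The header above defines both the legacy layout (user_logger_entry_compat) and the v2 layout (logger_entry), selected per reader through LOGGER_SET_VERSION and queried with LOGGER_GET_VERSION. A hedged userspace sketch follows; /dev/log/main is assumed to be the conventional node for LOGGER_LOG_MAIN, and including this header directly is likewise an assumption of the example.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <unistd.h>
#include "logger.h"

int main(void)
{
	int fd = open("/dev/log/main", O_RDONLY);
	int ver = 2;
	char buf[5 * 1024];		/* larger than any single entry */

	if (fd < 0)
		return 1;
	if (ioctl(fd, LOGGER_SET_VERSION, &ver) == 0) {
		ssize_t n = read(fd, buf, sizeof(buf));	/* one entry per read() */

		if (n >= (ssize_t)sizeof(struct logger_entry)) {
			struct logger_entry *e = (struct logger_entry *)buf;

			printf("pid=%d euid=%u len=%u\n",
			       e->pid, (unsigned)e->euid, (unsigned)e->len);
		}
	}
	close(fd);
	return 0;
}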
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 3b91b0fd4de3..fbab67d3ecdd 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -35,6 +35,7 @@
#include <linux/mm.h>
#include <linux/oom.h>
#include <linux/sched.h>
+#include <linux/swap.h>
#include <linux/rcupdate.h>
#include <linux/profile.h>
#include <linux/notifier.h>
@@ -74,7 +75,7 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
int selected_tasksize = 0;
short selected_oom_score_adj;
int array_size = ARRAY_SIZE(lowmem_adj);
- int other_free = global_page_state(NR_FREE_PAGES);
+ int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
int other_file = global_page_state(NR_FILE_PAGES) -
global_page_state(NR_SHMEM);
@@ -175,9 +176,94 @@ static void __exit lowmem_exit(void)
unregister_shrinker(&lowmem_shrinker);
}
+#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
+static int lowmem_oom_adj_to_oom_score_adj(int oom_adj)
+{
+ if (oom_adj == OOM_ADJUST_MAX)
+ return OOM_SCORE_ADJ_MAX;
+ else
+ return (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
+}
+
+static void lowmem_autodetect_oom_adj_values(void)
+{
+ int i;
+ int oom_adj;
+ int oom_score_adj;
+ int array_size = ARRAY_SIZE(lowmem_adj);
+
+ if (lowmem_adj_size < array_size)
+ array_size = lowmem_adj_size;
+
+ if (array_size <= 0)
+ return;
+
+ oom_adj = lowmem_adj[array_size - 1];
+ if (oom_adj > OOM_ADJUST_MAX)
+ return;
+
+ oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj);
+ if (oom_score_adj <= OOM_ADJUST_MAX)
+ return;
+
+ lowmem_print(1, "lowmem_shrink: convert oom_adj to oom_score_adj:\n");
+ for (i = 0; i < array_size; i++) {
+ oom_adj = lowmem_adj[i];
+ oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj);
+ lowmem_adj[i] = oom_score_adj;
+ lowmem_print(1, "oom_adj %d => oom_score_adj %d\n",
+ oom_adj, oom_score_adj);
+ }
+}
+
+static int lowmem_adj_array_set(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_array_ops.set(val, kp);
+
+ /* HACK: Autodetect oom_adj values in lowmem_adj array */
+ lowmem_autodetect_oom_adj_values();
+
+ return ret;
+}
+
+static int lowmem_adj_array_get(char *buffer, const struct kernel_param *kp)
+{
+ return param_array_ops.get(buffer, kp);
+}
+
+static void lowmem_adj_array_free(void *arg)
+{
+ param_array_ops.free(arg);
+}
+
+static struct kernel_param_ops lowmem_adj_array_ops = {
+ .set = lowmem_adj_array_set,
+ .get = lowmem_adj_array_get,
+ .free = lowmem_adj_array_free,
+};
+
+static const struct kparam_array __param_arr_adj = {
+ .max = ARRAY_SIZE(lowmem_adj),
+ .num = &lowmem_adj_size,
+ .ops = &param_ops_short,
+ .elemsize = sizeof(lowmem_adj[0]),
+ .elem = lowmem_adj,
+};
+#endif
+
module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
+#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
+__module_param_call(MODULE_PARAM_PREFIX, adj,
+ &lowmem_adj_array_ops,
+ .arr = &__param_arr_adj,
+ S_IRUGO | S_IWUSR, -1);
+__MODULE_PARM_TYPE(adj, "array of short");
+#else
module_param_array_named(adj, lowmem_adj, short, &lowmem_adj_size,
S_IRUGO | S_IWUSR);
+#endif
module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
S_IRUGO | S_IWUSR);
module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
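The autodetect hook above only rescales the adj array when its largest element is still within the legacy oom_adj range but, once converted, would land beyond it, i.e. when the values written by userspace still look like oom_adj units. A standalone sketch of the scaling itself, with constants copied from include/uapi/linux/oom.h and an illustrative (not default) set of legacy values:

/* Standalone sketch of the oom_adj -> oom_score_adj scaling used above. */
#include <stdio.h>

#define OOM_DISABLE       (-17)
#define OOM_ADJUST_MAX      15
#define OOM_SCORE_ADJ_MAX 1000

static int oom_adj_to_oom_score_adj(int oom_adj)
{
	if (oom_adj == OOM_ADJUST_MAX)
		return OOM_SCORE_ADJ_MAX;
	return (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
}

int main(void)
{
	/* illustrative legacy oom_adj values */
	int legacy[] = { 0, 1, 6, 12, 15 };
	for (unsigned i = 0; i < sizeof(legacy) / sizeof(legacy[0]); i++)
		printf("oom_adj %2d -> oom_score_adj %4d\n",
		       legacy[i], oom_adj_to_oom_score_adj(legacy[i]));
	return 0;	/* prints 0, 58, 352, 705, 1000 */
}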
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index 7de2a10213bd..36eec320569c 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -444,6 +444,7 @@ config COMEDI_ADQ12B
config COMEDI_NI_AT_A2150
tristate "NI AT-A2150 ISA card support"
+ select COMEDI_FC
depends on VIRT_TO_BUS
---help---
Enable support for National Instruments AT-A2150 cards
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index b7bba1790a20..9b038e4a7e71 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -1549,6 +1549,9 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
if (cmd == COMEDI_DEVCONFIG) {
rc = do_devconfig_ioctl(dev,
(struct comedi_devconfig __user *)arg);
+ if (rc == 0)
+ /* Evade comedi_auto_unconfig(). */
+ dev_file_info->hardware_device = NULL;
goto done;
}
diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
index fb3d09323ba1..01de996239f1 100644
--- a/drivers/staging/comedi/drivers/comedi_test.c
+++ b/drivers/staging/comedi/drivers/comedi_test.c
@@ -345,7 +345,7 @@ static int waveform_ai_cancel(struct comedi_device *dev,
struct waveform_private *devpriv = dev->private;
devpriv->timer_running = 0;
- del_timer(&devpriv->timer);
+ del_timer_sync(&devpriv->timer);
return 0;
}
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index aaac0b2cc9eb..fd1662b4175d 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -963,7 +963,7 @@ static const struct ni_board_struct ni_boards[] = {
.ao_range_table = &range_ni_M_625x_ao,
.reg_type = ni_reg_625x,
.ao_unipolar = 0,
- .ao_speed = 357,
+ .ao_speed = 350,
.num_p0_dio_channels = 8,
.caldac = {caldac_none},
.has_8255 = 0,
@@ -982,7 +982,7 @@ static const struct ni_board_struct ni_boards[] = {
.ao_range_table = &range_ni_M_625x_ao,
.reg_type = ni_reg_625x,
.ao_unipolar = 0,
- .ao_speed = 357,
+ .ao_speed = 350,
.num_p0_dio_channels = 8,
.caldac = {caldac_none},
.has_8255 = 0,
@@ -1001,7 +1001,7 @@ static const struct ni_board_struct ni_boards[] = {
.ao_range_table = &range_ni_M_625x_ao,
.reg_type = ni_reg_625x,
.ao_unipolar = 0,
- .ao_speed = 357,
+ .ao_speed = 350,
.num_p0_dio_channels = 8,
.caldac = {caldac_none},
.has_8255 = 0,
@@ -1037,7 +1037,7 @@ static const struct ni_board_struct ni_boards[] = {
.ao_range_table = &range_ni_M_625x_ao,
.reg_type = ni_reg_625x,
.ao_unipolar = 0,
- .ao_speed = 357,
+ .ao_speed = 350,
.num_p0_dio_channels = 32,
.caldac = {caldac_none},
.has_8255 = 0,
@@ -1056,7 +1056,7 @@ static const struct ni_board_struct ni_boards[] = {
.ao_range_table = &range_ni_M_625x_ao,
.reg_type = ni_reg_625x,
.ao_unipolar = 0,
- .ao_speed = 357,
+ .ao_speed = 350,
.num_p0_dio_channels = 32,
.caldac = {caldac_none},
.has_8255 = 0,
@@ -1092,7 +1092,7 @@ static const struct ni_board_struct ni_boards[] = {
.ao_range_table = &range_ni_M_628x_ao,
.reg_type = ni_reg_628x,
.ao_unipolar = 1,
- .ao_speed = 357,
+ .ao_speed = 350,
.num_p0_dio_channels = 8,
.caldac = {caldac_none},
.has_8255 = 0,
@@ -1111,7 +1111,7 @@ static const struct ni_board_struct ni_boards[] = {
.ao_range_table = &range_ni_M_628x_ao,
.reg_type = ni_reg_628x,
.ao_unipolar = 1,
- .ao_speed = 357,
+ .ao_speed = 350,
.num_p0_dio_channels = 8,
.caldac = {caldac_none},
.has_8255 = 0,
@@ -1147,7 +1147,7 @@ static const struct ni_board_struct ni_boards[] = {
.ao_range_table = &range_ni_M_628x_ao,
.reg_type = ni_reg_628x,
.ao_unipolar = 1,
- .ao_speed = 357,
+ .ao_speed = 350,
.num_p0_dio_channels = 32,
.caldac = {caldac_none},
.has_8255 = 0,
diff --git a/drivers/staging/fwserial/Kconfig b/drivers/staging/fwserial/Kconfig
index 580406cb1808..b2f8331e4acf 100644
--- a/drivers/staging/fwserial/Kconfig
+++ b/drivers/staging/fwserial/Kconfig
@@ -3,7 +3,9 @@ config FIREWIRE_SERIAL
depends on FIREWIRE
help
This enables TTY over IEEE 1394, providing high-speed serial
- connectivity to cabled peers.
+ connectivity to cabled peers. This driver implements an
+ ad-hoc transport protocol and is currently limited to
+ Linux-to-Linux communication.
To compile this driver as a module, say M here: the module will
be called firewire-serial.
diff --git a/drivers/staging/fwserial/TODO b/drivers/staging/fwserial/TODO
index 726900548eae..8dae8fb25223 100644
--- a/drivers/staging/fwserial/TODO
+++ b/drivers/staging/fwserial/TODO
@@ -1,5 +1,5 @@
-TODOs
------
+TODOs prior to this driver moving out of staging
+------------------------------------------------
1. Implement retries for RCODE_BUSY, RCODE_NO_ACK and RCODE_SEND_ERROR
- I/O is handled asynchronously which presents some issues when error
conditions occur.
@@ -11,17 +11,9 @@ TODOs
-- Issues with firewire stack --
1. This driver uses the same unregistered vendor id that the firewire core does
(0xd00d1e). Perhaps this could be exposed as a define in
- firewire-constants.h?
-2. MAX_ASYNC_PAYLOAD needs to be publicly exposed by core/ohci
- - otherwise how will this driver know the max size of address window to
- open for one packet write?
+ firewire.h?
3. Maybe device_max_receive() and link_speed_to_max_payload() should be
taken up by the firewire core?
-4. To avoid dropping rx data while still limiting the maximum buffering,
- the size of the AR context must be known. How to expose this to drivers?
-5. Explore if bigger AR context will reduce RCODE_BUSY responses
- (or auto-grow to certain max size -- but this would require major surgery
- as the current AR is contiguously mapped)
-- Issues with TTY core --
1. Hack for alternate device name scheme
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index 61ee29083b26..d03a7f57e8d4 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -179,7 +179,7 @@ static void dump_profile(struct seq_file *m, struct stats *stats)
/* Returns the max receive packet size for the given card */
static inline int device_max_receive(struct fw_device *fw_device)
{
- return 1 << (clamp_t(int, fw_device->max_rec, 8U, 13U) + 1);
+ return 1 << (clamp_t(int, fw_device->max_rec, 8U, 11U) + 1);
}
static void fwtty_log_tx_error(struct fwtty_port *port, int rcode)
diff --git a/drivers/staging/fwserial/fwserial.h b/drivers/staging/fwserial/fwserial.h
index 8b572edf9563..caa1c1ea82d5 100644
--- a/drivers/staging/fwserial/fwserial.h
+++ b/drivers/staging/fwserial/fwserial.h
@@ -374,10 +374,10 @@ static inline void fwtty_bind_console(struct fwtty_port *port,
*/
static inline int link_speed_to_max_payload(unsigned speed)
{
- static const int max_async[] = { 307, 614, 1229, 2458, 4916, 9832, };
- BUILD_BUG_ON(ARRAY_SIZE(max_async) - 1 != SCODE_3200);
+ static const int max_async[] = { 307, 614, 1229, 2458, };
+ BUILD_BUG_ON(ARRAY_SIZE(max_async) - 1 != SCODE_800);
- speed = clamp(speed, (unsigned) SCODE_100, (unsigned) SCODE_3200);
+ speed = clamp(speed, (unsigned) SCODE_100, (unsigned) SCODE_800);
if (limit_bw)
return max_async[speed];
else
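Taken together, the two changes cap a single packet write at a 4 KiB address window (max_rec clamped to 11) and cap the bandwidth-limited async payload at the S800 table entry. A standalone sketch of both calculations, covering only the limit_bw branch shown above; the SCODE_* values mirror linux/firewire-constants.h:

/* Standalone check of the receive-window and per-speed payload caps. */
#include <stdio.h>

enum { SCODE_100, SCODE_200, SCODE_400, SCODE_800 };

static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* 1 << (max_rec + 1), with max_rec clamped to [8, 11] -> 512..4096 bytes */
static int device_max_receive(int max_rec)
{
	return 1 << (clamp_int(max_rec, 8, 11) + 1);
}

/* bandwidth-limited async payload per link speed, S100..S800 */
static int link_speed_to_max_payload(int speed)
{
	static const int max_async[] = { 307, 614, 1229, 2458 };

	return max_async[clamp_int(speed, SCODE_100, SCODE_800)];
}

int main(void)
{
	printf("max_rec=13 -> window  %d bytes\n", device_max_receive(13));
	printf("S800      -> payload %d bytes\n",
	       link_speed_to_max_payload(SCODE_800));
	return 0;	/* 4096 and 2458 */
}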
diff --git a/drivers/staging/iio/gyro/Kconfig b/drivers/staging/iio/gyro/Kconfig
index ea295b25308c..87979a0d03a9 100644
--- a/drivers/staging/iio/gyro/Kconfig
+++ b/drivers/staging/iio/gyro/Kconfig
@@ -27,8 +27,8 @@ config ADIS16130
config ADIS16260
tristate "Analog Devices ADIS16260 Digital Gyroscope Sensor SPI driver"
depends on SPI
- select IIO_TRIGGER if IIO_BUFFER
- select IIO_SW_RING if IIO_BUFFER
+ select IIO_ADIS_LIB
+ select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
help
Say yes here to build support for Analog Devices ADIS16260 ADIS16265
ADIS16250 ADIS16255 and ADIS16251 programmable digital gyroscope sensors.
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index ecf0f44bc70e..cec19f1cf56c 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -584,7 +584,6 @@ int imx_drm_add_encoder(struct drm_encoder *encoder,
ret = imx_drm_encoder_register(imx_drm_encoder);
if (ret) {
- kfree(imx_drm_encoder);
ret = -ENOMEM;
goto err_register;
}
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-common.c b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
index 677e665ca86d..f7059cddd7fd 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-common.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
@@ -1104,7 +1104,9 @@ static int ipu_probe(struct platform_device *pdev)
if (ret)
goto out_failed_irq;
- ipu_reset(ipu);
+ ret = ipu_reset(ipu);
+ if (ret)
+ goto out_failed_reset;
/* Set MCU_T to divide MCU access window into 2 */
ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
@@ -1129,6 +1131,7 @@ failed_add_clients:
ipu_submodules_exit(ipu);
failed_submodules_init:
ipu_irq_exit(ipu);
+out_failed_reset:
out_failed_irq:
clk_disable_unprepare(ipu->clk);
failed_clk_get:
diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/staging/imx-drm/ipuv3-crtc.c
index 1892006526b5..4b3a019409b5 100644
--- a/drivers/staging/imx-drm/ipuv3-crtc.c
+++ b/drivers/staging/imx-drm/ipuv3-crtc.c
@@ -452,7 +452,7 @@ static int ipu_get_resources(struct ipu_crtc *ipu_crtc,
int ret;
ipu_crtc->ipu_ch = ipu_idmac_get(ipu, pdata->dma[0]);
- if (IS_ERR_OR_NULL(ipu_crtc->ipu_ch)) {
+ if (IS_ERR(ipu_crtc->ipu_ch)) {
ret = PTR_ERR(ipu_crtc->ipu_ch);
goto err_out;
}
@@ -472,7 +472,7 @@ static int ipu_get_resources(struct ipu_crtc *ipu_crtc,
if (pdata->dp >= 0) {
ipu_crtc->dp = ipu_dp_get(ipu, pdata->dp);
if (IS_ERR(ipu_crtc->dp)) {
- ret = PTR_ERR(ipu_crtc->ipu_ch);
+ ret = PTR_ERR(ipu_crtc->dp);
goto err_out;
}
}
@@ -548,6 +548,8 @@ static int ipu_drm_probe(struct platform_device *pdev)
ipu_crtc->dev = &pdev->dev;
ret = ipu_crtc_init(ipu_crtc, pdata);
+ if (ret)
+ return ret;
platform_set_drvdata(pdev, ipu_crtc);
diff --git a/drivers/staging/omapdrm/Makefile b/drivers/staging/omapdrm/Makefile
index 1ca0e0016de4..d85e058f2845 100644
--- a/drivers/staging/omapdrm/Makefile
+++ b/drivers/staging/omapdrm/Makefile
@@ -5,6 +5,7 @@
ccflags-y := -Iinclude/drm -Werror
omapdrm-y := omap_drv.o \
+ omap_irq.o \
omap_debugfs.o \
omap_crtc.o \
omap_plane.o \
diff --git a/drivers/staging/omapdrm/TODO b/drivers/staging/omapdrm/TODO
index 938c7888ca31..abeeb00aaa12 100644
--- a/drivers/staging/omapdrm/TODO
+++ b/drivers/staging/omapdrm/TODO
@@ -17,9 +17,6 @@ TODO
. Revisit GEM sync object infrastructure.. TTM has some framework for this
already. Possibly this could be refactored out and made more common?
There should be some way to do this with less wheel-reinvention.
-. Review DSS vs KMS mismatches. The omap_dss_device is sort of part encoder,
- part connector. Which results in a bit of duct tape to fwd calls from
- encoder to connector. Possibly this could be done a bit better.
. Solve PM sequencing on resume. DMM/TILER must be reloaded before any
access is made from any component in the system. Which means on suspend
CRTC's should be disabled, and on resume the LUT should be reprogrammed
diff --git a/drivers/staging/omapdrm/omap_connector.c b/drivers/staging/omapdrm/omap_connector.c
index 91edb3f96972..4cc9ee733c5f 100644
--- a/drivers/staging/omapdrm/omap_connector.c
+++ b/drivers/staging/omapdrm/omap_connector.c
@@ -31,9 +31,10 @@
struct omap_connector {
struct drm_connector base;
struct omap_dss_device *dssdev;
+ struct drm_encoder *encoder;
};
-static inline void copy_timings_omap_to_drm(struct drm_display_mode *mode,
+void copy_timings_omap_to_drm(struct drm_display_mode *mode,
struct omap_video_timings *timings)
{
mode->clock = timings->pixel_clock;
@@ -64,7 +65,7 @@ static inline void copy_timings_omap_to_drm(struct drm_display_mode *mode,
mode->flags |= DRM_MODE_FLAG_NVSYNC;
}
-static inline void copy_timings_drm_to_omap(struct omap_video_timings *timings,
+void copy_timings_drm_to_omap(struct omap_video_timings *timings,
struct drm_display_mode *mode)
{
timings->pixel_clock = mode->clock;
@@ -96,48 +97,7 @@ static inline void copy_timings_drm_to_omap(struct omap_video_timings *timings,
timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES;
}
-static void omap_connector_dpms(struct drm_connector *connector, int mode)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev = omap_connector->dssdev;
- int old_dpms;
-
- DBG("%s: %d", dssdev->name, mode);
-
- old_dpms = connector->dpms;
-
- /* from off to on, do from crtc to connector */
- if (mode < old_dpms)
- drm_helper_connector_dpms(connector, mode);
-
- if (mode == DRM_MODE_DPMS_ON) {
- /* store resume info for suspended displays */
- switch (dssdev->state) {
- case OMAP_DSS_DISPLAY_SUSPENDED:
- dssdev->activate_after_resume = true;
- break;
- case OMAP_DSS_DISPLAY_DISABLED: {
- int ret = dssdev->driver->enable(dssdev);
- if (ret) {
- DBG("%s: failed to enable: %d",
- dssdev->name, ret);
- dssdev->driver->disable(dssdev);
- }
- break;
- }
- default:
- break;
- }
- } else {
- /* TODO */
- }
-
- /* from on to off, do from connector to crtc */
- if (mode > old_dpms)
- drm_helper_connector_dpms(connector, mode);
-}
-
-enum drm_connector_status omap_connector_detect(
+static enum drm_connector_status omap_connector_detect(
struct drm_connector *connector, bool force)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
@@ -164,8 +124,6 @@ static void omap_connector_destroy(struct drm_connector *connector)
struct omap_connector *omap_connector = to_omap_connector(connector);
struct omap_dss_device *dssdev = omap_connector->dssdev;
- dssdev->driver->disable(dssdev);
-
DBG("%s", omap_connector->dssdev->name);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
@@ -261,36 +219,12 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
struct drm_encoder *omap_connector_attached_encoder(
struct drm_connector *connector)
{
- int i;
struct omap_connector *omap_connector = to_omap_connector(connector);
-
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- struct drm_mode_object *obj;
-
- if (connector->encoder_ids[i] == 0)
- break;
-
- obj = drm_mode_object_find(connector->dev,
- connector->encoder_ids[i],
- DRM_MODE_OBJECT_ENCODER);
-
- if (obj) {
- struct drm_encoder *encoder = obj_to_encoder(obj);
- struct omap_overlay_manager *mgr =
- omap_encoder_get_manager(encoder);
- DBG("%s: found %s", omap_connector->dssdev->name,
- mgr->name);
- return encoder;
- }
- }
-
- DBG("%s: no encoder", omap_connector->dssdev->name);
-
- return NULL;
+ return omap_connector->encoder;
}
static const struct drm_connector_funcs omap_connector_funcs = {
- .dpms = omap_connector_dpms,
+ .dpms = drm_helper_connector_dpms,
.detect = omap_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = omap_connector_destroy,
@@ -302,34 +236,6 @@ static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
.best_encoder = omap_connector_attached_encoder,
};
-/* called from encoder when mode is set, to propagate settings to the dssdev */
-void omap_connector_mode_set(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = connector->dev;
- struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev = omap_connector->dssdev;
- struct omap_dss_driver *dssdrv = dssdev->driver;
- struct omap_video_timings timings = {0};
-
- copy_timings_drm_to_omap(&timings, mode);
-
- DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
- omap_connector->dssdev->name,
- mode->base.id, mode->name, mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal, mode->type, mode->flags);
-
- if (dssdrv->check_timings(dssdev, &timings)) {
- dev_err(dev->dev, "could not set timings\n");
- return;
- }
-
- dssdrv->set_timings(dssdev, &timings);
-}
-
/* flush an area of the framebuffer (in case of manual update display that
* is not automatically flushed)
*/
@@ -344,7 +250,8 @@ void omap_connector_flush(struct drm_connector *connector,
/* initialize connector */
struct drm_connector *omap_connector_init(struct drm_device *dev,
- int connector_type, struct omap_dss_device *dssdev)
+ int connector_type, struct omap_dss_device *dssdev,
+ struct drm_encoder *encoder)
{
struct drm_connector *connector = NULL;
struct omap_connector *omap_connector;
@@ -360,6 +267,8 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
}
omap_connector->dssdev = dssdev;
+ omap_connector->encoder = encoder;
+
connector = &omap_connector->base;
drm_connector_init(dev, connector, &omap_connector_funcs,
diff --git a/drivers/staging/omapdrm/omap_crtc.c b/drivers/staging/omapdrm/omap_crtc.c
index d87bd84257bd..5c6ed6040eff 100644
--- a/drivers/staging/omapdrm/omap_crtc.c
+++ b/drivers/staging/omapdrm/omap_crtc.c
@@ -28,19 +28,131 @@
struct omap_crtc {
struct drm_crtc base;
struct drm_plane *plane;
+
const char *name;
- int id;
+ int pipe;
+ enum omap_channel channel;
+ struct omap_overlay_manager_info info;
+
+ /*
+ * Temporary: eventually this will go away, but it is needed
+ * for now to keep the output's happy. (They only need
+ * mgr->id.) Eventually this will be replaced w/ something
+ * more common-panel-framework-y
+ */
+ struct omap_overlay_manager mgr;
+
+ struct omap_video_timings timings;
+ bool enabled;
+ bool full_update;
+
+ struct omap_drm_apply apply;
+
+ struct omap_drm_irq apply_irq;
+ struct omap_drm_irq error_irq;
+
+ /* list of in-progress applies: */
+ struct list_head pending_applies;
+
+ /* list of queued applies: */
+ struct list_head queued_applies;
+
+ /* for handling queued and in-progress applies: */
+ struct work_struct apply_work;
/* if there is a pending flip, these will be non-null: */
struct drm_pending_vblank_event *event;
struct drm_framebuffer *old_fb;
+
+ /* for handling page flips without caring about what
+ * the callback is called from. Possibly we should just
+ * make omap_gem always call the cb from the worker so
+ * we don't have to care about this..
+ *
+ * XXX maybe fold into apply_work??
+ */
+ struct work_struct page_flip_work;
+};
+
+/*
+ * Manager-ops, callbacks from output when they need to configure
+ * the upstream part of the video pipe.
+ *
+ * Most of these we can ignore until we add support for command-mode
+ * panels.. for video-mode the crtc-helpers already do an adequate
+ * job of sequencing the setup of the video pipe in the proper order
+ */
+
+/* we can probably ignore these until we support command-mode panels: */
+static void omap_crtc_start_update(struct omap_overlay_manager *mgr)
+{
+}
+
+static int omap_crtc_enable(struct omap_overlay_manager *mgr)
+{
+ return 0;
+}
+
+static void omap_crtc_disable(struct omap_overlay_manager *mgr)
+{
+}
+
+static void omap_crtc_set_timings(struct omap_overlay_manager *mgr,
+ const struct omap_video_timings *timings)
+{
+ struct omap_crtc *omap_crtc = container_of(mgr, struct omap_crtc, mgr);
+ DBG("%s", omap_crtc->name);
+ omap_crtc->timings = *timings;
+ omap_crtc->full_update = true;
+}
+
+static void omap_crtc_set_lcd_config(struct omap_overlay_manager *mgr,
+ const struct dss_lcd_mgr_config *config)
+{
+ struct omap_crtc *omap_crtc = container_of(mgr, struct omap_crtc, mgr);
+ DBG("%s", omap_crtc->name);
+ dispc_mgr_set_lcd_config(omap_crtc->channel, config);
+}
+
+static int omap_crtc_register_framedone_handler(
+ struct omap_overlay_manager *mgr,
+ void (*handler)(void *), void *data)
+{
+ return 0;
+}
+
+static void omap_crtc_unregister_framedone_handler(
+ struct omap_overlay_manager *mgr,
+ void (*handler)(void *), void *data)
+{
+}
+
+static const struct dss_mgr_ops mgr_ops = {
+ .start_update = omap_crtc_start_update,
+ .enable = omap_crtc_enable,
+ .disable = omap_crtc_disable,
+ .set_timings = omap_crtc_set_timings,
+ .set_lcd_config = omap_crtc_set_lcd_config,
+ .register_framedone_handler = omap_crtc_register_framedone_handler,
+ .unregister_framedone_handler = omap_crtc_unregister_framedone_handler,
};
+/*
+ * CRTC funcs:
+ */
+
static void omap_crtc_destroy(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+ DBG("%s", omap_crtc->name);
+
+ WARN_ON(omap_crtc->apply_irq.registered);
+ omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+
omap_crtc->plane->funcs->destroy(omap_crtc->plane);
drm_crtc_cleanup(crtc);
+
kfree(omap_crtc);
}
@@ -48,14 +160,25 @@ static void omap_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ bool enabled = (mode == DRM_MODE_DPMS_ON);
int i;
- WARN_ON(omap_plane_dpms(omap_crtc->plane, mode));
+ DBG("%s: %d", omap_crtc->name, mode);
+
+ if (enabled != omap_crtc->enabled) {
+ omap_crtc->enabled = enabled;
+ omap_crtc->full_update = true;
+ omap_crtc_apply(crtc, &omap_crtc->apply);
- for (i = 0; i < priv->num_planes; i++) {
- struct drm_plane *plane = priv->planes[i];
- if (plane->crtc == crtc)
- WARN_ON(omap_plane_dpms(plane, mode));
+ /* also enable our private plane: */
+ WARN_ON(omap_plane_dpms(omap_crtc->plane, mode));
+
+ /* and any attached overlay planes: */
+ for (i = 0; i < priv->num_planes; i++) {
+ struct drm_plane *plane = priv->planes[i];
+ if (plane->crtc == crtc)
+ WARN_ON(omap_plane_dpms(plane, mode));
+ }
}
}
@@ -73,12 +196,26 @@ static int omap_crtc_mode_set(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- struct drm_plane *plane = omap_crtc->plane;
- return omap_plane_mode_set(plane, crtc, crtc->fb,
+ mode = adjusted_mode;
+
+ DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ omap_crtc->name, mode->base.id, mode->name,
+ mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+
+ copy_timings_drm_to_omap(&omap_crtc->timings, mode);
+ omap_crtc->full_update = true;
+
+ return omap_plane_mode_set(omap_crtc->plane, crtc, crtc->fb,
0, 0, mode->hdisplay, mode->vdisplay,
x << 16, y << 16,
- mode->hdisplay << 16, mode->vdisplay << 16);
+ mode->hdisplay << 16, mode->vdisplay << 16,
+ NULL, NULL);
}
static void omap_crtc_prepare(struct drm_crtc *crtc)
@@ -102,10 +239,11 @@ static int omap_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_plane *plane = omap_crtc->plane;
struct drm_display_mode *mode = &crtc->mode;
- return plane->funcs->update_plane(plane, crtc, crtc->fb,
+ return omap_plane_mode_set(plane, crtc, crtc->fb,
0, 0, mode->hdisplay, mode->vdisplay,
x << 16, y << 16,
- mode->hdisplay << 16, mode->vdisplay << 16);
+ mode->hdisplay << 16, mode->vdisplay << 16,
+ NULL, NULL);
}
static void omap_crtc_load_lut(struct drm_crtc *crtc)
@@ -114,63 +252,54 @@ static void omap_crtc_load_lut(struct drm_crtc *crtc)
static void vblank_cb(void *arg)
{
- static uint32_t sequence;
struct drm_crtc *crtc = arg;
struct drm_device *dev = crtc->dev;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- struct drm_pending_vblank_event *event = omap_crtc->event;
unsigned long flags;
- struct timeval now;
- WARN_ON(!event);
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ /* wakeup userspace */
+ if (omap_crtc->event)
+ drm_send_vblank_event(dev, omap_crtc->pipe, omap_crtc->event);
omap_crtc->event = NULL;
+ omap_crtc->old_fb = NULL;
- /* wakeup userspace */
- if (event) {
- do_gettimeofday(&now);
-
- spin_lock_irqsave(&dev->event_lock, flags);
- /* TODO: we can't yet use the vblank time accounting,
- * because omapdss lower layer is the one that knows
- * the irq # and registers the handler, which more or
- * less defeats how drm_irq works.. for now just fake
- * the sequence number and use gettimeofday..
- *
- event->event.sequence = drm_vblank_count_and_time(
- dev, omap_crtc->id, &now);
- */
- event->event.sequence = sequence++;
- event->event.tv_sec = now.tv_sec;
- event->event.tv_usec = now.tv_usec;
- list_add_tail(&event->base.link,
- &event->base.file_priv->event_list);
- wake_up_interruptible(&event->base.file_priv->event_wait);
- spin_unlock_irqrestore(&dev->event_lock, flags);
- }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
}
-static void page_flip_cb(void *arg)
+static void page_flip_worker(struct work_struct *work)
{
- struct drm_crtc *crtc = arg;
- struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- struct drm_framebuffer *old_fb = omap_crtc->old_fb;
+ struct omap_crtc *omap_crtc =
+ container_of(work, struct omap_crtc, page_flip_work);
+ struct drm_crtc *crtc = &omap_crtc->base;
+ struct drm_device *dev = crtc->dev;
+ struct drm_display_mode *mode = &crtc->mode;
struct drm_gem_object *bo;
- omap_crtc->old_fb = NULL;
-
- omap_crtc_mode_set_base(crtc, crtc->x, crtc->y, old_fb);
-
- /* really we'd like to setup the callback atomically w/ setting the
- * new scanout buffer to avoid getting stuck waiting an extra vblank
- * cycle.. for now go for correctness and later figure out speed..
- */
- omap_plane_on_endwin(omap_crtc->plane, vblank_cb, crtc);
+ mutex_lock(&dev->mode_config.mutex);
+ omap_plane_mode_set(omap_crtc->plane, crtc, crtc->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ crtc->x << 16, crtc->y << 16,
+ mode->hdisplay << 16, mode->vdisplay << 16,
+ vblank_cb, crtc);
+ mutex_unlock(&dev->mode_config.mutex);
bo = omap_framebuffer_bo(crtc->fb, 0);
drm_gem_object_unreference_unlocked(bo);
}
+static void page_flip_cb(void *arg)
+{
+ struct drm_crtc *crtc = arg;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct omap_drm_private *priv = crtc->dev->dev_private;
+
+ /* avoid assumptions about what ctxt we are called from: */
+ queue_work(priv->wq, &omap_crtc->page_flip_work);
+}
+
static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event)
@@ -179,14 +308,14 @@ static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
struct drm_gem_object *bo;
- DBG("%d -> %d", crtc->fb ? crtc->fb->base.id : -1, fb->base.id);
+ DBG("%d -> %d (event=%p)", crtc->fb ? crtc->fb->base.id : -1,
+ fb->base.id, event);
- if (omap_crtc->event) {
+ if (omap_crtc->old_fb) {
dev_err(dev->dev, "already a pending flip\n");
return -EINVAL;
}
- omap_crtc->old_fb = crtc->fb;
omap_crtc->event = event;
crtc->fb = fb;
@@ -234,14 +363,244 @@ static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
.load_lut = omap_crtc_load_lut,
};
+const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ return &omap_crtc->timings;
+}
+
+enum omap_channel omap_crtc_channel(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ return omap_crtc->channel;
+}
+
+static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+ struct omap_crtc *omap_crtc =
+ container_of(irq, struct omap_crtc, error_irq);
+ struct drm_crtc *crtc = &omap_crtc->base;
+ DRM_ERROR("%s: errors: %08x\n", omap_crtc->name, irqstatus);
+ /* avoid getting in a flood, unregister the irq until next vblank */
+ omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+}
+
+static void omap_crtc_apply_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+ struct omap_crtc *omap_crtc =
+ container_of(irq, struct omap_crtc, apply_irq);
+ struct drm_crtc *crtc = &omap_crtc->base;
+
+ if (!omap_crtc->error_irq.registered)
+ omap_irq_register(crtc->dev, &omap_crtc->error_irq);
+
+ if (!dispc_mgr_go_busy(omap_crtc->channel)) {
+ struct omap_drm_private *priv =
+ crtc->dev->dev_private;
+ DBG("%s: apply done", omap_crtc->name);
+ omap_irq_unregister(crtc->dev, &omap_crtc->apply_irq);
+ queue_work(priv->wq, &omap_crtc->apply_work);
+ }
+}
+
+static void apply_worker(struct work_struct *work)
+{
+ struct omap_crtc *omap_crtc =
+ container_of(work, struct omap_crtc, apply_work);
+ struct drm_crtc *crtc = &omap_crtc->base;
+ struct drm_device *dev = crtc->dev;
+ struct omap_drm_apply *apply, *n;
+ bool need_apply;
+
+ /*
+ * Synchronize everything on mode_config.mutex, to keep
+ * the callbacks and list modification all serialized
+ * with respect to modesetting ioctls from userspace.
+ */
+ mutex_lock(&dev->mode_config.mutex);
+ dispc_runtime_get();
+
+ /*
+ * If we are still pending a previous update, wait.. when the
+ * pending update completes, we get kicked again.
+ */
+ if (omap_crtc->apply_irq.registered)
+ goto out;
+
+ /* finish up previous applies: */
+ list_for_each_entry_safe(apply, n,
+ &omap_crtc->pending_applies, pending_node) {
+ apply->post_apply(apply);
+ list_del(&apply->pending_node);
+ }
+
+ need_apply = !list_empty(&omap_crtc->queued_applies);
+
+ /* then handle the next round of queued applies: */
+ list_for_each_entry_safe(apply, n,
+ &omap_crtc->queued_applies, queued_node) {
+ apply->pre_apply(apply);
+ list_del(&apply->queued_node);
+ apply->queued = false;
+ list_add_tail(&apply->pending_node,
+ &omap_crtc->pending_applies);
+ }
+
+ if (need_apply) {
+ enum omap_channel channel = omap_crtc->channel;
+
+ DBG("%s: GO", omap_crtc->name);
+
+ if (dispc_mgr_is_enabled(channel)) {
+ omap_irq_register(dev, &omap_crtc->apply_irq);
+ dispc_mgr_go(channel);
+ } else {
+ struct omap_drm_private *priv = dev->dev_private;
+ queue_work(priv->wq, &omap_crtc->apply_work);
+ }
+ }
+
+out:
+ dispc_runtime_put();
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
+int omap_crtc_apply(struct drm_crtc *crtc,
+ struct omap_drm_apply *apply)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
+ /* no need to queue it again if it is already queued: */
+ if (apply->queued)
+ return 0;
+
+ apply->queued = true;
+ list_add_tail(&apply->queued_node, &omap_crtc->queued_applies);
+
+ /*
+ * If there are no currently pending updates, then go ahead and
+ * kick the worker immediately, otherwise it will run again when
+ * the current update finishes.
+ */
+ if (list_empty(&omap_crtc->pending_applies)) {
+ struct omap_drm_private *priv = crtc->dev->dev_private;
+ queue_work(priv->wq, &omap_crtc->apply_work);
+ }
+
+ return 0;
+}
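omap_crtc_apply() is the single entry point through which anything touching shadowed DISPC registers is serialized against the GO bit: the worker runs each pre_apply before GO is set and each post_apply once it clears. Below is a hedged, kernel-side sketch (not buildable outside this driver) of how a consumer might hook in; every identifier with a my_ prefix is illustrative, not part of the driver.

/* Hypothetical consumer of the apply machinery. */
#include "omap_drv.h"

struct my_plane {
	struct drm_plane base;
	struct omap_drm_apply apply;	/* embedded so container_of() works */
	/* ... shadowed state to push to DISPC in pre_apply ... */
};

static void my_plane_pre_apply(struct omap_drm_apply *apply)
{
	struct my_plane *p = container_of(apply, struct my_plane, apply);

	/* called from the worker before the GO bit is set: program p's
	 * shadowed DISPC registers here */
	(void)p;
}

static void my_plane_post_apply(struct omap_drm_apply *apply)
{
	/* called after GO has cleared: safe to drop old buffers, send
	 * completion events, etc. */
}

/* caller holds dev->mode_config.mutex, e.g. from a set_property hook */
static int my_plane_update(struct my_plane *p, struct drm_crtc *crtc)
{
	p->apply.pre_apply = my_plane_pre_apply;
	p->apply.post_apply = my_plane_post_apply;

	/* queuing is idempotent; a second call before the worker has run
	 * is a no-op thanks to apply->queued */
	return omap_crtc_apply(crtc, &p->apply);
}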
+
+/* called only from apply */
+static void set_enabled(struct drm_crtc *crtc, bool enable)
+{
+ struct drm_device *dev = crtc->dev;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ enum omap_channel channel = omap_crtc->channel;
+ struct omap_irq_wait *wait = NULL;
+
+ if (dispc_mgr_is_enabled(channel) == enable)
+ return;
+
+ /* ignore sync-lost irqs during enable/disable */
+ omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+
+ if (dispc_mgr_get_framedone_irq(channel)) {
+ if (!enable) {
+ wait = omap_irq_wait_init(dev,
+ dispc_mgr_get_framedone_irq(channel), 1);
+ }
+ } else {
+ /*
+ * When we disable digit output, we need to wait until fields
+ * are done. Otherwise the DSS is still working, and turning
+ * off the clocks prevents DSS from going to OFF mode. And when
+ * enabling, we need to wait for the extra sync losts
+ */
+ wait = omap_irq_wait_init(dev,
+ dispc_mgr_get_vsync_irq(channel), 2);
+ }
+
+ dispc_mgr_enable(channel, enable);
+
+ if (wait) {
+ int ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100));
+ if (ret) {
+ dev_err(dev->dev, "%s: timeout waiting for %s\n",
+ omap_crtc->name, enable ? "enable" : "disable");
+ }
+ }
+
+ omap_irq_register(crtc->dev, &omap_crtc->error_irq);
+}
+
+static void omap_crtc_pre_apply(struct omap_drm_apply *apply)
+{
+ struct omap_crtc *omap_crtc =
+ container_of(apply, struct omap_crtc, apply);
+ struct drm_crtc *crtc = &omap_crtc->base;
+ struct drm_encoder *encoder = NULL;
+
+ DBG("%s: enabled=%d, full=%d", omap_crtc->name,
+ omap_crtc->enabled, omap_crtc->full_update);
+
+ if (omap_crtc->full_update) {
+ struct omap_drm_private *priv = crtc->dev->dev_private;
+ int i;
+ for (i = 0; i < priv->num_encoders; i++) {
+ if (priv->encoders[i]->crtc == crtc) {
+ encoder = priv->encoders[i];
+ break;
+ }
+ }
+ }
+
+ if (!omap_crtc->enabled) {
+ set_enabled(&omap_crtc->base, false);
+ if (encoder)
+ omap_encoder_set_enabled(encoder, false);
+ } else {
+ if (encoder) {
+ omap_encoder_set_enabled(encoder, false);
+ omap_encoder_update(encoder, &omap_crtc->mgr,
+ &omap_crtc->timings);
+ omap_encoder_set_enabled(encoder, true);
+ omap_crtc->full_update = false;
+ }
+
+ dispc_mgr_setup(omap_crtc->channel, &omap_crtc->info);
+ dispc_mgr_set_timings(omap_crtc->channel,
+ &omap_crtc->timings);
+ set_enabled(&omap_crtc->base, true);
+ }
+
+ omap_crtc->full_update = false;
+}
+
+static void omap_crtc_post_apply(struct omap_drm_apply *apply)
+{
+ /* nothing needed for post-apply */
+}
+
+static const char *channel_names[] = {
+ [OMAP_DSS_CHANNEL_LCD] = "lcd",
+ [OMAP_DSS_CHANNEL_DIGIT] = "tv",
+ [OMAP_DSS_CHANNEL_LCD2] = "lcd2",
+};
+
/* initialize crtc */
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
- struct omap_overlay *ovl, int id)
+ struct drm_plane *plane, enum omap_channel channel, int id)
{
struct drm_crtc *crtc = NULL;
- struct omap_crtc *omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL);
+ struct omap_crtc *omap_crtc;
+ struct omap_overlay_manager_info *info;
+
+ DBG("%s", channel_names[channel]);
- DBG("%s", ovl->name);
+ omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL);
if (!omap_crtc) {
dev_err(dev->dev, "could not allocate CRTC\n");
@@ -250,10 +609,40 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
crtc = &omap_crtc->base;
- omap_crtc->plane = omap_plane_init(dev, ovl, (1 << id), true);
+ INIT_WORK(&omap_crtc->page_flip_work, page_flip_worker);
+ INIT_WORK(&omap_crtc->apply_work, apply_worker);
+
+ INIT_LIST_HEAD(&omap_crtc->pending_applies);
+ INIT_LIST_HEAD(&omap_crtc->queued_applies);
+
+ omap_crtc->apply.pre_apply = omap_crtc_pre_apply;
+ omap_crtc->apply.post_apply = omap_crtc_post_apply;
+
+ omap_crtc->apply_irq.irqmask = pipe2vbl(id);
+ omap_crtc->apply_irq.irq = omap_crtc_apply_irq;
+
+ omap_crtc->error_irq.irqmask =
+ dispc_mgr_get_sync_lost_irq(channel);
+ omap_crtc->error_irq.irq = omap_crtc_error_irq;
+ omap_irq_register(dev, &omap_crtc->error_irq);
+
+ omap_crtc->channel = channel;
+ omap_crtc->plane = plane;
omap_crtc->plane->crtc = crtc;
- omap_crtc->name = ovl->name;
- omap_crtc->id = id;
+ omap_crtc->name = channel_names[channel];
+ omap_crtc->pipe = id;
+
+ /* temporary: */
+ omap_crtc->mgr.id = channel;
+
+ dss_install_mgr_ops(&mgr_ops);
+
+ /* TODO: fix hard-coded setup.. add properties! */
+ info = &omap_crtc->info;
+ info->default_color = 0x00000000;
+ info->trans_key = 0x00000000;
+ info->trans_key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+ info->trans_enabled = false;
drm_crtc_init(dev, crtc, &omap_crtc_funcs);
drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs);
diff --git a/drivers/staging/omapdrm/omap_drv.c b/drivers/staging/omapdrm/omap_drv.c
index 84943e5ba1d6..ae5ecc2efbc7 100644
--- a/drivers/staging/omapdrm/omap_drv.c
+++ b/drivers/staging/omapdrm/omap_drv.c
@@ -74,320 +74,99 @@ static int get_connector_type(struct omap_dss_device *dssdev)
}
}
-#if 0 /* enable when dss2 supports hotplug */
-static int omap_drm_notifier(struct notifier_block *nb,
- unsigned long evt, void *arg)
-{
- switch (evt) {
- case OMAP_DSS_SIZE_CHANGE:
- case OMAP_DSS_HOTPLUG_CONNECT:
- case OMAP_DSS_HOTPLUG_DISCONNECT: {
- struct drm_device *dev = drm_device;
- DBG("hotplug event: evt=%d, dev=%p", evt, dev);
- if (dev)
- drm_sysfs_hotplug_event(dev);
-
- return NOTIFY_OK;
- }
- default: /* don't care about other events for now */
- return NOTIFY_DONE;
- }
-}
-#endif
-
-static void dump_video_chains(void)
-{
- int i;
-
- DBG("dumping video chains: ");
- for (i = 0; i < omap_dss_get_num_overlays(); i++) {
- struct omap_overlay *ovl = omap_dss_get_overlay(i);
- struct omap_overlay_manager *mgr = ovl->manager;
- struct omap_dss_device *dssdev = mgr ?
- mgr->get_device(mgr) : NULL;
- if (dssdev) {
- DBG("%d: %s -> %s -> %s", i, ovl->name, mgr->name,
- dssdev->name);
- } else if (mgr) {
- DBG("%d: %s -> %s", i, ovl->name, mgr->name);
- } else {
- DBG("%d: %s", i, ovl->name);
- }
- }
-}
-
-/* create encoders for each manager */
-static int create_encoder(struct drm_device *dev,
- struct omap_overlay_manager *mgr)
-{
- struct omap_drm_private *priv = dev->dev_private;
- struct drm_encoder *encoder = omap_encoder_init(dev, mgr);
-
- if (!encoder) {
- dev_err(dev->dev, "could not create encoder: %s\n",
- mgr->name);
- return -ENOMEM;
- }
-
- BUG_ON(priv->num_encoders >= ARRAY_SIZE(priv->encoders));
-
- priv->encoders[priv->num_encoders++] = encoder;
-
- return 0;
-}
-
-/* create connectors for each display device */
-static int create_connector(struct drm_device *dev,
- struct omap_dss_device *dssdev)
+static int omap_modeset_init(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
- static struct notifier_block *notifier;
- struct drm_connector *connector;
- int j;
-
- if (!dssdev->driver) {
- dev_warn(dev->dev, "%s has no driver.. skipping it\n",
- dssdev->name);
- return 0;
- }
+ struct omap_dss_device *dssdev = NULL;
+ int num_ovls = dss_feat_get_num_ovls();
+ int id;
- if (!(dssdev->driver->get_timings ||
- dssdev->driver->read_edid)) {
- dev_warn(dev->dev, "%s driver does not support "
- "get_timings or read_edid.. skipping it!\n",
- dssdev->name);
- return 0;
- }
+ drm_mode_config_init(dev);
- connector = omap_connector_init(dev,
- get_connector_type(dssdev), dssdev);
+ omap_drm_irq_install(dev);
- if (!connector) {
- dev_err(dev->dev, "could not create connector: %s\n",
- dssdev->name);
- return -ENOMEM;
- }
-
- BUG_ON(priv->num_connectors >= ARRAY_SIZE(priv->connectors));
+ /*
+ * Create private planes and CRTCs for the last NUM_CRTCs overlay
+ * plus manager:
+ */
+ for (id = 0; id < min(num_crtc, num_ovls); id++) {
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
- priv->connectors[priv->num_connectors++] = connector;
+ plane = omap_plane_init(dev, id, true);
+ crtc = omap_crtc_init(dev, plane, pipe2chan(id), id);
-#if 0 /* enable when dss2 supports hotplug */
- notifier = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
- notifier->notifier_call = omap_drm_notifier;
- omap_dss_add_notify(dssdev, notifier);
-#else
- notifier = NULL;
-#endif
+ BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
+ priv->crtcs[id] = crtc;
+ priv->num_crtcs++;
- for (j = 0; j < priv->num_encoders; j++) {
- struct omap_overlay_manager *mgr =
- omap_encoder_get_manager(priv->encoders[j]);
- if (mgr->get_device(mgr) == dssdev) {
- drm_mode_connector_attach_encoder(connector,
- priv->encoders[j]);
- }
+ priv->planes[id] = plane;
+ priv->num_planes++;
}
- return 0;
-}
-
-/* create up to max_overlays CRTCs mapping to overlays.. by default,
- * connect the overlays to different managers/encoders, giving priority
- * to encoders connected to connectors with a detected connection
- */
-static int create_crtc(struct drm_device *dev, struct omap_overlay *ovl,
- int *j, unsigned int connected_connectors)
-{
- struct omap_drm_private *priv = dev->dev_private;
- struct omap_overlay_manager *mgr = NULL;
- struct drm_crtc *crtc;
-
- /* find next best connector, ones with detected connection first
+ /*
+ * Create normal planes for the remaining overlays:
*/
- while (*j < priv->num_connectors && !mgr) {
- if (connected_connectors & (1 << *j)) {
- struct drm_encoder *encoder =
- omap_connector_attached_encoder(
- priv->connectors[*j]);
- if (encoder)
- mgr = omap_encoder_get_manager(encoder);
+ for (; id < num_ovls; id++) {
+ struct drm_plane *plane = omap_plane_init(dev, id, false);
- }
- (*j)++;
+ BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));
+ priv->planes[priv->num_planes++] = plane;
}
- /* if we couldn't find another connected connector, lets start
- * looking at the unconnected connectors:
- *
- * note: it might not be immediately apparent, but thanks to
- * the !mgr check in both this loop and the one above, the only
- * way to enter this loop is with *j == priv->num_connectors,
- * so idx can never go negative.
- */
- while (*j < 2 * priv->num_connectors && !mgr) {
- int idx = *j - priv->num_connectors;
- if (!(connected_connectors & (1 << idx))) {
- struct drm_encoder *encoder =
- omap_connector_attached_encoder(
- priv->connectors[idx]);
- if (encoder)
- mgr = omap_encoder_get_manager(encoder);
+ for_each_dss_dev(dssdev) {
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ if (!dssdev->driver) {
+ dev_warn(dev->dev, "%s has no driver.. skipping it\n",
+ dssdev->name);
+ return 0;
}
- (*j)++;
- }
-
- crtc = omap_crtc_init(dev, ovl, priv->num_crtcs);
-
- if (!crtc) {
- dev_err(dev->dev, "could not create CRTC: %s\n",
- ovl->name);
- return -ENOMEM;
- }
- BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
-
- priv->crtcs[priv->num_crtcs++] = crtc;
-
- return 0;
-}
-
-static int create_plane(struct drm_device *dev, struct omap_overlay *ovl,
- unsigned int possible_crtcs)
-{
- struct omap_drm_private *priv = dev->dev_private;
- struct drm_plane *plane =
- omap_plane_init(dev, ovl, possible_crtcs, false);
-
- if (!plane) {
- dev_err(dev->dev, "could not create plane: %s\n",
- ovl->name);
- return -ENOMEM;
- }
-
- BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));
-
- priv->planes[priv->num_planes++] = plane;
-
- return 0;
-}
-
-static int match_dev_name(struct omap_dss_device *dssdev, void *data)
-{
- return !strcmp(dssdev->name, data);
-}
-
-static unsigned int detect_connectors(struct drm_device *dev)
-{
- struct omap_drm_private *priv = dev->dev_private;
- unsigned int connected_connectors = 0;
- int i;
-
- for (i = 0; i < priv->num_connectors; i++) {
- struct drm_connector *connector = priv->connectors[i];
- if (omap_connector_detect(connector, true) ==
- connector_status_connected) {
- connected_connectors |= (1 << i);
+ if (!(dssdev->driver->get_timings ||
+ dssdev->driver->read_edid)) {
+ dev_warn(dev->dev, "%s driver does not support "
+ "get_timings or read_edid.. skipping it!\n",
+ dssdev->name);
+ return 0;
}
- }
-
- return connected_connectors;
-}
-static int omap_modeset_init(struct drm_device *dev)
-{
- const struct omap_drm_platform_data *pdata = dev->dev->platform_data;
- struct omap_kms_platform_data *kms_pdata = NULL;
- struct omap_drm_private *priv = dev->dev_private;
- struct omap_dss_device *dssdev = NULL;
- int i, j;
- unsigned int connected_connectors = 0;
+ encoder = omap_encoder_init(dev, dssdev);
- drm_mode_config_init(dev);
-
- if (pdata && pdata->kms_pdata) {
- kms_pdata = pdata->kms_pdata;
-
- /* if platform data is provided by the board file, use it to
- * control which overlays, managers, and devices we own.
- */
- for (i = 0; i < kms_pdata->mgr_cnt; i++) {
- struct omap_overlay_manager *mgr =
- omap_dss_get_overlay_manager(
- kms_pdata->mgr_ids[i]);
- create_encoder(dev, mgr);
- }
-
- for (i = 0; i < kms_pdata->dev_cnt; i++) {
- struct omap_dss_device *dssdev =
- omap_dss_find_device(
- (void *)kms_pdata->dev_names[i],
- match_dev_name);
- if (!dssdev) {
- dev_warn(dev->dev, "no such dssdev: %s\n",
- kms_pdata->dev_names[i]);
- continue;
- }
- create_connector(dev, dssdev);
+ if (!encoder) {
+ dev_err(dev->dev, "could not create encoder: %s\n",
+ dssdev->name);
+ return -ENOMEM;
}
- connected_connectors = detect_connectors(dev);
+ connector = omap_connector_init(dev,
+ get_connector_type(dssdev), dssdev, encoder);
- j = 0;
- for (i = 0; i < kms_pdata->ovl_cnt; i++) {
- struct omap_overlay *ovl =
- omap_dss_get_overlay(kms_pdata->ovl_ids[i]);
- create_crtc(dev, ovl, &j, connected_connectors);
+ if (!connector) {
+ dev_err(dev->dev, "could not create connector: %s\n",
+ dssdev->name);
+ return -ENOMEM;
}
- for (i = 0; i < kms_pdata->pln_cnt; i++) {
- struct omap_overlay *ovl =
- omap_dss_get_overlay(kms_pdata->pln_ids[i]);
- create_plane(dev, ovl, (1 << priv->num_crtcs) - 1);
- }
- } else {
- /* otherwise just grab up to CONFIG_DRM_OMAP_NUM_CRTCS and try
- * to make educated guesses about everything else
- */
- int max_overlays = min(omap_dss_get_num_overlays(), num_crtc);
+ BUG_ON(priv->num_encoders >= ARRAY_SIZE(priv->encoders));
+ BUG_ON(priv->num_connectors >= ARRAY_SIZE(priv->connectors));
- for (i = 0; i < omap_dss_get_num_overlay_managers(); i++)
- create_encoder(dev, omap_dss_get_overlay_manager(i));
-
- for_each_dss_dev(dssdev) {
- create_connector(dev, dssdev);
- }
+ priv->encoders[priv->num_encoders++] = encoder;
+ priv->connectors[priv->num_connectors++] = connector;
- connected_connectors = detect_connectors(dev);
+ drm_mode_connector_attach_encoder(connector, encoder);
- j = 0;
- for (i = 0; i < max_overlays; i++) {
- create_crtc(dev, omap_dss_get_overlay(i),
- &j, connected_connectors);
- }
-
- /* use any remaining overlays as drm planes */
- for (; i < omap_dss_get_num_overlays(); i++) {
- struct omap_overlay *ovl = omap_dss_get_overlay(i);
- create_plane(dev, ovl, (1 << priv->num_crtcs) - 1);
+ /* figure out which CRTCs we can connect the encoder to: */
+ encoder->possible_crtcs = 0;
+ for (id = 0; id < priv->num_crtcs; id++) {
+ enum omap_dss_output_id supported_outputs =
+ dss_feat_get_supported_outputs(pipe2chan(id));
+ if (supported_outputs & dssdev->output->id)
+ encoder->possible_crtcs |= (1 << id);
}
}
- /* for now keep the mapping of CRTCs and encoders static.. */
- for (i = 0; i < priv->num_encoders; i++) {
- struct drm_encoder *encoder = priv->encoders[i];
- struct omap_overlay_manager *mgr =
- omap_encoder_get_manager(encoder);
-
- encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
-
- DBG("%s: possible_crtcs=%08x", mgr->name,
- encoder->possible_crtcs);
- }
-
- dump_video_chains();
-
dev->mode_config.min_width = 32;
dev->mode_config.min_height = 32;
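The possible_crtcs computation at the end of the loop above simply intersects each channel's supported outputs with the encoder's DSS output. A standalone illustration with made-up output bitmasks; the real masks come from dss_feat_get_supported_outputs() and dssdev->output->id:

/* Illustration only: the MY_OUTPUT_* values are invented for the example. */
#include <stdio.h>
#include <stdint.h>

#define MY_OUTPUT_DPI	(1 << 0)
#define MY_OUTPUT_HDMI	(1 << 1)

int main(void)
{
	/* supported outputs per CRTC/channel, indexed by pipe */
	uint32_t supported[2] = { MY_OUTPUT_HDMI, MY_OUTPUT_DPI | MY_OUTPUT_HDMI };
	uint32_t encoder_output = MY_OUTPUT_DPI;
	uint32_t possible_crtcs = 0;

	for (int id = 0; id < 2; id++)
		if (supported[id] & encoder_output)
			possible_crtcs |= 1 << id;

	printf("possible_crtcs = 0x%x\n", possible_crtcs);	/* 0x2 */
	return 0;
}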
@@ -450,7 +229,7 @@ static int ioctl_gem_new(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_omap_gem_new *args = data;
- DBG("%p:%p: size=0x%08x, flags=%08x", dev, file_priv,
+ VERB("%p:%p: size=0x%08x, flags=%08x", dev, file_priv,
args->size.bytes, args->flags);
return omap_gem_new_handle(dev, file_priv, args->size,
args->flags, &args->handle);
@@ -510,7 +289,7 @@ static int ioctl_gem_info(struct drm_device *dev, void *data,
struct drm_gem_object *obj;
int ret = 0;
- DBG("%p:%p: handle=%d", dev, file_priv, args->handle);
+ VERB("%p:%p: handle=%d", dev, file_priv, args->handle);
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (!obj)
@@ -565,14 +344,6 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = priv;
- ret = omapdss_compat_init();
- if (ret) {
- dev_err(dev->dev, "coult not init omapdss\n");
- dev->dev_private = NULL;
- kfree(priv);
- return ret;
- }
-
priv->wq = alloc_ordered_workqueue("omapdrm", 0);
INIT_LIST_HEAD(&priv->obj_list);
@@ -584,10 +355,13 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
dev_err(dev->dev, "omap_modeset_init failed: ret=%d\n", ret);
dev->dev_private = NULL;
kfree(priv);
- omapdss_compat_uninit();
return ret;
}
+ ret = drm_vblank_init(dev, priv->num_crtcs);
+ if (ret)
+ dev_warn(dev->dev, "could not init vblank\n");
+
priv->fbdev = omap_fbdev_init(dev);
if (!priv->fbdev) {
dev_warn(dev->dev, "omap_fbdev_init failed\n");
@@ -596,10 +370,6 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
drm_kms_helper_poll_init(dev);
- ret = drm_vblank_init(dev, priv->num_crtcs);
- if (ret)
- dev_warn(dev->dev, "could not init vblank\n");
-
return 0;
}
@@ -609,8 +379,9 @@ static int dev_unload(struct drm_device *dev)
DBG("unload: dev=%p", dev);
- drm_vblank_cleanup(dev);
drm_kms_helper_poll_fini(dev);
+ drm_vblank_cleanup(dev);
+ omap_drm_irq_uninstall(dev);
omap_fbdev_free(dev);
omap_modeset_free(dev);
@@ -619,8 +390,6 @@ static int dev_unload(struct drm_device *dev)
flush_workqueue(priv->wq);
destroy_workqueue(priv->wq);
- omapdss_compat_uninit();
-
kfree(dev->dev_private);
dev->dev_private = NULL;
@@ -680,7 +449,9 @@ static void dev_lastclose(struct drm_device *dev)
}
}
+ mutex_lock(&dev->mode_config.mutex);
ret = drm_fb_helper_restore_fbdev_mode(priv->fbdev);
+ mutex_unlock(&dev->mode_config.mutex);
if (ret)
DBG("failed to restore crtc mode");
}
@@ -695,60 +466,6 @@ static void dev_postclose(struct drm_device *dev, struct drm_file *file)
DBG("postclose: dev=%p, file=%p", dev, file);
}
-/**
- * enable_vblank - enable vblank interrupt events
- * @dev: DRM device
- * @crtc: which irq to enable
- *
- * Enable vblank interrupts for @crtc. If the device doesn't have
- * a hardware vblank counter, this routine should be a no-op, since
- * interrupts will have to stay on to keep the count accurate.
- *
- * RETURNS
- * Zero on success, appropriate errno if the given @crtc's vblank
- * interrupt cannot be enabled.
- */
-static int dev_enable_vblank(struct drm_device *dev, int crtc)
-{
- DBG("enable_vblank: dev=%p, crtc=%d", dev, crtc);
- return 0;
-}
-
-/**
- * disable_vblank - disable vblank interrupt events
- * @dev: DRM device
- * @crtc: which irq to enable
- *
- * Disable vblank interrupts for @crtc. If the device doesn't have
- * a hardware vblank counter, this routine should be a no-op, since
- * interrupts will have to stay on to keep the count accurate.
- */
-static void dev_disable_vblank(struct drm_device *dev, int crtc)
-{
- DBG("disable_vblank: dev=%p, crtc=%d", dev, crtc);
-}
-
-static irqreturn_t dev_irq_handler(DRM_IRQ_ARGS)
-{
- return IRQ_HANDLED;
-}
-
-static void dev_irq_preinstall(struct drm_device *dev)
-{
- DBG("irq_preinstall: dev=%p", dev);
-}
-
-static int dev_irq_postinstall(struct drm_device *dev)
-{
- DBG("irq_postinstall: dev=%p", dev);
- return 0;
-}
-
-static void dev_irq_uninstall(struct drm_device *dev)
-{
- DBG("irq_uninstall: dev=%p", dev);
-}
-
static const struct vm_operations_struct omap_gem_vm_ops = {
.fault = omap_gem_fault,
.open = drm_gem_vm_open,
@@ -778,12 +495,12 @@ static struct drm_driver omap_drm_driver = {
.preclose = dev_preclose,
.postclose = dev_postclose,
.get_vblank_counter = drm_vblank_count,
- .enable_vblank = dev_enable_vblank,
- .disable_vblank = dev_disable_vblank,
- .irq_preinstall = dev_irq_preinstall,
- .irq_postinstall = dev_irq_postinstall,
- .irq_uninstall = dev_irq_uninstall,
- .irq_handler = dev_irq_handler,
+ .enable_vblank = omap_irq_enable_vblank,
+ .disable_vblank = omap_irq_disable_vblank,
+ .irq_preinstall = omap_irq_preinstall,
+ .irq_postinstall = omap_irq_postinstall,
+ .irq_uninstall = omap_irq_uninstall,
+ .irq_handler = omap_irq_handler,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = omap_debugfs_init,
.debugfs_cleanup = omap_debugfs_cleanup,
diff --git a/drivers/staging/omapdrm/omap_drv.h b/drivers/staging/omapdrm/omap_drv.h
index 1d4aea53b75d..cd1f22b0b124 100644
--- a/drivers/staging/omapdrm/omap_drv.h
+++ b/drivers/staging/omapdrm/omap_drv.h
@@ -28,6 +28,7 @@
#include <linux/platform_data/omap_drm.h>
#include "omap_drm.h"
+
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt, ##__VA_ARGS__) /* verbose debug */
@@ -39,6 +40,51 @@
*/
#define MAX_MAPPERS 2
+/* parameters which describe (unrotated) coordinates of scanout within a fb: */
+struct omap_drm_window {
+ uint32_t rotation;
+ int32_t crtc_x, crtc_y; /* signed because can be offscreen */
+ uint32_t crtc_w, crtc_h;
+ uint32_t src_x, src_y;
+ uint32_t src_w, src_h;
+};
+
+/* Once GO bit is set, we can't make further updates to shadowed registers
+ * until the GO bit is cleared. So various parts in the kms code that need
+ * to update shadowed registers queue up a pair of callbacks, pre_apply
+ * which is called before setting GO bit, and post_apply that is called
+ * after GO bit is cleared. The crtc manages the queuing, and everyone
+ * else goes thru omap_crtc_apply() using these callbacks so that the
+ * code which has to deal w/ GO bit state is centralized.
+ */
+struct omap_drm_apply {
+ struct list_head pending_node, queued_node;
+ bool queued;
+ void (*pre_apply)(struct omap_drm_apply *apply);
+ void (*post_apply)(struct omap_drm_apply *apply);
+};
+
+/* For transiently registering for different DSS irqs that various parts
+ * of the KMS code need during setup/configuration. These are not
+ * necessarily the same as what drm_vblank_get/put() are requesting, and
+ * the hysteresis in drm_vblank_put() is not necessarily desirable for
+ * internal housekeeping related irq usage.
+ */
+struct omap_drm_irq {
+ struct list_head node;
+ uint32_t irqmask;
+ bool registered;
+ void (*irq)(struct omap_drm_irq *irq, uint32_t irqstatus);
+};
+
+/* For KMS code that needs to wait for a certain # of IRQs:
+ */
+struct omap_irq_wait;
+struct omap_irq_wait *omap_irq_wait_init(struct drm_device *dev,
+ uint32_t irqmask, int count);
+int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
+ unsigned long timeout);
+
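The wait helpers declared here back the enable/disable sequencing in omap_crtc.c. A minimal kernel-side sketch of the pattern, assuming a caller that needs to block until two vsyncs have fired on a channel; the function name is illustrative:

/* Sketch: same pattern set_enabled() uses around digit output transitions. */
static int my_wait_two_vsyncs(struct drm_device *dev, enum omap_channel channel)
{
	struct omap_irq_wait *wait =
		omap_irq_wait_init(dev, dispc_mgr_get_vsync_irq(channel), 2);

	return omap_irq_wait(dev, wait, msecs_to_jiffies(100));
}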
struct omap_drm_private {
uint32_t omaprev;
@@ -58,6 +104,7 @@ struct omap_drm_private {
struct workqueue_struct *wq;
+ /* list of GEM objects: */
struct list_head obj_list;
bool has_dmm;
@@ -65,6 +112,11 @@ struct omap_drm_private {
/* properties: */
struct drm_property *rotation_prop;
struct drm_property *zorder_prop;
+
+ /* irq handling: */
+ struct list_head irq_list; /* list of omap_drm_irq */
+ uint32_t vblank_mask; /* irq bits set for userspace vblank */
+ struct omap_drm_irq error_handler;
};
/* this should probably be in drm-core to standardize amongst drivers */
@@ -75,15 +127,6 @@ struct omap_drm_private {
#define DRM_REFLECT_X 4
#define DRM_REFLECT_Y 5
-/* parameters which describe (unrotated) coordinates of scanout within a fb: */
-struct omap_drm_window {
- uint32_t rotation;
- int32_t crtc_x, crtc_y; /* signed because can be offscreen */
- uint32_t crtc_w, crtc_h;
- uint32_t src_x, src_y;
- uint32_t src_w, src_h;
-};
-
#ifdef CONFIG_DEBUG_FS
int omap_debugfs_init(struct drm_minor *minor);
void omap_debugfs_cleanup(struct drm_minor *minor);
@@ -92,23 +135,36 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
void omap_gem_describe_objects(struct list_head *list, struct seq_file *m);
#endif
+int omap_irq_enable_vblank(struct drm_device *dev, int crtc);
+void omap_irq_disable_vblank(struct drm_device *dev, int crtc);
+irqreturn_t omap_irq_handler(DRM_IRQ_ARGS);
+void omap_irq_preinstall(struct drm_device *dev);
+int omap_irq_postinstall(struct drm_device *dev);
+void omap_irq_uninstall(struct drm_device *dev);
+void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq);
+void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq);
+int omap_drm_irq_uninstall(struct drm_device *dev);
+int omap_drm_irq_install(struct drm_device *dev);
+
struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev);
void omap_fbdev_free(struct drm_device *dev);
+const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc);
+enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
+int omap_crtc_apply(struct drm_crtc *crtc,
+ struct omap_drm_apply *apply);
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
- struct omap_overlay *ovl, int id);
+ struct drm_plane *plane, enum omap_channel channel, int id);
struct drm_plane *omap_plane_init(struct drm_device *dev,
- struct omap_overlay *ovl, unsigned int possible_crtcs,
- bool priv);
+ int plane_id, bool private_plane);
int omap_plane_dpms(struct drm_plane *plane, int mode);
int omap_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h);
-void omap_plane_on_endwin(struct drm_plane *plane,
+ uint32_t src_w, uint32_t src_h,
void (*fxn)(void *), void *arg);
void omap_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj);
@@ -116,21 +172,25 @@ int omap_plane_set_property(struct drm_plane *plane,
struct drm_property *property, uint64_t val);
struct drm_encoder *omap_encoder_init(struct drm_device *dev,
- struct omap_overlay_manager *mgr);
-struct omap_overlay_manager *omap_encoder_get_manager(
+ struct omap_dss_device *dssdev);
+int omap_encoder_set_enabled(struct drm_encoder *encoder, bool enabled);
+int omap_encoder_update(struct drm_encoder *encoder,
+ struct omap_overlay_manager *mgr,
+ struct omap_video_timings *timings);
+
+struct drm_connector *omap_connector_init(struct drm_device *dev,
+ int connector_type, struct omap_dss_device *dssdev,
struct drm_encoder *encoder);
struct drm_encoder *omap_connector_attached_encoder(
struct drm_connector *connector);
-enum drm_connector_status omap_connector_detect(
- struct drm_connector *connector, bool force);
-
-struct drm_connector *omap_connector_init(struct drm_device *dev,
- int connector_type, struct omap_dss_device *dssdev);
-void omap_connector_mode_set(struct drm_connector *connector,
- struct drm_display_mode *mode);
void omap_connector_flush(struct drm_connector *connector,
int x, int y, int w, int h);
+void copy_timings_omap_to_drm(struct drm_display_mode *mode,
+ struct omap_video_timings *timings);
+void copy_timings_drm_to_omap(struct omap_video_timings *timings,
+ struct drm_display_mode *mode);
+
uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats,
uint32_t max_formats, enum omap_color_mode supported_modes);
struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
@@ -207,6 +267,40 @@ static inline int align_pitch(int pitch, int width, int bpp)
return ALIGN(pitch, 8 * bytespp);
}
+static inline enum omap_channel pipe2chan(int pipe)
+{
+ int num_mgrs = dss_feat_get_num_mgrs();
+
+ /*
+ * We usually don't want to create a CRTC for each manager,
+ * at least not until we have a way to expose private planes
+ * to userspace. Otherwise there would not be enough video
+ * pipes left for drm planes.  The higher-numbered managers tend
+ * to have more features, so start in reverse order.
+ */
+ return num_mgrs - pipe - 1;
+}
+
+/* map crtc to vblank mask */
+static inline uint32_t pipe2vbl(int crtc)
+{
+ enum omap_channel channel = pipe2chan(crtc);
+ return dispc_mgr_get_vsync_irq(channel);
+}
+
+static inline int crtc2pipe(struct drm_device *dev, struct drm_crtc *crtc)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->crtcs); i++)
+ if (priv->crtcs[i] == crtc)
+ return i;
+
+ BUG(); /* bogus CRTC ptr */
+ return -1;
+}
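/*
 * Sketch of how the helpers above compose (illustrative only): look up
 * the DISPC vsync irq bit for a given drm_crtc pointer.
 */
static inline uint32_t example_vblank_bit(struct drm_device *dev,
		struct drm_crtc *crtc)
{
	int pipe = crtc2pipe(dev, crtc);	/* drm pipe index */

	return pipe2vbl(pipe);			/* vsync irq bit for that pipe */
}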
+
/* should these be made into common util helpers?
*/
diff --git a/drivers/staging/omapdrm/omap_encoder.c b/drivers/staging/omapdrm/omap_encoder.c
index 5341d5e3e317..e053160d2db3 100644
--- a/drivers/staging/omapdrm/omap_encoder.c
+++ b/drivers/staging/omapdrm/omap_encoder.c
@@ -22,37 +22,56 @@
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
+#include <linux/list.h>
+
+
/*
* encoder funcs
*/
#define to_omap_encoder(x) container_of(x, struct omap_encoder, base)
+/* The encoder and connector both map to the same dssdev: the encoder
+ * handles the 'active' parts, i.e. anything that modifies the state
+ * of the hw, and the connector handles the 'read-only' parts, like
+ * detecting connection and reading edid.
+ */
struct omap_encoder {
struct drm_encoder base;
- struct omap_overlay_manager *mgr;
+ struct omap_dss_device *dssdev;
};
static void omap_encoder_destroy(struct drm_encoder *encoder)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- DBG("%s", omap_encoder->mgr->name);
drm_encoder_cleanup(encoder);
kfree(omap_encoder);
}
+static const struct drm_encoder_funcs omap_encoder_funcs = {
+ .destroy = omap_encoder_destroy,
+};
+
+/*
+ * The CRTC's drm_crtc_helper_set_mode() doesn't really give us the right
+ * order, so the easiest workaround for now is to make all the encoder
+ * helpers no-ops and have the omap_crtc code take care of the sequencing
+ * and call us at the right points.
+ *
+ * Eventually, to handle connecting CRTCs to different encoders properly,
+ * either the CRTC helpers need to change or we need to replace
+ * drm_crtc_helper_set_mode(), but let's wait until atomic modeset for
+ * that.
+ */
+
static void omap_encoder_dpms(struct drm_encoder *encoder, int mode)
{
- struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- DBG("%s: %d", omap_encoder->mgr->name, mode);
}
static bool omap_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- DBG("%s", omap_encoder->mgr->name);
return true;
}
@@ -60,47 +79,16 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct omap_drm_private *priv = dev->dev_private;
- int i;
-
- mode = adjusted_mode;
-
- DBG("%s: set mode: %dx%d", omap_encoder->mgr->name,
- mode->hdisplay, mode->vdisplay);
-
- for (i = 0; i < priv->num_connectors; i++) {
- struct drm_connector *connector = priv->connectors[i];
- if (connector->encoder == encoder)
- omap_connector_mode_set(connector, mode);
-
- }
}
static void omap_encoder_prepare(struct drm_encoder *encoder)
{
- struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct drm_encoder_helper_funcs *encoder_funcs =
- encoder->helper_private;
- DBG("%s", omap_encoder->mgr->name);
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
static void omap_encoder_commit(struct drm_encoder *encoder)
{
- struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct drm_encoder_helper_funcs *encoder_funcs =
- encoder->helper_private;
- DBG("%s", omap_encoder->mgr->name);
- omap_encoder->mgr->apply(omap_encoder->mgr);
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}
-static const struct drm_encoder_funcs omap_encoder_funcs = {
- .destroy = omap_encoder_destroy,
-};
-
static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
.dpms = omap_encoder_dpms,
.mode_fixup = omap_encoder_mode_fixup,
@@ -109,23 +97,54 @@ static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
.commit = omap_encoder_commit,
};
-struct omap_overlay_manager *omap_encoder_get_manager(
- struct drm_encoder *encoder)
+/*
+ * Instead of relying on the helpers for modeset, the omap_crtc code
+ * calls these functions in the proper sequence.
+ */
+
+int omap_encoder_set_enabled(struct drm_encoder *encoder, bool enabled)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- return omap_encoder->mgr;
+ struct omap_dss_device *dssdev = omap_encoder->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+
+ if (enabled) {
+ return dssdrv->enable(dssdev);
+ } else {
+ dssdrv->disable(dssdev);
+ return 0;
+ }
+}
+
+int omap_encoder_update(struct drm_encoder *encoder,
+ struct omap_overlay_manager *mgr,
+ struct omap_video_timings *timings)
+{
+ struct drm_device *dev = encoder->dev;
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ struct omap_dss_device *dssdev = omap_encoder->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+ int ret;
+
+ dssdev->output->manager = mgr;
+
+ ret = dssdrv->check_timings(dssdev, timings);
+ if (ret) {
+ dev_err(dev->dev, "could not set timings: %d\n", ret);
+ return ret;
+ }
+
+ dssdrv->set_timings(dssdev, timings);
+
+ return 0;
}
/* initialize encoder */
struct drm_encoder *omap_encoder_init(struct drm_device *dev,
- struct omap_overlay_manager *mgr)
+ struct omap_dss_device *dssdev)
{
struct drm_encoder *encoder = NULL;
struct omap_encoder *omap_encoder;
- struct omap_overlay_manager_info info;
- int ret;
-
- DBG("%s", mgr->name);
omap_encoder = kzalloc(sizeof(*omap_encoder), GFP_KERNEL);
if (!omap_encoder) {
@@ -133,33 +152,14 @@ struct drm_encoder *omap_encoder_init(struct drm_device *dev,
goto fail;
}
- omap_encoder->mgr = mgr;
+ omap_encoder->dssdev = dssdev;
+
encoder = &omap_encoder->base;
drm_encoder_init(dev, encoder, &omap_encoder_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &omap_encoder_helper_funcs);
- mgr->get_manager_info(mgr, &info);
-
- /* TODO: fix hard-coded setup.. */
- info.default_color = 0x00000000;
- info.trans_key = 0x00000000;
- info.trans_key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
- info.trans_enabled = false;
-
- ret = mgr->set_manager_info(mgr, &info);
- if (ret) {
- dev_err(dev->dev, "could not set manager info\n");
- goto fail;
- }
-
- ret = mgr->apply(mgr);
- if (ret) {
- dev_err(dev->dev, "could not apply\n");
- goto fail;
- }
-
return encoder;
fail:
diff --git a/drivers/staging/omapdrm/omap_gem_dmabuf.c b/drivers/staging/omapdrm/omap_gem_dmabuf.c
index ea3840038250..b6c5b5c6c8c5 100644
--- a/drivers/staging/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/staging/omapdrm/omap_gem_dmabuf.c
@@ -194,7 +194,7 @@ struct dma_buf_ops omap_dmabuf_ops = {
struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *obj, int flags)
{
- return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, 0600);
+ return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, flags);
}
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
diff --git a/drivers/staging/omapdrm/omap_irq.c b/drivers/staging/omapdrm/omap_irq.c
new file mode 100644
index 000000000000..2629ba7be6c8
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_irq.c
@@ -0,0 +1,322 @@
+/*
+ * drivers/staging/omapdrm/omap_irq.c
+ *
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+static DEFINE_SPINLOCK(list_lock);
+
+static void omap_irq_error_handler(struct omap_drm_irq *irq,
+ uint32_t irqstatus)
+{
+ DRM_ERROR("errors: %08x\n", irqstatus);
+}
+
+/* call with list_lock and dispc runtime held */
+static void omap_irq_update(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_drm_irq *irq;
+ uint32_t irqmask = priv->vblank_mask;
+
+ BUG_ON(!spin_is_locked(&list_lock));
+
+ list_for_each_entry(irq, &priv->irq_list, node)
+ irqmask |= irq->irqmask;
+
+ DBG("irqmask=%08x", irqmask);
+
+ dispc_write_irqenable(irqmask);
+ dispc_read_irqenable(); /* flush posted write */
+}
+
+void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ unsigned long flags;
+
+ dispc_runtime_get();
+ spin_lock_irqsave(&list_lock, flags);
+
+ if (!WARN_ON(irq->registered)) {
+ irq->registered = true;
+ list_add(&irq->node, &priv->irq_list);
+ omap_irq_update(dev);
+ }
+
+ spin_unlock_irqrestore(&list_lock, flags);
+ dispc_runtime_put();
+}
+
+void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
+{
+ unsigned long flags;
+
+ dispc_runtime_get();
+ spin_lock_irqsave(&list_lock, flags);
+
+ if (!WARN_ON(!irq->registered)) {
+ irq->registered = false;
+ list_del(&irq->node);
+ omap_irq_update(dev);
+ }
+
+ spin_unlock_irqrestore(&list_lock, flags);
+ dispc_runtime_put();
+}
+
+struct omap_irq_wait {
+ struct omap_drm_irq irq;
+ int count;
+};
+
+static DECLARE_WAIT_QUEUE_HEAD(wait_event);
+
+static void wait_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+ struct omap_irq_wait *wait =
+ container_of(irq, struct omap_irq_wait, irq);
+ wait->count--;
+ wake_up_all(&wait_event);
+}
+
+struct omap_irq_wait * omap_irq_wait_init(struct drm_device *dev,
+ uint32_t irqmask, int count)
+{
+ struct omap_irq_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL);
+ wait->irq.irq = wait_irq;
+ wait->irq.irqmask = irqmask;
+ wait->count = count;
+ omap_irq_register(dev, &wait->irq);
+ return wait;
+}
+
+int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
+ unsigned long timeout)
+{
+ int ret = wait_event_timeout(wait_event, (wait->count <= 0), timeout);
+ omap_irq_unregister(dev, &wait->irq);
+ kfree(wait);
+ if (ret == 0)
+ return -1;
+ return 0;
+}
+
+/**
+ * enable_vblank - enable vblank interrupt events
+ * @dev: DRM device
+ * @crtc: which irq to enable
+ *
+ * Enable vblank interrupts for @crtc. If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ *
+ * RETURNS
+ * Zero on success, appropriate errno if the given @crtc's vblank
+ * interrupt cannot be enabled.
+ */
+int omap_irq_enable_vblank(struct drm_device *dev, int crtc)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ unsigned long flags;
+
+ DBG("dev=%p, crtc=%d", dev, crtc);
+
+ dispc_runtime_get();
+ spin_lock_irqsave(&list_lock, flags);
+ priv->vblank_mask |= pipe2vbl(crtc);
+ omap_irq_update(dev);
+ spin_unlock_irqrestore(&list_lock, flags);
+ dispc_runtime_put();
+
+ return 0;
+}
+
+/**
+ * disable_vblank - disable vblank interrupt events
+ * @dev: DRM device
+ * @crtc: which irq to disable
+ *
+ * Disable vblank interrupts for @crtc. If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ */
+void omap_irq_disable_vblank(struct drm_device *dev, int crtc)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ unsigned long flags;
+
+ DBG("dev=%p, crtc=%d", dev, crtc);
+
+ dispc_runtime_get();
+ spin_lock_irqsave(&list_lock, flags);
+ priv->vblank_mask &= ~pipe2vbl(crtc);
+ omap_irq_update(dev);
+ spin_unlock_irqrestore(&list_lock, flags);
+ dispc_runtime_put();
+}
+
+irqreturn_t omap_irq_handler(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = (struct drm_device *) arg;
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_drm_irq *handler, *n;
+ unsigned long flags;
+ unsigned int id;
+ u32 irqstatus;
+
+ irqstatus = dispc_read_irqstatus();
+ dispc_clear_irqstatus(irqstatus);
+ dispc_read_irqstatus(); /* flush posted write */
+
+ VERB("irqs: %08x", irqstatus);
+
+ for (id = 0; id < priv->num_crtcs; id++)
+ if (irqstatus & pipe2vbl(id))
+ drm_handle_vblank(dev, id);
+
+ spin_lock_irqsave(&list_lock, flags);
+ list_for_each_entry_safe(handler, n, &priv->irq_list, node) {
+ if (handler->irqmask & irqstatus) {
+ spin_unlock_irqrestore(&list_lock, flags);
+ handler->irq(handler, handler->irqmask & irqstatus);
+ spin_lock_irqsave(&list_lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+void omap_irq_preinstall(struct drm_device *dev)
+{
+ DBG("dev=%p", dev);
+ dispc_runtime_get();
+ dispc_clear_irqstatus(0xffffffff);
+ dispc_runtime_put();
+}
+
+int omap_irq_postinstall(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_drm_irq *error_handler = &priv->error_handler;
+
+ DBG("dev=%p", dev);
+
+ INIT_LIST_HEAD(&priv->irq_list);
+
+ error_handler->irq = omap_irq_error_handler;
+ error_handler->irqmask = DISPC_IRQ_OCP_ERR;
+
+ /* for now ignore DISPC_IRQ_SYNC_LOST_DIGIT; we probably just
+ * need to ignore it while enabling tv-out
+ */
+ error_handler->irqmask &= ~DISPC_IRQ_SYNC_LOST_DIGIT;
+
+ omap_irq_register(dev, error_handler);
+
+ return 0;
+}
+
+void omap_irq_uninstall(struct drm_device *dev)
+{
+ DBG("dev=%p", dev);
+ /* TODO: we probably need to call drm_irq_uninstall() somewhere too */
+}
+
+/*
+ * We need a special version, instead of just using drm_irq_install(),
+ * because we need to register the irq via omapdss. Once omapdss and
+ * omapdrm are merged together we can assign the dispc hwmod data to
+ * ourselves and drop these and just use drm_irq_{install,uninstall}()
+ */
+
+int omap_drm_irq_install(struct drm_device *dev)
+{
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (dev->irq_enabled) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EBUSY;
+ }
+ dev->irq_enabled = 1;
+ mutex_unlock(&dev->struct_mutex);
+
+ /* Before installing handler */
+ if (dev->driver->irq_preinstall)
+ dev->driver->irq_preinstall(dev);
+
+ ret = dispc_request_irq(dev->driver->irq_handler, dev);
+
+ if (ret < 0) {
+ mutex_lock(&dev->struct_mutex);
+ dev->irq_enabled = 0;
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ /* After installing handler */
+ if (dev->driver->irq_postinstall)
+ ret = dev->driver->irq_postinstall(dev);
+
+ if (ret < 0) {
+ mutex_lock(&dev->struct_mutex);
+ dev->irq_enabled = 0;
+ mutex_unlock(&dev->struct_mutex);
+ dispc_free_irq(dev);
+ }
+
+ return ret;
+}
+
+int omap_drm_irq_uninstall(struct drm_device *dev)
+{
+ unsigned long irqflags;
+ int irq_enabled, i;
+
+ mutex_lock(&dev->struct_mutex);
+ irq_enabled = dev->irq_enabled;
+ dev->irq_enabled = 0;
+ mutex_unlock(&dev->struct_mutex);
+
+ /*
+ * Wake up any waiters so they don't hang.
+ */
+ if (dev->num_crtcs) {
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ for (i = 0; i < dev->num_crtcs; i++) {
+ DRM_WAKEUP(&dev->vbl_queue[i]);
+ dev->vblank_enabled[i] = 0;
+ dev->last_vblank[i] =
+ dev->driver->get_vblank_counter(dev, i);
+ }
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+ }
+
+ if (!irq_enabled)
+ return -EINVAL;
+
+ if (dev->driver->irq_uninstall)
+ dev->driver->irq_uninstall(dev);
+
+ dispc_free_irq(dev);
+
+ return 0;
+}
diff --git a/drivers/staging/omapdrm/omap_plane.c b/drivers/staging/omapdrm/omap_plane.c
index 2a8e5bab49c9..bb989d7f026d 100644
--- a/drivers/staging/omapdrm/omap_plane.c
+++ b/drivers/staging/omapdrm/omap_plane.c
@@ -41,12 +41,14 @@ struct callback {
struct omap_plane {
struct drm_plane base;
- struct omap_overlay *ovl;
+ int id; /* TODO rename omap_plane -> omap_plane_id in omapdss so I can use the enum */
+ const char *name;
struct omap_overlay_info info;
+ struct omap_drm_apply apply;
/* position/orientation of scanout within the fb: */
struct omap_drm_window win;
-
+ bool enabled;
/* last fb that we pinned: */
struct drm_framebuffer *pinned_fb;
@@ -54,189 +56,15 @@ struct omap_plane {
uint32_t nformats;
uint32_t formats[32];
- /* for synchronizing access to unpins fifo */
- struct mutex unpin_mutex;
+ struct omap_drm_irq error_irq;
- /* set of bo's pending unpin until next END_WIN irq */
+ /* set of bo's pending unpin until next post_apply() */
DECLARE_KFIFO_PTR(unpin_fifo, struct drm_gem_object *);
- int num_unpins, pending_num_unpins;
-
- /* for deferred unpin when we need to wait for scanout complete irq */
- struct work_struct work;
-
- /* callback on next endwin irq */
- struct callback endwin;
-};
-/* map from ovl->id to the irq we are interested in for scanout-done */
-static const uint32_t id2irq[] = {
- [OMAP_DSS_GFX] = DISPC_IRQ_GFX_END_WIN,
- [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_END_WIN,
- [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_END_WIN,
- [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_END_WIN,
+ /* XXX maybe get rid of this and handle vblank in crtc too? */
+ struct callback apply_done_cb;
};
-static void dispc_isr(void *arg, uint32_t mask)
-{
- struct drm_plane *plane = arg;
- struct omap_plane *omap_plane = to_omap_plane(plane);
- struct omap_drm_private *priv = plane->dev->dev_private;
-
- omap_dispc_unregister_isr(dispc_isr, plane,
- id2irq[omap_plane->ovl->id]);
-
- queue_work(priv->wq, &omap_plane->work);
-}
-
-static void unpin_worker(struct work_struct *work)
-{
- struct omap_plane *omap_plane =
- container_of(work, struct omap_plane, work);
- struct callback endwin;
-
- mutex_lock(&omap_plane->unpin_mutex);
- DBG("unpinning %d of %d", omap_plane->num_unpins,
- omap_plane->num_unpins + omap_plane->pending_num_unpins);
- while (omap_plane->num_unpins > 0) {
- struct drm_gem_object *bo = NULL;
- int ret = kfifo_get(&omap_plane->unpin_fifo, &bo);
- WARN_ON(!ret);
- omap_gem_put_paddr(bo);
- drm_gem_object_unreference_unlocked(bo);
- omap_plane->num_unpins--;
- }
- endwin = omap_plane->endwin;
- omap_plane->endwin.fxn = NULL;
- mutex_unlock(&omap_plane->unpin_mutex);
-
- if (endwin.fxn)
- endwin.fxn(endwin.arg);
-}
-
-static void install_irq(struct drm_plane *plane)
-{
- struct omap_plane *omap_plane = to_omap_plane(plane);
- struct omap_overlay *ovl = omap_plane->ovl;
- int ret;
-
- ret = omap_dispc_register_isr(dispc_isr, plane, id2irq[ovl->id]);
-
- /*
- * omapdss has upper limit on # of registered irq handlers,
- * which we shouldn't hit.. but if we do the limit should
- * be raised or bad things happen:
- */
- WARN_ON(ret == -EBUSY);
-}
-
-/* push changes down to dss2 */
-static int commit(struct drm_plane *plane)
-{
- struct drm_device *dev = plane->dev;
- struct omap_plane *omap_plane = to_omap_plane(plane);
- struct omap_overlay *ovl = omap_plane->ovl;
- struct omap_overlay_info *info = &omap_plane->info;
- int ret;
-
- DBG("%s", ovl->name);
- DBG("%dx%d -> %dx%d (%d)", info->width, info->height, info->out_width,
- info->out_height, info->screen_width);
- DBG("%d,%d %08x %08x", info->pos_x, info->pos_y,
- info->paddr, info->p_uv_addr);
-
- /* NOTE: do we want to do this at all here, or just wait
- * for dpms(ON) since other CRTC's may not have their mode
- * set yet, so fb dimensions may still change..
- */
- ret = ovl->set_overlay_info(ovl, info);
- if (ret) {
- dev_err(dev->dev, "could not set overlay info\n");
- return ret;
- }
-
- mutex_lock(&omap_plane->unpin_mutex);
- omap_plane->num_unpins += omap_plane->pending_num_unpins;
- omap_plane->pending_num_unpins = 0;
- mutex_unlock(&omap_plane->unpin_mutex);
-
- /* our encoder doesn't necessarily get a commit() after this, in
- * particular in the dpms() and mode_set_base() cases, so force the
- * manager to update:
- *
- * could this be in the encoder somehow?
- */
- if (ovl->manager) {
- ret = ovl->manager->apply(ovl->manager);
- if (ret) {
- dev_err(dev->dev, "could not apply settings\n");
- return ret;
- }
-
- /*
- * NOTE: really this should be atomic w/ mgr->apply() but
- * omapdss does not expose such an API
- */
- if (omap_plane->num_unpins > 0)
- install_irq(plane);
-
- } else {
- struct omap_drm_private *priv = dev->dev_private;
- queue_work(priv->wq, &omap_plane->work);
- }
-
-
- if (ovl->is_enabled(ovl)) {
- omap_framebuffer_flush(plane->fb, info->pos_x, info->pos_y,
- info->out_width, info->out_height);
- }
-
- return 0;
-}
-
-/* when CRTC that we are attached to has potentially changed, this checks
- * if we are attached to proper manager, and if necessary updates.
- */
-static void update_manager(struct drm_plane *plane)
-{
- struct omap_drm_private *priv = plane->dev->dev_private;
- struct omap_plane *omap_plane = to_omap_plane(plane);
- struct omap_overlay *ovl = omap_plane->ovl;
- struct omap_overlay_manager *mgr = NULL;
- int i;
-
- if (plane->crtc) {
- for (i = 0; i < priv->num_encoders; i++) {
- struct drm_encoder *encoder = priv->encoders[i];
- if (encoder->crtc == plane->crtc) {
- mgr = omap_encoder_get_manager(encoder);
- break;
- }
- }
- }
-
- if (ovl->manager != mgr) {
- bool enabled = ovl->is_enabled(ovl);
-
- /* don't switch things around with enabled overlays: */
- if (enabled)
- omap_plane_dpms(plane, DRM_MODE_DPMS_OFF);
-
- if (ovl->manager) {
- DBG("disconnecting %s from %s", ovl->name,
- ovl->manager->name);
- ovl->unset_manager(ovl);
- }
-
- if (mgr) {
- DBG("connecting %s to %s", ovl->name, mgr->name);
- ovl->set_manager(ovl, mgr);
- }
-
- if (enabled && mgr)
- omap_plane_dpms(plane, DRM_MODE_DPMS_ON);
- }
-}
-
static void unpin(void *arg, struct drm_gem_object *bo)
{
struct drm_plane *plane = arg;
@@ -244,7 +72,6 @@ static void unpin(void *arg, struct drm_gem_object *bo)
if (kfifo_put(&omap_plane->unpin_fifo,
(const struct drm_gem_object **)&bo)) {
- omap_plane->pending_num_unpins++;
/* also hold a ref so it isn't free'd while pinned */
drm_gem_object_reference(bo);
} else {
@@ -264,13 +91,19 @@ static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
DBG("%p -> %p", pinned_fb, fb);
- mutex_lock(&omap_plane->unpin_mutex);
+ if (fb)
+ drm_framebuffer_reference(fb);
+
ret = omap_framebuffer_replace(pinned_fb, fb, plane, unpin);
- mutex_unlock(&omap_plane->unpin_mutex);
+
+ if (pinned_fb)
+ drm_framebuffer_unreference(pinned_fb);
if (ret) {
dev_err(plane->dev->dev, "could not swap %p -> %p\n",
omap_plane->pinned_fb, fb);
+ if (fb)
+ drm_framebuffer_unreference(fb);
omap_plane->pinned_fb = NULL;
return ret;
}
@@ -281,31 +114,90 @@ static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
return 0;
}
-/* update parameters that are dependent on the framebuffer dimensions and
- * position within the fb that this plane scans out from. This is called
- * when framebuffer or x,y base may have changed.
- */
-static void update_scanout(struct drm_plane *plane)
+static void omap_plane_pre_apply(struct omap_drm_apply *apply)
{
- struct omap_plane *omap_plane = to_omap_plane(plane);
- struct omap_overlay_info *info = &omap_plane->info;
+ struct omap_plane *omap_plane =
+ container_of(apply, struct omap_plane, apply);
struct omap_drm_window *win = &omap_plane->win;
+ struct drm_plane *plane = &omap_plane->base;
+ struct drm_device *dev = plane->dev;
+ struct omap_overlay_info *info = &omap_plane->info;
+ struct drm_crtc *crtc = plane->crtc;
+ enum omap_channel channel;
+ bool enabled = omap_plane->enabled && crtc;
+ bool ilace, replication;
int ret;
- ret = update_pin(plane, plane->fb);
- if (ret) {
- dev_err(plane->dev->dev,
- "could not pin fb: %d\n", ret);
- omap_plane_dpms(plane, DRM_MODE_DPMS_OFF);
+ DBG("%s, enabled=%d", omap_plane->name, enabled);
+
+ /* if fb has changed, pin new fb: */
+ update_pin(plane, enabled ? plane->fb : NULL);
+
+ if (!enabled) {
+ dispc_ovl_enable(omap_plane->id, false);
return;
}
+ channel = omap_crtc_channel(crtc);
+
+ /* update scanout: */
omap_framebuffer_update_scanout(plane->fb, win, info);
- DBG("%s: %d,%d: %08x %08x (%d)", omap_plane->ovl->name,
- win->src_x, win->src_y,
- (u32)info->paddr, (u32)info->p_uv_addr,
+ DBG("%dx%d -> %dx%d (%d)", info->width, info->height,
+ info->out_width, info->out_height,
info->screen_width);
+ DBG("%d,%d %08x %08x", info->pos_x, info->pos_y,
+ info->paddr, info->p_uv_addr);
+
+ /* TODO: */
+ ilace = false;
+ replication = false;
+
+ /* and finally, update omapdss: */
+ ret = dispc_ovl_setup(omap_plane->id, info,
+ replication, omap_crtc_timings(crtc), false);
+ if (ret) {
+ dev_err(dev->dev, "dispc_ovl_setup failed: %d\n", ret);
+ return;
+ }
+
+ dispc_ovl_enable(omap_plane->id, true);
+ dispc_ovl_set_channel_out(omap_plane->id, channel);
+}
+
+static void omap_plane_post_apply(struct omap_drm_apply *apply)
+{
+ struct omap_plane *omap_plane =
+ container_of(apply, struct omap_plane, apply);
+ struct drm_plane *plane = &omap_plane->base;
+ struct omap_overlay_info *info = &omap_plane->info;
+ struct drm_gem_object *bo = NULL;
+ struct callback cb;
+
+ cb = omap_plane->apply_done_cb;
+ omap_plane->apply_done_cb.fxn = NULL;
+
+ while (kfifo_get(&omap_plane->unpin_fifo, &bo)) {
+ omap_gem_put_paddr(bo);
+ drm_gem_object_unreference_unlocked(bo);
+ }
+
+ if (cb.fxn)
+ cb.fxn(cb.arg);
+
+ if (omap_plane->enabled) {
+ omap_framebuffer_flush(plane->fb, info->pos_x, info->pos_y,
+ info->out_width, info->out_height);
+ }
+}
+
+static int apply(struct drm_plane *plane)
+{
+ if (plane->crtc) {
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+ return omap_crtc_apply(plane->crtc, &omap_plane->apply);
+ }
+ return 0;
}
int omap_plane_mode_set(struct drm_plane *plane,
@@ -313,7 +205,8 @@ int omap_plane_mode_set(struct drm_plane *plane,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+ uint32_t src_w, uint32_t src_h,
+ void (*fxn)(void *), void *arg)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
struct omap_drm_window *win = &omap_plane->win;
@@ -329,17 +222,20 @@ int omap_plane_mode_set(struct drm_plane *plane,
win->src_w = src_w >> 16;
win->src_h = src_h >> 16;
- /* note: this is done after this fxn returns.. but if we need
- * to do a commit/update_scanout, etc before this returns we
- * need the current value.
- */
+ if (fxn) {
+ /* omap_crtc should ensure that a new page flip
+ * isn't permitted while there is one pending:
+ */
+ BUG_ON(omap_plane->apply_done_cb.fxn);
+
+ omap_plane->apply_done_cb.fxn = fxn;
+ omap_plane->apply_done_cb.arg = arg;
+ }
+
plane->fb = fb;
plane->crtc = crtc;
- update_scanout(plane);
- update_manager(plane);
-
- return 0;
+ return apply(plane);
}
static int omap_plane_update(struct drm_plane *plane,
@@ -349,9 +245,12 @@ static int omap_plane_update(struct drm_plane *plane,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
- omap_plane_mode_set(plane, crtc, fb, crtc_x, crtc_y, crtc_w, crtc_h,
- src_x, src_y, src_w, src_h);
- return omap_plane_dpms(plane, DRM_MODE_DPMS_ON);
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+ omap_plane->enabled = true;
+ return omap_plane_mode_set(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h,
+ NULL, NULL);
}
static int omap_plane_disable(struct drm_plane *plane)
@@ -364,48 +263,32 @@ static int omap_plane_disable(struct drm_plane *plane)
static void omap_plane_destroy(struct drm_plane *plane)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
- DBG("%s", omap_plane->ovl->name);
+
+ DBG("%s", omap_plane->name);
+
+ omap_irq_unregister(plane->dev, &omap_plane->error_irq);
+
omap_plane_disable(plane);
drm_plane_cleanup(plane);
- WARN_ON(omap_plane->pending_num_unpins + omap_plane->num_unpins > 0);
+
+ WARN_ON(!kfifo_is_empty(&omap_plane->unpin_fifo));
kfifo_free(&omap_plane->unpin_fifo);
+
kfree(omap_plane);
}
int omap_plane_dpms(struct drm_plane *plane, int mode)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
- struct omap_overlay *ovl = omap_plane->ovl;
- int r;
+ bool enabled = (mode == DRM_MODE_DPMS_ON);
+ int ret = 0;
- DBG("%s: %d", omap_plane->ovl->name, mode);
-
- if (mode == DRM_MODE_DPMS_ON) {
- update_scanout(plane);
- r = commit(plane);
- if (!r)
- r = ovl->enable(ovl);
- } else {
- struct omap_drm_private *priv = plane->dev->dev_private;
- r = ovl->disable(ovl);
- update_pin(plane, NULL);
- queue_work(priv->wq, &omap_plane->work);
+ if (enabled != omap_plane->enabled) {
+ omap_plane->enabled = enabled;
+ ret = apply(plane);
}
- return r;
-}
-
-void omap_plane_on_endwin(struct drm_plane *plane,
- void (*fxn)(void *), void *arg)
-{
- struct omap_plane *omap_plane = to_omap_plane(plane);
-
- mutex_lock(&omap_plane->unpin_mutex);
- omap_plane->endwin.fxn = fxn;
- omap_plane->endwin.arg = arg;
- mutex_unlock(&omap_plane->unpin_mutex);
-
- install_irq(plane);
+ return ret;
}
/* helper to install properties which are common to planes and crtcs */
@@ -454,25 +337,13 @@ int omap_plane_set_property(struct drm_plane *plane,
int ret = -EINVAL;
if (property == priv->rotation_prop) {
- struct omap_overlay *ovl = omap_plane->ovl;
-
- DBG("%s: rotation: %02x", ovl->name, (uint32_t)val);
+ DBG("%s: rotation: %02x", omap_plane->name, (uint32_t)val);
omap_plane->win.rotation = val;
-
- if (ovl->is_enabled(ovl))
- ret = omap_plane_dpms(plane, DRM_MODE_DPMS_ON);
- else
- ret = 0;
+ ret = apply(plane);
} else if (property == priv->zorder_prop) {
- struct omap_overlay *ovl = omap_plane->ovl;
-
- DBG("%s: zorder: %d", ovl->name, (uint32_t)val);
+ DBG("%s: zorder: %02x", omap_plane->name, (uint32_t)val);
omap_plane->info.zorder = val;
-
- if (ovl->is_enabled(ovl))
- ret = omap_plane_dpms(plane, DRM_MODE_DPMS_ON);
- else
- ret = 0;
+ ret = apply(plane);
}
return ret;
@@ -485,20 +356,38 @@ static const struct drm_plane_funcs omap_plane_funcs = {
.set_property = omap_plane_set_property,
};
+static void omap_plane_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+ struct omap_plane *omap_plane =
+ container_of(irq, struct omap_plane, error_irq);
+ DRM_ERROR("%s: errors: %08x\n", omap_plane->name, irqstatus);
+}
+
+static const char *plane_names[] = {
+ [OMAP_DSS_GFX] = "gfx",
+ [OMAP_DSS_VIDEO1] = "vid1",
+ [OMAP_DSS_VIDEO2] = "vid2",
+ [OMAP_DSS_VIDEO3] = "vid3",
+};
+
+static const uint32_t error_irqs[] = {
+ [OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW,
+ [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW,
+ [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW,
+ [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW,
+};
+
/* initialize plane */
struct drm_plane *omap_plane_init(struct drm_device *dev,
- struct omap_overlay *ovl, unsigned int possible_crtcs,
- bool priv)
+ int id, bool private_plane)
{
+ struct omap_drm_private *priv = dev->dev_private;
struct drm_plane *plane = NULL;
struct omap_plane *omap_plane;
+ struct omap_overlay_info *info;
int ret;
- DBG("%s: possible_crtcs=%08x, priv=%d", ovl->name,
- possible_crtcs, priv);
-
- /* friendly reminder to update table for future hw: */
- WARN_ON(ovl->id >= ARRAY_SIZE(id2irq));
+ DBG("%s: priv=%d", plane_names[id], private_plane);
omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
if (!omap_plane) {
@@ -506,47 +395,50 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
goto fail;
}
- mutex_init(&omap_plane->unpin_mutex);
-
ret = kfifo_alloc(&omap_plane->unpin_fifo, 16, GFP_KERNEL);
if (ret) {
dev_err(dev->dev, "could not allocate unpin FIFO\n");
goto fail;
}
- INIT_WORK(&omap_plane->work, unpin_worker);
-
omap_plane->nformats = omap_framebuffer_get_formats(
omap_plane->formats, ARRAY_SIZE(omap_plane->formats),
- ovl->supported_modes);
- omap_plane->ovl = ovl;
+ dss_feat_get_supported_color_modes(id));
+ omap_plane->id = id;
+ omap_plane->name = plane_names[id];
+
plane = &omap_plane->base;
- drm_plane_init(dev, plane, possible_crtcs, &omap_plane_funcs,
- omap_plane->formats, omap_plane->nformats, priv);
+ omap_plane->apply.pre_apply = omap_plane_pre_apply;
+ omap_plane->apply.post_apply = omap_plane_post_apply;
+
+ omap_plane->error_irq.irqmask = error_irqs[id];
+ omap_plane->error_irq.irq = omap_plane_error_irq;
+ omap_irq_register(dev, &omap_plane->error_irq);
+
+ drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &omap_plane_funcs,
+ omap_plane->formats, omap_plane->nformats, private_plane);
omap_plane_install_properties(plane, &plane->base);
/* get our starting configuration, set defaults for parameters
* we don't currently use, etc:
*/
- ovl->get_overlay_info(ovl, &omap_plane->info);
- omap_plane->info.rotation_type = OMAP_DSS_ROT_DMA;
- omap_plane->info.rotation = OMAP_DSS_ROT_0;
- omap_plane->info.global_alpha = 0xff;
- omap_plane->info.mirror = 0;
+ info = &omap_plane->info;
+ info->rotation_type = OMAP_DSS_ROT_DMA;
+ info->rotation = OMAP_DSS_ROT_0;
+ info->global_alpha = 0xff;
+ info->mirror = 0;
/* Set defaults depending on whether we are a CRTC or overlay
* layer.
* TODO add ioctl to give userspace an API to change this.. this
* will come in a subsequent patch.
*/
- if (priv)
+ if (private_plane)
omap_plane->info.zorder = 0;
else
- omap_plane->info.zorder = ovl->id;
-
- update_manager(plane);
+ omap_plane->info.zorder = id;
return plane;
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index ae38475854b5..d10d75e8a33f 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -937,7 +937,8 @@ short alloc_rx_desc_ring(struct net_device *dev, u16 bufsize, int count)
dma_tmp = pci_map_single(pdev, buf, bufsize * sizeof(u8),
PCI_DMA_FROMDEVICE);
-
+ if (pci_dma_mapping_error(pdev, dma_tmp))
+ return -1;
if (-1 == buffer_add(&(priv->rxbuffer), buf, dma_tmp,
&(priv->rxbufferhead))) {
DMESGE("Unable to allocate mem RX buf");
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index 808aab6fa5ef..a9d78e9651c6 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -1183,6 +1183,8 @@ void rtl8192_tx_fill_desc(struct net_device *dev, struct tx_desc *pdesc,
pTxFwInfo->TxRate,
cb_desc);
+ if (pci_dma_mapping_error(priv->pdev, mapping))
+ RT_TRACE(COMP_ERR, "DMA Mapping error\n");;
if (cb_desc->bAMPDUEnable) {
pTxFwInfo->AllowAggregation = 1;
pTxFwInfo->RxMF = cb_desc->ampdu_factor;
@@ -1280,6 +1282,8 @@ void rtl8192_tx_fill_cmd_desc(struct net_device *dev,
dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(priv->pdev, mapping))
+ RT_TRACE(COMP_ERR, "DMA Mapping error\n");;
memset(entry, 0, 12);
entry->LINIP = cb_desc->bLastIniPkt;
entry->FirstSeg = 1;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 1a70f324552f..4ebf99b30975 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -2104,7 +2104,10 @@ static short rtl8192_alloc_rx_desc_ring(struct net_device *dev)
skb_tail_pointer_rsl(skb),
priv->rxbuffersize,
PCI_DMA_FROMDEVICE);
-
+ if (pci_dma_mapping_error(priv->pdev, *mapping)) {
+ dev_kfree_skb_any(skb);
+ return -1;
+ }
entry->BufferAddress = cpu_to_le32(*mapping);
entry->Length = priv->rxbuffersize;
@@ -2397,7 +2400,11 @@ static void rtl8192_rx_normal(struct net_device *dev)
skb_tail_pointer_rsl(skb),
priv->rxbuffersize,
PCI_DMA_FROMDEVICE);
-
+ if (pci_dma_mapping_error(priv->pdev,
+ *((dma_addr_t *)skb->cb))) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
}
done:
pdesc->BufferAddress = cpu_to_le32(*((dma_addr_t *)skb->cb));
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index 6b73843e580a..a96cd06d69dd 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -63,6 +63,8 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
{USB_DEVICE(0x0B05, 0x1791)}, /* 11n mode disable */
/* Belkin */
{USB_DEVICE(0x050D, 0x945A)},
+ /* ISY IWL - Belkin clone */
+ {USB_DEVICE(0x050D, 0x11F1)},
/* Corega */
{USB_DEVICE(0x07AA, 0x0047)},
/* D-Link */
diff --git a/drivers/staging/sb105x/Kconfig b/drivers/staging/sb105x/Kconfig
index ac87c5e38dee..1facad625554 100644
--- a/drivers/staging/sb105x/Kconfig
+++ b/drivers/staging/sb105x/Kconfig
@@ -2,6 +2,7 @@ config SB105X
tristate "SystemBase PCI Multiport UART"
select SERIAL_CORE
depends on PCI
+ depends on X86
help
A driver for the SystemBase Multi-2/PCI serial card
diff --git a/drivers/staging/sb105x/sb_pci_mp.c b/drivers/staging/sb105x/sb_pci_mp.c
index edb2a85b9d52..131afd0c460c 100644
--- a/drivers/staging/sb105x/sb_pci_mp.c
+++ b/drivers/staging/sb105x/sb_pci_mp.c
@@ -3054,6 +3054,7 @@ static int init_mp_dev(struct pci_dev *pcidev, mppcibrd_t brd)
sbdev->nr_ports = ((portnum_hex/16)*10) + (portnum_hex % 16);
}
break;
+#ifdef CONFIG_PARPORT
case PCI_DEVICE_ID_MP2S1P :
sbdev->nr_ports = 2;
@@ -3073,6 +3074,7 @@ static int init_mp_dev(struct pci_dev *pcidev, mppcibrd_t brd)
/* add PC compatible parallel port */
parport_pc_probe_port(pcidev->resource[2].start, pcidev->resource[3].start, PARPORT_IRQ_NONE, PARPORT_DMA_NONE, &pcidev->dev, 0);
break;
+#endif
}
ret = request_region(sbdev->uart_access_addr, (8*sbdev->nr_ports), sbdev->name);
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index df9533798095..7616f058a00b 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -342,7 +342,7 @@ int synth_init(char *synth_name)
mutex_lock(&spk_mutex);
/* First, check if we already have it loaded. */
- for (i = 0; synths[i] != NULL && i < MAXSYNTHS; i++)
+ for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++)
if (strcmp(synths[i]->name, synth_name) == 0)
synth = synths[i];
@@ -423,7 +423,7 @@ int synth_add(struct spk_synth *in_synth)
int i;
int status = 0;
mutex_lock(&spk_mutex);
- for (i = 0; synths[i] != NULL && i < MAXSYNTHS; i++)
+ for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++)
/* synth_remove() is responsible for rotating the array down */
if (in_synth == synths[i]) {
mutex_unlock(&spk_mutex);
diff --git a/drivers/staging/tidspbridge/core/_tiomap.h b/drivers/staging/tidspbridge/core/_tiomap.h
index 543a127c7d4d..b783bfa59b1c 100644
--- a/drivers/staging/tidspbridge/core/_tiomap.h
+++ b/drivers/staging/tidspbridge/core/_tiomap.h
@@ -31,7 +31,7 @@
* driver should read or write to PRM/CM registers directly; they
* should rely on OMAP core code to do this.
*/
-#include <mach-omap2/cm2xxx_3xxx.h>
+#include <mach-omap2/cm3xxx.h>
#include <mach-omap2/prm-regbits-34xx.h>
#include <mach-omap2/cm-regbits-34xx.h>
#include <dspbridge/devdefs.h>
diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
index b647207928b1..2f084e181d39 100644
--- a/drivers/staging/tidspbridge/core/dsp-clock.c
+++ b/drivers/staging/tidspbridge/core/dsp-clock.c
@@ -121,9 +121,13 @@ void dsp_clk_exit(void)
for (i = 0; i < DM_TIMER_CLOCKS; i++)
omap_dm_timer_free(timer[i]);
+ clk_unprepare(iva2_clk);
clk_put(iva2_clk);
+ clk_unprepare(ssi.sst_fck);
clk_put(ssi.sst_fck);
+ clk_unprepare(ssi.ssr_fck);
clk_put(ssi.ssr_fck);
+ clk_unprepare(ssi.ick);
clk_put(ssi.ick);
}
@@ -145,14 +149,21 @@ void dsp_clk_init(void)
iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck");
if (IS_ERR(iva2_clk))
dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk);
+ else
+ clk_prepare(iva2_clk);
ssi.sst_fck = clk_get(&dspbridge_device.dev, "ssi_sst_fck");
ssi.ssr_fck = clk_get(&dspbridge_device.dev, "ssi_ssr_fck");
ssi.ick = clk_get(&dspbridge_device.dev, "ssi_ick");
- if (IS_ERR(ssi.sst_fck) || IS_ERR(ssi.ssr_fck) || IS_ERR(ssi.ick))
+ if (IS_ERR(ssi.sst_fck) || IS_ERR(ssi.ssr_fck) || IS_ERR(ssi.ick)) {
dev_err(bridge, "failed to get ssi: sst %p, ssr %p, ick %p\n",
ssi.sst_fck, ssi.ssr_fck, ssi.ick);
+ } else {
+ clk_prepare(ssi.sst_fck);
+ clk_prepare(ssi.ssr_fck);
+ clk_prepare(ssi.ick);
+ }
}
/**
diff --git a/drivers/staging/tidspbridge/core/wdt.c b/drivers/staging/tidspbridge/core/wdt.c
index 1dce36fb828f..7ff0e6c98039 100644
--- a/drivers/staging/tidspbridge/core/wdt.c
+++ b/drivers/staging/tidspbridge/core/wdt.c
@@ -63,11 +63,15 @@ int dsp_wdt_init(void)
dsp_wdt.fclk = clk_get(NULL, "wdt3_fck");
if (!IS_ERR(dsp_wdt.fclk)) {
+ clk_prepare(dsp_wdt.fclk);
+
dsp_wdt.iclk = clk_get(NULL, "wdt3_ick");
if (IS_ERR(dsp_wdt.iclk)) {
clk_put(dsp_wdt.fclk);
dsp_wdt.fclk = NULL;
ret = -EFAULT;
+ } else {
+ clk_prepare(dsp_wdt.iclk);
}
} else
ret = -EFAULT;
@@ -95,10 +99,14 @@ void dsp_wdt_exit(void)
free_irq(INT_34XX_WDT3_IRQ, &dsp_wdt);
tasklet_kill(&dsp_wdt.wdt3_tasklet);
- if (dsp_wdt.fclk)
+ if (dsp_wdt.fclk) {
+ clk_unprepare(dsp_wdt.fclk);
clk_put(dsp_wdt.fclk);
- if (dsp_wdt.iclk)
+ }
+ if (dsp_wdt.iclk) {
+ clk_unprepare(dsp_wdt.iclk);
clk_put(dsp_wdt.iclk);
+ }
dsp_wdt.fclk = NULL;
dsp_wdt.iclk = NULL;
diff --git a/drivers/staging/vme/devices/vme_pio2_core.c b/drivers/staging/vme/devices/vme_pio2_core.c
index 0331178ca3b3..bf73ba26e88a 100644
--- a/drivers/staging/vme/devices/vme_pio2_core.c
+++ b/drivers/staging/vme/devices/vme_pio2_core.c
@@ -162,11 +162,9 @@ static struct vme_driver pio2_driver = {
static int __init pio2_init(void)
{
- int retval = 0;
-
if (bus_num == 0) {
pr_err("No cards, skipping registration\n");
- goto err_nocard;
+ return -ENODEV;
}
if (bus_num > PIO2_CARDS_MAX) {
@@ -176,15 +174,7 @@ static int __init pio2_init(void)
}
/* Register the PIO2 driver */
- retval = vme_register_driver(&pio2_driver, bus_num);
- if (retval != 0)
- goto err_reg;
-
- return retval;
-
-err_reg:
-err_nocard:
- return retval;
+ return vme_register_driver(&pio2_driver, bus_num);
}
static int pio2_match(struct vme_dev *vdev)
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 18c06a59c091..1d31eab19d16 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -638,8 +638,8 @@ int prism2_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
}
-int prism2_set_tx_power(struct wiphy *wiphy, enum nl80211_tx_power_setting type,
- int mbm)
+int prism2_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+ enum nl80211_tx_power_setting type, int mbm)
{
struct prism2_wiphy_private *priv = wiphy_priv(wiphy);
wlandevice_t *wlandev = priv->wlandev;
@@ -665,7 +665,8 @@ exit:
return err;
}
-int prism2_get_tx_power(struct wiphy *wiphy, int *dbm)
+int prism2_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+ int *dbm)
{
struct prism2_wiphy_private *priv = wiphy_priv(wiphy);
wlandevice_t *wlandev = priv->wlandev;
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index fb4a7c94aed3..f2a73bd739fb 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -265,7 +265,7 @@ out_cleanup:
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
int offset)
{
- int ret;
+ int ret = 0;
size_t clen;
unsigned long handle;
struct page *page;
@@ -286,10 +286,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
goto out;
}
ret = zram_decompress_page(zram, uncmem, index);
- if (ret) {
- kfree(uncmem);
+ if (ret)
goto out;
- }
}
/*
@@ -302,16 +300,18 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
user_mem = kmap_atomic(page);
- if (is_partial_io(bvec))
+ if (is_partial_io(bvec)) {
memcpy(uncmem + offset, user_mem + bvec->bv_offset,
bvec->bv_len);
- else
+ kunmap_atomic(user_mem);
+ user_mem = NULL;
+ } else {
uncmem = user_mem;
+ }
if (page_zero_filled(uncmem)) {
- kunmap_atomic(user_mem);
- if (is_partial_io(bvec))
- kfree(uncmem);
+ if (!is_partial_io(bvec))
+ kunmap_atomic(user_mem);
zram_stat_inc(&zram->stats.pages_zero);
zram_set_flag(zram, index, ZRAM_ZERO);
ret = 0;
@@ -321,9 +321,11 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
zram->compress_workmem);
- kunmap_atomic(user_mem);
- if (is_partial_io(bvec))
- kfree(uncmem);
+ if (!is_partial_io(bvec)) {
+ kunmap_atomic(user_mem);
+ user_mem = NULL;
+ uncmem = NULL;
+ }
if (unlikely(ret != LZO_E_OK)) {
pr_err("Compression failed! err=%d\n", ret);
@@ -332,8 +334,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
if (unlikely(clen > max_zpage_size)) {
zram_stat_inc(&zram->stats.bad_compress);
- src = uncmem;
clen = PAGE_SIZE;
+ src = NULL;
+ if (is_partial_io(bvec))
+ src = uncmem;
}
handle = zs_malloc(zram->mem_pool, clen);
@@ -345,7 +349,11 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
}
cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
+ if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
+ src = kmap_atomic(page);
memcpy(cmem, src, clen);
+ if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
+ kunmap_atomic(src);
zs_unmap_object(zram->mem_pool, handle);
@@ -358,9 +366,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
if (clen <= PAGE_SIZE / 2)
zram_stat_inc(&zram->stats.good_compress);
- return 0;
-
out:
+ if (is_partial_io(bvec))
+ kfree(uncmem);
+
if (ret)
zram_stat64_inc(zram, &zram->stats.failed_writes);
return ret;
diff --git a/drivers/switch/Kconfig b/drivers/switch/Kconfig
new file mode 100644
index 000000000000..52385914b9ae
--- /dev/null
+++ b/drivers/switch/Kconfig
@@ -0,0 +1,15 @@
+menuconfig SWITCH
+ tristate "Switch class support"
+ help
+ Say Y here to enable switch class support, which allows
+ userspace to monitor switches via sysfs and uevent.
+
+if SWITCH
+
+config SWITCH_GPIO
+ tristate "GPIO Swith support"
+ depends on GENERIC_GPIO
+ help
+ Say Y here to enable GPIO-based switch support.
+
+endif # SWITCH
diff --git a/drivers/switch/Makefile b/drivers/switch/Makefile
new file mode 100644
index 000000000000..f7606ed4a719
--- /dev/null
+++ b/drivers/switch/Makefile
@@ -0,0 +1,4 @@
+# Switch Class Driver
+obj-$(CONFIG_SWITCH) += switch_class.o
+obj-$(CONFIG_SWITCH_GPIO) += switch_gpio.o
+
diff --git a/drivers/switch/switch_class.c b/drivers/switch/switch_class.c
new file mode 100644
index 000000000000..e05fc2591147
--- /dev/null
+++ b/drivers/switch/switch_class.c
@@ -0,0 +1,174 @@
+/*
+ * drivers/switch/switch_class.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/switch.h>
+
+struct class *switch_class;
+static atomic_t device_count;
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct switch_dev *sdev = (struct switch_dev *)
+ dev_get_drvdata(dev);
+
+ if (sdev->print_state) {
+ int ret = sdev->print_state(sdev, buf);
+ if (ret >= 0)
+ return ret;
+ }
+ return sprintf(buf, "%d\n", sdev->state);
+}
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct switch_dev *sdev = (struct switch_dev *)
+ dev_get_drvdata(dev);
+
+ if (sdev->print_name) {
+ int ret = sdev->print_name(sdev, buf);
+ if (ret >= 0)
+ return ret;
+ }
+ return sprintf(buf, "%s\n", sdev->name);
+}
+
+static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, state_show, NULL);
+static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, name_show, NULL);
+
+void switch_set_state(struct switch_dev *sdev, int state)
+{
+ char name_buf[120];
+ char state_buf[120];
+ char *prop_buf;
+ char *envp[3];
+ int env_offset = 0;
+ int length;
+
+ if (sdev->state != state) {
+ sdev->state = state;
+
+ prop_buf = (char *)get_zeroed_page(GFP_KERNEL);
+ if (prop_buf) {
+ length = name_show(sdev->dev, NULL, prop_buf);
+ if (length > 0) {
+ if (prop_buf[length - 1] == '\n')
+ prop_buf[length - 1] = 0;
+ snprintf(name_buf, sizeof(name_buf),
+ "SWITCH_NAME=%s", prop_buf);
+ envp[env_offset++] = name_buf;
+ }
+ length = state_show(sdev->dev, NULL, prop_buf);
+ if (length > 0) {
+ if (prop_buf[length - 1] == '\n')
+ prop_buf[length - 1] = 0;
+ snprintf(state_buf, sizeof(state_buf),
+ "SWITCH_STATE=%s", prop_buf);
+ envp[env_offset++] = state_buf;
+ }
+ envp[env_offset] = NULL;
+ kobject_uevent_env(&sdev->dev->kobj, KOBJ_CHANGE, envp);
+ free_page((unsigned long)prop_buf);
+ } else {
+ printk(KERN_ERR "out of memory in switch_set_state\n");
+ kobject_uevent(&sdev->dev->kobj, KOBJ_CHANGE);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(switch_set_state);
+
+static int create_switch_class(void)
+{
+ if (!switch_class) {
+ switch_class = class_create(THIS_MODULE, "switch");
+ if (IS_ERR(switch_class))
+ return PTR_ERR(switch_class);
+ atomic_set(&device_count, 0);
+ }
+
+ return 0;
+}
+
+int switch_dev_register(struct switch_dev *sdev)
+{
+ int ret;
+
+ if (!switch_class) {
+ ret = create_switch_class();
+ if (ret < 0)
+ return ret;
+ }
+
+ sdev->index = atomic_inc_return(&device_count);
+ sdev->dev = device_create(switch_class, NULL,
+ MKDEV(0, sdev->index), NULL, sdev->name);
+ if (IS_ERR(sdev->dev))
+ return PTR_ERR(sdev->dev);
+
+ ret = device_create_file(sdev->dev, &dev_attr_state);
+ if (ret < 0)
+ goto err_create_file_1;
+ ret = device_create_file(sdev->dev, &dev_attr_name);
+ if (ret < 0)
+ goto err_create_file_2;
+
+ dev_set_drvdata(sdev->dev, sdev);
+ sdev->state = 0;
+ return 0;
+
+err_create_file_2:
+ device_remove_file(sdev->dev, &dev_attr_state);
+err_create_file_1:
+ device_destroy(switch_class, MKDEV(0, sdev->index));
+ printk(KERN_ERR "switch: Failed to register driver %s\n", sdev->name);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(switch_dev_register);
+
+void switch_dev_unregister(struct switch_dev *sdev)
+{
+ device_remove_file(sdev->dev, &dev_attr_name);
+ device_remove_file(sdev->dev, &dev_attr_state);
+ device_destroy(switch_class, MKDEV(0, sdev->index));
+ dev_set_drvdata(sdev->dev, NULL);
+}
+EXPORT_SYMBOL_GPL(switch_dev_unregister);
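/*
 * Minimal consumer sketch (illustrative; the switch name and state value
 * are made up): register a switch, report a state change so a
 * SWITCH_NAME/SWITCH_STATE uevent is emitted, then tear it down.
 */
static struct switch_dev example_sdev = {
	.name = "example-jack",
};

static int __init example_init(void)
{
	int ret = switch_dev_register(&example_sdev);

	if (ret < 0)
		return ret;
	switch_set_state(&example_sdev, 1);	/* emits the change uevent */
	return 0;
}

static void __exit example_exit(void)
{
	switch_dev_unregister(&example_sdev);
}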
+
+static int __init switch_class_init(void)
+{
+ return create_switch_class();
+}
+
+static void __exit switch_class_exit(void)
+{
+ class_destroy(switch_class);
+}
+
+module_init(switch_class_init);
+module_exit(switch_class_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("Switch class driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/switch/switch_gpio.c b/drivers/switch/switch_gpio.c
new file mode 100644
index 000000000000..15d5f04b3dfd
--- /dev/null
+++ b/drivers/switch/switch_gpio.c
@@ -0,0 +1,172 @@
+/*
+ * drivers/switch/switch_gpio.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/switch.h>
+#include <linux/workqueue.h>
+#include <linux/gpio.h>
+
+struct gpio_switch_data {
+ struct switch_dev sdev;
+ unsigned gpio;
+ const char *name_on;
+ const char *name_off;
+ const char *state_on;
+ const char *state_off;
+ int irq;
+ struct work_struct work;
+};
+
+static void gpio_switch_work(struct work_struct *work)
+{
+ int state;
+ struct gpio_switch_data *data =
+ container_of(work, struct gpio_switch_data, work);
+
+ state = gpio_get_value(data->gpio);
+ switch_set_state(&data->sdev, state);
+}
+
+static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
+{
+ struct gpio_switch_data *switch_data =
+ (struct gpio_switch_data *)dev_id;
+
+ schedule_work(&switch_data->work);
+ return IRQ_HANDLED;
+}
+
+static ssize_t switch_gpio_print_state(struct switch_dev *sdev, char *buf)
+{
+ struct gpio_switch_data *switch_data =
+ container_of(sdev, struct gpio_switch_data, sdev);
+ const char *state;
+ if (switch_get_state(sdev))
+ state = switch_data->state_on;
+ else
+ state = switch_data->state_off;
+
+ if (state)
+ return sprintf(buf, "%s\n", state);
+ return -1;
+}
+
+static int gpio_switch_probe(struct platform_device *pdev)
+{
+ struct gpio_switch_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_switch_data *switch_data;
+ int ret = 0;
+
+ if (!pdata)
+ return -EBUSY;
+
+ switch_data = kzalloc(sizeof(struct gpio_switch_data), GFP_KERNEL);
+ if (!switch_data)
+ return -ENOMEM;
+
+ switch_data->sdev.name = pdata->name;
+ switch_data->gpio = pdata->gpio;
+ switch_data->name_on = pdata->name_on;
+ switch_data->name_off = pdata->name_off;
+ switch_data->state_on = pdata->state_on;
+ switch_data->state_off = pdata->state_off;
+ switch_data->sdev.print_state = switch_gpio_print_state;
+
+ ret = switch_dev_register(&switch_data->sdev);
+ if (ret < 0)
+ goto err_switch_dev_register;
+
+ ret = gpio_request(switch_data->gpio, pdev->name);
+ if (ret < 0)
+ goto err_request_gpio;
+
+ ret = gpio_direction_input(switch_data->gpio);
+ if (ret < 0)
+ goto err_set_gpio_input;
+
+ INIT_WORK(&switch_data->work, gpio_switch_work);
+
+ switch_data->irq = gpio_to_irq(switch_data->gpio);
+ if (switch_data->irq < 0) {
+ ret = switch_data->irq;
+ goto err_detect_irq_num_failed;
+ }
+
+ ret = request_irq(switch_data->irq, gpio_irq_handler,
+ IRQF_TRIGGER_LOW, pdev->name, switch_data);
+ if (ret < 0)
+ goto err_request_irq;
+
+ /* Perform initial detection */
+ gpio_switch_work(&switch_data->work);
+
+ return 0;
+
+err_request_irq:
+err_detect_irq_num_failed:
+err_set_gpio_input:
+ gpio_free(switch_data->gpio);
+err_request_gpio:
+ switch_dev_unregister(&switch_data->sdev);
+err_switch_dev_register:
+ kfree(switch_data);
+
+ return ret;
+}
+
+static int gpio_switch_remove(struct platform_device *pdev)
+{
+ struct gpio_switch_data *switch_data = platform_get_drvdata(pdev);
+
+ cancel_work_sync(&switch_data->work);
+ gpio_free(switch_data->gpio);
+ switch_dev_unregister(&switch_data->sdev);
+ kfree(switch_data);
+
+ return 0;
+}
+
+static struct platform_driver gpio_switch_driver = {
+ .probe = gpio_switch_probe,
+ .remove = gpio_switch_remove,
+ .driver = {
+ .name = "switch-gpio",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init gpio_switch_init(void)
+{
+ return platform_driver_register(&gpio_switch_driver);
+}
+
+static void __exit gpio_switch_exit(void)
+{
+ platform_driver_unregister(&gpio_switch_driver);
+}
+
+module_init(gpio_switch_init);
+module_exit(gpio_switch_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("GPIO Switch driver");
+MODULE_LICENSE("GPL");
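The gpio_switch driver above takes all of its configuration from platform data supplied by board code. Below is a minimal board-file sketch of that wiring; it assumes the usual Android declaration of struct gpio_switch_platform_data in <linux/switch.h> (only the fields that gpio_switch_probe() actually reads are set), and the switch name, GPIO number and state strings are placeholders.

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/switch.h>

/* Illustrative values only: gpio_switch_probe() reads these fields
 * from pdev->dev.platform_data. */
static struct gpio_switch_platform_data example_hs_switch_pdata = {
	.name		= "h2w",	/* switch name passed to switch_dev_register() */
	.gpio		= 28,		/* hypothetical headset-detect GPIO */
	.state_on	= "1",
	.state_off	= "0",
};

static struct platform_device example_hs_switch_device = {
	.name	= "switch-gpio",	/* must match gpio_switch_driver.driver.name */
	.id	= -1,
	.dev	= {
		.platform_data = &example_hs_switch_pdata,
	},
};

static int __init example_switch_dev_init(void)
{
	return platform_device_register(&example_hs_switch_device);
}
device_initcall(example_switch_dev_init);

With a registration like this in place, the resulting switch device (typically exposed under /sys/class/switch/) simply mirrors the GPIO level that gpio_switch_work() reads after each interrupt.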
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 9ac4c151eae4..ba6091bf93fc 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -372,7 +372,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
* made generic here.
*/
if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
- iscsi_sna_gte(cmd->stat_sn, conn->sess->exp_cmd_sn)) {
+ iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
list_del(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd);
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 85140f7dde1e..7d4ec02e29a9 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -212,7 +212,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
unsigned char *buf;
unsigned char *ptr;
- sense_reason_t rc;
+ sense_reason_t rc = TCM_NO_SENSE;
u32 len = 4; /* Skip over RESERVED area in header */
int alua_access_state, primary = 0;
u16 tg_pt_id, rtpi;
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index e35dbf85841f..8e0290b38e43 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -2053,7 +2053,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
/* Used for APTPL metadata w/ UNREGISTER */
unsigned char *pr_aptpl_buf = NULL;
unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
- sense_reason_t ret;
+ sense_reason_t ret = TCM_NO_SENSE;
int pr_holder = 0, type;
if (!se_sess || !se_lun) {
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index c23c76ccef65..bd587b70661a 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -541,9 +541,6 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
- if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
- transport_lun_remove_cmd(cmd);
-
if (transport_cmd_check_stop_to_fabric(cmd))
return;
if (remove)
@@ -1396,6 +1393,8 @@ static void target_complete_tmr_failure(struct work_struct *work)
se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
se_cmd->se_tfo->queue_tm_rsp(se_cmd);
+
+ transport_cmd_check_stop_to_fabric(se_cmd);
}
/**
@@ -1688,6 +1687,7 @@ void target_execute_cmd(struct se_cmd *cmd)
}
cmd->t_state = TRANSPORT_PROCESSING;
+ cmd->transport_state |= CMD_T_ACTIVE;
spin_unlock_irq(&cmd->t_state_lock);
if (!target_handle_task_attr(cmd))
@@ -2597,6 +2597,16 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
* SENSE KEY values from include/scsi/scsi.h
*/
switch (reason) {
+ case TCM_NO_SENSE:
+ /* CURRENT ERROR */
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* Not Ready */
+ buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
+ /* NO ADDITIONAL SENSE INFORMATION */
+ buffer[SPC_ASC_KEY_OFFSET] = 0;
+ buffer[SPC_ASCQ_KEY_OFFSET] = 0;
+ break;
case TCM_NON_EXISTENT_LUN:
/* CURRENT ERROR */
buffer[0] = 0x70;
@@ -2743,7 +2753,7 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
/* ILLEGAL REQUEST */
buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* LOGICAL UNIT COMMUNICATION FAILURE */
- buffer[SPC_ASC_KEY_OFFSET] = 0x80;
+ buffer[SPC_ASC_KEY_OFFSET] = 0x08;
break;
}
/*
@@ -2804,6 +2814,8 @@ void transport_send_task_abort(struct se_cmd *cmd)
}
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+ transport_lun_remove_cmd(cmd);
+
pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
" ITT: 0x%08x\n", cmd->t_task_cdb[0],
cmd->se_tfo->get_task_tag(cmd));
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 12d6fa21e5e1..6659dd36e806 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -355,11 +355,11 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
tport = ft_tport_create(rdata->local_port);
if (!tport)
- return 0; /* not a target for this local port */
+ goto not_target; /* not a target for this local port */
acl = ft_acl_get(tport->tpg, rdata);
if (!acl)
- return 0;
+ goto not_target; /* no target for this remote */
if (!rspp)
goto fill;
@@ -396,12 +396,18 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
/*
* OR in our service parameters with other provider (initiator), if any.
- * TBD XXX - indicate RETRY capability?
*/
fill:
fcp_parm = ntohl(spp->spp_params);
+ fcp_parm &= ~FCP_SPPF_RETRY;
spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN);
return FC_SPP_RESP_ACK;
+
+not_target:
+ fcp_parm = ntohl(spp->spp_params);
+ fcp_parm &= ~FCP_SPPF_TARG_FCN;
+ spp->spp_params = htonl(fcp_parm);
+ return 0;
}
/**
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 2c7230aaefd4..aa2e85dc1e22 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -94,6 +94,9 @@ static void __uart_start(struct tty_struct *tty)
struct uart_state *state = tty->driver_data;
struct uart_port *port = state->uart_port;
+ if (port->ops->wake_peer)
+ port->ops->wake_peer(port);
+
if (!uart_circ_empty(&state->xmit) && state->xmit.buf &&
!tty->stopped && !tty->hw_stopped)
port->ops->start_tx(port);
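The serial_core hunk above calls an optional wake_peer() hook just before transmission starts; the callback itself is assumed to be added to struct uart_ops elsewhere in this series. A hedged sketch of how a UART driver might implement it, for example to wake an external Bluetooth chip over a GPIO before data is pushed out, could look like the following (the struct, names and GPIO usage are all illustrative):

#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/serial_core.h>

struct example_uart_port {
	struct uart_port	port;
	int			bt_wake_gpio;	/* hypothetical "wake the peer" line */
};

static void example_uart_wake_peer(struct uart_port *port)
{
	struct example_uart_port *ep =
		container_of(port, struct example_uart_port, port);

	/* Assert the wake line so the remote side is awake before start_tx(). */
	if (gpio_is_valid(ep->bt_wake_gpio))
		gpio_set_value(ep->bt_wake_gpio, 1);
}

static const struct uart_ops example_uart_ops = {
	/* .startup, .start_tx, .stop_tx, ... (standard callbacks omitted) */
	.wake_peer	= example_uart_wake_peer,
};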
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 4c90b510d016..640ae6c6d2d2 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -37,6 +37,7 @@ config USB_ARCH_HAS_EHCI
default y if ARCH_W90X900
default y if ARCH_AT91
default y if ARCH_MXC
+ default y if ARCH_MXS
default y if ARCH_OMAP3
default y if ARCH_CNS3XXX
default y if ARCH_VT8500
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index caecad9213f5..8e9d31277c43 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -70,6 +70,9 @@ static int host_start(struct ci13xxx *ci)
else
ci->hcd = hcd;
+ if (ci->platdata->flags & CI13XXX_DISABLE_STREAMING)
+ hw_write(ci, OP_USBMODE, USBMODE_CI_SDIS, USBMODE_CI_SDIS);
+
return ret;
}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 8d809a811e16..2d92cce260d7 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1602,6 +1602,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x0572, 0x1340), /* Conexant CX93010-2x UCMxx */
.driver_info = NO_UNION_NORMAL,
},
+ { USB_DEVICE(0x05f9, 0x4002), /* PSC Scanning, Magellan 800i */
+ .driver_info = NO_UNION_NORMAL,
+ },
{ USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
},
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index a815fd2cc5e7..957ed2c41482 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -877,6 +877,60 @@ static int hub_hub_status(struct usb_hub *hub,
return ret;
}
+static int hub_set_port_link_state(struct usb_hub *hub, int port1,
+ unsigned int link_status)
+{
+ return set_port_feature(hub->hdev,
+ port1 | (link_status << 3),
+ USB_PORT_FEAT_LINK_STATE);
+}
+
+/*
+ * If USB 3.0 ports are placed into the Disabled state, they will no longer
+ * detect any device connects or disconnects. This is generally not what the
+ * USB core wants, since it expects a disabled port to produce a port status
+ * change event when a new device connects.
+ *
+ * Instead, set the link state to Disabled, wait for the link to settle into
+ * that state, clear any change bits, and then put the port into the RxDetect
+ * state.
+ */
+static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
+{
+ int ret;
+ int total_time;
+ u16 portchange, portstatus;
+
+ if (!hub_is_superspeed(hub->hdev))
+ return -EINVAL;
+
+ ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
+ if (ret) {
+ dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n",
+ port1, ret);
+ return ret;
+ }
+
+ /* Wait for the link to enter the disabled state. */
+ for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) {
+ ret = hub_port_status(hub, port1, &portstatus, &portchange);
+ if (ret < 0)
+ return ret;
+
+ if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+ USB_SS_PORT_LS_SS_DISABLED)
+ break;
+ if (total_time >= HUB_DEBOUNCE_TIMEOUT)
+ break;
+ msleep(HUB_DEBOUNCE_STEP);
+ }
+ if (total_time >= HUB_DEBOUNCE_TIMEOUT)
+ dev_warn(hub->intfdev, "Could not disable port %d after %d ms\n",
+ port1, total_time);
+
+ return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT);
+}
+
static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
{
struct usb_device *hdev = hub->hdev;
@@ -885,8 +939,13 @@ static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
if (hub->ports[port1 - 1]->child && set_state)
usb_set_device_state(hub->ports[port1 - 1]->child,
USB_STATE_NOTATTACHED);
- if (!hub->error && !hub_is_superspeed(hub->hdev))
- ret = clear_port_feature(hdev, port1, USB_PORT_FEAT_ENABLE);
+ if (!hub->error) {
+ if (hub_is_superspeed(hub->hdev))
+ ret = hub_usb3_port_disable(hub, port1);
+ else
+ ret = clear_port_feature(hdev, port1,
+ USB_PORT_FEAT_ENABLE);
+ }
if (ret)
dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n",
port1, ret);
@@ -2440,7 +2499,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
#define HUB_SHORT_RESET_TIME 10
#define HUB_BH_RESET_TIME 50
#define HUB_LONG_RESET_TIME 200
-#define HUB_RESET_TIMEOUT 500
+#define HUB_RESET_TIMEOUT 800
static int hub_port_reset(struct usb_hub *hub, int port1,
struct usb_device *udev, unsigned int delay, bool warm);
@@ -2475,6 +2534,10 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
if (ret < 0)
return ret;
+ /* The port state is unknown until the reset completes. */
+ if ((portstatus & USB_PORT_STAT_RESET))
+ goto delay;
+
/*
* Some buggy devices require a warm reset to be issued even
* when the port appears not to be connected.
@@ -2520,11 +2583,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
if ((portchange & USB_PORT_STAT_C_CONNECTION))
return -ENOTCONN;
- /* if we`ve finished resetting, then break out of
- * the loop
- */
- if (!(portstatus & USB_PORT_STAT_RESET) &&
- (portstatus & USB_PORT_STAT_ENABLE)) {
+ if ((portstatus & USB_PORT_STAT_ENABLE)) {
if (hub_is_wusb(hub))
udev->speed = USB_SPEED_WIRELESS;
else if (hub_is_superspeed(hub->hdev))
@@ -2538,10 +2597,15 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
return 0;
}
} else {
- if (portchange & USB_PORT_STAT_C_BH_RESET)
- return 0;
+ if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
+ hub_port_warm_reset_required(hub,
+ portstatus))
+ return -ENOTCONN;
+
+ return 0;
}
+delay:
/* switch to the long delay after two short delay failures */
if (delay_time >= 2 * HUB_SHORT_RESET_TIME)
delay = HUB_LONG_RESET_TIME;
@@ -2565,14 +2629,11 @@ static void hub_port_finish_reset(struct usb_hub *hub, int port1,
msleep(10 + 40);
update_devnum(udev, 0);
hcd = bus_to_hcd(udev->bus);
- if (hcd->driver->reset_device) {
- *status = hcd->driver->reset_device(hcd, udev);
- if (*status < 0) {
- dev_err(&udev->dev, "Cannot reset "
- "HCD device state\n");
- break;
- }
- }
+ /* The xHC may think the device is already reset,
+ * so ignore the status.
+ */
+ if (hcd->driver->reset_device)
+ hcd->driver->reset_device(hcd, udev);
}
/* FALL THROUGH */
case -ENOTCONN:
@@ -2580,16 +2641,16 @@ static void hub_port_finish_reset(struct usb_hub *hub, int port1,
clear_port_feature(hub->hdev,
port1, USB_PORT_FEAT_C_RESET);
/* FIXME need disconnect() for NOTATTACHED device */
- if (warm) {
+ if (hub_is_superspeed(hub->hdev)) {
clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_BH_PORT_RESET);
clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_PORT_LINK_STATE);
- } else {
+ }
+ if (!warm)
usb_set_device_state(udev, *status
? USB_STATE_NOTATTACHED
: USB_STATE_DEFAULT);
- }
break;
}
}
@@ -2939,7 +3000,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
static int finish_port_resume(struct usb_device *udev)
{
int status = 0;
- u16 devstatus;
+ u16 devstatus = 0;
/* caller owns the udev device lock */
dev_dbg(&udev->dev, "%s\n",
@@ -2984,7 +3045,13 @@ static int finish_port_resume(struct usb_device *udev)
if (status) {
dev_dbg(&udev->dev, "gone after usb resume? status %d\n",
status);
- } else if (udev->actconfig) {
+ /*
+ * A few quirky devices violate the standard by claiming to have
+ * remote wakeup enabled after a reset, and crash if the feature is
+ * cleared; hence the check for udev->reset_resume.
+ */
+ } else if (udev->actconfig && !udev->reset_resume) {
le16_to_cpus(&devstatus);
if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) {
status = usb_control_msg(udev,
@@ -4638,9 +4705,14 @@ static void hub_events(void)
* SS.Inactive state.
*/
if (hub_port_warm_reset_required(hub, portstatus)) {
+ int status;
+
dev_dbg(hub_dev, "warm reset port %d\n", i);
- hub_port_reset(hub, i, NULL,
+ status = hub_port_reset(hub, i, NULL,
HUB_BH_RESET_TIME, true);
+ if (status < 0)
+ hub_port_disable(hub, i, 1);
+ connect_change = 0;
}
if (connect_change)
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index fdefd9c7f7af..3113c1d71442 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -43,6 +43,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Creative SB Audigy 2 NX */
{ USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Microsoft LifeCam-VX700 v2.0 */
+ { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Logitech Quickcam Fusion */
{ USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 92604b4f9712..5945aadaa1c9 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -56,7 +56,7 @@
#define dump_register(nm) \
{ \
.name = __stringify(nm), \
- .offset = DWC3_ ##nm, \
+ .offset = DWC3_ ##nm - DWC3_GLOBALS_REGS_START, \
}
static const struct debugfs_reg32 dwc3_regs[] = {
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 14625fd2cecd..f74fc7f418e5 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -799,6 +799,15 @@ config USB_G_PRINTER
For more information, see Documentation/usb/gadget_printer.txt
which includes sample code for accessing the device file.
+config USB_G_ANDROID
+ boolean "Android Composite Gadget"
+ help
+ The Android Composite Gadget supports multiple USB
+ functions: adb, acm, mass storage, mtp, accessory
+ and rndis.
+ Each function can be configured and enabled/disabled
+ dynamically from userspace through a sysfs interface.
+
config USB_CDC_COMPOSITE
tristate "CDC Composite Device (Ethernet and ACM)"
depends on NET
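The help text above says the composite gadget is configured from userspace through sysfs. As a rough illustration of that interface, a userspace program could do something like the sketch below; the paths and attribute names come from the android.c driver added later in this patch, and the chosen function list is only an example.

#include <stdio.h>

/* Sketch only: the android gadget (drivers/usb/gadget/android.c, below)
 * creates /sys/class/android_usb/android0/ with "functions" and "enable"
 * attributes. */
static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	const char *base = "/sys/class/android_usb/android0";
	char path[128];

	/* The function list may only be changed while the gadget is disabled. */
	snprintf(path, sizeof(path), "%s/enable", base);
	write_attr(path, "0");

	snprintf(path, sizeof(path), "%s/functions", base);
	write_attr(path, "mtp,adb");

	snprintf(path, sizeof(path), "%s/enable", base);
	write_attr(path, "1");

	return 0;
}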
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 8b4acfd92aa3..a0fed8528bee 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -55,6 +55,7 @@ g_webcam-y := webcam.o
g_ncm-y := ncm.o
g_acm_ms-y := acm_ms.o
g_tcm_usb_gadget-y := tcm_usb_gadget.o
+g_android-y := android.o
obj-$(CONFIG_USB_ZERO) += g_zero.o
obj-$(CONFIG_USB_AUDIO) += g_audio.o
@@ -74,3 +75,5 @@ obj-$(CONFIG_USB_G_WEBCAM) += g_webcam.o
obj-$(CONFIG_USB_G_NCM) += g_ncm.o
obj-$(CONFIG_USB_G_ACM_MS) += g_acm_ms.o
obj-$(CONFIG_USB_GADGET_TARGET) += tcm_usb_gadget.o
+obj-$(CONFIG_USB_G_ANDROID) += g_android.o
+
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index fc0ec5e0d58e..d9f6b9372491 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -3231,7 +3231,7 @@ static int udc_pci_probe(
}
if (!pdev->irq) {
- dev_err(&dev->pdev->dev, "irq not set\n");
+ dev_err(&pdev->dev, "irq not set\n");
kfree(dev);
dev = NULL;
retval = -ENODEV;
@@ -3250,7 +3250,7 @@ static int udc_pci_probe(
dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);
if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
- dev_dbg(&dev->pdev->dev, "request_irq(%d) fail\n", pdev->irq);
+ dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq);
kfree(dev);
dev = NULL;
retval = -EBUSY;
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
new file mode 100644
index 000000000000..9cebebcb8185
--- /dev/null
+++ b/drivers/usb/gadget/android.c
@@ -0,0 +1,1568 @@
+/*
+ * Gadget Driver for Android
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ * Benoit Goby <benoit@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/platform_device.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/gadget.h>
+
+#include "gadget_chips.h"
+
+/*
+ * Kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module. So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+#include "composite.c"
+
+#include "f_fs.c"
+#include "f_audio_source.c"
+#include "f_mass_storage.c"
+#include "u_serial.c"
+#include "f_acm.c"
+#include "f_adb.c"
+#include "f_mtp.c"
+#include "f_accessory.c"
+#define USB_ETH_RNDIS y
+#include "f_rndis.c"
+#include "rndis.c"
+#include "u_ether.c"
+
+MODULE_AUTHOR("Mike Lockwood");
+MODULE_DESCRIPTION("Android Composite USB Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+
+static const char longname[] = "Gadget Android";
+
+/* Default vendor and product IDs, overridden by userspace */
+#define VENDOR_ID 0x18D1
+#define PRODUCT_ID 0x0001
+
+struct android_usb_function {
+ char *name;
+ void *config;
+
+ struct device *dev;
+ char *dev_name;
+ struct device_attribute **attributes;
+
+ /* for android_dev.enabled_functions */
+ struct list_head enabled_list;
+
+ /* Optional: initialization during gadget bind */
+ int (*init)(struct android_usb_function *, struct usb_composite_dev *);
+ /* Optional: cleanup during gadget unbind */
+ void (*cleanup)(struct android_usb_function *);
+ /* Optional: called when the function is added to the list of
+ * enabled functions */
+ void (*enable)(struct android_usb_function *);
+ /* Optional: called when it is removed */
+ void (*disable)(struct android_usb_function *);
+
+ int (*bind_config)(struct android_usb_function *,
+ struct usb_configuration *);
+
+ /* Optional: called when the configuration is removed */
+ void (*unbind_config)(struct android_usb_function *,
+ struct usb_configuration *);
+ /* Optional: handle ctrl requests before the device is configured */
+ int (*ctrlrequest)(struct android_usb_function *,
+ struct usb_composite_dev *,
+ const struct usb_ctrlrequest *);
+};
+
+struct android_dev {
+ struct android_usb_function **functions;
+ struct list_head enabled_functions;
+ struct usb_composite_dev *cdev;
+ struct device *dev;
+
+ bool enabled;
+ int disable_depth;
+ struct mutex mutex;
+ bool connected;
+ bool sw_connected;
+ struct work_struct work;
+ char ffs_aliases[256];
+};
+
+static struct class *android_class;
+static struct android_dev *_android_dev;
+static int android_bind_config(struct usb_configuration *c);
+static void android_unbind_config(struct usb_configuration *c);
+
+/* string IDs are assigned dynamically */
+#define STRING_MANUFACTURER_IDX 0
+#define STRING_PRODUCT_IDX 1
+#define STRING_SERIAL_IDX 2
+
+static char manufacturer_string[256];
+static char product_string[256];
+static char serial_string[256];
+
+/* String Table */
+static struct usb_string strings_dev[] = {
+ [STRING_MANUFACTURER_IDX].s = manufacturer_string,
+ [STRING_PRODUCT_IDX].s = product_string,
+ [STRING_SERIAL_IDX].s = serial_string,
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+ .language = 0x0409, /* en-us */
+ .strings = strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+ &stringtab_dev,
+ NULL,
+};
+
+static struct usb_device_descriptor device_desc = {
+ .bLength = sizeof(device_desc),
+ .bDescriptorType = USB_DT_DEVICE,
+ .bcdUSB = __constant_cpu_to_le16(0x0200),
+ .bDeviceClass = USB_CLASS_PER_INTERFACE,
+ .idVendor = __constant_cpu_to_le16(VENDOR_ID),
+ .idProduct = __constant_cpu_to_le16(PRODUCT_ID),
+ .bcdDevice = __constant_cpu_to_le16(0xffff),
+ .bNumConfigurations = 1,
+};
+
+static struct usb_configuration android_config_driver = {
+ .label = "android",
+ .unbind = android_unbind_config,
+ .bConfigurationValue = 1,
+ .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
+ .bMaxPower = 0xFA, /* 250 * 2 mA units = 500 mA */
+};
+
+static void android_work(struct work_struct *data)
+{
+ struct android_dev *dev = container_of(data, struct android_dev, work);
+ struct usb_composite_dev *cdev = dev->cdev;
+ char *disconnected[2] = { "USB_STATE=DISCONNECTED", NULL };
+ char *connected[2] = { "USB_STATE=CONNECTED", NULL };
+ char *configured[2] = { "USB_STATE=CONFIGURED", NULL };
+ char **uevent_envp = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cdev->lock, flags);
+ if (cdev->config)
+ uevent_envp = configured;
+ else if (dev->connected != dev->sw_connected)
+ uevent_envp = dev->connected ? connected : disconnected;
+ dev->sw_connected = dev->connected;
+ spin_unlock_irqrestore(&cdev->lock, flags);
+
+ if (uevent_envp) {
+ kobject_uevent_env(&dev->dev->kobj, KOBJ_CHANGE, uevent_envp);
+ pr_info("%s: sent uevent %s\n", __func__, uevent_envp[0]);
+ } else {
+ pr_info("%s: did not send uevent (%d %d %p)\n", __func__,
+ dev->connected, dev->sw_connected, cdev->config);
+ }
+}
+
+static void android_enable(struct android_dev *dev)
+{
+ struct usb_composite_dev *cdev = dev->cdev;
+
+ if (WARN_ON(!dev->disable_depth))
+ return;
+
+ if (--dev->disable_depth == 0) {
+ usb_add_config(cdev, &android_config_driver,
+ android_bind_config);
+ usb_gadget_connect(cdev->gadget);
+ }
+}
+
+static void android_disable(struct android_dev *dev)
+{
+ struct usb_composite_dev *cdev = dev->cdev;
+
+ if (dev->disable_depth++ == 0) {
+ usb_gadget_disconnect(cdev->gadget);
+ /* Cancel pending control requests */
+ usb_ep_dequeue(cdev->gadget->ep0, cdev->req);
+ usb_remove_config(cdev, &android_config_driver);
+ }
+}
+
+/*-------------------------------------------------------------------------*/
+/* Supported functions initialization */
+
+struct functionfs_config {
+ bool opened;
+ bool enabled;
+ struct ffs_data *data;
+};
+
+static int ffs_function_init(struct android_usb_function *f,
+ struct usb_composite_dev *cdev)
+{
+ f->config = kzalloc(sizeof(struct functionfs_config), GFP_KERNEL);
+ if (!f->config)
+ return -ENOMEM;
+
+ return functionfs_init();
+}
+
+static void ffs_function_cleanup(struct android_usb_function *f)
+{
+ functionfs_cleanup();
+ kfree(f->config);
+}
+
+static void ffs_function_enable(struct android_usb_function *f)
+{
+ struct android_dev *dev = _android_dev;
+ struct functionfs_config *config = f->config;
+
+ config->enabled = true;
+
+ /* Disable the gadget until the function is ready */
+ if (!config->opened)
+ android_disable(dev);
+}
+
+static void ffs_function_disable(struct android_usb_function *f)
+{
+ struct android_dev *dev = _android_dev;
+ struct functionfs_config *config = f->config;
+
+ config->enabled = false;
+
+ /* Balance the disable that was called in closed_callback */
+ if (!config->opened)
+ android_enable(dev);
+}
+
+static int ffs_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ struct functionfs_config *config = f->config;
+ return functionfs_bind_config(c->cdev, c, config->data);
+}
+
+static ssize_t
+ffs_aliases_show(struct device *pdev, struct device_attribute *attr, char *buf)
+{
+ struct android_dev *dev = _android_dev;
+ int ret;
+
+ mutex_lock(&dev->mutex);
+ ret = sprintf(buf, "%s\n", dev->ffs_aliases);
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+
+static ssize_t
+ffs_aliases_store(struct device *pdev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct android_dev *dev = _android_dev;
+ char buff[256];
+
+ mutex_lock(&dev->mutex);
+
+ if (dev->enabled) {
+ mutex_unlock(&dev->mutex);
+ return -EBUSY;
+ }
+
+ strlcpy(buff, buf, sizeof(buff));
+ strlcpy(dev->ffs_aliases, strim(buff), sizeof(dev->ffs_aliases));
+
+ mutex_unlock(&dev->mutex);
+
+ return size;
+}
+
+static DEVICE_ATTR(aliases, S_IRUGO | S_IWUSR, ffs_aliases_show,
+ ffs_aliases_store);
+static struct device_attribute *ffs_function_attributes[] = {
+ &dev_attr_aliases,
+ NULL
+};
+
+static struct android_usb_function ffs_function = {
+ .name = "ffs",
+ .init = ffs_function_init,
+ .enable = ffs_function_enable,
+ .disable = ffs_function_disable,
+ .cleanup = ffs_function_cleanup,
+ .bind_config = ffs_function_bind_config,
+ .attributes = ffs_function_attributes,
+};
+
+static int functionfs_ready_callback(struct ffs_data *ffs)
+{
+ struct android_dev *dev = _android_dev;
+ struct functionfs_config *config = ffs_function.config;
+ int ret = 0;
+
+ mutex_lock(&dev->mutex);
+
+ ret = functionfs_bind(ffs, dev->cdev);
+ if (ret)
+ goto err;
+
+ config->data = ffs;
+ config->opened = true;
+
+ if (config->enabled)
+ android_enable(dev);
+
+err:
+ mutex_unlock(&dev->mutex);
+ return ret;
+}
+
+static void functionfs_closed_callback(struct ffs_data *ffs)
+{
+ struct android_dev *dev = _android_dev;
+ struct functionfs_config *config = ffs_function.config;
+
+ mutex_lock(&dev->mutex);
+
+ if (config->enabled)
+ android_disable(dev);
+
+ config->opened = false;
+ config->data = NULL;
+
+ functionfs_unbind(ffs);
+
+ mutex_unlock(&dev->mutex);
+}
+
+static int functionfs_check_dev_callback(const char *dev_name)
+{
+ return 0;
+}
+
+
+static void *functionfs_acquire_dev_callback(const char *dev_name)
+{
+ return NULL;
+}
+
+static void functionfs_release_dev_callback(struct ffs_data *ffs_data)
+{
+}
+
+
+
+struct adb_data {
+ bool opened;
+ bool enabled;
+};
+
+static int
+adb_function_init(struct android_usb_function *f,
+ struct usb_composite_dev *cdev)
+{
+ f->config = kzalloc(sizeof(struct adb_data), GFP_KERNEL);
+ if (!f->config)
+ return -ENOMEM;
+
+ return adb_setup();
+}
+
+static void adb_function_cleanup(struct android_usb_function *f)
+{
+ adb_cleanup();
+ kfree(f->config);
+}
+
+static int
+adb_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ return adb_bind_config(c);
+}
+
+static void adb_android_function_enable(struct android_usb_function *f)
+{
+ struct android_dev *dev = _android_dev;
+ struct adb_data *data = f->config;
+
+ data->enabled = true;
+
+ /* Disable the gadget until adbd is ready */
+ if (!data->opened)
+ android_disable(dev);
+}
+
+static void adb_android_function_disable(struct android_usb_function *f)
+{
+ struct android_dev *dev = _android_dev;
+ struct adb_data *data = f->config;
+
+ data->enabled = false;
+
+ /* Balance the disable that was called in closed_callback */
+ if (!data->opened)
+ android_enable(dev);
+}
+
+static struct android_usb_function adb_function = {
+ .name = "adb",
+ .enable = adb_android_function_enable,
+ .disable = adb_android_function_disable,
+ .init = adb_function_init,
+ .cleanup = adb_function_cleanup,
+ .bind_config = adb_function_bind_config,
+};
+
+static void adb_ready_callback(void)
+{
+ struct android_dev *dev = _android_dev;
+ struct adb_data *data = adb_function.config;
+
+ mutex_lock(&dev->mutex);
+
+ data->opened = true;
+
+ if (data->enabled)
+ android_enable(dev);
+
+ mutex_unlock(&dev->mutex);
+}
+
+static void adb_closed_callback(void)
+{
+ struct android_dev *dev = _android_dev;
+ struct adb_data *data = adb_function.config;
+
+ mutex_lock(&dev->mutex);
+
+ data->opened = false;
+
+ if (data->enabled)
+ android_disable(dev);
+
+ mutex_unlock(&dev->mutex);
+}
+
+
+#define MAX_ACM_INSTANCES 4
+struct acm_function_config {
+ int instances;
+};
+
+static int
+acm_function_init(struct android_usb_function *f,
+ struct usb_composite_dev *cdev)
+{
+ f->config = kzalloc(sizeof(struct acm_function_config), GFP_KERNEL);
+ if (!f->config)
+ return -ENOMEM;
+
+ return gserial_setup(cdev->gadget, MAX_ACM_INSTANCES);
+}
+
+static void acm_function_cleanup(struct android_usb_function *f)
+{
+ gserial_cleanup();
+ kfree(f->config);
+ f->config = NULL;
+}
+
+static int
+acm_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ int i;
+ int ret = 0;
+ struct acm_function_config *config = f->config;
+
+ for (i = 0; i < config->instances; i++) {
+ ret = acm_bind_config(c, i);
+ if (ret) {
+ pr_err("Could not bind acm%u config\n", i);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static ssize_t acm_instances_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct acm_function_config *config = f->config;
+ return sprintf(buf, "%d\n", config->instances);
+}
+
+static ssize_t acm_instances_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct acm_function_config *config = f->config;
+ int value;
+
+ sscanf(buf, "%d", &value);
+ if (value > MAX_ACM_INSTANCES)
+ value = MAX_ACM_INSTANCES;
+ config->instances = value;
+ return size;
+}
+
+static DEVICE_ATTR(instances, S_IRUGO | S_IWUSR, acm_instances_show,
+ acm_instances_store);
+static struct device_attribute *acm_function_attributes[] = {
+ &dev_attr_instances,
+ NULL
+};
+
+static struct android_usb_function acm_function = {
+ .name = "acm",
+ .init = acm_function_init,
+ .cleanup = acm_function_cleanup,
+ .bind_config = acm_function_bind_config,
+ .attributes = acm_function_attributes,
+};
+
+
+static int
+mtp_function_init(struct android_usb_function *f,
+ struct usb_composite_dev *cdev)
+{
+ return mtp_setup();
+}
+
+static void mtp_function_cleanup(struct android_usb_function *f)
+{
+ mtp_cleanup();
+}
+
+static int
+mtp_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ return mtp_bind_config(c, false);
+}
+
+static int
+ptp_function_init(struct android_usb_function *f,
+ struct usb_composite_dev *cdev)
+{
+ /* nothing to do - initialization is handled by mtp_function_init */
+ return 0;
+}
+
+static void ptp_function_cleanup(struct android_usb_function *f)
+{
+ /* nothing to do - cleanup is handled by mtp_function_cleanup */
+}
+
+static int
+ptp_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ return mtp_bind_config(c, true);
+}
+
+static int mtp_function_ctrlrequest(struct android_usb_function *f,
+ struct usb_composite_dev *cdev,
+ const struct usb_ctrlrequest *c)
+{
+ return mtp_ctrlrequest(cdev, c);
+}
+
+static struct android_usb_function mtp_function = {
+ .name = "mtp",
+ .init = mtp_function_init,
+ .cleanup = mtp_function_cleanup,
+ .bind_config = mtp_function_bind_config,
+ .ctrlrequest = mtp_function_ctrlrequest,
+};
+
+/* PTP function is same as MTP with slightly different interface descriptor */
+static struct android_usb_function ptp_function = {
+ .name = "ptp",
+ .init = ptp_function_init,
+ .cleanup = ptp_function_cleanup,
+ .bind_config = ptp_function_bind_config,
+};
+
+
+struct rndis_function_config {
+ u8 ethaddr[ETH_ALEN];
+ u32 vendorID;
+ char manufacturer[256];
+ /* "Wireless" RNDIS; auto-detected by Windows */
+ bool wceis;
+};
+
+static int
+rndis_function_init(struct android_usb_function *f,
+ struct usb_composite_dev *cdev)
+{
+ f->config = kzalloc(sizeof(struct rndis_function_config), GFP_KERNEL);
+ if (!f->config)
+ return -ENOMEM;
+ return 0;
+}
+
+static void rndis_function_cleanup(struct android_usb_function *f)
+{
+ kfree(f->config);
+ f->config = NULL;
+}
+
+static int
+rndis_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ int ret;
+ struct rndis_function_config *rndis = f->config;
+
+ if (!rndis) {
+ pr_err("%s: rndis_pdata\n", __func__);
+ return -1;
+ }
+
+ pr_info("%s MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", __func__,
+ rndis->ethaddr[0], rndis->ethaddr[1], rndis->ethaddr[2],
+ rndis->ethaddr[3], rndis->ethaddr[4], rndis->ethaddr[5]);
+
+ ret = gether_setup_name(c->cdev->gadget, rndis->ethaddr, "rndis");
+ if (ret) {
+ pr_err("%s: gether_setup failed\n", __func__);
+ return ret;
+ }
+
+ if (rndis->wceis) {
+ /* "Wireless" RNDIS; auto-detected by Windows */
+ rndis_iad_descriptor.bFunctionClass =
+ USB_CLASS_WIRELESS_CONTROLLER;
+ rndis_iad_descriptor.bFunctionSubClass = 0x01;
+ rndis_iad_descriptor.bFunctionProtocol = 0x03;
+ rndis_control_intf.bInterfaceClass =
+ USB_CLASS_WIRELESS_CONTROLLER;
+ rndis_control_intf.bInterfaceSubClass = 0x01;
+ rndis_control_intf.bInterfaceProtocol = 0x03;
+ }
+
+ return rndis_bind_config_vendor(c, rndis->ethaddr, rndis->vendorID,
+ rndis->manufacturer);
+}
+
+static void rndis_function_unbind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ gether_cleanup();
+}
+
+static ssize_t rndis_manufacturer_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct rndis_function_config *config = f->config;
+ return sprintf(buf, "%s\n", config->manufacturer);
+}
+
+static ssize_t rndis_manufacturer_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct rndis_function_config *config = f->config;
+
+ if (size >= sizeof(config->manufacturer))
+ return -EINVAL;
+ if (sscanf(buf, "%s", config->manufacturer) == 1)
+ return size;
+ return -1;
+}
+
+static DEVICE_ATTR(manufacturer, S_IRUGO | S_IWUSR, rndis_manufacturer_show,
+ rndis_manufacturer_store);
+
+static ssize_t rndis_wceis_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct rndis_function_config *config = f->config;
+ return sprintf(buf, "%d\n", config->wceis);
+}
+
+static ssize_t rndis_wceis_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct rndis_function_config *config = f->config;
+ int value;
+
+ if (sscanf(buf, "%d", &value) == 1) {
+ config->wceis = value;
+ return size;
+ }
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(wceis, S_IRUGO | S_IWUSR, rndis_wceis_show,
+ rndis_wceis_store);
+
+static ssize_t rndis_ethaddr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct rndis_function_config *rndis = f->config;
+ return sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ rndis->ethaddr[0], rndis->ethaddr[1], rndis->ethaddr[2],
+ rndis->ethaddr[3], rndis->ethaddr[4], rndis->ethaddr[5]);
+}
+
+static ssize_t rndis_ethaddr_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct rndis_function_config *rndis = f->config;
+
+ if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ (int *)&rndis->ethaddr[0], (int *)&rndis->ethaddr[1],
+ (int *)&rndis->ethaddr[2], (int *)&rndis->ethaddr[3],
+ (int *)&rndis->ethaddr[4], (int *)&rndis->ethaddr[5]) == 6)
+ return size;
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(ethaddr, S_IRUGO | S_IWUSR, rndis_ethaddr_show,
+ rndis_ethaddr_store);
+
+static ssize_t rndis_vendorID_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct rndis_function_config *config = f->config;
+ return sprintf(buf, "%04x\n", config->vendorID);
+}
+
+static ssize_t rndis_vendorID_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct rndis_function_config *config = f->config;
+ int value;
+
+ if (sscanf(buf, "%04x", &value) == 1) {
+ config->vendorID = value;
+ return size;
+ }
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(vendorID, S_IRUGO | S_IWUSR, rndis_vendorID_show,
+ rndis_vendorID_store);
+
+static struct device_attribute *rndis_function_attributes[] = {
+ &dev_attr_manufacturer,
+ &dev_attr_wceis,
+ &dev_attr_ethaddr,
+ &dev_attr_vendorID,
+ NULL
+};
+
+static struct android_usb_function rndis_function = {
+ .name = "rndis",
+ .init = rndis_function_init,
+ .cleanup = rndis_function_cleanup,
+ .bind_config = rndis_function_bind_config,
+ .unbind_config = rndis_function_unbind_config,
+ .attributes = rndis_function_attributes,
+};
+
+
+struct mass_storage_function_config {
+ struct fsg_config fsg;
+ struct fsg_common *common;
+};
+
+static int mass_storage_function_init(struct android_usb_function *f,
+ struct usb_composite_dev *cdev)
+{
+ struct mass_storage_function_config *config;
+ struct fsg_common *common;
+ int err;
+
+ config = kzalloc(sizeof(struct mass_storage_function_config),
+ GFP_KERNEL);
+ if (!config)
+ return -ENOMEM;
+
+ config->fsg.nluns = 1;
+ config->fsg.luns[0].removable = 1;
+
+ common = fsg_common_init(NULL, cdev, &config->fsg);
+ if (IS_ERR(common)) {
+ kfree(config);
+ return PTR_ERR(common);
+ }
+
+ err = sysfs_create_link(&f->dev->kobj,
+ &common->luns[0].dev.kobj,
+ "lun");
+ if (err) {
+ kfree(config);
+ return err;
+ }
+
+ config->common = common;
+ f->config = config;
+ return 0;
+}
+
+static void mass_storage_function_cleanup(struct android_usb_function *f)
+{
+ kfree(f->config);
+ f->config = NULL;
+}
+
+static int mass_storage_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ struct mass_storage_function_config *config = f->config;
+ return fsg_bind_config(c->cdev, c, config->common);
+}
+
+static ssize_t mass_storage_inquiry_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct mass_storage_function_config *config = f->config;
+ return sprintf(buf, "%s\n", config->common->inquiry_string);
+}
+
+static ssize_t mass_storage_inquiry_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct mass_storage_function_config *config = f->config;
+ if (size >= sizeof(config->common->inquiry_string))
+ return -EINVAL;
+ if (sscanf(buf, "%s", config->common->inquiry_string) != 1)
+ return -EINVAL;
+ return size;
+}
+
+static DEVICE_ATTR(inquiry_string, S_IRUGO | S_IWUSR,
+ mass_storage_inquiry_show,
+ mass_storage_inquiry_store);
+
+static struct device_attribute *mass_storage_function_attributes[] = {
+ &dev_attr_inquiry_string,
+ NULL
+};
+
+static struct android_usb_function mass_storage_function = {
+ .name = "mass_storage",
+ .init = mass_storage_function_init,
+ .cleanup = mass_storage_function_cleanup,
+ .bind_config = mass_storage_function_bind_config,
+ .attributes = mass_storage_function_attributes,
+};
+
+
+static int accessory_function_init(struct android_usb_function *f,
+ struct usb_composite_dev *cdev)
+{
+ return acc_setup();
+}
+
+static void accessory_function_cleanup(struct android_usb_function *f)
+{
+ acc_cleanup();
+}
+
+static int accessory_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ return acc_bind_config(c);
+}
+
+static int accessory_function_ctrlrequest(struct android_usb_function *f,
+ struct usb_composite_dev *cdev,
+ const struct usb_ctrlrequest *c)
+{
+ return acc_ctrlrequest(cdev, c);
+}
+
+static struct android_usb_function accessory_function = {
+ .name = "accessory",
+ .init = accessory_function_init,
+ .cleanup = accessory_function_cleanup,
+ .bind_config = accessory_function_bind_config,
+ .ctrlrequest = accessory_function_ctrlrequest,
+};
+
+static int audio_source_function_init(struct android_usb_function *f,
+ struct usb_composite_dev *cdev)
+{
+ struct audio_source_config *config;
+
+ config = kzalloc(sizeof(struct audio_source_config), GFP_KERNEL);
+ if (!config)
+ return -ENOMEM;
+ config->card = -1;
+ config->device = -1;
+ f->config = config;
+ return 0;
+}
+
+static void audio_source_function_cleanup(struct android_usb_function *f)
+{
+ kfree(f->config);
+}
+
+static int audio_source_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ struct audio_source_config *config = f->config;
+
+ return audio_source_bind_config(c, config);
+}
+
+static void audio_source_function_unbind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ struct audio_source_config *config = f->config;
+
+ config->card = -1;
+ config->device = -1;
+}
+
+static ssize_t audio_source_pcm_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct audio_source_config *config = f->config;
+
+ /* print PCM card and device numbers */
+ return sprintf(buf, "%d %d\n", config->card, config->device);
+}
+
+static DEVICE_ATTR(pcm, S_IRUGO | S_IWUSR, audio_source_pcm_show, NULL);
+
+static struct device_attribute *audio_source_function_attributes[] = {
+ &dev_attr_pcm,
+ NULL
+};
+
+static struct android_usb_function audio_source_function = {
+ .name = "audio_source",
+ .init = audio_source_function_init,
+ .cleanup = audio_source_function_cleanup,
+ .bind_config = audio_source_function_bind_config,
+ .unbind_config = audio_source_function_unbind_config,
+ .attributes = audio_source_function_attributes,
+};
+
+static struct android_usb_function *supported_functions[] = {
+ &ffs_function,
+ &adb_function,
+ &acm_function,
+ &mtp_function,
+ &ptp_function,
+ &rndis_function,
+ &mass_storage_function,
+ &accessory_function,
+ &audio_source_function,
+ NULL
+};
+
+
+static int android_init_functions(struct android_usb_function **functions,
+ struct usb_composite_dev *cdev)
+{
+ struct android_dev *dev = _android_dev;
+ struct android_usb_function *f;
+ struct device_attribute **attrs;
+ struct device_attribute *attr;
+ int err = 0;
+ int index = 0;
+
+ for (; (f = *functions++); index++) {
+ f->dev_name = kasprintf(GFP_KERNEL, "f_%s", f->name);
+ f->dev = device_create(android_class, dev->dev,
+ MKDEV(0, index), f, f->dev_name);
+ if (IS_ERR(f->dev)) {
+ pr_err("%s: Failed to create dev %s", __func__,
+ f->dev_name);
+ err = PTR_ERR(f->dev);
+ goto err_create;
+ }
+
+ if (f->init) {
+ err = f->init(f, cdev);
+ if (err) {
+ pr_err("%s: Failed to init %s", __func__,
+ f->name);
+ goto err_out;
+ }
+ }
+
+ attrs = f->attributes;
+ if (attrs) {
+ while ((attr = *attrs++) && !err)
+ err = device_create_file(f->dev, attr);
+ }
+ if (err) {
+ pr_err("%s: Failed to create function %s attributes",
+ __func__, f->name);
+ goto err_out;
+ }
+ }
+ return 0;
+
+err_out:
+ device_destroy(android_class, f->dev->devt);
+err_create:
+ kfree(f->dev_name);
+ return err;
+}
+
+static void android_cleanup_functions(struct android_usb_function **functions)
+{
+ struct android_usb_function *f;
+
+ while (*functions) {
+ f = *functions++;
+
+ if (f->dev) {
+ device_destroy(android_class, f->dev->devt);
+ kfree(f->dev_name);
+ }
+
+ if (f->cleanup)
+ f->cleanup(f);
+ }
+}
+
+static int
+android_bind_enabled_functions(struct android_dev *dev,
+ struct usb_configuration *c)
+{
+ struct android_usb_function *f;
+ int ret;
+
+ list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+ ret = f->bind_config(f, c);
+ if (ret) {
+ pr_err("%s: %s failed", __func__, f->name);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static void
+android_unbind_enabled_functions(struct android_dev *dev,
+ struct usb_configuration *c)
+{
+ struct android_usb_function *f;
+
+ list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+ if (f->unbind_config)
+ f->unbind_config(f, c);
+ }
+}
+
+static int android_enable_function(struct android_dev *dev, char *name)
+{
+ struct android_usb_function **functions = dev->functions;
+ struct android_usb_function *f;
+ while ((f = *functions++)) {
+ if (!strcmp(name, f->name)) {
+ list_add_tail(&f->enabled_list,
+ &dev->enabled_functions);
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+/*-------------------------------------------------------------------------*/
+/* /sys/class/android_usb/android%d/ interface */
+
+static ssize_t
+functions_show(struct device *pdev, struct device_attribute *attr, char *buf)
+{
+ struct android_dev *dev = dev_get_drvdata(pdev);
+ struct android_usb_function *f;
+ char *buff = buf;
+
+ mutex_lock(&dev->mutex);
+
+ list_for_each_entry(f, &dev->enabled_functions, enabled_list)
+ buff += sprintf(buff, "%s,", f->name);
+
+ mutex_unlock(&dev->mutex);
+
+ if (buff != buf)
+ *(buff-1) = '\n';
+ return buff - buf;
+}
+
+static ssize_t
+functions_store(struct device *pdev, struct device_attribute *attr,
+ const char *buff, size_t size)
+{
+ struct android_dev *dev = dev_get_drvdata(pdev);
+ char *name;
+ char buf[256], *b;
+ char aliases[256], *a;
+ int err;
+ int is_ffs;
+ int ffs_enabled = 0;
+
+ mutex_lock(&dev->mutex);
+
+ if (dev->enabled) {
+ mutex_unlock(&dev->mutex);
+ return -EBUSY;
+ }
+
+ INIT_LIST_HEAD(&dev->enabled_functions);
+
+ strlcpy(buf, buff, sizeof(buf));
+ b = strim(buf);
+
+ while (b) {
+ name = strsep(&b, ",");
+ if (!name)
+ continue;
+
+ is_ffs = 0;
+ strlcpy(aliases, dev->ffs_aliases, sizeof(aliases));
+ a = aliases;
+
+ while (a) {
+ char *alias = strsep(&a, ",");
+ if (alias && !strcmp(name, alias)) {
+ is_ffs = 1;
+ break;
+ }
+ }
+
+ if (is_ffs) {
+ if (ffs_enabled)
+ continue;
+ err = android_enable_function(dev, "ffs");
+ if (err)
+ pr_err("android_usb: Cannot enable ffs (%d)",
+ err);
+ else
+ ffs_enabled = 1;
+ continue;
+ }
+
+ err = android_enable_function(dev, name);
+ if (err)
+ pr_err("android_usb: Cannot enable '%s' (%d)",
+ name, err);
+ }
+
+ mutex_unlock(&dev->mutex);
+
+ return size;
+}
+
+static ssize_t enable_show(struct device *pdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct android_dev *dev = dev_get_drvdata(pdev);
+ return sprintf(buf, "%d\n", dev->enabled);
+}
+
+static ssize_t enable_store(struct device *pdev, struct device_attribute *attr,
+ const char *buff, size_t size)
+{
+ struct android_dev *dev = dev_get_drvdata(pdev);
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct android_usb_function *f;
+ int enabled = 0;
+
+
+ if (!cdev)
+ return -ENODEV;
+
+ mutex_lock(&dev->mutex);
+
+ sscanf(buff, "%d", &enabled);
+ if (enabled && !dev->enabled) {
+ /*
+ * Update values in composite driver's copy of
+ * device descriptor.
+ */
+ cdev->desc.idVendor = device_desc.idVendor;
+ cdev->desc.idProduct = device_desc.idProduct;
+ cdev->desc.bcdDevice = device_desc.bcdDevice;
+ cdev->desc.bDeviceClass = device_desc.bDeviceClass;
+ cdev->desc.bDeviceSubClass = device_desc.bDeviceSubClass;
+ cdev->desc.bDeviceProtocol = device_desc.bDeviceProtocol;
+ list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+ if (f->enable)
+ f->enable(f);
+ }
+ android_enable(dev);
+ dev->enabled = true;
+ } else if (!enabled && dev->enabled) {
+ android_disable(dev);
+ list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+ if (f->disable)
+ f->disable(f);
+ }
+ dev->enabled = false;
+ } else {
+ pr_err("android_usb: already %s\n",
+ dev->enabled ? "enabled" : "disabled");
+ }
+
+ mutex_unlock(&dev->mutex);
+ return size;
+}
+
+static ssize_t state_show(struct device *pdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct android_dev *dev = dev_get_drvdata(pdev);
+ struct usb_composite_dev *cdev = dev->cdev;
+ char *state = "DISCONNECTED";
+ unsigned long flags;
+
+ if (!cdev)
+ goto out;
+
+ spin_lock_irqsave(&cdev->lock, flags);
+ if (cdev->config)
+ state = "CONFIGURED";
+ else if (dev->connected)
+ state = "CONNECTED";
+ spin_unlock_irqrestore(&cdev->lock, flags);
+out:
+ return sprintf(buf, "%s\n", state);
+}
+
+#define DESCRIPTOR_ATTR(field, format_string) \
+static ssize_t \
+field ## _show(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return sprintf(buf, format_string, device_desc.field); \
+} \
+static ssize_t \
+field ## _store(struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t size) \
+{ \
+ int value; \
+ if (sscanf(buf, format_string, &value) == 1) { \
+ device_desc.field = value; \
+ return size; \
+ } \
+ return -1; \
+} \
+static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, field ## _show, field ## _store);
+
+#define DESCRIPTOR_STRING_ATTR(field, buffer) \
+static ssize_t \
+field ## _show(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return sprintf(buf, "%s", buffer); \
+} \
+static ssize_t \
+field ## _store(struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t size) \
+{ \
+ if (size >= sizeof(buffer)) \
+ return -EINVAL; \
+ return strlcpy(buffer, buf, sizeof(buffer)); \
+} \
+static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, field ## _show, field ## _store);
+
+
+DESCRIPTOR_ATTR(idVendor, "%04x\n")
+DESCRIPTOR_ATTR(idProduct, "%04x\n")
+DESCRIPTOR_ATTR(bcdDevice, "%04x\n")
+DESCRIPTOR_ATTR(bDeviceClass, "%d\n")
+DESCRIPTOR_ATTR(bDeviceSubClass, "%d\n")
+DESCRIPTOR_ATTR(bDeviceProtocol, "%d\n")
+DESCRIPTOR_STRING_ATTR(iManufacturer, manufacturer_string)
+DESCRIPTOR_STRING_ATTR(iProduct, product_string)
+DESCRIPTOR_STRING_ATTR(iSerial, serial_string)
+
+static DEVICE_ATTR(functions, S_IRUGO | S_IWUSR, functions_show,
+ functions_store);
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, enable_show, enable_store);
+static DEVICE_ATTR(state, S_IRUGO, state_show, NULL);
+
+static struct device_attribute *android_usb_attributes[] = {
+ &dev_attr_idVendor,
+ &dev_attr_idProduct,
+ &dev_attr_bcdDevice,
+ &dev_attr_bDeviceClass,
+ &dev_attr_bDeviceSubClass,
+ &dev_attr_bDeviceProtocol,
+ &dev_attr_iManufacturer,
+ &dev_attr_iProduct,
+ &dev_attr_iSerial,
+ &dev_attr_functions,
+ &dev_attr_enable,
+ &dev_attr_state,
+ NULL
+};
+
+/*-------------------------------------------------------------------------*/
+/* Composite driver */
+
+static int android_bind_config(struct usb_configuration *c)
+{
+ struct android_dev *dev = _android_dev;
+ int ret = 0;
+
+ ret = android_bind_enabled_functions(dev, c);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void android_unbind_config(struct usb_configuration *c)
+{
+ struct android_dev *dev = _android_dev;
+
+ android_unbind_enabled_functions(dev, c);
+}
+
+static int android_bind(struct usb_composite_dev *cdev)
+{
+ struct android_dev *dev = _android_dev;
+ struct usb_gadget *gadget = cdev->gadget;
+ int id, ret;
+
+ /*
+ * Start disconnected. Userspace will connect the gadget once
+ * it is done configuring the functions.
+ */
+ usb_gadget_disconnect(gadget);
+
+ ret = android_init_functions(dev->functions, cdev);
+ if (ret)
+ return ret;
+
+ /* Allocate string descriptor numbers ... note that string
+ * contents can be overridden by the composite_dev glue.
+ */
+ id = usb_string_id(cdev);
+ if (id < 0)
+ return id;
+ strings_dev[STRING_MANUFACTURER_IDX].id = id;
+ device_desc.iManufacturer = id;
+
+ id = usb_string_id(cdev);
+ if (id < 0)
+ return id;
+ strings_dev[STRING_PRODUCT_IDX].id = id;
+ device_desc.iProduct = id;
+
+ /* Default strings - should be updated by userspace */
+ strncpy(manufacturer_string, "Android", sizeof(manufacturer_string)-1);
+ strncpy(product_string, "Android", sizeof(product_string) - 1);
+ strncpy(serial_string, "0123456789ABCDEF", sizeof(serial_string) - 1);
+
+ id = usb_string_id(cdev);
+ if (id < 0)
+ return id;
+ strings_dev[STRING_SERIAL_IDX].id = id;
+ device_desc.iSerialNumber = id;
+
+ usb_gadget_set_selfpowered(gadget);
+ dev->cdev = cdev;
+
+ return 0;
+}
+
+static int android_usb_unbind(struct usb_composite_dev *cdev)
+{
+ struct android_dev *dev = _android_dev;
+
+ cancel_work_sync(&dev->work);
+ android_cleanup_functions(dev->functions);
+ return 0;
+}
+
+static struct usb_composite_driver android_usb_driver = {
+ .name = "android_usb",
+ .dev = &device_desc,
+ .strings = dev_strings,
+ .bind = android_bind,
+ .unbind = android_usb_unbind,
+ .max_speed = USB_SPEED_HIGH,
+};
+
+static int
+android_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *c)
+{
+ struct android_dev *dev = _android_dev;
+ struct usb_composite_dev *cdev = get_gadget_data(gadget);
+ struct usb_request *req = cdev->req;
+ struct android_usb_function *f;
+ int value = -EOPNOTSUPP;
+ unsigned long flags;
+
+ req->zero = 0;
+ req->complete = composite_setup_complete;
+ req->length = 0;
+ gadget->ep0->driver_data = cdev;
+
+ list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+ if (f->ctrlrequest) {
+ value = f->ctrlrequest(f, cdev, c);
+ if (value >= 0)
+ break;
+ }
+ }
+
+ /* Special case the accessory function.
+ * It needs to handle control requests before it is enabled.
+ */
+ if (value < 0)
+ value = acc_ctrlrequest(cdev, c);
+
+ if (value < 0)
+ value = composite_setup(gadget, c);
+
+ spin_lock_irqsave(&cdev->lock, flags);
+ if (!dev->connected) {
+ dev->connected = 1;
+ schedule_work(&dev->work);
+ } else if (c->bRequest == USB_REQ_SET_CONFIGURATION &&
+ cdev->config) {
+ schedule_work(&dev->work);
+ }
+ spin_unlock_irqrestore(&cdev->lock, flags);
+
+ return value;
+}
+
+static void android_disconnect(struct usb_gadget *gadget)
+{
+ struct android_dev *dev = _android_dev;
+ struct usb_composite_dev *cdev = get_gadget_data(gadget);
+ unsigned long flags;
+
+ composite_disconnect(gadget);
+ /*
+ * Accessory HID support can be active while the accessory function
+ * is not actually enabled, so we need to inform it when we are
+ * disconnected.
+ */
+ acc_disconnect();
+
+ spin_lock_irqsave(&cdev->lock, flags);
+ dev->connected = 0;
+ schedule_work(&dev->work);
+ spin_unlock_irqrestore(&cdev->lock, flags);
+}
+
+static int android_create_device(struct android_dev *dev)
+{
+ struct device_attribute **attrs = android_usb_attributes;
+ struct device_attribute *attr;
+ int err;
+
+ dev->dev = device_create(android_class, NULL,
+ MKDEV(0, 0), NULL, "android0");
+ if (IS_ERR(dev->dev))
+ return PTR_ERR(dev->dev);
+
+ dev_set_drvdata(dev->dev, dev);
+
+ while ((attr = *attrs++)) {
+ err = device_create_file(dev->dev, attr);
+ if (err) {
+ device_destroy(android_class, dev->dev->devt);
+ return err;
+ }
+ }
+ return 0;
+}
+
+
+static int __init init(void)
+{
+ struct android_dev *dev;
+ int err;
+
+ android_class = class_create(THIS_MODULE, "android_usb");
+ if (IS_ERR(android_class))
+ return PTR_ERR(android_class);
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->disable_depth = 1;
+ dev->functions = supported_functions;
+ INIT_LIST_HEAD(&dev->enabled_functions);
+ INIT_WORK(&dev->work, android_work);
+ mutex_init(&dev->mutex);
+
+ err = android_create_device(dev);
+ if (err) {
+ class_destroy(android_class);
+ kfree(dev);
+ return err;
+ }
+
+ _android_dev = dev;
+
+ /* Override composite driver functions */
+ composite_driver_template.setup = android_setup;
+ composite_driver_template.disconnect = android_disconnect;
+
+ return usb_composite_probe(&android_usb_driver);
+}
+module_init(init);
+
+static void __exit cleanup(void)
+{
+ usb_composite_unregister(&android_usb_driver);
+ class_destroy(android_class);
+ kfree(_android_dev);
+ _android_dev = NULL;
+}
+module_exit(cleanup);
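android_work() above reports state transitions to userspace as USB_STATE=... uevents on the android0 device. For completeness, a minimal userspace listener sketch that prints those transitions might look like this; it is an ordinary NETLINK_KOBJECT_UEVENT socket and is not part of the patch itself.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
	struct sockaddr_nl addr = {
		.nl_family = AF_NETLINK,
		.nl_pid    = getpid(),
		.nl_groups = 1,		/* kernel uevent multicast group */
	};
	char buf[4096];
	int fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf) - 1, 0);
		ssize_t i;

		if (len <= 0)
			break;
		buf[len] = '\0';
		/* The payload is a list of NUL-separated KEY=VALUE strings;
		 * android_work() adds USB_STATE=... for the android0 device. */
		for (i = 0; i < len; i += strlen(buf + i) + 1)
			if (!strncmp(buf + i, "USB_STATE=", 10))
				printf("%s\n", buf + i);
	}
	return 0;
}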
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 2a6bfe759c29..c6f3311c9131 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1528,8 +1528,11 @@ composite_resume(struct usb_gadget *gadget)
}
/*-------------------------------------------------------------------------*/
-
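+/* with CONFIG_USB_G_ANDROID, android.c overrides .setup and .disconnect at init time */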
+#if IS_ENABLED(CONFIG_USB_G_ANDROID)
+static struct usb_gadget_driver composite_driver_template = {
+#else
static const struct usb_gadget_driver composite_driver_template = {
+#endif
.bind = composite_bind,
.unbind = composite_unbind,
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index 95d584dbed13..8cf0c0f6fa1f 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -130,10 +130,7 @@ static const char ep0name[] = "ep0";
static const char *const ep_name[] = {
ep0name, /* everyone has ep0 */
- /* act like a net2280: high speed, six configurable endpoints */
- "ep-a", "ep-b", "ep-c", "ep-d", "ep-e", "ep-f",
-
- /* or like pxa250: fifteen fixed function endpoints */
+ /* act like a pxa250: fifteen fixed function endpoints */
"ep1in-bulk", "ep2out-bulk", "ep3in-iso", "ep4out-iso", "ep5in-int",
"ep6in-bulk", "ep7out-bulk", "ep8in-iso", "ep9out-iso", "ep10in-int",
"ep11in-bulk", "ep12out-bulk", "ep13in-iso", "ep14out-iso",
@@ -141,6 +138,10 @@ static const char *const ep_name[] = {
/* or like sa1100: two fixed function endpoints */
"ep1out-bulk", "ep2in-bulk",
+
+ /* and now some generic EPs so we have enough in multi config */
+ "ep3out", "ep4in", "ep5out", "ep6out", "ep7in", "ep8out", "ep9in",
+ "ep10out", "ep11out", "ep12in", "ep13out", "ep14in", "ep15out",
};
#define DUMMY_ENDPOINTS ARRAY_SIZE(ep_name)
diff --git a/drivers/usb/gadget/f_accessory.c b/drivers/usb/gadget/f_accessory.c
new file mode 100644
index 000000000000..092964c2b506
--- /dev/null
+++ b/drivers/usb/gadget/f_accessory.c
@@ -0,0 +1,1180 @@
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#include <linux/hid.h>
+#include <linux/hiddev.h>
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/f_accessory.h>
+
+#define BULK_BUFFER_SIZE 16384
+#define ACC_STRING_SIZE 256
+
+#define PROTOCOL_VERSION 2
+
+/* String IDs */
+#define INTERFACE_STRING_INDEX 0
+
+/* number of tx and rx requests to allocate */
+#define TX_REQ_MAX 4
+#define RX_REQ_MAX 2
+
+struct acc_hid_dev {
+ struct list_head list;
+ struct hid_device *hid;
+ struct acc_dev *dev;
+ /* accessory defined ID */
+ int id;
+ /* HID report descriptor */
+ u8 *report_desc;
+ /* length of HID report descriptor */
+ int report_desc_len;
+ /* number of bytes of report_desc we have received so far */
+ int report_desc_offset;
+};
+
+struct acc_dev {
+ struct usb_function function;
+ struct usb_composite_dev *cdev;
+ spinlock_t lock;
+
+ struct usb_ep *ep_in;
+ struct usb_ep *ep_out;
+
+ /* set to 1 when we connect */
+ int online:1;
+ /* Set to 1 when we disconnect.
+ * Not cleared until our file is closed.
+ */
+ int disconnected:1;
+
+ /* strings sent by the host */
+ char manufacturer[ACC_STRING_SIZE];
+ char model[ACC_STRING_SIZE];
+ char description[ACC_STRING_SIZE];
+ char version[ACC_STRING_SIZE];
+ char uri[ACC_STRING_SIZE];
+ char serial[ACC_STRING_SIZE];
+
+ /* for acc_complete_set_string */
+ int string_index;
+
+ /* set to 1 if we have a pending start request */
+ int start_requested;
+
+ int audio_mode;
+
+ /* synchronize access to our device file */
+ atomic_t open_excl;
+
+ struct list_head tx_idle;
+
+ wait_queue_head_t read_wq;
+ wait_queue_head_t write_wq;
+ struct usb_request *rx_req[RX_REQ_MAX];
+ int rx_done;
+
+ /* delayed work for handling ACCESSORY_START */
+ struct delayed_work start_work;
+
+ /* worker for registering and unregistering hid devices */
+ struct work_struct hid_work;
+
+ /* list of active HID devices */
+ struct list_head hid_list;
+
+ /* list of new HID devices to register */
+ struct list_head new_hid_list;
+
+ /* list of dead HID devices to unregister */
+ struct list_head dead_hid_list;
+};
+
+static struct usb_interface_descriptor acc_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bInterfaceNumber = 0,
+ .bNumEndpoints = 2,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC,
+ .bInterfaceProtocol = 0,
+};
+
+static struct usb_endpoint_descriptor acc_highspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor acc_highspeed_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor acc_fullspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor acc_fullspeed_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *fs_acc_descs[] = {
+ (struct usb_descriptor_header *) &acc_interface_desc,
+ (struct usb_descriptor_header *) &acc_fullspeed_in_desc,
+ (struct usb_descriptor_header *) &acc_fullspeed_out_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *hs_acc_descs[] = {
+ (struct usb_descriptor_header *) &acc_interface_desc,
+ (struct usb_descriptor_header *) &acc_highspeed_in_desc,
+ (struct usb_descriptor_header *) &acc_highspeed_out_desc,
+ NULL,
+};
+
+static struct usb_string acc_string_defs[] = {
+ [INTERFACE_STRING_INDEX].s = "Android Accessory Interface",
+ { }, /* end of list */
+};
+
+static struct usb_gadget_strings acc_string_table = {
+ .language = 0x0409, /* en-US */
+ .strings = acc_string_defs,
+};
+
+static struct usb_gadget_strings *acc_strings[] = {
+ &acc_string_table,
+ NULL,
+};
+
+/* global accessory device instance, set up in acc_setup() and used by acc_open() */
+static struct acc_dev *_acc_dev;
+
+static inline struct acc_dev *func_to_dev(struct usb_function *f)
+{
+ return container_of(f, struct acc_dev, function);
+}
+
+static struct usb_request *acc_request_new(struct usb_ep *ep, int buffer_size)
+{
+ struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (!req)
+ return NULL;
+
+ /* now allocate buffers for the requests */
+ req->buf = kmalloc(buffer_size, GFP_KERNEL);
+ if (!req->buf) {
+ usb_ep_free_request(ep, req);
+ return NULL;
+ }
+
+ return req;
+}
+
+static void acc_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+ if (req) {
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+ }
+}
+
+/* add a request to the tail of a list */
+static void req_put(struct acc_dev *dev, struct list_head *head,
+ struct usb_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&req->list, head);
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request *req_get(struct acc_dev *dev, struct list_head *head)
+{
+ unsigned long flags;
+ struct usb_request *req;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (list_empty(head)) {
+ req = 0;
+ } else {
+ req = list_first_entry(head, struct usb_request, list);
+ list_del(&req->list);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return req;
+}
+
+static void acc_set_disconnected(struct acc_dev *dev)
+{
+ dev->online = 0;
+ dev->disconnected = 1;
+}
+
+static void acc_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+ struct acc_dev *dev = _acc_dev;
+
+ if (req->status != 0)
+ acc_set_disconnected(dev);
+
+ req_put(dev, &dev->tx_idle, req);
+
+ wake_up(&dev->write_wq);
+}
+
+static void acc_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+ struct acc_dev *dev = _acc_dev;
+
+ dev->rx_done = 1;
+ if (req->status != 0)
+ acc_set_disconnected(dev);
+
+ wake_up(&dev->read_wq);
+}
+
+static void acc_complete_set_string(struct usb_ep *ep, struct usb_request *req)
+{
+ struct acc_dev *dev = ep->driver_data;
+ char *string_dest = NULL;
+ int length = req->actual;
+
+ if (req->status != 0) {
+ pr_err("acc_complete_set_string, err %d\n", req->status);
+ return;
+ }
+
+ switch (dev->string_index) {
+ case ACCESSORY_STRING_MANUFACTURER:
+ string_dest = dev->manufacturer;
+ break;
+ case ACCESSORY_STRING_MODEL:
+ string_dest = dev->model;
+ break;
+ case ACCESSORY_STRING_DESCRIPTION:
+ string_dest = dev->description;
+ break;
+ case ACCESSORY_STRING_VERSION:
+ string_dest = dev->version;
+ break;
+ case ACCESSORY_STRING_URI:
+ string_dest = dev->uri;
+ break;
+ case ACCESSORY_STRING_SERIAL:
+ string_dest = dev->serial;
+ break;
+ }
+ if (string_dest) {
+ unsigned long flags;
+
+ if (length >= ACC_STRING_SIZE)
+ length = ACC_STRING_SIZE - 1;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ memcpy(string_dest, req->buf, length);
+ /* ensure zero termination */
+ string_dest[length] = 0;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ } else {
+ pr_err("unknown accessory string index %d\n",
+ dev->string_index);
+ }
+}
+
+static void acc_complete_set_hid_report_desc(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct acc_hid_dev *hid = req->context;
+ struct acc_dev *dev = hid->dev;
+ int length = req->actual;
+
+ if (req->status != 0) {
+ pr_err("acc_complete_set_hid_report_desc, err %d\n",
+ req->status);
+ return;
+ }
+
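+ /* the report descriptor may arrive split across several control transfers */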
+ memcpy(hid->report_desc + hid->report_desc_offset, req->buf, length);
+ hid->report_desc_offset += length;
+ if (hid->report_desc_offset == hid->report_desc_len) {
+ /* After we have received the entire report descriptor
+ * we schedule work to initialize the HID device
+ */
+ schedule_work(&dev->hid_work);
+ }
+}
+
+static void acc_complete_send_hid_event(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct acc_hid_dev *hid = req->context;
+ int length = req->actual;
+
+ if (req->status != 0) {
+ pr_err("acc_complete_send_hid_event, err %d\n", req->status);
+ return;
+ }
+
+ hid_report_raw_event(hid->hid, HID_INPUT_REPORT, req->buf, length, 1);
+}
+
+static int acc_hid_parse(struct hid_device *hid)
+{
+ struct acc_hid_dev *hdev = hid->driver_data;
+
+ hid_parse_report(hid, hdev->report_desc, hdev->report_desc_len);
+ return 0;
+}
+
+static int acc_hid_start(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void acc_hid_stop(struct hid_device *hid)
+{
+}
+
+static int acc_hid_open(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void acc_hid_close(struct hid_device *hid)
+{
+}
+
+static struct hid_ll_driver acc_hid_ll_driver = {
+ .parse = acc_hid_parse,
+ .start = acc_hid_start,
+ .stop = acc_hid_stop,
+ .open = acc_hid_open,
+ .close = acc_hid_close,
+};
+
+static struct acc_hid_dev *acc_hid_new(struct acc_dev *dev,
+ int id, int desc_len)
+{
+ struct acc_hid_dev *hdev;
+
+ hdev = kzalloc(sizeof(*hdev), GFP_ATOMIC);
+ if (!hdev)
+ return NULL;
+ hdev->report_desc = kzalloc(desc_len, GFP_ATOMIC);
+ if (!hdev->report_desc) {
+ kfree(hdev);
+ return NULL;
+ }
+ hdev->dev = dev;
+ hdev->id = id;
+ hdev->report_desc_len = desc_len;
+
+ return hdev;
+}
+
+static struct acc_hid_dev *acc_hid_get(struct list_head *list, int id)
+{
+ struct acc_hid_dev *hid;
+
+ list_for_each_entry(hid, list, list) {
+ if (hid->id == id)
+ return hid;
+ }
+ return NULL;
+}
+
+static int acc_register_hid(struct acc_dev *dev, int id, int desc_length)
+{
+ struct acc_hid_dev *hid;
+ unsigned long flags;
+
+ /* report descriptor length must be > 0 */
+ if (desc_length <= 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ /* replace HID if one already exists with this ID */
+ hid = acc_hid_get(&dev->hid_list, id);
+ if (!hid)
+ hid = acc_hid_get(&dev->new_hid_list, id);
+ if (hid)
+ list_move(&hid->list, &dev->dead_hid_list);
+
+ hid = acc_hid_new(dev, id, desc_length);
+ if (!hid) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return -ENOMEM;
+ }
+
+ list_add(&hid->list, &dev->new_hid_list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ /* schedule work to register the HID device */
+ schedule_work(&dev->hid_work);
+ return 0;
+}
+
+static int acc_unregister_hid(struct acc_dev *dev, int id)
+{
+ struct acc_hid_dev *hid;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ hid = acc_hid_get(&dev->hid_list, id);
+ if (!hid)
+ hid = acc_hid_get(&dev->new_hid_list, id);
+ if (!hid) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return -EINVAL;
+ }
+
+ list_move(&hid->list, &dev->dead_hid_list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ schedule_work(&dev->hid_work);
+ return 0;
+}
+
+static int create_bulk_endpoints(struct acc_dev *dev,
+ struct usb_endpoint_descriptor *in_desc,
+ struct usb_endpoint_descriptor *out_desc)
+{
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req;
+ struct usb_ep *ep;
+ int i;
+
+ DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+
+ ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+ if (!ep) {
+ DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+ return -ENODEV;
+ }
+ DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+ ep->driver_data = dev; /* claim the endpoint */
+ dev->ep_in = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+ if (!ep) {
+ DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+ return -ENODEV;
+ }
+ DBG(cdev, "usb_ep_autoconfig for ep_out got %s\n", ep->name);
+ ep->driver_data = dev; /* claim the endpoint */
+ dev->ep_out = ep;
+
+ /* now allocate requests for our endpoints */
+ for (i = 0; i < TX_REQ_MAX; i++) {
+ req = acc_request_new(dev->ep_in, BULK_BUFFER_SIZE);
+ if (!req)
+ goto fail;
+ req->complete = acc_complete_in;
+ req_put(dev, &dev->tx_idle, req);
+ }
+ for (i = 0; i < RX_REQ_MAX; i++) {
+ req = acc_request_new(dev->ep_out, BULK_BUFFER_SIZE);
+ if (!req)
+ goto fail;
+ req->complete = acc_complete_out;
+ dev->rx_req[i] = req;
+ }
+
+ return 0;
+
+fail:
+ pr_err("acc_bind() could not allocate requests\n");
+ while ((req = req_get(dev, &dev->tx_idle)))
+ acc_request_free(req, dev->ep_in);
+ for (i = 0; i < RX_REQ_MAX; i++)
+ acc_request_free(dev->rx_req[i], dev->ep_out);
+ return -1;
+}
+
+static ssize_t acc_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct acc_dev *dev = fp->private_data;
+ struct usb_request *req;
+ int r = count, xfer;
+ int ret = 0;
+
+ pr_debug("acc_read(%zu)\n", count);
+
+ if (dev->disconnected)
+ return -ENODEV;
+
+ if (count > BULK_BUFFER_SIZE)
+ count = BULK_BUFFER_SIZE;
+
+ /* we will block until we're online */
+ pr_debug("acc_read: waiting for online\n");
+ ret = wait_event_interruptible(dev->read_wq, dev->online);
+ if (ret < 0) {
+ r = ret;
+ goto done;
+ }
+
+requeue_req:
+ /* queue a request */
+ req = dev->rx_req[0];
+ req->length = count;
+ dev->rx_done = 0;
+ ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
+ if (ret < 0) {
+ r = -EIO;
+ goto done;
+ } else {
+ pr_debug("rx %p queue\n", req);
+ }
+
+ /* wait for a request to complete */
+ ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+ if (ret < 0) {
+ r = ret;
+ usb_ep_dequeue(dev->ep_out, req);
+ goto done;
+ }
+ if (dev->online) {
+ /* If we got a 0-len packet, throw it back and try again. */
+ if (req->actual == 0)
+ goto requeue_req;
+
+ pr_debug("rx %p %d\n", req, req->actual);
+ xfer = (req->actual < count) ? req->actual : count;
+ r = xfer;
+ if (copy_to_user(buf, req->buf, xfer))
+ r = -EFAULT;
+ } else
+ r = -EIO;
+
+done:
+ pr_debug("acc_read returning %d\n", r);
+ return r;
+}
+
+static ssize_t acc_write(struct file *fp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct acc_dev *dev = fp->private_data;
+ struct usb_request *req = 0;
+ int r = count, xfer;
+ int ret;
+
+ pr_debug("acc_write(%zu)\n", count);
+
+ if (!dev->online || dev->disconnected)
+ return -ENODEV;
+
+ while (count > 0) {
+ if (!dev->online) {
+ pr_debug("acc_write dev->error\n");
+ r = -EIO;
+ break;
+ }
+
+ /* get an idle tx request to use */
+ req = 0;
+ ret = wait_event_interruptible(dev->write_wq,
+ ((req = req_get(dev, &dev->tx_idle)) || !dev->online));
+ if (!req) {
+ r = ret;
+ break;
+ }
+
+ if (count > BULK_BUFFER_SIZE)
+ xfer = BULK_BUFFER_SIZE;
+ else
+ xfer = count;
+ if (copy_from_user(req->buf, buf, xfer)) {
+ r = -EFAULT;
+ break;
+ }
+
+ req->length = xfer;
+ ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+ if (ret < 0) {
+ pr_debug("acc_write: xfer error %d\n", ret);
+ r = -EIO;
+ break;
+ }
+
+ buf += xfer;
+ count -= xfer;
+
+ /* zero this so we don't try to free it on error exit */
+ req = 0;
+ }
+
+ if (req)
+ req_put(dev, &dev->tx_idle, req);
+
+ pr_debug("acc_write returning %d\n", r);
+ return r;
+}
+
+static long acc_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+ struct acc_dev *dev = fp->private_data;
+ char *src = NULL;
+ int ret;
+
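+ /* the GET_STRING ioctls return the strings the host sent via ACCESSORY_SEND_STRING */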
+ switch (code) {
+ case ACCESSORY_GET_STRING_MANUFACTURER:
+ src = dev->manufacturer;
+ break;
+ case ACCESSORY_GET_STRING_MODEL:
+ src = dev->model;
+ break;
+ case ACCESSORY_GET_STRING_DESCRIPTION:
+ src = dev->description;
+ break;
+ case ACCESSORY_GET_STRING_VERSION:
+ src = dev->version;
+ break;
+ case ACCESSORY_GET_STRING_URI:
+ src = dev->uri;
+ break;
+ case ACCESSORY_GET_STRING_SERIAL:
+ src = dev->serial;
+ break;
+ case ACCESSORY_IS_START_REQUESTED:
+ return dev->start_requested;
+ case ACCESSORY_GET_AUDIO_MODE:
+ return dev->audio_mode;
+ }
+ if (!src)
+ return -EINVAL;
+
+ ret = strlen(src) + 1;
+ if (copy_to_user((void __user *)value, src, ret))
+ ret = -EFAULT;
+ return ret;
+}
+
+static int acc_open(struct inode *ip, struct file *fp)
+{
+ printk(KERN_INFO "acc_open\n");
+ if (atomic_xchg(&_acc_dev->open_excl, 1))
+ return -EBUSY;
+
+ _acc_dev->disconnected = 0;
+ fp->private_data = _acc_dev;
+ return 0;
+}
+
+static int acc_release(struct inode *ip, struct file *fp)
+{
+ printk(KERN_INFO "acc_release\n");
+
+ WARN_ON(!atomic_xchg(&_acc_dev->open_excl, 0));
+ _acc_dev->disconnected = 0;
+ return 0;
+}
+
+/* file operations for /dev/usb_accessory */
+static const struct file_operations acc_fops = {
+ .owner = THIS_MODULE,
+ .read = acc_read,
+ .write = acc_write,
+ .unlocked_ioctl = acc_ioctl,
+ .open = acc_open,
+ .release = acc_release,
+};
+
+static int acc_hid_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int ret;
+
+ ret = hid_parse(hdev);
+ if (ret)
+ return ret;
+ return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+}
+
+static struct miscdevice acc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "usb_accessory",
+ .fops = &acc_fops,
+};
+
+static const struct hid_device_id acc_hid_table[] = {
+ { HID_USB_DEVICE(HID_ANY_ID, HID_ANY_ID) },
+ { }
+};
+
+static struct hid_driver acc_hid_driver = {
+ .name = "USB accessory",
+ .id_table = acc_hid_table,
+ .probe = acc_hid_probe,
+};
+
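+/* handle vendor-specific control requests defined by the Android Open Accessory protocol */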
+static int acc_ctrlrequest(struct usb_composite_dev *cdev,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct acc_dev *dev = _acc_dev;
+ int value = -EOPNOTSUPP;
+ struct acc_hid_dev *hid;
+ int offset;
+ u8 b_requestType = ctrl->bRequestType;
+ u8 b_request = ctrl->bRequest;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+ unsigned long flags;
+
+/*
+ printk(KERN_INFO "acc_ctrlrequest "
+ "%02x.%02x v%04x i%04x l%u\n",
+ b_requestType, b_request,
+ w_value, w_index, w_length);
+*/
+
+ if (b_requestType == (USB_DIR_OUT | USB_TYPE_VENDOR)) {
+ if (b_request == ACCESSORY_START) {
+ dev->start_requested = 1;
+ schedule_delayed_work(
+ &dev->start_work, msecs_to_jiffies(10));
+ value = 0;
+ } else if (b_request == ACCESSORY_SEND_STRING) {
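+ /* the string data arrives in the data stage and is
+ * stored by acc_complete_set_string()
+ */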
+ dev->string_index = w_index;
+ cdev->gadget->ep0->driver_data = dev;
+ cdev->req->complete = acc_complete_set_string;
+ value = w_length;
+ } else if (b_request == ACCESSORY_SET_AUDIO_MODE &&
+ w_index == 0 && w_length == 0) {
+ dev->audio_mode = w_value;
+ value = 0;
+ } else if (b_request == ACCESSORY_REGISTER_HID) {
+ value = acc_register_hid(dev, w_value, w_index);
+ } else if (b_request == ACCESSORY_UNREGISTER_HID) {
+ value = acc_unregister_hid(dev, w_value);
+ } else if (b_request == ACCESSORY_SET_HID_REPORT_DESC) {
+ spin_lock_irqsave(&dev->lock, flags);
+ hid = acc_hid_get(&dev->new_hid_list, w_value);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ if (!hid) {
+ value = -EINVAL;
+ goto err;
+ }
+ offset = w_index;
+ if (offset != hid->report_desc_offset
+ || offset + w_length > hid->report_desc_len) {
+ value = -EINVAL;
+ goto err;
+ }
+ cdev->req->context = hid;
+ cdev->req->complete = acc_complete_set_hid_report_desc;
+ value = w_length;
+ } else if (b_request == ACCESSORY_SEND_HID_EVENT) {
+ spin_lock_irqsave(&dev->lock, flags);
+ hid = acc_hid_get(&dev->hid_list, w_value);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ if (!hid) {
+ value = -EINVAL;
+ goto err;
+ }
+ cdev->req->context = hid;
+ cdev->req->complete = acc_complete_send_hid_event;
+ value = w_length;
+ }
+ } else if (b_requestType == (USB_DIR_IN | USB_TYPE_VENDOR)) {
+ if (b_request == ACCESSORY_GET_PROTOCOL) {
+ *((__le16 *)cdev->req->buf) = cpu_to_le16(PROTOCOL_VERSION);
+ value = sizeof(u16);
+
+ /* clear any string left over from a previous session */
+ memset(dev->manufacturer, 0, sizeof(dev->manufacturer));
+ memset(dev->model, 0, sizeof(dev->model));
+ memset(dev->description, 0, sizeof(dev->description));
+ memset(dev->version, 0, sizeof(dev->version));
+ memset(dev->uri, 0, sizeof(dev->uri));
+ memset(dev->serial, 0, sizeof(dev->serial));
+ dev->start_requested = 0;
+ dev->audio_mode = 0;
+ }
+ }
+
+ if (value >= 0) {
+ cdev->req->zero = 0;
+ cdev->req->length = value;
+ value = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+ if (value < 0)
+ ERROR(cdev, "%s setup response queue error\n",
+ __func__);
+ }
+
+err:
+ if (value == -EOPNOTSUPP)
+ VDBG(cdev,
+ "unknown class-specific control req "
+ "%02x.%02x v%04x i%04x l%u\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ return value;
+}
+
+static int
+acc_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct acc_dev *dev = func_to_dev(f);
+ int id;
+ int ret;
+
+ DBG(cdev, "acc_function_bind dev: %p\n", dev);
+
+ ret = hid_register_driver(&acc_hid_driver);
+ if (ret)
+ return ret;
+
+ dev->start_requested = 0;
+
+ /* allocate interface ID(s) */
+ id = usb_interface_id(c, f);
+ if (id < 0)
+ return id;
+ acc_interface_desc.bInterfaceNumber = id;
+
+ /* allocate endpoints */
+ ret = create_bulk_endpoints(dev, &acc_fullspeed_in_desc,
+ &acc_fullspeed_out_desc);
+ if (ret)
+ return ret;
+
+ /* support high speed hardware */
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ acc_highspeed_in_desc.bEndpointAddress =
+ acc_fullspeed_in_desc.bEndpointAddress;
+ acc_highspeed_out_desc.bEndpointAddress =
+ acc_fullspeed_out_desc.bEndpointAddress;
+ }
+
+ DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+ gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ f->name, dev->ep_in->name, dev->ep_out->name);
+ return 0;
+}
+
+static void
+kill_all_hid_devices(struct acc_dev *dev)
+{
+ struct acc_hid_dev *hid;
+ struct list_head *entry, *temp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_for_each_safe(entry, temp, &dev->hid_list) {
+ hid = list_entry(entry, struct acc_hid_dev, list);
+ list_move(&hid->list, &dev->dead_hid_list);
+ }
+ list_for_each_safe(entry, temp, &dev->new_hid_list) {
+ hid = list_entry(entry, struct acc_hid_dev, list);
+ list_move(&hid->list, &dev->dead_hid_list);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ schedule_work(&dev->hid_work);
+}
+
+static void
+acc_hid_unbind(struct acc_dev *dev)
+{
+ hid_unregister_driver(&acc_hid_driver);
+ kill_all_hid_devices(dev);
+}
+
+static void
+acc_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct acc_dev *dev = func_to_dev(f);
+ struct usb_request *req;
+ int i;
+
+ while ((req = req_get(dev, &dev->tx_idle)))
+ acc_request_free(req, dev->ep_in);
+ for (i = 0; i < RX_REQ_MAX; i++)
+ acc_request_free(dev->rx_req[i], dev->ep_out);
+
+ acc_hid_unbind(dev);
+}
+
+static void acc_start_work(struct work_struct *data)
+{
+ char *envp[2] = { "ACCESSORY=START", NULL };
+ kobject_uevent_env(&acc_device.this_device->kobj, KOBJ_CHANGE, envp);
+}
+
+static int acc_hid_init(struct acc_hid_dev *hdev)
+{
+ struct hid_device *hid;
+ int ret;
+
+ hid = hid_allocate_device();
+ if (IS_ERR(hid))
+ return PTR_ERR(hid);
+
+ hid->ll_driver = &acc_hid_ll_driver;
+ hid->dev.parent = acc_device.this_device;
+
+ hid->bus = BUS_USB;
+ hid->vendor = HID_ANY_ID;
+ hid->product = HID_ANY_ID;
+ hid->driver_data = hdev;
+ ret = hid_add_device(hid);
+ if (ret) {
+ pr_err("can't add hid device: %d\n", ret);
+ hid_destroy_device(hid);
+ return ret;
+ }
+
+ hdev->hid = hid;
+ return 0;
+}
+
+static void acc_hid_delete(struct acc_hid_dev *hid)
+{
+ kfree(hid->report_desc);
+ kfree(hid);
+}
+
+static void acc_hid_work(struct work_struct *data)
+{
+ struct acc_dev *dev = _acc_dev;
+ struct list_head *entry, *temp;
+ struct acc_hid_dev *hid;
+ struct list_head new_list, dead_list;
+ unsigned long flags;
+
+ INIT_LIST_HEAD(&new_list);
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ /* copy hids that are ready for initialization to new_list */
+ list_for_each_safe(entry, temp, &dev->new_hid_list) {
+ hid = list_entry(entry, struct acc_hid_dev, list);
+ if (hid->report_desc_offset == hid->report_desc_len)
+ list_move(&hid->list, &new_list);
+ }
+
+ if (list_empty(&dev->dead_hid_list)) {
+ INIT_LIST_HEAD(&dead_list);
+ } else {
+ /* move all of dev->dead_hid_list to dead_list */
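+ /* (open-coded equivalent of list_replace_init()) */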
+ dead_list.prev = dev->dead_hid_list.prev;
+ dead_list.next = dev->dead_hid_list.next;
+ dead_list.next->prev = &dead_list;
+ dead_list.prev->next = &dead_list;
+ INIT_LIST_HEAD(&dev->dead_hid_list);
+ }
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ /* register new HID devices */
+ list_for_each_safe(entry, temp, &new_list) {
+ hid = list_entry(entry, struct acc_hid_dev, list);
+ if (acc_hid_init(hid)) {
+ pr_err("can't add HID device %p\n", hid);
+ acc_hid_delete(hid);
+ } else {
+ spin_lock_irqsave(&dev->lock, flags);
+ list_move(&hid->list, &dev->hid_list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ }
+ }
+
+ /* remove dead HID devices */
+ list_for_each_safe(entry, temp, &dead_list) {
+ hid = list_entry(entry, struct acc_hid_dev, list);
+ list_del(&hid->list);
+ if (hid->hid)
+ hid_destroy_device(hid->hid);
+ acc_hid_delete(hid);
+ }
+}
+
+static int acc_function_set_alt(struct usb_function *f,
+ unsigned intf, unsigned alt)
+{
+ struct acc_dev *dev = func_to_dev(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int ret;
+
+ DBG(cdev, "acc_function_set_alt intf: %d alt: %d\n", intf, alt);
+
+ ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+ if (ret)
+ return ret;
+
+ ret = usb_ep_enable(dev->ep_in);
+ if (ret)
+ return ret;
+
+ ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+ if (ret)
+ return ret;
+
+ ret = usb_ep_enable(dev->ep_out);
+ if (ret) {
+ usb_ep_disable(dev->ep_in);
+ return ret;
+ }
+
+ dev->online = 1;
+
+ /* readers may be blocked waiting for us to go online */
+ wake_up(&dev->read_wq);
+ return 0;
+}
+
+static void acc_function_disable(struct usb_function *f)
+{
+ struct acc_dev *dev = func_to_dev(f);
+ struct usb_composite_dev *cdev = dev->cdev;
+
+ DBG(cdev, "acc_function_disable\n");
+ acc_set_disconnected(dev);
+ usb_ep_disable(dev->ep_in);
+ usb_ep_disable(dev->ep_out);
+
+ /* readers may be blocked waiting for us to go online */
+ wake_up(&dev->read_wq);
+
+ VDBG(cdev, "%s disabled\n", dev->function.name);
+}
+
+static int acc_bind_config(struct usb_configuration *c)
+{
+ struct acc_dev *dev = _acc_dev;
+ int ret;
+
+ printk(KERN_INFO "acc_bind_config\n");
+
+ /* allocate a string ID for our interface */
+ if (acc_string_defs[INTERFACE_STRING_INDEX].id == 0) {
+ ret = usb_string_id(c->cdev);
+ if (ret < 0)
+ return ret;
+ acc_string_defs[INTERFACE_STRING_INDEX].id = ret;
+ acc_interface_desc.iInterface = ret;
+ }
+
+ dev->cdev = c->cdev;
+ dev->function.name = "accessory";
+ dev->function.strings = acc_strings,
+ dev->function.fs_descriptors = fs_acc_descs;
+ dev->function.hs_descriptors = hs_acc_descs;
+ dev->function.bind = acc_function_bind;
+ dev->function.unbind = acc_function_unbind;
+ dev->function.set_alt = acc_function_set_alt;
+ dev->function.disable = acc_function_disable;
+
+ return usb_add_function(c, &dev->function);
+}
+
+static int acc_setup(void)
+{
+ struct acc_dev *dev;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->lock);
+ init_waitqueue_head(&dev->read_wq);
+ init_waitqueue_head(&dev->write_wq);
+ atomic_set(&dev->open_excl, 0);
+ INIT_LIST_HEAD(&dev->tx_idle);
+ INIT_LIST_HEAD(&dev->hid_list);
+ INIT_LIST_HEAD(&dev->new_hid_list);
+ INIT_LIST_HEAD(&dev->dead_hid_list);
+ INIT_DELAYED_WORK(&dev->start_work, acc_start_work);
+ INIT_WORK(&dev->hid_work, acc_hid_work);
+
+ /* _acc_dev must be set before calling usb_gadget_register_driver */
+ _acc_dev = dev;
+
+ ret = misc_register(&acc_device);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ kfree(dev);
+ pr_err("USB accessory gadget driver failed to initialize\n");
+ return ret;
+}
+
+static void acc_disconnect(void)
+{
+ /* unregister all HID devices if USB is disconnected */
+ kill_all_hid_devices(_acc_dev);
+}
+
+static void acc_cleanup(void)
+{
+ misc_deregister(&acc_device);
+ kfree(_acc_dev);
+ _acc_dev = NULL;
+}
diff --git a/drivers/usb/gadget/f_adb.c b/drivers/usb/gadget/f_adb.c
new file mode 100644
index 000000000000..a1d70d276953
--- /dev/null
+++ b/drivers/usb/gadget/f_adb.c
@@ -0,0 +1,619 @@
+/*
+ * Gadget Driver for Android ADB
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#define ADB_BULK_BUFFER_SIZE 4096
+
+/* number of tx requests to allocate */
+#define TX_REQ_MAX 4
+
+static const char adb_shortname[] = "android_adb";
+
+struct adb_dev {
+ struct usb_function function;
+ struct usb_composite_dev *cdev;
+ spinlock_t lock;
+
+ struct usb_ep *ep_in;
+ struct usb_ep *ep_out;
+
+ int online;
+ int error;
+
+ atomic_t read_excl;
+ atomic_t write_excl;
+ atomic_t open_excl;
+
+ struct list_head tx_idle;
+
+ wait_queue_head_t read_wq;
+ wait_queue_head_t write_wq;
+ struct usb_request *rx_req;
+ int rx_done;
+};
+
+static struct usb_interface_descriptor adb_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bInterfaceNumber = 0,
+ .bNumEndpoints = 2,
+ .bInterfaceClass = 0xFF,
+ .bInterfaceSubClass = 0x42,
+ .bInterfaceProtocol = 1,
+};
+
+static struct usb_endpoint_descriptor adb_highspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor adb_highspeed_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor adb_fullspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor adb_fullspeed_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *fs_adb_descs[] = {
+ (struct usb_descriptor_header *) &adb_interface_desc,
+ (struct usb_descriptor_header *) &adb_fullspeed_in_desc,
+ (struct usb_descriptor_header *) &adb_fullspeed_out_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *hs_adb_descs[] = {
+ (struct usb_descriptor_header *) &adb_interface_desc,
+ (struct usb_descriptor_header *) &adb_highspeed_in_desc,
+ (struct usb_descriptor_header *) &adb_highspeed_out_desc,
+ NULL,
+};
+
+static void adb_ready_callback(void);
+static void adb_closed_callback(void);
+
+/* global ADB device instance, set up in adb_setup() and used by adb_open() */
+static struct adb_dev *_adb_dev;
+
+static inline struct adb_dev *func_to_adb(struct usb_function *f)
+{
+ return container_of(f, struct adb_dev, function);
+}
+
+
+static struct usb_request *adb_request_new(struct usb_ep *ep, int buffer_size)
+{
+ struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (!req)
+ return NULL;
+
+ /* now allocate buffers for the requests */
+ req->buf = kmalloc(buffer_size, GFP_KERNEL);
+ if (!req->buf) {
+ usb_ep_free_request(ep, req);
+ return NULL;
+ }
+
+ return req;
+}
+
+static void adb_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+ if (req) {
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+ }
+}
+
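+/* non-blocking mutual exclusion: at most one opener, reader and writer at a time */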
+static inline int adb_lock(atomic_t *excl)
+{
+ if (atomic_inc_return(excl) == 1) {
+ return 0;
+ } else {
+ atomic_dec(excl);
+ return -1;
+ }
+}
+
+static inline void adb_unlock(atomic_t *excl)
+{
+ atomic_dec(excl);
+}
+
+/* add a request to the tail of a list */
+void adb_req_put(struct adb_dev *dev, struct list_head *head,
+ struct usb_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&req->list, head);
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+struct usb_request *adb_req_get(struct adb_dev *dev, struct list_head *head)
+{
+ unsigned long flags;
+ struct usb_request *req;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (list_empty(head)) {
+ req = 0;
+ } else {
+ req = list_first_entry(head, struct usb_request, list);
+ list_del(&req->list);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return req;
+}
+
+static void adb_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+ struct adb_dev *dev = _adb_dev;
+
+ if (req->status != 0)
+ dev->error = 1;
+
+ adb_req_put(dev, &dev->tx_idle, req);
+
+ wake_up(&dev->write_wq);
+}
+
+static void adb_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+ struct adb_dev *dev = _adb_dev;
+
+ dev->rx_done = 1;
+ if (req->status != 0 && req->status != -ECONNRESET)
+ dev->error = 1;
+
+ wake_up(&dev->read_wq);
+}
+
+static int adb_create_bulk_endpoints(struct adb_dev *dev,
+ struct usb_endpoint_descriptor *in_desc,
+ struct usb_endpoint_descriptor *out_desc)
+{
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req;
+ struct usb_ep *ep;
+ int i;
+
+ DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+
+ ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+ if (!ep) {
+ DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+ return -ENODEV;
+ }
+ DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+ ep->driver_data = dev; /* claim the endpoint */
+ dev->ep_in = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+ if (!ep) {
+ DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+ return -ENODEV;
+ }
+ DBG(cdev, "usb_ep_autoconfig for adb ep_out got %s\n", ep->name);
+ ep->driver_data = dev; /* claim the endpoint */
+ dev->ep_out = ep;
+
+ /* now allocate requests for our endpoints */
+ req = adb_request_new(dev->ep_out, ADB_BULK_BUFFER_SIZE);
+ if (!req)
+ goto fail;
+ req->complete = adb_complete_out;
+ dev->rx_req = req;
+
+ for (i = 0; i < TX_REQ_MAX; i++) {
+ req = adb_request_new(dev->ep_in, ADB_BULK_BUFFER_SIZE);
+ if (!req)
+ goto fail;
+ req->complete = adb_complete_in;
+ adb_req_put(dev, &dev->tx_idle, req);
+ }
+
+ return 0;
+
+fail:
+ printk(KERN_ERR "adb_bind() could not allocate requests\n");
+ return -1;
+}
+
+static ssize_t adb_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct adb_dev *dev = fp->private_data;
+ struct usb_request *req;
+ int r = count, xfer;
+ int ret;
+
+ pr_debug("adb_read(%zu)\n", count);
+ if (!_adb_dev)
+ return -ENODEV;
+
+ if (count > ADB_BULK_BUFFER_SIZE)
+ return -EINVAL;
+
+ if (adb_lock(&dev->read_excl))
+ return -EBUSY;
+
+ /* we will block until we're online */
+ while (!(dev->online || dev->error)) {
+ pr_debug("adb_read: waiting for online state\n");
+ ret = wait_event_interruptible(dev->read_wq,
+ (dev->online || dev->error));
+ if (ret < 0) {
+ adb_unlock(&dev->read_excl);
+ return ret;
+ }
+ }
+ if (dev->error) {
+ r = -EIO;
+ goto done;
+ }
+
+requeue_req:
+ /* queue a request */
+ req = dev->rx_req;
+ req->length = count;
+ dev->rx_done = 0;
+ ret = usb_ep_queue(dev->ep_out, req, GFP_ATOMIC);
+ if (ret < 0) {
+ pr_debug("adb_read: failed to queue req %p (%d)\n", req, ret);
+ r = -EIO;
+ dev->error = 1;
+ goto done;
+ } else {
+ pr_debug("rx %p queue\n", req);
+ }
+
+ /* wait for a request to complete */
+ ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+ if (ret < 0) {
+ if (ret != -ERESTARTSYS)
+ dev->error = 1;
+ r = ret;
+ usb_ep_dequeue(dev->ep_out, req);
+ goto done;
+ }
+ if (!dev->error) {
+ /* If we got a 0-len packet, throw it back and try again. */
+ if (req->actual == 0)
+ goto requeue_req;
+
+ pr_debug("rx %p %d\n", req, req->actual);
+ xfer = (req->actual < count) ? req->actual : count;
+ if (copy_to_user(buf, req->buf, xfer))
+ r = -EFAULT;
+
+ } else
+ r = -EIO;
+
+done:
+ adb_unlock(&dev->read_excl);
+ pr_debug("adb_read returning %d\n", r);
+ return r;
+}
+
+static ssize_t adb_write(struct file *fp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct adb_dev *dev = fp->private_data;
+ struct usb_request *req = 0;
+ int r = count, xfer;
+ int ret;
+
+ if (!_adb_dev)
+ return -ENODEV;
+ pr_debug("adb_write(%zu)\n", count);
+
+ if (adb_lock(&dev->write_excl))
+ return -EBUSY;
+
+ while (count > 0) {
+ if (dev->error) {
+ pr_debug("adb_write dev->error\n");
+ r = -EIO;
+ break;
+ }
+
+ /* get an idle tx request to use */
+ req = 0;
+ ret = wait_event_interruptible(dev->write_wq,
+ (req = adb_req_get(dev, &dev->tx_idle)) || dev->error);
+
+ if (ret < 0) {
+ r = ret;
+ break;
+ }
+
+ if (req != 0) {
+ if (count > ADB_BULK_BUFFER_SIZE)
+ xfer = ADB_BULK_BUFFER_SIZE;
+ else
+ xfer = count;
+ if (copy_from_user(req->buf, buf, xfer)) {
+ r = -EFAULT;
+ break;
+ }
+
+ req->length = xfer;
+ ret = usb_ep_queue(dev->ep_in, req, GFP_ATOMIC);
+ if (ret < 0) {
+ pr_debug("adb_write: xfer error %d\n", ret);
+ dev->error = 1;
+ r = -EIO;
+ break;
+ }
+
+ buf += xfer;
+ count -= xfer;
+
+ /* zero this so we don't try to free it on error exit */
+ req = 0;
+ }
+ }
+
+ if (req)
+ adb_req_put(dev, &dev->tx_idle, req);
+
+ adb_unlock(&dev->write_excl);
+ pr_debug("adb_write returning %d\n", r);
+ return r;
+}
+
+static int adb_open(struct inode *ip, struct file *fp)
+{
+ pr_info("adb_open\n");
+ if (!_adb_dev)
+ return -ENODEV;
+
+ if (adb_lock(&_adb_dev->open_excl))
+ return -EBUSY;
+
+ fp->private_data = _adb_dev;
+
+ /* clear the error latch */
+ _adb_dev->error = 0;
+
+ adb_ready_callback();
+
+ return 0;
+}
+
+static int adb_release(struct inode *ip, struct file *fp)
+{
+ pr_info("adb_release\n");
+
+ adb_closed_callback();
+
+ adb_unlock(&_adb_dev->open_excl);
+ return 0;
+}
+
+/* file operations for ADB device /dev/android_adb */
+static const struct file_operations adb_fops = {
+ .owner = THIS_MODULE,
+ .read = adb_read,
+ .write = adb_write,
+ .open = adb_open,
+ .release = adb_release,
+};
+
+static struct miscdevice adb_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = adb_shortname,
+ .fops = &adb_fops,
+};
+
+static int
+adb_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct adb_dev *dev = func_to_adb(f);
+ int id;
+ int ret;
+
+ dev->cdev = cdev;
+ DBG(cdev, "adb_function_bind dev: %p\n", dev);
+
+ /* allocate interface ID(s) */
+ id = usb_interface_id(c, f);
+ if (id < 0)
+ return id;
+ adb_interface_desc.bInterfaceNumber = id;
+
+ /* allocate endpoints */
+ ret = adb_create_bulk_endpoints(dev, &adb_fullspeed_in_desc,
+ &adb_fullspeed_out_desc);
+ if (ret)
+ return ret;
+
+ /* support high speed hardware */
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ adb_highspeed_in_desc.bEndpointAddress =
+ adb_fullspeed_in_desc.bEndpointAddress;
+ adb_highspeed_out_desc.bEndpointAddress =
+ adb_fullspeed_out_desc.bEndpointAddress;
+ }
+
+ DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+ gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ f->name, dev->ep_in->name, dev->ep_out->name);
+ return 0;
+}
+
+static void
+adb_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct adb_dev *dev = func_to_adb(f);
+ struct usb_request *req;
+
+
+ dev->online = 0;
+ dev->error = 1;
+
+ wake_up(&dev->read_wq);
+
+ adb_request_free(dev->rx_req, dev->ep_out);
+ while ((req = adb_req_get(dev, &dev->tx_idle)))
+ adb_request_free(req, dev->ep_in);
+}
+
+static int adb_function_set_alt(struct usb_function *f,
+ unsigned intf, unsigned alt)
+{
+ struct adb_dev *dev = func_to_adb(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int ret;
+
+ DBG(cdev, "adb_function_set_alt intf: %d alt: %d\n", intf, alt);
+
+ ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+ if (ret)
+ return ret;
+
+ ret = usb_ep_enable(dev->ep_in);
+ if (ret)
+ return ret;
+
+ ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+ if (ret)
+ return ret;
+
+ ret = usb_ep_enable(dev->ep_out);
+ if (ret) {
+ usb_ep_disable(dev->ep_in);
+ return ret;
+ }
+ dev->online = 1;
+
+ /* readers may be blocked waiting for us to go online */
+ wake_up(&dev->read_wq);
+ return 0;
+}
+
+static void adb_function_disable(struct usb_function *f)
+{
+ struct adb_dev *dev = func_to_adb(f);
+ struct usb_composite_dev *cdev = dev->cdev;
+
+ DBG(cdev, "adb_function_disable cdev %p\n", cdev);
+ dev->online = 0;
+ dev->error = 1;
+ usb_ep_disable(dev->ep_in);
+ usb_ep_disable(dev->ep_out);
+
+ /* readers may be blocked waiting for us to go online */
+ wake_up(&dev->read_wq);
+
+ VDBG(cdev, "%s disabled\n", dev->function.name);
+}
+
+static int adb_bind_config(struct usb_configuration *c)
+{
+ struct adb_dev *dev = _adb_dev;
+
+ printk(KERN_INFO "adb_bind_config\n");
+
+ dev->cdev = c->cdev;
+ dev->function.name = "adb";
+ dev->function.fs_descriptors = fs_adb_descs;
+ dev->function.hs_descriptors = hs_adb_descs;
+ dev->function.bind = adb_function_bind;
+ dev->function.unbind = adb_function_unbind;
+ dev->function.set_alt = adb_function_set_alt;
+ dev->function.disable = adb_function_disable;
+
+ return usb_add_function(c, &dev->function);
+}
+
+static int adb_setup(void)
+{
+ struct adb_dev *dev;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->lock);
+
+ init_waitqueue_head(&dev->read_wq);
+ init_waitqueue_head(&dev->write_wq);
+
+ atomic_set(&dev->open_excl, 0);
+ atomic_set(&dev->read_excl, 0);
+ atomic_set(&dev->write_excl, 0);
+
+ INIT_LIST_HEAD(&dev->tx_idle);
+
+ _adb_dev = dev;
+
+ ret = misc_register(&adb_device);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ kfree(dev);
+ printk(KERN_ERR "adb gadget driver failed to initialize\n");
+ return ret;
+}
+
+static void adb_cleanup(void)
+{
+ misc_deregister(&adb_device);
+
+ kfree(_adb_dev);
+ _adb_dev = NULL;
+}
diff --git a/drivers/usb/gadget/f_audio_source.c b/drivers/usb/gadget/f_audio_source.c
new file mode 100644
index 000000000000..56dcf217cfe5
--- /dev/null
+++ b/drivers/usb/gadget/f_audio_source.c
@@ -0,0 +1,828 @@
+/*
+ * Gadget Function Driver for USB audio source device
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/usb/audio.h>
+#include <linux/wait.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+
+#define SAMPLE_RATE 44100
+#define FRAMES_PER_MSEC (SAMPLE_RATE / 1000)
+
+#define IN_EP_MAX_PACKET_SIZE 384
+
+/* Number of requests to allocate */
+#define IN_EP_REQ_COUNT 4
+
+#define AUDIO_AC_INTERFACE 0
+#define AUDIO_AS_INTERFACE 1
+#define AUDIO_NUM_INTERFACES 2
+
+/* B.3.1 Standard AC Interface Descriptor */
+static struct usb_interface_descriptor ac_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+};
+
+DECLARE_UAC_AC_HEADER_DESCRIPTOR(2);
+
+#define UAC_DT_AC_HEADER_LENGTH UAC_DT_AC_HEADER_SIZE(AUDIO_NUM_INTERFACES)
+/* 1 input terminal, 1 output terminal and 1 feature unit */
+#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH \
+ + UAC_DT_INPUT_TERMINAL_SIZE + UAC_DT_OUTPUT_TERMINAL_SIZE \
+ + UAC_DT_FEATURE_UNIT_SIZE(0))
+/* B.3.2 Class-Specific AC Interface Descriptor */
+static struct uac1_ac_header_descriptor_2 ac_header_desc = {
+ .bLength = UAC_DT_AC_HEADER_LENGTH,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_HEADER,
+ .bcdADC = __constant_cpu_to_le16(0x0100),
+ .wTotalLength = __constant_cpu_to_le16(UAC_DT_TOTAL_LENGTH),
+ .bInCollection = AUDIO_NUM_INTERFACES,
+ .baInterfaceNr = {
+ [0] = AUDIO_AC_INTERFACE,
+ [1] = AUDIO_AS_INTERFACE,
+ }
+};
+
+#define INPUT_TERMINAL_ID 1
+static struct uac_input_terminal_descriptor input_terminal_desc = {
+ .bLength = UAC_DT_INPUT_TERMINAL_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_INPUT_TERMINAL,
+ .bTerminalID = INPUT_TERMINAL_ID,
+ .wTerminalType = UAC_INPUT_TERMINAL_MICROPHONE,
+ .bAssocTerminal = 0,
+ .wChannelConfig = 0x3,
+};
+
+DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0);
+
+#define FEATURE_UNIT_ID 2
+static struct uac_feature_unit_descriptor_0 feature_unit_desc = {
+ .bLength = UAC_DT_FEATURE_UNIT_SIZE(0),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_FEATURE_UNIT,
+ .bUnitID = FEATURE_UNIT_ID,
+ .bSourceID = INPUT_TERMINAL_ID,
+ .bControlSize = 2,
+};
+
+#define OUTPUT_TERMINAL_ID 3
+static struct uac1_output_terminal_descriptor output_terminal_desc = {
+ .bLength = UAC_DT_OUTPUT_TERMINAL_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
+ .bTerminalID = OUTPUT_TERMINAL_ID,
+ .wTerminalType = UAC_TERMINAL_STREAMING,
+ .bAssocTerminal = FEATURE_UNIT_ID,
+ .bSourceID = FEATURE_UNIT_ID,
+};
+
+/* B.4.1 Standard AS Interface Descriptor */
+static struct usb_interface_descriptor as_interface_alt_0_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+static struct usb_interface_descriptor as_interface_alt_1_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bAlternateSetting = 1,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+/* B.4.2 Class-Specific AS Interface Descriptor */
+static struct uac1_as_header_descriptor as_header_desc = {
+ .bLength = UAC_DT_AS_HEADER_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_AS_GENERAL,
+ .bTerminalLink = INPUT_TERMINAL_ID,
+ .bDelay = 1,
+ .wFormatTag = UAC_FORMAT_TYPE_I_PCM,
+};
+
+DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);
+
+static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = {
+ .bLength = UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_FORMAT_TYPE,
+ .bFormatType = UAC_FORMAT_TYPE_I,
+ .bSubframeSize = 2,
+ .bBitResolution = 16,
+ .bSamFreqType = 1,
+};
+
+/* Standard ISO IN Endpoint Descriptor for highspeed */
+static struct usb_endpoint_descriptor hs_as_in_ep_desc = {
+ .bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_SYNC_SYNC
+ | USB_ENDPOINT_XFER_ISOC,
+ .wMaxPacketSize = __constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
+ .bInterval = 4, /* poll 1 per millisecond */
+};
+
+/* Standard ISO IN Endpoint Descriptor for fullspeed */
+static struct usb_endpoint_descriptor fs_as_in_ep_desc = {
+ .bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_SYNC_SYNC
+ | USB_ENDPOINT_XFER_ISOC,
+ .wMaxPacketSize = __constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
+ .bInterval = 1, /* poll 1 per millisecond */
+};
+
+/* Class-specific AS ISO IN Endpoint Descriptor */
+static struct uac_iso_endpoint_descriptor as_iso_in_desc = {
+ .bLength = UAC_ISO_ENDPOINT_DESC_SIZE,
+ .bDescriptorType = USB_DT_CS_ENDPOINT,
+ .bDescriptorSubtype = UAC_EP_GENERAL,
+ .bmAttributes = 1,
+ .bLockDelayUnits = 1,
+ .wLockDelay = __constant_cpu_to_le16(1),
+};
+
+static struct usb_descriptor_header *hs_audio_desc[] = {
+ (struct usb_descriptor_header *)&ac_interface_desc,
+ (struct usb_descriptor_header *)&ac_header_desc,
+
+ (struct usb_descriptor_header *)&input_terminal_desc,
+ (struct usb_descriptor_header *)&output_terminal_desc,
+ (struct usb_descriptor_header *)&feature_unit_desc,
+
+ (struct usb_descriptor_header *)&as_interface_alt_0_desc,
+ (struct usb_descriptor_header *)&as_interface_alt_1_desc,
+ (struct usb_descriptor_header *)&as_header_desc,
+
+ (struct usb_descriptor_header *)&as_type_i_desc,
+
+ (struct usb_descriptor_header *)&hs_as_in_ep_desc,
+ (struct usb_descriptor_header *)&as_iso_in_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *fs_audio_desc[] = {
+ (struct usb_descriptor_header *)&ac_interface_desc,
+ (struct usb_descriptor_header *)&ac_header_desc,
+
+ (struct usb_descriptor_header *)&input_terminal_desc,
+ (struct usb_descriptor_header *)&output_terminal_desc,
+ (struct usb_descriptor_header *)&feature_unit_desc,
+
+ (struct usb_descriptor_header *)&as_interface_alt_0_desc,
+ (struct usb_descriptor_header *)&as_interface_alt_1_desc,
+ (struct usb_descriptor_header *)&as_header_desc,
+
+ (struct usb_descriptor_header *)&as_type_i_desc,
+
+ (struct usb_descriptor_header *)&fs_as_in_ep_desc,
+ (struct usb_descriptor_header *)&as_iso_in_desc,
+ NULL,
+};
+
+static struct snd_pcm_hardware audio_hw_info = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_BATCH |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER,
+
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 2,
+ .channels_max = 2,
+ .rate_min = SAMPLE_RATE,
+ .rate_max = SAMPLE_RATE,
+
+ .buffer_bytes_max = 1024 * 1024,
+ .period_bytes_min = 64,
+ .period_bytes_max = 512 * 1024,
+ .periods_min = 2,
+ .periods_max = 1024,
+};
+
+/*-------------------------------------------------------------------------*/
+
+struct audio_source_config {
+ int card;
+ int device;
+};
+
+struct audio_dev {
+ struct usb_function func;
+ struct snd_card *card;
+ struct snd_pcm *pcm;
+ struct snd_pcm_substream *substream;
+
+ struct list_head idle_reqs;
+ struct usb_ep *in_ep;
+
+ spinlock_t lock;
+
+ /* beginning, end and current position in our buffer */
+ void *buffer_start;
+ void *buffer_end;
+ void *buffer_pos;
+
+ /* byte size of a "period" */
+ unsigned int period;
+ /* bytes sent since last call to snd_pcm_period_elapsed */
+ unsigned int period_offset;
+ /* time we started playing */
+ ktime_t start_time;
+ /* number of frames sent since start_time */
+ s64 frames_sent;
+};
+
+static inline struct audio_dev *func_to_audio(struct usb_function *f)
+{
+ return container_of(f, struct audio_dev, func);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_request *audio_request_new(struct usb_ep *ep, int buffer_size)
+{
+ struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (!req)
+ return NULL;
+
+ req->buf = kmalloc(buffer_size, GFP_KERNEL);
+ if (!req->buf) {
+ usb_ep_free_request(ep, req);
+ return NULL;
+ }
+ req->length = buffer_size;
+ return req;
+}
+
+static void audio_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+ if (req) {
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+ }
+}
+
+static void audio_req_put(struct audio_dev *audio, struct usb_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&audio->lock, flags);
+ list_add_tail(&req->list, &audio->idle_reqs);
+ spin_unlock_irqrestore(&audio->lock, flags);
+}
+
+static struct usb_request *audio_req_get(struct audio_dev *audio)
+{
+ unsigned long flags;
+ struct usb_request *req;
+
+ spin_lock_irqsave(&audio->lock, flags);
+ if (list_empty(&audio->idle_reqs)) {
+ req = 0;
+ } else {
+ req = list_first_entry(&audio->idle_reqs, struct usb_request,
+ list);
+ list_del(&req->list);
+ }
+ spin_unlock_irqrestore(&audio->lock, flags);
+ return req;
+}
+
+/* send the appropriate number of packets to match our bitrate */
+static void audio_send(struct audio_dev *audio)
+{
+ struct snd_pcm_runtime *runtime;
+ struct usb_request *req;
+ int length, length1, length2, ret;
+ s64 msecs;
+ s64 frames;
+ ktime_t now;
+
+ /* audio->substream will be null if we have been closed */
+ if (!audio->substream)
+ return;
+ /* audio->buffer_pos will be null if we have been stopped */
+ if (!audio->buffer_pos)
+ return;
+
+ runtime = audio->substream->runtime;
+
+ /* compute number of frames to send */
+ now = ktime_get();
+ msecs = ktime_to_ns(now) - ktime_to_ns(audio->start_time);
+ do_div(msecs, 1000000);
+ frames = msecs * SAMPLE_RATE;
+ do_div(frames, 1000);
+
+ /* Readjust our frames_sent if we fall too far behind.
+ * If we get too far behind it is better to drop some frames than
+ * to keep sending data too fast in an attempt to catch up.
+ */
+ if (frames - audio->frames_sent > 10 * FRAMES_PER_MSEC)
+ audio->frames_sent = frames - FRAMES_PER_MSEC;
+
+ frames -= audio->frames_sent;
+
+ /* We need to send something to keep the pipeline going */
+ if (frames <= 0)
+ frames = FRAMES_PER_MSEC;
+
+ while (frames > 0) {
+ req = audio_req_get(audio);
+ if (!req)
+ break;
+
+ length = frames_to_bytes(runtime, frames);
+ if (length > IN_EP_MAX_PACKET_SIZE)
+ length = IN_EP_MAX_PACKET_SIZE;
+
+ if (audio->buffer_pos + length > audio->buffer_end)
+ length1 = audio->buffer_end - audio->buffer_pos;
+ else
+ length1 = length;
+ memcpy(req->buf, audio->buffer_pos, length1);
+ if (length1 < length) {
+ /* Wrap around and copy remaining length
+ * at beginning of buffer.
+ */
+ length2 = length - length1;
+ memcpy(req->buf + length1, audio->buffer_start,
+ length2);
+ audio->buffer_pos = audio->buffer_start + length2;
+ } else {
+ audio->buffer_pos += length1;
+ if (audio->buffer_pos >= audio->buffer_end)
+ audio->buffer_pos = audio->buffer_start;
+ }
+
+ req->length = length;
+ ret = usb_ep_queue(audio->in_ep, req, GFP_ATOMIC);
+ if (ret < 0) {
+ pr_err("usb_ep_queue failed ret: %d\n", ret);
+ audio_req_put(audio, req);
+ break;
+ }
+
+ frames -= bytes_to_frames(runtime, length);
+ audio->frames_sent += bytes_to_frames(runtime, length);
+ }
+}
+
+static void audio_control_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ /* nothing to do here */
+}
+
+static void audio_data_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct audio_dev *audio = req->context;
+
+ pr_debug("audio_data_complete req->status %d req->actual %d\n",
+ req->status, req->actual);
+
+ audio_req_put(audio, req);
+
+ if (!audio->buffer_start || req->status)
+ return;
+
+ audio->period_offset += req->actual;
+ if (audio->period_offset >= audio->period) {
+ snd_pcm_period_elapsed(audio->substream);
+ audio->period_offset = 0;
+ }
+ audio_send(audio);
+}
+
+static int audio_set_endpoint_req(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+{
+ int value = -EOPNOTSUPP;
+ u16 ep = le16_to_cpu(ctrl->wIndex);
+ u16 len = le16_to_cpu(ctrl->wLength);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+
+ pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+ ctrl->bRequest, w_value, len, ep);
+
+ switch (ctrl->bRequest) {
+ case UAC_SET_CUR:
+ case UAC_SET_MIN:
+ case UAC_SET_MAX:
+ case UAC_SET_RES:
+ value = len;
+ break;
+ default:
+ break;
+ }
+
+ return value;
+}
+
+static int audio_get_endpoint_req(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int value = -EOPNOTSUPP;
+ u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+ u16 len = le16_to_cpu(ctrl->wLength);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u8 *buf = cdev->req->buf;
+
+ pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+ ctrl->bRequest, w_value, len, ep);
+
+ if (w_value == UAC_EP_CS_ATTR_SAMPLE_RATE << 8) {
+ switch (ctrl->bRequest) {
+ case UAC_GET_CUR:
+ case UAC_GET_MIN:
+ case UAC_GET_MAX:
+ case UAC_GET_RES:
+ /* return our sample rate */
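+ /* UAC encodes the rate as a 3-byte little-endian value,
+ * e.g. 44100 Hz -> 0x44 0xAC 0x00
+ */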
+ buf[0] = (u8)SAMPLE_RATE;
+ buf[1] = (u8)(SAMPLE_RATE >> 8);
+ buf[2] = (u8)(SAMPLE_RATE >> 16);
+ value = 3;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return value;
+}
+
+static int
+audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ /* composite driver infrastructure handles everything; interface
+ * activation uses set_alt().
+ */
+ switch (ctrl->bRequestType) {
+ case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+ value = audio_set_endpoint_req(f, ctrl);
+ break;
+
+ case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+ value = audio_get_endpoint_req(f, ctrl);
+ break;
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ pr_debug("audio req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->zero = 0;
+ req->length = value;
+ req->complete = audio_control_complete;
+ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (value < 0)
+ pr_err("audio response on err %d\n", value);
+ }
+
+ /* device either stalls (value < 0) or reports success */
+ return value;
+}
+
+static int audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct audio_dev *audio = func_to_audio(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int ret;
+
+ pr_debug("audio_set_alt intf %d, alt %d\n", intf, alt);
+
+ ret = config_ep_by_speed(cdev->gadget, f, audio->in_ep);
+ if (ret)
+ return ret;
+
+ usb_ep_enable(audio->in_ep);
+ return 0;
+}
+
+static void audio_disable(struct usb_function *f)
+{
+ struct audio_dev *audio = func_to_audio(f);
+
+ pr_debug("audio_disable\n");
+ usb_ep_disable(audio->in_ep);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void audio_build_desc(struct audio_dev *audio)
+{
+ u8 *sam_freq;
+ int rate;
+
+ /* Set channel numbers */
+ input_terminal_desc.bNrChannels = 2;
+ as_type_i_desc.bNrChannels = 2;
+
+ /* Set sample rates */
+ rate = SAMPLE_RATE;
+ sam_freq = as_type_i_desc.tSamFreq[0];
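+ /* Note: copying the low three bytes of 'rate' assumes a little-endian
+ * CPU, matching the descriptor's byte order.
+ */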
+ memcpy(sam_freq, &rate, 3);
+}
+
+/* audio function driver setup/binding */
+static int
+audio_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct audio_dev *audio = func_to_audio(f);
+ int status;
+ struct usb_ep *ep;
+ struct usb_request *req;
+ int i;
+
+ audio_build_desc(audio);
+
+ /* allocate instance-specific interface IDs, and patch descriptors */
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ ac_interface_desc.bInterfaceNumber = status;
+
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ as_interface_alt_0_desc.bInterfaceNumber = status;
+ as_interface_alt_1_desc.bInterfaceNumber = status;
+
+ status = -ENODEV;
+
+ /* allocate our endpoint */
+ ep = usb_ep_autoconfig(cdev->gadget, &fs_as_in_ep_desc);
+ if (!ep)
+ goto fail;
+ audio->in_ep = ep;
+ ep->driver_data = audio; /* claim */
+
+ if (gadget_is_dualspeed(c->cdev->gadget))
+ hs_as_in_ep_desc.bEndpointAddress =
+ fs_as_in_ep_desc.bEndpointAddress;
+
+ f->fs_descriptors = fs_audio_desc;
+ f->hs_descriptors = hs_audio_desc;
+
+ for (i = 0, status = 0; i < IN_EP_REQ_COUNT && status == 0; i++) {
+ req = audio_request_new(ep, IN_EP_MAX_PACKET_SIZE);
+ if (req) {
+ req->context = audio;
+ req->complete = audio_data_complete;
+ audio_req_put(audio, req);
+ } else
+ status = -ENOMEM;
+ }
+
+fail:
+ return status;
+}
+
+static void
+audio_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct audio_dev *audio = func_to_audio(f);
+ struct usb_request *req;
+
+ while ((req = audio_req_get(audio)))
+ audio_request_free(req, audio->in_ep);
+
+ snd_card_free_when_closed(audio->card);
+ audio->card = NULL;
+ audio->pcm = NULL;
+ audio->substream = NULL;
+ audio->in_ep = NULL;
+}
+
+static void audio_pcm_playback_start(struct audio_dev *audio)
+{
+ audio->start_time = ktime_get();
+ audio->frames_sent = 0;
+ audio_send(audio);
+}
+
+static void audio_pcm_playback_stop(struct audio_dev *audio)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&audio->lock, flags);
+ audio->buffer_start = 0;
+ audio->buffer_end = 0;
+ audio->buffer_pos = 0;
+ spin_unlock_irqrestore(&audio->lock, flags);
+}
+
+static int audio_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct audio_dev *audio = substream->private_data;
+
+ runtime->private_data = audio;
+ runtime->hw = audio_hw_info;
+ snd_pcm_limit_hw_rates(runtime);
+ runtime->hw.channels_max = 2;
+
+ audio->substream = substream;
+ return 0;
+}
+
+static int audio_pcm_close(struct snd_pcm_substream *substream)
+{
+ struct audio_dev *audio = substream->private_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&audio->lock, flags);
+ audio->substream = NULL;
+ spin_unlock_irqrestore(&audio->lock, flags);
+
+ return 0;
+}
+
+static int audio_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ unsigned int channels = params_channels(params);
+ unsigned int rate = params_rate(params);
+
+ if (rate != SAMPLE_RATE)
+ return -EINVAL;
+ if (channels != 2)
+ return -EINVAL;
+
+ return snd_pcm_lib_alloc_vmalloc_buffer(substream,
+ params_buffer_bytes(params));
+}
+
+static int audio_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+ return snd_pcm_lib_free_vmalloc_buffer(substream);
+}
+
+static int audio_pcm_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct audio_dev *audio = runtime->private_data;
+
+ audio->period = snd_pcm_lib_period_bytes(substream);
+ audio->period_offset = 0;
+ audio->buffer_start = runtime->dma_area;
+ audio->buffer_end = audio->buffer_start
+ + snd_pcm_lib_buffer_bytes(substream);
+ audio->buffer_pos = audio->buffer_start;
+
+ return 0;
+}
+
+static snd_pcm_uframes_t audio_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct audio_dev *audio = runtime->private_data;
+ ssize_t bytes = audio->buffer_pos - audio->buffer_start;
+
+ /* return offset of next frame to fill in our buffer */
+ return bytes_to_frames(runtime, bytes);
+}
+
+static int audio_pcm_playback_trigger(struct snd_pcm_substream *substream,
+ int cmd)
+{
+ struct audio_dev *audio = substream->runtime->private_data;
+ int ret = 0;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ audio_pcm_playback_start(audio);
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ audio_pcm_playback_stop(audio);
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static struct audio_dev _audio_dev = {
+ .func = {
+ .name = "audio_source",
+ .bind = audio_bind,
+ .unbind = audio_unbind,
+ .set_alt = audio_set_alt,
+ .setup = audio_setup,
+ .disable = audio_disable,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(_audio_dev.lock),
+ .idle_reqs = LIST_HEAD_INIT(_audio_dev.idle_reqs),
+};
+
+static struct snd_pcm_ops audio_playback_ops = {
+ .open = audio_pcm_open,
+ .close = audio_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = audio_pcm_hw_params,
+ .hw_free = audio_pcm_hw_free,
+ .prepare = audio_pcm_prepare,
+ .trigger = audio_pcm_playback_trigger,
+ .pointer = audio_pcm_pointer,
+};
+
+int audio_source_bind_config(struct usb_configuration *c,
+ struct audio_source_config *config)
+{
+ struct audio_dev *audio;
+ struct snd_card *card;
+ struct snd_pcm *pcm;
+ int err;
+
+ config->card = -1;
+ config->device = -1;
+
+ audio = &_audio_dev;
+
+ err = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
+ THIS_MODULE, 0, &card);
+ if (err)
+ return err;
+
+ snd_card_set_dev(card, &c->cdev->gadget->dev);
+
+ err = snd_pcm_new(card, "USB audio source", 0, 1, 0, &pcm);
+ if (err)
+ goto pcm_fail;
+ pcm->private_data = audio;
+ pcm->info_flags = 0;
+ audio->pcm = pcm;
+
+ strlcpy(pcm->name, "USB gadget audio", sizeof(pcm->name));
+
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &audio_playback_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+ NULL, 0, 64 * 1024);
+
+ strlcpy(card->driver, "audio_source", sizeof(card->driver));
+ strlcpy(card->shortname, card->driver, sizeof(card->shortname));
+ strlcpy(card->longname, "USB accessory audio source",
+ sizeof(card->longname));
+
+ err = snd_card_register(card);
+ if (err)
+ goto register_fail;
+
+ err = usb_add_function(c, &audio->func);
+ if (err)
+ goto add_fail;
+
+ config->card = pcm->card->number;
+ config->device = pcm->device;
+ audio->card = card;
+ return 0;
+
+add_fail:
+register_fail:
+pcm_fail:
+ snd_card_free(card);
+ return err;
+}
diff --git a/drivers/usb/gadget/f_mtp.c b/drivers/usb/gadget/f_mtp.c
new file mode 100644
index 000000000000..9ab94697c196
--- /dev/null
+++ b/drivers/usb/gadget/f_mtp.c
@@ -0,0 +1,1283 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#include <linux/usb.h>
+#include <linux/usb_usual.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/f_mtp.h>
+
+#define MTP_BULK_BUFFER_SIZE 16384
+#define INTR_BUFFER_SIZE 28
+
+/* String IDs */
+#define INTERFACE_STRING_INDEX 0
+
+/* values for mtp_dev.state */
+#define STATE_OFFLINE 0 /* initial state, disconnected */
+#define STATE_READY 1 /* ready for userspace calls */
+#define STATE_BUSY 2 /* processing userspace calls */
+#define STATE_CANCELED 3 /* transaction canceled by host */
+#define STATE_ERROR 4 /* error from completion routine */
+
+/* number of tx and rx requests to allocate */
+#define TX_REQ_MAX 4
+#define RX_REQ_MAX 2
+#define INTR_REQ_MAX 5
+
+/* ID for Microsoft MTP OS String */
+#define MTP_OS_STRING_ID 0xEE
+
+/* MTP class requests */
+#define MTP_REQ_CANCEL 0x64
+#define MTP_REQ_GET_EXT_EVENT_DATA 0x65
+#define MTP_REQ_RESET 0x66
+#define MTP_REQ_GET_DEVICE_STATUS 0x67
+
+/* constants for device status */
+#define MTP_RESPONSE_OK 0x2001
+#define MTP_RESPONSE_DEVICE_BUSY 0x2019
+
+static const char mtp_shortname[] = "mtp_usb";
+
+struct mtp_dev {
+ struct usb_function function;
+ struct usb_composite_dev *cdev;
+ spinlock_t lock;
+
+ struct usb_ep *ep_in;
+ struct usb_ep *ep_out;
+ struct usb_ep *ep_intr;
+
+ int state;
+
+ /* synchronize access to our device file */
+ atomic_t open_excl;
+ /* to enforce only one ioctl at a time */
+ atomic_t ioctl_excl;
+
+ struct list_head tx_idle;
+ struct list_head intr_idle;
+
+ wait_queue_head_t read_wq;
+ wait_queue_head_t write_wq;
+ wait_queue_head_t intr_wq;
+ struct usb_request *rx_req[RX_REQ_MAX];
+ int rx_done;
+
+ /* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and
+ * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue
+ */
+ struct workqueue_struct *wq;
+ struct work_struct send_file_work;
+ struct work_struct receive_file_work;
+ struct file *xfer_file;
+ loff_t xfer_file_offset;
+ int64_t xfer_file_length;
+ unsigned xfer_send_header;
+ uint16_t xfer_command;
+ uint32_t xfer_transaction_id;
+ int xfer_result;
+};
+
+static struct usb_interface_descriptor mtp_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bInterfaceNumber = 0,
+ .bNumEndpoints = 3,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC,
+ .bInterfaceProtocol = 0,
+};
+
+static struct usb_interface_descriptor ptp_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bInterfaceNumber = 0,
+ .bNumEndpoints = 3,
+ .bInterfaceClass = USB_CLASS_STILL_IMAGE,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 1,
+};
+
+static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor mtp_intr_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = __constant_cpu_to_le16(INTR_BUFFER_SIZE),
+ .bInterval = 6,
+};
+
+static struct usb_descriptor_header *fs_mtp_descs[] = {
+ (struct usb_descriptor_header *) &mtp_interface_desc,
+ (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
+ (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
+ (struct usb_descriptor_header *) &mtp_intr_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *hs_mtp_descs[] = {
+ (struct usb_descriptor_header *) &mtp_interface_desc,
+ (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
+ (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
+ (struct usb_descriptor_header *) &mtp_intr_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *fs_ptp_descs[] = {
+ (struct usb_descriptor_header *) &ptp_interface_desc,
+ (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
+ (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
+ (struct usb_descriptor_header *) &mtp_intr_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *hs_ptp_descs[] = {
+ (struct usb_descriptor_header *) &ptp_interface_desc,
+ (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
+ (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
+ (struct usb_descriptor_header *) &mtp_intr_desc,
+ NULL,
+};
+
+static struct usb_string mtp_string_defs[] = {
+ /* Naming interface "MTP" so libmtp will recognize us */
+ [INTERFACE_STRING_INDEX].s = "MTP",
+ { }, /* end of list */
+};
+
+static struct usb_gadget_strings mtp_string_table = {
+ .language = 0x0409, /* en-US */
+ .strings = mtp_string_defs,
+};
+
+static struct usb_gadget_strings *mtp_strings[] = {
+ &mtp_string_table,
+ NULL,
+};
+
+/* Microsoft MTP OS String */
+static u8 mtp_os_string[] = {
+ 18, /* sizeof(mtp_os_string) */
+ USB_DT_STRING,
+ /* Signature field: "MSFT100" */
+ 'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
+ /* vendor code */
+ 1,
+ /* padding */
+ 0
+};
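+
+/* Windows discovers MTP support by reading string descriptor 0xEE; the
+ * vendor code above (1) is then used as the bRequest of the vendor request
+ * that mtp_ctrlrequest() answers with mtp_ext_config_desc below.
+ */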
+
+/* Microsoft Extended Configuration Descriptor Header Section */
+struct mtp_ext_config_desc_header {
+ __le32 dwLength;
+ __le16 bcdVersion;
+ __le16 wIndex;
+ __u8 bCount;
+ __u8 reserved[7];
+};
+
+/* Microsoft Extended Configuration Descriptor Function Section */
+struct mtp_ext_config_desc_function {
+ __u8 bFirstInterfaceNumber;
+ __u8 bInterfaceCount;
+ __u8 compatibleID[8];
+ __u8 subCompatibleID[8];
+ __u8 reserved[6];
+};
+
+/* MTP Extended Configuration Descriptor */
+struct {
+ struct mtp_ext_config_desc_header header;
+ struct mtp_ext_config_desc_function function;
+} mtp_ext_config_desc = {
+ .header = {
+ .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
+ .bcdVersion = __constant_cpu_to_le16(0x0100),
+ .wIndex = __constant_cpu_to_le16(4),
+ .bCount = 1,
+ },
+ .function = {
+ .bFirstInterfaceNumber = 0,
+ .bInterfaceCount = 1,
+ .compatibleID = { 'M', 'T', 'P' },
+ },
+};
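+
+/* The "MTP" compatibleID lets Windows bind its built-in MTP class driver
+ * without needing a vendor INF.
+ */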
+
+struct mtp_device_status {
+ __le16 wLength;
+ __le16 wCode;
+};
+
+/* temporary variable used between mtp_open() and mtp_gadget_bind() */
+static struct mtp_dev *_mtp_dev;
+
+static inline struct mtp_dev *func_to_mtp(struct usb_function *f)
+{
+ return container_of(f, struct mtp_dev, function);
+}
+
+static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
+{
+ struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (!req)
+ return NULL;
+
+ /* now allocate buffers for the requests */
+ req->buf = kmalloc(buffer_size, GFP_KERNEL);
+ if (!req->buf) {
+ usb_ep_free_request(ep, req);
+ return NULL;
+ }
+
+ return req;
+}
+
+static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+ if (req) {
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+ }
+}
+
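+/* Exclusion helpers: the first caller to raise the counter from 0 to 1 owns
+ * the resource; a concurrent caller backs the counter off and gets -1, which
+ * open() and ioctl() report as -EBUSY.
+ */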
+static inline int mtp_lock(atomic_t *excl)
+{
+ if (atomic_inc_return(excl) == 1) {
+ return 0;
+ } else {
+ atomic_dec(excl);
+ return -1;
+ }
+}
+
+static inline void mtp_unlock(atomic_t *excl)
+{
+ atomic_dec(excl);
+}
+
+/* add a request to the tail of a list */
+static void mtp_req_put(struct mtp_dev *dev, struct list_head *head,
+ struct usb_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&req->list, head);
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request
+*mtp_req_get(struct mtp_dev *dev, struct list_head *head)
+{
+ unsigned long flags;
+ struct usb_request *req;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (list_empty(head)) {
+ req = 0;
+ } else {
+ req = list_first_entry(head, struct usb_request, list);
+ list_del(&req->list);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return req;
+}
+
+static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+ struct mtp_dev *dev = _mtp_dev;
+
+ if (req->status != 0)
+ dev->state = STATE_ERROR;
+
+ mtp_req_put(dev, &dev->tx_idle, req);
+
+ wake_up(&dev->write_wq);
+}
+
+static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+ struct mtp_dev *dev = _mtp_dev;
+
+ dev->rx_done = 1;
+ if (req->status != 0)
+ dev->state = STATE_ERROR;
+
+ wake_up(&dev->read_wq);
+}
+
+static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
+{
+ struct mtp_dev *dev = _mtp_dev;
+
+ if (req->status != 0)
+ dev->state = STATE_ERROR;
+
+ mtp_req_put(dev, &dev->intr_idle, req);
+
+ wake_up(&dev->intr_wq);
+}
+
+static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
+ struct usb_endpoint_descriptor *in_desc,
+ struct usb_endpoint_descriptor *out_desc,
+ struct usb_endpoint_descriptor *intr_desc)
+{
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req;
+ struct usb_ep *ep;
+ int i;
+
+ DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+
+ ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+ if (!ep) {
+ DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+ return -ENODEV;
+ }
+ DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+ ep->driver_data = dev; /* claim the endpoint */
+ dev->ep_in = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+ if (!ep) {
+ DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+ return -ENODEV;
+ }
+ DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
+ ep->driver_data = dev; /* claim the endpoint */
+ dev->ep_out = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
+ if (!ep) {
+ DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
+ return -ENODEV;
+ }
+ DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
+ ep->driver_data = dev; /* claim the endpoint */
+ dev->ep_intr = ep;
+
+ /* now allocate requests for our endpoints */
+ for (i = 0; i < TX_REQ_MAX; i++) {
+ req = mtp_request_new(dev->ep_in, MTP_BULK_BUFFER_SIZE);
+ if (!req)
+ goto fail;
+ req->complete = mtp_complete_in;
+ mtp_req_put(dev, &dev->tx_idle, req);
+ }
+ for (i = 0; i < RX_REQ_MAX; i++) {
+ req = mtp_request_new(dev->ep_out, MTP_BULK_BUFFER_SIZE);
+ if (!req)
+ goto fail;
+ req->complete = mtp_complete_out;
+ dev->rx_req[i] = req;
+ }
+ for (i = 0; i < INTR_REQ_MAX; i++) {
+ req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
+ if (!req)
+ goto fail;
+ req->complete = mtp_complete_intr;
+ mtp_req_put(dev, &dev->intr_idle, req);
+ }
+
+ return 0;
+
+fail:
+ printk(KERN_ERR "mtp_bind() could not allocate requests\n");
+ return -ENOMEM;
+}
+
+static ssize_t mtp_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct mtp_dev *dev = fp->private_data;
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req;
+ int r = count, xfer;
+ int ret = 0;
+
+ DBG(cdev, "mtp_read(%d)\n", count);
+
+ if (count > MTP_BULK_BUFFER_SIZE)
+ return -EINVAL;
+
+ /* we will block until we're online */
+ DBG(cdev, "mtp_read: waiting for online state\n");
+ ret = wait_event_interruptible(dev->read_wq,
+ dev->state != STATE_OFFLINE);
+ if (ret < 0) {
+ r = ret;
+ goto done;
+ }
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED) {
+ /* report cancelation to userspace */
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+ return -ECANCELED;
+ }
+ dev->state = STATE_BUSY;
+ spin_unlock_irq(&dev->lock);
+
+requeue_req:
+ /* queue a request */
+ req = dev->rx_req[0];
+ req->length = count;
+ dev->rx_done = 0;
+ ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
+ if (ret < 0) {
+ r = -EIO;
+ goto done;
+ } else {
+ DBG(cdev, "rx %p queue\n", req);
+ }
+
+ /* wait for a request to complete */
+ ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+ if (ret < 0) {
+ r = ret;
+ usb_ep_dequeue(dev->ep_out, req);
+ goto done;
+ }
+ if (dev->state == STATE_BUSY) {
+ /* If we got a 0-len packet, throw it back and try again. */
+ if (req->actual == 0)
+ goto requeue_req;
+
+ DBG(cdev, "rx %p %d\n", req, req->actual);
+ xfer = (req->actual < count) ? req->actual : count;
+ r = xfer;
+ if (copy_to_user(buf, req->buf, xfer))
+ r = -EFAULT;
+ } else
+ r = -EIO;
+
+done:
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED)
+ r = -ECANCELED;
+ else if (dev->state != STATE_OFFLINE)
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+
+ DBG(cdev, "mtp_read returning %d\n", r);
+ return r;
+}
+
+static ssize_t mtp_write(struct file *fp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct mtp_dev *dev = fp->private_data;
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req = 0;
+ int r = count, xfer;
+ int sendZLP = 0;
+ int ret;
+
+ DBG(cdev, "mtp_write(%d)\n", count);
+
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED) {
+ /* report cancelation to userspace */
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+ return -ECANCELED;
+ }
+ if (dev->state == STATE_OFFLINE) {
+ spin_unlock_irq(&dev->lock);
+ return -ENODEV;
+ }
+ dev->state = STATE_BUSY;
+ spin_unlock_irq(&dev->lock);
+
+ /* we need to send a zero length packet to signal the end of transfer
+ * if the transfer size is aligned to a packet boundary.
+ */
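+ /* e.g. with a 512-byte bulk maxpacket, a 1024-byte write must be
+ * followed by a ZLP, while a 1000-byte write already ends in a short
+ * packet.
+ */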
+ if ((count & (dev->ep_in->maxpacket - 1)) == 0)
+ sendZLP = 1;
+
+ while (count > 0 || sendZLP) {
+ /* so we exit after sending ZLP */
+ if (count == 0)
+ sendZLP = 0;
+
+ if (dev->state != STATE_BUSY) {
+ DBG(cdev, "mtp_write dev->error\n");
+ r = -EIO;
+ break;
+ }
+
+ /* get an idle tx request to use */
+ req = 0;
+ ret = wait_event_interruptible(dev->write_wq,
+ ((req = mtp_req_get(dev, &dev->tx_idle))
+ || dev->state != STATE_BUSY));
+ if (!req) {
+ r = ret;
+ break;
+ }
+
+ if (count > MTP_BULK_BUFFER_SIZE)
+ xfer = MTP_BULK_BUFFER_SIZE;
+ else
+ xfer = count;
+ if (xfer && copy_from_user(req->buf, buf, xfer)) {
+ r = -EFAULT;
+ break;
+ }
+
+ req->length = xfer;
+ ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+ if (ret < 0) {
+ DBG(cdev, "mtp_write: xfer error %d\n", ret);
+ r = -EIO;
+ break;
+ }
+
+ buf += xfer;
+ count -= xfer;
+
+ /* zero this so we don't try to free it on error exit */
+ req = 0;
+ }
+
+ if (req)
+ mtp_req_put(dev, &dev->tx_idle, req);
+
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED)
+ r = -ECANCELED;
+ else if (dev->state != STATE_OFFLINE)
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+
+ DBG(cdev, "mtp_write returning %d\n", r);
+ return r;
+}
+
+/* read from a local file and write to USB */
+static void send_file_work(struct work_struct *data)
+{
+ struct mtp_dev *dev = container_of(data, struct mtp_dev,
+ send_file_work);
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req = 0;
+ struct mtp_data_header *header;
+ struct file *filp;
+ loff_t offset;
+ int64_t count;
+ int xfer, ret, hdr_size;
+ int r = 0;
+ int sendZLP = 0;
+
+ /* read our parameters */
+ smp_rmb();
+ filp = dev->xfer_file;
+ offset = dev->xfer_file_offset;
+ count = dev->xfer_file_length;
+
+ DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);
+
+ if (dev->xfer_send_header) {
+ hdr_size = sizeof(struct mtp_data_header);
+ count += hdr_size;
+ } else {
+ hdr_size = 0;
+ }
+
+ /* we need to send a zero length packet to signal the end of transfer
+ * if the transfer size is aligned to a packet boundary.
+ */
+ if ((count & (dev->ep_in->maxpacket - 1)) == 0)
+ sendZLP = 1;
+
+ while (count > 0 || sendZLP) {
+ /* so we exit after sending ZLP */
+ if (count == 0)
+ sendZLP = 0;
+
+ /* get an idle tx request to use */
+ req = 0;
+ ret = wait_event_interruptible(dev->write_wq,
+ (req = mtp_req_get(dev, &dev->tx_idle))
+ || dev->state != STATE_BUSY);
+ if (dev->state == STATE_CANCELED) {
+ r = -ECANCELED;
+ break;
+ }
+ if (!req) {
+ r = ret;
+ break;
+ }
+
+ if (count > MTP_BULK_BUFFER_SIZE)
+ xfer = MTP_BULK_BUFFER_SIZE;
+ else
+ xfer = count;
+
+ if (hdr_size) {
+ /* prepend MTP data header */
+ header = (struct mtp_data_header *)req->buf;
+ header->length = __cpu_to_le32(count);
+ header->type = __cpu_to_le16(2); /* data packet */
+ header->command = __cpu_to_le16(dev->xfer_command);
+ header->transaction_id =
+ __cpu_to_le32(dev->xfer_transaction_id);
+ }
+
+ ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size,
+ &offset);
+ if (ret < 0) {
+ r = ret;
+ break;
+ }
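+ /* the MTP data header, if any, goes out only with the first request;
+ * clear hdr_size so later iterations carry pure file data
+ */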
+ xfer = ret + hdr_size;
+ hdr_size = 0;
+
+ req->length = xfer;
+ ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+ if (ret < 0) {
+ DBG(cdev, "send_file_work: xfer error %d\n", ret);
+ dev->state = STATE_ERROR;
+ r = -EIO;
+ break;
+ }
+
+ count -= xfer;
+
+ /* zero this so we don't try to free it on error exit */
+ req = 0;
+ }
+
+ if (req)
+ mtp_req_put(dev, &dev->tx_idle, req);
+
+ DBG(cdev, "send_file_work returning %d\n", r);
+ /* write the result */
+ dev->xfer_result = r;
+ smp_wmb();
+}
+
+/* read from USB and write to a local file */
+static void receive_file_work(struct work_struct *data)
+{
+ struct mtp_dev *dev = container_of(data, struct mtp_dev,
+ receive_file_work);
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *read_req = NULL, *write_req = NULL;
+ struct file *filp;
+ loff_t offset;
+ int64_t count;
+ int ret, cur_buf = 0;
+ int r = 0;
+
+ /* read our parameters */
+ smp_rmb();
+ filp = dev->xfer_file;
+ offset = dev->xfer_file_offset;
+ count = dev->xfer_file_length;
+
+ DBG(cdev, "receive_file_work(%lld)\n", count);
+
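+ /* Ping-pong between the rx requests: queue the next USB read while the
+ * previously completed buffer is written out to the file.
+ */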
+ while (count > 0 || write_req) {
+ if (count > 0) {
+ /* queue a request */
+ read_req = dev->rx_req[cur_buf];
+ cur_buf = (cur_buf + 1) % RX_REQ_MAX;
+
+ read_req->length = (count > MTP_BULK_BUFFER_SIZE
+ ? MTP_BULK_BUFFER_SIZE : count);
+ dev->rx_done = 0;
+ ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
+ if (ret < 0) {
+ r = -EIO;
+ dev->state = STATE_ERROR;
+ break;
+ }
+ }
+
+ if (write_req) {
+ DBG(cdev, "rx %p %d\n", write_req, write_req->actual);
+ ret = vfs_write(filp, write_req->buf, write_req->actual,
+ &offset);
+ DBG(cdev, "vfs_write %d\n", ret);
+ if (ret != write_req->actual) {
+ r = -EIO;
+ dev->state = STATE_ERROR;
+ break;
+ }
+ write_req = NULL;
+ }
+
+ if (read_req) {
+ /* wait for our last read to complete */
+ ret = wait_event_interruptible(dev->read_wq,
+ dev->rx_done || dev->state != STATE_BUSY);
+ if (dev->state == STATE_CANCELED) {
+ r = -ECANCELED;
+ if (!dev->rx_done)
+ usb_ep_dequeue(dev->ep_out, read_req);
+ break;
+ }
+ /* if xfer_file_length is 0xFFFFFFFF, then we read until
+ * we get a zero length packet
+ */
+ if (count != 0xFFFFFFFF)
+ count -= read_req->actual;
+ if (read_req->actual < read_req->length) {
+ /*
+ * short packet is used to signal EOF for
+ * sizes > 4 gig
+ */
+ DBG(cdev, "got short packet\n");
+ count = 0;
+ }
+
+ write_req = read_req;
+ read_req = NULL;
+ }
+ }
+
+ DBG(cdev, "receive_file_work returning %d\n", r);
+ /* write the result */
+ dev->xfer_result = r;
+ smp_wmb();
+}
+
+static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
+{
+ struct usb_request *req = NULL;
+ int ret;
+ int length = event->length;
+
+ DBG(dev->cdev, "mtp_send_event(%d)\n", event->length);
+
+ if (length < 0 || length > INTR_BUFFER_SIZE)
+ return -EINVAL;
+ if (dev->state == STATE_OFFLINE)
+ return -ENODEV;
+
+ ret = wait_event_interruptible_timeout(dev->intr_wq,
+ (req = mtp_req_get(dev, &dev->intr_idle)),
+ msecs_to_jiffies(1000));
+ if (!req)
+ return -ETIME;
+
+ if (copy_from_user(req->buf, (void __user *)event->data, length)) {
+ mtp_req_put(dev, &dev->intr_idle, req);
+ return -EFAULT;
+ }
+ req->length = length;
+ ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
+ if (ret)
+ mtp_req_put(dev, &dev->intr_idle, req);
+
+ return ret;
+}
+
+static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+ struct mtp_dev *dev = fp->private_data;
+ struct file *filp = NULL;
+ int ret = -EINVAL;
+
+ if (mtp_lock(&dev->ioctl_excl))
+ return -EBUSY;
+
+ switch (code) {
+ case MTP_SEND_FILE:
+ case MTP_RECEIVE_FILE:
+ case MTP_SEND_FILE_WITH_HEADER:
+ {
+ struct mtp_file_range mfr;
+ struct work_struct *work;
+
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED) {
+ /* report cancelation to userspace */
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+ ret = -ECANCELED;
+ goto out;
+ }
+ if (dev->state == STATE_OFFLINE) {
+ spin_unlock_irq(&dev->lock);
+ ret = -ENODEV;
+ goto out;
+ }
+ dev->state = STATE_BUSY;
+ spin_unlock_irq(&dev->lock);
+
+ if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
+ ret = -EFAULT;
+ goto fail;
+ }
+ /* hold a reference to the file while we are working with it */
+ filp = fget(mfr.fd);
+ if (!filp) {
+ ret = -EBADF;
+ goto fail;
+ }
+
+ /* write the parameters */
+ dev->xfer_file = filp;
+ dev->xfer_file_offset = mfr.offset;
+ dev->xfer_file_length = mfr.length;
+ smp_wmb();
+
+ if (code == MTP_SEND_FILE_WITH_HEADER) {
+ work = &dev->send_file_work;
+ dev->xfer_send_header = 1;
+ dev->xfer_command = mfr.command;
+ dev->xfer_transaction_id = mfr.transaction_id;
+ } else if (code == MTP_SEND_FILE) {
+ work = &dev->send_file_work;
+ dev->xfer_send_header = 0;
+ } else {
+ work = &dev->receive_file_work;
+ }
+
+ /* We do the file transfer on a work queue so it will run
+ * in kernel context, which is necessary for vfs_read and
+ * vfs_write to use our buffers in the kernel address space.
+ */
+ queue_work(dev->wq, work);
+ /* wait for operation to complete */
+ flush_workqueue(dev->wq);
+ fput(filp);
+
+ /* read the result */
+ smp_rmb();
+ ret = dev->xfer_result;
+ break;
+ }
+ case MTP_SEND_EVENT:
+ {
+ struct mtp_event event;
+ /* return here so we don't change dev->state below,
+ * which would interfere with bulk transfer state.
+ */
+ if (copy_from_user(&event, (void __user *)value, sizeof(event)))
+ ret = -EFAULT;
+ else
+ ret = mtp_send_event(dev, &event);
+ goto out;
+ }
+ }
+
+fail:
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED)
+ ret = -ECANCELED;
+ else if (dev->state != STATE_OFFLINE)
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+out:
+ mtp_unlock(&dev->ioctl_excl);
+ DBG(dev->cdev, "ioctl returning %d\n", ret);
+ return ret;
+}
+
+static int mtp_open(struct inode *ip, struct file *fp)
+{
+ printk(KERN_INFO "mtp_open\n");
+ if (mtp_lock(&_mtp_dev->open_excl))
+ return -EBUSY;
+
+ /* clear any error condition */
+ if (_mtp_dev->state != STATE_OFFLINE)
+ _mtp_dev->state = STATE_READY;
+
+ fp->private_data = _mtp_dev;
+ return 0;
+}
+
+static int mtp_release(struct inode *ip, struct file *fp)
+{
+ printk(KERN_INFO "mtp_release\n");
+
+ mtp_unlock(&_mtp_dev->open_excl);
+ return 0;
+}
+
+/* file operations for /dev/mtp_usb */
+static const struct file_operations mtp_fops = {
+ .owner = THIS_MODULE,
+ .read = mtp_read,
+ .write = mtp_write,
+ .unlocked_ioctl = mtp_ioctl,
+ .open = mtp_open,
+ .release = mtp_release,
+};
+
+static struct miscdevice mtp_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = mtp_shortname,
+ .fops = &mtp_fops,
+};
+
+static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct mtp_dev *dev = _mtp_dev;
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+ unsigned long flags;
+
+ VDBG(cdev, "mtp_ctrlrequest "
+ "%02x.%02x v%04x i%04x l%u\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+
+ /* Handle MTP OS string */
+ if (ctrl->bRequestType ==
+ (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
+ && ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
+ && (w_value >> 8) == USB_DT_STRING
+ && (w_value & 0xFF) == MTP_OS_STRING_ID) {
+ value = (w_length < sizeof(mtp_os_string)
+ ? w_length : sizeof(mtp_os_string));
+ memcpy(cdev->req->buf, mtp_os_string, value);
+ } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
+ /* Handle MTP OS descriptor */
+ DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
+ ctrl->bRequest, w_index, w_value, w_length);
+
+ if (ctrl->bRequest == 1
+ && (ctrl->bRequestType & USB_DIR_IN)
+ && (w_index == 4 || w_index == 5)) {
+ value = (w_length < sizeof(mtp_ext_config_desc) ?
+ w_length : sizeof(mtp_ext_config_desc));
+ memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
+ }
+ } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
+ DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
+ ctrl->bRequest, w_index, w_value, w_length);
+
+ if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
+ && w_value == 0) {
+ DBG(cdev, "MTP_REQ_CANCEL\n");
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->state == STATE_BUSY) {
+ dev->state = STATE_CANCELED;
+ wake_up(&dev->read_wq);
+ wake_up(&dev->write_wq);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ /* We need to queue a request to read the remaining
+ * bytes, but we don't actually need to look at
+ * the contents.
+ */
+ value = w_length;
+ } else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
+ && w_index == 0 && w_value == 0) {
+ struct mtp_device_status *status = cdev->req->buf;
+ status->wLength =
+ __constant_cpu_to_le16(sizeof(*status));
+
+ DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
+ spin_lock_irqsave(&dev->lock, flags);
+ /* device status is "busy" until we report
+ * the cancelation to userspace
+ */
+ if (dev->state == STATE_CANCELED)
+ status->wCode =
+ __cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
+ else
+ status->wCode =
+ __cpu_to_le16(MTP_RESPONSE_OK);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ value = sizeof(*status);
+ }
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ int rc;
+ cdev->req->zero = value < w_length;
+ cdev->req->length = value;
+ rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+ if (rc < 0)
+ ERROR(cdev, "%s: response queue error\n", __func__);
+ }
+ return value;
+}
+
+static int
+mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct mtp_dev *dev = func_to_mtp(f);
+ int id;
+ int ret;
+
+ dev->cdev = cdev;
+ DBG(cdev, "mtp_function_bind dev: %p\n", dev);
+
+ /* allocate interface ID(s) */
+ id = usb_interface_id(c, f);
+ if (id < 0)
+ return id;
+ mtp_interface_desc.bInterfaceNumber = id;
+
+ /* allocate endpoints */
+ ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
+ &mtp_fullspeed_out_desc, &mtp_intr_desc);
+ if (ret)
+ return ret;
+
+ /* support high speed hardware */
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ mtp_highspeed_in_desc.bEndpointAddress =
+ mtp_fullspeed_in_desc.bEndpointAddress;
+ mtp_highspeed_out_desc.bEndpointAddress =
+ mtp_fullspeed_out_desc.bEndpointAddress;
+ }
+
+ DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+ gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ f->name, dev->ep_in->name, dev->ep_out->name);
+ return 0;
+}
+
+static void
+mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct mtp_dev *dev = func_to_mtp(f);
+ struct usb_request *req;
+ int i;
+
+ while ((req = mtp_req_get(dev, &dev->tx_idle)))
+ mtp_request_free(req, dev->ep_in);
+ for (i = 0; i < RX_REQ_MAX; i++)
+ mtp_request_free(dev->rx_req[i], dev->ep_out);
+ while ((req = mtp_req_get(dev, &dev->intr_idle)))
+ mtp_request_free(req, dev->ep_intr);
+ dev->state = STATE_OFFLINE;
+}
+
+static int mtp_function_set_alt(struct usb_function *f,
+ unsigned intf, unsigned alt)
+{
+ struct mtp_dev *dev = func_to_mtp(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int ret;
+
+ DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt);
+
+ ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+ if (ret)
+ return ret;
+
+ ret = usb_ep_enable(dev->ep_in);
+ if (ret)
+ return ret;
+
+ ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+ if (ret)
+ return ret;
+
+ ret = usb_ep_enable(dev->ep_out);
+ if (ret) {
+ usb_ep_disable(dev->ep_in);
+ return ret;
+ }
+
+ ret = config_ep_by_speed(cdev->gadget, f, dev->ep_intr);
+ if (ret)
+ return ret;
+
+ ret = usb_ep_enable(dev->ep_intr);
+ if (ret) {
+ usb_ep_disable(dev->ep_out);
+ usb_ep_disable(dev->ep_in);
+ return ret;
+ }
+ dev->state = STATE_READY;
+
+ /* readers may be blocked waiting for us to go online */
+ wake_up(&dev->read_wq);
+ return 0;
+}
+
+static void mtp_function_disable(struct usb_function *f)
+{
+ struct mtp_dev *dev = func_to_mtp(f);
+ struct usb_composite_dev *cdev = dev->cdev;
+
+ DBG(cdev, "mtp_function_disable\n");
+ dev->state = STATE_OFFLINE;
+ usb_ep_disable(dev->ep_in);
+ usb_ep_disable(dev->ep_out);
+ usb_ep_disable(dev->ep_intr);
+
+ /* readers may be blocked waiting for us to go online */
+ wake_up(&dev->read_wq);
+
+ VDBG(cdev, "%s disabled\n", dev->function.name);
+}
+
+static int mtp_bind_config(struct usb_configuration *c, bool ptp_config)
+{
+ struct mtp_dev *dev = _mtp_dev;
+ int ret = 0;
+
+ printk(KERN_INFO "mtp_bind_config\n");
+
+ /* allocate a string ID for our interface */
+ if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
+ ret = usb_string_id(c->cdev);
+ if (ret < 0)
+ return ret;
+ mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
+ mtp_interface_desc.iInterface = ret;
+ }
+
+ dev->cdev = c->cdev;
+ dev->function.name = "mtp";
+ dev->function.strings = mtp_strings;
+ if (ptp_config) {
+ dev->function.fs_descriptors = fs_ptp_descs;
+ dev->function.hs_descriptors = hs_ptp_descs;
+ } else {
+ dev->function.fs_descriptors = fs_mtp_descs;
+ dev->function.hs_descriptors = hs_mtp_descs;
+ }
+ dev->function.bind = mtp_function_bind;
+ dev->function.unbind = mtp_function_unbind;
+ dev->function.set_alt = mtp_function_set_alt;
+ dev->function.disable = mtp_function_disable;
+
+ return usb_add_function(c, &dev->function);
+}
+
+static int mtp_setup(void)
+{
+ struct mtp_dev *dev;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->lock);
+ init_waitqueue_head(&dev->read_wq);
+ init_waitqueue_head(&dev->write_wq);
+ init_waitqueue_head(&dev->intr_wq);
+ atomic_set(&dev->open_excl, 0);
+ atomic_set(&dev->ioctl_excl, 0);
+ INIT_LIST_HEAD(&dev->tx_idle);
+ INIT_LIST_HEAD(&dev->intr_idle);
+
+ dev->wq = create_singlethread_workqueue("f_mtp");
+ if (!dev->wq) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+ INIT_WORK(&dev->send_file_work, send_file_work);
+ INIT_WORK(&dev->receive_file_work, receive_file_work);
+
+ _mtp_dev = dev;
+
+ ret = misc_register(&mtp_device);
+ if (ret)
+ goto err2;
+
+ return 0;
+
+err2:
+ destroy_workqueue(dev->wq);
+err1:
+ _mtp_dev = NULL;
+ kfree(dev);
+ printk(KERN_ERR "mtp gadget driver failed to initialize\n");
+ return ret;
+}
+
+static void mtp_cleanup(void)
+{
+ struct mtp_dev *dev = _mtp_dev;
+
+ if (!dev)
+ return;
+
+ misc_deregister(&mtp_device);
+ destroy_workqueue(dev->wq);
+ _mtp_dev = NULL;
+ kfree(dev);
+}
diff --git a/drivers/usb/gadget/mv_udc_core.c b/drivers/usb/gadget/mv_udc_core.c
index 379aac7b82fc..6e8b1272ebce 100644
--- a/drivers/usb/gadget/mv_udc_core.c
+++ b/drivers/usb/gadget/mv_udc_core.c
@@ -1012,7 +1012,7 @@ static void udc_clock_enable(struct mv_udc *udc)
unsigned int i;
for (i = 0; i < udc->clknum; i++)
- clk_enable(udc->clk[i]);
+ clk_prepare_enable(udc->clk[i]);
}
static void udc_clock_disable(struct mv_udc *udc)
@@ -1020,7 +1020,7 @@ static void udc_clock_disable(struct mv_udc *udc)
unsigned int i;
for (i = 0; i < udc->clknum; i++)
- clk_disable(udc->clk[i]);
+ clk_disable_unprepare(udc->clk[i]);
}
static void udc_stop(struct mv_udc *udc)
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index e4192b887de9..81451e976c70 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -1127,11 +1127,15 @@ static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS];
#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
+static bool rndis_initialized;
int rndis_init(void)
{
u8 i;
+ if (rndis_initialized)
+ return 0;
+
for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
char name [20];
@@ -1158,6 +1162,7 @@ int rndis_init(void)
INIT_LIST_HEAD(&(rndis_per_dev_params[i].resp_queue));
}
+ rndis_initialized = true;
return 0;
}
@@ -1166,7 +1171,13 @@ void rndis_exit(void)
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
u8 i;
char name[20];
+#endif
+ if (!rndis_initialized)
+ return;
+ rndis_initialized = false;
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
sprintf(name, NAME_TEMPLATE, i);
remove_proc_entry(name, NULL);
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 141971d9051e..439c3f972f8c 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -3477,12 +3477,11 @@ static void s3c_hsotg_delete_debug(struct s3c_hsotg *hsotg)
/**
* s3c_hsotg_release - release callback for hsotg device
* @dev: Device to for which release is called
+ *
+ * Nothing to do as the resource is allocated using devm_ API.
*/
static void s3c_hsotg_release(struct device *dev)
{
- struct s3c_hsotg *hsotg = dev_get_drvdata(dev);
-
- kfree(hsotg);
}
/**
diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c
index 4f7f76f00c74..7cacd6ae818e 100644
--- a/drivers/usb/gadget/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/tcm_usb_gadget.c
@@ -1794,9 +1794,10 @@ static int tcm_usbg_drop_nexus(struct usbg_tpg *tpg)
tpg->tpg_nexus = NULL;
kfree(tv_nexus);
+ ret = 0;
out:
mutex_unlock(&tpg->tpg_mutex);
- return 0;
+ return ret;
}
static ssize_t tcm_usbg_tpg_store_nexus(
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index d0f95482f40e..598dcc1212f0 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -887,7 +887,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
port->port_num, tty, file);
- wake_up_interruptible(&port->port.close_wait);
+ wake_up(&port->port.close_wait);
exit:
spin_unlock_irq(&port->port_lock);
}
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index fd9b5424b860..d81d2fcbff18 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -230,7 +230,7 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
switch (phy_mode) {
case FSL_USB2_PHY_ULPI:
- if (pdata->controller_ver) {
+ if (pdata->have_sysif_regs && pdata->controller_ver) {
/* controller version 1.6 or above */
setbits32(non_ehci + FSL_SOC_USB_CTRL,
ULPI_PHY_CLK_SEL);
@@ -251,7 +251,7 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
portsc |= PORT_PTS_PTW;
/* fall through */
case FSL_USB2_PHY_UTMI:
- if (pdata->controller_ver) {
+ if (pdata->have_sysif_regs && pdata->controller_ver) {
/* controller version 1.6 or above */
setbits32(non_ehci + FSL_SOC_USB_CTRL, UTMI_PHY_EN);
mdelay(FSL_UTMI_PHY_DLY); /* Delay for UTMI PHY CLK to
@@ -267,7 +267,8 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
break;
}
- if (pdata->controller_ver && (phy_mode == FSL_USB2_PHY_ULPI)) {
+ if (pdata->have_sysif_regs && pdata->controller_ver &&
+ (phy_mode == FSL_USB2_PHY_ULPI)) {
/* check PHY_CLK_VALID to get phy clk valid */
if (!spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0)) {
@@ -278,7 +279,7 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
ehci_writel(ehci, portsc, &ehci->regs->port_status[port_offset]);
- if (phy_mode != FSL_USB2_PHY_ULPI)
+ if (phy_mode != FSL_USB2_PHY_ULPI && pdata->have_sysif_regs)
setbits32(non_ehci + FSL_SOC_USB_CTRL, USB_CTRL_USB_EN);
return 0;
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index f7bfc0b898b9..6c56297ea16b 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -43,7 +43,7 @@ static void ehci_clock_enable(struct ehci_hcd_mv *ehci_mv)
unsigned int i;
for (i = 0; i < ehci_mv->clknum; i++)
- clk_enable(ehci_mv->clk[i]);
+ clk_prepare_enable(ehci_mv->clk[i]);
}
static void ehci_clock_disable(struct ehci_hcd_mv *ehci_mv)
@@ -51,7 +51,7 @@ static void ehci_clock_disable(struct ehci_hcd_mv *ehci_mv)
unsigned int i;
for (i = 0; i < ehci_mv->clknum; i++)
- clk_disable(ehci_mv->clk[i]);
+ clk_disable_unprepare(ehci_mv->clk[i]);
}
static int mv_ehci_enable(struct ehci_hcd_mv *ehci_mv)
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index dabb20494826..170b9399e09f 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -200,6 +200,26 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
break;
}
+ /* optional debug port, normally in the first BAR */
+ temp = pci_find_capability(pdev, PCI_CAP_ID_DBG);
+ if (temp) {
+ pci_read_config_dword(pdev, temp, &temp);
+ temp >>= 16;
+ if (((temp >> 13) & 7) == 1) {
+ u32 hcs_params = ehci_readl(ehci,
+ &ehci->caps->hcs_params);
+
+ temp &= 0x1fff;
+ ehci->debug = hcd->regs + temp;
+ temp = ehci_readl(ehci, &ehci->debug->control);
+ ehci_info(ehci, "debug port %d%s\n",
+ HCS_DEBUG_PORT(hcs_params),
+ (temp & DBGP_ENABLED) ? " IN USE" : "");
+ if (!(temp & DBGP_ENABLED))
+ ehci->debug = NULL;
+ }
+ }
+
retval = ehci_setup(hcd);
if (retval)
return retval;
@@ -228,25 +248,6 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
break;
}
- /* optional debug port, normally in the first BAR */
- temp = pci_find_capability(pdev, 0x0a);
- if (temp) {
- pci_read_config_dword(pdev, temp, &temp);
- temp >>= 16;
- if ((temp & (3 << 13)) == (1 << 13)) {
- temp &= 0x1fff;
- ehci->debug = hcd->regs + temp;
- temp = ehci_readl(ehci, &ehci->debug->control);
- ehci_info(ehci, "debug port %d%s\n",
- HCS_DEBUG_PORT(ehci->hcs_params),
- (temp & DBGP_ENABLED)
- ? " IN USE"
- : "");
- if (!(temp & DBGP_ENABLED))
- ehci->debug = NULL;
- }
- }
-
/* at least the Genesys GL880S needs fixup here */
temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params);
temp &= 0x0f;
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 5105127c1d4b..11e0b79ff9d5 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -142,6 +142,9 @@ static int usb_get_ver_info(struct device_node *np)
return ver;
}
+ if (of_device_is_compatible(np, "fsl,mpc5121-usb2-dr"))
+ return FSL_USB_VER_OLD;
+
if (of_device_is_compatible(np, "fsl-usb2-mph")) {
if (of_device_is_compatible(np, "fsl-usb2-mph-v1.6"))
ver = FSL_USB_VER_1_6;
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index bd6a7447ccc9..f0ebe8e7c58b 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -58,6 +58,7 @@
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/dma-mapping.h>
+#include <linux/module.h>
#include "imx21-hcd.h"
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index d370245a4ee2..5e3a6deb62b1 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -128,7 +128,8 @@ static void tmio_start_hc(struct platform_device *dev)
tmio_iowrite8(2, tmio->ccr + CCR_INTC);
dev_info(&dev->dev, "revision %d @ 0x%08llx, irq %d\n",
- tmio_ioread8(tmio->ccr + CCR_REVID), hcd->rsrc_start, hcd->irq);
+ tmio_ioread8(tmio->ccr + CCR_REVID),
+ (u64) hcd->rsrc_start, hcd->irq);
}
static int ohci_tmio_start(struct usb_hcd *hcd)
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index a686cf4905bb..68914429482f 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -761,12 +761,39 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
case USB_PORT_FEAT_LINK_STATE:
temp = xhci_readl(xhci, port_array[wIndex]);
+
+ /* Disable port */
+ if (link_state == USB_SS_PORT_LS_SS_DISABLED) {
+ xhci_dbg(xhci, "Disable port %d\n", wIndex);
+ temp = xhci_port_state_to_neutral(temp);
+ /*
+ * Clear all change bits, so that we get a new
+ * connection event.
+ */
+ temp |= PORT_CSC | PORT_PEC | PORT_WRC |
+ PORT_OCC | PORT_RC | PORT_PLC |
+ PORT_CEC;
+ xhci_writel(xhci, temp | PORT_PE,
+ port_array[wIndex]);
+ temp = xhci_readl(xhci, port_array[wIndex]);
+ break;
+ }
+
+ /* Put link in RxDetect (enable port) */
+ if (link_state == USB_SS_PORT_LS_RX_DETECT) {
+ xhci_dbg(xhci, "Enable port %d\n", wIndex);
+ xhci_set_link_state(xhci, port_array, wIndex,
+ link_state);
+ temp = xhci_readl(xhci, port_array[wIndex]);
+ break;
+ }
+
/* Software should not attempt to set
- * port link state above '5' (Rx.Detect) and the port
+ * port link state above '3' (U3) and the port
* must be enabled.
*/
if ((temp & PORT_PE) == 0 ||
- (link_state > USB_SS_PORT_LS_RX_DETECT)) {
+ (link_state > USB_SS_PORT_LS_U3)) {
xhci_warn(xhci, "Cannot set link state.\n");
goto error;
}
@@ -957,6 +984,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
int max_ports;
__le32 __iomem **port_array;
struct xhci_bus_state *bus_state;
+ bool reset_change = false;
max_ports = xhci_get_ports(hcd, &port_array);
bus_state = &xhci->bus_state[hcd_index(hcd)];
@@ -988,6 +1016,12 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
status = 1;
}
+ if ((temp & PORT_RC))
+ reset_change = true;
+ }
+ if (!status && !reset_change) {
+ xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
}
spin_unlock_irqrestore(&xhci->lock, flags);
return status ? retval : 0;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index fb51c7085ad0..35616ffbe3ae 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1250,6 +1250,8 @@ static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
+ if (ep->desc.bInterval == 0)
+ return 0;
return xhci_microframes_to_exponent(udev, ep,
ep->desc.bInterval, 0, 15);
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index cbb44b7b9d65..59fb5c677dbe 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1725,6 +1725,15 @@ cleanup:
if (bogus_port_status)
return;
+ /*
+ * xHCI port-status-change events occur when the "or" of all the
+ * status-change bits in the portsc register changes from 0 to 1.
+ * New status changes won't cause an event if any other change
+ * bits are still set. When an event occurs, switch over to
+ * polling to avoid losing status changes.
+ */
+ xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
spin_unlock(&xhci->lock);
/* Pass this up to the core */
usb_hcd_poll_rh_status(hcd);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 5c72c431bab1..f1f01a834ba7 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -884,6 +884,11 @@ int xhci_suspend(struct xhci_hcd *xhci)
xhci->shared_hcd->state != HC_STATE_SUSPENDED)
return -EINVAL;
+ /* Don't poll the roothubs on bus suspend. */
+ xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ del_timer_sync(&hcd->rh_timer);
+
spin_lock_irq(&xhci->lock);
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
@@ -1069,6 +1074,11 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
compliance_mode_recovery_timer_init(xhci);
+ /* Re-enable port polling. */
+ xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ usb_hcd_poll_rh_status(hcd);
+
return retval;
}
#endif /* CONFIG_PM */
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 7667b12f2ff5..268148de9714 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -2179,7 +2179,7 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
if (dev->out_pipe == 0 || !param->length || param->sglen < 4)
break;
retval = 0;
- dev_info(&intf->dev, "TEST 17: unlink from %d queues of "
+ dev_info(&intf->dev, "TEST 24: unlink from %d queues of "
"%d %d-byte writes\n",
param->iterations, param->sglen, param->length);
for (i = param->iterations; retval == 0 && i > 0; --i) {
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index f1c6c5470b92..fd3486745e64 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2298,10 +2298,7 @@ static int __init musb_init(void)
if (usb_disabled())
return 0;
- pr_info("%s: version " MUSB_VERSION ", "
- "?dma?"
- ", "
- "otg (peripheral+host)",
+ pr_info("%s: version " MUSB_VERSION ", ?dma?, otg (peripheral+host)\n",
musb_driver_name);
return platform_driver_register(&musb_driver);
}
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index e6f2ae8368bb..f7d764de6fda 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -134,6 +134,11 @@ static const resource_size_t dsps_control_module_phys[] = {
DSPS_AM33XX_CONTROL_MODULE_PHYS_1,
};
+#define USBPHY_CM_PWRDN (1 << 0)
+#define USBPHY_OTG_PWRDN (1 << 1)
+#define USBPHY_OTGVDET_EN (1 << 19)
+#define USBPHY_OTGSESSEND_EN (1 << 20)
+
/**
* musb_dsps_phy_control - phy on/off
* @glue: struct dsps_glue *
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index 6223062d5d1b..abad470557c0 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -12,6 +12,14 @@ config USB_OTG_UTILS
Select this to make sure the build includes objects from
the OTG infrastructure directory.
+config USB_OTG_WAKELOCK
+ bool "Hold a wakelock when USB connected"
+ depends on WAKELOCK
+ select USB_OTG_UTILS
+ help
+ Select this to automatically hold a wakelock when USB is
+ connected, preventing suspend.
+
if USB || USB_GADGET
#
@@ -110,7 +118,7 @@ config AB8500_USB
config FSL_USB2_OTG
bool "Freescale USB OTG Transceiver Driver"
- depends on USB_EHCI_FSL && USB_GADGET_FSL_USB2 && USB_SUSPEND
+ depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_SUSPEND
select USB_OTG
select USB_OTG_UTILS
help
diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile
index a844b8d35d14..ae6244a51cf6 100644
--- a/drivers/usb/otg/Makefile
+++ b/drivers/usb/otg/Makefile
@@ -7,6 +7,7 @@ ccflags-$(CONFIG_USB_GADGET_DEBUG) += -DDEBUG
# infrastructure
obj-$(CONFIG_USB_OTG_UTILS) += otg.o
+obj-$(CONFIG_USB_OTG_WAKELOCK) += otg-wakelock.o
# transceiver drivers
obj-$(CONFIG_USB_GPIO_VBUS) += gpio_vbus.o
diff --git a/drivers/usb/otg/mv_otg.c b/drivers/usb/otg/mv_otg.c
index 1dd57504186d..eace975991a8 100644
--- a/drivers/usb/otg/mv_otg.c
+++ b/drivers/usb/otg/mv_otg.c
@@ -240,7 +240,7 @@ static void otg_clock_enable(struct mv_otg *mvotg)
unsigned int i;
for (i = 0; i < mvotg->clknum; i++)
- clk_enable(mvotg->clk[i]);
+ clk_prepare_enable(mvotg->clk[i]);
}
static void otg_clock_disable(struct mv_otg *mvotg)
@@ -248,7 +248,7 @@ static void otg_clock_disable(struct mv_otg *mvotg)
unsigned int i;
for (i = 0; i < mvotg->clknum; i++)
- clk_disable(mvotg->clk[i]);
+ clk_disable_unprepare(mvotg->clk[i]);
}
static int mv_otg_enable_internal(struct mv_otg *mvotg)
diff --git a/drivers/usb/otg/otg-wakelock.c b/drivers/usb/otg/otg-wakelock.c
new file mode 100644
index 000000000000..e17e27299062
--- /dev/null
+++ b/drivers/usb/otg/otg-wakelock.c
@@ -0,0 +1,170 @@
+/*
+ * otg-wakelock.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/wakelock.h>
+#include <linux/spinlock.h>
+#include <linux/usb/otg.h>
+
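+/* Time, in milliseconds, to hold the temporary wakelock after a disconnect/charger event. */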
+#define TEMPORARY_HOLD_TIME 2000
+
+static bool enabled = true;
+static struct usb_phy *otgwl_xceiv;
+static struct notifier_block otgwl_nb;
+
+/*
+ * otgwl_spinlock is held while the VBUS lock is grabbed or dropped and the
+ * held field is updated to match.
+ */
+
+static DEFINE_SPINLOCK(otgwl_spinlock);
+
+/*
+ * Only one lock is used so far, but the three fields below belong together,
+ * so they are grouped in a struct.
+ */
+
+struct otgwl_lock {
+ char name[40];
+ struct wake_lock wakelock;
+ bool held;
+};
+
+/*
+ * VBUS present lock. Also used as a timed lock on charger
+ * connect/disconnect and USB host disconnect, to allow the system
+ * to react to the change in power.
+ */
+
+static struct otgwl_lock vbus_lock;
+
+static void otgwl_hold(struct otgwl_lock *lock)
+{
+ if (!lock->held) {
+ wake_lock(&lock->wakelock);
+ lock->held = true;
+ }
+}
+
+static void otgwl_temporary_hold(struct otgwl_lock *lock)
+{
+ wake_lock_timeout(&lock->wakelock,
+ msecs_to_jiffies(TEMPORARY_HOLD_TIME));
+ lock->held = false;
+}
+
+static void otgwl_drop(struct otgwl_lock *lock)
+{
+ if (lock->held) {
+ wake_unlock(&lock->wakelock);
+ lock->held = false;
+ }
+}
+
+static void otgwl_handle_event(unsigned long event)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&otgwl_spinlock, irqflags);
+
+ if (!enabled) {
+ otgwl_drop(&vbus_lock);
+ spin_unlock_irqrestore(&otgwl_spinlock, irqflags);
+ return;
+ }
+
+ switch (event) {
+ case USB_EVENT_VBUS:
+ case USB_EVENT_ENUMERATED:
+ otgwl_hold(&vbus_lock);
+ break;
+
+ case USB_EVENT_NONE:
+ case USB_EVENT_ID:
+ case USB_EVENT_CHARGER:
+ otgwl_temporary_hold(&vbus_lock);
+ break;
+
+ default:
+ break;
+ }
+
+ spin_unlock_irqrestore(&otgwl_spinlock, irqflags);
+}
+
+static int otgwl_otg_notifications(struct notifier_block *nb,
+ unsigned long event, void *unused)
+{
+ otgwl_handle_event(event);
+ return NOTIFY_OK;
+}
+
+static int set_enabled(const char *val, const struct kernel_param *kp)
+{
+ int rv = param_set_bool(val, kp);
+
+ if (rv)
+ return rv;
+
+ if (otgwl_xceiv)
+ otgwl_handle_event(otgwl_xceiv->last_event);
+
+ return 0;
+}
+
+static struct kernel_param_ops enabled_param_ops = {
+ .set = set_enabled,
+ .get = param_get_bool,
+};
+
+module_param_cb(enabled, &enabled_param_ops, &enabled, 0644);
+MODULE_PARM_DESC(enabled, "enable wakelock when VBUS present");
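+/*
+ * The "enabled" parameter is typically writable at runtime as well, e.g. via
+ * /sys/module/otg_wakelock/parameters/enabled (exact path depends on how the
+ * object is built).
+ */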
+
+static int __init otg_wakelock_init(void)
+{
+ int ret;
+
+ otgwl_xceiv = usb_get_transceiver();
+
+ if (!otgwl_xceiv) {
+ pr_err("%s: No USB transceiver found\n", __func__);
+ return -ENODEV;
+ }
+
+ snprintf(vbus_lock.name, sizeof(vbus_lock.name), "vbus-%s",
+ dev_name(otgwl_xceiv->dev));
+ wake_lock_init(&vbus_lock.wakelock, WAKE_LOCK_SUSPEND,
+ vbus_lock.name);
+
+ otgwl_nb.notifier_call = otgwl_otg_notifications;
+ ret = usb_register_notifier(otgwl_xceiv, &otgwl_nb);
+
+ if (ret) {
+ pr_err("%s: usb_register_notifier on transceiver %s"
+ " failed\n", __func__,
+ dev_name(otgwl_xceiv->dev));
+ otgwl_xceiv = NULL;
+ wake_lock_destroy(&vbus_lock.wakelock);
+ return ret;
+ }
+
+ otgwl_handle_event(otgwl_xceiv->last_event);
+ return ret;
+}
+
+late_initcall(otg_wakelock_init);
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index dd41f61893ef..f2985cd88021 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -545,15 +545,6 @@ static int usbhsg_pipe_disable(struct usbhsg_uep *uep)
return 0;
}
-static void usbhsg_uep_init(struct usbhsg_gpriv *gpriv)
-{
- int i;
- struct usbhsg_uep *uep;
-
- usbhsg_for_each_uep_with_dcp(uep, gpriv, i)
- uep->pipe = NULL;
-}
-
/*
*
* usb_ep_ops
@@ -610,7 +601,12 @@ static int usbhsg_ep_disable(struct usb_ep *ep)
{
struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
- return usbhsg_pipe_disable(uep);
+ usbhsg_pipe_disable(uep);
+
+ uep->pipe->mod_private = NULL;
+ uep->pipe = NULL;
+
+ return 0;
}
static struct usb_request *usbhsg_ep_alloc_request(struct usb_ep *ep,
@@ -761,9 +757,8 @@ static int usbhsg_try_start(struct usbhs_priv *priv, u32 status)
usbhs_pipe_init(priv,
usbhsg_dma_map_ctrl);
usbhs_fifo_init(priv);
- usbhsg_uep_init(gpriv);
- /* dcp init */
+ /* dcp init instead of usbhsg_ep_enable() */
dcp->pipe = usbhs_dcp_malloc(priv);
dcp->pipe->mod_private = dcp;
usbhs_pipe_config_update(dcp->pipe, 0, 0, 64);
@@ -825,7 +820,7 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
usbhs_sys_set_test_mode(priv, 0);
usbhs_sys_function_ctrl(priv, 0);
- usbhsg_pipe_disable(dcp);
+ usbhsg_ep_disable(&dcp->ep);
dev_dbg(dev, "stop gadget\n");
@@ -998,6 +993,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
*/
usbhsg_for_each_uep_with_dcp(uep, gpriv, i) {
uep->gpriv = gpriv;
+ uep->pipe = NULL;
snprintf(uep->ep_name, EP_NAME_SIZE, "ep%d", i);
uep->ep.name = uep->ep_name;
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
index 3d3cd6ca2689..b86815421c8d 100644
--- a/drivers/usb/renesas_usbhs/mod_host.c
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -661,9 +661,10 @@ static void usbhsh_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
status = -ESHUTDOWN;
urb->actual_length = pkt->actual;
- usbhsh_ureq_free(hpriv, ureq);
usbhsh_endpoint_sequence_save(hpriv, urb, pkt);
+ usbhsh_ureq_free(hpriv, ureq);
+
usbhsh_pipe_detach(hpriv, usbhsh_ep_to_uep(urb->ep));
usb_hcd_unlink_urb_from_ep(hcd, urb);
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 0a373b3ae96a..ba68835d06a6 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -875,6 +875,8 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_DISTORTEC_JTAG_LOCK_PICK_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) },
+ /* Crucible Devices */
+ { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 049b6e715fa4..fa5d56038276 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1259,3 +1259,9 @@
* ATI command output: Cinterion MC55i
*/
#define FTDI_CINTERION_MC55I_PID 0xA951
+
+/*
+ * Product: Comet Caller ID decoder
+ * Manufacturer: Crucible Technologies
+ */
+#define FTDI_CT_COMET_PID 0x8e08
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e6f87b76c715..478adcfcdf26 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -288,6 +288,7 @@ static void option_instat_callback(struct urb *urb);
#define ALCATEL_VENDOR_ID 0x1bbb
#define ALCATEL_PRODUCT_X060S_X200 0x0000
#define ALCATEL_PRODUCT_X220_X500D 0x0017
+#define ALCATEL_PRODUCT_L100V 0x011e
#define PIRELLI_VENDOR_ID 0x1266
#define PIRELLI_PRODUCT_C100_1 0x1002
@@ -429,9 +430,12 @@ static void option_instat_callback(struct urb *urb);
#define MEDIATEK_VENDOR_ID 0x0e8d
#define MEDIATEK_PRODUCT_DC_1COM 0x00a0
#define MEDIATEK_PRODUCT_DC_4COM 0x00a5
+#define MEDIATEK_PRODUCT_DC_4COM2 0x00a7
#define MEDIATEK_PRODUCT_DC_5COM 0x00a4
#define MEDIATEK_PRODUCT_7208_1COM 0x7101
#define MEDIATEK_PRODUCT_7208_2COM 0x7102
+#define MEDIATEK_PRODUCT_7103_2COM 0x7103
+#define MEDIATEK_PRODUCT_7106_2COM 0x7106
#define MEDIATEK_PRODUCT_FP_1COM 0x0003
#define MEDIATEK_PRODUCT_FP_2COM 0x0023
#define MEDIATEK_PRODUCT_FPDC_1COM 0x0043
@@ -441,6 +445,10 @@ static void option_instat_callback(struct urb *urb);
#define CELLIENT_VENDOR_ID 0x2692
#define CELLIENT_PRODUCT_MEN200 0x9005
+/* Hyundai Petatel Inc. products */
+#define PETATEL_VENDOR_ID 0x1ff4
+#define PETATEL_PRODUCT_NP10T 0x600e
+
/* some devices interfaces need special handling due to a number of reasons */
enum option_blacklist_reason {
OPTION_BLACKLIST_NONE = 0,
@@ -923,7 +931,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
@@ -1190,6 +1199,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
},
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D) },
+ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
@@ -1294,7 +1305,12 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_2COM, 0x0a, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_1COM, 0x0a, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_2COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7103_2COM, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7106_2COM, 0x02, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
+ { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index e7068c508800..2e6e79714baa 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -23,6 +23,8 @@ source "drivers/gpu/drm/Kconfig"
source "drivers/gpu/stub/Kconfig"
+source "drivers/gpu/ion/Kconfig"
+
config VGASTATE
tristate
default n
diff --git a/drivers/video/ssd1307fb.c b/drivers/video/ssd1307fb.c
index 4d99dd7a6831..395cb6a8d8f3 100644
--- a/drivers/video/ssd1307fb.c
+++ b/drivers/video/ssd1307fb.c
@@ -145,8 +145,8 @@ static void ssd1307fb_update_display(struct ssd1307fb_par *par)
u32 page_length = SSD1307FB_WIDTH * i;
u32 index = page_length + (SSD1307FB_WIDTH * k + j) / 8;
u8 byte = *(vmem + index);
- u8 bit = byte & (1 << (7 - (j % 8)));
- bit = bit >> (7 - (j % 8));
+ u8 bit = byte & (1 << (j % 8));
+ bit = bit >> (j % 8);
buf |= bit << k;
}
ssd1307fb_write_data(par->client, buf);
diff --git a/fs/Kconfig b/fs/Kconfig
index cfe512fd1caf..28c63db1cc5e 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -203,6 +203,10 @@ source "fs/hfsplus/Kconfig"
source "fs/befs/Kconfig"
source "fs/bfs/Kconfig"
source "fs/efs/Kconfig"
+
+# Patched by YAFFS
+source "fs/yaffs2/Kconfig"
+
source "fs/jffs2/Kconfig"
# UBIFS File system configuration
source "fs/ubifs/Kconfig"
diff --git a/fs/Makefile b/fs/Makefile
index 9d53192236fc..5f1a68619a2e 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -127,3 +127,6 @@ obj-$(CONFIG_F2FS_FS) += f2fs/
obj-y += exofs/ # Multiple modules
obj-$(CONFIG_CEPH_FS) += ceph/
obj-$(CONFIG_PSTORE) += pstore/
+
+# Patched by YAFFS
+obj-$(CONFIG_YAFFS_FS) += yaffs2/
diff --git a/fs/buffer.c b/fs/buffer.c
index c017a2dfb909..b8cab4c88afb 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1711,7 +1711,8 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
do {
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
- submit_bh(write_op, bh);
+ submit_bh(write_op |
+ (buffer_meta(bh) << __REQ_META), bh);
nr_underway++;
}
bh = next;
@@ -1765,7 +1766,8 @@ recover:
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
clear_buffer_dirty(bh);
- submit_bh(write_op, bh);
+ submit_bh(write_op |
+ (buffer_meta(bh) << __REQ_META), bh);
nr_underway++;
}
bh = next;
@@ -2935,6 +2937,7 @@ static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
void *kaddr = kmap_atomic(bh->b_page);
memset(kaddr + bh_offset(bh) + bytes, 0, bh->b_size - bytes);
kunmap_atomic(kaddr);
+ flush_dcache_page(bh->b_page);
}
}
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 153bb1e42e63..a5f12b7e228d 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -176,7 +176,7 @@ static int debugfs_parse_options(char *data, struct debugfs_mount_opts *opts)
opts->uid = uid;
break;
case Opt_gid:
- if (match_octal(&args[0], &option))
+ if (match_int(&args[0], &option))
return -EINVAL;
gid = make_kgid(current_user_ns(), option);
if (!gid_valid(gid))
diff --git a/fs/exec.c b/fs/exec.c
index 18c45cac368f..20df02c1cc70 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -434,8 +434,9 @@ static int count(struct user_arg_ptr argv, int max)
if (IS_ERR(p))
return -EFAULT;
- if (i++ >= max)
+ if (i >= max)
return -E2BIG;
+ ++i;
if (fatal_signal_pending(current))
return -ERESTARTNOHAND;
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index b4323ba846b5..27e8adddbff7 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -107,6 +107,8 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
{
int err = 0;
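+	/* Tag as metadata so writeback can add REQ_META (see the fs/buffer.c change). */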
+ set_buffer_meta(bh);
+
if (ext4_handle_valid(handle)) {
err = jbd2_journal_dirty_metadata(handle, bh);
if (err) {
@@ -143,6 +145,8 @@ int __ext4_handle_dirty_super(const char *where, unsigned int line,
struct buffer_head *bh = EXT4_SB(sb)->s_sbh;
int err = 0;
+ set_buffer_meta(bh);
+
ext4_superblock_csum_set(sb);
if (ext4_handle_valid(handle)) {
err = jbd2_journal_dirty_metadata(handle, bh);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index cbfe13bf5b2a..8baf7e0c24bc 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -4308,8 +4308,10 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
err = __ext4_get_inode_loc(inode, &iloc, 0);
if (err)
return err;
- if (wbc->sync_mode == WB_SYNC_ALL)
+ if (wbc->sync_mode == WB_SYNC_ALL) {
+ set_buffer_meta(iloc.bh);
sync_dirty_buffer(iloc.bh);
+ }
if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
"IO error syncing inode");
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 58bf744dbf39..95837ce0bc8b 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -776,6 +776,13 @@ static int fat_ioctl_readdir(struct inode *inode, struct file *filp,
return ret;
}
+static int fat_ioctl_volume_id(struct inode *dir)
+{
+ struct super_block *sb = dir->i_sb;
+ struct msdos_sb_info *sbi = MSDOS_SB(sb);
+ return sbi->vol_id;
+}
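+/*
+ * Illustrative userspace use: the volume ID is the ioctl return value, e.g.
+ *	int id = ioctl(dirfd, VFAT_IOCTL_GET_VOLUME_ID, NULL);
+ */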
+
static long fat_dir_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
@@ -792,6 +799,8 @@ static long fat_dir_ioctl(struct file *filp, unsigned int cmd,
short_only = 0;
both = 1;
break;
+ case VFAT_IOCTL_GET_VOLUME_ID:
+ return fat_ioctl_volume_id(inode);
default:
return fat_generic_ioctl(filp, cmd, arg);
}
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 12701a567752..ea84a4943724 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -82,6 +82,7 @@ struct msdos_sb_info {
const void *dir_ops; /* Opaque; default directory operations */
int dir_per_block; /* dir entries per block */
int dir_per_block_bits; /* log2(dir_per_block) */
+ unsigned long vol_id; /* volume ID */
int fatent_shift;
struct fatent_operations *fatent_ops;
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index f8f491677a4a..a852f3f2d13d 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -1175,6 +1175,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
struct inode *fsinfo_inode = NULL;
struct buffer_head *bh;
struct fat_boot_sector *b;
+ struct fat_boot_bsx *bsx;
struct msdos_sb_info *sbi;
u16 logical_sector_size;
u32 total_sectors, total_clusters, fat_clusters, rootdir_sectors;
@@ -1320,6 +1321,8 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
goto out_fail;
}
+ bsx = (struct fat_boot_bsx *)(bh->b_data + FAT32_BSX_OFFSET);
+
fsinfo = (struct fat_boot_fsinfo *)fsinfo_bh->b_data;
if (!IS_FSINFO(fsinfo)) {
fat_msg(sb, KERN_WARNING, "Invalid FSINFO signature: "
@@ -1335,8 +1338,14 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
}
brelse(fsinfo_bh);
+ } else {
+ bsx = (struct fat_boot_bsx *)(bh->b_data + FAT16_BSX_OFFSET);
}
+ /* interpret volume ID as a little endian 32 bit integer */
+ sbi->vol_id = (((u32)bsx->vol_id[0]) | ((u32)bsx->vol_id[1] << 8) |
+ ((u32)bsx->vol_id[2] << 16) | ((u32)bsx->vol_id[3] << 24));
+
sbi->dir_per_block = sb->s_blocksize / sizeof(struct msdos_dir_entry);
sbi->dir_per_block_bits = ffs(sbi->dir_per_block) - 1;
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 310972b72a66..017de60f8ccd 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1164,7 +1164,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
if ((inode->i_state & flags) == flags)
return;
- if (unlikely(block_dump))
+ if (unlikely(block_dump > 1))
block_dump___mark_inode_dirty(inode);
spin_lock(&inode->i_lock);
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index c16335315e5d..dd70960a39a4 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -19,6 +19,7 @@
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
+#include <linux/freezer.h>
MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");
@@ -386,7 +387,10 @@ __acquires(fc->lock)
* Wait it out.
*/
spin_unlock(&fc->lock);
- wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
+
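+	/*
+	 * wait_event_freezable() may return before the condition is true
+	 * (for instance when interrupted for freezing), so keep waiting
+	 * until the request has actually finished.
+	 */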
+ while (req->state != FUSE_REQ_FINISHED)
+ wait_event_freezable(req->waitq,
+ req->state == FUSE_REQ_FINISHED);
spin_lock(&fc->lock);
if (!req->aborted)
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index a2862339323b..81cc7eaff863 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -446,7 +446,8 @@ int __log_start_commit(journal_t *journal, tid_t target)
* currently running transaction (if it exists). Otherwise,
* the target tid must be an old one.
*/
- if (journal->j_running_transaction &&
+ if (journal->j_commit_request != target &&
+ journal->j_running_transaction &&
journal->j_running_transaction->t_tid == target) {
/*
* We want a new commit: OK, mark the request and wakeup the
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 9b43ff77a51e..fcf7b79bc1bd 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -137,6 +137,12 @@ struct pid_entry {
NULL, &proc_single_file_operations, \
{ .proc_show = show } )
+/* ANDROID is for special files in /proc. */
+#define ANDROID(NAME, MODE, OTYPE) \
+ NOD(NAME, (S_IFREG|(MODE)), \
+ &proc_##OTYPE##_inode_operations, \
+ &proc_##OTYPE##_operations, {})
+
/*
* Count the number of hardlinks for the pid_entry table, excluding the .
* and .. links.
@@ -1046,7 +1052,15 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
if (has_capability_noaudit(current, CAP_SYS_RESOURCE))
task->signal->oom_score_adj_min = (short)oom_score_adj;
trace_oom_score_adj_update(task);
-
+ /*
+ * Scale /proc/pid/oom_adj appropriately ensuring that OOM_DISABLE is
+ * always attainable.
+ */
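+	/*
+	 * For example, with OOM_ADJUST_MAX 15 and OOM_SCORE_ADJ_MAX 1000,
+	 * an oom_score_adj of 500 maps to oom_adj 7.
+	 */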
+ if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
+ task->signal->oom_adj = OOM_DISABLE;
+ else
+ task->signal->oom_adj = (oom_score_adj * OOM_ADJUST_MAX) /
+ OOM_SCORE_ADJ_MAX;
err_sighand:
unlock_task_sighand(task, &flags);
err_task_lock:
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 9d863fb501f9..f2bc3dfd0b88 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -296,7 +296,7 @@ EXPORT_SYMBOL(seq_read);
* seq_lseek - ->llseek() method for sequential files.
* @file: the file in question
* @offset: new position
- * @origin: 0 for absolute, 1 for relative position
+ * @whence: 0 for absolute, 1 for relative position
*
* Ready-made ->f_op->llseek()
*/
diff --git a/fs/udf/super.c b/fs/udf/super.c
index d44fb568abe1..e9be396a558d 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -307,7 +307,8 @@ static void udf_sb_free_partitions(struct super_block *sb)
{
struct udf_sb_info *sbi = UDF_SB(sb);
int i;
-
+ if (sbi->s_partmaps == NULL)
+ return;
for (i = 0; i < sbi->s_partitions; i++)
udf_free_partition(&sbi->s_partmaps[i]);
kfree(sbi->s_partmaps);
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 26673a0b20e7..56d1614760cf 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -175,7 +175,7 @@ xfs_buf_get_maps(
bp->b_map_count = map_count;
if (map_count == 1) {
- bp->b_maps = &bp->b_map;
+ bp->b_maps = &bp->__b_map;
return 0;
}
@@ -193,7 +193,7 @@ static void
xfs_buf_free_maps(
struct xfs_buf *bp)
{
- if (bp->b_maps != &bp->b_map) {
+ if (bp->b_maps != &bp->__b_map) {
kmem_free(bp->b_maps);
bp->b_maps = NULL;
}
@@ -377,8 +377,8 @@ xfs_buf_allocate_memory(
}
use_alloc_page:
- start = BBTOB(bp->b_map.bm_bn) >> PAGE_SHIFT;
- end = (BBTOB(bp->b_map.bm_bn + bp->b_length) + PAGE_SIZE - 1)
+ start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
+ end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
>> PAGE_SHIFT;
page_count = end - start;
error = _xfs_buf_get_pages(bp, page_count, flags);
@@ -640,7 +640,7 @@ _xfs_buf_read(
xfs_buf_flags_t flags)
{
ASSERT(!(flags & XBF_WRITE));
- ASSERT(bp->b_map.bm_bn != XFS_BUF_DADDR_NULL);
+ ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
@@ -1709,7 +1709,7 @@ xfs_buf_cmp(
struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
xfs_daddr_t diff;
- diff = ap->b_map.bm_bn - bp->b_map.bm_bn;
+ diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
if (diff < 0)
return -1;
if (diff > 0)
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 23f5642480bb..433a12ed7b17 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -151,7 +151,7 @@ typedef struct xfs_buf {
struct page **b_pages; /* array of page pointers */
struct page *b_page_array[XB_PAGES]; /* inline pages */
struct xfs_buf_map *b_maps; /* compound buffer map */
- struct xfs_buf_map b_map; /* inline compound buffer map */
+ struct xfs_buf_map __b_map; /* inline compound buffer map */
int b_map_count;
int b_io_length; /* IO size in BBs */
atomic_t b_pin_count; /* pin count */
@@ -330,8 +330,8 @@ void xfs_buf_stale(struct xfs_buf *bp);
* In future, uncached buffers will pass the block number directly to the io
* request function and hence these macros will go away at that point.
*/
-#define XFS_BUF_ADDR(bp) ((bp)->b_map.bm_bn)
-#define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_map.bm_bn = (xfs_daddr_t)(bno))
+#define XFS_BUF_ADDR(bp) ((bp)->b_maps[0].bm_bn)
+#define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_maps[0].bm_bn = (xfs_daddr_t)(bno))
static inline void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
{
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index becf4a97efc6..77b09750e92c 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -71,7 +71,7 @@ xfs_buf_item_log_debug(
chunk_num = byte >> XFS_BLF_SHIFT;
word_num = chunk_num >> BIT_TO_WORD_SHIFT;
bit_num = chunk_num & (NBWORD - 1);
- wordp = &(bip->bli_format.blf_data_map[word_num]);
+ wordp = &(bip->__bli_format.blf_data_map[word_num]);
bit_set = *wordp & (1 << bit_num);
ASSERT(bit_set);
byte++;
@@ -237,7 +237,7 @@ xfs_buf_item_size(
* cancel flag in it.
*/
trace_xfs_buf_item_size_stale(bip);
- ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
+ ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
return bip->bli_format_count;
}
@@ -278,7 +278,7 @@ xfs_buf_item_format_segment(
uint buffer_offset;
/* copy the flags across from the base format item */
- blfp->blf_flags = bip->bli_format.blf_flags;
+ blfp->blf_flags = bip->__bli_format.blf_flags;
/*
* Base size is the actual size of the ondisk structure - it reflects
@@ -287,6 +287,17 @@ xfs_buf_item_format_segment(
*/
base_size = offsetof(struct xfs_buf_log_format, blf_data_map) +
(blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
+
+ nvecs = 0;
+ first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
+ if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
+ /*
+		 * If the map is not dirty in the transaction, mark
+ * the size as zero and do not advance the vector pointer.
+ */
+ goto out;
+ }
+
vecp->i_addr = blfp;
vecp->i_len = base_size;
vecp->i_type = XLOG_REG_TYPE_BFORMAT;
@@ -301,15 +312,13 @@ xfs_buf_item_format_segment(
*/
trace_xfs_buf_item_format_stale(bip);
ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
- blfp->blf_size = nvecs;
- return vecp;
+ goto out;
}
/*
* Fill in an iovec for each set of contiguous chunks.
*/
- first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
- ASSERT(first_bit != -1);
+
last_bit = first_bit;
nbits = 1;
for (;;) {
@@ -371,7 +380,8 @@ xfs_buf_item_format_segment(
nbits++;
}
}
- bip->bli_format.blf_size = nvecs;
+out:
+ blfp->blf_size = nvecs;
return vecp;
}
@@ -405,7 +415,7 @@ xfs_buf_item_format(
if (bip->bli_flags & XFS_BLI_INODE_BUF) {
if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
xfs_log_item_in_current_chkpt(lip)))
- bip->bli_format.blf_flags |= XFS_BLF_INODE_BUF;
+ bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
bip->bli_flags &= ~XFS_BLI_INODE_BUF;
}
@@ -485,7 +495,7 @@ xfs_buf_item_unpin(
ASSERT(bip->bli_flags & XFS_BLI_STALE);
ASSERT(xfs_buf_islocked(bp));
ASSERT(XFS_BUF_ISSTALE(bp));
- ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
+ ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
trace_xfs_buf_item_unpin_stale(bip);
@@ -601,7 +611,7 @@ xfs_buf_item_unlock(
{
struct xfs_buf_log_item *bip = BUF_ITEM(lip);
struct xfs_buf *bp = bip->bli_buf;
- int aborted;
+ int aborted, clean, i;
uint hold;
/* Clear the buffer's association with this transaction. */
@@ -631,7 +641,7 @@ xfs_buf_item_unlock(
*/
if (bip->bli_flags & XFS_BLI_STALE) {
trace_xfs_buf_item_unlock_stale(bip);
- ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
+ ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
if (!aborted) {
atomic_dec(&bip->bli_refcount);
return;
@@ -644,8 +654,15 @@ xfs_buf_item_unlock(
* If the buf item isn't tracking any data, free it, otherwise drop the
* reference we hold to it.
*/
- if (xfs_bitmap_empty(bip->bli_format.blf_data_map,
- bip->bli_format.blf_map_size))
+ clean = 1;
+ for (i = 0; i < bip->bli_format_count; i++) {
+ if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
+ bip->bli_formats[i].blf_map_size)) {
+ clean = 0;
+ break;
+ }
+ }
+ if (clean)
xfs_buf_item_relse(bp);
else
atomic_dec(&bip->bli_refcount);
@@ -716,7 +733,7 @@ xfs_buf_item_get_format(
bip->bli_format_count = count;
if (count == 1) {
- bip->bli_formats = &bip->bli_format;
+ bip->bli_formats = &bip->__bli_format;
return 0;
}
@@ -731,7 +748,7 @@ STATIC void
xfs_buf_item_free_format(
struct xfs_buf_log_item *bip)
{
- if (bip->bli_formats != &bip->bli_format) {
+ if (bip->bli_formats != &bip->__bli_format) {
kmem_free(bip->bli_formats);
bip->bli_formats = NULL;
}
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index 6850f49f4af3..16def435944a 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -104,7 +104,7 @@ typedef struct xfs_buf_log_item {
#endif
int bli_format_count; /* count of headers */
struct xfs_buf_log_format *bli_formats; /* array of in-log header ptrs */
- struct xfs_buf_log_format bli_format; /* embedded in-log header */
+ struct xfs_buf_log_format __bli_format; /* embedded in-log header */
} xfs_buf_log_item_t;
void xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index 7536faaa61e7..12afe07a91d7 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -355,10 +355,12 @@ xfs_dir2_block_addname(
/*
* If need to compact the leaf entries, do it now.
*/
- if (compact)
+ if (compact) {
xfs_dir2_block_compact(tp, bp, hdr, btp, blp, &needlog,
&lfloghigh, &lfloglow);
- else if (btp->stale) {
+ /* recalculate blp post-compaction */
+ blp = xfs_dir2_block_leaf_p(btp);
+ } else if (btp->stale) {
/*
* Set leaf logging boundaries to impossible state.
* For the no-stale case they're set explicitly.
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index 5f53e75409b8..8a59f8546552 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -784,11 +784,11 @@ xfs_qm_scall_getquota(
(XFS_IS_OQUOTA_ENFORCED(mp) &&
(dst->d_flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)))) &&
dst->d_id != 0) {
- if (((int) dst->d_bcount > (int) dst->d_blk_softlimit) &&
+ if ((dst->d_bcount > dst->d_blk_softlimit) &&
(dst->d_blk_softlimit > 0)) {
ASSERT(dst->d_btimer != 0);
}
- if (((int) dst->d_icount > (int) dst->d_ino_softlimit) &&
+ if ((dst->d_icount > dst->d_ino_softlimit) &&
(dst->d_ino_softlimit > 0)) {
ASSERT(dst->d_itimer != 0);
}
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 4fc17d479d42..3edf5dbee001 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -93,7 +93,7 @@ _xfs_trans_bjoin(
xfs_buf_item_init(bp, tp->t_mountp);
bip = bp->b_fspriv;
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
- ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
+ ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
if (reset_recur)
bip->bli_recur = 0;
@@ -432,7 +432,7 @@ xfs_trans_brelse(xfs_trans_t *tp,
bip = bp->b_fspriv;
ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
- ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
+ ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
ASSERT(atomic_read(&bip->bli_refcount) > 0);
trace_xfs_trans_brelse(bip);
@@ -519,7 +519,7 @@ xfs_trans_bhold(xfs_trans_t *tp,
ASSERT(bp->b_transp == tp);
ASSERT(bip != NULL);
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
- ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
+ ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
ASSERT(atomic_read(&bip->bli_refcount) > 0);
bip->bli_flags |= XFS_BLI_HOLD;
@@ -539,7 +539,7 @@ xfs_trans_bhold_release(xfs_trans_t *tp,
ASSERT(bp->b_transp == tp);
ASSERT(bip != NULL);
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
- ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
+ ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
ASSERT(atomic_read(&bip->bli_refcount) > 0);
ASSERT(bip->bli_flags & XFS_BLI_HOLD);
@@ -598,7 +598,7 @@ xfs_trans_log_buf(xfs_trans_t *tp,
bip->bli_flags &= ~XFS_BLI_STALE;
ASSERT(XFS_BUF_ISSTALE(bp));
XFS_BUF_UNSTALE(bp);
- bip->bli_format.blf_flags &= ~XFS_BLF_CANCEL;
+ bip->__bli_format.blf_flags &= ~XFS_BLF_CANCEL;
}
tp->t_flags |= XFS_TRANS_DIRTY;
@@ -643,6 +643,7 @@ xfs_trans_binval(
xfs_buf_t *bp)
{
xfs_buf_log_item_t *bip = bp->b_fspriv;
+ int i;
ASSERT(bp->b_transp == tp);
ASSERT(bip != NULL);
@@ -657,8 +658,8 @@ xfs_trans_binval(
*/
ASSERT(XFS_BUF_ISSTALE(bp));
ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
- ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_INODE_BUF));
- ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
+ ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_INODE_BUF));
+ ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
ASSERT(bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY);
ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
return;
@@ -668,10 +669,12 @@ xfs_trans_binval(
bip->bli_flags |= XFS_BLI_STALE;
bip->bli_flags &= ~(XFS_BLI_INODE_BUF | XFS_BLI_LOGGED | XFS_BLI_DIRTY);
- bip->bli_format.blf_flags &= ~XFS_BLF_INODE_BUF;
- bip->bli_format.blf_flags |= XFS_BLF_CANCEL;
- memset((char *)(bip->bli_format.blf_data_map), 0,
- (bip->bli_format.blf_map_size * sizeof(uint)));
+ bip->__bli_format.blf_flags &= ~XFS_BLF_INODE_BUF;
+ bip->__bli_format.blf_flags |= XFS_BLF_CANCEL;
+ for (i = 0; i < bip->bli_format_count; i++) {
+ memset(bip->bli_formats[i].blf_data_map, 0,
+ (bip->bli_formats[i].blf_map_size * sizeof(uint)));
+ }
bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
tp->t_flags |= XFS_TRANS_DIRTY;
}
@@ -775,5 +778,5 @@ xfs_trans_dquot_buf(
type == XFS_BLF_GDQUOT_BUF);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
- bip->bli_format.blf_flags |= type;
+ bip->__bli_format.blf_flags |= type;
}
diff --git a/fs/yaffs2/Kconfig b/fs/yaffs2/Kconfig
new file mode 100644
index 000000000000..635414059997
--- /dev/null
+++ b/fs/yaffs2/Kconfig
@@ -0,0 +1,161 @@
+#
+# YAFFS file system configurations
+#
+
+config YAFFS_FS
+ tristate "YAFFS2 file system support"
+ default n
+ depends on MTD_BLOCK
+ select YAFFS_YAFFS1
+ select YAFFS_YAFFS2
+ help
+ YAFFS2, or Yet Another Flash Filing System, is a filing system
+ optimised for NAND Flash chips.
+
+ To compile the YAFFS2 file system support as a module, choose M
+ here: the module will be called yaffs2.
+
+ If unsure, say N.
+
+ Further information on YAFFS2 is available at
+ <http://www.aleph1.co.uk/yaffs/>.
+
+config YAFFS_YAFFS1
+ bool "512 byte / page devices"
+ depends on YAFFS_FS
+ default y
+ help
+ Enable YAFFS1 support -- yaffs for 512 byte / page devices
+
+ Not needed for 2K-page devices.
+
+ If unsure, say Y.
+
+config YAFFS_9BYTE_TAGS
+ bool "Use older-style on-NAND data format with pageStatus byte"
+ depends on YAFFS_YAFFS1
+ default n
+ help
+
+ Older-style on-NAND data format has a "pageStatus" byte to record
+ chunk/page state. This byte is zero when the page is discarded.
+ Choose this option if you have existing on-NAND data using this
+ format that you need to continue to support. New data written
+ also uses the older-style format. Note: Use of this option
+ generally requires that MTD's oob layout be adjusted to use the
+ older-style format. See notes on tags formats and MTD versions
+ in yaffs_mtdif1.c.
+
+ If unsure, say N.
+
+config YAFFS_DOES_ECC
+ bool "Lets Yaffs do its own ECC"
+ depends on YAFFS_FS && YAFFS_YAFFS1 && !YAFFS_9BYTE_TAGS
+ default n
+ help
+ This enables Yaffs to use its own ECC functions instead of using
+ the ones from the generic MTD-NAND driver.
+
+ If unsure, say N.
+
+config YAFFS_ECC_WRONG_ORDER
+ bool "Use the same ecc byte order as Steven Hill's nand_ecc.c"
+ depends on YAFFS_FS && YAFFS_DOES_ECC && !YAFFS_9BYTE_TAGS
+ default n
+ help
+ This makes yaffs_ecc.c use the same ecc byte order as Steven
+ Hill's nand_ecc.c. If not set, then you get the same ecc byte
+ order as SmartMedia.
+
+ If unsure, say N.
+
+config YAFFS_YAFFS2
+ bool "2048 byte (or larger) / page devices"
+ depends on YAFFS_FS
+ default y
+ help
+ Enable YAFFS2 support -- yaffs for >= 2K bytes per page devices
+
+ If unsure, say Y.
+
+config YAFFS_AUTO_YAFFS2
+ bool "Autoselect yaffs2 format"
+ depends on YAFFS_YAFFS2
+ default y
+ help
+	  Without this, you need to explicitly use yaffs2 as the file
+ system type. With this, you can say "yaffs" and yaffs or yaffs2
+ will be used depending on the device page size (yaffs on
+ 512-byte page devices, yaffs2 on 2K page devices).
+
+ If unsure, say Y.
+
+config YAFFS_DISABLE_TAGS_ECC
+ bool "Disable YAFFS from doing ECC on tags by default"
+ depends on YAFFS_FS && YAFFS_YAFFS2
+ default n
+ help
+	  This defaults Yaffs to not doing its own ECC calculations on tags,
+	  relying on the MTD instead.
+ This behavior can also be overridden with tags_ecc_on and
+ tags_ecc_off mount options.
+
+ If unsure, say N.
+
+config YAFFS_ALWAYS_CHECK_CHUNK_ERASED
+ bool "Force chunk erase check"
+ depends on YAFFS_FS
+ default n
+ help
+ Normally YAFFS only checks chunks before writing until an erased
+ chunk is found. This helps to detect any partially written
+ chunks that might have happened due to power loss.
+
+ Enabling this forces on the test that chunks are erased in flash
+ before writing to them. This takes more time but is potentially
+ a bit more secure.
+
+	  Suggest setting this to Y during development while ironing out
+	  driver issues, and to N if you want faster writing.
+
+ If unsure, say Y.
+
+config YAFFS_EMPTY_LOST_AND_FOUND
+ bool "Empty lost and found on boot"
+ depends on YAFFS_FS
+ default n
+ help
+	  If this is enabled, then the contents of lost and found are
+	  automatically discarded at mount.
+
+ If unsure, say N.
+
+config YAFFS_DISABLE_BLOCK_REFRESHING
+ bool "Disable yaffs2 block refreshing"
+ depends on YAFFS_FS
+ default n
+ help
+ If this is set, then block refreshing is disabled.
+ Block refreshing infrequently refreshes the oldest block in
+ a yaffs2 file system. This mechanism helps to refresh flash to
+ mitigate against data loss. This is particularly useful for MLC.
+
+ If unsure, say N.
+
+config YAFFS_DISABLE_BACKGROUND
+ bool "Disable yaffs2 background processing"
+ depends on YAFFS_FS
+ default n
+ help
+ If this is set, then background processing is disabled.
+ Background processing makes many foreground activities faster.
+
+ If unsure, say N.
+
+config YAFFS_XATTR
+ bool "Enable yaffs2 xattr support"
+ depends on YAFFS_FS
+ default y
+ help
+ If this is set then yaffs2 will provide xattr support.
+ If unsure, say Y.
diff --git a/fs/yaffs2/Makefile b/fs/yaffs2/Makefile
new file mode 100644
index 000000000000..e63a28aa3ed6
--- /dev/null
+++ b/fs/yaffs2/Makefile
@@ -0,0 +1,17 @@
+#
+# Makefile for the linux YAFFS filesystem routines.
+#
+
+obj-$(CONFIG_YAFFS_FS) += yaffs.o
+
+yaffs-y := yaffs_ecc.o yaffs_vfs.o yaffs_guts.o yaffs_checkptrw.o
+yaffs-y += yaffs_packedtags1.o yaffs_packedtags2.o yaffs_nand.o
+yaffs-y += yaffs_tagscompat.o yaffs_tagsvalidity.o
+yaffs-y += yaffs_mtdif.o yaffs_mtdif1.o yaffs_mtdif2.o
+yaffs-y += yaffs_nameval.o yaffs_attribs.o
+yaffs-y += yaffs_allocator.o
+yaffs-y += yaffs_yaffs1.o
+yaffs-y += yaffs_yaffs2.o
+yaffs-y += yaffs_bitmap.o
+yaffs-y += yaffs_verify.o
+
diff --git a/fs/yaffs2/yaffs_allocator.c b/fs/yaffs2/yaffs_allocator.c
new file mode 100644
index 000000000000..f9cd5becd8f4
--- /dev/null
+++ b/fs/yaffs2/yaffs_allocator.c
@@ -0,0 +1,396 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_allocator.h"
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+#include "yportenv.h"
+
+#ifdef CONFIG_YAFFS_KMALLOC_ALLOCATOR
+
+void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev)
+{
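+	/* dev is intentionally unused here; the self-assignment below just avoids compiler warnings. */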
+ dev = dev;
+}
+
+void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ dev = dev;
+}
+
+struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev)
+{
+ return (struct yaffs_tnode *)kmalloc(dev->tnode_size, GFP_NOFS);
+}
+
+void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
+{
+ dev = dev;
+ kfree(tn);
+}
+
+void yaffs_init_raw_objs(struct yaffs_dev *dev)
+{
+ dev = dev;
+}
+
+void yaffs_deinit_raw_objs(struct yaffs_dev *dev)
+{
+ dev = dev;
+}
+
+struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev)
+{
+ dev = dev;
+	return (struct yaffs_obj *)kmalloc(sizeof(struct yaffs_obj), GFP_NOFS);
+}
+
+void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj)
+{
+
+ dev = dev;
+ kfree(obj);
+}
+
+#else
+
+struct yaffs_tnode_list {
+ struct yaffs_tnode_list *next;
+ struct yaffs_tnode *tnodes;
+};
+
+struct yaffs_obj_list {
+ struct yaffs_obj_list *next;
+ struct yaffs_obj *objects;
+};
+
+struct yaffs_allocator {
+ int n_tnodes_created;
+ struct yaffs_tnode *free_tnodes;
+ int n_free_tnodes;
+ struct yaffs_tnode_list *alloc_tnode_list;
+
+ int n_obj_created;
+ struct yaffs_obj *free_objs;
+ int n_free_objects;
+
+ struct yaffs_obj_list *allocated_obj_list;
+};
+
+static void yaffs_deinit_raw_tnodes(struct yaffs_dev *dev)
+{
+
+ struct yaffs_allocator *allocator =
+ (struct yaffs_allocator *)dev->allocator;
+
+ struct yaffs_tnode_list *tmp;
+
+ if (!allocator) {
+ YBUG();
+ return;
+ }
+
+ while (allocator->alloc_tnode_list) {
+ tmp = allocator->alloc_tnode_list->next;
+
+ kfree(allocator->alloc_tnode_list->tnodes);
+ kfree(allocator->alloc_tnode_list);
+ allocator->alloc_tnode_list = tmp;
+
+ }
+
+ allocator->free_tnodes = NULL;
+ allocator->n_free_tnodes = 0;
+ allocator->n_tnodes_created = 0;
+}
+
+static void yaffs_init_raw_tnodes(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (allocator) {
+ allocator->alloc_tnode_list = NULL;
+ allocator->free_tnodes = NULL;
+ allocator->n_free_tnodes = 0;
+ allocator->n_tnodes_created = 0;
+ } else {
+ YBUG();
+ }
+}
+
+static int yaffs_create_tnodes(struct yaffs_dev *dev, int n_tnodes)
+{
+ struct yaffs_allocator *allocator =
+ (struct yaffs_allocator *)dev->allocator;
+ int i;
+ struct yaffs_tnode *new_tnodes;
+ u8 *mem;
+ struct yaffs_tnode *curr;
+ struct yaffs_tnode *next;
+ struct yaffs_tnode_list *tnl;
+
+ if (!allocator) {
+ YBUG();
+ return YAFFS_FAIL;
+ }
+
+ if (n_tnodes < 1)
+ return YAFFS_OK;
+
+ /* make these things */
+
+ new_tnodes = kmalloc(n_tnodes * dev->tnode_size, GFP_NOFS);
+ mem = (u8 *) new_tnodes;
+
+ if (!new_tnodes) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs: Could not allocate Tnodes");
+ return YAFFS_FAIL;
+ }
+
+ /* New hookup for wide tnodes */
+ for (i = 0; i < n_tnodes - 1; i++) {
+ curr = (struct yaffs_tnode *)&mem[i * dev->tnode_size];
+ next = (struct yaffs_tnode *)&mem[(i + 1) * dev->tnode_size];
+ curr->internal[0] = next;
+ }
+
+ curr = (struct yaffs_tnode *)&mem[(n_tnodes - 1) * dev->tnode_size];
+ curr->internal[0] = allocator->free_tnodes;
+ allocator->free_tnodes = (struct yaffs_tnode *)mem;
+
+ allocator->n_free_tnodes += n_tnodes;
+ allocator->n_tnodes_created += n_tnodes;
+
+ /* Now add this bunch of tnodes to a list for freeing up.
+ * NB If we can't add this to the management list it isn't fatal
+ * but it just means we can't free this bunch of tnodes later.
+ */
+
+ tnl = kmalloc(sizeof(struct yaffs_tnode_list), GFP_NOFS);
+ if (!tnl) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Could not add tnodes to management list");
+ return YAFFS_FAIL;
+ } else {
+ tnl->tnodes = new_tnodes;
+ tnl->next = allocator->alloc_tnode_list;
+ allocator->alloc_tnode_list = tnl;
+ }
+
+	yaffs_trace(YAFFS_TRACE_ALLOCATE, "Tnodes added");
+
+ return YAFFS_OK;
+}
+
+struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator =
+ (struct yaffs_allocator *)dev->allocator;
+ struct yaffs_tnode *tn = NULL;
+
+ if (!allocator) {
+ YBUG();
+ return NULL;
+ }
+
+ /* If there are none left make more */
+ if (!allocator->free_tnodes)
+ yaffs_create_tnodes(dev, YAFFS_ALLOCATION_NTNODES);
+
+ if (allocator->free_tnodes) {
+ tn = allocator->free_tnodes;
+ allocator->free_tnodes = allocator->free_tnodes->internal[0];
+ allocator->n_free_tnodes--;
+ }
+
+ return tn;
+}
+
+/* FreeTnode frees up a tnode and puts it back on the free list */
+void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator) {
+ YBUG();
+ return;
+ }
+
+ if (tn) {
+ tn->internal[0] = allocator->free_tnodes;
+ allocator->free_tnodes = tn;
+ allocator->n_free_tnodes++;
+ }
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+}
+
+static void yaffs_init_raw_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (allocator) {
+ allocator->allocated_obj_list = NULL;
+ allocator->free_objs = NULL;
+ allocator->n_free_objects = 0;
+ } else {
+ YBUG();
+ }
+}
+
+static void yaffs_deinit_raw_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+ struct yaffs_obj_list *tmp;
+
+ if (!allocator) {
+ YBUG();
+ return;
+ }
+
+ while (allocator->allocated_obj_list) {
+ tmp = allocator->allocated_obj_list->next;
+ kfree(allocator->allocated_obj_list->objects);
+ kfree(allocator->allocated_obj_list);
+
+ allocator->allocated_obj_list = tmp;
+ }
+
+ allocator->free_objs = NULL;
+ allocator->n_free_objects = 0;
+ allocator->n_obj_created = 0;
+}
+
+static int yaffs_create_free_objs(struct yaffs_dev *dev, int n_obj)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ int i;
+ struct yaffs_obj *new_objs;
+ struct yaffs_obj_list *list;
+
+ if (!allocator) {
+ YBUG();
+ return YAFFS_FAIL;
+ }
+
+ if (n_obj < 1)
+ return YAFFS_OK;
+
+ /* make these things */
+ new_objs = kmalloc(n_obj * sizeof(struct yaffs_obj), GFP_NOFS);
+ list = kmalloc(sizeof(struct yaffs_obj_list), GFP_NOFS);
+
+ if (!new_objs || !list) {
+ if (new_objs) {
+ kfree(new_objs);
+ new_objs = NULL;
+ }
+ if (list) {
+ kfree(list);
+ list = NULL;
+ }
+ yaffs_trace(YAFFS_TRACE_ALLOCATE,
+ "Could not allocate more objects");
+ return YAFFS_FAIL;
+ }
+
+ /* Hook them into the free list */
+ for (i = 0; i < n_obj - 1; i++) {
+ new_objs[i].siblings.next =
+ (struct list_head *)(&new_objs[i + 1]);
+ }
+
+ new_objs[n_obj - 1].siblings.next = (void *)allocator->free_objs;
+ allocator->free_objs = new_objs;
+ allocator->n_free_objects += n_obj;
+ allocator->n_obj_created += n_obj;
+
+ /* Now add this bunch of Objects to a list for freeing up. */
+
+ list->objects = new_objs;
+ list->next = allocator->allocated_obj_list;
+ allocator->allocated_obj_list = list;
+
+ return YAFFS_OK;
+}
+
+struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator) {
+ YBUG();
+ return obj;
+ }
+
+ /* If there are none left make more */
+ if (!allocator->free_objs)
+ yaffs_create_free_objs(dev, YAFFS_ALLOCATION_NOBJECTS);
+
+ if (allocator->free_objs) {
+ obj = allocator->free_objs;
+ allocator->free_objs =
+ (struct yaffs_obj *)(allocator->free_objs->siblings.next);
+ allocator->n_free_objects--;
+ }
+
+ return obj;
+}
+
+void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj)
+{
+
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator)
+ YBUG();
+ else {
+ /* Link into the free list. */
+ obj->siblings.next = (struct list_head *)(allocator->free_objs);
+ allocator->free_objs = obj;
+ allocator->n_free_objects++;
+ }
+}
+
+void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ if (dev->allocator) {
+ yaffs_deinit_raw_tnodes(dev);
+ yaffs_deinit_raw_objs(dev);
+
+ kfree(dev->allocator);
+ dev->allocator = NULL;
+ } else {
+ YBUG();
+ }
+}
+
+void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator;
+
+ if (!dev->allocator) {
+ allocator = kmalloc(sizeof(struct yaffs_allocator), GFP_NOFS);
+ if (allocator) {
+ dev->allocator = allocator;
+ yaffs_init_raw_tnodes(dev);
+ yaffs_init_raw_objs(dev);
+ }
+ } else {
+ YBUG();
+ }
+}
+
+#endif
diff --git a/fs/yaffs2/yaffs_allocator.h b/fs/yaffs2/yaffs_allocator.h
new file mode 100644
index 000000000000..4d5f2aec89ff
--- /dev/null
+++ b/fs/yaffs2/yaffs_allocator.h
@@ -0,0 +1,30 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_ALLOCATOR_H__
+#define __YAFFS_ALLOCATOR_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev);
+void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev);
+
+struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev);
+void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn);
+
+struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev);
+void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj);
+
+#endif
diff --git a/fs/yaffs2/yaffs_attribs.c b/fs/yaffs2/yaffs_attribs.c
new file mode 100644
index 000000000000..9b47d376310c
--- /dev/null
+++ b/fs/yaffs2/yaffs_attribs.c
@@ -0,0 +1,124 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_attribs.h"
+
+void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh)
+{
+ obj->yst_uid = oh->yst_uid;
+ obj->yst_gid = oh->yst_gid;
+ obj->yst_atime = oh->yst_atime;
+ obj->yst_mtime = oh->yst_mtime;
+ obj->yst_ctime = oh->yst_ctime;
+ obj->yst_rdev = oh->yst_rdev;
+}
+
+void yaffs_load_attribs_oh(struct yaffs_obj_hdr *oh, struct yaffs_obj *obj)
+{
+ oh->yst_uid = obj->yst_uid;
+ oh->yst_gid = obj->yst_gid;
+ oh->yst_atime = obj->yst_atime;
+ oh->yst_mtime = obj->yst_mtime;
+ oh->yst_ctime = obj->yst_ctime;
+ oh->yst_rdev = obj->yst_rdev;
+
+}
+
+void yaffs_load_current_time(struct yaffs_obj *obj, int do_a, int do_c)
+{
+ obj->yst_mtime = Y_CURRENT_TIME;
+ if (do_a)
+ obj->yst_atime = obj->yst_mtime;
+ if (do_c)
+ obj->yst_ctime = obj->yst_mtime;
+}
+
+void yaffs_attribs_init(struct yaffs_obj *obj, u32 gid, u32 uid, u32 rdev)
+{
+ yaffs_load_current_time(obj, 1, 1);
+ obj->yst_rdev = rdev;
+ obj->yst_uid = uid;
+ obj->yst_gid = gid;
+}
+
+loff_t yaffs_get_file_size(struct yaffs_obj *obj)
+{
+ YCHAR *alias = NULL;
+ obj = yaffs_get_equivalent_obj(obj);
+
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ return obj->variant.file_variant.file_size;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ alias = obj->variant.symlink_variant.alias;
+ if (!alias)
+ return 0;
+ return strnlen(alias, YAFFS_MAX_ALIAS_LENGTH);
+ default:
+ return 0;
+ }
+}
+
+int yaffs_set_attribs(struct yaffs_obj *obj, struct iattr *attr)
+{
+ unsigned int valid = attr->ia_valid;
+
+ if (valid & ATTR_MODE)
+ obj->yst_mode = attr->ia_mode;
+ if (valid & ATTR_UID)
+ obj->yst_uid = attr->ia_uid;
+ if (valid & ATTR_GID)
+ obj->yst_gid = attr->ia_gid;
+
+ if (valid & ATTR_ATIME)
+ obj->yst_atime = Y_TIME_CONVERT(attr->ia_atime);
+ if (valid & ATTR_CTIME)
+ obj->yst_ctime = Y_TIME_CONVERT(attr->ia_ctime);
+ if (valid & ATTR_MTIME)
+ obj->yst_mtime = Y_TIME_CONVERT(attr->ia_mtime);
+
+ if (valid & ATTR_SIZE)
+ yaffs_resize_file(obj, attr->ia_size);
+
+ yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
+
+ return YAFFS_OK;
+
+}
+
+int yaffs_get_attribs(struct yaffs_obj *obj, struct iattr *attr)
+{
+ unsigned int valid = 0;
+
+ attr->ia_mode = obj->yst_mode;
+ valid |= ATTR_MODE;
+ attr->ia_uid = obj->yst_uid;
+ valid |= ATTR_UID;
+ attr->ia_gid = obj->yst_gid;
+ valid |= ATTR_GID;
+
+ Y_TIME_CONVERT(attr->ia_atime) = obj->yst_atime;
+ valid |= ATTR_ATIME;
+ Y_TIME_CONVERT(attr->ia_ctime) = obj->yst_ctime;
+ valid |= ATTR_CTIME;
+ Y_TIME_CONVERT(attr->ia_mtime) = obj->yst_mtime;
+ valid |= ATTR_MTIME;
+
+ attr->ia_size = yaffs_get_file_size(obj);
+ valid |= ATTR_SIZE;
+
+ attr->ia_valid = valid;
+
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_attribs.h b/fs/yaffs2/yaffs_attribs.h
new file mode 100644
index 000000000000..33d541d69441
--- /dev/null
+++ b/fs/yaffs2/yaffs_attribs.h
@@ -0,0 +1,28 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_ATTRIBS_H__
+#define __YAFFS_ATTRIBS_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh);
+void yaffs_load_attribs_oh(struct yaffs_obj_hdr *oh, struct yaffs_obj *obj);
+void yaffs_attribs_init(struct yaffs_obj *obj, u32 gid, u32 uid, u32 rdev);
+void yaffs_load_current_time(struct yaffs_obj *obj, int do_a, int do_c);
+int yaffs_set_attribs(struct yaffs_obj *obj, struct iattr *attr);
+int yaffs_get_attribs(struct yaffs_obj *obj, struct iattr *attr);
+
+#endif
diff --git a/fs/yaffs2/yaffs_bitmap.c b/fs/yaffs2/yaffs_bitmap.c
new file mode 100644
index 000000000000..7df42cd0066e
--- /dev/null
+++ b/fs/yaffs2/yaffs_bitmap.c
@@ -0,0 +1,98 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_bitmap.h"
+#include "yaffs_trace.h"
+/*
+ * Chunk bitmap manipulations
+ */
+
+static inline u8 *yaffs_block_bits(struct yaffs_dev *dev, int blk)
+{
+ if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "BlockBits block %d is not valid",
+ blk);
+ YBUG();
+ }
+ return dev->chunk_bits +
+ (dev->chunk_bit_stride * (blk - dev->internal_start_block));
+}
+
+void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk)
+{
+ if (blk < dev->internal_start_block || blk > dev->internal_end_block ||
+ chunk < 0 || chunk >= dev->param.chunks_per_block) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Chunk Id (%d:%d) invalid",
+ blk, chunk);
+ YBUG();
+ }
+}
+
+void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+ memset(blk_bits, 0, dev->chunk_bit_stride);
+}
+
+void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+ yaffs_verify_chunk_bit_id(dev, blk, chunk);
+
+ blk_bits[chunk / 8] &= ~(1 << (chunk & 7));
+}
+
+void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+ yaffs_verify_chunk_bit_id(dev, blk, chunk);
+
+ blk_bits[chunk / 8] |= (1 << (chunk & 7));
+}
+
+int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+ yaffs_verify_chunk_bit_id(dev, blk, chunk);
+
+ return (blk_bits[chunk / 8] & (1 << (chunk & 7))) ? 1 : 0;
+}
+
+int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+ int i;
+ for (i = 0; i < dev->chunk_bit_stride; i++) {
+ if (*blk_bits)
+ return 1;
+ blk_bits++;
+ }
+ return 0;
+}
+
+int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+ int i;
+ int n = 0;
+
+ for (i = 0; i < dev->chunk_bit_stride; i++, blk_bits++)
+ n += hweight8(*blk_bits);
+
+ return n;
+}
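
To see the indexing used throughout this file in isolation: chunk / 8 selects the byte in the per-block bitmap and chunk & 7 selects the bit within it. A small self-contained model in ordinary user-space C, with no struct yaffs_dev involved:

#include <stdio.h>
#include <string.h>

/* Stand-alone model of the per-block chunk bitmap: one bit per chunk. */
static void set_chunk(unsigned char *bits, int chunk)
{
	bits[chunk / 8] |= 1 << (chunk & 7);
}

static int check_chunk(const unsigned char *bits, int chunk)
{
	return (bits[chunk / 8] >> (chunk & 7)) & 1;
}

int main(void)
{
	unsigned char bits[8];		/* enough for 64 chunks per block */

	memset(bits, 0, sizeof(bits));
	set_chunk(bits, 0);
	set_chunk(bits, 13);		/* byte 1, bit 5 */
	printf("%d %d %d\n", check_chunk(bits, 0),
	       check_chunk(bits, 13), check_chunk(bits, 14));	/* prints: 1 1 0 */
	return 0;
}
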
diff --git a/fs/yaffs2/yaffs_bitmap.h b/fs/yaffs2/yaffs_bitmap.h
new file mode 100644
index 000000000000..cf9ea58da0d9
--- /dev/null
+++ b/fs/yaffs2/yaffs_bitmap.h
@@ -0,0 +1,33 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/*
+ * Chunk bitmap manipulations
+ */
+
+#ifndef __YAFFS_BITMAP_H__
+#define __YAFFS_BITMAP_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk);
+void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk);
+void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
+void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
+int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
+int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk);
+int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk);
+
+#endif
diff --git a/fs/yaffs2/yaffs_checkptrw.c b/fs/yaffs2/yaffs_checkptrw.c
new file mode 100644
index 000000000000..4e40f437e655
--- /dev/null
+++ b/fs/yaffs2/yaffs_checkptrw.c
@@ -0,0 +1,415 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_checkptrw.h"
+#include "yaffs_getblockinfo.h"
+
+static int yaffs2_checkpt_space_ok(struct yaffs_dev *dev)
+{
+ int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "checkpt blocks_avail = %d", blocks_avail);
+
+ return (blocks_avail <= 0) ? 0 : 1;
+}
+
+static int yaffs_checkpt_erase(struct yaffs_dev *dev)
+{
+ int i;
+
+ if (!dev->param.erase_fn)
+ return 0;
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "checking blocks %d to %d",
+ dev->internal_start_block, dev->internal_end_block);
+
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, i);
+ if (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "erasing checkpt block %d", i);
+
+ dev->n_erasures++;
+
+			if (dev->param.erase_fn(dev,
+					i - dev->block_offset /* realign */)) {
+ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
+ dev->n_erased_blocks++;
+ dev->n_free_chunks +=
+ dev->param.chunks_per_block;
+ } else {
+ dev->param.bad_block_fn(dev, i);
+ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
+ }
+ }
+ }
+
+ dev->blocks_in_checkpt = 0;
+
+ return 1;
+}
+
+static void yaffs2_checkpt_find_erased_block(struct yaffs_dev *dev)
+{
+ int i;
+ int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks;
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "allocating checkpt block: erased %d reserved %d avail %d next %d ",
+ dev->n_erased_blocks, dev->param.n_reserved_blocks,
+ blocks_avail, dev->checkpt_next_block);
+
+ if (dev->checkpt_next_block >= 0 &&
+ dev->checkpt_next_block <= dev->internal_end_block &&
+ blocks_avail > 0) {
+
+ for (i = dev->checkpt_next_block; i <= dev->internal_end_block;
+ i++) {
+ struct yaffs_block_info *bi =
+ yaffs_get_block_info(dev, i);
+ if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
+ dev->checkpt_next_block = i + 1;
+ dev->checkpt_cur_block = i;
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "allocating checkpt block %d", i);
+ return;
+ }
+ }
+ }
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT, "out of checkpt blocks");
+
+ dev->checkpt_next_block = -1;
+ dev->checkpt_cur_block = -1;
+}
+
+static void yaffs2_checkpt_find_block(struct yaffs_dev *dev)
+{
+ int i;
+ struct yaffs_ext_tags tags;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "find next checkpt block: start: blocks %d next %d",
+ dev->blocks_in_checkpt, dev->checkpt_next_block);
+
+ if (dev->blocks_in_checkpt < dev->checkpt_max_blocks)
+ for (i = dev->checkpt_next_block; i <= dev->internal_end_block;
+ i++) {
+ int chunk = i * dev->param.chunks_per_block;
+ int realigned_chunk = chunk - dev->chunk_offset;
+
+ dev->param.read_chunk_tags_fn(dev, realigned_chunk,
+ NULL, &tags);
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "find next checkpt block: search: block %d oid %d seq %d eccr %d",
+ i, tags.obj_id, tags.seq_number,
+ tags.ecc_result);
+
+ if (tags.seq_number == YAFFS_SEQUENCE_CHECKPOINT_DATA) {
+ /* Right kind of block */
+ dev->checkpt_next_block = tags.obj_id;
+ dev->checkpt_cur_block = i;
+				dev->checkpt_block_list[dev->blocks_in_checkpt] = i;
+ dev->blocks_in_checkpt++;
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "found checkpt block %d", i);
+ return;
+ }
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT, "found no more checkpt blocks");
+
+ dev->checkpt_next_block = -1;
+ dev->checkpt_cur_block = -1;
+}
+
+int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing)
+{
+
+ dev->checkpt_open_write = writing;
+
+ /* Got the functions we need? */
+ if (!dev->param.write_chunk_tags_fn ||
+ !dev->param.read_chunk_tags_fn ||
+ !dev->param.erase_fn || !dev->param.bad_block_fn)
+ return 0;
+
+ if (writing && !yaffs2_checkpt_space_ok(dev))
+ return 0;
+
+ if (!dev->checkpt_buffer)
+ dev->checkpt_buffer =
+ kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
+ if (!dev->checkpt_buffer)
+ return 0;
+
+ dev->checkpt_page_seq = 0;
+ dev->checkpt_byte_count = 0;
+ dev->checkpt_sum = 0;
+ dev->checkpt_xor = 0;
+ dev->checkpt_cur_block = -1;
+ dev->checkpt_cur_chunk = -1;
+ dev->checkpt_next_block = dev->internal_start_block;
+
+ /* Erase all the blocks in the checkpoint area */
+ if (writing) {
+ memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
+ dev->checkpt_byte_offs = 0;
+ return yaffs_checkpt_erase(dev);
+ } else {
+ int i;
+ /* Set to a value that will kick off a read */
+ dev->checkpt_byte_offs = dev->data_bytes_per_chunk;
+		/* A checkpoint block list with one entry per 16 blocks is
+		 * (hopefully) way more than we need */
+ dev->blocks_in_checkpt = 0;
+ dev->checkpt_max_blocks =
+ (dev->internal_end_block - dev->internal_start_block) / 16 +
+ 2;
+ dev->checkpt_block_list =
+ kmalloc(sizeof(int) * dev->checkpt_max_blocks, GFP_NOFS);
+ if (!dev->checkpt_block_list)
+ return 0;
+
+ for (i = 0; i < dev->checkpt_max_blocks; i++)
+ dev->checkpt_block_list[i] = -1;
+ }
+
+ return 1;
+}
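
As a worked example of the sizing comment above: a partition whose internal blocks run from 0 to 1023 gets (1023 - 0) / 16 + 2 = 65 list entries, i.e. 65 ints (about 260 bytes), while a real checkpoint normally spans only a handful of blocks.
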
+
+int yaffs2_get_checkpt_sum(struct yaffs_dev *dev, u32 * sum)
+{
+ u32 composite_sum;
+ composite_sum = (dev->checkpt_sum << 8) | (dev->checkpt_xor & 0xFF);
+ *sum = composite_sum;
+ return 1;
+}
+
+static int yaffs2_checkpt_flush_buffer(struct yaffs_dev *dev)
+{
+ int chunk;
+ int realigned_chunk;
+
+ struct yaffs_ext_tags tags;
+
+ if (dev->checkpt_cur_block < 0) {
+ yaffs2_checkpt_find_erased_block(dev);
+ dev->checkpt_cur_chunk = 0;
+ }
+
+ if (dev->checkpt_cur_block < 0)
+ return 0;
+
+ tags.is_deleted = 0;
+ tags.obj_id = dev->checkpt_next_block; /* Hint to next place to look */
+ tags.chunk_id = dev->checkpt_page_seq + 1;
+ tags.seq_number = YAFFS_SEQUENCE_CHECKPOINT_DATA;
+ tags.n_bytes = dev->data_bytes_per_chunk;
+ if (dev->checkpt_cur_chunk == 0) {
+ /* First chunk we write for the block? Set block state to
+ checkpoint */
+ struct yaffs_block_info *bi =
+ yaffs_get_block_info(dev, dev->checkpt_cur_block);
+ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
+ dev->blocks_in_checkpt++;
+ }
+
+ chunk =
+ dev->checkpt_cur_block * dev->param.chunks_per_block +
+ dev->checkpt_cur_chunk;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+		"checkpoint write buffer nand %d(%d:%d) objid %d chId %d",
+ chunk, dev->checkpt_cur_block, dev->checkpt_cur_chunk,
+ tags.obj_id, tags.chunk_id);
+
+ realigned_chunk = chunk - dev->chunk_offset;
+
+ dev->n_page_writes++;
+
+ dev->param.write_chunk_tags_fn(dev, realigned_chunk,
+ dev->checkpt_buffer, &tags);
+ dev->checkpt_byte_offs = 0;
+ dev->checkpt_page_seq++;
+ dev->checkpt_cur_chunk++;
+ if (dev->checkpt_cur_chunk >= dev->param.chunks_per_block) {
+ dev->checkpt_cur_chunk = 0;
+ dev->checkpt_cur_block = -1;
+ }
+ memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
+
+ return 1;
+}
+
+int yaffs2_checkpt_wr(struct yaffs_dev *dev, const void *data, int n_bytes)
+{
+ int i = 0;
+ int ok = 1;
+
+ u8 *data_bytes = (u8 *) data;
+
+ if (!dev->checkpt_buffer)
+ return 0;
+
+ if (!dev->checkpt_open_write)
+ return -1;
+
+ while (i < n_bytes && ok) {
+ dev->checkpt_buffer[dev->checkpt_byte_offs] = *data_bytes;
+ dev->checkpt_sum += *data_bytes;
+ dev->checkpt_xor ^= *data_bytes;
+
+ dev->checkpt_byte_offs++;
+ i++;
+ data_bytes++;
+ dev->checkpt_byte_count++;
+
+ if (dev->checkpt_byte_offs < 0 ||
+ dev->checkpt_byte_offs >= dev->data_bytes_per_chunk)
+ ok = yaffs2_checkpt_flush_buffer(dev);
+ }
+
+ return i;
+}
+
+int yaffs2_checkpt_rd(struct yaffs_dev *dev, void *data, int n_bytes)
+{
+ int i = 0;
+ int ok = 1;
+ struct yaffs_ext_tags tags;
+
+ int chunk;
+ int realigned_chunk;
+
+ u8 *data_bytes = (u8 *) data;
+
+ if (!dev->checkpt_buffer)
+ return 0;
+
+ if (dev->checkpt_open_write)
+ return -1;
+
+ while (i < n_bytes && ok) {
+
+ if (dev->checkpt_byte_offs < 0 ||
+ dev->checkpt_byte_offs >= dev->data_bytes_per_chunk) {
+
+ if (dev->checkpt_cur_block < 0) {
+ yaffs2_checkpt_find_block(dev);
+ dev->checkpt_cur_chunk = 0;
+ }
+
+ if (dev->checkpt_cur_block < 0)
+ ok = 0;
+ else {
+ chunk = dev->checkpt_cur_block *
+ dev->param.chunks_per_block +
+ dev->checkpt_cur_chunk;
+
+ realigned_chunk = chunk - dev->chunk_offset;
+
+ dev->n_page_reads++;
+
+ /* read in the next chunk */
+ dev->param.read_chunk_tags_fn(dev,
+ realigned_chunk,
+							      dev->checkpt_buffer,
+							      &tags);
+
+ if (tags.chunk_id != (dev->checkpt_page_seq + 1)
+ || tags.ecc_result > YAFFS_ECC_RESULT_FIXED
+ || tags.seq_number !=
+ YAFFS_SEQUENCE_CHECKPOINT_DATA)
+ ok = 0;
+
+ dev->checkpt_byte_offs = 0;
+ dev->checkpt_page_seq++;
+ dev->checkpt_cur_chunk++;
+
+ if (dev->checkpt_cur_chunk >=
+ dev->param.chunks_per_block)
+ dev->checkpt_cur_block = -1;
+ }
+ }
+
+ if (ok) {
+ *data_bytes =
+ dev->checkpt_buffer[dev->checkpt_byte_offs];
+ dev->checkpt_sum += *data_bytes;
+ dev->checkpt_xor ^= *data_bytes;
+ dev->checkpt_byte_offs++;
+ i++;
+ data_bytes++;
+ dev->checkpt_byte_count++;
+ }
+ }
+
+ return i;
+}
+
+int yaffs_checkpt_close(struct yaffs_dev *dev)
+{
+
+ if (dev->checkpt_open_write) {
+ if (dev->checkpt_byte_offs != 0)
+ yaffs2_checkpt_flush_buffer(dev);
+ } else if (dev->checkpt_block_list) {
+ int i;
+ for (i = 0;
+ i < dev->blocks_in_checkpt
+ && dev->checkpt_block_list[i] >= 0; i++) {
+ int blk = dev->checkpt_block_list[i];
+ struct yaffs_block_info *bi = NULL;
+ if (dev->internal_start_block <= blk
+ && blk <= dev->internal_end_block)
+ bi = yaffs_get_block_info(dev, blk);
+ if (bi && bi->block_state == YAFFS_BLOCK_STATE_EMPTY)
+ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
+ else {
+ /* Todo this looks odd... */
+ }
+ }
+ kfree(dev->checkpt_block_list);
+ dev->checkpt_block_list = NULL;
+ }
+
+ dev->n_free_chunks -=
+ dev->blocks_in_checkpt * dev->param.chunks_per_block;
+ dev->n_erased_blocks -= dev->blocks_in_checkpt;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,"checkpoint byte count %d",
+ dev->checkpt_byte_count);
+
+ if (dev->checkpt_buffer) {
+ /* free the buffer */
+ kfree(dev->checkpt_buffer);
+ dev->checkpt_buffer = NULL;
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+int yaffs2_checkpt_invalidate_stream(struct yaffs_dev *dev)
+{
+ /* Erase the checkpoint data */
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "checkpoint invalidate of %d blocks",
+ dev->blocks_in_checkpt);
+
+ return yaffs_checkpt_erase(dev);
+}
diff --git a/fs/yaffs2/yaffs_checkptrw.h b/fs/yaffs2/yaffs_checkptrw.h
new file mode 100644
index 000000000000..361c6067717e
--- /dev/null
+++ b/fs/yaffs2/yaffs_checkptrw.h
@@ -0,0 +1,33 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_CHECKPTRW_H__
+#define __YAFFS_CHECKPTRW_H__
+
+#include "yaffs_guts.h"
+
+int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing);
+
+int yaffs2_checkpt_wr(struct yaffs_dev *dev, const void *data, int n_bytes);
+
+int yaffs2_checkpt_rd(struct yaffs_dev *dev, void *data, int n_bytes);
+
+int yaffs2_get_checkpt_sum(struct yaffs_dev *dev, u32 * sum);
+
+int yaffs_checkpt_close(struct yaffs_dev *dev);
+
+int yaffs2_checkpt_invalidate_stream(struct yaffs_dev *dev);
+
+#endif
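
A hedged usage sketch of the checkpoint stream API declared above. The header struct, magic value and error handling are illustrative assumptions; only the open/write/sum/close calls come from this patch.

/* Illustrative: push a small header through the checkpoint byte stream. */
struct example_checkpt_hdr {
	u32 magic;
	u32 version;
};

static int example_save_header(struct yaffs_dev *dev)
{
	struct example_checkpt_hdr hdr = { 0x59434850, 1 };	/* hypothetical values */
	u32 sum;
	int ok;

	if (!yaffs2_checkpt_open(dev, 1))	/* 1 = open for writing */
		return 0;

	ok = yaffs2_checkpt_wr(dev, &hdr, sizeof(hdr)) == sizeof(hdr);
	yaffs2_get_checkpt_sum(dev, &sum);	/* running sum/xor of the bytes written */

	return yaffs_checkpt_close(dev) && ok;
}
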
diff --git a/fs/yaffs2/yaffs_ecc.c b/fs/yaffs2/yaffs_ecc.c
new file mode 100644
index 000000000000..e95a8069a8c5
--- /dev/null
+++ b/fs/yaffs2/yaffs_ecc.c
@@ -0,0 +1,298 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This code implements the ECC algorithm used in SmartMedia.
+ *
+ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
+ * The two unused bits are set to 1.
+ * The ECC can correct single bit errors in a 256-byte page of data. Thus, two such ECC
+ * blocks are used on a 512-byte NAND page.
+ *
+ */
+
+/* Table generated by gen-ecc.c
+ * Using a table means we do not have to calculate p1..p4 and p1'..p4'
+ * for each byte of data. These are instead provided in a table in bits7..2.
+ * Bit 0 of each entry indicates whether the entry has an odd or even parity, and therefore
+ * this byte's influence on the line parity.
+ */
+
+#include "yportenv.h"
+
+#include "yaffs_ecc.h"
+
+static const unsigned char column_parity_table[] = {
+ 0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
+ 0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
+ 0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
+ 0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
+ 0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
+ 0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
+ 0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
+ 0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
+ 0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
+ 0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
+ 0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
+ 0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
+ 0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
+ 0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
+ 0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
+ 0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
+ 0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
+ 0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
+ 0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
+ 0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
+ 0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
+ 0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
+ 0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
+ 0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
+ 0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
+ 0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
+ 0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
+ 0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
+ 0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
+ 0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
+ 0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
+ 0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
+};
+
+
+/* Calculate the ECC for a 256-byte block of data */
+void yaffs_ecc_cacl(const unsigned char *data, unsigned char *ecc)
+{
+ unsigned int i;
+
+ unsigned char col_parity = 0;
+ unsigned char line_parity = 0;
+ unsigned char line_parity_prime = 0;
+ unsigned char t;
+ unsigned char b;
+
+ for (i = 0; i < 256; i++) {
+ b = column_parity_table[*data++];
+ col_parity ^= b;
+
+ if (b & 0x01) { /* odd number of bits in the byte */
+ line_parity ^= i;
+ line_parity_prime ^= ~i;
+ }
+ }
+
+ ecc[2] = (~col_parity) | 0x03;
+
+ t = 0;
+ if (line_parity & 0x80)
+ t |= 0x80;
+ if (line_parity_prime & 0x80)
+ t |= 0x40;
+ if (line_parity & 0x40)
+ t |= 0x20;
+ if (line_parity_prime & 0x40)
+ t |= 0x10;
+ if (line_parity & 0x20)
+ t |= 0x08;
+ if (line_parity_prime & 0x20)
+ t |= 0x04;
+ if (line_parity & 0x10)
+ t |= 0x02;
+ if (line_parity_prime & 0x10)
+ t |= 0x01;
+ ecc[1] = ~t;
+
+ t = 0;
+ if (line_parity & 0x08)
+ t |= 0x80;
+ if (line_parity_prime & 0x08)
+ t |= 0x40;
+ if (line_parity & 0x04)
+ t |= 0x20;
+ if (line_parity_prime & 0x04)
+ t |= 0x10;
+ if (line_parity & 0x02)
+ t |= 0x08;
+ if (line_parity_prime & 0x02)
+ t |= 0x04;
+ if (line_parity & 0x01)
+ t |= 0x02;
+ if (line_parity_prime & 0x01)
+ t |= 0x01;
+ ecc[0] = ~t;
+
+#ifdef CONFIG_YAFFS_ECC_WRONG_ORDER
+ /* Swap the bytes into the wrong order */
+ t = ecc[0];
+ ecc[0] = ecc[1];
+ ecc[1] = t;
+#endif
+}
+
+/* Correct the ECC on a 256 byte block of data */
+
+int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
+ const unsigned char *test_ecc)
+{
+ unsigned char d0, d1, d2; /* deltas */
+
+ d0 = read_ecc[0] ^ test_ecc[0];
+ d1 = read_ecc[1] ^ test_ecc[1];
+ d2 = read_ecc[2] ^ test_ecc[2];
+
+ if ((d0 | d1 | d2) == 0)
+ return 0; /* no error */
+
+ if (((d0 ^ (d0 >> 1)) & 0x55) == 0x55 &&
+ ((d1 ^ (d1 >> 1)) & 0x55) == 0x55 &&
+ ((d2 ^ (d2 >> 1)) & 0x54) == 0x54) {
+ /* Single bit (recoverable) error in data */
+
+ unsigned byte;
+ unsigned bit;
+
+#ifdef CONFIG_YAFFS_ECC_WRONG_ORDER
+ /* swap the bytes to correct for the wrong order */
+ unsigned char t;
+
+ t = d0;
+ d0 = d1;
+ d1 = t;
+#endif
+
+ bit = byte = 0;
+
+ if (d1 & 0x80)
+ byte |= 0x80;
+ if (d1 & 0x20)
+ byte |= 0x40;
+ if (d1 & 0x08)
+ byte |= 0x20;
+ if (d1 & 0x02)
+ byte |= 0x10;
+ if (d0 & 0x80)
+ byte |= 0x08;
+ if (d0 & 0x20)
+ byte |= 0x04;
+ if (d0 & 0x08)
+ byte |= 0x02;
+ if (d0 & 0x02)
+ byte |= 0x01;
+
+ if (d2 & 0x80)
+ bit |= 0x04;
+ if (d2 & 0x20)
+ bit |= 0x02;
+ if (d2 & 0x08)
+ bit |= 0x01;
+
+ data[byte] ^= (1 << bit);
+
+ return 1; /* Corrected the error */
+ }
+
+ if ((hweight8(d0) + hweight8(d1) + hweight8(d2)) == 1) {
+		/* Recoverable error in ECC */
+
+ read_ecc[0] = test_ecc[0];
+ read_ecc[1] = test_ecc[1];
+ read_ecc[2] = test_ecc[2];
+
+ return 1; /* Corrected the error */
+ }
+
+ /* Unrecoverable error */
+
+ return -1;
+
+}
+
+/*
+ * ECCxxxOther does ECC calcs on arbitrary n bytes of data
+ */
+void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *ecc_other)
+{
+ unsigned int i;
+
+ unsigned char col_parity = 0;
+ unsigned line_parity = 0;
+ unsigned line_parity_prime = 0;
+ unsigned char b;
+
+ for (i = 0; i < n_bytes; i++) {
+ b = column_parity_table[*data++];
+ col_parity ^= b;
+
+ if (b & 0x01) {
+ /* odd number of bits in the byte */
+ line_parity ^= i;
+ line_parity_prime ^= ~i;
+ }
+
+ }
+
+ ecc_other->col_parity = (col_parity >> 2) & 0x3f;
+ ecc_other->line_parity = line_parity;
+ ecc_other->line_parity_prime = line_parity_prime;
+}
+
+int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *read_ecc,
+ const struct yaffs_ecc_other *test_ecc)
+{
+ unsigned char delta_col; /* column parity delta */
+ unsigned delta_line; /* line parity delta */
+ unsigned delta_line_prime; /* line parity delta */
+ unsigned bit;
+
+ delta_col = read_ecc->col_parity ^ test_ecc->col_parity;
+ delta_line = read_ecc->line_parity ^ test_ecc->line_parity;
+ delta_line_prime =
+ read_ecc->line_parity_prime ^ test_ecc->line_parity_prime;
+
+ if ((delta_col | delta_line | delta_line_prime) == 0)
+ return 0; /* no error */
+
+ if (delta_line == ~delta_line_prime &&
+ (((delta_col ^ (delta_col >> 1)) & 0x15) == 0x15)) {
+ /* Single bit (recoverable) error in data */
+
+ bit = 0;
+
+ if (delta_col & 0x20)
+ bit |= 0x04;
+ if (delta_col & 0x08)
+ bit |= 0x02;
+ if (delta_col & 0x02)
+ bit |= 0x01;
+
+ if (delta_line >= n_bytes)
+ return -1;
+
+ data[delta_line] ^= (1 << bit);
+
+ return 1; /* corrected */
+ }
+
+ if ((hweight32(delta_line) +
+ hweight32(delta_line_prime) +
+ hweight8(delta_col)) == 1) {
+		/* Recoverable error in ECC */
+
+ *read_ecc = *test_ecc;
+ return 1; /* corrected */
+ }
+
+ /* Unrecoverable error */
+
+ return -1;
+}
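
A round-trip sketch of the 256-byte ECC above: compute the code at "write" time, flip one data bit to simulate a read error, recompute at "read" time, and let yaffs_ecc_correct() repair it. The fill pattern and the flipped bit are arbitrary illustrative choices.

#include <string.h>
#include "yaffs_ecc.h"

/* Returns 1 if a single flipped data bit is detected and repaired. */
static int example_ecc_round_trip(void)
{
	unsigned char page[256];
	unsigned char ecc_from_spare[3];	/* computed when the page was written */
	unsigned char ecc_from_data[3];		/* recomputed from the data just read */

	memset(page, 0xA5, sizeof(page));
	yaffs_ecc_cacl(page, ecc_from_spare);

	page[100] ^= 0x08;			/* simulate a single-bit read error */
	yaffs_ecc_cacl(page, ecc_from_data);

	/* 0: no error, 1: corrected, -1: unrecoverable */
	return yaffs_ecc_correct(page, ecc_from_spare, ecc_from_data) == 1 &&
	       page[100] == 0xA5;
}
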
diff --git a/fs/yaffs2/yaffs_ecc.h b/fs/yaffs2/yaffs_ecc.h
new file mode 100644
index 000000000000..b0c461d699e6
--- /dev/null
+++ b/fs/yaffs2/yaffs_ecc.h
@@ -0,0 +1,44 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/*
+ * This code implements the ECC algorithm used in SmartMedia.
+ *
+ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
+ * The two unused bits are set to 1.
+ * The ECC can correct single bit errors in a 256-byte page of data. Thus, two such ECC
+ * blocks are used on a 512-byte NAND page.
+ *
+ */
+
+#ifndef __YAFFS_ECC_H__
+#define __YAFFS_ECC_H__
+
+struct yaffs_ecc_other {
+ unsigned char col_parity;
+ unsigned line_parity;
+ unsigned line_parity_prime;
+};
+
+void yaffs_ecc_cacl(const unsigned char *data, unsigned char *ecc);
+int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
+ const unsigned char *test_ecc);
+
+void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *ecc);
+int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *read_ecc,
+ const struct yaffs_ecc_other *test_ecc);
+#endif
diff --git a/fs/yaffs2/yaffs_getblockinfo.h b/fs/yaffs2/yaffs_getblockinfo.h
new file mode 100644
index 000000000000..d87acbde997c
--- /dev/null
+++ b/fs/yaffs2/yaffs_getblockinfo.h
@@ -0,0 +1,35 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_GETBLOCKINFO_H__
+#define __YAFFS_GETBLOCKINFO_H__
+
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+
+/* Function to manipulate block info */
+static inline struct yaffs_block_info *yaffs_get_block_info(struct yaffs_dev
+ *dev, int blk)
+{
+ if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>> yaffs: get_block_info block %d is not valid",
+ blk);
+ YBUG();
+ }
+ return &dev->block_info[blk - dev->internal_start_block];
+}
+
+#endif
diff --git a/fs/yaffs2/yaffs_guts.c b/fs/yaffs2/yaffs_guts.c
new file mode 100644
index 000000000000..f4ae9deed727
--- /dev/null
+++ b/fs/yaffs2/yaffs_guts.c
@@ -0,0 +1,5164 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yportenv.h"
+#include "yaffs_trace.h"
+
+#include "yaffs_guts.h"
+#include "yaffs_tagsvalidity.h"
+#include "yaffs_getblockinfo.h"
+
+#include "yaffs_tagscompat.h"
+
+#include "yaffs_nand.h"
+
+#include "yaffs_yaffs1.h"
+#include "yaffs_yaffs2.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_verify.h"
+
+#include "yaffs_nand.h"
+#include "yaffs_packedtags2.h"
+
+#include "yaffs_nameval.h"
+#include "yaffs_allocator.h"
+
+#include "yaffs_attribs.h"
+
+/* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */
+#define YAFFS_GC_GOOD_ENOUGH 2
+#define YAFFS_GC_PASSIVE_THRESHOLD 4
+
+#include "yaffs_ecc.h"
+
+/* Forward declarations */
+
+static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
+ const u8 * buffer, int n_bytes, int use_reserve);
+
+
+
+/* Function to calculate chunk and offset */
+
+static void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
+ int *chunk_out, u32 * offset_out)
+{
+ int chunk;
+ u32 offset;
+
+ chunk = (u32) (addr >> dev->chunk_shift);
+
+ if (dev->chunk_div == 1) {
+ /* easy power of 2 case */
+ offset = (u32) (addr & dev->chunk_mask);
+ } else {
+ /* Non power-of-2 case */
+
+ loff_t chunk_base;
+
+ chunk /= dev->chunk_div;
+
+ chunk_base = ((loff_t) chunk) * dev->data_bytes_per_chunk;
+ offset = (u32) (addr - chunk_base);
+ }
+
+ *chunk_out = chunk;
+ *offset_out = offset;
+}
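
For the common power-of-two geometry the split above reduces to a shift and a mask; a standalone illustration with 2048-byte chunks (chunk_shift = 11, chunk_div = 1), independent of struct yaffs_dev:

#include <stdio.h>

int main(void)
{
	long long addr = 5000;				/* byte offset into the file */
	int chunk_shift = 11;				/* 2048-byte data chunks */
	unsigned chunk_mask = (1u << chunk_shift) - 1;

	int chunk = (int)(addr >> chunk_shift);		/* 5000 / 2048 = 2 */
	unsigned offset = (unsigned)(addr & chunk_mask);/* 5000 - 4096 = 904 */

	printf("chunk %d offset %u\n", chunk, offset);	/* chunk 2 offset 904 */
	return 0;
}
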
+
+/* Function to return the number of shifts for a power of 2 greater than or
+ * equal to the given number
+ * Note we don't try to cater for all possible numbers and this does not have to
+ * be hellishly efficient.
+ */
+
+static u32 calc_shifts_ceiling(u32 x)
+{
+ int extra_bits;
+ int shifts;
+
+ shifts = extra_bits = 0;
+
+ while (x > 1) {
+ if (x & 1)
+ extra_bits++;
+ x >>= 1;
+ shifts++;
+ }
+
+ if (extra_bits)
+ shifts++;
+
+ return shifts;
+}
+
+/* Function to return the number of shifts to get a 1 in bit 0
+ */
+
+static u32 calc_shifts(u32 x)
+{
+ u32 shifts;
+
+ shifts = 0;
+
+ if (!x)
+ return 0;
+
+ while (!(x & 1)) {
+ x >>= 1;
+ shifts++;
+ }
+
+ return shifts;
+}
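
For example, calc_shifts(2048) counts the eleven trailing zero bits and returns 11, while calc_shifts_ceiling(2100) also takes eleven shifts to reduce the value to 1 but sees non-zero low bits along the way, so it returns 12: the smallest power of two that is not below 2100 is 2^12 = 4096.
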
+
+/*
+ * Temporary buffer manipulations.
+ */
+
+static int yaffs_init_tmp_buffers(struct yaffs_dev *dev)
+{
+ int i;
+	u8 *buf = (u8 *) 1;	/* non-NULL sentinel so the first loop iteration runs */
+
+ memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer));
+
+ for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
+ dev->temp_buffer[i].line = 0; /* not in use */
+ dev->temp_buffer[i].buffer = buf =
+ kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
+ }
+
+ return buf ? YAFFS_OK : YAFFS_FAIL;
+}
+
+u8 *yaffs_get_temp_buffer(struct yaffs_dev * dev, int line_no)
+{
+ int i, j;
+
+ dev->temp_in_use++;
+ if (dev->temp_in_use > dev->max_temp)
+ dev->max_temp = dev->temp_in_use;
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ if (dev->temp_buffer[i].line == 0) {
+ dev->temp_buffer[i].line = line_no;
+ if ((i + 1) > dev->max_temp) {
+ dev->max_temp = i + 1;
+ for (j = 0; j <= i; j++)
+ dev->temp_buffer[j].max_line =
+ dev->temp_buffer[j].line;
+ }
+
+ return dev->temp_buffer[i].buffer;
+ }
+ }
+
+ yaffs_trace(YAFFS_TRACE_BUFFERS,
+ "Out of temp buffers at line %d, other held by lines:",
+ line_no);
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
+ yaffs_trace(YAFFS_TRACE_BUFFERS," %d", dev->temp_buffer[i].line);
+
+ /*
+ * If we got here then we have to allocate an unmanaged one
+ * This is not good.
+ */
+
+ dev->unmanaged_buffer_allocs++;
+ return kmalloc(dev->data_bytes_per_chunk, GFP_NOFS);
+
+}
+
+void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 * buffer, int line_no)
+{
+ int i;
+
+ dev->temp_in_use--;
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ if (dev->temp_buffer[i].buffer == buffer) {
+ dev->temp_buffer[i].line = 0;
+ return;
+ }
+ }
+
+ if (buffer) {
+ /* assume it is an unmanaged one. */
+ yaffs_trace(YAFFS_TRACE_BUFFERS,
+ "Releasing unmanaged temp buffer in line %d",
+ line_no);
+ kfree(buffer);
+ dev->unmanaged_buffer_deallocs++;
+ }
+
+}
+
+/*
+ * Determine if we have a managed buffer.
+ */
+int yaffs_is_managed_tmp_buffer(struct yaffs_dev *dev, const u8 * buffer)
+{
+ int i;
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ if (dev->temp_buffer[i].buffer == buffer)
+ return 1;
+ }
+
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].data == buffer)
+ return 1;
+ }
+
+ if (buffer == dev->checkpt_buffer)
+ return 1;
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+		"yaffs: unmanaged buffer detected.");
+ return 0;
+}
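
The intended call pattern for this pool is simply get/use/release, passing __LINE__ so a leaked buffer can be traced back to its caller. A minimal sketch; the surrounding function is hypothetical, while the calls are the ones defined above plus yaffs_rd_chunk_tags_nand() as used elsewhere in this file:

/* Illustrative: read one chunk through a pooled temporary buffer. */
static int example_read_one_chunk(struct yaffs_dev *dev, int nand_chunk)
{
	struct yaffs_ext_tags tags;
	u8 *buf = yaffs_get_temp_buffer(dev, __LINE__);
	int result;

	result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, buf, &tags);

	/* ... inspect buf and tags here ... */

	yaffs_release_temp_buffer(dev, buf, __LINE__);
	return result;
}
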
+
+/*
+ * Functions for robustness handling (TODO)
+ *
+ */
+
+static void yaffs_handle_chunk_wr_ok(struct yaffs_dev *dev, int nand_chunk,
+ const u8 * data,
+ const struct yaffs_ext_tags *tags)
+{
+ dev = dev;
+ nand_chunk = nand_chunk;
+ data = data;
+ tags = tags;
+}
+
+static void yaffs_handle_chunk_update(struct yaffs_dev *dev, int nand_chunk,
+ const struct yaffs_ext_tags *tags)
+{
+ dev = dev;
+ nand_chunk = nand_chunk;
+ tags = tags;
+}
+
+void yaffs_handle_chunk_error(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi)
+{
+ if (!bi->gc_prioritise) {
+ bi->gc_prioritise = 1;
+ dev->has_pending_prioritised_gc = 1;
+ bi->chunk_error_strikes++;
+
+ if (bi->chunk_error_strikes > 3) {
+			bi->needs_retiring = 1;	/* Too many strikes, so retire this block */
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "yaffs: Block struck out");
+
+ }
+ }
+}
+
+static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, int nand_chunk,
+ int erased_ok)
+{
+ int flash_block = nand_chunk / dev->param.chunks_per_block;
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
+
+ yaffs_handle_chunk_error(dev, bi);
+
+ if (erased_ok) {
+ /* Was an actual write failure, so mark the block for retirement */
+ bi->needs_retiring = 1;
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>> Block %d needs retiring", flash_block);
+ }
+
+ /* Delete the chunk */
+ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+ yaffs_skip_rest_of_block(dev);
+}
+
+/*
+ * Verification code
+ */
+
+/*
+ * Simple hash function. Needs to have a reasonable spread
+ */
+
+static inline int yaffs_hash_fn(int n)
+{
+ n = abs(n);
+ return n % YAFFS_NOBJECT_BUCKETS;
+}
+
+/*
+ * Access functions to useful fake objects.
+ * Note that root might have a presence in NAND if permissions are set.
+ */
+
+struct yaffs_obj *yaffs_root(struct yaffs_dev *dev)
+{
+ return dev->root_dir;
+}
+
+struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev)
+{
+ return dev->lost_n_found;
+}
+
+/*
+ * Erased NAND checking functions
+ */
+
+int yaffs_check_ff(u8 * buffer, int n_bytes)
+{
+ /* Horrible, slow implementation */
+ while (n_bytes--) {
+ if (*buffer != 0xFF)
+ return 0;
+ buffer++;
+ }
+ return 1;
+}
+
+static int yaffs_check_chunk_erased(struct yaffs_dev *dev, int nand_chunk)
+{
+ int retval = YAFFS_OK;
+ u8 *data = yaffs_get_temp_buffer(dev, __LINE__);
+ struct yaffs_ext_tags tags;
+ int result;
+
+ result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags);
+
+ if (tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR)
+ retval = YAFFS_FAIL;
+
+ if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) ||
+ tags.chunk_used) {
+ yaffs_trace(YAFFS_TRACE_NANDACCESS, "Chunk %d not erased", nand_chunk);
+ retval = YAFFS_FAIL;
+ }
+
+ yaffs_release_temp_buffer(dev, data, __LINE__);
+
+ return retval;
+
+}
+
+static int yaffs_verify_chunk_written(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 * data,
+ struct yaffs_ext_tags *tags)
+{
+ int retval = YAFFS_OK;
+ struct yaffs_ext_tags temp_tags;
+ u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
+ int result;
+
+ result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags);
+ if (memcmp(buffer, data, dev->data_bytes_per_chunk) ||
+ temp_tags.obj_id != tags->obj_id ||
+ temp_tags.chunk_id != tags->chunk_id ||
+ temp_tags.n_bytes != tags->n_bytes)
+ retval = YAFFS_FAIL;
+
+ yaffs_release_temp_buffer(dev, buffer, __LINE__);
+
+ return retval;
+}
+
+
+int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks)
+{
+ int reserved_chunks;
+ int reserved_blocks = dev->param.n_reserved_blocks;
+ int checkpt_blocks;
+
+ checkpt_blocks = yaffs_calc_checkpt_blocks_required(dev);
+
+ reserved_chunks =
+ ((reserved_blocks + checkpt_blocks) * dev->param.chunks_per_block);
+
+ return (dev->n_free_chunks > (reserved_chunks + n_chunks));
+}
+
+static int yaffs_find_alloc_block(struct yaffs_dev *dev)
+{
+ int i;
+
+ struct yaffs_block_info *bi;
+
+ if (dev->n_erased_blocks < 1) {
+ /* Hoosterman we've got a problem.
+ * Can't get space to gc
+ */
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: no more erased blocks" );
+
+ return -1;
+ }
+
+ /* Find an empty block. */
+
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ dev->alloc_block_finder++;
+ if (dev->alloc_block_finder < dev->internal_start_block
+ || dev->alloc_block_finder > dev->internal_end_block) {
+ dev->alloc_block_finder = dev->internal_start_block;
+ }
+
+ bi = yaffs_get_block_info(dev, dev->alloc_block_finder);
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
+ bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING;
+ dev->seq_number++;
+ bi->seq_number = dev->seq_number;
+ dev->n_erased_blocks--;
+ yaffs_trace(YAFFS_TRACE_ALLOCATE,
+ "Allocated block %d, seq %d, %d left" ,
+ dev->alloc_block_finder, dev->seq_number,
+ dev->n_erased_blocks);
+ return dev->alloc_block_finder;
+ }
+ }
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs tragedy: no more erased blocks, but there should have been %d",
+ dev->n_erased_blocks);
+
+ return -1;
+}
+
+static int yaffs_alloc_chunk(struct yaffs_dev *dev, int use_reserver,
+ struct yaffs_block_info **block_ptr)
+{
+ int ret_val;
+ struct yaffs_block_info *bi;
+
+ if (dev->alloc_block < 0) {
+ /* Get next block to allocate off */
+ dev->alloc_block = yaffs_find_alloc_block(dev);
+ dev->alloc_page = 0;
+ }
+
+ if (!use_reserver && !yaffs_check_alloc_available(dev, 1)) {
+ /* Not enough space to allocate unless we're allowed to use the reserve. */
+ return -1;
+ }
+
+ if (dev->n_erased_blocks < dev->param.n_reserved_blocks
+ && dev->alloc_page == 0)
+ yaffs_trace(YAFFS_TRACE_ALLOCATE, "Allocating reserve");
+
+ /* Next page please.... */
+ if (dev->alloc_block >= 0) {
+ bi = yaffs_get_block_info(dev, dev->alloc_block);
+
+ ret_val = (dev->alloc_block * dev->param.chunks_per_block) +
+ dev->alloc_page;
+ bi->pages_in_use++;
+ yaffs_set_chunk_bit(dev, dev->alloc_block, dev->alloc_page);
+
+ dev->alloc_page++;
+
+ dev->n_free_chunks--;
+
+ /* If the block is full set the state to full */
+ if (dev->alloc_page >= dev->param.chunks_per_block) {
+ bi->block_state = YAFFS_BLOCK_STATE_FULL;
+ dev->alloc_block = -1;
+ }
+
+ if (block_ptr)
+ *block_ptr = bi;
+
+ return ret_val;
+ }
+
+ yaffs_trace(YAFFS_TRACE_ERROR, "!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!" );
+
+ return -1;
+}
+
+static int yaffs_get_erased_chunks(struct yaffs_dev *dev)
+{
+ int n;
+
+ n = dev->n_erased_blocks * dev->param.chunks_per_block;
+
+ if (dev->alloc_block > 0)
+ n += (dev->param.chunks_per_block - dev->alloc_page);
+
+ return n;
+
+}
+
+/*
+ * yaffs_skip_rest_of_block() skips over the rest of the allocation block
+ * if we don't want to write to it.
+ */
+void yaffs_skip_rest_of_block(struct yaffs_dev *dev)
+{
+ if (dev->alloc_block > 0) {
+ struct yaffs_block_info *bi =
+ yaffs_get_block_info(dev, dev->alloc_block);
+ if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
+ bi->block_state = YAFFS_BLOCK_STATE_FULL;
+ dev->alloc_block = -1;
+ }
+ }
+}
+
+static int yaffs_write_new_chunk(struct yaffs_dev *dev,
+ const u8 * data,
+ struct yaffs_ext_tags *tags, int use_reserver)
+{
+ int attempts = 0;
+ int write_ok = 0;
+ int chunk;
+
+ yaffs2_checkpt_invalidate(dev);
+
+ do {
+ struct yaffs_block_info *bi = 0;
+ int erased_ok = 0;
+
+ chunk = yaffs_alloc_chunk(dev, use_reserver, &bi);
+ if (chunk < 0) {
+ /* no space */
+ break;
+ }
+
+ /* First check this chunk is erased, if it needs
+ * checking. The checking policy (unless forced
+ * always on) is as follows:
+ *
+ * Check the first page we try to write in a block.
+ * If the check passes then we don't need to check any
+ * more. If the check fails, we check again...
+ * If the block has been erased, we don't need to check.
+ *
+ * However, if the block has been prioritised for gc,
+ * then we think there might be something odd about
+ * this block and stop using it.
+ *
+ * Rationale: We should only ever see chunks that have
+ * not been erased if there was a partially written
+ * chunk due to power loss. This checking policy should
+ * catch that case with very few checks and thus save a
+ * lot of checks that are most likely not needed.
+ *
+ * Mods to the above
+ * If an erase check fails or the write fails we skip the
+ * rest of the block.
+ */
+
+ /* let's give it a try */
+ attempts++;
+
+ if (dev->param.always_check_erased)
+ bi->skip_erased_check = 0;
+
+ if (!bi->skip_erased_check) {
+ erased_ok = yaffs_check_chunk_erased(dev, chunk);
+ if (erased_ok != YAFFS_OK) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>> yaffs chunk %d was not erased",
+ chunk);
+
+ /* If not erased, delete this one,
+ * skip rest of block and
+ * try another chunk */
+ yaffs_chunk_del(dev, chunk, 1, __LINE__);
+ yaffs_skip_rest_of_block(dev);
+ continue;
+ }
+ }
+
+ write_ok = yaffs_wr_chunk_tags_nand(dev, chunk, data, tags);
+
+ if (!bi->skip_erased_check)
+ write_ok =
+ yaffs_verify_chunk_written(dev, chunk, data, tags);
+
+ if (write_ok != YAFFS_OK) {
+ /* Clean up aborted write, skip to next block and
+ * try another chunk */
+ yaffs_handle_chunk_wr_error(dev, chunk, erased_ok);
+ continue;
+ }
+
+ bi->skip_erased_check = 1;
+
+ /* Copy the data into the robustification buffer */
+ yaffs_handle_chunk_wr_ok(dev, chunk, data, tags);
+
+ } while (write_ok != YAFFS_OK &&
+ (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));
+
+ if (!write_ok)
+ chunk = -1;
+
+ if (attempts > 1) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>> yaffs write required %d attempts",
+ attempts);
+ dev->n_retired_writes += (attempts - 1);
+ }
+
+ return chunk;
+}
+
+/*
+ * Block retiring for handling a broken block.
+ */
+
+static void yaffs_retire_block(struct yaffs_dev *dev, int flash_block)
+{
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
+
+ yaffs2_checkpt_invalidate(dev);
+
+ yaffs2_clear_oldest_dirty_seq(dev, bi);
+
+ if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) {
+ if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Failed to mark bad and erase block %d",
+ flash_block);
+ } else {
+ struct yaffs_ext_tags tags;
+ int chunk_id =
+ flash_block * dev->param.chunks_per_block;
+
+ u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
+
+ memset(buffer, 0xff, dev->data_bytes_per_chunk);
+ yaffs_init_tags(&tags);
+ tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK;
+ if (dev->param.write_chunk_tags_fn(dev, chunk_id -
+ dev->chunk_offset,
+ buffer,
+ &tags) != YAFFS_OK)
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Failed to write bad block marker to block %d",
+ flash_block);
+
+ yaffs_release_temp_buffer(dev, buffer, __LINE__);
+ }
+ }
+
+ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
+ bi->gc_prioritise = 0;
+ bi->needs_retiring = 0;
+
+ dev->n_retired_blocks++;
+}
+
+/*---------------- Name handling functions ------------*/
+
+static u16 yaffs_calc_name_sum(const YCHAR * name)
+{
+ u16 sum = 0;
+ u16 i = 1;
+
+ const YUCHAR *bname = (const YUCHAR *)name;
+ if (bname) {
+ while ((*bname) && (i < (YAFFS_MAX_NAME_LENGTH / 2))) {
+
+ /* 0x1f mask is case insensitive */
+ sum += ((*bname) & 0x1f) * i;
+ i++;
+ bname++;
+ }
+ }
+ return sum;
+}
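
For example, with the 0x1f mask both "Foo" and "fOO" reduce to the byte values 6, 15, 15 and sum to 6*1 + 15*2 + 15*3 = 81, which is presumably why the sum is kept on the object: it acts as a cheap, case-insensitive pre-filter when names are compared later.
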
+
+void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name)
+{
+#ifndef CONFIG_YAFFS_NO_SHORT_NAMES
+ memset(obj->short_name, 0, sizeof(obj->short_name));
+ if (name &&
+ strnlen(name, YAFFS_SHORT_NAME_LENGTH + 1) <=
+ YAFFS_SHORT_NAME_LENGTH)
+ strcpy(obj->short_name, name);
+ else
+ obj->short_name[0] = _Y('\0');
+#endif
+ obj->sum = yaffs_calc_name_sum(name);
+}
+
+void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
+ const struct yaffs_obj_hdr *oh)
+{
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+ YCHAR tmp_name[YAFFS_MAX_NAME_LENGTH + 1];
+ memset(tmp_name, 0, sizeof(tmp_name));
+ yaffs_load_name_from_oh(obj->my_dev, tmp_name, oh->name,
+ YAFFS_MAX_NAME_LENGTH + 1);
+ yaffs_set_obj_name(obj, tmp_name);
+#else
+ yaffs_set_obj_name(obj, oh->name);
+#endif
+}
+
+/*-------------------- TNODES -------------------
+ *
+ * List of spare tnodes.
+ * The list is hooked together using the first pointer
+ * in the tnode.
+ */
+
+struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev)
+{
+ struct yaffs_tnode *tn = yaffs_alloc_raw_tnode(dev);
+ if (tn) {
+ memset(tn, 0, dev->tnode_size);
+ dev->n_tnodes++;
+ }
+
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+
+ return tn;
+}
+
+/* FreeTnode frees up a tnode and puts it back on the free list */
+static void yaffs_free_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
+{
+ yaffs_free_raw_tnode(dev, tn);
+ dev->n_tnodes--;
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+}
+
+static void yaffs_deinit_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ yaffs_deinit_raw_tnodes_and_objs(dev);
+ dev->n_obj = 0;
+ dev->n_tnodes = 0;
+}
+
+void yaffs_load_tnode_0(struct yaffs_dev *dev, struct yaffs_tnode *tn,
+ unsigned pos, unsigned val)
+{
+ u32 *map = (u32 *) tn;
+ u32 bit_in_map;
+ u32 bit_in_word;
+ u32 word_in_map;
+ u32 mask;
+
+ pos &= YAFFS_TNODES_LEVEL0_MASK;
+ val >>= dev->chunk_grp_bits;
+
+ bit_in_map = pos * dev->tnode_width;
+ word_in_map = bit_in_map / 32;
+ bit_in_word = bit_in_map & (32 - 1);
+
+ mask = dev->tnode_mask << bit_in_word;
+
+ map[word_in_map] &= ~mask;
+ map[word_in_map] |= (mask & (val << bit_in_word));
+
+ if (dev->tnode_width > (32 - bit_in_word)) {
+ bit_in_word = (32 - bit_in_word);
+		word_in_map++;
+ mask =
+ dev->tnode_mask >> ( /*dev->tnode_width - */ bit_in_word);
+ map[word_in_map] &= ~mask;
+ map[word_in_map] |= (mask & (val >> bit_in_word));
+ }
+}
+
+u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
+ unsigned pos)
+{
+ u32 *map = (u32 *) tn;
+ u32 bit_in_map;
+ u32 bit_in_word;
+ u32 word_in_map;
+ u32 val;
+
+ pos &= YAFFS_TNODES_LEVEL0_MASK;
+
+ bit_in_map = pos * dev->tnode_width;
+ word_in_map = bit_in_map / 32;
+ bit_in_word = bit_in_map & (32 - 1);
+
+ val = map[word_in_map] >> bit_in_word;
+
+ if (dev->tnode_width > (32 - bit_in_word)) {
+ bit_in_word = (32 - bit_in_word);
+		word_in_map++;
+ val |= (map[word_in_map] << bit_in_word);
+ }
+
+ val &= dev->tnode_mask;
+ val <<= dev->chunk_grp_bits;
+
+ return val;
+}
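
A self-contained model of the packing these two functions implement: entries of tnode_width bits are laid end to end in an array of u32 words, and an entry may straddle a word boundary, which is what the second half of each function handles. The 18-bit width below is only an example value, and chunk_grp_bits is ignored here.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define WIDTH 18				/* example tnode_width */
#define MASK  ((1u << WIDTH) - 1)

static void put_entry(uint32_t *map, unsigned pos, uint32_t val)
{
	unsigned bit = pos * WIDTH, word = bit / 32, off = bit & 31;

	map[word] &= ~(MASK << off);
	map[word] |= (val & MASK) << off;
	if (WIDTH > 32 - off) {			/* entry spills into the next word */
		unsigned done = 32 - off;

		map[word + 1] &= ~(MASK >> done);
		map[word + 1] |= (val & MASK) >> done;
	}
}

static uint32_t get_entry(const uint32_t *map, unsigned pos)
{
	unsigned bit = pos * WIDTH, word = bit / 32, off = bit & 31;
	uint32_t val = map[word] >> off;

	if (WIDTH > 32 - off)
		val |= map[word + 1] << (32 - off);
	return val & MASK;
}

int main(void)
{
	uint32_t map[16];

	memset(map, 0, sizeof(map));
	put_entry(map, 1, 0x2ABCD);		/* bits 18..35: crosses words 0 and 1 */
	printf("%#x\n", (unsigned)get_entry(map, 1));	/* prints 0x2abcd */
	return 0;
}
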
+
+/* ------------------- End of individual tnode manipulation -----------------*/
+
+/* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
+ * The look up tree is represented by the top tnode and the number of top_level
+ * in the tree. 0 means only the level 0 tnode is in the tree.
+ */
+
+/* FindLevel0Tnode finds the level 0 tnode, if one exists. */
+struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct,
+ u32 chunk_id)
+{
+ struct yaffs_tnode *tn = file_struct->top;
+ u32 i;
+ int required_depth;
+ int level = file_struct->top_level;
+
+ dev = dev;
+
+ /* Check sane level and chunk Id */
+ if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
+ return NULL;
+
+ if (chunk_id > YAFFS_MAX_CHUNK_ID)
+ return NULL;
+
+	/* First check we're tall enough (i.e. enough top_level) */
+
+ i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
+ required_depth = 0;
+ while (i) {
+ i >>= YAFFS_TNODES_INTERNAL_BITS;
+ required_depth++;
+ }
+
+ if (required_depth > file_struct->top_level)
+ return NULL; /* Not tall enough, so we can't find it */
+
+ /* Traverse down to level 0 */
+ while (level > 0 && tn) {
+ tn = tn->internal[(chunk_id >>
+ (YAFFS_TNODES_LEVEL0_BITS +
+ (level - 1) *
+ YAFFS_TNODES_INTERNAL_BITS)) &
+ YAFFS_TNODES_INTERNAL_MASK];
+ level--;
+ }
+
+ return tn;
+}
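
As a worked example, assuming the usual tnode geometry (16-entry level-0 tnodes, i.e. YAFFS_TNODES_LEVEL0_BITS = 4, and 8-way internal tnodes, i.e. YAFFS_TNODES_INTERNAL_BITS = 3): chunk_id 5000 becomes 312 after the level-0 shift, and 312 needs three more 3-bit shifts to reach zero, so required_depth is 3. A file whose tree is only two levels tall cannot hold that chunk, and the lookup returns NULL without descending.
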
+
+/* AddOrFindLevel0Tnode finds the level 0 tnode if it exists, otherwise first expands the tree.
+ * This happens in two steps:
+ * 1. If the tree isn't tall enough, then make it taller.
+ * 2. Scan down the tree towards the level 0 tnode adding tnodes if required.
+ *
+ * Used when modifying the tree.
+ *
+ * If the tn argument is NULL then a fresh tnode will be added; otherwise the
+ * specified tn will be plugged into the tree.
+ */
+
+struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct,
+ u32 chunk_id,
+ struct yaffs_tnode *passed_tn)
+{
+ int required_depth;
+ int i;
+ int l;
+ struct yaffs_tnode *tn;
+
+ u32 x;
+
+ /* Check sane level and page Id */
+ if (file_struct->top_level < 0
+ || file_struct->top_level > YAFFS_TNODES_MAX_LEVEL)
+ return NULL;
+
+ if (chunk_id > YAFFS_MAX_CHUNK_ID)
+ return NULL;
+
+	/* First check we're tall enough (i.e. enough top_level) */
+
+ x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
+ required_depth = 0;
+ while (x) {
+ x >>= YAFFS_TNODES_INTERNAL_BITS;
+ required_depth++;
+ }
+
+ if (required_depth > file_struct->top_level) {
+ /* Not tall enough, gotta make the tree taller */
+ for (i = file_struct->top_level; i < required_depth; i++) {
+
+ tn = yaffs_get_tnode(dev);
+
+ if (tn) {
+ tn->internal[0] = file_struct->top;
+ file_struct->top = tn;
+ file_struct->top_level++;
+ } else {
+ yaffs_trace(YAFFS_TRACE_ERROR, "yaffs: no more tnodes");
+ return NULL;
+ }
+ }
+ }
+
+ /* Traverse down to level 0, adding anything we need */
+
+ l = file_struct->top_level;
+ tn = file_struct->top;
+
+ if (l > 0) {
+ while (l > 0 && tn) {
+ x = (chunk_id >>
+ (YAFFS_TNODES_LEVEL0_BITS +
+ (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
+ YAFFS_TNODES_INTERNAL_MASK;
+
+ if ((l > 1) && !tn->internal[x]) {
+ /* Add missing non-level-zero tnode */
+ tn->internal[x] = yaffs_get_tnode(dev);
+ if (!tn->internal[x])
+ return NULL;
+ } else if (l == 1) {
+ /* Looking from level 1 at level 0 */
+ if (passed_tn) {
+ /* If we already have one, then release it. */
+ if (tn->internal[x])
+						yaffs_free_tnode(dev,
+							tn->internal[x]);
+ tn->internal[x] = passed_tn;
+
+ } else if (!tn->internal[x]) {
+ /* Don't have one, none passed in */
+ tn->internal[x] = yaffs_get_tnode(dev);
+ if (!tn->internal[x])
+ return NULL;
+ }
+ }
+
+ tn = tn->internal[x];
+ l--;
+ }
+ } else {
+ /* top is level 0 */
+ if (passed_tn) {
+ memcpy(tn, passed_tn,
+ (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8);
+ yaffs_free_tnode(dev, passed_tn);
+ }
+ }
+
+ return tn;
+}
+
+static int yaffs_tags_match(const struct yaffs_ext_tags *tags, int obj_id,
+ int chunk_obj)
+{
+ return (tags->chunk_id == chunk_obj &&
+ tags->obj_id == obj_id && !tags->is_deleted) ? 1 : 0;
+
+}
+
+static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, int the_chunk,
+ struct yaffs_ext_tags *tags, int obj_id,
+ int inode_chunk)
+{
+ int j;
+
+ for (j = 0; the_chunk && j < dev->chunk_grp_size; j++) {
+ if (yaffs_check_chunk_bit
+ (dev, the_chunk / dev->param.chunks_per_block,
+ the_chunk % dev->param.chunks_per_block)) {
+
+ if (dev->chunk_grp_size == 1)
+ return the_chunk;
+ else {
+ yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
+ tags);
+ if (yaffs_tags_match(tags, obj_id, inode_chunk)) {
+ /* found it; */
+ return the_chunk;
+ }
+ }
+ }
+ the_chunk++;
+ }
+ return -1;
+}
+
+static int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+ struct yaffs_ext_tags *tags)
+{
+	/* Get the tnode, then get the level 0 chunk offset */
+ struct yaffs_tnode *tn;
+ int the_chunk = -1;
+ struct yaffs_ext_tags local_tags;
+ int ret_val = -1;
+
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (!tags) {
+ /* Passed a NULL, so use our own tags space */
+ tags = &local_tags;
+ }
+
+ tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
+
+ if (tn) {
+ the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+ ret_val =
+ yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
+ inode_chunk);
+ }
+ return ret_val;
+}
+
+static int yaffs_find_del_file_chunk(struct yaffs_obj *in, int inode_chunk,
+ struct yaffs_ext_tags *tags)
+{
+	/* Get the tnode, then get the level 0 chunk offset */
+ struct yaffs_tnode *tn;
+ int the_chunk = -1;
+ struct yaffs_ext_tags local_tags;
+
+ struct yaffs_dev *dev = in->my_dev;
+ int ret_val = -1;
+
+ if (!tags) {
+ /* Passed a NULL, so use our own tags space */
+ tags = &local_tags;
+ }
+
+ tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
+
+ if (tn) {
+
+ the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+ ret_val =
+ yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
+ inode_chunk);
+
+ /* Delete the entry in the filestructure (if found) */
+ if (ret_val != -1)
+ yaffs_load_tnode_0(dev, tn, inode_chunk, 0);
+ }
+
+ return ret_val;
+}
+
+int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+ int nand_chunk, int in_scan)
+{
+ /* NB in_scan is zero unless scanning.
+ * For forward scanning, in_scan is > 0;
+ * for backward scanning in_scan is < 0
+ *
+ * nand_chunk = 0 is a dummy insert to make sure the tnodes are there.
+ */
+
+ struct yaffs_tnode *tn;
+ struct yaffs_dev *dev = in->my_dev;
+ int existing_cunk;
+ struct yaffs_ext_tags existing_tags;
+ struct yaffs_ext_tags new_tags;
+ unsigned existing_serial, new_serial;
+
+ if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) {
+		/* Just ignore an attempt to put a chunk into a non-file during
+		 * scanning. If it is not during scanning then something went wrong!
+ */
+ if (!in_scan) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+				"yaffs tragedy: attempt to put data chunk into a non-file"
+ );
+ YBUG();
+ }
+
+ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+ return YAFFS_OK;
+ }
+
+ tn = yaffs_add_find_tnode_0(dev,
+ &in->variant.file_variant,
+ inode_chunk, NULL);
+ if (!tn)
+ return YAFFS_FAIL;
+
+ if (!nand_chunk)
+ /* Dummy insert, bail now */
+ return YAFFS_OK;
+
+ existing_cunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+ if (in_scan != 0) {
+ /* If we're scanning then we need to test for duplicates
+		 * NB This does not need to be efficient since it should only
+		 * ever happen when power fails during a write; in that case only
+		 * one chunk should ever be affected.
+ *
+ * Correction for YAFFS2: This could happen quite a lot and we need to think about efficiency! TODO
+ * Update: For backward scanning we don't need to re-read tags so this is quite cheap.
+ */
+
+ if (existing_cunk > 0) {
+			/* NB Right now the existing chunk will not be the real chunk_id if the chunk group size > 1,
+			 * so we have to do a FindChunkInFile to get the real chunk id.
+ *
+ * We have a duplicate now we need to decide which one to use:
+ *
+ * Backwards scanning YAFFS2: The old one is what we use, dump the new one.
+ * Forward scanning YAFFS2: The new one is what we use, dump the old one.
+ * YAFFS1: Get both sets of tags and compare serial numbers.
+ */
+
+ if (in_scan > 0) {
+ /* Only do this for forward scanning */
+ yaffs_rd_chunk_tags_nand(dev,
+ nand_chunk,
+ NULL, &new_tags);
+
+ /* Do a proper find */
+ existing_cunk =
+ yaffs_find_chunk_in_file(in, inode_chunk,
+ &existing_tags);
+ }
+
+ if (existing_cunk <= 0) {
+ /*Hoosterman - how did this happen? */
+
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: existing chunk < 0 in scan"
+ );
+
+ }
+
+ /* NB The deleted flags should be false, otherwise the chunks will
+ * not be loaded during a scan
+ */
+
+ if (in_scan > 0) {
+ new_serial = new_tags.serial_number;
+ existing_serial = existing_tags.serial_number;
+ }
+
+ if ((in_scan > 0) &&
+ (existing_cunk <= 0 ||
+ ((existing_serial + 1) & 3) == new_serial)) {
+ /* Forward scanning.
+ * Use new
+ * Delete the old one and drop through to update the tnode
+ */
+ yaffs_chunk_del(dev, existing_cunk, 1,
+ __LINE__);
+ } else {
+ /* Backward scanning or we want to use the existing one
+ * Use existing.
+ * Delete the new one and return early so that the tnode isn't changed
+ */
+ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+ return YAFFS_OK;
+ }
+ }
+
+ }
+
+ if (existing_cunk == 0)
+ in->n_data_chunks++;
+
+ yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk);
+
+ return YAFFS_OK;
+}
+
+static void yaffs_soft_del_chunk(struct yaffs_dev *dev, int chunk)
+{
+ struct yaffs_block_info *the_block;
+ unsigned block_no;
+
+ yaffs_trace(YAFFS_TRACE_DELETION, "soft delete chunk %d", chunk);
+
+ block_no = chunk / dev->param.chunks_per_block;
+ the_block = yaffs_get_block_info(dev, block_no);
+ if (the_block) {
+ the_block->soft_del_pages++;
+ dev->n_free_chunks++;
+ yaffs2_update_oldest_dirty_seq(dev, block_no, the_block);
+ }
+}
+
+/* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all the chunks in the file.
+ * All soft deleting does is increment the block's softdelete count and pull the chunk out
+ * of the tnode.
+ * Thus, essentially this is the same as DeleteWorker except that the chunks are soft deleted.
+ */
+
+static int yaffs_soft_del_worker(struct yaffs_obj *in, struct yaffs_tnode *tn,
+ u32 level, int chunk_offset)
+{
+ int i;
+ int the_chunk;
+ int all_done = 1;
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (tn) {
+ if (level > 0) {
+
+ for (i = YAFFS_NTNODES_INTERNAL - 1; all_done && i >= 0;
+ i--) {
+ if (tn->internal[i]) {
+ all_done =
+ yaffs_soft_del_worker(in,
+ tn->internal
+ [i],
+ level - 1,
+ (chunk_offset
+ <<
+ YAFFS_TNODES_INTERNAL_BITS)
+ + i);
+ if (all_done) {
+ yaffs_free_tnode(dev,
+ tn->internal
+ [i]);
+ tn->internal[i] = NULL;
+ } else {
+ /* Hoosterman... how could this happen? */
+ }
+ }
+ }
+ return (all_done) ? 1 : 0;
+ } else if (level == 0) {
+
+ for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
+ the_chunk = yaffs_get_group_base(dev, tn, i);
+ if (the_chunk) {
+ /* Note this does not find the real chunk, only the chunk group.
+ * We make an assumption that a chunk group is not larger than
+ * a block.
+ */
+ yaffs_soft_del_chunk(dev, the_chunk);
+ yaffs_load_tnode_0(dev, tn, i, 0);
+ }
+
+ }
+ return 1;
+
+ }
+
+ }
+
+ return 1;
+
+}
+
+static void yaffs_remove_obj_from_dir(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev = obj->my_dev;
+ struct yaffs_obj *parent;
+
+ yaffs_verify_obj_in_dir(obj);
+ parent = obj->parent;
+
+ yaffs_verify_dir(parent);
+
+ if (dev && dev->param.remove_obj_fn)
+ dev->param.remove_obj_fn(obj);
+
+ list_del_init(&obj->siblings);
+ obj->parent = NULL;
+
+ yaffs_verify_dir(parent);
+}
+
+void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj)
+{
+ if (!directory) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: Trying to add an object to a null pointer directory"
+ );
+ YBUG();
+ return;
+ }
+ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: Trying to add an object to a non-directory"
+ );
+ YBUG();
+ }
+
+ if (obj->siblings.prev == NULL) {
+ /* Not initialised */
+ YBUG();
+ }
+
+ yaffs_verify_dir(directory);
+
+ yaffs_remove_obj_from_dir(obj);
+
+ /* Now add it */
+ list_add(&obj->siblings, &directory->variant.dir_variant.children);
+ obj->parent = directory;
+
+ if (directory == obj->my_dev->unlinked_dir
+ || directory == obj->my_dev->del_dir) {
+ obj->unlinked = 1;
+ obj->my_dev->n_unlinked_files++;
+ obj->rename_allowed = 0;
+ }
+
+ yaffs_verify_dir(directory);
+ yaffs_verify_obj_in_dir(obj);
+}
+
+static int yaffs_change_obj_name(struct yaffs_obj *obj,
+ struct yaffs_obj *new_dir,
+ const YCHAR * new_name, int force, int shadows)
+{
+ int unlink_op;
+ int del_op;
+
+ struct yaffs_obj *existing_target;
+
+ if (new_dir == NULL)
+ new_dir = obj->parent; /* use the old directory */
+
+ if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: yaffs_change_obj_name: new_dir is not a directory"
+ );
+ YBUG();
+ }
+
+ /* TODO: Do we need this different handling for YAFFS2 and YAFFS1?? */
+ if (obj->my_dev->param.is_yaffs2)
+ unlink_op = (new_dir == obj->my_dev->unlinked_dir);
+ else
+ unlink_op = (new_dir == obj->my_dev->unlinked_dir
+ && obj->variant_type == YAFFS_OBJECT_TYPE_FILE);
+
+ del_op = (new_dir == obj->my_dev->del_dir);
+
+ existing_target = yaffs_find_by_name(new_dir, new_name);
+
+ /* If the object is a file going into the unlinked directory,
+ * then it is OK to just stuff it in since duplicate names are allowed.
+ * Otherwise only proceed if the new name does not exist and we're putting
+ * it into a directory.
+ */
+ if ((unlink_op ||
+ del_op ||
+ force ||
+ (shadows > 0) ||
+ !existing_target) &&
+ new_dir->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_set_obj_name(obj, new_name);
+ obj->dirty = 1;
+
+ yaffs_add_obj_to_dir(new_dir, obj);
+
+ if (unlink_op)
+ obj->unlinked = 1;
+
+ /* If it is a deletion then we mark it as a shrink for gc purposes. */
+ if (yaffs_update_oh(obj, new_name, 0, del_op, shadows, NULL) >=
+ 0)
+ return YAFFS_OK;
+ }
+
+ return YAFFS_FAIL;
+}
+
+/*------------------------ Short Operations Cache ----------------------------------------
+ * In many situations where there is no high level buffering a lot of
+ * reads might be short sequential reads, and a lot of writes may be short
+ * sequential writes, e.g. scanning/writing a jpeg file.
+ * In these cases, a short read/write cache can provide a huge performance
+ * benefit with dumb-as-a-rock code.
+ * In Linux, the page cache provides read buffering and the short op cache
+ * provides write buffering.
+ *
+ * There are a limited number (~10) of cache chunks per device so that we don't
+ * need a very intelligent search.
+ */
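+ /* Worked example (illustrative, assuming 2048-byte data chunks): an
+ * application writing a file 512 bytes at a time lands four consecutive
+ * writes in the same cache entry, so the chunk is written to NAND once
+ * when the entry is finally flushed rather than four times (assuming
+ * write-through is not requested).
+ */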
+
+static int yaffs_obj_cache_dirty(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev = obj->my_dev;
+ int i;
+ struct yaffs_cache *cache;
+ int n_caches = obj->my_dev->param.n_caches;
+
+ for (i = 0; i < n_caches; i++) {
+ cache = &dev->cache[i];
+ if (cache->object == obj && cache->dirty)
+ return 1;
+ }
+
+ return 0;
+}
+
+static void yaffs_flush_file_cache(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev = obj->my_dev;
+ int lowest = -99; /* Stop compiler whining. */
+ int i;
+ struct yaffs_cache *cache;
+ int chunk_written = 0;
+ int n_caches = obj->my_dev->param.n_caches;
+
+ if (n_caches > 0) {
+ do {
+ cache = NULL;
+
+ /* Find the dirty cache for this object with the lowest chunk id. */
+ for (i = 0; i < n_caches; i++) {
+ if (dev->cache[i].object == obj &&
+ dev->cache[i].dirty) {
+ if (!cache
+ || dev->cache[i].chunk_id <
+ lowest) {
+ cache = &dev->cache[i];
+ lowest = cache->chunk_id;
+ }
+ }
+ }
+
+ if (cache && !cache->locked) {
+ /* Write it out and free it up */
+
+ chunk_written =
+ yaffs_wr_data_obj(cache->object,
+ cache->chunk_id,
+ cache->data,
+ cache->n_bytes, 1);
+ cache->dirty = 0;
+ cache->object = NULL;
+ }
+
+ } while (cache && chunk_written > 0);
+
+ if (cache)
+ /* Hoosterman, disk full while writing cache out. */
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: no space during cache write");
+
+ }
+
+}
+
+ /* yaffs_flush_whole_cache(dev)
+ *
+ * Flush the dirty short-op cache entries for every object on the device.
+ */
+
+void yaffs_flush_whole_cache(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ int n_caches = dev->param.n_caches;
+ int i;
+
+ /* Find a dirty object in the cache and flush it...
+ * until there are no further dirty objects.
+ */
+ do {
+ obj = NULL;
+ for (i = 0; i < n_caches && !obj; i++) {
+ if (dev->cache[i].object && dev->cache[i].dirty)
+ obj = dev->cache[i].object;
+
+ }
+ if (obj)
+ yaffs_flush_file_cache(obj);
+
+ } while (obj);
+
+}
+
+/* Grab us a cache chunk for use.
+ * First look for an empty one.
+ * Then look for the least recently used non-dirty one.
+ * Then look for the least recently used dirty one, flush it and look again.
+ */
+static struct yaffs_cache *yaffs_grab_chunk_worker(struct yaffs_dev *dev)
+{
+ int i;
+
+ if (dev->param.n_caches > 0) {
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (!dev->cache[i].object)
+ return &dev->cache[i];
+ }
+ }
+
+ return NULL;
+}
+
+static struct yaffs_cache *yaffs_grab_chunk_cache(struct yaffs_dev *dev)
+{
+ struct yaffs_cache *cache;
+ struct yaffs_obj *the_obj;
+ int usage;
+ int i;
+ int pushout;
+
+ if (dev->param.n_caches > 0) {
+ /* Try find a non-dirty one... */
+
+ cache = yaffs_grab_chunk_worker(dev);
+
+ if (!cache) {
+ /* They were all dirty, so find the least recently used object and flush
+ * its cache, then find again.
+ * NB What's here is not very accurate: we actually flush the object
+ * that owns the least recently used page.
+ */
+
+ /* With locking we can't assume we can use entry zero */
+
+ the_obj = NULL;
+ usage = -1;
+ cache = NULL;
+ pushout = -1;
+
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].object &&
+ !dev->cache[i].locked &&
+ (dev->cache[i].last_use < usage
+ || !cache)) {
+ usage = dev->cache[i].last_use;
+ the_obj = dev->cache[i].object;
+ cache = &dev->cache[i];
+ pushout = i;
+ }
+ }
+
+ if (!cache || cache->dirty) {
+ /* Flush and try again */
+ yaffs_flush_file_cache(the_obj);
+ cache = yaffs_grab_chunk_worker(dev);
+ }
+
+ }
+ return cache;
+ } else {
+ return NULL;
+ }
+}
+
+/* Find a cached chunk */
+static struct yaffs_cache *yaffs_find_chunk_cache(const struct yaffs_obj *obj,
+ int chunk_id)
+{
+ struct yaffs_dev *dev = obj->my_dev;
+ int i;
+ if (dev->param.n_caches > 0) {
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].object == obj &&
+ dev->cache[i].chunk_id == chunk_id) {
+ dev->cache_hits++;
+
+ return &dev->cache[i];
+ }
+ }
+ }
+ return NULL;
+}
+
+ /* Mark the chunk for the least recently used algorithm */
+static void yaffs_use_cache(struct yaffs_dev *dev, struct yaffs_cache *cache,
+ int is_write)
+{
+
+ if (dev->param.n_caches > 0) {
+ if (dev->cache_last_use < 0 || dev->cache_last_use > 100000000) {
+ /* Reset the cache usages */
+ int i;
+ for (i = 1; i < dev->param.n_caches; i++)
+ dev->cache[i].last_use = 0;
+
+ dev->cache_last_use = 0;
+ }
+
+ dev->cache_last_use++;
+
+ cache->last_use = dev->cache_last_use;
+
+ if (is_write)
+ cache->dirty = 1;
+ }
+}
+
+/* Invalidate a single cache page.
+ * Do this when a whole page gets written,
+ * i.e. the short cache for this page is no longer valid.
+ */
+static void yaffs_invalidate_chunk_cache(struct yaffs_obj *object, int chunk_id)
+{
+ if (object->my_dev->param.n_caches > 0) {
+ struct yaffs_cache *cache =
+ yaffs_find_chunk_cache(object, chunk_id);
+
+ if (cache)
+ cache->object = NULL;
+ }
+}
+
+/* Invalidate all the cache pages associated with this object
+ * Do this whenever the file is deleted or resized.
+ */
+static void yaffs_invalidate_whole_cache(struct yaffs_obj *in)
+{
+ int i;
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (dev->param.n_caches > 0) {
+ /* Invalidate it. */
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].object == in)
+ dev->cache[i].object = NULL;
+ }
+ }
+}
+
+static void yaffs_unhash_obj(struct yaffs_obj *obj)
+{
+ int bucket;
+ struct yaffs_dev *dev = obj->my_dev;
+
+ /* If it is still linked into the bucket list, free from the list */
+ if (!list_empty(&obj->hash_link)) {
+ list_del_init(&obj->hash_link);
+ bucket = yaffs_hash_fn(obj->obj_id);
+ dev->obj_bucket[bucket].count--;
+ }
+}
+
+ /* FreeObject frees up an object and puts it back on the free list */
+static void yaffs_free_obj(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev = obj->my_dev;
+
+ yaffs_trace(YAFFS_TRACE_OS, "FreeObject %p inode %p",
+ obj, obj->my_inode);
+
+ if (!obj)
+ YBUG();
+ if (obj->parent)
+ YBUG();
+ if (!list_empty(&obj->siblings))
+ YBUG();
+
+ if (obj->my_inode) {
+ /* We're still hooked up to a cached inode.
+ * Don't delete now, but mark for later deletion
+ */
+ obj->defered_free = 1;
+ return;
+ }
+
+ yaffs_unhash_obj(obj);
+
+ yaffs_free_raw_obj(dev, obj);
+ dev->n_obj--;
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+}
+
+void yaffs_handle_defered_free(struct yaffs_obj *obj)
+{
+ if (obj->defered_free)
+ yaffs_free_obj(obj);
+}
+
+static int yaffs_generic_obj_del(struct yaffs_obj *in)
+{
+
+ /* First off, invalidate the file's data in the cache, without flushing. */
+ yaffs_invalidate_whole_cache(in);
+
+ if (in->my_dev->param.is_yaffs2 && (in->parent != in->my_dev->del_dir)) {
+ /* Move to the deleted directory so we have a record that it was deleted. */
+ yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0,
+ 0);
+
+ }
+
+ yaffs_remove_obj_from_dir(in);
+ yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__);
+ in->hdr_chunk = 0;
+
+ yaffs_free_obj(in);
+ return YAFFS_OK;
+
+}
+
+static void yaffs_soft_del_file(struct yaffs_obj *obj)
+{
+ if (obj->deleted &&
+ obj->variant_type == YAFFS_OBJECT_TYPE_FILE && !obj->soft_del) {
+ if (obj->n_data_chunks <= 0) {
+ /* Empty file with no duplicate object headers,
+ * just delete it immediately */
+ yaffs_free_tnode(obj->my_dev,
+ obj->variant.file_variant.top);
+ obj->variant.file_variant.top = NULL;
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "yaffs: Deleting empty file %d",
+ obj->obj_id);
+ yaffs_generic_obj_del(obj);
+ } else {
+ yaffs_soft_del_worker(obj,
+ obj->variant.file_variant.top,
+ obj->variant.
+ file_variant.top_level, 0);
+ obj->soft_del = 1;
+ }
+ }
+}
+
+/* Pruning removes any part of the file structure tree that is beyond the
+ * bounds of the file (ie that does not point to chunks).
+ *
+ * A file should only get pruned when its size is reduced.
+ *
+ * Before pruning, the chunks must be pulled from the tree and the
+ * level 0 tnode entries must be zeroed out.
+ * Could also use this for file deletion, but that's probably better handled
+ * by a special case.
+ *
+ * This function is recursive. For levels > 0 the function is called again on
+ * any sub-tree. For level == 0 we just check if the sub-tree has data.
+ * If there is no data in a subtree then it is pruned.
+ */
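+ /* Worked example (illustrative, assuming 16 level 0 entries per tnode):
+ * a file that needed top_level == 1 at 20 chunks and is truncated back
+ * to 10 chunks has its second level 0 tnode emptied; yaffs_prune_worker()
+ * frees that tnode, and yaffs_prune_tree() then sees only internal[0] in
+ * use and drops top_level back to 0.
+ */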
+
+static struct yaffs_tnode *yaffs_prune_worker(struct yaffs_dev *dev,
+ struct yaffs_tnode *tn, u32 level,
+ int del0)
+{
+ int i;
+ int has_data;
+
+ if (tn) {
+ has_data = 0;
+
+ if (level > 0) {
+ for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
+ if (tn->internal[i]) {
+ tn->internal[i] =
+ yaffs_prune_worker(dev,
+ tn->internal[i],
+ level - 1,
+ (i ==
+ 0) ? del0 : 1);
+ }
+
+ if (tn->internal[i])
+ has_data++;
+ }
+ } else {
+ int tnode_size_u32 = dev->tnode_size / sizeof(u32);
+ u32 *map = (u32 *) tn;
+
+ for (i = 0; !has_data && i < tnode_size_u32; i++) {
+ if (map[i])
+ has_data++;
+ }
+ }
+
+ if (has_data == 0 && del0) {
+ /* Free and return NULL */
+
+ yaffs_free_tnode(dev, tn);
+ tn = NULL;
+ }
+
+ }
+
+ return tn;
+
+}
+
+static int yaffs_prune_tree(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct)
+{
+ int i;
+ int has_data;
+ int done = 0;
+ struct yaffs_tnode *tn;
+
+ if (file_struct->top_level > 0) {
+ file_struct->top =
+ yaffs_prune_worker(dev, file_struct->top,
+ file_struct->top_level, 0);
+
+ /* Now we have a tree with all the non-zero branches NULL but the height
+ * is the same as it was.
+ * Let's see if we can trim internal tnodes to shorten the tree.
+ * We can do this if only the 0th element in the tnode is in use
+ * (ie all the non-zero are NULL)
+ */
+
+ while (file_struct->top_level && !done) {
+ tn = file_struct->top;
+
+ has_data = 0;
+ for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
+ if (tn->internal[i])
+ has_data++;
+ }
+
+ if (!has_data) {
+ file_struct->top = tn->internal[0];
+ file_struct->top_level--;
+ yaffs_free_tnode(dev, tn);
+ } else {
+ done = 1;
+ }
+ }
+ }
+
+ return YAFFS_OK;
+}
+
+/*-------------------- End of File Structure functions.-------------------*/
+
+ /* AllocateEmptyObject gets us a clean Object. Tries to allocate more if we run out. */
+static struct yaffs_obj *yaffs_alloc_empty_obj(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj = yaffs_alloc_raw_obj(dev);
+
+ if (obj) {
+ dev->n_obj++;
+
+ /* Now sweeten it up... */
+
+ memset(obj, 0, sizeof(struct yaffs_obj));
+ obj->being_created = 1;
+
+ obj->my_dev = dev;
+ obj->hdr_chunk = 0;
+ obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN;
+ INIT_LIST_HEAD(&(obj->hard_links));
+ INIT_LIST_HEAD(&(obj->hash_link));
+ INIT_LIST_HEAD(&obj->siblings);
+
+ /* Now make the directory sane */
+ if (dev->root_dir) {
+ obj->parent = dev->root_dir;
+ list_add(&(obj->siblings),
+ &dev->root_dir->variant.dir_variant.children);
+ }
+
+ /* Add it to the lost and found directory.
+ * NB Can't put root or lost-n-found in lost-n-found so
+ * check if lost-n-found exists first
+ */
+ if (dev->lost_n_found)
+ yaffs_add_obj_to_dir(dev->lost_n_found, obj);
+
+ obj->being_created = 0;
+ }
+
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+
+ return obj;
+}
+
+static int yaffs_find_nice_bucket(struct yaffs_dev *dev)
+{
+ int i;
+ int l = 999;
+ int lowest = 999999;
+
+ /* Search for the shortest list or one that
+ * isn't too long.
+ */
+
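+ /* The search is bounded: sample at most 10 buckets and stop early as
+ * soon as one with four or fewer entries is found.
+ */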
+ for (i = 0; i < 10 && lowest > 4; i++) {
+ dev->bucket_finder++;
+ dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS;
+ if (dev->obj_bucket[dev->bucket_finder].count < lowest) {
+ lowest = dev->obj_bucket[dev->bucket_finder].count;
+ l = dev->bucket_finder;
+ }
+
+ }
+
+ return l;
+}
+
+static int yaffs_new_obj_id(struct yaffs_dev *dev)
+{
+ int bucket = yaffs_find_nice_bucket(dev);
+
+ /* Now find an object value that has not already been taken
+ * by scanning the list.
+ */
+
+ int found = 0;
+ struct list_head *i;
+
+ u32 n = (u32) bucket;
+
+ /* yaffs_check_obj_hash_sane(); */
+
+ while (!found) {
+ found = 1;
+ n += YAFFS_NOBJECT_BUCKETS;
+ if (1 || dev->obj_bucket[bucket].count > 0) {
+ list_for_each(i, &dev->obj_bucket[bucket].list) {
+ /* If there is already one in the list */
+ if (i && list_entry(i, struct yaffs_obj,
+ hash_link)->obj_id == n) {
+ found = 0;
+ }
+ }
+ }
+ }
+
+ return n;
+}
+
+static void yaffs_hash_obj(struct yaffs_obj *in)
+{
+ int bucket = yaffs_hash_fn(in->obj_id);
+ struct yaffs_dev *dev = in->my_dev;
+
+ list_add(&in->hash_link, &dev->obj_bucket[bucket].list);
+ dev->obj_bucket[bucket].count++;
+}
+
+struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number)
+{
+ int bucket = yaffs_hash_fn(number);
+ struct list_head *i;
+ struct yaffs_obj *in;
+
+ list_for_each(i, &dev->obj_bucket[bucket].list) {
+ /* Look if it is in the list */
+ if (i) {
+ in = list_entry(i, struct yaffs_obj, hash_link);
+ if (in->obj_id == number) {
+
+ /* Don't tell the VFS about this one if it is marked for deferred freeing */
+ if (in->defered_free)
+ return NULL;
+
+ return in;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+struct yaffs_obj *yaffs_new_obj(struct yaffs_dev *dev, int number,
+ enum yaffs_obj_type type)
+{
+ struct yaffs_obj *the_obj = NULL;
+ struct yaffs_tnode *tn = NULL;
+
+ if (number < 0)
+ number = yaffs_new_obj_id(dev);
+
+ if (type == YAFFS_OBJECT_TYPE_FILE) {
+ tn = yaffs_get_tnode(dev);
+ if (!tn)
+ return NULL;
+ }
+
+ the_obj = yaffs_alloc_empty_obj(dev);
+ if (!the_obj) {
+ if (tn)
+ yaffs_free_tnode(dev, tn);
+ return NULL;
+ }
+
+ if (the_obj) {
+ the_obj->fake = 0;
+ the_obj->rename_allowed = 1;
+ the_obj->unlink_allowed = 1;
+ the_obj->obj_id = number;
+ yaffs_hash_obj(the_obj);
+ the_obj->variant_type = type;
+ yaffs_load_current_time(the_obj, 1, 1);
+
+ switch (type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ the_obj->variant.file_variant.file_size = 0;
+ the_obj->variant.file_variant.scanned_size = 0;
+ the_obj->variant.file_variant.shrink_size = ~0; /* max */
+ the_obj->variant.file_variant.top_level = 0;
+ the_obj->variant.file_variant.top = tn;
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ INIT_LIST_HEAD(&the_obj->variant.dir_variant.children);
+ INIT_LIST_HEAD(&the_obj->variant.dir_variant.dirty);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* No action required */
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* TODO: this should not happen */
+ break;
+ }
+ }
+
+ return the_obj;
+}
+
+static struct yaffs_obj *yaffs_create_fake_dir(struct yaffs_dev *dev,
+ int number, u32 mode)
+{
+
+ struct yaffs_obj *obj =
+ yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
+ if (obj) {
+ obj->fake = 1; /* it is fake so it might have no NAND presence... */
+ obj->rename_allowed = 0; /* ... and we're not allowed to rename it... */
+ obj->unlink_allowed = 0; /* ... or unlink it */
+ obj->deleted = 0;
+ obj->unlinked = 0;
+ obj->yst_mode = mode;
+ obj->my_dev = dev;
+ obj->hdr_chunk = 0; /* Not a valid chunk. */
+ }
+
+ return obj;
+
+}
+
+
+static void yaffs_init_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ int i;
+
+ dev->n_obj = 0;
+ dev->n_tnodes = 0;
+
+ yaffs_init_raw_tnodes_and_objs(dev);
+
+ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+ INIT_LIST_HEAD(&dev->obj_bucket[i].list);
+ dev->obj_bucket[i].count = 0;
+ }
+}
+
+struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
+ int number,
+ enum yaffs_obj_type type)
+{
+ struct yaffs_obj *the_obj = NULL;
+
+ if (number > 0)
+ the_obj = yaffs_find_by_number(dev, number);
+
+ if (!the_obj)
+ the_obj = yaffs_new_obj(dev, number, type);
+
+ return the_obj;
+
+}
+
+YCHAR *yaffs_clone_str(const YCHAR * str)
+{
+ YCHAR *new_str = NULL;
+ int len;
+
+ if (!str)
+ str = _Y("");
+
+ len = strnlen(str, YAFFS_MAX_ALIAS_LENGTH);
+ new_str = kmalloc((len + 1) * sizeof(YCHAR), GFP_NOFS);
+ if (new_str) {
+ strncpy(new_str, str, len);
+ new_str[len] = 0;
+ }
+ return new_str;
+
+}
+/*
+ * yaffs_update_parent() handles fixing a directory's mtime and ctime when a new
+ * link (i.e. name) is created or deleted in the directory.
+ *
+ * e.g.
+ * create dir/a : update dir's mtime/ctime
+ * rm dir/a: update dir's mtime/ctime
+ * modify dir/a: don't update dir's mtime/ctime
+ *
+ * This can be handled immediately or deferred. Deferring helps reduce the number
+ * of updates when many files in a directory are changed within a brief period.
+ *
+ * If the directory updating is deferred then yaffs_update_dirty_dirs must be
+ * called periodically.
+ */
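+ /* Usage sketch (illustrative): a background thread servicing a device
+ * with param.defered_dir_update set would do something like
+ *
+ * if (dev->param.defered_dir_update)
+ * yaffs_update_dirty_dirs(dev);
+ *
+ * on each pass so that deferred mtime/ctime updates eventually land.
+ */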
+
+static void yaffs_update_parent(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev;
+ if (!obj)
+ return;
+ dev = obj->my_dev;
+ obj->dirty = 1;
+ yaffs_load_current_time(obj, 0, 1);
+ if (dev->param.defered_dir_update) {
+ struct list_head *link = &obj->variant.dir_variant.dirty;
+
+ if (list_empty(link)) {
+ list_add(link, &dev->dirty_dirs);
+ yaffs_trace(YAFFS_TRACE_BACKGROUND,
+ "Added object %d to dirty directories",
+ obj->obj_id);
+ }
+
+ } else {
+ yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
+ }
+}
+
+void yaffs_update_dirty_dirs(struct yaffs_dev *dev)
+{
+ struct list_head *link;
+ struct yaffs_obj *obj;
+ struct yaffs_dir_var *d_s;
+ union yaffs_obj_var *o_v;
+
+ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update dirty directories");
+
+ while (!list_empty(&dev->dirty_dirs)) {
+ link = dev->dirty_dirs.next;
+ list_del_init(link);
+
+ d_s = list_entry(link, struct yaffs_dir_var, dirty);
+ o_v = list_entry(d_s, union yaffs_obj_var, dir_variant);
+ obj = list_entry(o_v, struct yaffs_obj, variant);
+
+ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update directory %d",
+ obj->obj_id);
+
+ if (obj->dirty)
+ yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
+ }
+}
+
+/*
+ * Mknod (create) a new object.
+ * equiv_obj only has meaning for a hard link;
+ * alias_str only has meaning for a symlink.
+ * rdev only has meaning for devices (a subset of special objects)
+ */
+
+static struct yaffs_obj *yaffs_create_obj(enum yaffs_obj_type type,
+ struct yaffs_obj *parent,
+ const YCHAR * name,
+ u32 mode,
+ u32 uid,
+ u32 gid,
+ struct yaffs_obj *equiv_obj,
+ const YCHAR * alias_str, u32 rdev)
+{
+ struct yaffs_obj *in;
+ YCHAR *str = NULL;
+
+ struct yaffs_dev *dev = parent->my_dev;
+
+ /* Check if the entry exists. If it does then fail the call since we don't want a dup. */
+ if (yaffs_find_by_name(parent, name))
+ return NULL;
+
+ if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
+ str = yaffs_clone_str(alias_str);
+ if (!str)
+ return NULL;
+ }
+
+ in = yaffs_new_obj(dev, -1, type);
+
+ if (!in) {
+ if (str)
+ kfree(str);
+ return NULL;
+ }
+
+ if (in) {
+ in->hdr_chunk = 0;
+ in->valid = 1;
+ in->variant_type = type;
+
+ in->yst_mode = mode;
+
+ yaffs_attribs_init(in, gid, uid, rdev);
+
+ in->n_data_chunks = 0;
+
+ yaffs_set_obj_name(in, name);
+ in->dirty = 1;
+
+ yaffs_add_obj_to_dir(parent, in);
+
+ in->my_dev = parent->my_dev;
+
+ switch (type) {
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ in->variant.symlink_variant.alias = str;
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ in->variant.hardlink_variant.equiv_obj = equiv_obj;
+ in->variant.hardlink_variant.equiv_id =
+ equiv_obj->obj_id;
+ list_add(&in->hard_links, &equiv_obj->hard_links);
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* do nothing */
+ break;
+ }
+
+ if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) {
+ /* Could not create the object header, fail the creation */
+ yaffs_del_obj(in);
+ in = NULL;
+ }
+
+ yaffs_update_parent(parent);
+ }
+
+ return in;
+}
+
+struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
+ const YCHAR * name, u32 mode, u32 uid,
+ u32 gid)
+{
+ return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
+ uid, gid, NULL, NULL, 0);
+}
+
+struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR * name,
+ u32 mode, u32 uid, u32 gid)
+{
+ return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
+ mode, uid, gid, NULL, NULL, 0);
+}
+
+struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
+ const YCHAR * name, u32 mode, u32 uid,
+ u32 gid, u32 rdev)
+{
+ return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
+ uid, gid, NULL, NULL, rdev);
+}
+
+struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
+ const YCHAR * name, u32 mode, u32 uid,
+ u32 gid, const YCHAR * alias)
+{
+ return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
+ uid, gid, NULL, alias, 0);
+}
+
+/* yaffs_link_obj returns the object id of the equivalent object.*/
+struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name,
+ struct yaffs_obj *equiv_obj)
+{
+ /* Get the real object in case we were fed a hard link as an equivalent object */
+ equiv_obj = yaffs_get_equivalent_obj(equiv_obj);
+
+ if (yaffs_create_obj
+ (YAFFS_OBJECT_TYPE_HARDLINK, parent, name, 0, 0, 0,
+ equiv_obj, NULL, 0)) {
+ return equiv_obj;
+ } else {
+ return NULL;
+ }
+
+}
+
+
+
+/*------------------------- Block Management and Page Allocation ----------------*/
+
+static int yaffs_init_blocks(struct yaffs_dev *dev)
+{
+ int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
+
+ dev->block_info = NULL;
+ dev->chunk_bits = NULL;
+
+ dev->alloc_block = -1; /* force it to get a new one */
+
+ /* If the first allocation strategy fails, try the alternate one */
+ dev->block_info =
+ kmalloc(n_blocks * sizeof(struct yaffs_block_info), GFP_NOFS);
+ if (!dev->block_info) {
+ dev->block_info =
+ vmalloc(n_blocks * sizeof(struct yaffs_block_info));
+ dev->block_info_alt = 1;
+ } else {
+ dev->block_info_alt = 0;
+ }
+
+ if (dev->block_info) {
+ /* Set up dynamic blockinfo stuff. Round up bytes. */
+ dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8;
+ dev->chunk_bits =
+ kmalloc(dev->chunk_bit_stride * n_blocks, GFP_NOFS);
+ if (!dev->chunk_bits) {
+ dev->chunk_bits =
+ vmalloc(dev->chunk_bit_stride * n_blocks);
+ dev->chunk_bits_alt = 1;
+ } else {
+ dev->chunk_bits_alt = 0;
+ }
+ }
+
+ if (dev->block_info && dev->chunk_bits) {
+ memset(dev->block_info, 0,
+ n_blocks * sizeof(struct yaffs_block_info));
+ memset(dev->chunk_bits, 0, dev->chunk_bit_stride * n_blocks);
+ return YAFFS_OK;
+ }
+
+ return YAFFS_FAIL;
+}
+
+static void yaffs_deinit_blocks(struct yaffs_dev *dev)
+{
+ if (dev->block_info_alt && dev->block_info)
+ vfree(dev->block_info);
+ else if (dev->block_info)
+ kfree(dev->block_info);
+
+ dev->block_info_alt = 0;
+
+ dev->block_info = NULL;
+
+ if (dev->chunk_bits_alt && dev->chunk_bits)
+ vfree(dev->chunk_bits);
+ else if (dev->chunk_bits)
+ kfree(dev->chunk_bits);
+ dev->chunk_bits_alt = 0;
+ dev->chunk_bits = NULL;
+}
+
+void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no)
+{
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no);
+
+ int erased_ok = 0;
+
+ /* If the block is still healthy erase it and mark as clean.
+ * If the block has had a data failure, then retire it.
+ */
+
+ yaffs_trace(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
+ "yaffs_block_became_dirty block %d state %d %s",
+ block_no, bi->block_state,
+ (bi->needs_retiring) ? "needs retiring" : "");
+
+ yaffs2_clear_oldest_dirty_seq(dev, bi);
+
+ bi->block_state = YAFFS_BLOCK_STATE_DIRTY;
+
+ /* If this is the block being garbage collected then stop gc'ing this block */
+ if (block_no == dev->gc_block)
+ dev->gc_block = 0;
+
+ /* If this block is currently the best candidate for gc then drop as a candidate */
+ if (block_no == dev->gc_dirtiest) {
+ dev->gc_dirtiest = 0;
+ dev->gc_pages_in_use = 0;
+ }
+
+ if (!bi->needs_retiring) {
+ yaffs2_checkpt_invalidate(dev);
+ erased_ok = yaffs_erase_block(dev, block_no);
+ if (!erased_ok) {
+ dev->n_erase_failures++;
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>> Erasure failed %d", block_no);
+ }
+ }
+
+ if (erased_ok &&
+ ((yaffs_trace_mask & YAFFS_TRACE_ERASE)
+ || !yaffs_skip_verification(dev))) {
+ int i;
+ for (i = 0; i < dev->param.chunks_per_block; i++) {
+ if (!yaffs_check_chunk_erased
+ (dev, block_no * dev->param.chunks_per_block + i)) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ ">>Block %d erasure supposedly OK, but chunk %d not erased",
+ block_no, i);
+ }
+ }
+ }
+
+ if (erased_ok) {
+ /* Clean it up... */
+ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
+ bi->seq_number = 0;
+ dev->n_erased_blocks++;
+ bi->pages_in_use = 0;
+ bi->soft_del_pages = 0;
+ bi->has_shrink_hdr = 0;
+ bi->skip_erased_check = 1; /* Clean, so no need to check */
+ bi->gc_prioritise = 0;
+ yaffs_clear_chunk_bits(dev, block_no);
+
+ yaffs_trace(YAFFS_TRACE_ERASE,
+ "Erased block %d", block_no);
+ } else {
+ /* We lost a block of free space */
+ dev->n_free_chunks -= dev->param.chunks_per_block;
+ yaffs_retire_block(dev, block_no);
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>> Block %d retired", block_no);
+ }
+}
+
+
+
+static int yaffs_gc_block(struct yaffs_dev *dev, int block, int whole_block)
+{
+ int old_chunk;
+ int new_chunk;
+ int mark_flash;
+ int ret_val = YAFFS_OK;
+ int i;
+ int is_checkpt_block;
+ int matching_chunk;
+ int max_copies;
+
+ int chunks_before = yaffs_get_erased_chunks(dev);
+ int chunks_after;
+
+ struct yaffs_ext_tags tags;
+
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, block);
+
+ struct yaffs_obj *object;
+
+ is_checkpt_block = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT);
+
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "Collecting block %d, in use %d, shrink %d, whole_block %d",
+ block, bi->pages_in_use, bi->has_shrink_hdr,
+ whole_block);
+
+ /*yaffs_verify_free_chunks(dev); */
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_FULL)
+ bi->block_state = YAFFS_BLOCK_STATE_COLLECTING;
+
+ bi->has_shrink_hdr = 0; /* clear the flag so that the block can be erased */
+
+ dev->gc_disable = 1;
+
+ if (is_checkpt_block || !yaffs_still_some_chunks(dev, block)) {
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "Collecting block %d that has no chunks in use",
+ block);
+ yaffs_block_became_dirty(dev, block);
+ } else {
+
+ u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
+
+ yaffs_verify_blk(dev, bi, block);
+
+ max_copies = (whole_block) ? dev->param.chunks_per_block : 5;
+ old_chunk = block * dev->param.chunks_per_block + dev->gc_chunk;
+
+ for ( /* init already done */ ;
+ ret_val == YAFFS_OK &&
+ dev->gc_chunk < dev->param.chunks_per_block &&
+ (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) &&
+ max_copies > 0; dev->gc_chunk++, old_chunk++) {
+ if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) {
+
+ /* This page is in use and might need to be copied off */
+
+ max_copies--;
+
+ mark_flash = 1;
+
+ yaffs_init_tags(&tags);
+
+ yaffs_rd_chunk_tags_nand(dev, old_chunk,
+ buffer, &tags);
+
+ object = yaffs_find_by_number(dev, tags.obj_id);
+
+ yaffs_trace(YAFFS_TRACE_GC_DETAIL,
+ "Collecting chunk in block %d, %d %d %d ",
+ dev->gc_chunk, tags.obj_id,
+ tags.chunk_id, tags.n_bytes);
+
+ if (object && !yaffs_skip_verification(dev)) {
+ if (tags.chunk_id == 0)
+ matching_chunk =
+ object->hdr_chunk;
+ else if (object->soft_del)
+ matching_chunk = old_chunk; /* Defeat the test */
+ else
+ matching_chunk =
+ yaffs_find_chunk_in_file
+ (object, tags.chunk_id,
+ NULL);
+
+ if (old_chunk != matching_chunk)
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "gc: page in gc mismatch: %d %d %d %d",
+ old_chunk,
+ matching_chunk,
+ tags.obj_id,
+ tags.chunk_id);
+
+ }
+
+ if (!object) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "page %d in gc has no object: %d %d %d ",
+ old_chunk,
+ tags.obj_id, tags.chunk_id,
+ tags.n_bytes);
+ }
+
+ if (object &&
+ object->deleted &&
+ object->soft_del && tags.chunk_id != 0) {
+ /* Data chunk in a soft deleted file: throw it away.
+ * No need to copy this, just forget about it and
+ * fix up the object.
+ */
+
+ /* Free chunks already includes soft deleted chunks.
+ * However, this chunk is soon going to be really deleted,
+ * which will increment free chunks.
+ * We have to decrement free chunks so this works out properly.
+ */
+ dev->n_free_chunks--;
+ bi->soft_del_pages--;
+
+ object->n_data_chunks--;
+
+ if (object->n_data_chunks <= 0) {
+ /* remember to clean up the object */
+ dev->gc_cleanup_list[dev->
+ n_clean_ups]
+ = tags.obj_id;
+ dev->n_clean_ups++;
+ }
+ mark_flash = 0;
+ } else if (0) {
+ /* Todo object && object->deleted && object->n_data_chunks == 0 */
+ /* Deleted object header with no data chunks.
+ * Can be discarded and the file deleted.
+ */
+ object->hdr_chunk = 0;
+ yaffs_free_tnode(object->my_dev,
+ object->
+ variant.file_variant.
+ top);
+ object->variant.file_variant.top = NULL;
+ yaffs_generic_obj_del(object);
+
+ } else if (object) {
+ /* It's either a data chunk in a live file or
+ * an ObjectHeader, so we're interested in it.
+ * NB Need to keep the ObjectHeaders of deleted files
+ * until the whole file has been deleted off
+ */
+ tags.serial_number++;
+
+ dev->n_gc_copies++;
+
+ if (tags.chunk_id == 0) {
+ /* It is an object header.
+ * We need to nuke the shrink header flags first.
+ * Also need to clean up shadowing.
+ * We no longer want the shrink_header flag since its work is done
+ * and if it is left in place it will mess up scanning.
+ */
+
+ struct yaffs_obj_hdr *oh;
+ oh = (struct yaffs_obj_hdr *)
+ buffer;
+
+ oh->is_shrink = 0;
+ tags.extra_is_shrink = 0;
+
+ oh->shadows_obj = 0;
+ oh->inband_shadowed_obj_id = 0;
+ tags.extra_shadows = 0;
+
+ /* Update file size */
+ if (object->variant_type ==
+ YAFFS_OBJECT_TYPE_FILE) {
+ oh->file_size =
+ object->variant.
+ file_variant.
+ file_size;
+ tags.extra_length =
+ oh->file_size;
+ }
+
+ yaffs_verify_oh(object, oh,
+ &tags, 1);
+ new_chunk =
+ yaffs_write_new_chunk(dev,
+ (u8 *)
+ oh,
+ &tags,
+ 1);
+ } else {
+ new_chunk =
+ yaffs_write_new_chunk(dev,
+ buffer,
+ &tags,
+ 1);
+ }
+
+ if (new_chunk < 0) {
+ ret_val = YAFFS_FAIL;
+ } else {
+
+ /* Ok, now fix up the Tnodes etc. */
+
+ if (tags.chunk_id == 0) {
+ /* It's a header */
+ object->hdr_chunk =
+ new_chunk;
+ object->serial =
+ tags.serial_number;
+ } else {
+ /* It's a data chunk */
+ int ok;
+ ok = yaffs_put_chunk_in_file(object, tags.chunk_id, new_chunk, 0);
+ }
+ }
+ }
+
+ if (ret_val == YAFFS_OK)
+ yaffs_chunk_del(dev, old_chunk,
+ mark_flash, __LINE__);
+
+ }
+ }
+
+ yaffs_release_temp_buffer(dev, buffer, __LINE__);
+
+ }
+
+ yaffs_verify_collected_blk(dev, bi, block);
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
+ /*
+ * The gc did not complete. Set block state back to FULL
+ * because checkpointing does not restore gc.
+ */
+ bi->block_state = YAFFS_BLOCK_STATE_FULL;
+ } else {
+ /* The gc completed. */
+ /* Do any required cleanups */
+ for (i = 0; i < dev->n_clean_ups; i++) {
+ /* Time to delete the file too */
+ object =
+ yaffs_find_by_number(dev, dev->gc_cleanup_list[i]);
+ if (object) {
+ yaffs_free_tnode(dev,
+ object->variant.
+ file_variant.top);
+ object->variant.file_variant.top = NULL;
+ yaffs_trace(YAFFS_TRACE_GC,
+ "yaffs: About to finally delete object %d",
+ object->obj_id);
+ yaffs_generic_obj_del(object);
+ object->my_dev->n_deleted_files--;
+ }
+
+ }
+
+ chunks_after = yaffs_get_erased_chunks(dev);
+ if (chunks_before >= chunks_after)
+ yaffs_trace(YAFFS_TRACE_GC,
+ "gc did not increase free chunks before %d after %d",
+ chunks_before, chunks_after);
+ dev->gc_block = 0;
+ dev->gc_chunk = 0;
+ dev->n_clean_ups = 0;
+ }
+
+ dev->gc_disable = 0;
+
+ return ret_val;
+}
+
+/*
+ * FindBlockForGarbageCollection is used to select the dirtiest block (or close enough)
+ * for garbage collection.
+ */
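+ /* Selection order, roughly: a prioritised block if one is pending,
+ * otherwise the dirtiest block found within the search budget,
+ * otherwise (after repeated failures) the oldest dirty block.
+ */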
+
+static unsigned yaffs_find_gc_block(struct yaffs_dev *dev,
+ int aggressive, int background)
+{
+ int i;
+ int iterations;
+ unsigned selected = 0;
+ int prioritised = 0;
+ int prioritised_exist = 0;
+ struct yaffs_block_info *bi;
+ int threshold;
+
+ /* First let's see if we need to grab a prioritised block */
+ if (dev->has_pending_prioritised_gc && !aggressive) {
+ dev->gc_dirtiest = 0;
+ bi = dev->block_info;
+ for (i = dev->internal_start_block;
+ i <= dev->internal_end_block && !selected; i++) {
+
+ if (bi->gc_prioritise) {
+ prioritised_exist = 1;
+ if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
+ yaffs_block_ok_for_gc(dev, bi)) {
+ selected = i;
+ prioritised = 1;
+ }
+ }
+ bi++;
+ }
+
+ /*
+ * If there is a prioritised block and none was selected then
+ * this happened because there is at least one old dirty block gumming
+ * up the works. Let's gc the oldest dirty block.
+ */
+
+ if (prioritised_exist &&
+ !selected && dev->oldest_dirty_block > 0)
+ selected = dev->oldest_dirty_block;
+
+ if (!prioritised_exist) /* None found, so we can clear this */
+ dev->has_pending_prioritised_gc = 0;
+ }
+
+ /* If we're doing aggressive GC then we are happy to take a less-dirty block, and
+ * search harder.
+ * Otherwise (we're doing a leisurely gc) we only bother to do this if the
+ * block has only a few pages in use.
+ */
+
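+ /* Worked example (illustrative, assuming 64 chunks per block): a
+ * background pass that has already been skipped three times uses
+ * threshold = (3 + 2) * 2 = 10 pages in use, clipped to lie between
+ * YAFFS_GC_PASSIVE_THRESHOLD and chunks_per_block / 2 == 32.
+ */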
+ if (!selected) {
+ int pages_used;
+ int n_blocks =
+ dev->internal_end_block - dev->internal_start_block + 1;
+ if (aggressive) {
+ threshold = dev->param.chunks_per_block;
+ iterations = n_blocks;
+ } else {
+ int max_threshold;
+
+ if (background)
+ max_threshold = dev->param.chunks_per_block / 2;
+ else
+ max_threshold = dev->param.chunks_per_block / 8;
+
+ if (max_threshold < YAFFS_GC_PASSIVE_THRESHOLD)
+ max_threshold = YAFFS_GC_PASSIVE_THRESHOLD;
+
+ threshold = background ? (dev->gc_not_done + 2) * 2 : 0;
+ if (threshold < YAFFS_GC_PASSIVE_THRESHOLD)
+ threshold = YAFFS_GC_PASSIVE_THRESHOLD;
+ if (threshold > max_threshold)
+ threshold = max_threshold;
+
+ iterations = n_blocks / 16 + 1;
+ if (iterations > 100)
+ iterations = 100;
+ }
+
+ for (i = 0;
+ i < iterations &&
+ (dev->gc_dirtiest < 1 ||
+ dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH); i++) {
+ dev->gc_block_finder++;
+ if (dev->gc_block_finder < dev->internal_start_block ||
+ dev->gc_block_finder > dev->internal_end_block)
+ dev->gc_block_finder =
+ dev->internal_start_block;
+
+ bi = yaffs_get_block_info(dev, dev->gc_block_finder);
+
+ pages_used = bi->pages_in_use - bi->soft_del_pages;
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
+ pages_used < dev->param.chunks_per_block &&
+ (dev->gc_dirtiest < 1
+ || pages_used < dev->gc_pages_in_use)
+ && yaffs_block_ok_for_gc(dev, bi)) {
+ dev->gc_dirtiest = dev->gc_block_finder;
+ dev->gc_pages_in_use = pages_used;
+ }
+ }
+
+ if (dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold)
+ selected = dev->gc_dirtiest;
+ }
+
+ /*
+ * If nothing has been selected for a while, try selecting the oldest dirty
+ * block because that's gumming up the works.
+ */
+
+ if (!selected && dev->param.is_yaffs2 &&
+ dev->gc_not_done >= (background ? 10 : 20)) {
+ yaffs2_find_oldest_dirty_seq(dev);
+ if (dev->oldest_dirty_block > 0) {
+ selected = dev->oldest_dirty_block;
+ dev->gc_dirtiest = selected;
+ dev->oldest_dirty_gc_count++;
+ bi = yaffs_get_block_info(dev, selected);
+ dev->gc_pages_in_use =
+ bi->pages_in_use - bi->soft_del_pages;
+ } else {
+ dev->gc_not_done = 0;
+ }
+ }
+
+ if (selected) {
+ yaffs_trace(YAFFS_TRACE_GC,
+ "GC Selected block %d with %d free, prioritised:%d",
+ selected,
+ dev->param.chunks_per_block - dev->gc_pages_in_use,
+ prioritised);
+
+ dev->n_gc_blocks++;
+ if (background)
+ dev->bg_gcs++;
+
+ dev->gc_dirtiest = 0;
+ dev->gc_pages_in_use = 0;
+ dev->gc_not_done = 0;
+ if (dev->refresh_skip > 0)
+ dev->refresh_skip--;
+ } else {
+ dev->gc_not_done++;
+ yaffs_trace(YAFFS_TRACE_GC,
+ "GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s",
+ dev->gc_block_finder, dev->gc_not_done, threshold,
+ dev->gc_dirtiest, dev->gc_pages_in_use,
+ dev->oldest_dirty_block, background ? " bg" : "");
+ }
+
+ return selected;
+}
+
+/* New garbage collector
+ * If we're very low on erased blocks then we do aggressive garbage collection
+ * otherwise we do "leasurely" garbage collection.
+ * Aggressive gc looks further (whole array) and will accept less dirty blocks.
+ * Passive gc only inspects smaller areas and will only accept more dirty blocks.
+ *
+ * The idea is to help clear out space in a more spread-out manner.
+ * Dunno if it really does anything useful.
+ */
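+ /* Worked example (illustrative): with param.n_reserved_blocks == 5 and
+ * a checkpoint currently needing 2 blocks, min_erased works out to
+ * 5 + 2 + 1 == 8, so gc goes aggressive once fewer than 8 erased blocks
+ * remain.
+ */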
+static int yaffs_check_gc(struct yaffs_dev *dev, int background)
+{
+ int aggressive = 0;
+ int gc_ok = YAFFS_OK;
+ int max_tries = 0;
+ int min_erased;
+ int erased_chunks;
+ int checkpt_block_adjust;
+
+ if (dev->param.gc_control && (dev->param.gc_control(dev) & 1) == 0)
+ return YAFFS_OK;
+
+ if (dev->gc_disable) {
+ /* Bail out so we don't get recursive gc */
+ return YAFFS_OK;
+ }
+
+ /* This loop should pass the first time.
+ * We'll only see looping here if the collection does not increase space.
+ */
+
+ do {
+ max_tries++;
+
+ checkpt_block_adjust = yaffs_calc_checkpt_blocks_required(dev);
+
+ min_erased =
+ dev->param.n_reserved_blocks + checkpt_block_adjust + 1;
+ erased_chunks =
+ dev->n_erased_blocks * dev->param.chunks_per_block;
+
+ /* If we need a block soon then do aggressive gc. */
+ if (dev->n_erased_blocks < min_erased)
+ aggressive = 1;
+ else {
+ if (!background
+ && erased_chunks > (dev->n_free_chunks / 4))
+ break;
+
+ if (dev->gc_skip > 20)
+ dev->gc_skip = 20;
+ if (erased_chunks < dev->n_free_chunks / 2 ||
+ dev->gc_skip < 1 || background)
+ aggressive = 0;
+ else {
+ dev->gc_skip--;
+ break;
+ }
+ }
+
+ dev->gc_skip = 5;
+
+ /* If we don't already have a block being gc'd then see if we should start another */
+
+ if (dev->gc_block < 1 && !aggressive) {
+ dev->gc_block = yaffs2_find_refresh_block(dev);
+ dev->gc_chunk = 0;
+ dev->n_clean_ups = 0;
+ }
+ if (dev->gc_block < 1) {
+ dev->gc_block =
+ yaffs_find_gc_block(dev, aggressive, background);
+ dev->gc_chunk = 0;
+ dev->n_clean_ups = 0;
+ }
+
+ if (dev->gc_block > 0) {
+ dev->all_gcs++;
+ if (!aggressive)
+ dev->passive_gc_count++;
+
+ yaffs_trace(YAFFS_TRACE_GC,
+ "yaffs: GC n_erased_blocks %d aggressive %d",
+ dev->n_erased_blocks, aggressive);
+
+ gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive);
+ }
+
+ if (dev->n_erased_blocks < (dev->param.n_reserved_blocks)
+ && dev->gc_block > 0) {
+ yaffs_trace(YAFFS_TRACE_GC,
+ "yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d",
+ dev->n_erased_blocks, max_tries,
+ dev->gc_block);
+ }
+ } while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) &&
+ (dev->gc_block > 0) && (max_tries < 2));
+
+ return aggressive ? gc_ok : YAFFS_OK;
+}
+
+/*
+ * yaffs_bg_gc()
+ * Garbage collects. Intended to be called from a background thread.
+ * Returns non-zero if at least half the free chunks are erased.
+ */
+int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency)
+{
+ int erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block;
+
+ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Background gc %u", urgency);
+
+ yaffs_check_gc(dev, 1);
+ return erased_chunks > dev->n_free_chunks / 2;
+}
+
+/*-------------------- Data file manipulation -----------------*/
+
+static int yaffs_rd_data_obj(struct yaffs_obj *in, int inode_chunk, u8 * buffer)
+{
+ int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL);
+
+ if (nand_chunk >= 0)
+ return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk,
+ buffer, NULL);
+ else {
+ yaffs_trace(YAFFS_TRACE_NANDACCESS,
+ "Chunk %d not found zero instead",
+ nand_chunk);
+ /* get sane (zero) data if you read a hole */
+ memset(buffer, 0, in->my_dev->data_bytes_per_chunk);
+ return 0;
+ }
+
+}
+
+void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
+ int lyn)
+{
+ int block;
+ int page;
+ struct yaffs_ext_tags tags;
+ struct yaffs_block_info *bi;
+
+ if (chunk_id <= 0)
+ return;
+
+ dev->n_deletions++;
+ block = chunk_id / dev->param.chunks_per_block;
+ page = chunk_id % dev->param.chunks_per_block;
+
+ if (!yaffs_check_chunk_bit(dev, block, page))
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Deleting invalid chunk %d", chunk_id);
+
+ bi = yaffs_get_block_info(dev, block);
+
+ yaffs2_update_oldest_dirty_seq(dev, block, bi);
+
+ yaffs_trace(YAFFS_TRACE_DELETION,
+ "line %d delete of chunk %d",
+ lyn, chunk_id);
+
+ if (!dev->param.is_yaffs2 && mark_flash &&
+ bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) {
+
+ yaffs_init_tags(&tags);
+
+ tags.is_deleted = 1;
+
+ yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags);
+ yaffs_handle_chunk_update(dev, chunk_id, &tags);
+ } else {
+ dev->n_unmarked_deletions++;
+ }
+
+ /* Pull out of the management area.
+ * If the whole block became dirty, this will kick off an erasure.
+ */
+ if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING ||
+ bi->block_state == YAFFS_BLOCK_STATE_FULL ||
+ bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
+ bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
+ dev->n_free_chunks++;
+
+ yaffs_clear_chunk_bit(dev, block, page);
+
+ bi->pages_in_use--;
+
+ if (bi->pages_in_use == 0 &&
+ !bi->has_shrink_hdr &&
+ bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING &&
+ bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+ yaffs_block_became_dirty(dev, block);
+ }
+
+ }
+
+}
+
+static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
+ const u8 * buffer, int n_bytes, int use_reserve)
+{
+ /* Find the old chunk. Need to do this to get the serial number.
+ * Write the new one and patch it into the tree.
+ * Invalidate the old tags.
+ */
+
+ int prev_chunk_id;
+ struct yaffs_ext_tags prev_tags;
+
+ int new_chunk_id;
+ struct yaffs_ext_tags new_tags;
+
+ struct yaffs_dev *dev = in->my_dev;
+
+ yaffs_check_gc(dev, 0);
+
+ /* Get the previous chunk at this location in the file if it exists.
+ * If it does not exist then put a zero into the tree. This creates
+ * the tnode now, rather than later when it is harder to clean up.
+ */
+ prev_chunk_id = yaffs_find_chunk_in_file(in, inode_chunk, &prev_tags);
+ if (prev_chunk_id < 1 &&
+ !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0))
+ return 0;
+
+ /* Set up new tags */
+ yaffs_init_tags(&new_tags);
+
+ new_tags.chunk_id = inode_chunk;
+ new_tags.obj_id = in->obj_id;
+ new_tags.serial_number =
+ (prev_chunk_id > 0) ? prev_tags.serial_number + 1 : 1;
+ new_tags.n_bytes = n_bytes;
+
+ if (n_bytes < 1 || n_bytes > dev->param.total_bytes_per_chunk) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Writing %d bytes to chunk!!!!!!!!!",
+ n_bytes);
+ YBUG();
+ }
+
+ new_chunk_id =
+ yaffs_write_new_chunk(dev, buffer, &new_tags, use_reserve);
+
+ if (new_chunk_id > 0) {
+ yaffs_put_chunk_in_file(in, inode_chunk, new_chunk_id, 0);
+
+ if (prev_chunk_id > 0)
+ yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
+
+ yaffs_verify_file_sane(in);
+ }
+ return new_chunk_id;
+
+}
+
+
+
+static int yaffs_do_xattrib_mod(struct yaffs_obj *obj, int set,
+ const YCHAR * name, const void *value, int size,
+ int flags)
+{
+ struct yaffs_xattr_mod xmod;
+
+ int result;
+
+ xmod.set = set;
+ xmod.name = name;
+ xmod.data = value;
+ xmod.size = size;
+ xmod.flags = flags;
+ xmod.result = -ENOSPC;
+
+ result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod);
+
+ if (result > 0)
+ return xmod.result;
+ else
+ return -ENOSPC;
+}
+
+static int yaffs_apply_xattrib_mod(struct yaffs_obj *obj, char *buffer,
+ struct yaffs_xattr_mod *xmod)
+{
+ int retval = 0;
+ int x_offs = sizeof(struct yaffs_obj_hdr);
+ struct yaffs_dev *dev = obj->my_dev;
+ int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
+
+ char *x_buffer = buffer + x_offs;
+
+ if (xmod->set)
+ retval =
+ nval_set(x_buffer, x_size, xmod->name, xmod->data,
+ xmod->size, xmod->flags);
+ else
+ retval = nval_del(x_buffer, x_size, xmod->name);
+
+ obj->has_xattr = nval_hasvalues(x_buffer, x_size);
+ obj->xattr_known = 1;
+
+ xmod->result = retval;
+
+ return retval;
+}
+
+static int yaffs_do_xattrib_fetch(struct yaffs_obj *obj, const YCHAR * name,
+ void *value, int size)
+{
+ char *buffer = NULL;
+ int result;
+ struct yaffs_ext_tags tags;
+ struct yaffs_dev *dev = obj->my_dev;
+ int x_offs = sizeof(struct yaffs_obj_hdr);
+ int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
+
+ char *x_buffer;
+
+ int retval = 0;
+
+ if (obj->hdr_chunk < 1)
+ return -ENODATA;
+
+ /* If we know that the object has no xattribs then don't do all the
+ * reading and parsing.
+ */
+ if (obj->xattr_known && !obj->has_xattr) {
+ if (name)
+ return -ENODATA;
+ else
+ return 0;
+ }
+
+ buffer = (char *)yaffs_get_temp_buffer(dev, __LINE__);
+ if (!buffer)
+ return -ENOMEM;
+
+ result =
+ yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, (u8 *) buffer, &tags);
+
+ if (result != YAFFS_OK)
+ retval = -ENOENT;
+ else {
+ x_buffer = buffer + x_offs;
+
+ if (!obj->xattr_known) {
+ obj->has_xattr = nval_hasvalues(x_buffer, x_size);
+ obj->xattr_known = 1;
+ }
+
+ if (name)
+ retval = nval_get(x_buffer, x_size, name, value, size);
+ else
+ retval = nval_list(x_buffer, x_size, value, size);
+ }
+ yaffs_release_temp_buffer(dev, (u8 *) buffer, __LINE__);
+ return retval;
+}
+
+int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR * name,
+ const void *value, int size, int flags)
+{
+ return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags);
+}
+
+int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR * name)
+{
+ return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0);
+}
+
+int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR * name, void *value,
+ int size)
+{
+ return yaffs_do_xattrib_fetch(obj, name, value, size);
+}
+
+int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size)
+{
+ return yaffs_do_xattrib_fetch(obj, NULL, buffer, size);
+}
+
+static void yaffs_check_obj_details_loaded(struct yaffs_obj *in)
+{
+ u8 *chunk_data;
+ struct yaffs_obj_hdr *oh;
+ struct yaffs_dev *dev;
+ struct yaffs_ext_tags tags;
+ int result;
+ int alloc_failed = 0;
+
+ if (!in)
+ return;
+
+ dev = in->my_dev;
+
+ if (in->lazy_loaded && in->hdr_chunk > 0) {
+ in->lazy_loaded = 0;
+ chunk_data = yaffs_get_temp_buffer(dev, __LINE__);
+
+ result =
+ yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, chunk_data,
+ &tags);
+ oh = (struct yaffs_obj_hdr *)chunk_data;
+
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+ yaffs_set_obj_name_from_oh(in, oh);
+
+ if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
+ in->variant.symlink_variant.alias =
+ yaffs_clone_str(oh->alias);
+ if (!in->variant.symlink_variant.alias)
+ alloc_failed = 1; /* Not returned to caller */
+ }
+
+ yaffs_release_temp_buffer(dev, chunk_data, __LINE__);
+ }
+}
+
+static void yaffs_load_name_from_oh(struct yaffs_dev *dev, YCHAR * name,
+ const YCHAR * oh_name, int buff_size)
+{
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+ if (dev->param.auto_unicode) {
+ if (*oh_name) {
+ /* It is an ASCII name, do an ASCII to
+ * unicode conversion */
+ const char *ascii_oh_name = (const char *)oh_name;
+ int n = buff_size - 1;
+ while (n > 0 && *ascii_oh_name) {
+ *name = *ascii_oh_name;
+ name++;
+ ascii_oh_name++;
+ n--;
+ }
+ } else {
+ strncpy(name, oh_name + 1, buff_size - 1);
+ }
+ } else {
+#else
+ {
+#endif
+ strncpy(name, oh_name, buff_size - 1);
+ }
+}
+
+static void yaffs_load_oh_from_name(struct yaffs_dev *dev, YCHAR * oh_name,
+ const YCHAR * name)
+{
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+
+ int is_ascii;
+ YCHAR *w;
+
+ if (dev->param.auto_unicode) {
+
+ is_ascii = 1;
+ w = name;
+
+ /* Figure out if the name will fit in ascii character set */
+ while (is_ascii && *w) {
+ if ((*w) & 0xff00)
+ is_ascii = 0;
+ w++;
+ }
+
+ if (is_ascii) {
+ /* It is an ASCII name, so do a unicode to ascii conversion */
+ char *ascii_oh_name = (char *)oh_name;
+ int n = YAFFS_MAX_NAME_LENGTH - 1;
+ while (n > 0 && *name) {
+ *ascii_oh_name = *name;
+ name++;
+ ascii_oh_name++;
+ n--;
+ }
+ } else {
+ /* It is a unicode name, so save starting at the second YCHAR */
+ *oh_name = 0;
+ strncpy(oh_name + 1, name,
+ YAFFS_MAX_NAME_LENGTH - 2);
+ }
+ } else {
+#else
+ {
+#endif
+ strncpy(oh_name, name, YAFFS_MAX_NAME_LENGTH - 1);
+ }
+
+}
+
+/* UpdateObjectHeader updates the header on NAND for an object.
+ * If name is not NULL, then that new name is used.
+ */
+int yaffs_update_oh(struct yaffs_obj *in, const YCHAR * name, int force,
+ int is_shrink, int shadows, struct yaffs_xattr_mod *xmod)
+{
+
+ struct yaffs_block_info *bi;
+
+ struct yaffs_dev *dev = in->my_dev;
+
+ int prev_chunk_id;
+ int ret_val = 0;
+ int result = 0;
+
+ int new_chunk_id;
+ struct yaffs_ext_tags new_tags;
+ struct yaffs_ext_tags old_tags;
+ const YCHAR *alias = NULL;
+
+ u8 *buffer = NULL;
+ YCHAR old_name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ struct yaffs_obj_hdr *oh = NULL;
+
+ strcpy(old_name, _Y("silly old name"));
+
+ if (!in->fake || in == dev->root_dir ||
+ force || xmod) {
+
+ yaffs_check_gc(dev, 0);
+ yaffs_check_obj_details_loaded(in);
+
+ buffer = yaffs_get_temp_buffer(in->my_dev, __LINE__);
+ oh = (struct yaffs_obj_hdr *)buffer;
+
+ prev_chunk_id = in->hdr_chunk;
+
+ if (prev_chunk_id > 0) {
+ result = yaffs_rd_chunk_tags_nand(dev, prev_chunk_id,
+ buffer, &old_tags);
+
+ yaffs_verify_oh(in, oh, &old_tags, 0);
+
+ memcpy(old_name, oh->name, sizeof(oh->name));
+ memset(buffer, 0xFF, sizeof(struct yaffs_obj_hdr));
+ } else {
+ memset(buffer, 0xFF, dev->data_bytes_per_chunk);
+ }
+
+ oh->type = in->variant_type;
+ oh->yst_mode = in->yst_mode;
+ oh->shadows_obj = oh->inband_shadowed_obj_id = shadows;
+
+ yaffs_load_attribs_oh(oh, in);
+
+ if (in->parent)
+ oh->parent_obj_id = in->parent->obj_id;
+ else
+ oh->parent_obj_id = 0;
+
+ if (name && *name) {
+ memset(oh->name, 0, sizeof(oh->name));
+ yaffs_load_oh_from_name(dev, oh->name, name);
+ } else if (prev_chunk_id > 0) {
+ memcpy(oh->name, old_name, sizeof(oh->name));
+ } else {
+ memset(oh->name, 0, sizeof(oh->name));
+ }
+
+ oh->is_shrink = is_shrink;
+
+ switch (in->variant_type) {
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* Should not happen */
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ oh->file_size =
+ (oh->parent_obj_id == YAFFS_OBJECTID_DELETED
+ || oh->parent_obj_id ==
+ YAFFS_OBJECTID_UNLINKED) ? 0 : in->
+ variant.file_variant.file_size;
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ oh->equiv_id = in->variant.hardlink_variant.equiv_id;
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ alias = in->variant.symlink_variant.alias;
+ if (!alias)
+ alias = _Y("no alias");
+ strncpy(oh->alias, alias, YAFFS_MAX_ALIAS_LENGTH);
+ oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
+ break;
+ }
+
+ /* process any xattrib modifications */
+ if (xmod)
+ yaffs_apply_xattrib_mod(in, (char *)buffer, xmod);
+
+ /* Tags */
+ yaffs_init_tags(&new_tags);
+ in->serial++;
+ new_tags.chunk_id = 0;
+ new_tags.obj_id = in->obj_id;
+ new_tags.serial_number = in->serial;
+
+ /* Add extra info for file header */
+
+ new_tags.extra_available = 1;
+ new_tags.extra_parent_id = oh->parent_obj_id;
+ new_tags.extra_length = oh->file_size;
+ new_tags.extra_is_shrink = oh->is_shrink;
+ new_tags.extra_equiv_id = oh->equiv_id;
+ new_tags.extra_shadows = (oh->shadows_obj > 0) ? 1 : 0;
+ new_tags.extra_obj_type = in->variant_type;
+
+ yaffs_verify_oh(in, oh, &new_tags, 1);
+
+ /* Create new chunk in NAND */
+ new_chunk_id =
+ yaffs_write_new_chunk(dev, buffer, &new_tags,
+ (prev_chunk_id > 0) ? 1 : 0);
+
+ if (new_chunk_id >= 0) {
+
+ in->hdr_chunk = new_chunk_id;
+
+ if (prev_chunk_id > 0) {
+ yaffs_chunk_del(dev, prev_chunk_id, 1,
+ __LINE__);
+ }
+
+ if (!yaffs_obj_cache_dirty(in))
+ in->dirty = 0;
+
+ /* If this was a shrink, then mark the block that the chunk lives on */
+ if (is_shrink) {
+ bi = yaffs_get_block_info(in->my_dev,
+ new_chunk_id /
+ in->my_dev->param.
+ chunks_per_block);
+ bi->has_shrink_hdr = 1;
+ }
+
+ }
+
+ ret_val = new_chunk_id;
+
+ }
+
+ if (buffer)
+ yaffs_release_temp_buffer(dev, buffer, __LINE__);
+
+ return ret_val;
+}
+
+/*--------------------- File read/write ------------------------
+ * Read and write have very similar structures.
+ * In general the read/write has three parts to it
+ * An incomplete chunk to start with (if the read/write is not chunk-aligned)
+ * Some complete chunks
+ * An incomplete chunk to end off with
+ *
+ * Curve-balls: the first chunk might also be the last chunk.
+ */
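+ /* Worked example (illustrative, assuming 2048-byte data chunks): a
+ * 5000-byte read at offset 1000 is split into 1048 bytes from the tail
+ * of the first chunk, a full 2048-byte middle chunk, and 1904 bytes
+ * from the head of the final chunk.
+ */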
+
+int yaffs_file_rd(struct yaffs_obj *in, u8 * buffer, loff_t offset, int n_bytes)
+{
+
+ int chunk;
+ u32 start;
+ int n_copy;
+ int n = n_bytes;
+ int n_done = 0;
+ struct yaffs_cache *cache;
+
+ struct yaffs_dev *dev;
+
+ dev = in->my_dev;
+
+ while (n > 0) {
+ /* chunk = offset / dev->data_bytes_per_chunk + 1; */
+ /* start = offset % dev->data_bytes_per_chunk; */
+ yaffs_addr_to_chunk(dev, offset, &chunk, &start);
+ chunk++;
+
+ /* OK now check for the curveball where the start and end are in
+ * the same chunk.
+ */
+ if ((start + n) < dev->data_bytes_per_chunk)
+ n_copy = n;
+ else
+ n_copy = dev->data_bytes_per_chunk - start;
+
+ cache = yaffs_find_chunk_cache(in, chunk);
+
+ /* If the chunk is already in the cache or it is less than a whole chunk
+ * or we're using inband tags then use the cache (if there is caching)
+ * else bypass the cache.
+ */
+ if (cache || n_copy != dev->data_bytes_per_chunk
+ || dev->param.inband_tags) {
+ if (dev->param.n_caches > 0) {
+
+ /* If we can't find the data in the cache, then load it up. */
+
+ if (!cache) {
+ cache =
+ yaffs_grab_chunk_cache(in->my_dev);
+ cache->object = in;
+ cache->chunk_id = chunk;
+ cache->dirty = 0;
+ cache->locked = 0;
+ yaffs_rd_data_obj(in, chunk,
+ cache->data);
+ cache->n_bytes = 0;
+ }
+
+ yaffs_use_cache(dev, cache, 0);
+
+ cache->locked = 1;
+
+ memcpy(buffer, &cache->data[start], n_copy);
+
+ cache->locked = 0;
+ } else {
+ /* Read into the local buffer then copy. */
+
+ u8 *local_buffer =
+ yaffs_get_temp_buffer(dev, __LINE__);
+ yaffs_rd_data_obj(in, chunk, local_buffer);
+
+ memcpy(buffer, &local_buffer[start], n_copy);
+
+ yaffs_release_temp_buffer(dev, local_buffer,
+ __LINE__);
+ }
+
+ } else {
+
+ /* A full chunk. Read directly into the supplied buffer. */
+ yaffs_rd_data_obj(in, chunk, buffer);
+
+ }
+
+ n -= n_copy;
+ offset += n_copy;
+ buffer += n_copy;
+ n_done += n_copy;
+
+ }
+
+ return n_done;
+}
+
+int yaffs_do_file_wr(struct yaffs_obj *in, const u8 * buffer, loff_t offset,
+ int n_bytes, int write_trhrough)
+{
+
+ int chunk;
+ u32 start;
+ int n_copy;
+ int n = n_bytes;
+ int n_done = 0;
+ int n_writeback;
+ int start_write = offset;
+ int chunk_written = 0;
+ u32 n_bytes_read;
+ u32 chunk_start;
+
+ struct yaffs_dev *dev;
+
+ dev = in->my_dev;
+
+ while (n > 0 && chunk_written >= 0) {
+ yaffs_addr_to_chunk(dev, offset, &chunk, &start);
+
+ if (chunk * dev->data_bytes_per_chunk + start != offset ||
+ start >= dev->data_bytes_per_chunk) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "AddrToChunk of offset %d gives chunk %d start %d",
+ (int)offset, chunk, start);
+ }
+ chunk++; /* File pos to chunk in file offset */
+
+ /* OK now check for the curveball where the start and end are in
+ * the same chunk.
+ */
+
+ if ((start + n) < dev->data_bytes_per_chunk) {
+ n_copy = n;
+
+ /* Now folks, to calculate how many bytes to write back....
+ * If we're overwriting and not writing to the end of the file then
+ * we need to write back as much as was there before.
+ */
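+ /* Example (illustrative only, 2048-byte chunks): the chunk already
+ * holds 1500 bytes of file data and we write 100 bytes at start = 200;
+ * then n_bytes_read = 1500 and start + n = 300, so n_writeback = 1500
+ * to avoid truncating the data that was already there.
+ */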
+
+ chunk_start = ((chunk - 1) * dev->data_bytes_per_chunk);
+
+ if (chunk_start > in->variant.file_variant.file_size)
+ n_bytes_read = 0; /* Past end of file */
+ else
+ n_bytes_read =
+ in->variant.file_variant.file_size -
+ chunk_start;
+
+ if (n_bytes_read > dev->data_bytes_per_chunk)
+ n_bytes_read = dev->data_bytes_per_chunk;
+
+ n_writeback =
+ (n_bytes_read >
+ (start + n)) ? n_bytes_read : (start + n);
+
+ if (n_writeback < 0
+ || n_writeback > dev->data_bytes_per_chunk)
+ YBUG();
+
+ } else {
+ n_copy = dev->data_bytes_per_chunk - start;
+ n_writeback = dev->data_bytes_per_chunk;
+ }
+
+ if (n_copy != dev->data_bytes_per_chunk
+ || dev->param.inband_tags) {
+ /* An incomplete start or end chunk (or maybe both start and end chunks),
+ * or we're using inband tags, so we want to use the cache buffers.
+ */
+ if (dev->param.n_caches > 0) {
+ struct yaffs_cache *cache;
+ /* If we can't find the data in the cache, then load the cache */
+ cache = yaffs_find_chunk_cache(in, chunk);
+
+ if (!cache
+ && yaffs_check_alloc_available(dev, 1)) {
+ cache = yaffs_grab_chunk_cache(dev);
+ cache->object = in;
+ cache->chunk_id = chunk;
+ cache->dirty = 0;
+ cache->locked = 0;
+ yaffs_rd_data_obj(in, chunk,
+ cache->data);
+ } else if (cache &&
+ !cache->dirty &&
+ !yaffs_check_alloc_available(dev,
+ 1)) {
+ /* Drop the cache if it was a read cache item and
+ * no space check has been made for it.
+ */
+ cache = NULL;
+ }
+
+ if (cache) {
+ yaffs_use_cache(dev, cache, 1);
+ cache->locked = 1;
+
+ memcpy(&cache->data[start], buffer,
+ n_copy);
+
+ cache->locked = 0;
+ cache->n_bytes = n_writeback;
+
+ if (write_through) {
+ chunk_written =
+ yaffs_wr_data_obj
+ (cache->object,
+ cache->chunk_id,
+ cache->data,
+ cache->n_bytes, 1);
+ cache->dirty = 0;
+ }
+
+ } else {
+ chunk_written = -1; /* fail the write */
+ }
+ } else {
+ /* An incomplete start or end chunk (or maybe both start and end chunks).
+ * Read the chunk into a local buffer, merge in the new data, then write it back.
+ */
+
+ u8 *local_buffer =
+ yaffs_get_temp_buffer(dev, __LINE__);
+
+ yaffs_rd_data_obj(in, chunk, local_buffer);
+
+ memcpy(&local_buffer[start], buffer, n_copy);
+
+ chunk_written =
+ yaffs_wr_data_obj(in, chunk,
+ local_buffer,
+ n_writeback, 0);
+
+ yaffs_release_temp_buffer(dev, local_buffer,
+ __LINE__);
+
+ }
+
+ } else {
+ /* A full chunk. Write directly from the supplied buffer. */
+
+ chunk_written =
+ yaffs_wr_data_obj(in, chunk, buffer,
+ dev->data_bytes_per_chunk, 0);
+
+ /* Since we've overwritten the cached data, we better invalidate it. */
+ yaffs_invalidate_chunk_cache(in, chunk);
+ }
+
+ if (chunk_written >= 0) {
+ n -= n_copy;
+ offset += n_copy;
+ buffer += n_copy;
+ n_done += n_copy;
+ }
+
+ }
+
+ /* Update file object */
+
+ if ((start_write + n_done) > in->variant.file_variant.file_size)
+ in->variant.file_variant.file_size = (start_write + n_done);
+
+ in->dirty = 1;
+
+ return n_done;
+}
+
+int yaffs_wr_file(struct yaffs_obj *in, const u8 * buffer, loff_t offset,
+ int n_bytes, int write_through)
+{
+ yaffs2_handle_hole(in, offset);
+ return yaffs_do_file_wr(in, buffer, offset, n_bytes, write_through);
+}
+
+/* ---------------------- File resizing stuff ------------------ */
+
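+/* Worked example (illustrative only, 2048-byte data chunks): shrinking a
+ * 10000-byte file to 5000 bytes gives last_del = 5 and start_del = 4 below,
+ * so file chunks 5 and 4 are deleted (backwards); chunks 1..3 are kept and
+ * the now-partial chunk 3 is rewritten, zero padded, by
+ * yaffs_resize_file_down().
+ */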
+static void yaffs_prune_chunks(struct yaffs_obj *in, int new_size)
+{
+
+ struct yaffs_dev *dev = in->my_dev;
+ int old_size = in->variant.file_variant.file_size;
+
+ int last_del = 1 + (old_size - 1) / dev->data_bytes_per_chunk;
+
+ int start_del = 1 + (new_size + dev->data_bytes_per_chunk - 1) /
+ dev->data_bytes_per_chunk;
+ int i;
+ int chunk_id;
+
+ /* Delete backwards so that we don't end up with holes if
+ * power is lost part-way through the operation.
+ */
+ for (i = last_del; i >= start_del; i--) {
+ /* NB this could be optimised somewhat,
+ * eg. could retrieve the tags and write them without
+ * using yaffs_chunk_del
+ */
+
+ chunk_id = yaffs_find_del_file_chunk(in, i, NULL);
+ if (chunk_id > 0) {
+ if (chunk_id <
+ (dev->internal_start_block *
+ dev->param.chunks_per_block)
+ || chunk_id >=
+ ((dev->internal_end_block +
+ 1) * dev->param.chunks_per_block)) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Found daft chunk_id %d for %d",
+ chunk_id, i);
+ } else {
+ in->n_data_chunks--;
+ yaffs_chunk_del(dev, chunk_id, 1, __LINE__);
+ }
+ }
+ }
+
+}
+
+void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size)
+{
+ int new_full;
+ u32 new_partial;
+ struct yaffs_dev *dev = obj->my_dev;
+
+ yaffs_addr_to_chunk(dev, new_size, &new_full, &new_partial);
+
+ yaffs_prune_chunks(obj, new_size);
+
+ if (new_partial != 0) {
+ int last_chunk = 1 + new_full;
+ u8 *local_buffer = yaffs_get_temp_buffer(dev, __LINE__);
+
+ /* Rewrite the last chunk with its new size and zero pad */
+ yaffs_rd_data_obj(obj, last_chunk, local_buffer);
+ memset(local_buffer + new_partial, 0,
+ dev->data_bytes_per_chunk - new_partial);
+
+ yaffs_wr_data_obj(obj, last_chunk, local_buffer,
+ new_partial, 1);
+
+ yaffs_release_temp_buffer(dev, local_buffer, __LINE__);
+ }
+
+ obj->variant.file_variant.file_size = new_size;
+
+ yaffs_prune_tree(dev, &obj->variant.file_variant);
+}
+
+int yaffs_resize_file(struct yaffs_obj *in, loff_t new_size)
+{
+ struct yaffs_dev *dev = in->my_dev;
+ int old_size = in->variant.file_variant.file_size;
+
+ yaffs_flush_file_cache(in);
+ yaffs_invalidate_whole_cache(in);
+
+ yaffs_check_gc(dev, 0);
+
+ if (in->variant_type != YAFFS_OBJECT_TYPE_FILE)
+ return YAFFS_FAIL;
+
+ if (new_size == old_size)
+ return YAFFS_OK;
+
+ if (new_size > old_size) {
+ yaffs2_handle_hole(in, new_size);
+ in->variant.file_variant.file_size = new_size;
+ } else {
+ /* new_size < old_size */
+ yaffs_resize_file_down(in, new_size);
+ }
+
+ /* Write a new object header to reflect the resize and,
+ * if need be, to show that we've shrunk the file.
+ * Do this only if the file is not in the deleted directories
+ * and is not shadowed.
+ */
+ if (in->parent &&
+ !in->is_shadowed &&
+ in->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
+ in->parent->obj_id != YAFFS_OBJECTID_DELETED)
+ yaffs_update_oh(in, NULL, 0, 0, 0, NULL);
+
+ return YAFFS_OK;
+}
+
+int yaffs_flush_file(struct yaffs_obj *in, int update_time, int data_sync)
+{
+ int ret_val;
+ if (in->dirty) {
+ yaffs_flush_file_cache(in);
+ if (data_sync) /* Only sync data */
+ ret_val = YAFFS_OK;
+ else {
+ if (update_time)
+ yaffs_load_current_time(in, 0, 0);
+
+ ret_val = (yaffs_update_oh(in, NULL, 0, 0, 0, NULL) >=
+ 0) ? YAFFS_OK : YAFFS_FAIL;
+ }
+ } else {
+ ret_val = YAFFS_OK;
+ }
+
+ return ret_val;
+
+}
+
+
+/* yaffs_del_file deletes the whole file data
+ * and the inode associated with the file.
+ * It does not delete the links associated with the file.
+ */
+static int yaffs_unlink_file_if_needed(struct yaffs_obj *in)
+{
+
+ int ret_val;
+ int del_now = 0;
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (!in->my_inode)
+ del_now = 1;
+
+ if (del_now) {
+ ret_val =
+ yaffs_change_obj_name(in, in->my_dev->del_dir,
+ _Y("deleted"), 0, 0);
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "yaffs: immediate deletion of file %d",
+ in->obj_id);
+ in->deleted = 1;
+ in->my_dev->n_deleted_files++;
+ if (dev->param.disable_soft_del || dev->param.is_yaffs2)
+ yaffs_resize_file(in, 0);
+ yaffs_soft_del_file(in);
+ } else {
+ ret_val =
+ yaffs_change_obj_name(in, in->my_dev->unlinked_dir,
+ _Y("unlinked"), 0, 0);
+ }
+
+ return ret_val;
+}
+
+int yaffs_del_file(struct yaffs_obj *in)
+{
+ int ret_val = YAFFS_OK;
+ int deleted; /* Need to cache value on stack if in is freed */
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (dev->param.disable_soft_del || dev->param.is_yaffs2)
+ yaffs_resize_file(in, 0);
+
+ if (in->n_data_chunks > 0) {
+ /* Use soft deletion if there is data in the file.
+ * That won't be the case if it has been resized to zero.
+ */
+ if (!in->unlinked)
+ ret_val = yaffs_unlink_file_if_needed(in);
+
+ deleted = in->deleted;
+
+ if (ret_val == YAFFS_OK && in->unlinked && !in->deleted) {
+ in->deleted = 1;
+ deleted = 1;
+ in->my_dev->n_deleted_files++;
+ yaffs_soft_del_file(in);
+ }
+ return deleted ? YAFFS_OK : YAFFS_FAIL;
+ } else {
+ /* The file has no data chunks so we toss it immediately */
+ yaffs_free_tnode(in->my_dev, in->variant.file_variant.top);
+ in->variant.file_variant.top = NULL;
+ yaffs_generic_obj_del(in);
+
+ return YAFFS_OK;
+ }
+}
+
+int yaffs_is_non_empty_dir(struct yaffs_obj *obj)
+{
+ return (obj &&
+ obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) &&
+ !(list_empty(&obj->variant.dir_variant.children));
+}
+
+static int yaffs_del_dir(struct yaffs_obj *obj)
+{
+ /* First check that the directory is empty. */
+ if (yaffs_is_non_empty_dir(obj))
+ return YAFFS_FAIL;
+
+ return yaffs_generic_obj_del(obj);
+}
+
+static int yaffs_del_symlink(struct yaffs_obj *in)
+{
+ if (in->variant.symlink_variant.alias)
+ kfree(in->variant.symlink_variant.alias);
+ in->variant.symlink_variant.alias = NULL;
+
+ return yaffs_generic_obj_del(in);
+}
+
+static int yaffs_del_link(struct yaffs_obj *in)
+{
+ /* remove this hardlink from the list associated with the equivalent
+ * object
+ */
+ list_del_init(&in->hard_links);
+ return yaffs_generic_obj_del(in);
+}
+
+int yaffs_del_obj(struct yaffs_obj *obj)
+{
+ int ret_val = -1;
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ ret_val = yaffs_del_file(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ if (!list_empty(&obj->variant.dir_variant.dirty)) {
+ yaffs_trace(YAFFS_TRACE_BACKGROUND,
+ "Remove object %d from dirty directories",
+ obj->obj_id);
+ list_del_init(&obj->variant.dir_variant.dirty);
+ }
+ return yaffs_del_dir(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ ret_val = yaffs_del_symlink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ ret_val = yaffs_del_link(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ ret_val = yaffs_generic_obj_del(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ ret_val = 0;
+ break; /* should not happen. */
+ }
+
+ return ret_val;
+}
+
+static int yaffs_unlink_worker(struct yaffs_obj *obj)
+{
+
+ int del_now = 0;
+
+ if (!obj->my_inode)
+ del_now = 1;
+
+ if (obj)
+ yaffs_update_parent(obj->parent);
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
+ return yaffs_del_link(obj);
+ } else if (!list_empty(&obj->hard_links)) {
+ /* Curve ball: We're unlinking an object that has a hardlink.
+ *
+ * This problem arises because we are not strictly following
+ * the Linux link/inode model.
+ *
+ * We can't really delete the object.
+ * Instead, we do the following:
+ * - Select a hardlink.
+ * - Unhook it from the hard links
+ * - Move it from its parent directory (so that the rename can work)
+ * - Rename the object to the hardlink's name.
+ * - Delete the hardlink
+ */
+
+ struct yaffs_obj *hl;
+ struct yaffs_obj *parent;
+ int ret_val;
+ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ hl = list_entry(obj->hard_links.next, struct yaffs_obj,
+ hard_links);
+
+ yaffs_get_obj_name(hl, name, YAFFS_MAX_NAME_LENGTH + 1);
+ parent = hl->parent;
+
+ list_del_init(&hl->hard_links);
+
+ yaffs_add_obj_to_dir(obj->my_dev->unlinked_dir, hl);
+
+ ret_val = yaffs_change_obj_name(obj, parent, name, 0, 0);
+
+ if (ret_val == YAFFS_OK)
+ ret_val = yaffs_generic_obj_del(hl);
+
+ return ret_val;
+
+ } else if (del_now) {
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ return yaffs_del_file(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ list_del_init(&obj->variant.dir_variant.dirty);
+ return yaffs_del_dir(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ return yaffs_del_symlink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ return yaffs_generic_obj_del(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ default:
+ return YAFFS_FAIL;
+ }
+ } else if (yaffs_is_non_empty_dir(obj)) {
+ return YAFFS_FAIL;
+ } else {
+ return yaffs_change_obj_name(obj, obj->my_dev->unlinked_dir,
+ _Y("unlinked"), 0, 0);
+ }
+}
+
+static int yaffs_unlink_obj(struct yaffs_obj *obj)
+{
+
+ if (obj && obj->unlink_allowed)
+ return yaffs_unlink_worker(obj);
+
+ return YAFFS_FAIL;
+
+}
+
+int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR * name)
+{
+ struct yaffs_obj *obj;
+
+ obj = yaffs_find_by_name(dir, name);
+ return yaffs_unlink_obj(obj);
+}
+
+/* Note:
+ * If old_name is NULL then we take old_dir as the object to be renamed.
+ */
+int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR * old_name,
+ struct yaffs_obj *new_dir, const YCHAR * new_name)
+{
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_obj *existing_target = NULL;
+ int force = 0;
+ int result;
+ struct yaffs_dev *dev;
+
+ if (!old_dir || old_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
+ YBUG();
+ if (!new_dir || new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
+ YBUG();
+
+ dev = old_dir->my_dev;
+
+#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
+ /* Special case for case insensitive systems.
+ * While look-up is case insensitive, the name isn't.
+ * Therefore we might want to change x.txt to X.txt
+ */
+ if (old_dir == new_dir &&
+ old_name && new_name &&
+ strcmp(old_name, new_name) == 0)
+ force = 1;
+#endif
+
+ if (strnlen(new_name, YAFFS_MAX_NAME_LENGTH + 1) >
+ YAFFS_MAX_NAME_LENGTH)
+ /* ENAMETOOLONG */
+ return YAFFS_FAIL;
+
+ if(old_name)
+ obj = yaffs_find_by_name(old_dir, old_name);
+ else{
+ obj = old_dir;
+ old_dir = obj->parent;
+ }
+
+
+ if (obj && obj->rename_allowed) {
+
+ /* Now do the handling for an existing target, if there is one */
+
+ existing_target = yaffs_find_by_name(new_dir, new_name);
+ if (yaffs_is_non_empty_dir(existing_target)){
+ return YAFFS_FAIL; /* ENOTEMPTY */
+ } else if (existing_target && existing_target != obj) {
+ /* Nuke the target first, using shadowing,
+ * but only if it isn't the same object.
+ *
+ * Note we must disable gc otherwise it can mess up the shadowing.
+ *
+ */
+ dev->gc_disable = 1;
+ yaffs_change_obj_name(obj, new_dir, new_name, force,
+ existing_target->obj_id);
+ existing_target->is_shadowed = 1;
+ yaffs_unlink_obj(existing_target);
+ dev->gc_disable = 0;
+ }
+
+ result = yaffs_change_obj_name(obj, new_dir, new_name, 1, 0);
+
+ yaffs_update_parent(old_dir);
+ if (new_dir != old_dir)
+ yaffs_update_parent(new_dir);
+
+ return result;
+ }
+ return YAFFS_FAIL;
+}
+
+/*----------------------- Initialisation Scanning ---------------------- */
+
+void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
+ int backward_scanning)
+{
+ struct yaffs_obj *obj;
+
+ if (!backward_scanning) {
+ /* Handle YAFFS1 forward scanning case
+ * For YAFFS1 we always do the deletion
+ */
+
+ } else {
+ /* Handle YAFFS2 case (backward scanning)
+ * If the shadowed object exists then ignore.
+ */
+ obj = yaffs_find_by_number(dev, obj_id);
+ if (obj)
+ return;
+ }
+
+ /* Let's create it (if it does not exist), assuming it is a file so that
+ * it can do shrinking etc.
+ * We put it in the unlinked directory to be cleaned up after scanning.
+ */
+ obj =
+ yaffs_find_or_create_by_number(dev, obj_id, YAFFS_OBJECT_TYPE_FILE);
+ if (!obj)
+ return;
+ obj->is_shadowed = 1;
+ yaffs_add_obj_to_dir(dev->unlinked_dir, obj);
+ obj->variant.file_variant.shrink_size = 0;
+ obj->valid = 1; /* So that we don't read any other info for this file */
+
+}
+
+void yaffs_link_fixup(struct yaffs_dev *dev, struct yaffs_obj *hard_list)
+{
+ struct yaffs_obj *hl;
+ struct yaffs_obj *in;
+
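+ /* During scanning, hard_links.next is temporarily used to thread all
+ * hardlink objects into a singly linked list; here each one is hooked
+ * onto the real hard_links list of its equivalent object.
+ */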
+ while (hard_list) {
+ hl = hard_list;
+ hard_list = (struct yaffs_obj *)(hard_list->hard_links.next);
+
+ in = yaffs_find_by_number(dev,
+ hl->variant.
+ hardlink_variant.equiv_id);
+
+ if (in) {
+ /* Add the hardlink pointers */
+ hl->variant.hardlink_variant.equiv_obj = in;
+ list_add(&hl->hard_links, &in->hard_links);
+ } else {
+ /* TODO: Need to report/handle this better.
+ * Got a problem... hardlink to a non-existent object.
+ */
+ hl->variant.hardlink_variant.equiv_obj = NULL;
+ INIT_LIST_HEAD(&hl->hard_links);
+
+ }
+ }
+}
+
+static void yaffs_strip_deleted_objs(struct yaffs_dev *dev)
+{
+ /*
+ * Sort out state of unlinked and deleted objects after scanning.
+ */
+ struct list_head *i;
+ struct list_head *n;
+ struct yaffs_obj *l;
+
+ if (dev->read_only)
+ return;
+
+ /* Soft delete all the unlinked files */
+ list_for_each_safe(i, n,
+ &dev->unlinked_dir->variant.dir_variant.children) {
+ if (i) {
+ l = list_entry(i, struct yaffs_obj, siblings);
+ yaffs_del_obj(l);
+ }
+ }
+
+ list_for_each_safe(i, n, &dev->del_dir->variant.dir_variant.children) {
+ if (i) {
+ l = list_entry(i, struct yaffs_obj, siblings);
+ yaffs_del_obj(l);
+ }
+ }
+
+}
+
+/*
+ * This code iterates through all the objects making sure that they are rooted.
+ * Any unrooted objects are re-rooted in lost+found.
+ * An object needs to be in one of:
+ * - Directly under deleted, unlinked
+ * - Directly or indirectly under root.
+ *
+ * Note:
+ * This code assumes that we don't ever change the current relationships between
+ * directories:
+ * root_dir->parent == unlinked_dir->parent == del_dir->parent == NULL
+ * lost-n-found->parent == root_dir
+ *
+ * This fixes the problem where directories might have inadvertently been deleted
+ * leaving the object "hanging" without being rooted in the directory tree.
+ */
+
+static int yaffs_has_null_parent(struct yaffs_dev *dev, struct yaffs_obj *obj)
+{
+ return (obj == dev->del_dir ||
+ obj == dev->unlinked_dir || obj == dev->root_dir);
+}
+
+static void yaffs_fix_hanging_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_obj *parent;
+ int i;
+ struct list_head *lh;
+ struct list_head *n;
+ int depth_limit;
+ int hanging;
+
+ if (dev->read_only)
+ return;
+
+ /* Iterate through the objects in each hash entry,
+ * looking at each object.
+ * Make sure it is rooted.
+ */
+
+ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+ list_for_each_safe(lh, n, &dev->obj_bucket[i].list) {
+ if (lh) {
+ obj =
+ list_entry(lh, struct yaffs_obj, hash_link);
+ parent = obj->parent;
+
+ if (yaffs_has_null_parent(dev, obj)) {
+ /* These directories are not hanging */
+ hanging = 0;
+ } else if (!parent
+ || parent->variant_type !=
+ YAFFS_OBJECT_TYPE_DIRECTORY) {
+ hanging = 1;
+ } else if (yaffs_has_null_parent(dev, parent)) {
+ hanging = 0;
+ } else {
+ /*
+ * Need to follow the parent chain to see if it is hanging.
+ */
+ hanging = 0;
+ depth_limit = 100;
+
+ while (parent != dev->root_dir &&
+ parent->parent &&
+ parent->parent->variant_type ==
+ YAFFS_OBJECT_TYPE_DIRECTORY
+ && depth_limit > 0) {
+ parent = parent->parent;
+ depth_limit--;
+ }
+ if (parent != dev->root_dir)
+ hanging = 1;
+ }
+ if (hanging) {
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Hanging object %d moved to lost and found",
+ obj->obj_id);
+ yaffs_add_obj_to_dir(dev->lost_n_found,
+ obj);
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Delete directory contents for cleaning up lost and found.
+ */
+static void yaffs_del_dir_contents(struct yaffs_obj *dir)
+{
+ struct yaffs_obj *obj;
+ struct list_head *lh;
+ struct list_head *n;
+
+ if (dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
+ YBUG();
+
+ list_for_each_safe(lh, n, &dir->variant.dir_variant.children) {
+ if (lh) {
+ obj = list_entry(lh, struct yaffs_obj, siblings);
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY)
+ yaffs_del_dir_contents(obj);
+
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Deleting lost_found object %d",
+ obj->obj_id);
+
+ /* Need to use UnlinkObject since Delete would not handle
+ * hardlinked objects correctly.
+ */
+ yaffs_unlink_obj(obj);
+ }
+ }
+
+}
+
+static void yaffs_empty_l_n_f(struct yaffs_dev *dev)
+{
+ yaffs_del_dir_contents(dev->lost_n_found);
+}
+
+
+struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *directory,
+ const YCHAR * name)
+{
+ int sum;
+
+ struct list_head *i;
+ YCHAR buffer[YAFFS_MAX_NAME_LENGTH + 1];
+
+ struct yaffs_obj *l;
+
+ if (!name)
+ return NULL;
+
+ if (!directory) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: yaffs_find_by_name: null pointer directory"
+ );
+ YBUG();
+ return NULL;
+ }
+ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: yaffs_find_by_name: non-directory"
+ );
+ YBUG();
+ }
+
+ sum = yaffs_calc_name_sum(name);
+
+ list_for_each(i, &directory->variant.dir_variant.children) {
+ if (i) {
+ l = list_entry(i, struct yaffs_obj, siblings);
+
+ if (l->parent != directory)
+ YBUG();
+
+ yaffs_check_obj_details_loaded(l);
+
+ /* Special case for lost-n-found */
+ if (l->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
+ if (!strcmp(name, YAFFS_LOSTNFOUND_NAME))
+ return l;
+ } else if (l->sum == sum
+ || l->hdr_chunk <= 0) {
+ /* LostnFound chunk called Objxxx
+ * Do a real check
+ */
+ yaffs_get_obj_name(l, buffer,
+ YAFFS_MAX_NAME_LENGTH + 1);
+ if (strncmp
+ (name, buffer, YAFFS_MAX_NAME_LENGTH) == 0)
+ return l;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+/* GetEquivalentObject dereferences any hard links to get to the
+ * actual object.
+ */
+
+struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj)
+{
+ if (obj && obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
+ /* We want the object id of the equivalent object, not this one */
+ obj = obj->variant.hardlink_variant.equiv_obj;
+ yaffs_check_obj_details_loaded(obj);
+ }
+ return obj;
+}
+
+/*
+ * A note or two on object names.
+ * - If the object name is missing, we make one up in the form objnnn.
+ *
+ * - ASCII names are stored in the object header's name field from byte zero.
+ * - Unicode names are historically stored starting from byte zero.
+ *
+ * Then there are automatic Unicode names...
+ * The purpose of these is to save names in a way that can be read as
+ * ASCII or Unicode names as appropriate, thus allowing a Unicode and ASCII
+ * system to share files.
+ *
+ * These automatic Unicode names are stored slightly differently:
+ * - If the name can fit in the ASCII character space then it is saved as
+ *   an ASCII name as per above.
+ * - If the name needs Unicode then the name is saved in Unicode
+ *   starting at oh->name[1].
+ */
+static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR * name,
+ int buffer_size)
+{
+ /* Create an object name if we could not find one. */
+ if (strnlen(name, YAFFS_MAX_NAME_LENGTH) == 0) {
+ YCHAR local_name[20];
+ YCHAR num_string[20];
+ YCHAR *x = &num_string[19];
+ unsigned v = obj->obj_id;
+ num_string[19] = 0;
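+ /* Build the decimal representation of obj_id backwards from the
+ * end of num_string; x ends up pointing at the first digit.
+ */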
+ while (v > 0) {
+ x--;
+ *x = '0' + (v % 10);
+ v /= 10;
+ }
+ /* make up a name */
+ strcpy(local_name, YAFFS_LOSTNFOUND_PREFIX);
+ strcat(local_name, x);
+ strncpy(name, local_name, buffer_size - 1);
+ }
+}
+
+int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR * name, int buffer_size)
+{
+ memset(name, 0, buffer_size * sizeof(YCHAR));
+
+ yaffs_check_obj_details_loaded(obj);
+
+ if (obj->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
+ strncpy(name, YAFFS_LOSTNFOUND_NAME, buffer_size - 1);
+ }
+#ifndef CONFIG_YAFFS_NO_SHORT_NAMES
+ else if (obj->short_name[0]) {
+ strcpy(name, obj->short_name);
+ }
+#endif
+ else if (obj->hdr_chunk > 0) {
+ int result;
+ u8 *buffer = yaffs_get_temp_buffer(obj->my_dev, __LINE__);
+
+ struct yaffs_obj_hdr *oh = (struct yaffs_obj_hdr *)buffer;
+
+ memset(buffer, 0, obj->my_dev->data_bytes_per_chunk);
+
+ if (obj->hdr_chunk > 0) {
+ result = yaffs_rd_chunk_tags_nand(obj->my_dev,
+ obj->hdr_chunk,
+ buffer, NULL);
+ }
+ yaffs_load_name_from_oh(obj->my_dev, name, oh->name,
+ buffer_size);
+
+ yaffs_release_temp_buffer(obj->my_dev, buffer, __LINE__);
+ }
+
+ yaffs_fix_null_name(obj, name, buffer_size);
+
+ return strnlen(name, YAFFS_MAX_NAME_LENGTH);
+}
+
+int yaffs_get_obj_length(struct yaffs_obj *obj)
+{
+ /* Dereference any hard linking */
+ obj = yaffs_get_equivalent_obj(obj);
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
+ return obj->variant.file_variant.file_size;
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
+ if (!obj->variant.symlink_variant.alias)
+ return 0;
+ return strnlen(obj->variant.symlink_variant.alias,
+ YAFFS_MAX_ALIAS_LENGTH);
+ } else {
+ /* Only a directory should drop through to here */
+ return obj->my_dev->data_bytes_per_chunk;
+ }
+}
+
+int yaffs_get_obj_link_count(struct yaffs_obj *obj)
+{
+ int count = 0;
+ struct list_head *i;
+
+ if (!obj->unlinked)
+ count++; /* the object itself */
+
+ list_for_each(i, &obj->hard_links)
+ count++; /* add the hard links; */
+
+ return count;
+}
+
+int yaffs_get_obj_inode(struct yaffs_obj *obj)
+{
+ obj = yaffs_get_equivalent_obj(obj);
+
+ return obj->obj_id;
+}
+
+unsigned yaffs_get_obj_type(struct yaffs_obj *obj)
+{
+ obj = yaffs_get_equivalent_obj(obj);
+
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ return DT_REG;
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ return DT_DIR;
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ return DT_LNK;
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ return DT_REG;
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ if (S_ISFIFO(obj->yst_mode))
+ return DT_FIFO;
+ if (S_ISCHR(obj->yst_mode))
+ return DT_CHR;
+ if (S_ISBLK(obj->yst_mode))
+ return DT_BLK;
+ if (S_ISSOCK(obj->yst_mode))
+ return DT_SOCK;
+ default:
+ return DT_REG;
+ break;
+ }
+}
+
+YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj)
+{
+ obj = yaffs_get_equivalent_obj(obj);
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK)
+ return yaffs_clone_str(obj->variant.symlink_variant.alias);
+ else
+ return yaffs_clone_str(_Y(""));
+}
+
+/*--------------------------- Initialisation code -------------------------- */
+
+static int yaffs_check_dev_fns(const struct yaffs_dev *dev)
+{
+
+ /* Common functions, gotta have */
+ if (!dev->param.erase_fn || !dev->param.initialise_flash_fn)
+ return 0;
+
+#ifdef CONFIG_YAFFS_YAFFS2
+
+ /* Can use the "with tags" style interface for yaffs1 or yaffs2 */
+ if (dev->param.write_chunk_tags_fn &&
+ dev->param.read_chunk_tags_fn &&
+ !dev->param.write_chunk_fn &&
+ !dev->param.read_chunk_fn &&
+ dev->param.bad_block_fn && dev->param.query_block_fn)
+ return 1;
+#endif
+
+ /* Can use the "spare" style interface for yaffs1 */
+ if (!dev->param.is_yaffs2 &&
+ !dev->param.write_chunk_tags_fn &&
+ !dev->param.read_chunk_tags_fn &&
+ dev->param.write_chunk_fn &&
+ dev->param.read_chunk_fn &&
+ !dev->param.bad_block_fn && !dev->param.query_block_fn)
+ return 1;
+
+ return 0; /* bad */
+}
+
+static int yaffs_create_initial_dir(struct yaffs_dev *dev)
+{
+ /* Initialise the unlinked, deleted, root and lost and found directories */
+
+ dev->lost_n_found = dev->root_dir = NULL;
+ dev->unlinked_dir = dev->del_dir = NULL;
+
+ dev->unlinked_dir =
+ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_UNLINKED, S_IFDIR);
+
+ dev->del_dir =
+ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_DELETED, S_IFDIR);
+
+ dev->root_dir =
+ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_ROOT,
+ YAFFS_ROOT_MODE | S_IFDIR);
+ dev->lost_n_found =
+ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_LOSTNFOUND,
+ YAFFS_LOSTNFOUND_MODE | S_IFDIR);
+
+ if (dev->lost_n_found && dev->root_dir && dev->unlinked_dir
+ && dev->del_dir) {
+ yaffs_add_obj_to_dir(dev->root_dir, dev->lost_n_found);
+ return YAFFS_OK;
+ }
+
+ return YAFFS_FAIL;
+}
+
+int yaffs_guts_initialise(struct yaffs_dev *dev)
+{
+ int init_failed = 0;
+ unsigned x;
+ int bits;
+
+ yaffs_trace(YAFFS_TRACE_TRACING, "yaffs: yaffs_guts_initialise()" );
+
+ /* Check stuff that must be set */
+
+ if (!dev) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Need a device"
+ );
+ return YAFFS_FAIL;
+ }
+
+ dev->internal_start_block = dev->param.start_block;
+ dev->internal_end_block = dev->param.end_block;
+ dev->block_offset = 0;
+ dev->chunk_offset = 0;
+ dev->n_free_chunks = 0;
+
+ dev->gc_block = 0;
+
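+ /* Note: when the partition starts at block 0, the internal numbering is
+ * shifted up by one block, apparently so that chunk number 0 never refers
+ * to real data (chunk 0 is treated as "no chunk", e.g. hdr_chunk <= 0).
+ */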
+ if (dev->param.start_block == 0) {
+ dev->internal_start_block = dev->param.start_block + 1;
+ dev->internal_end_block = dev->param.end_block + 1;
+ dev->block_offset = 1;
+ dev->chunk_offset = dev->param.chunks_per_block;
+ }
+
+ /* Check geometry parameters. */
+
+ if ((!dev->param.inband_tags && dev->param.is_yaffs2 &&
+ dev->param.total_bytes_per_chunk < 1024) ||
+ (!dev->param.is_yaffs2 &&
+ dev->param.total_bytes_per_chunk < 512) ||
+ (dev->param.inband_tags && !dev->param.is_yaffs2) ||
+ dev->param.chunks_per_block < 2 ||
+ dev->param.n_reserved_blocks < 2 ||
+ dev->internal_start_block <= 0 ||
+ dev->internal_end_block <= 0 ||
+ dev->internal_end_block <=
+ (dev->internal_start_block + dev->param.n_reserved_blocks + 2)
+ ) {
+ /* otherwise it is too small */
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "NAND geometry problems: chunk size %d, type is yaffs%s, inband_tags %d ",
+ dev->param.total_bytes_per_chunk,
+ dev->param.is_yaffs2 ? "2" : "",
+ dev->param.inband_tags);
+ return YAFFS_FAIL;
+ }
+
+ if (yaffs_init_nand(dev) != YAFFS_OK) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "InitialiseNAND failed");
+ return YAFFS_FAIL;
+ }
+
+ /* Sort out space for inband tags, if required */
+ if (dev->param.inband_tags)
+ dev->data_bytes_per_chunk =
+ dev->param.total_bytes_per_chunk -
+ sizeof(struct yaffs_packed_tags2_tags_only);
+ else
+ dev->data_bytes_per_chunk = dev->param.total_bytes_per_chunk;
+
+ /* Got the right mix of functions? */
+ if (!yaffs_check_dev_fns(dev)) {
+ /* Function missing */
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "device function(s) missing or wrong");
+
+ return YAFFS_FAIL;
+ }
+
+ if (dev->is_mounted) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "device already mounted");
+ return YAFFS_FAIL;
+ }
+
+ /* Finished with most checks. One or two more checks happen later on too. */
+
+ dev->is_mounted = 1;
+
+ /* OK now calculate a few things for the device */
+
+ /*
+ * Calculate all the chunk size manipulation numbers:
+ */
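+ /* Illustrative example: with 2048-byte data chunks, chunk_shift = 11,
+ * chunk_div = 1 and chunk_mask = 0x7ff, so an offset splits into
+ * (offset >> 11, offset & 0x7ff). A non-power-of-2 size such as 2000
+ * bytes gives chunk_shift = 4 and chunk_div = 125, and the mask is
+ * not used.
+ */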
+ x = dev->data_bytes_per_chunk;
+ /* We always use dev->chunk_shift and dev->chunk_div */
+ dev->chunk_shift = calc_shifts(x);
+ x >>= dev->chunk_shift;
+ dev->chunk_div = x;
+ /* We only use chunk mask if chunk_div is 1 */
+ dev->chunk_mask = (1 << dev->chunk_shift) - 1;
+
+ /*
+ * Calculate chunk_grp_bits.
+ * We need to find the next power of 2 greater than the total number of
+ * chunks (chunks_per_block * (internal_end_block + 1)).
+ */
+
+ x = dev->param.chunks_per_block * (dev->internal_end_block + 1);
+
+ bits = calc_shifts_ceiling(x);
+
+ /* Set up tnode width if wide tnodes are enabled. */
+ if (!dev->param.wide_tnodes_disabled) {
+ /* bits must be even so that we end up with 32-bit words */
+ if (bits & 1)
+ bits++;
+ if (bits < 16)
+ dev->tnode_width = 16;
+ else
+ dev->tnode_width = bits;
+ } else {
+ dev->tnode_width = 16;
+ }
+
+ dev->tnode_mask = (1 << dev->tnode_width) - 1;
+
+ /* Level0 Tnodes are 16 bits or wider (if wide tnodes are enabled),
+ * so if the bitwidth of the
+ * chunk range we're using is greater than 16 we need
+ * to figure out chunk shift and chunk_grp_size
+ */
+
+ if (bits <= dev->tnode_width)
+ dev->chunk_grp_bits = 0;
+ else
+ dev->chunk_grp_bits = bits - dev->tnode_width;
+
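+ /* Illustrative example: a device with 4096 blocks of 64 chunks has
+ * 262144 chunk ids, so bits = 18. With wide tnodes enabled tnode_width
+ * becomes 18 and chunk_grp_bits is 0; with wide tnodes disabled
+ * tnode_width stays 16 and chunk_grp_bits = 2 (groups of 4 chunks).
+ */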
+ dev->tnode_size = (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8;
+ if (dev->tnode_size < sizeof(struct yaffs_tnode))
+ dev->tnode_size = sizeof(struct yaffs_tnode);
+
+ dev->chunk_grp_size = 1 << dev->chunk_grp_bits;
+
+ if (dev->param.chunks_per_block < dev->chunk_grp_size) {
+ /* We have a problem because the soft delete won't work if
+ * the chunk group size > chunks per block.
+ * This can be remedied by using larger "virtual blocks".
+ */
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "chunk group too large");
+
+ return YAFFS_FAIL;
+ }
+
+ /* OK, we've finished verifying the device, let's continue with initialisation */
+
+ /* More device initialisation */
+ dev->all_gcs = 0;
+ dev->passive_gc_count = 0;
+ dev->oldest_dirty_gc_count = 0;
+ dev->bg_gcs = 0;
+ dev->gc_block_finder = 0;
+ dev->buffered_block = -1;
+ dev->doing_buffered_block_rewrite = 0;
+ dev->n_deleted_files = 0;
+ dev->n_bg_deletions = 0;
+ dev->n_unlinked_files = 0;
+ dev->n_ecc_fixed = 0;
+ dev->n_ecc_unfixed = 0;
+ dev->n_tags_ecc_fixed = 0;
+ dev->n_tags_ecc_unfixed = 0;
+ dev->n_erase_failures = 0;
+ dev->n_erased_blocks = 0;
+ dev->gc_disable = 0;
+ dev->has_pending_prioritised_gc = 1; /* Assume the worst for now, will get fixed on first GC */
+ INIT_LIST_HEAD(&dev->dirty_dirs);
+ dev->oldest_dirty_seq = 0;
+ dev->oldest_dirty_block = 0;
+
+ /* Initialise temporary buffers and caches. */
+ if (!yaffs_init_tmp_buffers(dev))
+ init_failed = 1;
+
+ dev->cache = NULL;
+ dev->gc_cleanup_list = NULL;
+
+ if (!init_failed && dev->param.n_caches > 0) {
+ int i;
+ void *buf;
+ int cache_bytes =
+ dev->param.n_caches * sizeof(struct yaffs_cache);
+
+ if (dev->param.n_caches > YAFFS_MAX_SHORT_OP_CACHES)
+ dev->param.n_caches = YAFFS_MAX_SHORT_OP_CACHES;
+
+ dev->cache = kmalloc(cache_bytes, GFP_NOFS);
+
+ buf = (u8 *) dev->cache;
+
+ if (dev->cache)
+ memset(dev->cache, 0, cache_bytes);
+
+ for (i = 0; i < dev->param.n_caches && buf; i++) {
+ dev->cache[i].object = NULL;
+ dev->cache[i].last_use = 0;
+ dev->cache[i].dirty = 0;
+ dev->cache[i].data = buf =
+ kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
+ }
+ if (!buf)
+ init_failed = 1;
+
+ dev->cache_last_use = 0;
+ }
+
+ dev->cache_hits = 0;
+
+ if (!init_failed) {
+ dev->gc_cleanup_list =
+ kmalloc(dev->param.chunks_per_block * sizeof(u32),
+ GFP_NOFS);
+ if (!dev->gc_cleanup_list)
+ init_failed = 1;
+ }
+
+ if (dev->param.is_yaffs2)
+ dev->param.use_header_file_size = 1;
+
+ if (!init_failed && !yaffs_init_blocks(dev))
+ init_failed = 1;
+
+ yaffs_init_tnodes_and_objs(dev);
+
+ if (!init_failed && !yaffs_create_initial_dir(dev))
+ init_failed = 1;
+
+ if (!init_failed) {
+ /* Now scan the flash. */
+ if (dev->param.is_yaffs2) {
+ if (yaffs2_checkpt_restore(dev)) {
+ yaffs_check_obj_details_loaded(dev->root_dir);
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT | YAFFS_TRACE_MOUNT,
+ "yaffs: restored from checkpoint"
+ );
+ } else {
+
+ /* Clean up the mess caused by an aborted checkpoint load
+ * and scan backwards.
+ */
+ yaffs_deinit_blocks(dev);
+
+ yaffs_deinit_tnodes_and_objs(dev);
+
+ dev->n_erased_blocks = 0;
+ dev->n_free_chunks = 0;
+ dev->alloc_block = -1;
+ dev->alloc_page = -1;
+ dev->n_deleted_files = 0;
+ dev->n_unlinked_files = 0;
+ dev->n_bg_deletions = 0;
+
+ if (!init_failed && !yaffs_init_blocks(dev))
+ init_failed = 1;
+
+ yaffs_init_tnodes_and_objs(dev);
+
+ if (!init_failed
+ && !yaffs_create_initial_dir(dev))
+ init_failed = 1;
+
+ if (!init_failed && !yaffs2_scan_backwards(dev))
+ init_failed = 1;
+ }
+ } else if (!yaffs1_scan(dev)) {
+ init_failed = 1;
+ }
+
+ yaffs_strip_deleted_objs(dev);
+ yaffs_fix_hanging_objs(dev);
+ if (dev->param.empty_lost_n_found)
+ yaffs_empty_l_n_f(dev);
+ }
+
+ if (init_failed) {
+ /* Clean up the mess */
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "yaffs: yaffs_guts_initialise() aborted.");
+
+ yaffs_deinitialise(dev);
+ return YAFFS_FAIL;
+ }
+
+ /* Zero out stats */
+ dev->n_page_reads = 0;
+ dev->n_page_writes = 0;
+ dev->n_erasures = 0;
+ dev->n_gc_copies = 0;
+ dev->n_retired_writes = 0;
+
+ dev->n_retired_blocks = 0;
+
+ yaffs_verify_free_chunks(dev);
+ yaffs_verify_blocks(dev);
+
+ /* Clean up any aborted checkpoint data */
+ if (!dev->is_checkpointed && dev->blocks_in_checkpt > 0)
+ yaffs2_checkpt_invalidate(dev);
+
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "yaffs: yaffs_guts_initialise() done.");
+ return YAFFS_OK;
+
+}
+
+void yaffs_deinitialise(struct yaffs_dev *dev)
+{
+ if (dev->is_mounted) {
+ int i;
+
+ yaffs_deinit_blocks(dev);
+ yaffs_deinit_tnodes_and_objs(dev);
+ if (dev->param.n_caches > 0 && dev->cache) {
+
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].data)
+ kfree(dev->cache[i].data);
+ dev->cache[i].data = NULL;
+ }
+
+ kfree(dev->cache);
+ dev->cache = NULL;
+ }
+
+ kfree(dev->gc_cleanup_list);
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
+ kfree(dev->temp_buffer[i].buffer);
+
+ dev->is_mounted = 0;
+
+ if (dev->param.deinitialise_flash_fn)
+ dev->param.deinitialise_flash_fn(dev);
+ }
+}
+
+int yaffs_count_free_chunks(struct yaffs_dev *dev)
+{
+ int n_free = 0;
+ int b;
+
+ struct yaffs_block_info *blk;
+
+ blk = dev->block_info;
+ for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
+ switch (blk->block_state) {
+ case YAFFS_BLOCK_STATE_EMPTY:
+ case YAFFS_BLOCK_STATE_ALLOCATING:
+ case YAFFS_BLOCK_STATE_COLLECTING:
+ case YAFFS_BLOCK_STATE_FULL:
+ n_free +=
+ (dev->param.chunks_per_block - blk->pages_in_use +
+ blk->soft_del_pages);
+ break;
+ default:
+ break;
+ }
+ blk++;
+ }
+
+ return n_free;
+}
+
+int yaffs_get_n_free_chunks(struct yaffs_dev *dev)
+{
+ /* This is what we report to the outside world */
+
+ int n_free;
+ int n_dirty_caches;
+ int blocks_for_checkpt;
+ int i;
+
+ n_free = dev->n_free_chunks;
+ n_free += dev->n_deleted_files;
+
+ /* Now count the number of dirty chunks in the cache and subtract those */
+
+ for (n_dirty_caches = 0, i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].dirty)
+ n_dirty_caches++;
+ }
+
+ n_free -= n_dirty_caches;
+
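+ /* Never report the reserved blocks (plus one block of slack) as
+ * free space.
+ */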
+ n_free -=
+ ((dev->param.n_reserved_blocks + 1) * dev->param.chunks_per_block);
+
+ /* Now we figure out how much to reserve for the checkpoint and report that... */
+ blocks_for_checkpt = yaffs_calc_checkpt_blocks_required(dev);
+
+ n_free -= (blocks_for_checkpt * dev->param.chunks_per_block);
+
+ if (n_free < 0)
+ n_free = 0;
+
+ return n_free;
+
+}
diff --git a/fs/yaffs2/yaffs_guts.h b/fs/yaffs2/yaffs_guts.h
new file mode 100644
index 000000000000..307eba28676f
--- /dev/null
+++ b/fs/yaffs2/yaffs_guts.h
@@ -0,0 +1,915 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_GUTS_H__
+#define __YAFFS_GUTS_H__
+
+#include "yportenv.h"
+
+#define YAFFS_OK 1
+#define YAFFS_FAIL 0
+
+/* Give us a Y=0x59,
+ * Give us an A=0x41,
+ * Give us an FF=0xFF
+ * Give us an S=0x53
+ * And what have we got...
+ */
+#define YAFFS_MAGIC 0x5941FF53
+
+#define YAFFS_NTNODES_LEVEL0 16
+#define YAFFS_TNODES_LEVEL0_BITS 4
+#define YAFFS_TNODES_LEVEL0_MASK 0xf
+
+#define YAFFS_NTNODES_INTERNAL (YAFFS_NTNODES_LEVEL0 / 2)
+#define YAFFS_TNODES_INTERNAL_BITS (YAFFS_TNODES_LEVEL0_BITS - 1)
+#define YAFFS_TNODES_INTERNAL_MASK 0x7
+#define YAFFS_TNODES_MAX_LEVEL 6
+
+#ifndef CONFIG_YAFFS_NO_YAFFS1
+#define YAFFS_BYTES_PER_SPARE 16
+#define YAFFS_BYTES_PER_CHUNK 512
+#define YAFFS_CHUNK_SIZE_SHIFT 9
+#define YAFFS_CHUNKS_PER_BLOCK 32
+#define YAFFS_BYTES_PER_BLOCK (YAFFS_CHUNKS_PER_BLOCK*YAFFS_BYTES_PER_CHUNK)
+#endif
+
+#define YAFFS_MIN_YAFFS2_CHUNK_SIZE 1024
+#define YAFFS_MIN_YAFFS2_SPARE_SIZE 32
+
+#define YAFFS_MAX_CHUNK_ID 0x000FFFFF
+
+#define YAFFS_ALLOCATION_NOBJECTS 100
+#define YAFFS_ALLOCATION_NTNODES 100
+#define YAFFS_ALLOCATION_NLINKS 100
+
+#define YAFFS_NOBJECT_BUCKETS 256
+
+#define YAFFS_OBJECT_SPACE 0x40000
+#define YAFFS_MAX_OBJECT_ID (YAFFS_OBJECT_SPACE -1)
+
+#define YAFFS_CHECKPOINT_VERSION 4
+
+#ifdef CONFIG_YAFFS_UNICODE
+#define YAFFS_MAX_NAME_LENGTH 127
+#define YAFFS_MAX_ALIAS_LENGTH 79
+#else
+#define YAFFS_MAX_NAME_LENGTH 255
+#define YAFFS_MAX_ALIAS_LENGTH 159
+#endif
+
+#define YAFFS_SHORT_NAME_LENGTH 15
+
+/* Some special object ids for pseudo objects */
+#define YAFFS_OBJECTID_ROOT 1
+#define YAFFS_OBJECTID_LOSTNFOUND 2
+#define YAFFS_OBJECTID_UNLINKED 3
+#define YAFFS_OBJECTID_DELETED 4
+
+/* Pseudo object ids for checkpointing */
+#define YAFFS_OBJECTID_SB_HEADER 0x10
+#define YAFFS_OBJECTID_CHECKPOINT_DATA 0x20
+#define YAFFS_SEQUENCE_CHECKPOINT_DATA 0x21
+
+#define YAFFS_MAX_SHORT_OP_CACHES 20
+
+#define YAFFS_N_TEMP_BUFFERS 6
+
+/* We limit the number of attempts at successfully saving a chunk of data.
+ * Small-page devices have 32 pages per block; large-page devices have 64.
+ * Default to something in the order of 5 to 10 blocks worth of chunks.
+ */
+#define YAFFS_WR_ATTEMPTS (5*64)
+
+/* Sequence numbers are used in YAFFS2 to determine block allocation order.
+ * The range is limited slightly to help distinguish bad numbers from good.
+ * This also allows us to perhaps in the future use special numbers for
+ * special purposes.
+ * EFFFFF00 allows the allocation of 8 blocks per second (~1Mbytes) for 15 years,
+ * and is a larger number than the lifetime of a 2GB device.
+ */
+#define YAFFS_LOWEST_SEQUENCE_NUMBER 0x00001000
+#define YAFFS_HIGHEST_SEQUENCE_NUMBER 0xEFFFFF00
+
+/* Special sequence number for bad block that failed to be marked bad */
+#define YAFFS_SEQUENCE_BAD_BLOCK 0xFFFF0000
+
+/* ChunkCache is used for short read/write operations.*/
+struct yaffs_cache {
+ struct yaffs_obj *object;
+ int chunk_id;
+ int last_use;
+ int dirty;
+ int n_bytes; /* Only valid if the cache is dirty */
+ int locked; /* Can't push out or flush while locked. */
+ u8 *data;
+};
+
+/* Tags structures in RAM
+ * NB This uses bitfields. Bitfields should not straddle a u32 boundary, otherwise
+ * the structure size will get blown out.
+ */
+
+#ifndef CONFIG_YAFFS_NO_YAFFS1
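+/* n_bytes is stored split across n_bytes_lsb (10 bits) and n_bytes_msb
+ * (2 bits), giving a 12-bit byte count per chunk.
+ */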
+struct yaffs_tags {
+ unsigned chunk_id:20;
+ unsigned serial_number:2;
+ unsigned n_bytes_lsb:10;
+ unsigned obj_id:18;
+ unsigned ecc:12;
+ unsigned n_bytes_msb:2;
+};
+
+union yaffs_tags_union {
+ struct yaffs_tags as_tags;
+ u8 as_bytes[8];
+};
+
+#endif
+
+/* Stuff used for extended tags in YAFFS2 */
+
+enum yaffs_ecc_result {
+ YAFFS_ECC_RESULT_UNKNOWN,
+ YAFFS_ECC_RESULT_NO_ERROR,
+ YAFFS_ECC_RESULT_FIXED,
+ YAFFS_ECC_RESULT_UNFIXED
+};
+
+enum yaffs_obj_type {
+ YAFFS_OBJECT_TYPE_UNKNOWN,
+ YAFFS_OBJECT_TYPE_FILE,
+ YAFFS_OBJECT_TYPE_SYMLINK,
+ YAFFS_OBJECT_TYPE_DIRECTORY,
+ YAFFS_OBJECT_TYPE_HARDLINK,
+ YAFFS_OBJECT_TYPE_SPECIAL
+};
+
+#define YAFFS_OBJECT_TYPE_MAX YAFFS_OBJECT_TYPE_SPECIAL
+
+struct yaffs_ext_tags {
+
+ unsigned validity0;
+ unsigned chunk_used; /* Status of the chunk: used or unused */
+ unsigned obj_id; /* If 0 then this is not part of an object (unused) */
+ unsigned chunk_id; /* If 0 then this is a header, else a data chunk */
+ unsigned n_bytes; /* Only valid for data chunks */
+
+ /* The following stuff only has meaning when we read */
+ enum yaffs_ecc_result ecc_result;
+ unsigned block_bad;
+
+ /* YAFFS 1 stuff */
+ unsigned is_deleted; /* The chunk is marked deleted */
+ unsigned serial_number; /* Yaffs1 2-bit serial number */
+
+ /* YAFFS2 stuff */
+ unsigned seq_number; /* The sequence number of this block */
+
+ /* Extra info if this is an object header (YAFFS2 only) */
+
+ unsigned extra_available; /* There is extra info available if this is not zero */
+ unsigned extra_parent_id; /* The parent object */
+ unsigned extra_is_shrink; /* Is it a shrink header? */
+ unsigned extra_shadows; /* Does this shadow another object? */
+
+ enum yaffs_obj_type extra_obj_type; /* What object type? */
+
+ unsigned extra_length; /* Length if it is a file */
+ unsigned extra_equiv_id; /* Equivalent object Id if it is a hard link */
+
+ unsigned validity1;
+
+};
+
+/* Spare structure for YAFFS1 */
+struct yaffs_spare {
+ u8 tb0;
+ u8 tb1;
+ u8 tb2;
+ u8 tb3;
+ u8 page_status; /* set to 0 to delete the chunk */
+ u8 block_status;
+ u8 tb4;
+ u8 tb5;
+ u8 ecc1[3];
+ u8 tb6;
+ u8 tb7;
+ u8 ecc2[3];
+};
+
+/*Special structure for passing through to mtd */
+struct yaffs_nand_spare {
+ struct yaffs_spare spare;
+ int eccres1;
+ int eccres2;
+};
+
+/* Block data in RAM */
+
+enum yaffs_block_state {
+ YAFFS_BLOCK_STATE_UNKNOWN = 0,
+
+ YAFFS_BLOCK_STATE_SCANNING,
+ /* Being scanned */
+
+ YAFFS_BLOCK_STATE_NEEDS_SCANNING,
+ /* The block might have something on it (ie it is allocating or full, perhaps empty)
+ * but it needs to be scanned to determine its true state.
+ * This state is only valid during scanning.
+ * NB We tolerate empty because the pre-scanner might be incapable of deciding.
+ * However, if this state is returned on a YAFFS2 device, then we expect a sequence number.
+ */
+
+ YAFFS_BLOCK_STATE_EMPTY,
+ /* This block is empty */
+
+ YAFFS_BLOCK_STATE_ALLOCATING,
+ /* This block is partially allocated.
+ * At least one page holds valid data.
+ * This is the one currently being used for page
+ * allocation. Should never be more than one of these.
+ * If a block is only partially allocated at mount it is treated as full.
+ */
+
+ YAFFS_BLOCK_STATE_FULL,
+ /* All the pages in this block have been allocated.
+ * If a block was only partially allocated when mounted we treat
+ * it as fully allocated.
+ */
+
+ YAFFS_BLOCK_STATE_DIRTY,
+ /* The block was full and now all chunks have been deleted.
+ * Erase me, reuse me.
+ */
+
+ YAFFS_BLOCK_STATE_CHECKPOINT,
+ /* This block is assigned to holding checkpoint data. */
+
+ YAFFS_BLOCK_STATE_COLLECTING,
+ /* This block is being garbage collected */
+
+ YAFFS_BLOCK_STATE_DEAD
+ /* This block has failed and is not in use */
+};
+
+#define YAFFS_NUMBER_OF_BLOCK_STATES (YAFFS_BLOCK_STATE_DEAD + 1)
+
+struct yaffs_block_info {
+
+ int soft_del_pages:10; /* number of soft deleted pages */
+ int pages_in_use:10; /* number of pages in use */
+ unsigned block_state:4; /* One of the above block states. NB use unsigned because enum is sometimes an int */
+ u32 needs_retiring:1; /* Data has failed on this block, need to get valid data off */
+ /* and retire the block. */
+ u32 skip_erased_check:1; /* If this is set we can skip the erased check on this block */
+ u32 gc_prioritise:1; /* An ECC check or blank check has failed on this block.
+ It should be prioritised for GC */
+ u32 chunk_error_strikes:3; /* How many times we've had ecc etc failures on this block and tried to reuse it */
+
+#ifdef CONFIG_YAFFS_YAFFS2
+ u32 has_shrink_hdr:1; /* This block has at least one shrink object header */
+ u32 seq_number; /* block sequence number for yaffs2 */
+#endif
+
+};
+
+/* -------------------------- Object structure -------------------------------*/
+/* This is the object structure as stored on NAND */
+
+struct yaffs_obj_hdr {
+ enum yaffs_obj_type type;
+
+ /* Apply to everything */
+ int parent_obj_id;
+ u16 sum_no_longer_used; /* checksum of name. No longer used */
+ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ /* The following apply to directories, files, symlinks - not hard links */
+ u32 yst_mode; /* protection */
+
+ u32 yst_uid;
+ u32 yst_gid;
+ u32 yst_atime;
+ u32 yst_mtime;
+ u32 yst_ctime;
+
+ /* File size applies to files only */
+ int file_size;
+
+ /* Equivalent object id applies to hard links only. */
+ int equiv_id;
+
+ /* Alias is for symlinks only. */
+ YCHAR alias[YAFFS_MAX_ALIAS_LENGTH + 1];
+
+ u32 yst_rdev; /* device stuff for block and char devices (major/min) */
+
+ u32 win_ctime[2];
+ u32 win_atime[2];
+ u32 win_mtime[2];
+
+ u32 inband_shadowed_obj_id;
+ u32 inband_is_shrink;
+
+ u32 reserved[2];
+ int shadows_obj; /* This object header shadows the specified object if > 0 */
+
+ /* is_shrink applies to object headers written when we shrink the file (ie resize) */
+ u32 is_shrink;
+
+};
+
+/*--------------------------- Tnode -------------------------- */
+
+struct yaffs_tnode {
+ struct yaffs_tnode *internal[YAFFS_NTNODES_INTERNAL];
+};
+
+/*------------------------ Object -----------------------------*/
+/* An object can be one of:
+ * - a directory (no data, has child links)
+ * - a regular file (data.... not prunes :->).
+ * - a symlink [symbolic link] (the alias).
+ * - a hard link
+ */
+
+struct yaffs_file_var {
+ u32 file_size;
+ u32 scanned_size;
+ u32 shrink_size;
+ int top_level;
+ struct yaffs_tnode *top;
+};
+
+struct yaffs_dir_var {
+ struct list_head children; /* list of child links */
+ struct list_head dirty; /* Entry for list of dirty directories */
+};
+
+struct yaffs_symlink_var {
+ YCHAR *alias;
+};
+
+struct yaffs_hardlink_var {
+ struct yaffs_obj *equiv_obj;
+ u32 equiv_id;
+};
+
+union yaffs_obj_var {
+ struct yaffs_file_var file_variant;
+ struct yaffs_dir_var dir_variant;
+ struct yaffs_symlink_var symlink_variant;
+ struct yaffs_hardlink_var hardlink_variant;
+};
+
+struct yaffs_obj {
+ u8 deleted:1; /* This should only apply to unlinked files. */
+ u8 soft_del:1; /* it has also been soft deleted */
+ u8 unlinked:1; /* An unlinked file. The file should be in the unlinked directory. */
+ u8 fake:1; /* A fake object has no presence on NAND. */
+ u8 rename_allowed:1; /* Some objects are not allowed to be renamed. */
+ u8 unlink_allowed:1;
+ u8 dirty:1; /* the object needs to be written to flash */
+ u8 valid:1; /* When the file system is being loaded up, this
+ * object might be created before the data
+ * is available (ie. file data records appear before the header).
+ */
+ u8 lazy_loaded:1; /* This object has been lazy loaded and is missing some detail */
+
+ u8 defered_free:1; /* For Linux kernel. Object is removed from NAND, but is
+ * still in the inode cache. Freeing of the object is deferred
+ * until the inode is released.
+ */
+ u8 being_created:1; /* This object is still being created so skip some checks. */
+ u8 is_shadowed:1; /* This object is shadowed on the way to being renamed. */
+
+ u8 xattr_known:1; /* We know whether this object has xattribs or not. */
+ u8 has_xattr:1; /* This object has xattribs. Valid if xattr_known. */
+
+ u8 serial; /* serial number of chunk in NAND. Cached here */
+ u16 sum; /* sum of the name to speed searching */
+
+ struct yaffs_dev *my_dev; /* The device I'm on */
+
+ struct list_head hash_link; /* list of objects in this hash bucket */
+
+ struct list_head hard_links; /* all the equivalent hard linked objects */
+
+ /* directory structure stuff */
+ /* also used for linking up the free list */
+ struct yaffs_obj *parent;
+ struct list_head siblings;
+
+ /* Where's my object header in NAND? */
+ int hdr_chunk;
+
+ int n_data_chunks; /* Number of data chunks attached to the file. */
+
+ u32 obj_id; /* the object id value */
+
+ u32 yst_mode;
+
+#ifndef CONFIG_YAFFS_NO_SHORT_NAMES
+ YCHAR short_name[YAFFS_SHORT_NAME_LENGTH + 1];
+#endif
+
+#ifdef CONFIG_YAFFS_WINCE
+ u32 win_ctime[2];
+ u32 win_mtime[2];
+ u32 win_atime[2];
+#else
+ u32 yst_uid;
+ u32 yst_gid;
+ u32 yst_atime;
+ u32 yst_mtime;
+ u32 yst_ctime;
+#endif
+
+ u32 yst_rdev;
+
+ void *my_inode;
+
+ enum yaffs_obj_type variant_type;
+
+ union yaffs_obj_var variant;
+
+};
+
+struct yaffs_obj_bucket {
+ struct list_head list;
+ int count;
+};
+
+/* yaffs_checkpt_obj holds the definition of an object as dumped
+ * by checkpointing.
+ */
+
+struct yaffs_checkpt_obj {
+ int struct_type;
+ u32 obj_id;
+ u32 parent_id;
+ int hdr_chunk;
+ enum yaffs_obj_type variant_type:3;
+ u8 deleted:1;
+ u8 soft_del:1;
+ u8 unlinked:1;
+ u8 fake:1;
+ u8 rename_allowed:1;
+ u8 unlink_allowed:1;
+ u8 serial;
+ int n_data_chunks;
+ u32 size_or_equiv_obj;
+};
+
+/*--------------------- Temporary buffers ----------------
+ *
+ * These are chunk-sized working buffers. Each device has a few
+ */
+
+struct yaffs_buffer {
+ u8 *buffer;
+ int line; /* track from whence this buffer was allocated */
+ int max_line;
+};
+
+/*----------------- Device ---------------------------------*/
+
+struct yaffs_param {
+ const YCHAR *name;
+
+ /*
+ * Entry parameters set up way early. Yaffs sets up the rest.
+ * The structure should be zeroed out before use so that unused
+ * and default values are zero.
+ */
+
+ int inband_tags; /* Use inband tags */
+ u32 total_bytes_per_chunk; /* Should be >= 512, does not need to be a power of 2 */
+ int chunks_per_block; /* does not need to be a power of 2 */
+ int spare_bytes_per_chunk; /* spare area size */
+ int start_block; /* Start block we're allowed to use */
+ int end_block; /* End block we're allowed to use */
+ int n_reserved_blocks; /* We want this tuneable so that we can reduce */
+ /* reserved blocks on NOR and RAM. */
+
+ int n_caches; /* If <= 0, then short op caching is disabled, else
+ * the number of short op caches (don't use too many).
+ * 10 to 20 is a good bet.
+ */
+ int use_nand_ecc; /* Flag to decide whether or not to use NANDECC on data (yaffs1) */
+ int no_tags_ecc; /* Flag to decide whether or not to do ECC on packed tags (yaffs2) */
+
+ int is_yaffs2; /* Use yaffs2 mode on this device */
+
+ int empty_lost_n_found; /* Auto-empty lost+found directory on mount */
+
+ int refresh_period; /* How often we should check to do a block refresh */
+
+ /* Checkpoint control. Can be set before or after initialisation */
+ u8 skip_checkpt_rd;
+ u8 skip_checkpt_wr;
+
+ int enable_xattr; /* Enable xattribs */
+
+ /* NAND access functions (Must be set before calling YAFFS) */
+
+ int (*write_chunk_fn) (struct yaffs_dev * dev,
+ int nand_chunk, const u8 * data,
+ const struct yaffs_spare * spare);
+ int (*read_chunk_fn) (struct yaffs_dev * dev,
+ int nand_chunk, u8 * data,
+ struct yaffs_spare * spare);
+ int (*erase_fn) (struct yaffs_dev * dev, int flash_block);
+ int (*initialise_flash_fn) (struct yaffs_dev * dev);
+ int (*deinitialise_flash_fn) (struct yaffs_dev * dev);
+
+#ifdef CONFIG_YAFFS_YAFFS2
+ int (*write_chunk_tags_fn) (struct yaffs_dev * dev,
+ int nand_chunk, const u8 * data,
+ const struct yaffs_ext_tags * tags);
+ int (*read_chunk_tags_fn) (struct yaffs_dev * dev,
+ int nand_chunk, u8 * data,
+ struct yaffs_ext_tags * tags);
+ int (*bad_block_fn) (struct yaffs_dev * dev, int block_no);
+ int (*query_block_fn) (struct yaffs_dev * dev, int block_no,
+ enum yaffs_block_state * state,
+ u32 * seq_number);
+#endif
+
+ /* The remove_obj_fn function must be supplied by OS flavours that
+ * need it.
+ * yaffs direct uses it to implement the faster readdir.
+ * Linux uses it to protect the directory during unlocking.
+ */
+ void (*remove_obj_fn) (struct yaffs_obj * obj);
+
+ /* Callback to mark the superblock dirty */
+ void (*sb_dirty_fn) (struct yaffs_dev * dev);
+
+ /* Callback to control garbage collection. */
+ unsigned (*gc_control) (struct yaffs_dev * dev);
+
+ /* Debug control flags. Don't use unless you know what you're doing */
+ int use_header_file_size; /* Flag to determine if we should use file sizes from the header */
+ int disable_lazy_load; /* Disable lazy loading on this device */
+ int wide_tnodes_disabled; /* Set to disable wide tnodes */
+ int disable_soft_del; /* yaffs 1 only: Set to disable the use of softdeletion. */
+
+ int defered_dir_update; /* Set to defer directory updates */
+
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+ int auto_unicode;
+#endif
+ int always_check_erased; /* Force chunk erased check always on */
+};
+
+struct yaffs_dev {
+ struct yaffs_param param;
+
+ /* Context storage. Holds extra OS specific data for this device */
+
+ void *os_context;
+ void *driver_context;
+
+ struct list_head dev_list;
+
+ /* Runtime parameters. Set up by YAFFS. */
+ int data_bytes_per_chunk;
+
+ /* Non-wide tnode stuff */
+ u16 chunk_grp_bits; /* Number of bits that need to be resolved if
+ * the tnodes are not wide enough.
+ */
+ u16 chunk_grp_size; /* == 2^^chunk_grp_bits */
+
+ /* Stuff to support wide tnodes */
+ u32 tnode_width;
+ u32 tnode_mask;
+ u32 tnode_size;
+
+ /* Stuff for figuring out file offset to chunk conversions */
+ u32 chunk_shift; /* Shift value */
+ u32 chunk_div; /* Divisor after shifting: 1 for power-of-2 sizes */
+ u32 chunk_mask; /* Mask to use for power-of-2 case */
+
+ int is_mounted;
+ int read_only;
+ int is_checkpointed;
+
+ /* Stuff to support block offsetting to support start block zero */
+ int internal_start_block;
+ int internal_end_block;
+ int block_offset;
+ int chunk_offset;
+
+ /* Runtime checkpointing stuff */
+ int checkpt_page_seq; /* running sequence number of checkpoint pages */
+ int checkpt_byte_count;
+ int checkpt_byte_offs;
+ u8 *checkpt_buffer;
+ int checkpt_open_write;
+ int blocks_in_checkpt;
+ int checkpt_cur_chunk;
+ int checkpt_cur_block;
+ int checkpt_next_block;
+ int *checkpt_block_list;
+ int checkpt_max_blocks;
+ u32 checkpt_sum;
+ u32 checkpt_xor;
+
+ int checkpoint_blocks_required; /* Number of blocks needed to store current checkpoint set */
+
+ /* Block Info */
+ struct yaffs_block_info *block_info;
+ u8 *chunk_bits; /* bitmap of chunks in use */
+ unsigned block_info_alt:1; /* was allocated using alternative strategy */
+ unsigned chunk_bits_alt:1; /* was allocated using alternative strategy */
+ int chunk_bit_stride; /* Number of bytes of chunk_bits per block.
+ * Must be consistent with chunks_per_block.
+ */
+
+ int n_erased_blocks;
+ int alloc_block; /* Current block being allocated off */
+ u32 alloc_page;
+ int alloc_block_finder; /* Used to search for next allocation block */
+
+ /* Object and Tnode memory management */
+ void *allocator;
+ int n_obj;
+ int n_tnodes;
+
+ int n_hardlinks;
+
+ struct yaffs_obj_bucket obj_bucket[YAFFS_NOBJECT_BUCKETS];
+ u32 bucket_finder;
+
+ int n_free_chunks;
+
+ /* Garbage collection control */
+ u32 *gc_cleanup_list; /* objects to delete at the end of a GC. */
+ u32 n_clean_ups;
+
+ unsigned has_pending_prioritised_gc; /* We think this device might have pending prioritised gcs */
+ unsigned gc_disable;
+ unsigned gc_block_finder;
+ unsigned gc_dirtiest;
+ unsigned gc_pages_in_use;
+ unsigned gc_not_done;
+ unsigned gc_block;
+ unsigned gc_chunk;
+ unsigned gc_skip;
+
+ /* Special directories */
+ struct yaffs_obj *root_dir;
+ struct yaffs_obj *lost_n_found;
+
+ /* Buffer areas for storing data to recover from write failures TODO
+ * u8 buffered_data[YAFFS_CHUNKS_PER_BLOCK][YAFFS_BYTES_PER_CHUNK];
+ * struct yaffs_spare buffered_spare[YAFFS_CHUNKS_PER_BLOCK];
+ */
+
+ int buffered_block; /* Which block is buffered here? */
+ int doing_buffered_block_rewrite;
+
+ struct yaffs_cache *cache;
+ int cache_last_use;
+
+ /* Stuff for background deletion and unlinked files. */
+ struct yaffs_obj *unlinked_dir; /* Directory where unlinked and deleted files live. */
+ struct yaffs_obj *del_dir; /* Directory where deleted objects are sent to disappear. */
+ struct yaffs_obj *unlinked_deletion; /* Current file being background deleted. */
+ int n_deleted_files; /* Count of files awaiting deletion. */
+ int n_unlinked_files; /* Count of unlinked files. */
+ int n_bg_deletions; /* Count of background deletions. */
+
+ /* Temporary buffer management */
+ struct yaffs_buffer temp_buffer[YAFFS_N_TEMP_BUFFERS];
+ int max_temp;
+ int temp_in_use;
+ int unmanaged_buffer_allocs;
+ int unmanaged_buffer_deallocs;
+
+ /* yaffs2 runtime stuff */
+ unsigned seq_number; /* Sequence number of currently allocating block */
+ unsigned oldest_dirty_seq;
+ unsigned oldest_dirty_block;
+
+ /* Block refreshing */
+ int refresh_skip; /* A skip down counter. Refresh happens when this gets to zero. */
+
+ /* Dirty directory handling */
+ struct list_head dirty_dirs; /* List of dirty directories */
+
+ /* Statistics */
+ u32 n_page_writes;
+ u32 n_page_reads;
+ u32 n_erasures;
+ u32 n_erase_failures;
+ u32 n_gc_copies;
+ u32 all_gcs;
+ u32 passive_gc_count;
+ u32 oldest_dirty_gc_count;
+ u32 n_gc_blocks;
+ u32 bg_gcs;
+ u32 n_retired_writes;
+ u32 n_retired_blocks;
+ u32 n_ecc_fixed;
+ u32 n_ecc_unfixed;
+ u32 n_tags_ecc_fixed;
+ u32 n_tags_ecc_unfixed;
+ u32 n_deletions;
+ u32 n_unmarked_deletions;
+ u32 refresh_count;
+ u32 cache_hits;
+
+};
+
+/* The yaffs_checkpt_dev structure holds the device information that changes
+ * at runtime and must be preserved over unmount/mount cycles.
+ */
+struct yaffs_checkpt_dev {
+ int struct_type;
+ int n_erased_blocks;
+ int alloc_block; /* Current block being allocated off */
+ u32 alloc_page;
+ int n_free_chunks;
+
+ int n_deleted_files; /* Count of files awaiting deletion. */
+ int n_unlinked_files; /* Count of unlinked files. */
+ int n_bg_deletions; /* Count of background deletions. */
+
+ /* yaffs2 runtime stuff */
+ unsigned seq_number; /* Sequence number of currently allocating block */
+
+};
+
+struct yaffs_checkpt_validity {
+ int struct_type;
+ u32 magic;
+ u32 version;
+ u32 head;
+};
+
+struct yaffs_shadow_fixer {
+ int obj_id;
+ int shadowed_id;
+ struct yaffs_shadow_fixer *next;
+};
+
+/* Structure for doing xattr modifications */
+struct yaffs_xattr_mod {
+ int set; /* If 0 then this is a deletion */
+ const YCHAR *name;
+ const void *data;
+ int size;
+ int flags;
+ int result;
+};
+
+/*----------------------- YAFFS Functions -----------------------*/
+
+int yaffs_guts_initialise(struct yaffs_dev *dev);
+void yaffs_deinitialise(struct yaffs_dev *dev);
+
+int yaffs_get_n_free_chunks(struct yaffs_dev *dev);
+
+int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR * old_name,
+ struct yaffs_obj *new_dir, const YCHAR * new_name);
+
+int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR * name);
+int yaffs_del_obj(struct yaffs_obj *obj);
+
+int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR * name, int buffer_size);
+int yaffs_get_obj_length(struct yaffs_obj *obj);
+int yaffs_get_obj_inode(struct yaffs_obj *obj);
+unsigned yaffs_get_obj_type(struct yaffs_obj *obj);
+int yaffs_get_obj_link_count(struct yaffs_obj *obj);
+
+/* File operations */
+int yaffs_file_rd(struct yaffs_obj *obj, u8 * buffer, loff_t offset,
+ int n_bytes);
+int yaffs_wr_file(struct yaffs_obj *obj, const u8 * buffer, loff_t offset,
+ int n_bytes, int write_through);
+int yaffs_resize_file(struct yaffs_obj *obj, loff_t new_size);
+
+struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
+ const YCHAR * name, u32 mode, u32 uid,
+ u32 gid);
+
+int yaffs_flush_file(struct yaffs_obj *obj, int update_time, int data_sync);
+
+/* Flushing and checkpointing */
+void yaffs_flush_whole_cache(struct yaffs_dev *dev);
+
+int yaffs_checkpoint_save(struct yaffs_dev *dev);
+int yaffs_checkpoint_restore(struct yaffs_dev *dev);
+
+/* Directory operations */
+struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR * name,
+ u32 mode, u32 uid, u32 gid);
+struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *the_dir,
+ const YCHAR * name);
+struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number);
+
+/* Link operations */
+struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name,
+ struct yaffs_obj *equiv_obj);
+
+struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj);
+
+/* Symlink operations */
+struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
+ const YCHAR * name, u32 mode, u32 uid,
+ u32 gid, const YCHAR * alias);
+YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj);
+
+/* Special inodes (fifos, sockets and devices) */
+struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
+ const YCHAR * name, u32 mode, u32 uid,
+ u32 gid, u32 rdev);
+
+int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR * name,
+ const void *value, int size, int flags);
+int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR * name, void *value,
+ int size);
+int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size);
+int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR * name);
+
+/* Special directories */
+struct yaffs_obj *yaffs_root(struct yaffs_dev *dev);
+struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev);
+
+void yaffs_handle_defered_free(struct yaffs_obj *obj);
+
+void yaffs_update_dirty_dirs(struct yaffs_dev *dev);
+
+int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency);
+
+/* Debug dump */
+int yaffs_dump_obj(struct yaffs_obj *obj);
+
+void yaffs_guts_test(struct yaffs_dev *dev);
+
+/* A few useful functions to be used within the core files*/
+void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
+ int lyn);
+int yaffs_check_ff(u8 * buffer, int n_bytes);
+void yaffs_handle_chunk_error(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi);
+
+u8 *yaffs_get_temp_buffer(struct yaffs_dev *dev, int line_no);
+void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 * buffer, int line_no);
+
+struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
+ int number,
+ enum yaffs_obj_type type);
+int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+ int nand_chunk, int in_scan);
+void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name);
+void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
+ const struct yaffs_obj_hdr *oh);
+void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj);
+YCHAR *yaffs_clone_str(const YCHAR * str);
+void yaffs_link_fixup(struct yaffs_dev *dev, struct yaffs_obj *hard_list);
+void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no);
+int yaffs_update_oh(struct yaffs_obj *in, const YCHAR * name,
+ int force, int is_shrink, int shadows,
+ struct yaffs_xattr_mod *xop);
+void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
+ int backward_scanning);
+int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks);
+struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev);
+struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct,
+ u32 chunk_id,
+ struct yaffs_tnode *passed_tn);
+
+int yaffs_do_file_wr(struct yaffs_obj *in, const u8 * buffer, loff_t offset,
+ int n_bytes, int write_through);
+void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size);
+void yaffs_skip_rest_of_block(struct yaffs_dev *dev);
+
+int yaffs_count_free_chunks(struct yaffs_dev *dev);
+
+struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct,
+ u32 chunk_id);
+
+u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
+ unsigned pos);
+
+int yaffs_is_non_empty_dir(struct yaffs_obj *obj);
+#endif
diff --git a/fs/yaffs2/yaffs_linux.h b/fs/yaffs2/yaffs_linux.h
new file mode 100644
index 000000000000..3b508cbc4e8a
--- /dev/null
+++ b/fs/yaffs2/yaffs_linux.h
@@ -0,0 +1,41 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_LINUX_H__
+#define __YAFFS_LINUX_H__
+
+#include "yportenv.h"
+
+struct yaffs_linux_context {
+ struct list_head context_list; /* List of these we have mounted */
+ struct yaffs_dev *dev;
+ struct super_block *super;
+ struct task_struct *bg_thread; /* Background thread for this device */
+ int bg_running;
+ struct mutex gross_lock; /* Gross locking mutex*/
+ u8 *spare_buffer; /* For mtdif2 use. Don't know the size of the buffer
+ * at compile time so we have to allocate it.
+ */
+ struct list_head search_contexts;
+ void (*put_super_fn) (struct super_block * sb);
+
+ struct task_struct *readdir_process;
+ unsigned mount_id;
+};
+
+#define yaffs_dev_to_lc(dev) ((struct yaffs_linux_context *)((dev)->os_context))
+#define yaffs_dev_to_mtd(dev) ((struct mtd_info *)((dev)->driver_context))
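+
+/* Typical usage of the accessors above (e.g. in the MTD interface code):
+ *   struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ *   u8 *spare = yaffs_dev_to_lc(dev)->spare_buffer;
+ */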
+
+#endif
diff --git a/fs/yaffs2/yaffs_mtdif.c b/fs/yaffs2/yaffs_mtdif.c
new file mode 100644
index 000000000000..7cf53b3d91be
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif.c
@@ -0,0 +1,54 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yportenv.h"
+
+#include "yaffs_mtdif.h"
+
+#include "linux/mtd/mtd.h"
+#include "linux/types.h"
+#include "linux/time.h"
+#include "linux/mtd/nand.h"
+
+#include "yaffs_linux.h"
+
+int nandmtd_erase_block(struct yaffs_dev *dev, int block_no)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ u32 addr =
+ ((loff_t) block_no) * dev->param.total_bytes_per_chunk
+ * dev->param.chunks_per_block;
+ struct erase_info ei;
+
+ int retval = 0;
+
+ ei.mtd = mtd;
+ ei.addr = addr;
+ ei.len = dev->param.total_bytes_per_chunk * dev->param.chunks_per_block;
+ ei.time = 1000;
+ ei.retries = 2;
+ ei.callback = NULL;
+ ei.priv = (u_long) dev;
+
+ retval = mtd->erase(mtd, &ei);
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
+int nandmtd_initialise(struct yaffs_dev *dev)
+{
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_mtdif.h b/fs/yaffs2/yaffs_mtdif.h
new file mode 100644
index 000000000000..666507417fec
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif.h
@@ -0,0 +1,23 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_MTDIF_H__
+#define __YAFFS_MTDIF_H__
+
+#include "yaffs_guts.h"
+
+int nandmtd_erase_block(struct yaffs_dev *dev, int block_no);
+int nandmtd_initialise(struct yaffs_dev *dev);
+#endif
diff --git a/fs/yaffs2/yaffs_mtdif1.c b/fs/yaffs2/yaffs_mtdif1.c
new file mode 100644
index 000000000000..51083695eb33
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif1.c
@@ -0,0 +1,330 @@
+/*
+ * YAFFS: Yet another FFS. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This module provides the interface between yaffs_nand.c and the
+ * MTD API. This version is used when the MTD interface supports the
+ * 'mtd_oob_ops' style calls to read_oob and write_oob, circa 2.6.17,
+ * and we have a small-page NAND device.
+ *
+ * These functions are invoked via function pointers in yaffs_nand.c.
+ * This replaces functionality provided by functions in yaffs_mtdif.c
+ * and the yaffs_tags compatibility functions in yaffs_tagscompat.c that are
+ * called in yaffs_mtdif.c when the function pointers are NULL.
+ * We assume the MTD layer is performing ECC (use_nand_ecc is true).
+ */
+
+#include "yportenv.h"
+#include "yaffs_trace.h"
+#include "yaffs_guts.h"
+#include "yaffs_packedtags1.h"
+#include "yaffs_tagscompat.h" /* for yaffs_calc_tags_ecc */
+#include "yaffs_linux.h"
+
+#include "linux/kernel.h"
+#include "linux/version.h"
+#include "linux/types.h"
+#include "linux/mtd/mtd.h"
+
+#ifndef CONFIG_YAFFS_9BYTE_TAGS
+# define YTAG1_SIZE 8
+#else
+# define YTAG1_SIZE 9
+#endif
+
+/* Write a chunk (page) of data to NAND.
+ *
+ * Caller always provides ExtendedTags data which are converted to a more
+ * compact (packed) form for storage in NAND. A mini-ECC runs over the
+ * contents of the tags meta-data and is used to validate the tags when read.
+ *
+ * - Pack ExtendedTags to packed_tags1 form
+ * - Compute mini-ECC for packed_tags1
+ * - Write data and packed tags to NAND.
+ *
+ * Note: Due to the use of the packed_tags1 meta-data which does not include
+ * a full sequence number (as found in the larger packed_tags2 form) it is
+ * necessary for Yaffs to re-write a chunk/page (just once) to mark it as
+ * discarded and dirty. This is not ideal: newer NAND parts are supposed
+ * to be written just once. When Yaffs performs this operation, this
+ * function is called with a NULL data pointer -- calling MTD write_oob
+ * without data is valid usage (2.6.17).
+ *
+ * Any underlying MTD error results in YAFFS_FAIL.
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+int nandmtd1_write_chunk_tags(struct yaffs_dev *dev,
+ int nand_chunk, const u8 * data,
+ const struct yaffs_ext_tags *etags)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int chunk_bytes = dev->data_bytes_per_chunk;
+ loff_t addr = ((loff_t) nand_chunk) * chunk_bytes;
+ struct mtd_oob_ops ops;
+ struct yaffs_packed_tags1 pt1;
+ int retval;
+
+ /* we assume that packed_tags1 and struct yaffs_tags are compatible */
+ compile_time_assertion(sizeof(struct yaffs_packed_tags1) == 12);
+ compile_time_assertion(sizeof(struct yaffs_tags) == 8);
+
+ yaffs_pack_tags1(&pt1, etags);
+ yaffs_calc_tags_ecc((struct yaffs_tags *)&pt1);
+
+ /* When deleting a chunk, the upper layer provides only a skeletal
+ * etags structure with is_deleted set. However, we need to update the
+ * tags, not erase them completely. So we use the NAND write property
+ * that only zeroed-bits stick and set tag bytes to all-ones and
+ * zero just the (not) deleted bit.
+ */
+#ifndef CONFIG_YAFFS_9BYTE_TAGS
+ if (etags->is_deleted) {
+ memset(&pt1, 0xff, 8);
+ /* clear delete status bit to indicate deleted */
+ pt1.deleted = 0;
+ }
+#else
+ ((u8 *) & pt1)[8] = 0xff;
+ if (etags->is_deleted) {
+ memset(&pt1, 0xff, 8);
+ /* zero page_status byte to indicate deleted */
+ ((u8 *) & pt1)[8] = 0;
+ }
+#endif
+
+ memset(&ops, 0, sizeof(ops));
+ ops.mode = MTD_OOB_AUTO;
+ ops.len = (data) ? chunk_bytes : 0;
+ ops.ooblen = YTAG1_SIZE;
+ ops.datbuf = (u8 *) data;
+ ops.oobbuf = (u8 *) & pt1;
+
+ retval = mtd->write_oob(mtd, addr, &ops);
+ if (retval) {
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "write_oob failed, chunk %d, mtd error %d",
+ nand_chunk, retval);
+ }
+ return retval ? YAFFS_FAIL : YAFFS_OK;
+}
+
+/* Return with empty ExtendedTags but add ecc_result.
+ */
+static int rettags(struct yaffs_ext_tags *etags, int ecc_result, int retval)
+{
+ if (etags) {
+ memset(etags, 0, sizeof(*etags));
+ etags->ecc_result = ecc_result;
+ }
+ return retval;
+}
+
+/* Read a chunk (page) from NAND.
+ *
+ * Caller expects ExtendedTags data to be usable even on error; that is,
+ * all members except ecc_result and block_bad are zeroed.
+ *
+ * - Check ECC results for data (if applicable)
+ * - Check for blank/erased block (return empty ExtendedTags if blank)
+ * - Check the packed_tags1 mini-ECC (correct if necessary/possible)
+ * - Convert packed_tags1 to ExtendedTags
+ * - Update ecc_result and block_bad members to reflect state.
+ *
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+int nandmtd1_read_chunk_tags(struct yaffs_dev *dev,
+ int nand_chunk, u8 * data,
+ struct yaffs_ext_tags *etags)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int chunk_bytes = dev->data_bytes_per_chunk;
+ loff_t addr = ((loff_t) nand_chunk) * chunk_bytes;
+ int eccres = YAFFS_ECC_RESULT_NO_ERROR;
+ struct mtd_oob_ops ops;
+ struct yaffs_packed_tags1 pt1;
+ int retval;
+ int deleted;
+
+ memset(&ops, 0, sizeof(ops));
+ ops.mode = MTD_OOB_AUTO;
+ ops.len = (data) ? chunk_bytes : 0;
+ ops.ooblen = YTAG1_SIZE;
+ ops.datbuf = data;
+ ops.oobbuf = (u8 *) & pt1;
+
+ /* Read page and oob using MTD.
+ * Check status and determine ECC result.
+ */
+ retval = mtd->read_oob(mtd, addr, &ops);
+ if (retval) {
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "read_oob failed, chunk %d, mtd error %d",
+ nand_chunk, retval);
+ }
+
+ switch (retval) {
+ case 0:
+ /* no error */
+ break;
+
+ case -EUCLEAN:
+ /* MTD's ECC fixed the data */
+ eccres = YAFFS_ECC_RESULT_FIXED;
+ dev->n_ecc_fixed++;
+ break;
+
+ case -EBADMSG:
+ /* MTD's ECC could not fix the data */
+ dev->n_ecc_unfixed++;
+ /* fall through */
+ default:
+ rettags(etags, YAFFS_ECC_RESULT_UNFIXED, 0);
+ etags->block_bad = (mtd->block_isbad) (mtd, addr);
+ return YAFFS_FAIL;
+ }
+
+ /* Check for a blank/erased chunk.
+ */
+ if (yaffs_check_ff((u8 *) & pt1, 8)) {
+ /* when blank, upper layers want ecc_result to be <= NO_ERROR */
+ return rettags(etags, YAFFS_ECC_RESULT_NO_ERROR, YAFFS_OK);
+ }
+#ifndef CONFIG_YAFFS_9BYTE_TAGS
+ /* Read the deleted status (bit) then return it to its non-deleted
+ * state before performing the tags mini-ECC check. pt1.deleted is
+ * inverted.
+ */
+ deleted = !pt1.deleted;
+ pt1.deleted = 1;
+#else
+ deleted = (yaffs_count_bits(((u8 *) & pt1)[8]) < 7);
+#endif
+
+ /* Check the packed tags mini-ECC and correct if necessary/possible.
+ */
+ retval = yaffs_check_tags_ecc((struct yaffs_tags *)&pt1);
+ switch (retval) {
+ case 0:
+ /* no tags error, use MTD result */
+ break;
+ case 1:
+ /* recovered tags-ECC error */
+ dev->n_tags_ecc_fixed++;
+ if (eccres == YAFFS_ECC_RESULT_NO_ERROR)
+ eccres = YAFFS_ECC_RESULT_FIXED;
+ break;
+ default:
+ /* unrecovered tags-ECC error */
+ dev->n_tags_ecc_unfixed++;
+ return rettags(etags, YAFFS_ECC_RESULT_UNFIXED, YAFFS_FAIL);
+ }
+
+ /* Unpack the tags to extended form and set ECC result.
+ * [set should_be_ff just to keep yaffs_unpack_tags1 happy]
+ */
+ pt1.should_be_ff = 0xFFFFFFFF;
+ yaffs_unpack_tags1(etags, &pt1);
+ etags->ecc_result = eccres;
+
+ /* Set deleted state */
+ etags->is_deleted = deleted;
+ return YAFFS_OK;
+}
+
+/* Mark a block bad.
+ *
+ * This is a persistent state.
+ * Use of this function should be rare.
+ *
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+int nandmtd1_mark_block_bad(struct yaffs_dev *dev, int block_no)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int blocksize = dev->param.chunks_per_block * dev->data_bytes_per_chunk;
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+ "marking block %d bad", block_no);
+
+ retval = mtd->block_markbad(mtd, (loff_t) blocksize * block_no);
+ return (retval) ? YAFFS_FAIL : YAFFS_OK;
+}
+
+/* Check any MTD prerequisites.
+ *
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+static int nandmtd1_test_prerequists(struct mtd_info *mtd)
+{
+ /* 2.6.18 has mtd->ecclayout->oobavail */
+ /* 2.6.21 has mtd->ecclayout->oobavail and mtd->oobavail */
+ int oobavail = mtd->ecclayout->oobavail;
+
+ if (oobavail < YTAG1_SIZE) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "mtd device has only %d bytes for tags, need %d",
+ oobavail, YTAG1_SIZE);
+ return YAFFS_FAIL;
+ }
+ return YAFFS_OK;
+}
+
+/* Query for the current state of a specific block.
+ *
+ * Examine the tags of the first chunk of the block and return the state:
+ * - YAFFS_BLOCK_STATE_DEAD, the block is marked bad
+ * - YAFFS_BLOCK_STATE_NEEDS_SCANNING, the block is in use
+ * - YAFFS_BLOCK_STATE_EMPTY, the block is clean
+ *
+ * Always returns YAFFS_OK.
+ */
+int nandmtd1_query_block(struct yaffs_dev *dev, int block_no,
+ enum yaffs_block_state *state_ptr, u32 * seq_ptr)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int chunk_num = block_no * dev->param.chunks_per_block;
+ loff_t addr = (loff_t) chunk_num * dev->data_bytes_per_chunk;
+ struct yaffs_ext_tags etags;
+ int state = YAFFS_BLOCK_STATE_DEAD;
+ int seqnum = 0;
+ int retval;
+
+ /* We don't yet have a good place to test for MTD config prerequisites.
+ * Do it here as we are called during the initial scan.
+ */
+ if (nandmtd1_test_prerequists(mtd) != YAFFS_OK)
+ return YAFFS_FAIL;
+
+ retval = nandmtd1_read_chunk_tags(dev, chunk_num, NULL, &etags);
+ etags.block_bad = (mtd->block_isbad) (mtd, addr);
+ if (etags.block_bad) {
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+ "block %d is marked bad", block_no);
+ state = YAFFS_BLOCK_STATE_DEAD;
+ } else if (etags.ecc_result != YAFFS_ECC_RESULT_NO_ERROR) {
+ /* bad tags, need to look more closely */
+ state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+ } else if (etags.chunk_used) {
+ state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+ seqnum = etags.seq_number;
+ } else {
+ state = YAFFS_BLOCK_STATE_EMPTY;
+ }
+
+ *state_ptr = state;
+ *seq_ptr = seqnum;
+
+ /* query always succeeds */
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_mtdif1.h b/fs/yaffs2/yaffs_mtdif1.h
new file mode 100644
index 000000000000..07ce4524f0f6
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif1.h
@@ -0,0 +1,29 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_MTDIF1_H__
+#define __YAFFS_MTDIF1_H__
+
+int nandmtd1_write_chunk_tags(struct yaffs_dev *dev, int nand_chunk,
+ const u8 * data,
+ const struct yaffs_ext_tags *tags);
+
+int nandmtd1_read_chunk_tags(struct yaffs_dev *dev, int nand_chunk,
+ u8 * data, struct yaffs_ext_tags *tags);
+
+int nandmtd1_mark_block_bad(struct yaffs_dev *dev, int block_no);
+
+int nandmtd1_query_block(struct yaffs_dev *dev, int block_no,
+ enum yaffs_block_state *state, u32 * seq_number);
+
+#endif
diff --git a/fs/yaffs2/yaffs_mtdif2.c b/fs/yaffs2/yaffs_mtdif2.c
new file mode 100644
index 000000000000..d1643df2c381
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif2.c
@@ -0,0 +1,225 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* mtd interface for YAFFS2 */
+
+#include "yportenv.h"
+#include "yaffs_trace.h"
+
+#include "yaffs_mtdif2.h"
+
+#include "linux/mtd/mtd.h"
+#include "linux/types.h"
+#include "linux/time.h"
+
+#include "yaffs_packedtags2.h"
+
+#include "yaffs_linux.h"
+
+/* NB: For use with inband tags...
+ * We assume that the data buffer is of size total_bytes_per_chunk so that
+ * we can also use it to load the tags.
+ */
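+/*
+ * Buffer layout when inband tags are used (a sketch derived from the code
+ * below):
+ *   bytes 0 .. data_bytes_per_chunk - 1 : file data
+ *   bytes data_bytes_per_chunk onwards  : struct yaffs_packed_tags2_tags_only
+ * The whole buffer is total_bytes_per_chunk bytes long.
+ */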
+int nandmtd2_write_chunk_tags(struct yaffs_dev *dev, int nand_chunk,
+ const u8 * data,
+ const struct yaffs_ext_tags *tags)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ struct mtd_oob_ops ops;
+ int retval = 0;
+
+ loff_t addr;
+
+ struct yaffs_packed_tags2 pt;
+
+ int packed_tags_size =
+ dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
+ void *packed_tags_ptr =
+ dev->param.no_tags_ecc ? (void *)&pt.t : (void *)&pt;
+
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "nandmtd2_write_chunk_tags chunk %d data %p tags %p",
+ nand_chunk, data, tags);
+
+ addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;
+
+ /* For yaffs2 writing there must be both data and tags.
+ * If we're using inband tags, then the tags are stuffed into
+ * the end of the data buffer.
+ */
+ if (!data || !tags)
+ BUG();
+ else if (dev->param.inband_tags) {
+ struct yaffs_packed_tags2_tags_only *pt2tp;
+ pt2tp =
+ (struct yaffs_packed_tags2_tags_only *)(data +
+ dev->
+ data_bytes_per_chunk);
+ yaffs_pack_tags2_tags_only(pt2tp, tags);
+ } else {
+ yaffs_pack_tags2(&pt, tags, !dev->param.no_tags_ecc);
+ }
+
+ ops.mode = MTD_OOB_AUTO;
+ ops.ooblen = (dev->param.inband_tags) ? 0 : packed_tags_size;
+ ops.len = dev->param.total_bytes_per_chunk;
+ ops.ooboffs = 0;
+ ops.datbuf = (u8 *) data;
+ ops.oobbuf = (dev->param.inband_tags) ? NULL : packed_tags_ptr;
+ retval = mtd->write_oob(mtd, addr, &ops);
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
+int nandmtd2_read_chunk_tags(struct yaffs_dev *dev, int nand_chunk,
+ u8 * data, struct yaffs_ext_tags *tags)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ struct mtd_oob_ops ops;
+
+ size_t dummy;
+ int retval = 0;
+ int local_data = 0;
+
+ loff_t addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;
+
+ struct yaffs_packed_tags2 pt;
+
+ int packed_tags_size =
+ dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
+ void *packed_tags_ptr =
+ dev->param.no_tags_ecc ? (void *)&pt.t : (void *)&pt;
+
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "nandmtd2_read_chunk_tags chunk %d data %p tags %p",
+ nand_chunk, data, tags);
+
+ if (dev->param.inband_tags) {
+
+ if (!data) {
+ local_data = 1;
+ data = yaffs_get_temp_buffer(dev, __LINE__);
+ }
+
+ }
+
+ if (dev->param.inband_tags || (data && !tags))
+ retval = mtd->read(mtd, addr, dev->param.total_bytes_per_chunk,
+ &dummy, data);
+ else if (tags) {
+ ops.mode = MTD_OOB_AUTO;
+ ops.ooblen = packed_tags_size;
+ ops.len = data ? dev->data_bytes_per_chunk : packed_tags_size;
+ ops.ooboffs = 0;
+ ops.datbuf = data;
+ ops.oobbuf = yaffs_dev_to_lc(dev)->spare_buffer;
+ retval = mtd->read_oob(mtd, addr, &ops);
+ }
+
+ if (dev->param.inband_tags) {
+ if (tags) {
+ struct yaffs_packed_tags2_tags_only *pt2tp;
+ pt2tp =
+ (struct yaffs_packed_tags2_tags_only *)&data[dev->
+ data_bytes_per_chunk];
+ yaffs_unpack_tags2_tags_only(tags, pt2tp);
+ }
+ } else {
+ if (tags) {
+ memcpy(packed_tags_ptr,
+ yaffs_dev_to_lc(dev)->spare_buffer,
+ packed_tags_size);
+ yaffs_unpack_tags2(tags, &pt, !dev->param.no_tags_ecc);
+ }
+ }
+
+ if (local_data)
+ yaffs_release_temp_buffer(dev, data, __LINE__);
+
+ if (tags && retval == -EBADMSG
+ && tags->ecc_result == YAFFS_ECC_RESULT_NO_ERROR) {
+ tags->ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ dev->n_ecc_unfixed++;
+ }
+ if (tags && retval == -EUCLEAN
+ && tags->ecc_result == YAFFS_ECC_RESULT_NO_ERROR) {
+ tags->ecc_result = YAFFS_ECC_RESULT_FIXED;
+ dev->n_ecc_fixed++;
+ }
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
+int nandmtd2_mark_block_bad(struct yaffs_dev *dev, int block_no)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int retval;
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "nandmtd2_mark_block_bad %d", block_no);
+
+ retval =
+ mtd->block_markbad(mtd,
+ block_no * dev->param.chunks_per_block *
+ dev->param.total_bytes_per_chunk);
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+
+}
+
+int nandmtd2_query_block(struct yaffs_dev *dev, int block_no,
+ enum yaffs_block_state *state, u32 * seq_number)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_MTD, "nandmtd2_query_block %d", block_no);
+ retval =
+ mtd->block_isbad(mtd,
+ block_no * dev->param.chunks_per_block *
+ dev->param.total_bytes_per_chunk);
+
+ if (retval) {
+ yaffs_trace(YAFFS_TRACE_MTD, "block is bad");
+
+ *state = YAFFS_BLOCK_STATE_DEAD;
+ *seq_number = 0;
+ } else {
+ struct yaffs_ext_tags t;
+ nandmtd2_read_chunk_tags(dev, block_no *
+ dev->param.chunks_per_block, NULL, &t);
+
+ if (t.chunk_used) {
+ *seq_number = t.seq_number;
+ *state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+ } else {
+ *seq_number = 0;
+ *state = YAFFS_BLOCK_STATE_EMPTY;
+ }
+ }
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "block is bad seq %d state %d", *seq_number, *state);
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
diff --git a/fs/yaffs2/yaffs_mtdif2.h b/fs/yaffs2/yaffs_mtdif2.h
new file mode 100644
index 000000000000..d82112610d04
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif2.h
@@ -0,0 +1,29 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_MTDIF2_H__
+#define __YAFFS_MTDIF2_H__
+
+#include "yaffs_guts.h"
+int nandmtd2_write_chunk_tags(struct yaffs_dev *dev, int nand_chunk,
+ const u8 * data,
+ const struct yaffs_ext_tags *tags);
+int nandmtd2_read_chunk_tags(struct yaffs_dev *dev, int nand_chunk,
+ u8 * data, struct yaffs_ext_tags *tags);
+int nandmtd2_mark_block_bad(struct yaffs_dev *dev, int block_no);
+int nandmtd2_query_block(struct yaffs_dev *dev, int block_no,
+ enum yaffs_block_state *state, u32 * seq_number);
+
+#endif
diff --git a/fs/yaffs2/yaffs_nameval.c b/fs/yaffs2/yaffs_nameval.c
new file mode 100644
index 000000000000..daa36f989d31
--- /dev/null
+++ b/fs/yaffs2/yaffs_nameval.c
@@ -0,0 +1,201 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This simple implementation of a name-value store assumes a small number
+ * of values that fit into a small, finite buffer.
+ *
+ * Each attribute is stored as a record:
+ *  sizeof(int) bytes: record size.
+ *  strnlen+1 bytes:   name, NUL terminated.
+ *  nbytes:            value.
+ *  ----------
+ *  total size stored in the record size field.
+ *
+ * This code has not been tested with unicode yet.
+ */
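+
+/* Worked example (illustrative, assuming sizeof(int) == 4 and single-byte
+ * YCHARs): storing the name "user.test" with a 5 byte value produces a
+ * 19 byte record:
+ *   bytes  0..3   record size (19)
+ *   bytes  4..13  "user.test" plus the terminating NUL
+ *   bytes 14..18  the value
+ */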
+
+#include "yaffs_nameval.h"
+
+#include "yportenv.h"
+
+static int nval_find(const char *xb, int xb_size, const YCHAR * name,
+ int *exist_size)
+{
+ int pos = 0;
+ int size;
+
+ memcpy(&size, xb, sizeof(int));
+ while (size > 0 && (size < xb_size) && (pos + size < xb_size)) {
+ if (strncmp
+ ((YCHAR *) (xb + pos + sizeof(int)), name, size) == 0) {
+ if (exist_size)
+ *exist_size = size;
+ return pos;
+ }
+ pos += size;
+ if (pos < xb_size - sizeof(int))
+ memcpy(&size, xb + pos, sizeof(int));
+ else
+ size = 0;
+ }
+ if (exist_size)
+ *exist_size = 0;
+ return -1;
+}
+
+static int nval_used(const char *xb, int xb_size)
+{
+ int pos = 0;
+ int size;
+
+ memcpy(&size, xb + pos, sizeof(int));
+ while (size > 0 && (size < xb_size) && (pos + size < xb_size)) {
+ pos += size;
+ if (pos < xb_size - sizeof(int))
+ memcpy(&size, xb + pos, sizeof(int));
+ else
+ size = 0;
+ }
+ return pos;
+}
+
+int nval_del(char *xb, int xb_size, const YCHAR * name)
+{
+ int pos = nval_find(xb, xb_size, name, NULL);
+ int size;
+
+ if (pos >= 0 && pos < xb_size) {
+ /* Find size, shift rest over this record, then zero out the rest of the buffer */
+ memcpy(&size, xb + pos, sizeof(int));
+ memcpy(xb + pos, xb + pos + size, xb_size - (pos + size));
+ memset(xb + (xb_size - size), 0, size);
+ return 0;
+ } else {
+ return -ENODATA;
+ }
+}
+
+int nval_set(char *xb, int xb_size, const YCHAR * name, const char *buf,
+ int bsize, int flags)
+{
+ int pos;
+ int namelen = strnlen(name, xb_size);
+ int reclen;
+ int size_exist = 0;
+ int space;
+ int start;
+
+ pos = nval_find(xb, xb_size, name, &size_exist);
+
+ if (flags & XATTR_CREATE && pos >= 0)
+ return -EEXIST;
+ if (flags & XATTR_REPLACE && pos < 0)
+ return -ENODATA;
+
+ start = nval_used(xb, xb_size);
+ space = xb_size - start + size_exist;
+
+ reclen = (sizeof(int) + namelen + 1 + bsize);
+
+ if (reclen > space)
+ return -ENOSPC;
+
+ if (pos >= 0) {
+ nval_del(xb, xb_size, name);
+ start = nval_used(xb, xb_size);
+ }
+
+ pos = start;
+
+ memcpy(xb + pos, &reclen, sizeof(int));
+ pos += sizeof(int);
+ strncpy((YCHAR *) (xb + pos), name, reclen);
+ pos += (namelen + 1);
+ memcpy(xb + pos, buf, bsize);
+ return 0;
+}
+
+int nval_get(const char *xb, int xb_size, const YCHAR * name, char *buf,
+ int bsize)
+{
+ int pos = nval_find(xb, xb_size, name, NULL);
+ int size;
+
+ if (pos >= 0 && pos < xb_size) {
+
+ memcpy(&size, xb + pos, sizeof(int));
+ pos += sizeof(int); /* advance past record length */
+ size -= sizeof(int);
+
+ /* Advance over name string */
+ while (xb[pos] && size > 0 && pos < xb_size) {
+ pos++;
+ size--;
+ }
+ /* Advance over NUL */
+ pos++;
+ size--;
+
+ if (size <= bsize) {
+ memcpy(buf, xb + pos, size);
+ return size;
+ }
+
+ }
+ if (pos >= 0)
+ return -ERANGE;
+ else
+ return -ENODATA;
+}
+
+int nval_list(const char *xb, int xb_size, char *buf, int bsize)
+{
+ int pos = 0;
+ int size;
+ int name_len;
+ int ncopied = 0;
+ int filled = 0;
+
+ memcpy(&size, xb + pos, sizeof(int));
+ while (size > sizeof(int) && size <= xb_size && (pos + size) < xb_size
+ && !filled) {
+ pos += sizeof(int);
+ size -= sizeof(int);
+ name_len = strnlen((YCHAR *) (xb + pos), size);
+ if (ncopied + name_len + 1 < bsize) {
+ memcpy(buf, xb + pos, name_len * sizeof(YCHAR));
+ buf += name_len;
+ *buf = '\0';
+ buf++;
+ if (sizeof(YCHAR) > 1) {
+ *buf = '\0';
+ buf++;
+ }
+ ncopied += (name_len + 1);
+ } else {
+ filled = 1;
+ }
+ pos += size;
+ if (pos < xb_size - sizeof(int))
+ memcpy(&size, xb + pos, sizeof(int));
+ else
+ size = 0;
+ }
+ return ncopied;
+}
+
+int nval_hasvalues(const char *xb, int xb_size)
+{
+ return nval_used(xb, xb_size) > 0;
+}
diff --git a/fs/yaffs2/yaffs_nameval.h b/fs/yaffs2/yaffs_nameval.h
new file mode 100644
index 000000000000..2bb02b627628
--- /dev/null
+++ b/fs/yaffs2/yaffs_nameval.h
@@ -0,0 +1,28 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __NAMEVAL_H__
+#define __NAMEVAL_H__
+
+#include "yportenv.h"
+
+int nval_del(char *xb, int xb_size, const YCHAR * name);
+int nval_set(char *xb, int xb_size, const YCHAR * name, const char *buf,
+ int bsize, int flags);
+int nval_get(const char *xb, int xb_size, const YCHAR * name, char *buf,
+ int bsize);
+int nval_list(const char *xb, int xb_size, char *buf, int bsize);
+int nval_hasvalues(const char *xb, int xb_size);
+#endif
diff --git a/fs/yaffs2/yaffs_nand.c b/fs/yaffs2/yaffs_nand.c
new file mode 100644
index 000000000000..e816cabf43f8
--- /dev/null
+++ b/fs/yaffs2/yaffs_nand.c
@@ -0,0 +1,127 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_nand.h"
+#include "yaffs_tagscompat.h"
+#include "yaffs_tagsvalidity.h"
+
+#include "yaffs_getblockinfo.h"
+
+int yaffs_rd_chunk_tags_nand(struct yaffs_dev *dev, int nand_chunk,
+ u8 * buffer, struct yaffs_ext_tags *tags)
+{
+ int result;
+ struct yaffs_ext_tags local_tags;
+
+ int realigned_chunk = nand_chunk - dev->chunk_offset;
+
+ dev->n_page_reads++;
+
+ /* If there are no tags provided, use local tags to get prioritised gc working */
+ if (!tags)
+ tags = &local_tags;
+
+ if (dev->param.read_chunk_tags_fn)
+ result =
+ dev->param.read_chunk_tags_fn(dev, realigned_chunk, buffer,
+ tags);
+ else
+ result = yaffs_tags_compat_rd(dev,
+ realigned_chunk, buffer, tags);
+ if (tags && tags->ecc_result > YAFFS_ECC_RESULT_NO_ERROR) {
+
+ struct yaffs_block_info *bi;
+ bi = yaffs_get_block_info(dev,
+ nand_chunk /
+ dev->param.chunks_per_block);
+ yaffs_handle_chunk_error(dev, bi);
+ }
+
+ return result;
+}
+
+int yaffs_wr_chunk_tags_nand(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 * buffer, struct yaffs_ext_tags *tags)
+{
+
+ dev->n_page_writes++;
+
+ nand_chunk -= dev->chunk_offset;
+
+ if (tags) {
+ tags->seq_number = dev->seq_number;
+ tags->chunk_used = 1;
+ if (!yaffs_validate_tags(tags)) {
+ yaffs_trace(YAFFS_TRACE_ERROR, "Writing uninitialised tags");
+ YBUG();
+ }
+ yaffs_trace(YAFFS_TRACE_WRITE,
+ "Writing chunk %d tags %d %d",
+ nand_chunk, tags->obj_id, tags->chunk_id);
+ } else {
+ yaffs_trace(YAFFS_TRACE_ERROR, "Writing with no tags");
+ YBUG();
+ }
+
+ if (dev->param.write_chunk_tags_fn)
+ return dev->param.write_chunk_tags_fn(dev, nand_chunk, buffer,
+ tags);
+ else
+ return yaffs_tags_compat_wr(dev, nand_chunk, buffer, tags);
+}
+
+int yaffs_mark_bad(struct yaffs_dev *dev, int block_no)
+{
+ block_no -= dev->block_offset;
+
+ if (dev->param.bad_block_fn)
+ return dev->param.bad_block_fn(dev, block_no);
+ else
+ return yaffs_tags_compat_mark_bad(dev, block_no);
+}
+
+int yaffs_query_init_block_state(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ u32 * seq_number)
+{
+ block_no -= dev->block_offset;
+
+ if (dev->param.query_block_fn)
+ return dev->param.query_block_fn(dev, block_no, state,
+ seq_number);
+ else
+ return yaffs_tags_compat_query_block(dev, block_no,
+ state, seq_number);
+}
+
+int yaffs_erase_block(struct yaffs_dev *dev, int flash_block)
+{
+ int result;
+
+ flash_block -= dev->block_offset;
+
+ dev->n_erasures++;
+
+ result = dev->param.erase_fn(dev, flash_block);
+
+ return result;
+}
+
+int yaffs_init_nand(struct yaffs_dev *dev)
+{
+ if (dev->param.initialise_flash_fn)
+ return dev->param.initialise_flash_fn(dev);
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_nand.h b/fs/yaffs2/yaffs_nand.h
new file mode 100644
index 000000000000..543f1987124e
--- /dev/null
+++ b/fs/yaffs2/yaffs_nand.h
@@ -0,0 +1,38 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_NAND_H__
+#define __YAFFS_NAND_H__
+#include "yaffs_guts.h"
+
+int yaffs_rd_chunk_tags_nand(struct yaffs_dev *dev, int nand_chunk,
+ u8 * buffer, struct yaffs_ext_tags *tags);
+
+int yaffs_wr_chunk_tags_nand(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 * buffer, struct yaffs_ext_tags *tags);
+
+int yaffs_mark_bad(struct yaffs_dev *dev, int block_no);
+
+int yaffs_query_init_block_state(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ unsigned *seq_number);
+
+int yaffs_erase_block(struct yaffs_dev *dev, int flash_block);
+
+int yaffs_init_nand(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_packedtags1.c b/fs/yaffs2/yaffs_packedtags1.c
new file mode 100644
index 000000000000..a77f0954fc13
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags1.c
@@ -0,0 +1,53 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_packedtags1.h"
+#include "yportenv.h"
+
+void yaffs_pack_tags1(struct yaffs_packed_tags1 *pt,
+ const struct yaffs_ext_tags *t)
+{
+ pt->chunk_id = t->chunk_id;
+ pt->serial_number = t->serial_number;
+ pt->n_bytes = t->n_bytes;
+ pt->obj_id = t->obj_id;
+ pt->ecc = 0;
+ pt->deleted = (t->is_deleted) ? 0 : 1;
+ pt->unused_stuff = 0;
+ pt->should_be_ff = 0xFFFFFFFF;
+
+}
+
+void yaffs_unpack_tags1(struct yaffs_ext_tags *t,
+ const struct yaffs_packed_tags1 *pt)
+{
+ static const u8 all_ff[] =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff
+ };
+
+ if (memcmp(all_ff, pt, sizeof(struct yaffs_packed_tags1))) {
+ t->block_bad = 0;
+ if (pt->should_be_ff != 0xFFFFFFFF)
+ t->block_bad = 1;
+ t->chunk_used = 1;
+ t->obj_id = pt->obj_id;
+ t->chunk_id = pt->chunk_id;
+ t->n_bytes = pt->n_bytes;
+ t->ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+ t->is_deleted = (pt->deleted) ? 0 : 1;
+ t->serial_number = pt->serial_number;
+ } else {
+ memset(t, 0, sizeof(struct yaffs_ext_tags));
+ }
+}
diff --git a/fs/yaffs2/yaffs_packedtags1.h b/fs/yaffs2/yaffs_packedtags1.h
new file mode 100644
index 000000000000..d6861ff505e4
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags1.h
@@ -0,0 +1,39 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/* This is used to pack YAFFS1 tags, not YAFFS2 tags. */
+
+#ifndef __YAFFS_PACKEDTAGS1_H__
+#define __YAFFS_PACKEDTAGS1_H__
+
+#include "yaffs_guts.h"
+
+struct yaffs_packed_tags1 {
+ unsigned chunk_id:20;
+ unsigned serial_number:2;
+ unsigned n_bytes:10;
+ unsigned obj_id:18;
+ unsigned ecc:12;
+ unsigned deleted:1;
+ unsigned unused_stuff:1;
+ unsigned should_be_ff;
+
+};
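+
+/* Note: the bit fields above are expected to pack into 8 bytes; with
+ * should_be_ff the whole structure is 12 bytes, matching the compile-time
+ * assertions in yaffs_mtdif1.c.
+ */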
+
+void yaffs_pack_tags1(struct yaffs_packed_tags1 *pt,
+ const struct yaffs_ext_tags *t);
+void yaffs_unpack_tags1(struct yaffs_ext_tags *t,
+ const struct yaffs_packed_tags1 *pt);
+#endif
diff --git a/fs/yaffs2/yaffs_packedtags2.c b/fs/yaffs2/yaffs_packedtags2.c
new file mode 100644
index 000000000000..8e7fea3d2860
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags2.c
@@ -0,0 +1,196 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_packedtags2.h"
+#include "yportenv.h"
+#include "yaffs_trace.h"
+#include "yaffs_tagsvalidity.h"
+
+/* This code packs a set of extended tags into a binary structure for
+ * NAND storage
+ */
+
+/* Some of the information is "extra" stuff which can be packed in to
+ * speed up scanning.
+ * This is indicated by having the EXTRA_HEADER_INFO_FLAG set.
+ */
+
+/* Extra flags applied to chunk_id */
+
+#define EXTRA_HEADER_INFO_FLAG 0x80000000
+#define EXTRA_SHRINK_FLAG 0x40000000
+#define EXTRA_SHADOWS_FLAG 0x20000000
+#define EXTRA_SPARE_FLAGS 0x10000000
+
+#define ALL_EXTRA_FLAGS 0xF0000000
+
+/* Also, the top 4 bits of the object ID are set to the object type. */
+#define EXTRA_OBJECT_TYPE_SHIFT (28)
+#define EXTRA_OBJECT_TYPE_MASK ((0x0F) << EXTRA_OBJECT_TYPE_SHIFT)
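+
+/* Resulting packed layout when extra header info is stored (see
+ * yaffs_pack_tags2_tags_only() below):
+ *   chunk_id: bit 31 EXTRA_HEADER_INFO_FLAG, bit 30 shrink, bit 29 shadows,
+ *             bits 27..0 parent object id
+ *   obj_id:   bits 31..28 object type, bits 27..0 object id
+ */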
+
+static void yaffs_dump_packed_tags2_tags_only(const struct
+ yaffs_packed_tags2_tags_only *ptt)
+{
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "packed tags obj %d chunk %d byte %d seq %d",
+ ptt->obj_id, ptt->chunk_id, ptt->n_bytes, ptt->seq_number);
+}
+
+static void yaffs_dump_packed_tags2(const struct yaffs_packed_tags2 *pt)
+{
+ yaffs_dump_packed_tags2_tags_only(&pt->t);
+}
+
+static void yaffs_dump_tags2(const struct yaffs_ext_tags *t)
+{
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "ext.tags eccres %d blkbad %d chused %d obj %d chunk%d byte %d del %d ser %d seq %d",
+ t->ecc_result, t->block_bad, t->chunk_used, t->obj_id,
+ t->chunk_id, t->n_bytes, t->is_deleted, t->serial_number,
+ t->seq_number);
+
+}
+
+void yaffs_pack_tags2_tags_only(struct yaffs_packed_tags2_tags_only *ptt,
+ const struct yaffs_ext_tags *t)
+{
+ ptt->chunk_id = t->chunk_id;
+ ptt->seq_number = t->seq_number;
+ ptt->n_bytes = t->n_bytes;
+ ptt->obj_id = t->obj_id;
+
+ if (t->chunk_id == 0 && t->extra_available) {
+ /* Store the extra header info instead */
+ /* We save the parent object in the chunk_id */
+ ptt->chunk_id = EXTRA_HEADER_INFO_FLAG | t->extra_parent_id;
+ if (t->extra_is_shrink)
+ ptt->chunk_id |= EXTRA_SHRINK_FLAG;
+ if (t->extra_shadows)
+ ptt->chunk_id |= EXTRA_SHADOWS_FLAG;
+
+ ptt->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
+ ptt->obj_id |= (t->extra_obj_type << EXTRA_OBJECT_TYPE_SHIFT);
+
+ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
+ ptt->n_bytes = t->extra_equiv_id;
+ else if (t->extra_obj_type == YAFFS_OBJECT_TYPE_FILE)
+ ptt->n_bytes = t->extra_length;
+ else
+ ptt->n_bytes = 0;
+ }
+
+ yaffs_dump_packed_tags2_tags_only(ptt);
+ yaffs_dump_tags2(t);
+}
+
+void yaffs_pack_tags2(struct yaffs_packed_tags2 *pt,
+ const struct yaffs_ext_tags *t, int tags_ecc)
+{
+ yaffs_pack_tags2_tags_only(&pt->t, t);
+
+ if (tags_ecc)
+ yaffs_ecc_calc_other((unsigned char *)&pt->t,
+ sizeof(struct
+ yaffs_packed_tags2_tags_only),
+ &pt->ecc);
+}
+
+void yaffs_unpack_tags2_tags_only(struct yaffs_ext_tags *t,
+ struct yaffs_packed_tags2_tags_only *ptt)
+{
+
+ memset(t, 0, sizeof(struct yaffs_ext_tags));
+
+ yaffs_init_tags(t);
+
+ if (ptt->seq_number != 0xFFFFFFFF) {
+ t->block_bad = 0;
+ t->chunk_used = 1;
+ t->obj_id = ptt->obj_id;
+ t->chunk_id = ptt->chunk_id;
+ t->n_bytes = ptt->n_bytes;
+ t->is_deleted = 0;
+ t->serial_number = 0;
+ t->seq_number = ptt->seq_number;
+
+ /* Do extra header info stuff */
+
+ if (ptt->chunk_id & EXTRA_HEADER_INFO_FLAG) {
+ t->chunk_id = 0;
+ t->n_bytes = 0;
+
+ t->extra_available = 1;
+ t->extra_parent_id =
+ ptt->chunk_id & (~(ALL_EXTRA_FLAGS));
+ t->extra_is_shrink =
+ (ptt->chunk_id & EXTRA_SHRINK_FLAG) ? 1 : 0;
+ t->extra_shadows =
+ (ptt->chunk_id & EXTRA_SHADOWS_FLAG) ? 1 : 0;
+ t->extra_obj_type =
+ ptt->obj_id >> EXTRA_OBJECT_TYPE_SHIFT;
+ t->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
+
+ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
+ t->extra_equiv_id = ptt->n_bytes;
+ else
+ t->extra_length = ptt->n_bytes;
+ }
+ }
+
+ yaffs_dump_packed_tags2_tags_only(ptt);
+ yaffs_dump_tags2(t);
+
+}
+
+void yaffs_unpack_tags2(struct yaffs_ext_tags *t, struct yaffs_packed_tags2 *pt,
+ int tags_ecc)
+{
+
+ enum yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+
+ if (pt->t.seq_number != 0xFFFFFFFF && tags_ecc) {
+ /* Chunk is in use and we need to do ECC */
+
+ struct yaffs_ecc_other ecc;
+ int result;
+ yaffs_ecc_calc_other((unsigned char *)&pt->t,
+ sizeof(struct
+ yaffs_packed_tags2_tags_only),
+ &ecc);
+ result =
+ yaffs_ecc_correct_other((unsigned char *)&pt->t,
+ sizeof(struct
+ yaffs_packed_tags2_tags_only),
+ &pt->ecc, &ecc);
+ switch (result) {
+ case 0:
+ ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+ break;
+ case 1:
+ ecc_result = YAFFS_ECC_RESULT_FIXED;
+ break;
+ case -1:
+ ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ break;
+ default:
+ ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
+ }
+ }
+
+ yaffs_unpack_tags2_tags_only(t, &pt->t);
+
+ t->ecc_result = ecc_result;
+
+ yaffs_dump_packed_tags2(pt);
+ yaffs_dump_tags2(t);
+}
diff --git a/fs/yaffs2/yaffs_packedtags2.h b/fs/yaffs2/yaffs_packedtags2.h
new file mode 100644
index 000000000000..f3296697bc0c
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags2.h
@@ -0,0 +1,47 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/* This is used to pack YAFFS2 tags, not YAFFS1 tags. */
+
+#ifndef __YAFFS_PACKEDTAGS2_H__
+#define __YAFFS_PACKEDTAGS2_H__
+
+#include "yaffs_guts.h"
+#include "yaffs_ecc.h"
+
+struct yaffs_packed_tags2_tags_only {
+ unsigned seq_number;
+ unsigned obj_id;
+ unsigned chunk_id;
+ unsigned n_bytes;
+};
+
+struct yaffs_packed_tags2 {
+ struct yaffs_packed_tags2_tags_only t;
+ struct yaffs_ecc_other ecc;
+};
+
+/* Full packed tags with ECC, used for oob tags */
+void yaffs_pack_tags2(struct yaffs_packed_tags2 *pt,
+ const struct yaffs_ext_tags *t, int tags_ecc);
+void yaffs_unpack_tags2(struct yaffs_ext_tags *t, struct yaffs_packed_tags2 *pt,
+ int tags_ecc);
+
+/* Only the tags part (no ECC), for use with inband tags */
+void yaffs_pack_tags2_tags_only(struct yaffs_packed_tags2_tags_only *pt,
+ const struct yaffs_ext_tags *t);
+void yaffs_unpack_tags2_tags_only(struct yaffs_ext_tags *t,
+ struct yaffs_packed_tags2_tags_only *pt);
+#endif
diff --git a/fs/yaffs2/yaffs_tagscompat.c b/fs/yaffs2/yaffs_tagscompat.c
new file mode 100644
index 000000000000..7578075d9ac1
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagscompat.c
@@ -0,0 +1,422 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_tagscompat.h"
+#include "yaffs_ecc.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_trace.h"
+
+static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk);
+
+
+/********** Tags ECC calculations *********/
+
+void yaffs_calc_ecc(const u8 * data, struct yaffs_spare *spare)
+{
+ yaffs_ecc_cacl(data, spare->ecc1);
+ yaffs_ecc_cacl(&data[256], spare->ecc2);
+}
+
+void yaffs_calc_tags_ecc(struct yaffs_tags *tags)
+{
+ /* Calculate an ECC over the 8 tag bytes by XORing together the
+ * (1-based) positions of all set bits.
+ */
+
+ unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes;
+ unsigned i, j;
+ unsigned ecc = 0;
+ unsigned bit = 0;
+
+ tags->ecc = 0;
+
+ for (i = 0; i < 8; i++) {
+ for (j = 1; j & 0xff; j <<= 1) {
+ bit++;
+ if (b[i] & j)
+ ecc ^= bit;
+ }
+ }
+
+ tags->ecc = ecc;
+
+}
+
+int yaffs_check_tags_ecc(struct yaffs_tags *tags)
+{
+ unsigned ecc = tags->ecc;
+
+ yaffs_calc_tags_ecc(tags);
+
+ ecc ^= tags->ecc;
+
+ if (ecc && ecc <= 64) {
+ /* TODO: Handle the failure better. Retire? */
+ unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes;
+
+ ecc--;
+
+ b[ecc / 8] ^= (1 << (ecc & 7));
+
+ /* Now recalculate the ECC */
+ yaffs_calc_tags_ecc(tags);
+
+ return 1; /* recovered error */
+ } else if (ecc) {
+ /* Weird ECC failure value */
+ /* TODO: Need to do something here */
+ return -1; /* unrecovered error */
+ }
+
+ return 0;
+}
+
+/********** Tags **********/
+
+static void yaffs_load_tags_to_spare(struct yaffs_spare *spare_ptr,
+ struct yaffs_tags *tags_ptr)
+{
+ union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr;
+
+ yaffs_calc_tags_ecc(tags_ptr);
+
+ spare_ptr->tb0 = tu->as_bytes[0];
+ spare_ptr->tb1 = tu->as_bytes[1];
+ spare_ptr->tb2 = tu->as_bytes[2];
+ spare_ptr->tb3 = tu->as_bytes[3];
+ spare_ptr->tb4 = tu->as_bytes[4];
+ spare_ptr->tb5 = tu->as_bytes[5];
+ spare_ptr->tb6 = tu->as_bytes[6];
+ spare_ptr->tb7 = tu->as_bytes[7];
+}
+
+static void yaffs_get_tags_from_spare(struct yaffs_dev *dev,
+ struct yaffs_spare *spare_ptr,
+ struct yaffs_tags *tags_ptr)
+{
+ union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr;
+ int result;
+
+ tu->as_bytes[0] = spare_ptr->tb0;
+ tu->as_bytes[1] = spare_ptr->tb1;
+ tu->as_bytes[2] = spare_ptr->tb2;
+ tu->as_bytes[3] = spare_ptr->tb3;
+ tu->as_bytes[4] = spare_ptr->tb4;
+ tu->as_bytes[5] = spare_ptr->tb5;
+ tu->as_bytes[6] = spare_ptr->tb6;
+ tu->as_bytes[7] = spare_ptr->tb7;
+
+ result = yaffs_check_tags_ecc(tags_ptr);
+ if (result > 0)
+ dev->n_tags_ecc_fixed++;
+ else if (result < 0)
+ dev->n_tags_ecc_unfixed++;
+}
+
+static void yaffs_spare_init(struct yaffs_spare *spare)
+{
+ memset(spare, 0xFF, sizeof(struct yaffs_spare));
+}
+
+static int yaffs_wr_nand(struct yaffs_dev *dev,
+ int nand_chunk, const u8 * data,
+ struct yaffs_spare *spare)
+{
+ if (nand_chunk < dev->param.start_block * dev->param.chunks_per_block) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>> yaffs chunk %d is not valid",
+ nand_chunk);
+ return YAFFS_FAIL;
+ }
+
+ return dev->param.write_chunk_fn(dev, nand_chunk, data, spare);
+}
+
+static int yaffs_rd_chunk_nand(struct yaffs_dev *dev,
+ int nand_chunk,
+ u8 * data,
+ struct yaffs_spare *spare,
+ enum yaffs_ecc_result *ecc_result,
+ int correct_errors)
+{
+ int ret_val;
+ struct yaffs_spare local_spare;
+
+ if (!spare && data) {
+ /* If we don't have a real spare, then we use a local one. */
+ /* Need this for the calculation of the ecc */
+ spare = &local_spare;
+ }
+
+ if (!dev->param.use_nand_ecc) {
+ ret_val =
+ dev->param.read_chunk_fn(dev, nand_chunk, data, spare);
+ if (data && correct_errors) {
+ /* Do ECC correction */
+ /* Todo handle any errors */
+ int ecc_result1, ecc_result2;
+ u8 calc_ecc[3];
+
+ yaffs_ecc_cacl(data, calc_ecc);
+ ecc_result1 =
+ yaffs_ecc_correct(data, spare->ecc1, calc_ecc);
+ yaffs_ecc_cacl(&data[256], calc_ecc);
+ ecc_result2 =
+ yaffs_ecc_correct(&data[256], spare->ecc2,
+ calc_ecc);
+
+ if (ecc_result1 > 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error fix performed on chunk %d:0",
+ nand_chunk);
+ dev->n_ecc_fixed++;
+ } else if (ecc_result1 < 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error unfixed on chunk %d:0",
+ nand_chunk);
+ dev->n_ecc_unfixed++;
+ }
+
+ if (ecc_result2 > 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error fix performed on chunk %d:1",
+ nand_chunk);
+ dev->n_ecc_fixed++;
+ } else if (ecc_result2 < 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error unfixed on chunk %d:1",
+ nand_chunk);
+ dev->n_ecc_unfixed++;
+ }
+
+ if (ecc_result1 || ecc_result2) {
+ /* We had a data problem on this page */
+ yaffs_handle_rd_data_error(dev, nand_chunk);
+ }
+
+ if (ecc_result1 < 0 || ecc_result2 < 0)
+ *ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ else if (ecc_result1 > 0 || ecc_result2 > 0)
+ *ecc_result = YAFFS_ECC_RESULT_FIXED;
+ else
+ *ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+ }
+ } else {
+ /* Must allocate enough memory for spare+2*sizeof(int) */
+ /* for ecc results from device. */
+ struct yaffs_nand_spare nspare;
+
+ memset(&nspare, 0, sizeof(nspare));
+
+ ret_val = dev->param.read_chunk_fn(dev, nand_chunk, data,
+ (struct yaffs_spare *)
+ &nspare);
+ memcpy(spare, &nspare, sizeof(struct yaffs_spare));
+ if (data && correct_errors) {
+ if (nspare.eccres1 > 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>mtd ecc error fix performed on chunk %d:0",
+ nand_chunk);
+ } else if (nspare.eccres1 < 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>mtd ecc error unfixed on chunk %d:0",
+ nand_chunk);
+ }
+
+ if (nspare.eccres2 > 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>mtd ecc error fix performed on chunk %d:1",
+ nand_chunk);
+ } else if (nspare.eccres2 < 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>mtd ecc error unfixed on chunk %d:1",
+ nand_chunk);
+ }
+
+ if (nspare.eccres1 || nspare.eccres2) {
+ /* We had a data problem on this page */
+ yaffs_handle_rd_data_error(dev, nand_chunk);
+ }
+
+ if (nspare.eccres1 < 0 || nspare.eccres2 < 0)
+ *ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ else if (nspare.eccres1 > 0 || nspare.eccres2 > 0)
+ *ecc_result = YAFFS_ECC_RESULT_FIXED;
+ else
+ *ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+
+ }
+ }
+ return ret_val;
+}
+
+/*
+ * Functions for robustness handling
+ */
+
+static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk)
+{
+ int flash_block = nand_chunk / dev->param.chunks_per_block;
+
+ /* Mark the block for retirement */
+ yaffs_get_block_info(dev,
+ flash_block + dev->block_offset)->needs_retiring =
+ 1;
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>>Block %d marked for retirement",
+ flash_block);
+
+ /* TODO:
+ * Just do a garbage collection on the affected block
+ * then retire the block
+ * NB recursion
+ */
+}
+
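+/*
+ * Write a chunk using YAFFS1-style (compatibility) tags packed into the
+ * spare area. A deleted chunk is written with page_status cleared to zero;
+ * otherwise the tags (with their ECC) and, when YAFFS is doing its own
+ * ECC, the data ECC are loaded into the spare before writing.
+ */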
+int yaffs_tags_compat_wr(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 * data, const struct yaffs_ext_tags *ext_tags)
+{
+ struct yaffs_spare spare;
+ struct yaffs_tags tags;
+
+ yaffs_spare_init(&spare);
+
+ if (ext_tags->is_deleted)
+ spare.page_status = 0;
+ else {
+ tags.obj_id = ext_tags->obj_id;
+ tags.chunk_id = ext_tags->chunk_id;
+
+ tags.n_bytes_lsb = ext_tags->n_bytes & 0x3ff;
+
+ if (dev->data_bytes_per_chunk >= 1024)
+ tags.n_bytes_msb = (ext_tags->n_bytes >> 10) & 3;
+ else
+ tags.n_bytes_msb = 3;
+
+ tags.serial_number = ext_tags->serial_number;
+
+ if (!dev->param.use_nand_ecc && data)
+ yaffs_calc_ecc(data, &spare);
+
+ yaffs_load_tags_to_spare(&spare, &tags);
+
+ }
+
+ return yaffs_wr_nand(dev, nand_chunk, data, &spare);
+}
+
+int yaffs_tags_compat_rd(struct yaffs_dev *dev,
+ int nand_chunk,
+ u8 * data, struct yaffs_ext_tags *ext_tags)
+{
+
+ struct yaffs_spare spare;
+ struct yaffs_tags tags;
+ enum yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
+
+ static struct yaffs_spare spare_ff;
+ static int init;
+
+ if (!init) {
+ memset(&spare_ff, 0xFF, sizeof(spare_ff));
+ init = 1;
+ }
+
+ if (yaffs_rd_chunk_nand(dev, nand_chunk, data, &spare, &ecc_result, 1)) {
+ /* ext_tags may be NULL */
+ if (ext_tags) {
+
+ int deleted =
+ (hweight8(spare.page_status) < 7) ? 1 : 0;
+
+ ext_tags->is_deleted = deleted;
+ ext_tags->ecc_result = ecc_result;
+ ext_tags->block_bad = 0; /* We're reading it */
+ /* therefore it is not a bad block */
+ ext_tags->chunk_used =
+ (memcmp(&spare_ff, &spare, sizeof(spare_ff)) !=
+ 0) ? 1 : 0;
+
+ if (ext_tags->chunk_used) {
+ yaffs_get_tags_from_spare(dev, &spare, &tags);
+
+ ext_tags->obj_id = tags.obj_id;
+ ext_tags->chunk_id = tags.chunk_id;
+ ext_tags->n_bytes = tags.n_bytes_lsb;
+
+ if (dev->data_bytes_per_chunk >= 1024)
+ ext_tags->n_bytes |=
+ (((unsigned)tags.
+ n_bytes_msb) << 10);
+
+ ext_tags->serial_number = tags.serial_number;
+ }
+ }
+
+ return YAFFS_OK;
+ } else {
+ return YAFFS_FAIL;
+ }
+}
+
+int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int flash_block)
+{
+
+ struct yaffs_spare spare;
+
+ memset(&spare, 0xff, sizeof(struct yaffs_spare));
+
+ spare.block_status = 'Y';
+
+ yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block, NULL,
+ &spare);
+ yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block + 1,
+ NULL, &spare);
+
+ return YAFFS_OK;
+
+}
+
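+/*
+ * Derive the block state from the spares of the block's first two chunks:
+ * if the ANDed block status bytes have fewer than seven bits set the block
+ * is treated as marked bad (dead); an all-0xFF first spare means the block
+ * is erased (empty); otherwise it needs scanning.
+ */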
+int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ u32 * seq_number)
+{
+
+ struct yaffs_spare spare0, spare1;
+ static struct yaffs_spare spare_ff;
+ static int init;
+ enum yaffs_ecc_result dummy;
+
+ if (!init) {
+ memset(&spare_ff, 0xFF, sizeof(spare_ff));
+ init = 1;
+ }
+
+ *seq_number = 0;
+
+ yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block, NULL,
+ &spare0, &dummy, 1);
+ yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block + 1,
+ NULL, &spare1, &dummy, 1);
+
+ if (hweight8(spare0.block_status & spare1.block_status) < 7)
+ *state = YAFFS_BLOCK_STATE_DEAD;
+ else if (memcmp(&spare_ff, &spare0, sizeof(spare_ff)) == 0)
+ *state = YAFFS_BLOCK_STATE_EMPTY;
+ else
+ *state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_tagscompat.h b/fs/yaffs2/yaffs_tagscompat.h
new file mode 100644
index 000000000000..8cd35dcd3cab
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagscompat.h
@@ -0,0 +1,36 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_TAGSCOMPAT_H__
+#define __YAFFS_TAGSCOMPAT_H__
+
+#include "yaffs_guts.h"
+int yaffs_tags_compat_wr(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 * data, const struct yaffs_ext_tags *tags);
+int yaffs_tags_compat_rd(struct yaffs_dev *dev,
+ int nand_chunk,
+ u8 * data, struct yaffs_ext_tags *tags);
+int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int block_no);
+int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ u32 * seq_number);
+
+void yaffs_calc_tags_ecc(struct yaffs_tags *tags);
+int yaffs_check_tags_ecc(struct yaffs_tags *tags);
+int yaffs_count_bits(u8 byte);
+
+#endif
diff --git a/fs/yaffs2/yaffs_tagsvalidity.c b/fs/yaffs2/yaffs_tagsvalidity.c
new file mode 100644
index 000000000000..4358d79d4bec
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagsvalidity.c
@@ -0,0 +1,27 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_tagsvalidity.h"
+
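+/*
+ * Extended tags carry two fixed marker words. yaffs_init_tags() sets them
+ * and yaffs_validate_tags() checks both, so tags structures that were
+ * never initialised (or have been trashed) can be detected.
+ */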
+void yaffs_init_tags(struct yaffs_ext_tags *tags)
+{
+ memset(tags, 0, sizeof(struct yaffs_ext_tags));
+ tags->validity0 = 0xAAAAAAAA;
+ tags->validity1 = 0x55555555;
+}
+
+int yaffs_validate_tags(struct yaffs_ext_tags *tags)
+{
+ return (tags->validity0 == 0xAAAAAAAA && tags->validity1 == 0x55555555);
+
+}
diff --git a/fs/yaffs2/yaffs_tagsvalidity.h b/fs/yaffs2/yaffs_tagsvalidity.h
new file mode 100644
index 000000000000..36a021fc8fa8
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagsvalidity.h
@@ -0,0 +1,23 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_TAGS_VALIDITY_H__
+#define __YAFFS_TAGS_VALIDITY_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_init_tags(struct yaffs_ext_tags *tags);
+int yaffs_validate_tags(struct yaffs_ext_tags *tags);
+#endif
diff --git a/fs/yaffs2/yaffs_trace.h b/fs/yaffs2/yaffs_trace.h
new file mode 100644
index 000000000000..6273dbf9f63f
--- /dev/null
+++ b/fs/yaffs2/yaffs_trace.h
@@ -0,0 +1,57 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YTRACE_H__
+#define __YTRACE_H__
+
+extern unsigned int yaffs_trace_mask;
+extern unsigned int yaffs_wr_attempts;
+
+/*
+ * Tracing flags.
+ * The flags masked in YAFFS_TRACE_ALWAYS are always traced.
+ */
+
+#define YAFFS_TRACE_OS 0x00000002
+#define YAFFS_TRACE_ALLOCATE 0x00000004
+#define YAFFS_TRACE_SCAN 0x00000008
+#define YAFFS_TRACE_BAD_BLOCKS 0x00000010
+#define YAFFS_TRACE_ERASE 0x00000020
+#define YAFFS_TRACE_GC 0x00000040
+#define YAFFS_TRACE_WRITE 0x00000080
+#define YAFFS_TRACE_TRACING 0x00000100
+#define YAFFS_TRACE_DELETION 0x00000200
+#define YAFFS_TRACE_BUFFERS 0x00000400
+#define YAFFS_TRACE_NANDACCESS 0x00000800
+#define YAFFS_TRACE_GC_DETAIL 0x00001000
+#define YAFFS_TRACE_SCAN_DEBUG 0x00002000
+#define YAFFS_TRACE_MTD 0x00004000
+#define YAFFS_TRACE_CHECKPOINT 0x00008000
+
+#define YAFFS_TRACE_VERIFY 0x00010000
+#define YAFFS_TRACE_VERIFY_NAND 0x00020000
+#define YAFFS_TRACE_VERIFY_FULL 0x00040000
+#define YAFFS_TRACE_VERIFY_ALL 0x000F0000
+
+#define YAFFS_TRACE_SYNC 0x00100000
+#define YAFFS_TRACE_BACKGROUND 0x00200000
+#define YAFFS_TRACE_LOCK 0x00400000
+#define YAFFS_TRACE_MOUNT 0x00800000
+
+#define YAFFS_TRACE_ERROR 0x40000000
+#define YAFFS_TRACE_BUG 0x80000000
+#define YAFFS_TRACE_ALWAYS 0xF0000000
+
+#endif
diff --git a/fs/yaffs2/yaffs_verify.c b/fs/yaffs2/yaffs_verify.c
new file mode 100644
index 000000000000..738c7f69a5ec
--- /dev/null
+++ b/fs/yaffs2/yaffs_verify.c
@@ -0,0 +1,535 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_verify.h"
+#include "yaffs_trace.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_nand.h"
+
+int yaffs_skip_verification(struct yaffs_dev *dev)
+{
+ dev = dev;
+ return !(yaffs_trace_mask &
+ (YAFFS_TRACE_VERIFY | YAFFS_TRACE_VERIFY_FULL));
+}
+
+static int yaffs_skip_full_verification(struct yaffs_dev *dev)
+{
+ dev = dev;
+ return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_FULL));
+}
+
+static int yaffs_skip_nand_verification(struct yaffs_dev *dev)
+{
+ dev = dev;
+ return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_NAND));
+}
+
+static const char *block_state_name[] = {
+ "Unknown",
+ "Needs scanning",
+ "Scanning",
+ "Empty",
+ "Allocating",
+ "Full",
+ "Dirty",
+ "Checkpoint",
+ "Collecting",
+ "Dead"
+};
+
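+/*
+ * Verify one block's runtime state: report undefined or illegal block
+ * states, pages_in_use / soft_del_pages values out of range, and any
+ * mismatch between pages_in_use and the chunk-use bitmap.
+ */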
+void yaffs_verify_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi, int n)
+{
+ int actually_used;
+ int in_use;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ /* Report illegal runtime states */
+ if (bi->block_state >= YAFFS_NUMBER_OF_BLOCK_STATES)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Block %d has undefined state %d",
+ n, bi->block_state);
+
+ switch (bi->block_state) {
+ case YAFFS_BLOCK_STATE_UNKNOWN:
+ case YAFFS_BLOCK_STATE_SCANNING:
+ case YAFFS_BLOCK_STATE_NEEDS_SCANNING:
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Block %d has bad run-state %s",
+ n, block_state_name[bi->block_state]);
+ }
+
+ /* Check pages in use and soft deletions are legal */
+
+ actually_used = bi->pages_in_use - bi->soft_del_pages;
+
+ if (bi->pages_in_use < 0
+ || bi->pages_in_use > dev->param.chunks_per_block
+ || bi->soft_del_pages < 0
+ || bi->soft_del_pages > dev->param.chunks_per_block
+ || actually_used < 0 || actually_used > dev->param.chunks_per_block)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Block %d has illegal values pages_in_used %d soft_del_pages %d",
+ n, bi->pages_in_use, bi->soft_del_pages);
+
+ /* Check chunk bitmap legal */
+ in_use = yaffs_count_chunk_bits(dev, n);
+ if (in_use != bi->pages_in_use)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Block %d has inconsistent values pages_in_use %d counted chunk bits %d",
+ n, bi->pages_in_use, in_use);
+
+}
+
+void yaffs_verify_collected_blk(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi, int n)
+{
+ yaffs_verify_blk(dev, bi, n);
+
+ /* After collection the block should be in the erased state */
+
+ if (bi->block_state != YAFFS_BLOCK_STATE_COLLECTING &&
+ bi->block_state != YAFFS_BLOCK_STATE_EMPTY) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Block %d is in state %d after gc, should be erased",
+ n, bi->block_state);
+ }
+}
+
+void yaffs_verify_blocks(struct yaffs_dev *dev)
+{
+ int i;
+ int state_count[YAFFS_NUMBER_OF_BLOCK_STATES];
+ int illegal_states = 0;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ memset(state_count, 0, sizeof(state_count));
+
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, i);
+ yaffs_verify_blk(dev, bi, i);
+
+ if (bi->block_state < YAFFS_NUMBER_OF_BLOCK_STATES)
+ state_count[bi->block_state]++;
+ else
+ illegal_states++;
+ }
+
+ yaffs_trace(YAFFS_TRACE_VERIFY, "Block summary");
+
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "%d blocks have illegal states",
+ illegal_states);
+ if (state_count[YAFFS_BLOCK_STATE_ALLOCATING] > 1)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Too many allocating blocks");
+
+ for (i = 0; i < YAFFS_NUMBER_OF_BLOCK_STATES; i++)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "%s %d blocks",
+ block_state_name[i], state_count[i]);
+
+ if (dev->blocks_in_checkpt != state_count[YAFFS_BLOCK_STATE_CHECKPOINT])
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Checkpoint block count wrong dev %d count %d",
+ dev->blocks_in_checkpt,
+ state_count[YAFFS_BLOCK_STATE_CHECKPOINT]);
+
+ if (dev->n_erased_blocks != state_count[YAFFS_BLOCK_STATE_EMPTY])
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Erased block count wrong dev %d count %d",
+ dev->n_erased_blocks,
+ state_count[YAFFS_BLOCK_STATE_EMPTY]);
+
+ if (state_count[YAFFS_BLOCK_STATE_COLLECTING] > 1)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Too many collecting blocks %d (max is 1)",
+ state_count[YAFFS_BLOCK_STATE_COLLECTING]);
+}
+
+/*
+ * Verify the object header. oh must be valid, but obj and tags may be NULL in which
+ * case those tests will not be performed.
+ */
+void yaffs_verify_oh(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh,
+ struct yaffs_ext_tags *tags, int parent_check)
+{
+ if (obj && yaffs_skip_verification(obj->my_dev))
+ return;
+
+ if (!(tags && obj && oh)) {
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Verifying object header tags %p obj %p oh %p",
+ tags, obj, oh);
+ return;
+ }
+
+ if (oh->type <= YAFFS_OBJECT_TYPE_UNKNOWN ||
+ oh->type > YAFFS_OBJECT_TYPE_MAX)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header type is illegal value 0x%x",
+ tags->obj_id, oh->type);
+
+ if (tags->obj_id != obj->obj_id)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header mismatch obj_id %d",
+ tags->obj_id, obj->obj_id);
+
+ /*
+ * Check that the object's parent ids match if parent_check requested.
+ *
+ * Tests do not apply to the root object.
+ */
+
+ if (parent_check && tags->obj_id > 1 && !obj->parent)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header mismatch parent_id %d obj->parent is NULL",
+ tags->obj_id, oh->parent_obj_id);
+
+ if (parent_check && obj->parent &&
+ oh->parent_obj_id != obj->parent->obj_id &&
+ (oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED ||
+ obj->parent->obj_id != YAFFS_OBJECTID_DELETED))
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header mismatch parent_id %d parent_obj_id %d",
+ tags->obj_id, oh->parent_obj_id,
+ obj->parent->obj_id);
+
+ if (tags->obj_id > 1 && oh->name[0] == 0) /* Null name */
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header name is NULL",
+ obj->obj_id);
+
+ if (tags->obj_id > 1 && ((u8) (oh->name[0])) == 0xff) /* Trashed name */
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header name is 0xFF",
+ obj->obj_id);
+}
+
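+/*
+ * Verify a file object: walk the tnode tree and, for every chunk it
+ * records, read the tags back from NAND and check that the object id and
+ * chunk id match. Skipped entirely when NAND verification is disabled.
+ */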
+void yaffs_verify_file(struct yaffs_obj *obj)
+{
+ int required_depth;
+ int actual_depth;
+ u32 last_chunk;
+ u32 x;
+ u32 i;
+ struct yaffs_dev *dev;
+ struct yaffs_ext_tags tags;
+ struct yaffs_tnode *tn;
+ u32 obj_id;
+
+ if (!obj)
+ return;
+
+ if (yaffs_skip_verification(obj->my_dev))
+ return;
+
+ dev = obj->my_dev;
+ obj_id = obj->obj_id;
+
+ /* Check file size is consistent with tnode depth */
+ last_chunk =
+ obj->variant.file_variant.file_size / dev->data_bytes_per_chunk + 1;
+ x = last_chunk >> YAFFS_TNODES_LEVEL0_BITS;
+ required_depth = 0;
+ while (x > 0) {
+ x >>= YAFFS_TNODES_INTERNAL_BITS;
+ required_depth++;
+ }
+
+ actual_depth = obj->variant.file_variant.top_level;
+
+ /* Check that the chunks in the tnode tree are all correct.
+ * We do this by scanning through the tnode tree and
+ * checking the tags for every chunk match.
+ */
+
+ if (yaffs_skip_nand_verification(dev))
+ return;
+
+ for (i = 1; i <= last_chunk; i++) {
+ tn = yaffs_find_tnode_0(dev, &obj->variant.file_variant, i);
+
+ if (tn) {
+ u32 the_chunk = yaffs_get_group_base(dev, tn, i);
+ if (the_chunk > 0) {
+ yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
+ &tags);
+ if (tags.obj_id != obj_id || tags.chunk_id != i)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Object %d chunk_id %d NAND mismatch chunk %d tags (%d:%d)",
+ obj_id, i, the_chunk,
+ tags.obj_id, tags.chunk_id);
+ }
+ }
+ }
+}
+
+void yaffs_verify_link(struct yaffs_obj *obj)
+{
+ if (obj && yaffs_skip_verification(obj->my_dev))
+ return;
+
+ /* Verify sane equivalent object */
+}
+
+void yaffs_verify_symlink(struct yaffs_obj *obj)
+{
+ if (obj && yaffs_skip_verification(obj->my_dev))
+ return;
+
+ /* Verify symlink string */
+}
+
+void yaffs_verify_special(struct yaffs_obj *obj)
+{
+ if (obj && yaffs_skip_verification(obj->my_dev))
+ return;
+}
+
+void yaffs_verify_obj(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev;
+
+ u32 chunk_min;
+ u32 chunk_max;
+
+ u32 chunk_id_ok;
+ u32 chunk_in_range;
+ u32 chunk_wrongly_deleted;
+ u32 chunk_valid;
+
+ if (!obj)
+ return;
+
+ if (obj->being_created)
+ return;
+
+ dev = obj->my_dev;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ /* Check sane object header chunk */
+
+ chunk_min = dev->internal_start_block * dev->param.chunks_per_block;
+ chunk_max =
+ (dev->internal_end_block + 1) * dev->param.chunks_per_block - 1;
+
+ chunk_in_range = (((unsigned)(obj->hdr_chunk)) >= chunk_min &&
+ ((unsigned)(obj->hdr_chunk)) <= chunk_max);
+ chunk_id_ok = chunk_in_range || (obj->hdr_chunk == 0);
+ chunk_valid = chunk_in_range &&
+ yaffs_check_chunk_bit(dev,
+ obj->hdr_chunk / dev->param.chunks_per_block,
+ obj->hdr_chunk % dev->param.chunks_per_block);
+ chunk_wrongly_deleted = chunk_in_range && !chunk_valid;
+
+ if (!obj->fake && (!chunk_id_ok || chunk_wrongly_deleted))
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d has chunk_id %d %s %s",
+ obj->obj_id, obj->hdr_chunk,
+ chunk_id_ok ? "" : ",out of range",
+ chunk_wrongly_deleted ? ",marked as deleted" : "");
+
+ if (chunk_valid && !yaffs_skip_nand_verification(dev)) {
+ struct yaffs_ext_tags tags;
+ struct yaffs_obj_hdr *oh;
+ u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
+
+ oh = (struct yaffs_obj_hdr *)buffer;
+
+ yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, buffer, &tags);
+
+ yaffs_verify_oh(obj, oh, &tags, 1);
+
+ yaffs_release_temp_buffer(dev, buffer, __LINE__);
+ }
+
+ /* Verify it has a parent */
+ if (obj && !obj->fake && (!obj->parent || obj->parent->my_dev != dev)) {
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d has parent pointer %p which does not look like an object",
+ obj->obj_id, obj->parent);
+ }
+
+ /* Verify parent is a directory */
+ if (obj->parent
+ && obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d's parent is not a directory (type %d)",
+ obj->obj_id, obj->parent->variant_type);
+ }
+
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ yaffs_verify_file(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ yaffs_verify_symlink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ yaffs_verify_dir(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ yaffs_verify_link(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ yaffs_verify_special(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ default:
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d has illegaltype %d",
+ obj->obj_id, obj->variant_type);
+ break;
+ }
+}
+
+void yaffs_verify_objects(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ int i;
+ struct list_head *lh;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ /* Iterate through the objects in each hash entry */
+
+ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+ list_for_each(lh, &dev->obj_bucket[i].list) {
+ if (lh) {
+ obj =
+ list_entry(lh, struct yaffs_obj, hash_link);
+ yaffs_verify_obj(obj);
+ }
+ }
+ }
+}
+
+void yaffs_verify_obj_in_dir(struct yaffs_obj *obj)
+{
+ struct list_head *lh;
+ struct yaffs_obj *list_obj;
+
+ int count = 0;
+
+ if (!obj) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "No object to verify");
+ YBUG();
+ return;
+ }
+
+ if (yaffs_skip_verification(obj->my_dev))
+ return;
+
+ if (!obj->parent) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "Object does not have parent" );
+ YBUG();
+ return;
+ }
+
+ if (obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "Parent is not directory");
+ YBUG();
+ }
+
+ /* Iterate through the objects in each hash entry */
+
+ list_for_each(lh, &obj->parent->variant.dir_variant.children) {
+ if (lh) {
+ list_obj = list_entry(lh, struct yaffs_obj, siblings);
+ yaffs_verify_obj(list_obj);
+ if (obj == list_obj)
+ count++;
+ }
+ }
+
+ if (count != 1) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Object in directory %d times",
+ count);
+ YBUG();
+ }
+}
+
+void yaffs_verify_dir(struct yaffs_obj *directory)
+{
+ struct list_head *lh;
+ struct yaffs_obj *list_obj;
+
+ if (!directory) {
+ YBUG();
+ return;
+ }
+
+ if (yaffs_skip_full_verification(directory->my_dev))
+ return;
+
+ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Directory has wrong type: %d",
+ directory->variant_type);
+ YBUG();
+ }
+
+ /* Iterate through the objects in each hash entry */
+
+ list_for_each(lh, &directory->variant.dir_variant.children) {
+ if (lh) {
+ list_obj = list_entry(lh, struct yaffs_obj, siblings);
+ if (list_obj->parent != directory) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Object in directory list has wrong parent %p",
+ list_obj->parent);
+ YBUG();
+ }
+ yaffs_verify_obj_in_dir(list_obj);
+ }
+ }
+}
+
+static int yaffs_free_verification_failures;
+
+void yaffs_verify_free_chunks(struct yaffs_dev *dev)
+{
+ int counted;
+ int difference;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ counted = yaffs_count_free_chunks(dev);
+
+ difference = dev->n_free_chunks - counted;
+
+ if (difference) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Freechunks verification failure %d %d %d",
+ dev->n_free_chunks, counted, difference);
+ yaffs_free_verification_failures++;
+ }
+}
+
+int yaffs_verify_file_sane(struct yaffs_obj *in)
+{
+ in = in;
+ return YAFFS_OK;
+}
+
diff --git a/fs/yaffs2/yaffs_verify.h b/fs/yaffs2/yaffs_verify.h
new file mode 100644
index 000000000000..cc6f88999305
--- /dev/null
+++ b/fs/yaffs2/yaffs_verify.h
@@ -0,0 +1,43 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_VERIFY_H__
+#define __YAFFS_VERIFY_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_verify_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi,
+ int n);
+void yaffs_verify_collected_blk(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi, int n);
+void yaffs_verify_blocks(struct yaffs_dev *dev);
+
+void yaffs_verify_oh(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh,
+ struct yaffs_ext_tags *tags, int parent_check);
+void yaffs_verify_file(struct yaffs_obj *obj);
+void yaffs_verify_link(struct yaffs_obj *obj);
+void yaffs_verify_symlink(struct yaffs_obj *obj);
+void yaffs_verify_special(struct yaffs_obj *obj);
+void yaffs_verify_obj(struct yaffs_obj *obj);
+void yaffs_verify_objects(struct yaffs_dev *dev);
+void yaffs_verify_obj_in_dir(struct yaffs_obj *obj);
+void yaffs_verify_dir(struct yaffs_obj *directory);
+void yaffs_verify_free_chunks(struct yaffs_dev *dev);
+
+int yaffs_verify_file_sane(struct yaffs_obj *obj);
+
+int yaffs_skip_verification(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_vfs.c b/fs/yaffs2/yaffs_vfs.c
new file mode 100644
index 000000000000..d5b875314005
--- /dev/null
+++ b/fs/yaffs2/yaffs_vfs.c
@@ -0,0 +1,2792 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ * Acknowledgements:
+ * Luc van OostenRyck for numerous patches.
+ * Nick Bane for numerous patches.
+ * Nick Bane for 2.5/2.6 integration.
+ * Andras Toth for mknod rdev issue.
+ * Michael Fischer for finding the problem with inode inconsistency.
+ * Some code bodily lifted from JFFS
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ *
+ * This is the file system front-end to YAFFS that hooks it up to
+ * the VFS.
+ *
+ * Special notes:
+ * >> 2.4: sb->u.generic_sbp points to the struct yaffs_dev associated with
+ * this superblock
+ * >> 2.6: sb->s_fs_info points to the struct yaffs_dev associated with this
+ * superblock
+ * >> inode->u.generic_ip points to the associated struct yaffs_obj.
+ */
+
+/*
+ * NB There are two variants of Linux VFS glue code. This variant supports
+ * a single version and should not include any multi-version code.
+ */
+#include <linux/version.h>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/smp_lock.h>
+#include <linux/pagemap.h>
+#include <linux/mtd/mtd.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/namei.h>
+#include <linux/exportfs.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/freezer.h>
+
+#include <asm/div64.h>
+
+#include <linux/statfs.h>
+
+#define UnlockPage(p) unlock_page(p)
+#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
+
+#define yaffs_devname(sb, buf) bdevname(sb->s_bdev, buf)
+
+#define YPROC_ROOT NULL
+
+#define Y_INIT_TIMER(a) init_timer_on_stack(a)
+
+#define WRITE_SIZE_STR "writesize"
+#define WRITE_SIZE(mtd) ((mtd)->writesize)
+
+static uint32_t YCALCBLOCKS(uint64_t partition_size, uint32_t block_size)
+{
+ uint64_t result = partition_size;
+ do_div(result, block_size);
+ return (uint32_t) result;
+}
+
+#include <linux/uaccess.h>
+#include <linux/mtd/mtd.h>
+
+#include "yportenv.h"
+#include "yaffs_trace.h"
+#include "yaffs_guts.h"
+#include "yaffs_attribs.h"
+
+#include "yaffs_linux.h"
+
+#include "yaffs_mtdif.h"
+#include "yaffs_mtdif1.h"
+#include "yaffs_mtdif2.h"
+
+unsigned int yaffs_trace_mask = YAFFS_TRACE_BAD_BLOCKS | YAFFS_TRACE_ALWAYS;
+unsigned int yaffs_wr_attempts = YAFFS_WR_ATTEMPTS;
+unsigned int yaffs_auto_checkpoint = 1;
+unsigned int yaffs_gc_control = 1;
+unsigned int yaffs_bg_enable = 1;
+
+/* Module Parameters */
+module_param(yaffs_trace_mask, uint, 0644);
+module_param(yaffs_wr_attempts, uint, 0644);
+module_param(yaffs_auto_checkpoint, uint, 0644);
+module_param(yaffs_gc_control, uint, 0644);
+module_param(yaffs_bg_enable, uint, 0644);
+
+
+#define yaffs_inode_to_obj_lv(iptr) ((iptr)->i_private)
+#define yaffs_inode_to_obj(iptr) ((struct yaffs_obj *)(yaffs_inode_to_obj_lv(iptr)))
+#define yaffs_dentry_to_obj(dptr) yaffs_inode_to_obj((dptr)->d_inode)
+#define yaffs_super_to_dev(sb) ((struct yaffs_dev *)sb->s_fs_info)
+
+#define update_dir_time(dir) do {\
+ (dir)->i_ctime = (dir)->i_mtime = CURRENT_TIME; \
+ } while(0)
+
+
+static unsigned yaffs_gc_control_callback(struct yaffs_dev *dev)
+{
+ return yaffs_gc_control;
+}
+
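+/*
+ * The "gross lock" is a single per-device mutex serialising all YAFFS
+ * core operations for that device. It must be dropped before calling back
+ * into the VFS (e.g. yaffs_get_inode()), otherwise a deadlock can occur.
+ */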
+static void yaffs_gross_lock(struct yaffs_dev *dev)
+{
+ yaffs_trace(YAFFS_TRACE_LOCK, "yaffs locking %p", current);
+ mutex_lock(&(yaffs_dev_to_lc(dev)->gross_lock));
+ yaffs_trace(YAFFS_TRACE_LOCK, "yaffs locked %p", current);
+}
+
+static void yaffs_gross_unlock(struct yaffs_dev *dev)
+{
+ yaffs_trace(YAFFS_TRACE_LOCK, "yaffs unlocking %p", current);
+ mutex_unlock(&(yaffs_dev_to_lc(dev)->gross_lock));
+}
+
+static void yaffs_fill_inode_from_obj(struct inode *inode,
+ struct yaffs_obj *obj);
+
+static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino)
+{
+ struct inode *inode;
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_iget for %lu", ino);
+
+ inode = iget_locked(sb, ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
+
+ /* NB This is called as a side effect of other functions, but
+ * we had to release the lock to prevent deadlocks, so
+ * need to lock again.
+ */
+
+ yaffs_gross_lock(dev);
+
+ obj = yaffs_find_by_number(dev, inode->i_ino);
+
+ yaffs_fill_inode_from_obj(inode, obj);
+
+ yaffs_gross_unlock(dev);
+
+ unlock_new_inode(inode);
+ return inode;
+}
+
+struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
+ struct yaffs_obj *obj)
+{
+ struct inode *inode;
+
+ if (!sb) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_get_inode for NULL super_block!!");
+ return NULL;
+
+ }
+
+ if (!obj) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_get_inode for NULL object!!");
+ return NULL;
+
+ }
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_get_inode for object %d",
+ obj->obj_id);
+
+ inode = yaffs_iget(sb, obj->obj_id);
+ if (IS_ERR(inode))
+ return NULL;
+
+ /* NB Side effect: iget calls back to yaffs_read_inode(). */
+ /* iget also increments the inode's i_count */
+ /* NB You can't be holding gross_lock or deadlock will happen! */
+
+ return inode;
+}
+
+static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+ dev_t rdev)
+{
+ struct inode *inode;
+
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_dev *dev;
+
+ struct yaffs_obj *parent = yaffs_inode_to_obj(dir);
+
+ int error = -ENOSPC;
+ uid_t uid = current->cred->fsuid;
+ gid_t gid =
+ (dir->i_mode & S_ISGID) ? dir->i_gid : current->cred->fsgid;
+
+ if ((dir->i_mode & S_ISGID) && S_ISDIR(mode))
+ mode |= S_ISGID;
+
+ if (parent) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_mknod: parent object %d type %d",
+ parent->obj_id, parent->variant_type);
+ } else {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_mknod: could not get parent object");
+ return -EPERM;
+ }
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_mknod: making oject for %s, mode %x dev %x",
+ dentry->d_name.name, mode, rdev);
+
+ dev = parent->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ switch (mode & S_IFMT) {
+ default:
+ /* Special (socket, fifo, device...) */
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making special");
+ obj =
+ yaffs_create_special(parent, dentry->d_name.name, mode, uid,
+ gid, old_encode_dev(rdev));
+ break;
+ case S_IFREG: /* file */
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making file");
+ obj = yaffs_create_file(parent, dentry->d_name.name, mode, uid,
+ gid);
+ break;
+ case S_IFDIR: /* directory */
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making directory");
+ obj = yaffs_create_dir(parent, dentry->d_name.name, mode,
+ uid, gid);
+ break;
+ case S_IFLNK: /* symlink */
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making symlink");
+ obj = NULL; /* Do we ever get here? */
+ break;
+ }
+
+ /* Can not call yaffs_get_inode() with gross lock held */
+ yaffs_gross_unlock(dev);
+
+ if (obj) {
+ inode = yaffs_get_inode(dir->i_sb, mode, rdev, obj);
+ d_instantiate(dentry, inode);
+ update_dir_time(dir);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_mknod created object %d count = %d",
+ obj->obj_id, atomic_read(&inode->i_count));
+ error = 0;
+ yaffs_fill_inode_from_obj(dir, parent);
+ } else {
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod failed making object");
+ error = -ENOMEM;
+ }
+
+ return error;
+}
+
+static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+ return yaffs_mknod(dir, dentry, mode | S_IFDIR, 0);
+}
+
+static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
+ struct nameidata *n)
+{
+ return yaffs_mknod(dir, dentry, mode | S_IFREG, 0);
+}
+
+static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *dentry)
+{
+ struct inode *inode = old_dentry->d_inode;
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_obj *link = NULL;
+ struct yaffs_dev *dev;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_link");
+
+ obj = yaffs_inode_to_obj(inode);
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ if (!S_ISDIR(inode->i_mode)) /* Don't link directories */
+ link =
+ yaffs_link_obj(yaffs_inode_to_obj(dir), dentry->d_name.name,
+ obj);
+
+ if (link) {
+ old_dentry->d_inode->i_nlink = yaffs_get_obj_link_count(obj);
+ d_instantiate(dentry, old_dentry->d_inode);
+ atomic_inc(&old_dentry->d_inode->i_count);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_link link count %d i_count %d",
+ old_dentry->d_inode->i_nlink,
+ atomic_read(&old_dentry->d_inode->i_count));
+ }
+
+ yaffs_gross_unlock(dev);
+
+ if (link) {
+ update_dir_time(dir);
+ return 0;
+ }
+
+ return -EPERM;
+}
+
+static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
+ const char *symname)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+ uid_t uid = current->cred->fsuid;
+ gid_t gid =
+ (dir->i_mode & S_ISGID) ? dir->i_gid : current->cred->fsgid;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_symlink");
+
+ dev = yaffs_inode_to_obj(dir)->my_dev;
+ yaffs_gross_lock(dev);
+ obj = yaffs_create_symlink(yaffs_inode_to_obj(dir), dentry->d_name.name,
+ S_IFLNK | S_IRWXUGO, uid, gid, symname);
+ yaffs_gross_unlock(dev);
+
+ if (obj) {
+ struct inode *inode;
+
+ inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
+ d_instantiate(dentry, inode);
+ update_dir_time(dir);
+ yaffs_trace(YAFFS_TRACE_OS, "symlink created OK");
+ return 0;
+ } else {
+ yaffs_trace(YAFFS_TRACE_OS, "symlink not created");
+ }
+
+ return -ENOMEM;
+}
+
+static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
+ struct nameidata *n)
+{
+ struct yaffs_obj *obj;
+ struct inode *inode = NULL;
+
+ struct yaffs_dev *dev = yaffs_inode_to_obj(dir)->my_dev;
+
+ if (current != yaffs_dev_to_lc(dev)->readdir_process)
+ yaffs_gross_lock(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_lookup for %d:%s",
+ yaffs_inode_to_obj(dir)->obj_id, dentry->d_name.name);
+
+ obj = yaffs_find_by_name(yaffs_inode_to_obj(dir), dentry->d_name.name);
+
+ obj = yaffs_get_equivalent_obj(obj); /* in case it was a hardlink */
+
+ /* Can't hold gross lock when calling yaffs_get_inode() */
+ if (current != yaffs_dev_to_lc(dev)->readdir_process)
+ yaffs_gross_unlock(dev);
+
+ if (obj) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_lookup found %d", obj->obj_id);
+
+ inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
+
+ if (inode) {
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_loookup dentry");
+ d_add(dentry, inode);
+ /* return dentry; */
+ return NULL;
+ }
+
+ } else {
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_lookup not found");
+
+ }
+
+ d_add(dentry, inode);
+
+ return NULL;
+}
+
+static int yaffs_unlink(struct inode *dir, struct dentry *dentry)
+{
+ int ret_val;
+
+ struct yaffs_dev *dev;
+ struct yaffs_obj *obj;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_unlink %d:%s",
+ (int)(dir->i_ino), dentry->d_name.name);
+ obj = yaffs_inode_to_obj(dir);
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ ret_val = yaffs_unlinker(obj, dentry->d_name.name);
+
+ if (ret_val == YAFFS_OK) {
+ dentry->d_inode->i_nlink--;
+ dir->i_version++;
+ yaffs_gross_unlock(dev);
+ mark_inode_dirty(dentry->d_inode);
+ update_dir_time(dir);
+ return 0;
+ }
+ yaffs_gross_unlock(dev);
+ return -ENOTEMPTY;
+}
+
+static int yaffs_sync_object(struct file *file, int datasync)
+{
+
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+ struct dentry *dentry = file->f_path.dentry;
+
+ obj = yaffs_dentry_to_obj(dentry);
+
+ dev = obj->my_dev;
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC, "yaffs_sync_object");
+ yaffs_gross_lock(dev);
+ yaffs_flush_file(obj, 1, datasync);
+ yaffs_gross_unlock(dev);
+ return 0;
+}
+/*
+ * The VFS layer already does all the dentry stuff for rename.
+ *
+ * NB: POSIX says you can rename an object over an old object of the same name
+ */
+static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+{
+ struct yaffs_dev *dev;
+ int ret_val = YAFFS_FAIL;
+ struct yaffs_obj *target;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_rename");
+ dev = yaffs_inode_to_obj(old_dir)->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ /* Check if the target is an existing directory that is not empty. */
+ target = yaffs_find_by_name(yaffs_inode_to_obj(new_dir),
+ new_dentry->d_name.name);
+
+ if (target && target->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY &&
+ !list_empty(&target->variant.dir_variant.children)) {
+
+ yaffs_trace(YAFFS_TRACE_OS, "target is non-empty dir");
+
+ ret_val = YAFFS_FAIL;
+ } else {
+ /* Now does unlinking internally using shadowing mechanism */
+ yaffs_trace(YAFFS_TRACE_OS, "calling yaffs_rename_obj");
+
+ ret_val = yaffs_rename_obj(yaffs_inode_to_obj(old_dir),
+ old_dentry->d_name.name,
+ yaffs_inode_to_obj(new_dir),
+ new_dentry->d_name.name);
+ }
+ yaffs_gross_unlock(dev);
+
+ if (ret_val == YAFFS_OK) {
+ if (target) {
+ new_dentry->d_inode->i_nlink--;
+ mark_inode_dirty(new_dentry->d_inode);
+ }
+
+ update_dir_time(old_dir);
+ if (old_dir != new_dir)
+ update_dir_time(new_dir);
+ return 0;
+ } else {
+ return -ENOTEMPTY;
+ }
+}
+
+static int yaffs_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ struct inode *inode = dentry->d_inode;
+ int error = 0;
+ struct yaffs_dev *dev;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_setattr of object %d",
+ yaffs_inode_to_obj(inode)->obj_id);
+
+ /* Fail if a requested resize >= 2GB */
+ if (attr->ia_valid & ATTR_SIZE && (attr->ia_size >> 31))
+ error = -EINVAL;
+
+ if (error == 0)
+ error = inode_change_ok(inode, attr);
+ if (error == 0) {
+ int result;
+ if (!error) {
+ setattr_copy(inode, attr);
+ yaffs_trace(YAFFS_TRACE_OS, "inode_setattr called");
+ if (attr->ia_valid & ATTR_SIZE) {
+ truncate_setsize(inode, attr->ia_size);
+ inode->i_blocks = (inode->i_size + 511) >> 9;
+ }
+ }
+ dev = yaffs_inode_to_obj(inode)->my_dev;
+ if (attr->ia_valid & ATTR_SIZE) {
+ yaffs_trace(YAFFS_TRACE_OS, "resize to %d(%x)",
+ (int)(attr->ia_size),
+ (int)(attr->ia_size));
+ }
+ yaffs_gross_lock(dev);
+ result = yaffs_set_attribs(yaffs_inode_to_obj(inode), attr);
+ if (result == YAFFS_OK) {
+ error = 0;
+ } else {
+ error = -EPERM;
+ }
+ yaffs_gross_unlock(dev);
+
+ }
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_setattr done returning %d", error);
+
+ return error;
+}
+
+#ifdef CONFIG_YAFFS_XATTR
+static int yaffs_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags)
+{
+ struct inode *inode = dentry->d_inode;
+ int error = 0;
+ struct yaffs_dev *dev;
+ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_setxattr of object %d", obj->obj_id);
+
+ if (error == 0) {
+ int result;
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ result = yaffs_set_xattrib(obj, name, value, size, flags);
+ if (result == YAFFS_OK)
+ error = 0;
+ else if (result < 0)
+ error = result;
+ yaffs_gross_unlock(dev);
+
+ }
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_setxattr done returning %d", error);
+
+ return error;
+}
+
+static ssize_t yaffs_getxattr(struct dentry * dentry, const char *name, void *buff,
+ size_t size)
+{
+ struct inode *inode = dentry->d_inode;
+ int error = 0;
+ struct yaffs_dev *dev;
+ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_getxattr \"%s\" from object %d",
+ name, obj->obj_id);
+
+ if (error == 0) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ error = yaffs_get_xattrib(obj, name, buff, size);
+ yaffs_gross_unlock(dev);
+
+ }
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_getxattr done returning %d", error);
+
+ return error;
+}
+
+static int yaffs_removexattr(struct dentry *dentry, const char *name)
+{
+ struct inode *inode = dentry->d_inode;
+ int error = 0;
+ struct yaffs_dev *dev;
+ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_removexattr of object %d", obj->obj_id);
+
+ if (error == 0) {
+ int result;
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ result = yaffs_remove_xattrib(obj, name);
+ if (result == YAFFS_OK)
+ error = 0;
+ else if (result < 0)
+ error = result;
+ yaffs_gross_unlock(dev);
+
+ }
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_removexattr done returning %d", error);
+
+ return error;
+}
+
+static ssize_t yaffs_listxattr(struct dentry * dentry, char *buff, size_t size)
+{
+ struct inode *inode = dentry->d_inode;
+ int error = 0;
+ struct yaffs_dev *dev;
+ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_listxattr of object %d", obj->obj_id);
+
+ if (error == 0) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ error = yaffs_list_xattrib(obj, buff, size);
+ yaffs_gross_unlock(dev);
+
+ }
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_listxattr done returning %d", error);
+
+ return error;
+}
+
+#endif
+
+static const struct inode_operations yaffs_dir_inode_operations = {
+ .create = yaffs_create,
+ .lookup = yaffs_lookup,
+ .link = yaffs_link,
+ .unlink = yaffs_unlink,
+ .symlink = yaffs_symlink,
+ .mkdir = yaffs_mkdir,
+ .rmdir = yaffs_unlink,
+ .mknod = yaffs_mknod,
+ .rename = yaffs_rename,
+ .setattr = yaffs_setattr,
+#ifdef CONFIG_YAFFS_XATTR
+ .setxattr = yaffs_setxattr,
+ .getxattr = yaffs_getxattr,
+ .listxattr = yaffs_listxattr,
+ .removexattr = yaffs_removexattr,
+#endif
+};
+/*-----------------------------------------------------------------*/
+/* Directory search context allows us to unlock access to yaffs during
+ * filldir without causing problems with the directory being modified.
+ * This is similar to the tried and tested mechanism used in yaffs direct.
+ *
+ * A search context iterates along a doubly linked list of siblings in the
+ * directory. If the iterating object is deleted then this would corrupt
+ * the list iteration, likely causing a crash. The search context avoids
+ * this by using the remove_obj_fn to move the search context to the
+ * next object before the object is deleted.
+ *
+ * Many readdirs (and thus search contexts) may be alive simultaneously so
+ * each struct yaffs_dev has a list of these.
+ *
+ * A search context lives for the duration of a readdir.
+ *
+ * All these functions must be called while yaffs is locked.
+ */
+
+struct yaffs_search_context {
+ struct yaffs_dev *dev;
+ struct yaffs_obj *dir_obj;
+ struct yaffs_obj *next_return;
+ struct list_head others;
+};
+
+/*
+ * yaffs_new_search() creates a new search context, initialises it and
+ * adds it to the device's search context list.
+ *
+ * Called at start of readdir.
+ */
+static struct yaffs_search_context *yaffs_new_search(struct yaffs_obj *dir)
+{
+ struct yaffs_dev *dev = dir->my_dev;
+ struct yaffs_search_context *sc =
+ kmalloc(sizeof(struct yaffs_search_context), GFP_NOFS);
+ if (sc) {
+ sc->dir_obj = dir;
+ sc->dev = dev;
+ if (list_empty(&sc->dir_obj->variant.dir_variant.children))
+ sc->next_return = NULL;
+ else
+ sc->next_return =
+ list_entry(dir->variant.dir_variant.children.next,
+ struct yaffs_obj, siblings);
+ INIT_LIST_HEAD(&sc->others);
+ list_add(&sc->others, &(yaffs_dev_to_lc(dev)->search_contexts));
+ }
+ return sc;
+}
+
+/*
+ * yaffs_search_end() disposes of a search context and cleans up.
+ */
+static void yaffs_search_end(struct yaffs_search_context *sc)
+{
+ if (sc) {
+ list_del(&sc->others);
+ kfree(sc);
+ }
+}
+
+/*
+ * yaffs_search_advance() moves a search context to the next object.
+ * Called when the search iterates or when an object removal causes
+ * the search context to be moved to the next object.
+ */
+static void yaffs_search_advance(struct yaffs_search_context *sc)
+{
+ if (!sc)
+ return;
+
+ if (sc->next_return == NULL ||
+ list_empty(&sc->dir_obj->variant.dir_variant.children))
+ sc->next_return = NULL;
+ else {
+ struct list_head *next = sc->next_return->siblings.next;
+
+ if (next == &sc->dir_obj->variant.dir_variant.children)
+ sc->next_return = NULL; /* end of list */
+ else
+ sc->next_return =
+ list_entry(next, struct yaffs_obj, siblings);
+ }
+}
+
+/*
+ * yaffs_remove_obj_callback() is called when an object is unlinked.
+ * We check open search contexts and advance any which are currently
+ * on the object being iterated.
+ */
+static void yaffs_remove_obj_callback(struct yaffs_obj *obj)
+{
+
+ struct list_head *i;
+ struct yaffs_search_context *sc;
+ struct list_head *search_contexts =
+ &(yaffs_dev_to_lc(obj->my_dev)->search_contexts);
+
+ /* Iterate through the directory search contexts.
+ * If any are currently on the object being removed, then advance
+ * the search context to the next object to prevent a hanging pointer.
+ */
+ list_for_each(i, search_contexts) {
+ if (i) {
+ sc = list_entry(i, struct yaffs_search_context, others);
+ if (sc->next_return == obj)
+ yaffs_search_advance(sc);
+ }
+ }
+
+}
+
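+/*
+ * readdir: emit "." and ".." first, then walk the directory's children
+ * through a search context. The gross lock is released around each
+ * filldir() callback and re-taken afterwards.
+ */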
+static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+ struct yaffs_search_context *sc;
+ struct inode *inode = f->f_dentry->d_inode;
+ unsigned long offset, curoffs;
+ struct yaffs_obj *l;
+ int ret_val = 0;
+
+ char name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ obj = yaffs_dentry_to_obj(f->f_dentry);
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ yaffs_dev_to_lc(dev)->readdir_process = current;
+
+ offset = f->f_pos;
+
+ sc = yaffs_new_search(obj);
+ if (!sc) {
+ ret_val = -ENOMEM;
+ goto out;
+ }
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: starting at %d", (int)offset);
+
+ if (offset == 0) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: entry . ino %d",
+ (int)inode->i_ino);
+ yaffs_gross_unlock(dev);
+ if (filldir(dirent, ".", 1, offset, inode->i_ino, DT_DIR) < 0) {
+ yaffs_gross_lock(dev);
+ goto out;
+ }
+ yaffs_gross_lock(dev);
+ offset++;
+ f->f_pos++;
+ }
+ if (offset == 1) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: entry .. ino %d",
+ (int)f->f_dentry->d_parent->d_inode->i_ino);
+ yaffs_gross_unlock(dev);
+ if (filldir(dirent, "..", 2, offset,
+ f->f_dentry->d_parent->d_inode->i_ino,
+ DT_DIR) < 0) {
+ yaffs_gross_lock(dev);
+ goto out;
+ }
+ yaffs_gross_lock(dev);
+ offset++;
+ f->f_pos++;
+ }
+
+ curoffs = 1;
+
+ /* If the directory has changed since the open or last call to
+ readdir, rewind to after the 2 canned entries. */
+ if (f->f_version != inode->i_version) {
+ offset = 2;
+ f->f_pos = offset;
+ f->f_version = inode->i_version;
+ }
+
+ while (sc->next_return) {
+ curoffs++;
+ l = sc->next_return;
+ if (curoffs >= offset) {
+ int this_inode = yaffs_get_obj_inode(l);
+ int this_type = yaffs_get_obj_type(l);
+
+ yaffs_get_obj_name(l, name, YAFFS_MAX_NAME_LENGTH + 1);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: %s inode %d",
+ name, yaffs_get_obj_inode(l));
+
+ yaffs_gross_unlock(dev);
+
+ if (filldir(dirent,
+ name,
+ strlen(name),
+ offset, this_inode, this_type) < 0) {
+ yaffs_gross_lock(dev);
+ goto out;
+ }
+
+ yaffs_gross_lock(dev);
+
+ offset++;
+ f->f_pos++;
+ }
+ yaffs_search_advance(sc);
+ }
+
+out:
+ yaffs_search_end(sc);
+ yaffs_dev_to_lc(dev)->readdir_process = NULL;
+ yaffs_gross_unlock(dev);
+
+ return ret_val;
+}
+
+static const struct file_operations yaffs_dir_operations = {
+ .read = generic_read_dir,
+ .readdir = yaffs_readdir,
+ .fsync = yaffs_sync_object,
+ .llseek = generic_file_llseek,
+};
+
+
+
+static int yaffs_file_flush(struct file *file, fl_owner_t id)
+{
+ struct yaffs_obj *obj = yaffs_dentry_to_obj(file->f_dentry);
+
+ struct yaffs_dev *dev = obj->my_dev;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_file_flush object %d (%s)",
+ obj->obj_id, obj->dirty ? "dirty" : "clean");
+
+ yaffs_gross_lock(dev);
+
+ yaffs_flush_file(obj, 1, 0);
+
+ yaffs_gross_unlock(dev);
+
+ return 0;
+}
+
+static const struct file_operations yaffs_file_operations = {
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = generic_file_aio_read,
+ .aio_write = generic_file_aio_write,
+ .mmap = generic_file_mmap,
+ .flush = yaffs_file_flush,
+ .fsync = yaffs_sync_object,
+ .splice_read = generic_file_splice_read,
+ .splice_write = generic_file_splice_write,
+ .llseek = generic_file_llseek,
+};
+
+
+/* ExportFS support */
+static struct inode *yaffs2_nfs_get_inode(struct super_block *sb, uint64_t ino,
+ uint32_t generation)
+{
+ return yaffs_iget(sb, ino);
+}
+
+static struct dentry *yaffs2_fh_to_dentry(struct super_block *sb,
+ struct fid *fid, int fh_len,
+ int fh_type)
+{
+ return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
+ yaffs2_nfs_get_inode);
+}
+
+static struct dentry *yaffs2_fh_to_parent(struct super_block *sb,
+ struct fid *fid, int fh_len,
+ int fh_type)
+{
+ return generic_fh_to_parent(sb, fid, fh_len, fh_type,
+ yaffs2_nfs_get_inode);
+}
+
+struct dentry *yaffs2_get_parent(struct dentry *dentry)
+{
+
+ struct super_block *sb = dentry->d_inode->i_sb;
+ struct dentry *parent = ERR_PTR(-ENOENT);
+ struct inode *inode;
+ unsigned long parent_ino;
+ struct yaffs_obj *d_obj;
+ struct yaffs_obj *parent_obj;
+
+ d_obj = yaffs_inode_to_obj(dentry->d_inode);
+
+ if (d_obj) {
+ parent_obj = d_obj->parent;
+ if (parent_obj) {
+ parent_ino = yaffs_get_obj_inode(parent_obj);
+ inode = yaffs_iget(sb, parent_ino);
+
+ if (IS_ERR(inode)) {
+ parent = ERR_CAST(inode);
+ } else {
+ /* d_obtain_alias() consumes the inode
+ * reference on both success and failure,
+ * so no extra iput() is needed here. */
+ parent = d_obtain_alias(inode);
+ }
+ }
+ }
+
+ return parent;
+}
+
+/* Operations left NULL here fall back to the
+ * default exportfs implementations.
+ */
+
+static struct export_operations yaffs_export_ops = {
+ .fh_to_dentry = yaffs2_fh_to_dentry,
+ .fh_to_parent = yaffs2_fh_to_parent,
+ .get_parent = yaffs2_get_parent,
+};
+
+
+/*-----------------------------------------------------------------*/
+
+static int yaffs_readlink(struct dentry *dentry, char __user * buffer,
+ int buflen)
+{
+ unsigned char *alias;
+ int ret;
+
+ struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry));
+
+ yaffs_gross_unlock(dev);
+
+ if (!alias)
+ return -ENOMEM;
+
+ ret = vfs_readlink(dentry, buffer, buflen, alias);
+ kfree(alias);
+ return ret;
+}
+
+static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+ unsigned char *alias;
+ void *ret;
+ struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry));
+ yaffs_gross_unlock(dev);
+
+ if (!alias) {
+ ret = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ nd_set_link(nd, alias);
+ ret = (void *)alias;
+out:
+ return ret;
+}
+
+void yaffs_put_link(struct dentry *dentry, struct nameidata *nd, void *alias)
+{
+ kfree(alias);
+}
+
+
+static void yaffs_unstitch_obj(struct inode *inode, struct yaffs_obj *obj)
+{
+ /* Clear the association between the inode and
+ * the struct yaffs_obj.
+ */
+ obj->my_inode = NULL;
+ yaffs_inode_to_obj_lv(inode) = NULL;
+
+ /* If the object freeing was deferred, then the real
+ * free happens now.
+ * This should fix the inode inconsistency problem.
+ */
+ yaffs_handle_defered_free(obj);
+}
+
+/* yaffs_evict_inode combines into one operation what was previously done in
+ * yaffs_clear_inode() and yaffs_delete_inode()
+ *
+ */
+static void yaffs_evict_inode(struct inode *inode)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+ int deleteme = 0;
+
+ obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_evict_inode: ino %d, count %d %s",
+ (int)inode->i_ino,
+ atomic_read(&inode->i_count),
+ obj ? "object exists" : "null object");
+
+ if (!inode->i_nlink && !is_bad_inode(inode))
+ deleteme = 1;
+ truncate_inode_pages(&inode->i_data, 0);
+ end_writeback(inode);
+
+ if (deleteme && obj) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ yaffs_del_obj(obj);
+ yaffs_gross_unlock(dev);
+ }
+ if (obj) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ yaffs_unstitch_obj(inode, obj);
+ yaffs_gross_unlock(dev);
+ }
+
+}
+
+static void yaffs_touch_super(struct yaffs_dev *dev)
+{
+ struct super_block *sb = yaffs_dev_to_lc(dev)->super;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_touch_super() sb = %p", sb);
+ if (sb)
+ sb->s_dirt = 1;
+}
+
+static int yaffs_readpage_nolock(struct file *f, struct page *pg)
+{
+ /* Lifted from jffs2 */
+
+ struct yaffs_obj *obj;
+ unsigned char *pg_buf;
+ int ret;
+
+ struct yaffs_dev *dev;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readpage_nolock at %08x, size %08x",
+ (unsigned)(pg->index << PAGE_CACHE_SHIFT),
+ (unsigned)PAGE_CACHE_SIZE);
+
+ obj = yaffs_dentry_to_obj(f->f_dentry);
+
+ dev = obj->my_dev;
+
+ BUG_ON(!PageLocked(pg));
+
+ pg_buf = kmap(pg);
+ /* FIXME: Can kmap fail? */
+
+ yaffs_gross_lock(dev);
+
+ ret = yaffs_file_rd(obj, pg_buf,
+ pg->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE);
+
+ yaffs_gross_unlock(dev);
+
+ if (ret >= 0)
+ ret = 0;
+
+ if (ret) {
+ ClearPageUptodate(pg);
+ SetPageError(pg);
+ } else {
+ SetPageUptodate(pg);
+ ClearPageError(pg);
+ }
+
+ flush_dcache_page(pg);
+ kunmap(pg);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage_nolock done");
+ return ret;
+}
+
+static int yaffs_readpage_unlock(struct file *f, struct page *pg)
+{
+ int ret = yaffs_readpage_nolock(f, pg);
+ UnlockPage(pg);
+ return ret;
+}
+
+static int yaffs_readpage(struct file *f, struct page *pg)
+{
+ int ret;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage");
+ ret = yaffs_readpage_unlock(f, pg);
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage done");
+ return ret;
+}
+
+/* writepage inspired by/stolen from smbfs */
+
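+/*
+ * Write one page of a file back to flash. A page lying wholly beyond
+ * i_size is zeroed and completed without writing; for the final partial
+ * page the in-memory page is zeroed beyond i_size and only the bytes up
+ * to i_size are written.
+ */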
+static int yaffs_writepage(struct page *page, struct writeback_control *wbc)
+{
+ struct yaffs_dev *dev;
+ struct address_space *mapping = page->mapping;
+ struct inode *inode;
+ unsigned long end_index;
+ char *buffer;
+ struct yaffs_obj *obj;
+ int n_written = 0;
+ unsigned n_bytes;
+ loff_t i_size;
+
+ if (!mapping)
+ BUG();
+ inode = mapping->host;
+ if (!inode)
+ BUG();
+ i_size = i_size_read(inode);
+
+ end_index = i_size >> PAGE_CACHE_SHIFT;
+
+ if (page->index < end_index)
+ n_bytes = PAGE_CACHE_SIZE;
+ else {
+ n_bytes = i_size & (PAGE_CACHE_SIZE - 1);
+
+ if (page->index > end_index || !n_bytes) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_writepage at %08x, inode size = %08x!!!",
+ (unsigned)(page->index << PAGE_CACHE_SHIFT),
+ (unsigned)inode->i_size);
+ yaffs_trace(YAFFS_TRACE_OS,
+ " -> don't care!!");
+
+ zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ set_page_writeback(page);
+ unlock_page(page);
+ end_page_writeback(page);
+ return 0;
+ }
+ }
+
+ if (n_bytes != PAGE_CACHE_SIZE)
+ zero_user_segment(page, n_bytes, PAGE_CACHE_SIZE);
+
+ get_page(page);
+
+ buffer = kmap(page);
+
+ obj = yaffs_inode_to_obj(inode);
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_writepage at %08x, size %08x",
+ (unsigned)(page->index << PAGE_CACHE_SHIFT), n_bytes);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "writepag0: obj = %05x, ino = %05x",
+ (int)obj->variant.file_variant.file_size, (int)inode->i_size);
+
+ n_written = yaffs_wr_file(obj, buffer,
+ page->index << PAGE_CACHE_SHIFT, n_bytes, 0);
+
+ yaffs_touch_super(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "writepag1: obj = %05x, ino = %05x",
+ (int)obj->variant.file_variant.file_size, (int)inode->i_size);
+
+ yaffs_gross_unlock(dev);
+
+ kunmap(page);
+ set_page_writeback(page);
+ unlock_page(page);
+ end_page_writeback(page);
+ put_page(page);
+
+ return (n_written == n_bytes) ? 0 : -ENOSPC;
+}
+
+/* Space holding and freeing is done to ensure we have space available for
+ * write_begin/end.
+ * For now we just assume few parallel writes and check against a small
+ * number.
+ * Todo: need to do this with a counter to handle parallel reads better.
+ */
+
+static ssize_t yaffs_hold_space(struct file *f)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+
+ int n_free_chunks;
+
+ obj = yaffs_dentry_to_obj(f->f_dentry);
+
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ n_free_chunks = yaffs_get_n_free_chunks(dev);
+
+ yaffs_gross_unlock(dev);
+
+ return (n_free_chunks > 20) ? 1 : 0;
+}
+
+static void yaffs_release_space(struct file *f)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+
+ obj = yaffs_dentry_to_obj(f->f_dentry);
+
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ yaffs_gross_unlock(dev);
+}
+
+static int yaffs_write_begin(struct file *filp, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata)
+{
+ struct page *pg = NULL;
+ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+
+ int ret = 0;
+ int space_held = 0;
+
+ /* Get a page */
+ pg = grab_cache_page_write_begin(mapping, index, flags);
+
+ *pagep = pg;
+ if (!pg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ yaffs_trace(YAFFS_TRACE_OS,
+ "start yaffs_write_begin index %d(%x) uptodate %d",
+ (int)index, (int)index, Page_Uptodate(pg) ? 1 : 0);
+
+ /* Get fs space */
+ space_held = yaffs_hold_space(filp);
+
+ if (!space_held) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ /* Update page if required */
+
+ if (!Page_Uptodate(pg))
+ ret = yaffs_readpage_nolock(filp, pg);
+
+ if (ret)
+ goto out;
+
+ /* Happy path return */
+ yaffs_trace(YAFFS_TRACE_OS, "end yaffs_write_begin - ok");
+
+ return 0;
+
+out:
+ yaffs_trace(YAFFS_TRACE_OS,
+ "end yaffs_write_begin fail returning %d", ret);
+ if (space_held)
+ yaffs_release_space(filp);
+ if (pg) {
+ unlock_page(pg);
+ page_cache_release(pg);
+ }
+ return ret;
+}
+
+static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
+ loff_t * pos)
+{
+ struct yaffs_obj *obj;
+ int n_written, ipos;
+ struct inode *inode;
+ struct yaffs_dev *dev;
+
+ obj = yaffs_dentry_to_obj(f->f_dentry);
+
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ inode = f->f_dentry->d_inode;
+
+ if (!S_ISBLK(inode->i_mode) && f->f_flags & O_APPEND)
+ ipos = inode->i_size;
+ else
+ ipos = *pos;
+
+ if (!obj)
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_file_write: hey obj is null!");
+ else
+ yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_file_write about to write %u(%x) bytes to object %d at %d(%x)",
+ (unsigned)n, (unsigned)n, obj->obj_id, ipos, ipos);
+
+ n_written = yaffs_wr_file(obj, buf, ipos, n, 0);
+
+ yaffs_touch_super(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_file_write: %d(%x) bytes written",
+ (unsigned)n, (unsigned)n);
+
+ if (n_written > 0) {
+ ipos += n_written;
+ *pos = ipos;
+ if (ipos > inode->i_size) {
+ inode->i_size = ipos;
+ inode->i_blocks = (ipos + 511) >> 9;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_file_write size updated to %d bytes, %d blocks",
+ ipos, (int)(inode->i_blocks));
+ }
+
+ }
+ yaffs_gross_unlock(dev);
+ return (n_written == 0) && (n > 0) ? -ENOSPC : n_written;
+}
+
+static int yaffs_write_end(struct file *filp, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+			   struct page *pg, void *fsdata)
+{
+ int ret = 0;
+ void *addr, *kva;
+ uint32_t offset_into_page = pos & (PAGE_CACHE_SIZE - 1);
+
+ kva = kmap(pg);
+ addr = kva + offset_into_page;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_write_end addr %p pos %x n_bytes %d",
+ addr, (unsigned)pos, copied);
+
+ ret = yaffs_file_write(filp, addr, copied, &pos);
+
+ if (ret != copied) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_write_end not same size ret %d copied %d",
+ ret, copied);
+ SetPageError(pg);
+ }
+
+ kunmap(pg);
+
+ yaffs_release_space(filp);
+ unlock_page(pg);
+ page_cache_release(pg);
+ return ret;
+}
+
+static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
+ struct super_block *sb = dentry->d_sb;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_statfs");
+
+ yaffs_gross_lock(dev);
+
+ buf->f_type = YAFFS_MAGIC;
+ buf->f_bsize = sb->s_blocksize;
+ buf->f_namelen = 255;
+
+ if (dev->data_bytes_per_chunk & (dev->data_bytes_per_chunk - 1)) {
+ /* Do this if chunk size is not a power of 2 */
+
+ uint64_t bytes_in_dev;
+ uint64_t bytes_free;
+
+ bytes_in_dev =
+ ((uint64_t)
+ ((dev->param.end_block - dev->param.start_block +
+ 1))) * ((uint64_t) (dev->param.chunks_per_block *
+ dev->data_bytes_per_chunk));
+
+ do_div(bytes_in_dev, sb->s_blocksize); /* bytes_in_dev becomes the number of blocks */
+ buf->f_blocks = bytes_in_dev;
+
+ bytes_free = ((uint64_t) (yaffs_get_n_free_chunks(dev))) *
+ ((uint64_t) (dev->data_bytes_per_chunk));
+
+ do_div(bytes_free, sb->s_blocksize);
+
+ buf->f_bfree = bytes_free;
+
+ } else if (sb->s_blocksize > dev->data_bytes_per_chunk) {
+
+ buf->f_blocks =
+ (dev->param.end_block - dev->param.start_block + 1) *
+ dev->param.chunks_per_block /
+ (sb->s_blocksize / dev->data_bytes_per_chunk);
+ buf->f_bfree =
+ yaffs_get_n_free_chunks(dev) /
+ (sb->s_blocksize / dev->data_bytes_per_chunk);
+ } else {
+ buf->f_blocks =
+ (dev->param.end_block - dev->param.start_block + 1) *
+ dev->param.chunks_per_block *
+ (dev->data_bytes_per_chunk / sb->s_blocksize);
+
+ buf->f_bfree =
+ yaffs_get_n_free_chunks(dev) *
+ (dev->data_bytes_per_chunk / sb->s_blocksize);
+ }
+
+ buf->f_files = 0;
+ buf->f_ffree = 0;
+ buf->f_bavail = buf->f_bfree;
+
+ yaffs_gross_unlock(dev);
+ return 0;
+}
+
+static void yaffs_flush_inodes(struct super_block *sb)
+{
+ struct inode *iptr;
+ struct yaffs_obj *obj;
+
+ list_for_each_entry(iptr, &sb->s_inodes, i_sb_list) {
+ obj = yaffs_inode_to_obj(iptr);
+ if (obj) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "flushing obj %d", obj->obj_id);
+ yaffs_flush_file(obj, 1, 0);
+ }
+ }
+}
+
+static void yaffs_flush_super(struct super_block *sb, int do_checkpoint)
+{
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+ if (!dev)
+ return;
+
+ yaffs_flush_inodes(sb);
+ yaffs_update_dirty_dirs(dev);
+ yaffs_flush_whole_cache(dev);
+ if (do_checkpoint)
+ yaffs_checkpoint_save(dev);
+}
+
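+/*
+ * Work out how urgently the background thread should garbage collect:
+ * 0 = no urgency, 1 = moderate, 2 = urgent. Urgency rises as more of the
+ * free space ends up scattered through partly used blocks rather than
+ * sitting in fully erased blocks.
+ */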
+static unsigned yaffs_bg_gc_urgency(struct yaffs_dev *dev)
+{
+ unsigned erased_chunks =
+ dev->n_erased_blocks * dev->param.chunks_per_block;
+ struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
+ unsigned scattered = 0; /* Free chunks not in an erased block */
+
+ if (erased_chunks < dev->n_free_chunks)
+ scattered = (dev->n_free_chunks - erased_chunks);
+
+ if (!context->bg_running)
+ return 0;
+ else if (scattered < (dev->param.chunks_per_block * 2))
+ return 0;
+ else if (erased_chunks > dev->n_free_chunks / 2)
+ return 0;
+ else if (erased_chunks > dev->n_free_chunks / 4)
+ return 1;
+ else
+ return 2;
+}
+
+static int yaffs_do_sync_fs(struct super_block *sb, int request_checkpoint)
+{
+
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+ unsigned int oneshot_checkpoint = (yaffs_auto_checkpoint & 4);
+ unsigned gc_urgent = yaffs_bg_gc_urgency(dev);
+ int do_checkpoint;
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND,
+ "yaffs_do_sync_fs: gc-urgency %d %s %s%s",
+ gc_urgent,
+ sb->s_dirt ? "dirty" : "clean",
+ request_checkpoint ? "checkpoint requested" : "no checkpoint",
+ oneshot_checkpoint ? " one-shot" : "");
+
+ yaffs_gross_lock(dev);
+ do_checkpoint = ((request_checkpoint && !gc_urgent) ||
+ oneshot_checkpoint) && !dev->is_checkpointed;
+
+ if (sb->s_dirt || do_checkpoint) {
+ yaffs_flush_super(sb, !dev->is_checkpointed && do_checkpoint);
+ sb->s_dirt = 0;
+ if (oneshot_checkpoint)
+ yaffs_auto_checkpoint &= ~4;
+ }
+ yaffs_gross_unlock(dev);
+
+ return 0;
+}
+
+/*
+ * yaffs background thread functions.
+ * yaffs_bg_thread_fn() the thread function
+ * yaffs_bg_start() launches the background thread.
+ * yaffs_bg_stop() cleans up the background thread.
+ *
+ * NB:
+ * The thread should only run after yaffs is initialised.
+ * The thread should be stopped before yaffs is unmounted.
+ * The thread should not do any writing while the fs is read only.
+ */
+
+void yaffs_background_waker(unsigned long data)
+{
+ wake_up_process((struct task_struct *)data);
+}
+
+static int yaffs_bg_thread_fn(void *data)
+{
+ struct yaffs_dev *dev = (struct yaffs_dev *)data;
+ struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
+ unsigned long now = jiffies;
+ unsigned long next_dir_update = now;
+ unsigned long next_gc = now;
+ unsigned long expires;
+ unsigned int urgency;
+
+ int gc_result;
+ struct timer_list timer;
+
+ yaffs_trace(YAFFS_TRACE_BACKGROUND,
+ "yaffs_background starting for dev %p", (void *)dev);
+
+ set_freezable();
+ while (context->bg_running) {
+ yaffs_trace(YAFFS_TRACE_BACKGROUND, "yaffs_background");
+
+ if (kthread_should_stop())
+ break;
+
+ if (try_to_freeze())
+ continue;
+
+ yaffs_gross_lock(dev);
+
+ now = jiffies;
+
+ if (time_after(now, next_dir_update) && yaffs_bg_enable) {
+ yaffs_update_dirty_dirs(dev);
+ next_dir_update = now + HZ;
+ }
+
+ if (time_after(now, next_gc) && yaffs_bg_enable) {
+ if (!dev->is_checkpointed) {
+ urgency = yaffs_bg_gc_urgency(dev);
+ gc_result = yaffs_bg_gc(dev, urgency);
+ if (urgency > 1)
+ next_gc = now + HZ / 20 + 1;
+ else if (urgency > 0)
+ next_gc = now + HZ / 10 + 1;
+ else
+ next_gc = now + HZ * 2;
+ } else {
+ /*
+ * gc not running so set to next_dir_update
+ * to cut down on wake ups
+ */
+ next_gc = next_dir_update;
+ }
+ }
+ yaffs_gross_unlock(dev);
+ expires = next_dir_update;
+ if (time_before(next_gc, expires))
+ expires = next_gc;
+ if (time_before(expires, now))
+ expires = now + HZ;
+
+ Y_INIT_TIMER(&timer);
+ timer.expires = expires + 1;
+ timer.data = (unsigned long)current;
+ timer.function = yaffs_background_waker;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_timer(&timer);
+ schedule();
+ del_timer_sync(&timer);
+ }
+
+ return 0;
+}
+
+static int yaffs_bg_start(struct yaffs_dev *dev)
+{
+ int retval = 0;
+ struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
+
+ if (dev->read_only)
+ return -1;
+
+ context->bg_running = 1;
+
+ context->bg_thread = kthread_run(yaffs_bg_thread_fn,
+ (void *)dev, "yaffs-bg-%d",
+ context->mount_id);
+
+ if (IS_ERR(context->bg_thread)) {
+ retval = PTR_ERR(context->bg_thread);
+ context->bg_thread = NULL;
+ context->bg_running = 0;
+ }
+ return retval;
+}
+
+static void yaffs_bg_stop(struct yaffs_dev *dev)
+{
+ struct yaffs_linux_context *ctxt = yaffs_dev_to_lc(dev);
+
+ ctxt->bg_running = 0;
+
+ if (ctxt->bg_thread) {
+ kthread_stop(ctxt->bg_thread);
+ ctxt->bg_thread = NULL;
+ }
+}
+
+static void yaffs_write_super(struct super_block *sb)
+{
+ unsigned request_checkpoint = (yaffs_auto_checkpoint >= 2);
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND,
+ "yaffs_write_super%s",
+ request_checkpoint ? " checkpt" : "");
+
+ yaffs_do_sync_fs(sb, request_checkpoint);
+
+}
+
+static int yaffs_sync_fs(struct super_block *sb, int wait)
+{
+ unsigned request_checkpoint = (yaffs_auto_checkpoint >= 1);
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC,
+ "yaffs_sync_fs%s", request_checkpoint ? " checkpt" : "");
+
+ yaffs_do_sync_fs(sb, request_checkpoint);
+
+ return 0;
+}
+
+
+static LIST_HEAD(yaffs_context_list);
+struct mutex yaffs_context_lock;
+
+
+
+struct yaffs_options {
+ int inband_tags;
+ int skip_checkpoint_read;
+ int skip_checkpoint_write;
+ int no_cache;
+ int tags_ecc_on;
+ int tags_ecc_overridden;
+ int lazy_loading_enabled;
+ int lazy_loading_overridden;
+ int empty_lost_and_found;
+ int empty_lost_and_found_overridden;
+};
+
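+/*
+ * The option names handled below are passed to mount as a comma-separated
+ * string, for example (device node and mount point are placeholders only):
+ *
+ *   mount -t yaffs2 -o inband-tags,no-checkpoint /dev/mtdblockX /mnt/flash
+ *
+ * Any unknown option is reported as a "Bad mount option" and the mount fails.
+ */
+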
+#define MAX_OPT_LEN 30
+static int yaffs_parse_options(struct yaffs_options *options,
+ const char *options_str)
+{
+ char cur_opt[MAX_OPT_LEN + 1];
+ int p;
+ int error = 0;
+
+	/* Parse through the options, which are a comma-separated list */
+
+ while (options_str && *options_str && !error) {
+ memset(cur_opt, 0, MAX_OPT_LEN + 1);
+ p = 0;
+
+ while (*options_str == ',')
+ options_str++;
+
+ while (*options_str && *options_str != ',') {
+ if (p < MAX_OPT_LEN) {
+ cur_opt[p] = *options_str;
+ p++;
+ }
+ options_str++;
+ }
+
+ if (!strcmp(cur_opt, "inband-tags")) {
+ options->inband_tags = 1;
+ } else if (!strcmp(cur_opt, "tags-ecc-off")) {
+ options->tags_ecc_on = 0;
+ options->tags_ecc_overridden = 1;
+ } else if (!strcmp(cur_opt, "tags-ecc-on")) {
+ options->tags_ecc_on = 1;
+ options->tags_ecc_overridden = 1;
+ } else if (!strcmp(cur_opt, "lazy-loading-off")) {
+ options->lazy_loading_enabled = 0;
+ options->lazy_loading_overridden = 1;
+ } else if (!strcmp(cur_opt, "lazy-loading-on")) {
+ options->lazy_loading_enabled = 1;
+ options->lazy_loading_overridden = 1;
+ } else if (!strcmp(cur_opt, "empty-lost-and-found-off")) {
+ options->empty_lost_and_found = 0;
+ options->empty_lost_and_found_overridden = 1;
+ } else if (!strcmp(cur_opt, "empty-lost-and-found-on")) {
+ options->empty_lost_and_found = 1;
+ options->empty_lost_and_found_overridden = 1;
+ } else if (!strcmp(cur_opt, "no-cache")) {
+ options->no_cache = 1;
+ } else if (!strcmp(cur_opt, "no-checkpoint-read")) {
+ options->skip_checkpoint_read = 1;
+ } else if (!strcmp(cur_opt, "no-checkpoint-write")) {
+ options->skip_checkpoint_write = 1;
+ } else if (!strcmp(cur_opt, "no-checkpoint")) {
+ options->skip_checkpoint_read = 1;
+ options->skip_checkpoint_write = 1;
+ } else {
+ printk(KERN_INFO "yaffs: Bad mount option \"%s\"\n",
+ cur_opt);
+ error = 1;
+ }
+ }
+
+ return error;
+}
+
+static struct address_space_operations yaffs_file_address_operations = {
+ .readpage = yaffs_readpage,
+ .writepage = yaffs_writepage,
+ .write_begin = yaffs_write_begin,
+ .write_end = yaffs_write_end,
+};
+
+
+
+static const struct inode_operations yaffs_file_inode_operations = {
+ .setattr = yaffs_setattr,
+#ifdef CONFIG_YAFFS_XATTR
+ .setxattr = yaffs_setxattr,
+ .getxattr = yaffs_getxattr,
+ .listxattr = yaffs_listxattr,
+ .removexattr = yaffs_removexattr,
+#endif
+};
+
+static const struct inode_operations yaffs_symlink_inode_operations = {
+ .readlink = yaffs_readlink,
+ .follow_link = yaffs_follow_link,
+ .put_link = yaffs_put_link,
+ .setattr = yaffs_setattr,
+#ifdef CONFIG_YAFFS_XATTR
+ .setxattr = yaffs_setxattr,
+ .getxattr = yaffs_getxattr,
+ .listxattr = yaffs_listxattr,
+ .removexattr = yaffs_removexattr,
+#endif
+};
+
+static void yaffs_fill_inode_from_obj(struct inode *inode,
+ struct yaffs_obj *obj)
+{
+ if (inode && obj) {
+
+ /* Check mode against the variant type and attempt to repair if broken. */
+ u32 mode = obj->yst_mode;
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ if (!S_ISREG(mode)) {
+ obj->yst_mode &= ~S_IFMT;
+ obj->yst_mode |= S_IFREG;
+ }
+
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ if (!S_ISLNK(mode)) {
+ obj->yst_mode &= ~S_IFMT;
+ obj->yst_mode |= S_IFLNK;
+ }
+
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ if (!S_ISDIR(mode)) {
+ obj->yst_mode &= ~S_IFMT;
+ obj->yst_mode |= S_IFDIR;
+ }
+
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ default:
+ /* TODO? */
+ break;
+ }
+
+ inode->i_flags |= S_NOATIME;
+
+ inode->i_ino = obj->obj_id;
+ inode->i_mode = obj->yst_mode;
+ inode->i_uid = obj->yst_uid;
+ inode->i_gid = obj->yst_gid;
+
+ inode->i_rdev = old_decode_dev(obj->yst_rdev);
+
+ inode->i_atime.tv_sec = (time_t) (obj->yst_atime);
+ inode->i_atime.tv_nsec = 0;
+ inode->i_mtime.tv_sec = (time_t) obj->yst_mtime;
+ inode->i_mtime.tv_nsec = 0;
+ inode->i_ctime.tv_sec = (time_t) obj->yst_ctime;
+ inode->i_ctime.tv_nsec = 0;
+ inode->i_size = yaffs_get_obj_length(obj);
+ inode->i_blocks = (inode->i_size + 511) >> 9;
+
+ inode->i_nlink = yaffs_get_obj_link_count(obj);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_fill_inode mode %x uid %d gid %d size %d count %d",
+ inode->i_mode, inode->i_uid, inode->i_gid,
+ (int)inode->i_size, atomic_read(&inode->i_count));
+
+ switch (obj->yst_mode & S_IFMT) {
+ default: /* fifo, device or socket */
+ init_special_inode(inode, obj->yst_mode,
+ old_decode_dev(obj->yst_rdev));
+ break;
+ case S_IFREG: /* file */
+ inode->i_op = &yaffs_file_inode_operations;
+ inode->i_fop = &yaffs_file_operations;
+ inode->i_mapping->a_ops =
+ &yaffs_file_address_operations;
+ break;
+ case S_IFDIR: /* directory */
+ inode->i_op = &yaffs_dir_inode_operations;
+ inode->i_fop = &yaffs_dir_operations;
+ break;
+ case S_IFLNK: /* symlink */
+ inode->i_op = &yaffs_symlink_inode_operations;
+ break;
+ }
+
+ yaffs_inode_to_obj_lv(inode) = obj;
+
+ obj->my_inode = inode;
+
+ } else {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_fill_inode invalid parameters");
+ }
+}
+
+static void yaffs_put_super(struct super_block *sb)
+{
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_put_super");
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
+ "Shutting down yaffs background thread");
+ yaffs_bg_stop(dev);
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
+ "yaffs background thread shut down");
+
+ yaffs_gross_lock(dev);
+
+ yaffs_flush_super(sb, 1);
+
+ if (yaffs_dev_to_lc(dev)->put_super_fn)
+ yaffs_dev_to_lc(dev)->put_super_fn(sb);
+
+ yaffs_deinitialise(dev);
+
+ yaffs_gross_unlock(dev);
+ mutex_lock(&yaffs_context_lock);
+ list_del_init(&(yaffs_dev_to_lc(dev)->context_list));
+ mutex_unlock(&yaffs_context_lock);
+
+ if (yaffs_dev_to_lc(dev)->spare_buffer) {
+ kfree(yaffs_dev_to_lc(dev)->spare_buffer);
+ yaffs_dev_to_lc(dev)->spare_buffer = NULL;
+ }
+
+ kfree(dev);
+}
+
+static void yaffs_mtd_put_super(struct super_block *sb)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(yaffs_super_to_dev(sb));
+
+ if (mtd->sync)
+ mtd->sync(mtd);
+
+ put_mtd_device(mtd);
+}
+
+static const struct super_operations yaffs_super_ops = {
+ .statfs = yaffs_statfs,
+ .put_super = yaffs_put_super,
+ .evict_inode = yaffs_evict_inode,
+ .sync_fs = yaffs_sync_fs,
+ .write_super = yaffs_write_super,
+};
+
+static struct super_block *yaffs_internal_read_super(int yaffs_version,
+ struct super_block *sb,
+ void *data, int silent)
+{
+ int n_blocks;
+ struct inode *inode = NULL;
+ struct dentry *root;
+ struct yaffs_dev *dev = 0;
+ char devname_buf[BDEVNAME_SIZE + 1];
+ struct mtd_info *mtd;
+ int err;
+ char *data_str = (char *)data;
+ struct yaffs_linux_context *context = NULL;
+ struct yaffs_param *param;
+
+ int read_only = 0;
+
+ struct yaffs_options options;
+
+ unsigned mount_id;
+ int found;
+ struct yaffs_linux_context *context_iterator;
+ struct list_head *l;
+
+ sb->s_magic = YAFFS_MAGIC;
+ sb->s_op = &yaffs_super_ops;
+ sb->s_flags |= MS_NOATIME;
+
+ read_only = ((sb->s_flags & MS_RDONLY) != 0);
+
+ sb->s_export_op = &yaffs_export_ops;
+
+ if (!sb)
+ printk(KERN_INFO "yaffs: sb is NULL\n");
+ else if (!sb->s_dev)
+ printk(KERN_INFO "yaffs: sb->s_dev is NULL\n");
+ else if (!yaffs_devname(sb, devname_buf))
+ printk(KERN_INFO "yaffs: devname is NULL\n");
+ else
+ printk(KERN_INFO "yaffs: dev is %d name is \"%s\" %s\n",
+ sb->s_dev,
+ yaffs_devname(sb, devname_buf), read_only ? "ro" : "rw");
+
+ if (!data_str)
+ data_str = "";
+
+ printk(KERN_INFO "yaffs: passed flags \"%s\"\n", data_str);
+
+ memset(&options, 0, sizeof(options));
+
+ if (yaffs_parse_options(&options, data_str)) {
+ /* Option parsing failed */
+ return NULL;
+ }
+
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_read_super: Using yaffs%d", yaffs_version);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_read_super: block size %d", (int)(sb->s_blocksize));
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Attempting MTD mount of %u.%u,\"%s\"",
+ MAJOR(sb->s_dev), MINOR(sb->s_dev),
+ yaffs_devname(sb, devname_buf));
+
+ /* Check it's an mtd device..... */
+ if (MAJOR(sb->s_dev) != MTD_BLOCK_MAJOR)
+ return NULL; /* This isn't an mtd device */
+
+ /* Get the device */
+ mtd = get_mtd_device(NULL, MINOR(sb->s_dev));
+ if (!mtd) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "MTD device #%u doesn't appear to exist",
+ MINOR(sb->s_dev));
+ return NULL;
+ }
+ /* Check it's NAND */
+ if (mtd->type != MTD_NANDFLASH) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"MTD device is not NAND, its type is %d",
+ mtd->type);
+ return NULL;
+ }
+
+ yaffs_trace(YAFFS_TRACE_OS, " erase %p", mtd->erase);
+ yaffs_trace(YAFFS_TRACE_OS, " read %p", mtd->read);
+ yaffs_trace(YAFFS_TRACE_OS, " write %p", mtd->write);
+ yaffs_trace(YAFFS_TRACE_OS, " readoob %p", mtd->read_oob);
+ yaffs_trace(YAFFS_TRACE_OS, " writeoob %p", mtd->write_oob);
+ yaffs_trace(YAFFS_TRACE_OS, " block_isbad %p", mtd->block_isbad);
+ yaffs_trace(YAFFS_TRACE_OS, " block_markbad %p", mtd->block_markbad);
+ yaffs_trace(YAFFS_TRACE_OS, " %s %d", WRITE_SIZE_STR, WRITE_SIZE(mtd));
+ yaffs_trace(YAFFS_TRACE_OS, " oobsize %d", mtd->oobsize);
+ yaffs_trace(YAFFS_TRACE_OS, " erasesize %d", mtd->erasesize);
+ yaffs_trace(YAFFS_TRACE_OS, " size %lld", mtd->size);
+
+#ifdef CONFIG_YAFFS_AUTO_YAFFS2
+
+ if (yaffs_version == 1 && WRITE_SIZE(mtd) >= 2048) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs2");
+ yaffs_version = 2;
+ }
+
+ /* Added NCB 26/5/2006 for completeness */
+ if (yaffs_version == 2 && !options.inband_tags
+ && WRITE_SIZE(mtd) == 512) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs1");
+ yaffs_version = 1;
+ }
+#endif
+
+ if (yaffs_version == 2) {
+ /* Check for version 2 style functions */
+ if (!mtd->erase ||
+ !mtd->block_isbad ||
+ !mtd->block_markbad ||
+ !mtd->read ||
+ !mtd->write || !mtd->read_oob || !mtd->write_oob) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "MTD device does not support required functions");
+ return NULL;
+ }
+
+ if ((WRITE_SIZE(mtd) < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
+ mtd->oobsize < YAFFS_MIN_YAFFS2_SPARE_SIZE) &&
+ !options.inband_tags) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "MTD device does not have the right page sizes");
+ return NULL;
+ }
+ } else {
+ /* Check for V1 style functions */
+ if (!mtd->erase ||
+ !mtd->read ||
+ !mtd->write || !mtd->read_oob || !mtd->write_oob) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "MTD device does not support required functions");
+ return NULL;
+ }
+
+ if (WRITE_SIZE(mtd) < YAFFS_BYTES_PER_CHUNK ||
+ mtd->oobsize != YAFFS_BYTES_PER_SPARE) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+					"MTD device does not have the right page sizes");
+ return NULL;
+ }
+ }
+
+ /* OK, so if we got here, we have an MTD that's NAND and looks
+	 * like it has the right capabilities.
+ * Set the struct yaffs_dev up for mtd
+ */
+
+ if (!read_only && !(mtd->flags & MTD_WRITEABLE)) {
+ read_only = 1;
+ printk(KERN_INFO
+		       "yaffs: mtd is read only, setting superblock read only\n");
+ sb->s_flags |= MS_RDONLY;
+ }
+
+ dev = kmalloc(sizeof(struct yaffs_dev), GFP_KERNEL);
+ context = kmalloc(sizeof(struct yaffs_linux_context), GFP_KERNEL);
+
+ if (!dev || !context) {
+ if (dev)
+ kfree(dev);
+ if (context)
+ kfree(context);
+ dev = NULL;
+ context = NULL;
+ }
+
+ if (!dev) {
+		/* Deep shit: could not allocate device structure */
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs_read_super failed trying to allocate yaffs_dev");
+ return NULL;
+ }
+ memset(dev, 0, sizeof(struct yaffs_dev));
+ param = &(dev->param);
+
+ memset(context, 0, sizeof(struct yaffs_linux_context));
+ dev->os_context = context;
+ INIT_LIST_HEAD(&(context->context_list));
+ context->dev = dev;
+ context->super = sb;
+
+ dev->read_only = read_only;
+
+ sb->s_fs_info = dev;
+
+ dev->driver_context = mtd;
+ param->name = mtd->name;
+
+ /* Set up the memory size parameters.... */
+
+ n_blocks =
+ YCALCBLOCKS(mtd->size,
+ (YAFFS_CHUNKS_PER_BLOCK * YAFFS_BYTES_PER_CHUNK));
+
+ param->start_block = 0;
+ param->end_block = n_blocks - 1;
+ param->chunks_per_block = YAFFS_CHUNKS_PER_BLOCK;
+ param->total_bytes_per_chunk = YAFFS_BYTES_PER_CHUNK;
+ param->n_reserved_blocks = 5;
+ param->n_caches = (options.no_cache) ? 0 : 10;
+ param->inband_tags = options.inband_tags;
+
+#ifdef CONFIG_YAFFS_DISABLE_LAZY_LOAD
+ param->disable_lazy_load = 1;
+#endif
+#ifdef CONFIG_YAFFS_XATTR
+ param->enable_xattr = 1;
+#endif
+ if (options.lazy_loading_overridden)
+ param->disable_lazy_load = !options.lazy_loading_enabled;
+
+#ifdef CONFIG_YAFFS_DISABLE_TAGS_ECC
+ param->no_tags_ecc = 1;
+#endif
+
+#ifdef CONFIG_YAFFS_DISABLE_BACKGROUND
+#else
+ param->defered_dir_update = 1;
+#endif
+
+ if (options.tags_ecc_overridden)
+ param->no_tags_ecc = !options.tags_ecc_on;
+
+#ifdef CONFIG_YAFFS_EMPTY_LOST_AND_FOUND
+ param->empty_lost_n_found = 1;
+#endif
+
+#ifdef CONFIG_YAFFS_DISABLE_BLOCK_REFRESHING
+ param->refresh_period = 0;
+#else
+ param->refresh_period = 500;
+#endif
+
+#ifdef CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED
+ param->always_check_erased = 1;
+#endif
+
+ if (options.empty_lost_and_found_overridden)
+ param->empty_lost_n_found = options.empty_lost_and_found;
+
+ /* ... and the functions. */
+ if (yaffs_version == 2) {
+ param->write_chunk_tags_fn = nandmtd2_write_chunk_tags;
+ param->read_chunk_tags_fn = nandmtd2_read_chunk_tags;
+ param->bad_block_fn = nandmtd2_mark_block_bad;
+ param->query_block_fn = nandmtd2_query_block;
+ yaffs_dev_to_lc(dev)->spare_buffer =
+ kmalloc(mtd->oobsize, GFP_NOFS);
+ param->is_yaffs2 = 1;
+ param->total_bytes_per_chunk = mtd->writesize;
+ param->chunks_per_block = mtd->erasesize / mtd->writesize;
+ n_blocks = YCALCBLOCKS(mtd->size, mtd->erasesize);
+
+ param->start_block = 0;
+ param->end_block = n_blocks - 1;
+ } else {
+ /* use the MTD interface in yaffs_mtdif1.c */
+ param->write_chunk_tags_fn = nandmtd1_write_chunk_tags;
+ param->read_chunk_tags_fn = nandmtd1_read_chunk_tags;
+ param->bad_block_fn = nandmtd1_mark_block_bad;
+ param->query_block_fn = nandmtd1_query_block;
+ param->is_yaffs2 = 0;
+ }
+ /* ... and common functions */
+ param->erase_fn = nandmtd_erase_block;
+ param->initialise_flash_fn = nandmtd_initialise;
+
+ yaffs_dev_to_lc(dev)->put_super_fn = yaffs_mtd_put_super;
+
+ param->sb_dirty_fn = yaffs_touch_super;
+ param->gc_control = yaffs_gc_control_callback;
+
+ yaffs_dev_to_lc(dev)->super = sb;
+
+#ifndef CONFIG_YAFFS_DOES_ECC
+ param->use_nand_ecc = 1;
+#endif
+
+ param->skip_checkpt_rd = options.skip_checkpoint_read;
+ param->skip_checkpt_wr = options.skip_checkpoint_write;
+
+ mutex_lock(&yaffs_context_lock);
+ /* Get a mount id */
+ found = 0;
+ for (mount_id = 0; !found; mount_id++) {
+ found = 1;
+ list_for_each(l, &yaffs_context_list) {
+ context_iterator =
+ list_entry(l, struct yaffs_linux_context,
+ context_list);
+ if (context_iterator->mount_id == mount_id)
+ found = 0;
+ }
+ }
+ context->mount_id = mount_id;
+
+ list_add_tail(&(yaffs_dev_to_lc(dev)->context_list),
+ &yaffs_context_list);
+ mutex_unlock(&yaffs_context_lock);
+
+ /* Directory search handling... */
+ INIT_LIST_HEAD(&(yaffs_dev_to_lc(dev)->search_contexts));
+ param->remove_obj_fn = yaffs_remove_obj_callback;
+
+ mutex_init(&(yaffs_dev_to_lc(dev)->gross_lock));
+
+ yaffs_gross_lock(dev);
+
+ err = yaffs_guts_initialise(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_read_super: guts initialised %s",
+ (err == YAFFS_OK) ? "OK" : "FAILED");
+
+ if (err == YAFFS_OK)
+ yaffs_bg_start(dev);
+
+ if (!context->bg_thread)
+ param->defered_dir_update = 0;
+
+ /* Release lock before yaffs_get_inode() */
+ yaffs_gross_unlock(dev);
+
+ /* Create root inode */
+ if (err == YAFFS_OK)
+ inode = yaffs_get_inode(sb, S_IFDIR | 0755, 0, yaffs_root(dev));
+
+ if (!inode)
+ return NULL;
+
+ inode->i_op = &yaffs_dir_inode_operations;
+ inode->i_fop = &yaffs_dir_operations;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: got root inode");
+
+ root = d_alloc_root(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: d_alloc_root done");
+
+ if (!root) {
+ iput(inode);
+ return NULL;
+ }
+ sb->s_root = root;
+ sb->s_dirt = !dev->is_checkpointed;
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs_read_super: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: done");
+ return sb;
+}
+
+static int yaffs_internal_read_super_mtd(struct super_block *sb, void *data,
+ int silent)
+{
+ return yaffs_internal_read_super(1, sb, data, silent) ? 0 : -EINVAL;
+}
+
+static int yaffs_read_super(struct file_system_type *fs,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt)
+{
+
+ return get_sb_bdev(fs, flags, dev_name, data,
+ yaffs_internal_read_super_mtd, mnt);
+}
+
+static struct file_system_type yaffs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "yaffs",
+ .get_sb = yaffs_read_super,
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+};
+
+#ifdef CONFIG_YAFFS_YAFFS2
+
+static int yaffs2_internal_read_super_mtd(struct super_block *sb, void *data,
+ int silent)
+{
+ return yaffs_internal_read_super(2, sb, data, silent) ? 0 : -EINVAL;
+}
+
+static int yaffs2_read_super(struct file_system_type *fs,
+ int flags, const char *dev_name, void *data,
+ struct vfsmount *mnt)
+{
+ return get_sb_bdev(fs, flags, dev_name, data,
+ yaffs2_internal_read_super_mtd, mnt);
+}
+
+static struct file_system_type yaffs2_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "yaffs2",
+ .get_sb = yaffs2_read_super,
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+};
+#endif /* CONFIG_YAFFS_YAFFS2 */
+
+static struct proc_dir_entry *my_proc_entry;
+
+static char *yaffs_dump_dev_part0(char *buf, struct yaffs_dev *dev)
+{
+ struct yaffs_param *param = &dev->param;
+ buf += sprintf(buf, "start_block........... %d\n", param->start_block);
+ buf += sprintf(buf, "end_block............. %d\n", param->end_block);
+ buf += sprintf(buf, "total_bytes_per_chunk. %d\n",
+ param->total_bytes_per_chunk);
+ buf += sprintf(buf, "use_nand_ecc.......... %d\n",
+ param->use_nand_ecc);
+ buf += sprintf(buf, "no_tags_ecc........... %d\n", param->no_tags_ecc);
+ buf += sprintf(buf, "is_yaffs2............. %d\n", param->is_yaffs2);
+ buf += sprintf(buf, "inband_tags........... %d\n", param->inband_tags);
+ buf += sprintf(buf, "empty_lost_n_found.... %d\n",
+ param->empty_lost_n_found);
+ buf += sprintf(buf, "disable_lazy_load..... %d\n",
+ param->disable_lazy_load);
+ buf += sprintf(buf, "refresh_period........ %d\n",
+ param->refresh_period);
+ buf += sprintf(buf, "n_caches.............. %d\n", param->n_caches);
+ buf += sprintf(buf, "n_reserved_blocks..... %d\n",
+ param->n_reserved_blocks);
+ buf += sprintf(buf, "always_check_erased... %d\n",
+ param->always_check_erased);
+
+ return buf;
+}
+
+static char *yaffs_dump_dev_part1(char *buf, struct yaffs_dev *dev)
+{
+ buf +=
+ sprintf(buf, "data_bytes_per_chunk.. %d\n",
+ dev->data_bytes_per_chunk);
+ buf += sprintf(buf, "chunk_grp_bits........ %d\n", dev->chunk_grp_bits);
+ buf += sprintf(buf, "chunk_grp_size........ %d\n", dev->chunk_grp_size);
+ buf +=
+ sprintf(buf, "n_erased_blocks....... %d\n", dev->n_erased_blocks);
+ buf +=
+ sprintf(buf, "blocks_in_checkpt..... %d\n", dev->blocks_in_checkpt);
+ buf += sprintf(buf, "\n");
+ buf += sprintf(buf, "n_tnodes.............. %d\n", dev->n_tnodes);
+ buf += sprintf(buf, "n_obj................. %d\n", dev->n_obj);
+ buf += sprintf(buf, "n_free_chunks......... %d\n", dev->n_free_chunks);
+ buf += sprintf(buf, "\n");
+ buf += sprintf(buf, "n_page_writes......... %u\n", dev->n_page_writes);
+ buf += sprintf(buf, "n_page_reads.......... %u\n", dev->n_page_reads);
+ buf += sprintf(buf, "n_erasures............ %u\n", dev->n_erasures);
+ buf += sprintf(buf, "n_gc_copies........... %u\n", dev->n_gc_copies);
+ buf += sprintf(buf, "all_gcs............... %u\n", dev->all_gcs);
+ buf +=
+ sprintf(buf, "passive_gc_count...... %u\n", dev->passive_gc_count);
+ buf +=
+ sprintf(buf, "oldest_dirty_gc_count. %u\n",
+ dev->oldest_dirty_gc_count);
+ buf += sprintf(buf, "n_gc_blocks........... %u\n", dev->n_gc_blocks);
+ buf += sprintf(buf, "bg_gcs................ %u\n", dev->bg_gcs);
+ buf +=
+ sprintf(buf, "n_retired_writes...... %u\n", dev->n_retired_writes);
+ buf +=
+ sprintf(buf, "n_retired_blocks...... %u\n", dev->n_retired_blocks);
+ buf += sprintf(buf, "n_ecc_fixed........... %u\n", dev->n_ecc_fixed);
+ buf += sprintf(buf, "n_ecc_unfixed......... %u\n", dev->n_ecc_unfixed);
+ buf +=
+ sprintf(buf, "n_tags_ecc_fixed...... %u\n", dev->n_tags_ecc_fixed);
+ buf +=
+ sprintf(buf, "n_tags_ecc_unfixed.... %u\n",
+ dev->n_tags_ecc_unfixed);
+ buf += sprintf(buf, "cache_hits............ %u\n", dev->cache_hits);
+ buf +=
+ sprintf(buf, "n_deleted_files....... %u\n", dev->n_deleted_files);
+ buf +=
+ sprintf(buf, "n_unlinked_files...... %u\n", dev->n_unlinked_files);
+ buf += sprintf(buf, "refresh_count......... %u\n", dev->refresh_count);
+ buf += sprintf(buf, "n_bg_deletions........ %u\n", dev->n_bg_deletions);
+
+ return buf;
+}
+
+static int yaffs_proc_read(char *page,
+ char **start,
+ off_t offset, int count, int *eof, void *data)
+{
+ struct list_head *item;
+ char *buf = page;
+ int step = offset;
+ int n = 0;
+
+	/* Get proc_file_read() to step 'offset' by one on each successive call.
+ * We use 'offset' (*ppos) to indicate where we are in dev_list.
+ * This also assumes the user has posted a read buffer large
+ * enough to hold the complete output; but that's life in /proc.
+ */
+
+ *(int *)start = 1;
+
+ /* Print header first */
+ if (step == 0)
+ buf += sprintf(buf, "YAFFS built:" __DATE__ " " __TIME__ "\n");
+ else if (step == 1)
+ buf += sprintf(buf, "\n");
+ else {
+ step -= 2;
+
+ mutex_lock(&yaffs_context_lock);
+
+ /* Locate and print the Nth entry. Order N-squared but N is small. */
+ list_for_each(item, &yaffs_context_list) {
+ struct yaffs_linux_context *dc =
+ list_entry(item, struct yaffs_linux_context,
+ context_list);
+ struct yaffs_dev *dev = dc->dev;
+
+ if (n < (step & ~1)) {
+ n += 2;
+ continue;
+ }
+ if ((step & 1) == 0) {
+ buf +=
+ sprintf(buf, "\nDevice %d \"%s\"\n", n,
+ dev->param.name);
+ buf = yaffs_dump_dev_part0(buf, dev);
+ } else {
+ buf = yaffs_dump_dev_part1(buf, dev);
+ }
+
+ break;
+ }
+ mutex_unlock(&yaffs_context_lock);
+ }
+
+ return buf - page < count ? buf - page : count;
+}
+
+
+/**
+ * Set the verbosity of the warnings and error messages.
+ *
+ * Note that the names can only be a..z or _ with the current code.
+ */
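+
+/*
+ * For example, via the proc entry registered in init_yaffs_fs() (assumed
+ * here to appear as /proc/yaffs), the mask can be changed at run time
+ * (as root):
+ *
+ *   echo "+mtd -os" > /proc/yaffs     add mtd tracing, drop os tracing
+ *   echo "=all" > /proc/yaffs         switch every trace flag on
+ *   echo "512" > /proc/yaffs          numeric masks are accepted as well
+ *
+ * Each token is handled by yaffs_proc_write_trace_options() below.
+ */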
+
+static struct {
+ char *mask_name;
+ unsigned mask_bitfield;
+} mask_flags[] = {
+ {"allocate", YAFFS_TRACE_ALLOCATE},
+ {"always", YAFFS_TRACE_ALWAYS},
+ {"background", YAFFS_TRACE_BACKGROUND},
+ {"bad_blocks", YAFFS_TRACE_BAD_BLOCKS},
+ {"buffers", YAFFS_TRACE_BUFFERS},
+ {"bug", YAFFS_TRACE_BUG},
+ {"checkpt", YAFFS_TRACE_CHECKPOINT},
+ {"deletion", YAFFS_TRACE_DELETION},
+ {"erase", YAFFS_TRACE_ERASE},
+ {"error", YAFFS_TRACE_ERROR},
+ {"gc_detail", YAFFS_TRACE_GC_DETAIL},
+ {"gc", YAFFS_TRACE_GC},
+ {"lock", YAFFS_TRACE_LOCK},
+ {"mtd", YAFFS_TRACE_MTD},
+ {"nandaccess", YAFFS_TRACE_NANDACCESS},
+ {"os", YAFFS_TRACE_OS},
+ {"scan_debug", YAFFS_TRACE_SCAN_DEBUG},
+ {"scan", YAFFS_TRACE_SCAN},
+ {"mount", YAFFS_TRACE_MOUNT},
+ {"tracing", YAFFS_TRACE_TRACING},
+ {"sync", YAFFS_TRACE_SYNC},
+ {"write", YAFFS_TRACE_WRITE},
+ {"verify", YAFFS_TRACE_VERIFY},
+ {"verify_nand", YAFFS_TRACE_VERIFY_NAND},
+ {"verify_full", YAFFS_TRACE_VERIFY_FULL},
+ {"verify_all", YAFFS_TRACE_VERIFY_ALL},
+ {"all", 0xffffffff},
+ {"none", 0},
+ {NULL, 0},
+};
+
+#define MAX_MASK_NAME_LENGTH 40
+static int yaffs_proc_write_trace_options(struct file *file, const char *buf,
+ unsigned long count, void *data)
+{
+ unsigned rg = 0, mask_bitfield;
+ char *end;
+ char *mask_name;
+ const char *x;
+ char substring[MAX_MASK_NAME_LENGTH + 1];
+ int i;
+ int done = 0;
+ int add, len = 0;
+ int pos = 0;
+
+ rg = yaffs_trace_mask;
+
+ while (!done && (pos < count)) {
+ done = 1;
+ while ((pos < count) && isspace(buf[pos]))
+ pos++;
+
+ switch (buf[pos]) {
+ case '+':
+ case '-':
+ case '=':
+ add = buf[pos];
+ pos++;
+ break;
+
+ default:
+ add = ' ';
+ break;
+ }
+ mask_name = NULL;
+
+ mask_bitfield = simple_strtoul(buf + pos, &end, 0);
+
+ if (end > buf + pos) {
+ mask_name = "numeral";
+ len = end - (buf + pos);
+ pos += len;
+ done = 0;
+ } else {
+ for (x = buf + pos, i = 0;
+ (*x == '_' || (*x >= 'a' && *x <= 'z')) &&
+ i < MAX_MASK_NAME_LENGTH; x++, i++, pos++)
+ substring[i] = *x;
+ substring[i] = '\0';
+
+ for (i = 0; mask_flags[i].mask_name != NULL; i++) {
+ if (strcmp(substring, mask_flags[i].mask_name)
+ == 0) {
+ mask_name = mask_flags[i].mask_name;
+ mask_bitfield =
+ mask_flags[i].mask_bitfield;
+ done = 0;
+ break;
+ }
+ }
+ }
+
+ if (mask_name != NULL) {
+ done = 0;
+ switch (add) {
+ case '-':
+ rg &= ~mask_bitfield;
+ break;
+ case '+':
+ rg |= mask_bitfield;
+ break;
+ case '=':
+ rg = mask_bitfield;
+ break;
+ default:
+ rg |= mask_bitfield;
+ break;
+ }
+ }
+ }
+
+ yaffs_trace_mask = rg | YAFFS_TRACE_ALWAYS;
+
+ printk(KERN_DEBUG "new trace = 0x%08X\n", yaffs_trace_mask);
+
+ if (rg & YAFFS_TRACE_ALWAYS) {
+ for (i = 0; mask_flags[i].mask_name != NULL; i++) {
+ char flag;
+ flag = ((rg & mask_flags[i].mask_bitfield) ==
+ mask_flags[i].mask_bitfield) ? '+' : '-';
+ printk(KERN_DEBUG "%c%s\n", flag,
+ mask_flags[i].mask_name);
+ }
+ }
+
+ return count;
+}
+
+static int yaffs_proc_write(struct file *file, const char *buf,
+ unsigned long count, void *data)
+{
+ return yaffs_proc_write_trace_options(file, buf, count, data);
+}
+
+/* Stuff to handle installation of file systems */
+struct file_system_to_install {
+ struct file_system_type *fst;
+ int installed;
+};
+
+static struct file_system_to_install fs_to_install[] = {
+ {&yaffs_fs_type, 0},
+ {&yaffs2_fs_type, 0},
+ {NULL, 0}
+};
+
+static int __init init_yaffs_fs(void)
+{
+ int error = 0;
+ struct file_system_to_install *fsinst;
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs built " __DATE__ " " __TIME__ " Installing.");
+
+#ifdef CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "\n\nYAFFS-WARNING CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED selected.\n\n\n");
+#endif
+
+ mutex_init(&yaffs_context_lock);
+
+ /* Install the proc_fs entries */
+ my_proc_entry = create_proc_entry("yaffs",
+ S_IRUGO | S_IFREG, YPROC_ROOT);
+
+ if (my_proc_entry) {
+ my_proc_entry->write_proc = yaffs_proc_write;
+ my_proc_entry->read_proc = yaffs_proc_read;
+ my_proc_entry->data = NULL;
+ } else {
+ return -ENOMEM;
+ }
+
+
+ /* Now add the file system entries */
+
+ fsinst = fs_to_install;
+
+ while (fsinst->fst && !error) {
+ error = register_filesystem(fsinst->fst);
+ if (!error)
+ fsinst->installed = 1;
+ fsinst++;
+ }
+
+ /* Any errors? uninstall */
+ if (error) {
+ fsinst = fs_to_install;
+
+ while (fsinst->fst) {
+ if (fsinst->installed) {
+ unregister_filesystem(fsinst->fst);
+ fsinst->installed = 0;
+ }
+ fsinst++;
+ }
+ }
+
+ return error;
+}
+
+static void __exit exit_yaffs_fs(void)
+{
+
+ struct file_system_to_install *fsinst;
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs built " __DATE__ " " __TIME__ " removing.");
+
+ remove_proc_entry("yaffs", YPROC_ROOT);
+
+ fsinst = fs_to_install;
+
+ while (fsinst->fst) {
+ if (fsinst->installed) {
+ unregister_filesystem(fsinst->fst);
+ fsinst->installed = 0;
+ }
+ fsinst++;
+ }
+}
+
+module_init(init_yaffs_fs)
+module_exit(exit_yaffs_fs)
+
+MODULE_DESCRIPTION("YAFFS2 - a NAND specific flash file system");
+MODULE_AUTHOR("Charles Manning, Aleph One Ltd., 2002-2010");
+MODULE_LICENSE("GPL");
diff --git a/fs/yaffs2/yaffs_yaffs1.c b/fs/yaffs2/yaffs_yaffs1.c
new file mode 100644
index 000000000000..9eb603082547
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs1.c
@@ -0,0 +1,433 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_yaffs1.h"
+#include "yportenv.h"
+#include "yaffs_trace.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_nand.h"
+#include "yaffs_attribs.h"
+
+int yaffs1_scan(struct yaffs_dev *dev)
+{
+ struct yaffs_ext_tags tags;
+ int blk;
+ int result;
+
+ int chunk;
+ int c;
+ int deleted;
+ enum yaffs_block_state state;
+ struct yaffs_obj *hard_list = NULL;
+ struct yaffs_block_info *bi;
+ u32 seq_number;
+ struct yaffs_obj_hdr *oh;
+ struct yaffs_obj *in;
+ struct yaffs_obj *parent;
+
+ int alloc_failed = 0;
+
+ struct yaffs_shadow_fixer *shadow_fixers = NULL;
+
+ u8 *chunk_data;
+
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "yaffs1_scan starts intstartblk %d intendblk %d...",
+ dev->internal_start_block, dev->internal_end_block);
+
+ chunk_data = yaffs_get_temp_buffer(dev, __LINE__);
+
+ dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
+
+ /* Scan all the blocks to determine their state */
+ bi = dev->block_info;
+ for (blk = dev->internal_start_block; blk <= dev->internal_end_block;
+ blk++) {
+ yaffs_clear_chunk_bits(dev, blk);
+ bi->pages_in_use = 0;
+ bi->soft_del_pages = 0;
+
+ yaffs_query_init_block_state(dev, blk, &state, &seq_number);
+
+ bi->block_state = state;
+ bi->seq_number = seq_number;
+
+ if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
+ bi->block_state = state = YAFFS_BLOCK_STATE_DEAD;
+
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG,
+ "Block scanning block %d state %d seq %d",
+ blk, state, seq_number);
+
+ if (state == YAFFS_BLOCK_STATE_DEAD) {
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+ "block %d is bad", blk);
+ } else if (state == YAFFS_BLOCK_STATE_EMPTY) {
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty ");
+ dev->n_erased_blocks++;
+ dev->n_free_chunks += dev->param.chunks_per_block;
+ }
+ bi++;
+ }
+
+ /* For each block.... */
+ for (blk = dev->internal_start_block;
+ !alloc_failed && blk <= dev->internal_end_block; blk++) {
+
+ cond_resched();
+
+ bi = yaffs_get_block_info(dev, blk);
+ state = bi->block_state;
+
+ deleted = 0;
+
+ /* For each chunk in each block that needs scanning.... */
+ for (c = 0; !alloc_failed && c < dev->param.chunks_per_block &&
+ state == YAFFS_BLOCK_STATE_NEEDS_SCANNING; c++) {
+ /* Read the tags and decide what to do */
+ chunk = blk * dev->param.chunks_per_block + c;
+
+ result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL,
+ &tags);
+
+ /* Let's have a good look at this chunk... */
+
+ if (tags.ecc_result == YAFFS_ECC_RESULT_UNFIXED
+ || tags.is_deleted) {
+ /* YAFFS1 only...
+ * A deleted chunk
+ */
+ deleted++;
+ dev->n_free_chunks++;
+ /*T((" %d %d deleted\n",blk,c)); */
+ } else if (!tags.chunk_used) {
+ /* An unassigned chunk in the block
+ * This means that either the block is empty or
+ * this is the one being allocated from
+ */
+
+ if (c == 0) {
+ /* We're looking at the first chunk in the block so the block is unused */
+ state = YAFFS_BLOCK_STATE_EMPTY;
+ dev->n_erased_blocks++;
+ } else {
+ /* this is the block being allocated from */
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ " Allocating from %d %d",
+ blk, c);
+ state = YAFFS_BLOCK_STATE_ALLOCATING;
+ dev->alloc_block = blk;
+ dev->alloc_page = c;
+ dev->alloc_block_finder = blk;
+ /* Set block finder here to encourage the allocator to go forth from here. */
+
+ }
+
+ dev->n_free_chunks +=
+ (dev->param.chunks_per_block - c);
+ } else if (tags.chunk_id > 0) {
+ /* chunk_id > 0 so it is a data chunk... */
+ unsigned int endpos;
+
+ yaffs_set_chunk_bit(dev, blk, c);
+ bi->pages_in_use++;
+
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id,
+ YAFFS_OBJECT_TYPE_FILE);
+ /* PutChunkIntoFile checks for a clash (two data chunks with
+ * the same chunk_id).
+ */
+
+ if (!in)
+ alloc_failed = 1;
+
+ if (in) {
+ if (!yaffs_put_chunk_in_file
+ (in, tags.chunk_id, chunk, 1))
+ alloc_failed = 1;
+ }
+
+ endpos =
+ (tags.chunk_id -
+ 1) * dev->data_bytes_per_chunk +
+ tags.n_bytes;
+ if (in
+ && in->variant_type ==
+ YAFFS_OBJECT_TYPE_FILE
+ && in->variant.file_variant.scanned_size <
+ endpos) {
+ in->variant.file_variant.scanned_size =
+ endpos;
+ if (!dev->param.use_header_file_size) {
+ in->variant.
+ file_variant.file_size =
+ in->variant.
+ file_variant.scanned_size;
+ }
+
+ }
+ /* T((" %d %d data %d %d\n",blk,c,tags.obj_id,tags.chunk_id)); */
+ } else {
+ /* chunk_id == 0, so it is an ObjectHeader.
+ * Thus, we read in the object header and make the object
+ */
+ yaffs_set_chunk_bit(dev, blk, c);
+ bi->pages_in_use++;
+
+ result = yaffs_rd_chunk_tags_nand(dev, chunk,
+ chunk_data,
+ NULL);
+
+ oh = (struct yaffs_obj_hdr *)chunk_data;
+
+ in = yaffs_find_by_number(dev, tags.obj_id);
+ if (in && in->variant_type != oh->type) {
+ /* This should not happen, but somehow
+					 * We've ended up with an obj_id that has been reused but not yet
+ * deleted, and worse still it has changed type. Delete the old object.
+ */
+
+ yaffs_del_obj(in);
+
+ in = 0;
+ }
+
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id,
+ oh->type);
+
+ if (!in)
+ alloc_failed = 1;
+
+ if (in && oh->shadows_obj > 0) {
+
+ struct yaffs_shadow_fixer *fixer;
+ fixer =
+ kmalloc(sizeof
+ (struct yaffs_shadow_fixer),
+ GFP_NOFS);
+ if (fixer) {
+ fixer->next = shadow_fixers;
+ shadow_fixers = fixer;
+ fixer->obj_id = tags.obj_id;
+ fixer->shadowed_id =
+ oh->shadows_obj;
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ " Shadow fixer: %d shadows %d",
+ fixer->obj_id,
+ fixer->shadowed_id);
+
+ }
+
+ }
+
+ if (in && in->valid) {
+ /* We have already filled this one. We have a duplicate and need to resolve it. */
+
+ unsigned existing_serial = in->serial;
+ unsigned new_serial =
+ tags.serial_number;
+
+ if (((existing_serial + 1) & 3) ==
+ new_serial) {
+					/* Use new one - destroy the existing one */
+ yaffs_chunk_del(dev,
+ in->hdr_chunk,
+ 1, __LINE__);
+ in->valid = 0;
+ } else {
+ /* Use existing - destroy this one. */
+ yaffs_chunk_del(dev, chunk, 1,
+ __LINE__);
+ }
+ }
+
+ if (in && !in->valid &&
+ (tags.obj_id == YAFFS_OBJECTID_ROOT ||
+ tags.obj_id ==
+ YAFFS_OBJECTID_LOSTNFOUND)) {
+ /* We only load some info, don't fiddle with directory structure */
+ in->valid = 1;
+ in->variant_type = oh->type;
+
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+ in->hdr_chunk = chunk;
+ in->serial = tags.serial_number;
+
+ } else if (in && !in->valid) {
+ /* we need to load this info */
+
+ in->valid = 1;
+ in->variant_type = oh->type;
+
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+ in->hdr_chunk = chunk;
+ in->serial = tags.serial_number;
+
+ yaffs_set_obj_name_from_oh(in, oh);
+ in->dirty = 0;
+
+ /* directory stuff...
+ * hook up to parent
+ */
+
+ parent =
+ yaffs_find_or_create_by_number
+ (dev, oh->parent_obj_id,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+ if (!parent)
+ alloc_failed = 1;
+ if (parent && parent->variant_type ==
+ YAFFS_OBJECT_TYPE_UNKNOWN) {
+ /* Set up as a directory */
+ parent->variant_type =
+ YAFFS_OBJECT_TYPE_DIRECTORY;
+ INIT_LIST_HEAD(&parent->
+ variant.dir_variant.children);
+ } else if (!parent
+ || parent->variant_type !=
+ YAFFS_OBJECT_TYPE_DIRECTORY) {
+ /* Hoosterman, another problem....
+ * We're trying to use a non-directory as a directory
+ */
+
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
+ );
+ parent = dev->lost_n_found;
+ }
+
+ yaffs_add_obj_to_dir(parent, in);
+
+ if (0 && (parent == dev->del_dir ||
+ parent ==
+ dev->unlinked_dir)) {
+ in->deleted = 1; /* If it is unlinked at start up then it wants deleting */
+ dev->n_deleted_files++;
+ }
+ /* Note re hardlinks.
+ * Since we might scan a hardlink before its equivalent object is scanned
+ * we put them all in a list.
+ * After scanning is complete, we should have all the objects, so we run through this
+ * list and fix up all the chains.
+ */
+
+ switch (in->variant_type) {
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* Todo got a problem */
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ if (dev->param.
+ use_header_file_size)
+
+ in->variant.
+ file_variant.file_size
+ = oh->file_size;
+
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ in->variant.
+ hardlink_variant.equiv_id =
+ oh->equiv_id;
+ in->hard_links.next =
+ (struct list_head *)
+ hard_list;
+ hard_list = in;
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ in->variant.symlink_variant.
+ alias =
+ yaffs_clone_str(oh->alias);
+ if (!in->variant.
+ symlink_variant.alias)
+ alloc_failed = 1;
+ break;
+ }
+
+ }
+ }
+ }
+
+ if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+ /* If we got this far while scanning, then the block is fully allocated. */
+ state = YAFFS_BLOCK_STATE_FULL;
+ }
+
+ if (state == YAFFS_BLOCK_STATE_ALLOCATING) {
+ /* If the block was partially allocated then treat it as fully allocated. */
+ state = YAFFS_BLOCK_STATE_FULL;
+ dev->alloc_block = -1;
+ }
+
+ bi->block_state = state;
+
+ /* Now let's see if it was dirty */
+ if (bi->pages_in_use == 0 &&
+ !bi->has_shrink_hdr &&
+ bi->block_state == YAFFS_BLOCK_STATE_FULL) {
+ yaffs_block_became_dirty(dev, blk);
+ }
+
+ }
+
+ /* Ok, we've done all the scanning.
+ * Fix up the hard link chains.
+	 * We should now have scanned all the objects, so it's time to add these
+	 * hardlinks.
+ */
+
+ yaffs_link_fixup(dev, hard_list);
+
+ /* Fix up any shadowed objects */
+ {
+ struct yaffs_shadow_fixer *fixer;
+ struct yaffs_obj *obj;
+
+ while (shadow_fixers) {
+ fixer = shadow_fixers;
+ shadow_fixers = fixer->next;
+ /* Complete the rename transaction by deleting the shadowed object
+ * then setting the object header to unshadowed.
+ */
+ obj = yaffs_find_by_number(dev, fixer->shadowed_id);
+ if (obj)
+ yaffs_del_obj(obj);
+
+ obj = yaffs_find_by_number(dev, fixer->obj_id);
+
+ if (obj)
+ yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
+
+ kfree(fixer);
+ }
+ }
+
+ yaffs_release_temp_buffer(dev, chunk_data, __LINE__);
+
+ if (alloc_failed)
+ return YAFFS_FAIL;
+
+ yaffs_trace(YAFFS_TRACE_SCAN, "yaffs1_scan ends");
+
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_yaffs1.h b/fs/yaffs2/yaffs_yaffs1.h
new file mode 100644
index 000000000000..db23e04973ba
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs1.h
@@ -0,0 +1,22 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_YAFFS1_H__
+#define __YAFFS_YAFFS1_H__
+
+#include "yaffs_guts.h"
+int yaffs1_scan(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_yaffs2.c b/fs/yaffs2/yaffs_yaffs2.c
new file mode 100644
index 000000000000..33397af7003d
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs2.c
@@ -0,0 +1,1598 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+#include "yaffs_yaffs2.h"
+#include "yaffs_checkptrw.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_nand.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_verify.h"
+#include "yaffs_attribs.h"
+
+/*
+ * Checkpoints are really of no benefit on very small partitions.
+ *
+ * To save space on small partitions don't bother with checkpoints unless
+ * the partition is at least this big.
+ */
+#define YAFFS_CHECKPOINT_MIN_BLOCKS 60
+
+#define YAFFS_SMALL_HOLE_THRESHOLD 4
+
+/*
+ * Oldest Dirty Sequence Number handling.
+ */
+
+/* yaffs_calc_oldest_dirty_seq()
+ * yaffs2_find_oldest_dirty_seq()
+ * Calculate the oldest dirty sequence number if we don't know it.
+ */
+void yaffs_calc_oldest_dirty_seq(struct yaffs_dev *dev)
+{
+ int i;
+ unsigned seq;
+ unsigned block_no = 0;
+ struct yaffs_block_info *b;
+
+ if (!dev->param.is_yaffs2)
+ return;
+
+ /* Find the oldest dirty sequence number. */
+ seq = dev->seq_number + 1;
+ b = dev->block_info;
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ if (b->block_state == YAFFS_BLOCK_STATE_FULL &&
+ (b->pages_in_use - b->soft_del_pages) <
+ dev->param.chunks_per_block && b->seq_number < seq) {
+ seq = b->seq_number;
+ block_no = i;
+ }
+ b++;
+ }
+
+ if (block_no) {
+ dev->oldest_dirty_seq = seq;
+ dev->oldest_dirty_block = block_no;
+ }
+
+}
+
+void yaffs2_find_oldest_dirty_seq(struct yaffs_dev *dev)
+{
+ if (!dev->param.is_yaffs2)
+ return;
+
+ if (!dev->oldest_dirty_seq)
+ yaffs_calc_oldest_dirty_seq(dev);
+}
+
+/*
+ * yaffs2_clear_oldest_dirty_seq()
+ * Called when a block is erased or marked bad (i.e. when its seq_number
+ * becomes invalid). If the value matches the oldest then we clear
+ * dev->oldest_dirty_seq to force its recomputation.
+ */
+void yaffs2_clear_oldest_dirty_seq(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi)
+{
+
+ if (!dev->param.is_yaffs2)
+ return;
+
+ if (!bi || bi->seq_number == dev->oldest_dirty_seq) {
+ dev->oldest_dirty_seq = 0;
+ dev->oldest_dirty_block = 0;
+ }
+}
+
+/*
+ * yaffs2_update_oldest_dirty_seq()
+ * Update the oldest dirty sequence number whenever we dirty a block.
+ * Only do this if the oldest_dirty_seq is actually being tracked.
+ */
+void yaffs2_update_oldest_dirty_seq(struct yaffs_dev *dev, unsigned block_no,
+ struct yaffs_block_info *bi)
+{
+ if (!dev->param.is_yaffs2)
+ return;
+
+ if (dev->oldest_dirty_seq) {
+ if (dev->oldest_dirty_seq > bi->seq_number) {
+ dev->oldest_dirty_seq = bi->seq_number;
+ dev->oldest_dirty_block = block_no;
+ }
+ }
+}
+
+int yaffs_block_ok_for_gc(struct yaffs_dev *dev, struct yaffs_block_info *bi)
+{
+
+ if (!dev->param.is_yaffs2)
+ return 1; /* disqualification only applies to yaffs2. */
+
+ if (!bi->has_shrink_hdr)
+ return 1; /* can gc */
+
+ yaffs2_find_oldest_dirty_seq(dev);
+
+ /* Can't do gc of this block if there are any blocks older than this one that have
+ * discarded pages.
+ */
+ return (bi->seq_number <= dev->oldest_dirty_seq);
+}
+
+/*
+ * yaffs2_find_refresh_block()
+ * periodically finds the oldest full block by sequence number for refreshing.
+ * Only for yaffs2.
+ */
+u32 yaffs2_find_refresh_block(struct yaffs_dev *dev)
+{
+ u32 b;
+
+ u32 oldest = 0;
+ u32 oldest_seq = 0;
+
+ struct yaffs_block_info *bi;
+
+ if (!dev->param.is_yaffs2)
+ return oldest;
+
+ /*
+ * If refresh period < 10 then refreshing is disabled.
+ */
+ if (dev->param.refresh_period < 10)
+ return oldest;
+
+ /*
+ * Fix broken values.
+ */
+ if (dev->refresh_skip > dev->param.refresh_period)
+ dev->refresh_skip = dev->param.refresh_period;
+
+ if (dev->refresh_skip > 0)
+ return oldest;
+
+ /*
+ * Refresh skip is now zero.
+ * We'll do a refresh this time around....
+ * Update the refresh skip and find the oldest block.
+ */
+ dev->refresh_skip = dev->param.refresh_period;
+ dev->refresh_count++;
+ bi = dev->block_info;
+ for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_FULL) {
+
+ if (oldest < 1 || bi->seq_number < oldest_seq) {
+ oldest = b;
+ oldest_seq = bi->seq_number;
+ }
+ }
+ bi++;
+ }
+
+ if (oldest > 0) {
+ yaffs_trace(YAFFS_TRACE_GC,
+ "GC refresh count %d selected block %d with seq_number %d",
+ dev->refresh_count, oldest, oldest_seq);
+ }
+
+ return oldest;
+}
+
+int yaffs2_checkpt_required(struct yaffs_dev *dev)
+{
+ int nblocks;
+
+ if (!dev->param.is_yaffs2)
+ return 0;
+
+ nblocks = dev->internal_end_block - dev->internal_start_block + 1;
+
+ return !dev->param.skip_checkpt_wr &&
+ !dev->read_only && (nblocks >= YAFFS_CHECKPOINT_MIN_BLOCKS);
+}
+
+int yaffs_calc_checkpt_blocks_required(struct yaffs_dev *dev)
+{
+ int retval;
+
+ if (!dev->param.is_yaffs2)
+ return 0;
+
+ if (!dev->checkpoint_blocks_required && yaffs2_checkpt_required(dev)) {
+ /* Not a valid value so recalculate */
+ int n_bytes = 0;
+ int n_blocks;
+ int dev_blocks =
+ (dev->param.end_block - dev->param.start_block + 1);
+
+ n_bytes += sizeof(struct yaffs_checkpt_validity);
+ n_bytes += sizeof(struct yaffs_checkpt_dev);
+ n_bytes += dev_blocks * sizeof(struct yaffs_block_info);
+ n_bytes += dev_blocks * dev->chunk_bit_stride;
+ n_bytes +=
+ (sizeof(struct yaffs_checkpt_obj) +
+ sizeof(u32)) * (dev->n_obj);
+ n_bytes += (dev->tnode_size + sizeof(u32)) * (dev->n_tnodes);
+ n_bytes += sizeof(struct yaffs_checkpt_validity);
+ n_bytes += sizeof(u32); /* checksum */
+
+ /* Round up (1 block) and add 2 blocks to allow for some bad blocks, so add 3 */
+
+ n_blocks =
+ (n_bytes /
+ (dev->data_bytes_per_chunk *
+ dev->param.chunks_per_block)) + 3;
+
+ dev->checkpoint_blocks_required = n_blocks;
+ }
+
+ retval = dev->checkpoint_blocks_required - dev->blocks_in_checkpt;
+ if (retval < 0)
+ retval = 0;
+ return retval;
+}
+
+/*--------------------- Checkpointing --------------------*/
+
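+/*
+ * Checkpoint stream layout, as written by yaffs2_wr_checkpt_data() below:
+ * validity marker (head), device info, block info, chunk bits,
+ * per-object records with their tnode trees, validity marker (tail),
+ * then a checksum of the whole stream.
+ */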
+static int yaffs2_wr_checkpt_validity_marker(struct yaffs_dev *dev, int head)
+{
+ struct yaffs_checkpt_validity cp;
+
+ memset(&cp, 0, sizeof(cp));
+
+ cp.struct_type = sizeof(cp);
+ cp.magic = YAFFS_MAGIC;
+ cp.version = YAFFS_CHECKPOINT_VERSION;
+ cp.head = (head) ? 1 : 0;
+
+ return (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp)) ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_validity_marker(struct yaffs_dev *dev, int head)
+{
+ struct yaffs_checkpt_validity cp;
+ int ok;
+
+ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+ if (ok)
+ ok = (cp.struct_type == sizeof(cp)) &&
+ (cp.magic == YAFFS_MAGIC) &&
+ (cp.version == YAFFS_CHECKPOINT_VERSION) &&
+ (cp.head == ((head) ? 1 : 0));
+ return ok ? 1 : 0;
+}
+
+static void yaffs2_dev_to_checkpt_dev(struct yaffs_checkpt_dev *cp,
+ struct yaffs_dev *dev)
+{
+ cp->n_erased_blocks = dev->n_erased_blocks;
+ cp->alloc_block = dev->alloc_block;
+ cp->alloc_page = dev->alloc_page;
+ cp->n_free_chunks = dev->n_free_chunks;
+
+ cp->n_deleted_files = dev->n_deleted_files;
+ cp->n_unlinked_files = dev->n_unlinked_files;
+ cp->n_bg_deletions = dev->n_bg_deletions;
+ cp->seq_number = dev->seq_number;
+
+}
+
+static void yaffs_checkpt_dev_to_dev(struct yaffs_dev *dev,
+ struct yaffs_checkpt_dev *cp)
+{
+ dev->n_erased_blocks = cp->n_erased_blocks;
+ dev->alloc_block = cp->alloc_block;
+ dev->alloc_page = cp->alloc_page;
+ dev->n_free_chunks = cp->n_free_chunks;
+
+ dev->n_deleted_files = cp->n_deleted_files;
+ dev->n_unlinked_files = cp->n_unlinked_files;
+ dev->n_bg_deletions = cp->n_bg_deletions;
+ dev->seq_number = cp->seq_number;
+}
+
+static int yaffs2_wr_checkpt_dev(struct yaffs_dev *dev)
+{
+ struct yaffs_checkpt_dev cp;
+ u32 n_bytes;
+ u32 n_blocks =
+ (dev->internal_end_block - dev->internal_start_block + 1);
+
+ int ok;
+
+ /* Write device runtime values */
+ yaffs2_dev_to_checkpt_dev(&cp, dev);
+ cp.struct_type = sizeof(cp);
+
+ ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+ /* Write block info */
+ if (ok) {
+ n_bytes = n_blocks * sizeof(struct yaffs_block_info);
+ ok = (yaffs2_checkpt_wr(dev, dev->block_info, n_bytes) ==
+ n_bytes);
+ }
+
+ /* Write chunk bits */
+ if (ok) {
+ n_bytes = n_blocks * dev->chunk_bit_stride;
+ ok = (yaffs2_checkpt_wr(dev, dev->chunk_bits, n_bytes) ==
+ n_bytes);
+ }
+ return ok ? 1 : 0;
+
+}
+
+static int yaffs2_rd_checkpt_dev(struct yaffs_dev *dev)
+{
+ struct yaffs_checkpt_dev cp;
+ u32 n_bytes;
+ u32 n_blocks =
+ (dev->internal_end_block - dev->internal_start_block + 1);
+
+ int ok;
+
+ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
+ if (!ok)
+ return 0;
+
+ if (cp.struct_type != sizeof(cp))
+ return 0;
+
+ yaffs_checkpt_dev_to_dev(dev, &cp);
+
+ n_bytes = n_blocks * sizeof(struct yaffs_block_info);
+
+ ok = (yaffs2_checkpt_rd(dev, dev->block_info, n_bytes) == n_bytes);
+
+ if (!ok)
+ return 0;
+ n_bytes = n_blocks * dev->chunk_bit_stride;
+
+ ok = (yaffs2_checkpt_rd(dev, dev->chunk_bits, n_bytes) == n_bytes);
+
+ return ok ? 1 : 0;
+}
+
+static void yaffs2_obj_checkpt_obj(struct yaffs_checkpt_obj *cp,
+ struct yaffs_obj *obj)
+{
+
+ cp->obj_id = obj->obj_id;
+ cp->parent_id = (obj->parent) ? obj->parent->obj_id : 0;
+ cp->hdr_chunk = obj->hdr_chunk;
+ cp->variant_type = obj->variant_type;
+ cp->deleted = obj->deleted;
+ cp->soft_del = obj->soft_del;
+ cp->unlinked = obj->unlinked;
+ cp->fake = obj->fake;
+ cp->rename_allowed = obj->rename_allowed;
+ cp->unlink_allowed = obj->unlink_allowed;
+ cp->serial = obj->serial;
+ cp->n_data_chunks = obj->n_data_chunks;
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
+ cp->size_or_equiv_obj = obj->variant.file_variant.file_size;
+ else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
+ cp->size_or_equiv_obj = obj->variant.hardlink_variant.equiv_id;
+}
+
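+/*
+ * yaffs2_checkpt_obj_to_obj()
+ * Rebuild an object from its checkpoint record and, if it has a parent,
+ * hook it back into the directory tree.
+ */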
+static int yaffs2_checkpt_obj_to_obj(struct yaffs_obj *obj,
+ struct yaffs_checkpt_obj *cp)
+{
+
+ struct yaffs_obj *parent;
+
+ if (obj->variant_type != cp->variant_type) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Checkpoint read object %d type %d chunk %d does not match existing object type %d",
+ cp->obj_id, cp->variant_type, cp->hdr_chunk,
+ obj->variant_type);
+ return 0;
+ }
+
+ obj->obj_id = cp->obj_id;
+
+ if (cp->parent_id)
+ parent = yaffs_find_or_create_by_number(obj->my_dev,
+ cp->parent_id,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+ else
+ parent = NULL;
+
+ if (parent) {
+ if (parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Checkpoint read object %d parent %d type %d chunk %d Parent type, %d, not directory",
+ cp->obj_id, cp->parent_id,
+ cp->variant_type, cp->hdr_chunk,
+ parent->variant_type);
+ return 0;
+ }
+ yaffs_add_obj_to_dir(parent, obj);
+ }
+
+ obj->hdr_chunk = cp->hdr_chunk;
+ obj->variant_type = cp->variant_type;
+ obj->deleted = cp->deleted;
+ obj->soft_del = cp->soft_del;
+ obj->unlinked = cp->unlinked;
+ obj->fake = cp->fake;
+ obj->rename_allowed = cp->rename_allowed;
+ obj->unlink_allowed = cp->unlink_allowed;
+ obj->serial = cp->serial;
+ obj->n_data_chunks = cp->n_data_chunks;
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
+ obj->variant.file_variant.file_size = cp->size_or_equiv_obj;
+ else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
+ obj->variant.hardlink_variant.equiv_id = cp->size_or_equiv_obj;
+
+ if (obj->hdr_chunk > 0)
+ obj->lazy_loaded = 1;
+ return 1;
+}
+
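+/*
+ * yaffs2_checkpt_tnode_worker()
+ * Recursively walk a file's tnode tree. At level 0 each tnode is written
+ * to the checkpoint preceded by the chunk-group base offset it covers.
+ */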
+static int yaffs2_checkpt_tnode_worker(struct yaffs_obj *in,
+ struct yaffs_tnode *tn, u32 level,
+ int chunk_offset)
+{
+ int i;
+ struct yaffs_dev *dev = in->my_dev;
+ int ok = 1;
+
+ if (tn) {
+ if (level > 0) {
+
+ for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) {
+ if (tn->internal[i]) {
+ ok = yaffs2_checkpt_tnode_worker(in,
+ tn->internal[i],
+ level - 1,
+ (chunk_offset <<
+ YAFFS_TNODES_INTERNAL_BITS) + i);
+ }
+ }
+ } else if (level == 0) {
+ u32 base_offset =
+ chunk_offset << YAFFS_TNODES_LEVEL0_BITS;
+ ok = (yaffs2_checkpt_wr
+ (dev, &base_offset,
+ sizeof(base_offset)) == sizeof(base_offset));
+ if (ok)
+ ok = (yaffs2_checkpt_wr
+ (dev, tn,
+ dev->tnode_size) == dev->tnode_size);
+ }
+ }
+
+ return ok;
+
+}
+
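+/*
+ * yaffs2_wr_checkpt_tnodes()
+ * Write out the tnode tree for a file object, terminated by an all-ones
+ * end marker so the reader knows where the tnode records stop.
+ */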
+static int yaffs2_wr_checkpt_tnodes(struct yaffs_obj *obj)
+{
+ u32 end_marker = ~0;
+ int ok = 1;
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE) {
+ ok = yaffs2_checkpt_tnode_worker(obj,
+ obj->variant.file_variant.top,
+ obj->variant.file_variant.
+ top_level, 0);
+ if (ok)
+ ok = (yaffs2_checkpt_wr
+ (obj->my_dev, &end_marker,
+ sizeof(end_marker)) == sizeof(end_marker));
+ }
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_tnodes(struct yaffs_obj *obj)
+{
+ u32 base_chunk;
+ int ok = 1;
+ struct yaffs_dev *dev = obj->my_dev;
+ struct yaffs_file_var *file_struct_ptr = &obj->variant.file_variant;
+ struct yaffs_tnode *tn;
+ int nread = 0;
+
+ ok = (yaffs2_checkpt_rd(dev, &base_chunk, sizeof(base_chunk)) ==
+ sizeof(base_chunk));
+
+ while (ok && (~base_chunk)) {
+ nread++;
+ /* Read level 0 tnode */
+
+ tn = yaffs_get_tnode(dev);
+ if (tn) {
+ ok = (yaffs2_checkpt_rd(dev, tn, dev->tnode_size) ==
+ dev->tnode_size);
+ } else {
+ ok = 0;
+ }
+
+ if (tn && ok)
+ ok = yaffs_add_find_tnode_0(dev,
+ file_struct_ptr,
+ base_chunk, tn) ? 1 : 0;
+
+ if (ok)
+ ok = (yaffs2_checkpt_rd
+ (dev, &base_chunk,
+ sizeof(base_chunk)) == sizeof(base_chunk));
+
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "Checkpoint read tnodes %d records, last %d. ok %d",
+ nread, base_chunk, ok);
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_wr_checkpt_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_checkpt_obj cp;
+ int i;
+ int ok = 1;
+ struct list_head *lh;
+
+ /* Iterate through the objects in each hash entry,
+ * dumping them to the checkpointing stream.
+ */
+
+ for (i = 0; ok && i < YAFFS_NOBJECT_BUCKETS; i++) {
+ list_for_each(lh, &dev->obj_bucket[i].list) {
+ if (lh) {
+ obj =
+ list_entry(lh, struct yaffs_obj, hash_link);
+ if (!obj->defered_free) {
+ yaffs2_obj_checkpt_obj(&cp, obj);
+ cp.struct_type = sizeof(cp);
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "Checkpoint write object %d parent %d type %d chunk %d obj addr %p",
+ cp.obj_id, cp.parent_id,
+ cp.variant_type, cp.hdr_chunk, obj);
+
+ ok = (yaffs2_checkpt_wr
+ (dev, &cp,
+ sizeof(cp)) == sizeof(cp));
+
+ if (ok
+ && obj->variant_type ==
+ YAFFS_OBJECT_TYPE_FILE)
+ ok = yaffs2_wr_checkpt_tnodes
+ (obj);
+ }
+ }
+ }
+ }
+
+ /* Dump end of list */
+ memset(&cp, 0xFF, sizeof(struct yaffs_checkpt_obj));
+ cp.struct_type = sizeof(cp);
+
+ if (ok)
+ ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_checkpt_obj cp;
+ int ok = 1;
+ int done = 0;
+ struct yaffs_obj *hard_list = NULL;
+
+ while (ok && !done) {
+ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
+ if (cp.struct_type != sizeof(cp)) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "struct size %d instead of %d ok %d",
+ cp.struct_type, (int)sizeof(cp), ok);
+ ok = 0;
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "Checkpoint read object %d parent %d type %d chunk %d ",
+ cp.obj_id, cp.parent_id, cp.variant_type,
+ cp.hdr_chunk);
+
+ if (ok && cp.obj_id == ~0) {
+ done = 1;
+ } else if (ok) {
+ obj =
+ yaffs_find_or_create_by_number(dev, cp.obj_id,
+ cp.variant_type);
+ if (obj) {
+ ok = yaffs2_checkpt_obj_to_obj(obj, &cp);
+ if (!ok)
+ break;
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE) {
+ ok = yaffs2_rd_checkpt_tnodes(obj);
+ } else if (obj->variant_type ==
+ YAFFS_OBJECT_TYPE_HARDLINK) {
+ obj->hard_links.next =
+ (struct list_head *)hard_list;
+ hard_list = obj;
+ }
+ } else {
+ ok = 0;
+ }
+ }
+ }
+
+ if (ok)
+ yaffs_link_fixup(dev, hard_list);
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_wr_checkpt_sum(struct yaffs_dev *dev)
+{
+ u32 checkpt_sum;
+ int ok;
+
+ yaffs2_get_checkpt_sum(dev, &checkpt_sum);
+
+ ok = (yaffs2_checkpt_wr(dev, &checkpt_sum, sizeof(checkpt_sum)) ==
+ sizeof(checkpt_sum));
+
+ if (!ok)
+ return 0;
+
+ return 1;
+}
+
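+/*
+ * yaffs2_rd_checkpt_sum()
+ * Compare the checksum accumulated while reading the stream with the
+ * checksum value stored at the end of the checkpoint.
+ */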
+static int yaffs2_rd_checkpt_sum(struct yaffs_dev *dev)
+{
+ u32 checkpt_sum0;
+ u32 checkpt_sum1;
+ int ok;
+
+ yaffs2_get_checkpt_sum(dev, &checkpt_sum0);
+
+ ok = (yaffs2_checkpt_rd(dev, &checkpt_sum1, sizeof(checkpt_sum1)) ==
+ sizeof(checkpt_sum1));
+
+ if (!ok)
+ return 0;
+
+ if (checkpt_sum0 != checkpt_sum1)
+ return 0;
+
+ return 1;
+}
+
+static int yaffs2_wr_checkpt_data(struct yaffs_dev *dev)
+{
+ int ok = 1;
+
+ if (!yaffs2_checkpt_required(dev)) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "skipping checkpoint write");
+ ok = 0;
+ }
+
+ if (ok)
+ ok = yaffs2_checkpt_open(dev, 1);
+
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "write checkpoint validity");
+ ok = yaffs2_wr_checkpt_validity_marker(dev, 1);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "write checkpoint device");
+ ok = yaffs2_wr_checkpt_dev(dev);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "write checkpoint objects");
+ ok = yaffs2_wr_checkpt_objs(dev);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "write checkpoint validity");
+ ok = yaffs2_wr_checkpt_validity_marker(dev, 0);
+ }
+
+ if (ok)
+ ok = yaffs2_wr_checkpt_sum(dev);
+
+ if (!yaffs_checkpt_close(dev))
+ ok = 0;
+
+ if (ok)
+ dev->is_checkpointed = 1;
+ else
+ dev->is_checkpointed = 0;
+
+ return dev->is_checkpointed;
+}
+
+static int yaffs2_rd_checkpt_data(struct yaffs_dev *dev)
+{
+ int ok = 1;
+
+ if (!dev->param.is_yaffs2)
+ ok = 0;
+
+ if (ok && dev->param.skip_checkpt_rd) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "skipping checkpoint read");
+ ok = 0;
+ }
+
+ if (ok)
+ ok = yaffs2_checkpt_open(dev, 0); /* open for read */
+
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint validity");
+ ok = yaffs2_rd_checkpt_validity_marker(dev, 1);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint device");
+ ok = yaffs2_rd_checkpt_dev(dev);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint objects");
+ ok = yaffs2_rd_checkpt_objs(dev);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint validity");
+ ok = yaffs2_rd_checkpt_validity_marker(dev, 0);
+ }
+
+ if (ok) {
+ ok = yaffs2_rd_checkpt_sum(dev);
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint checksum %d", ok);
+ }
+
+ if (!yaffs_checkpt_close(dev))
+ ok = 0;
+
+ if (ok)
+ dev->is_checkpointed = 1;
+ else
+ dev->is_checkpointed = 0;
+
+ return ok ? 1 : 0;
+
+}
+
+void yaffs2_checkpt_invalidate(struct yaffs_dev *dev)
+{
+ if (dev->is_checkpointed || dev->blocks_in_checkpt > 0) {
+ dev->is_checkpointed = 0;
+ yaffs2_checkpt_invalidate_stream(dev);
+ }
+ if (dev->param.sb_dirty_fn)
+ dev->param.sb_dirty_fn(dev);
+}
+
+int yaffs2_checkpt_save(struct yaffs_dev *dev)
+{
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "save entry: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ yaffs_verify_objects(dev);
+ yaffs_verify_blocks(dev);
+ yaffs_verify_free_chunks(dev);
+
+ if (!dev->is_checkpointed) {
+ yaffs2_checkpt_invalidate(dev);
+ yaffs2_wr_checkpt_data(dev);
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT | YAFFS_TRACE_MOUNT,
+ "save exit: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ return dev->is_checkpointed;
+}
+
+int yaffs2_checkpt_restore(struct yaffs_dev *dev)
+{
+ int retval;
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "restore entry: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ retval = yaffs2_rd_checkpt_data(dev);
+
+ if (dev->is_checkpointed) {
+ yaffs_verify_objects(dev);
+ yaffs_verify_blocks(dev);
+ yaffs_verify_free_chunks(dev);
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "restore exit: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ return retval;
+}
+
+int yaffs2_handle_hole(struct yaffs_obj *obj, loff_t new_size)
+{
+ /* If new_size > old_file_size, we're going to be writing a hole.
+ * If the hole is small then write zeros, otherwise write a start-of-hole
+ * marker.
+ */
+
+ loff_t old_file_size;
+ int increase;
+ int small_hole;
+ int result = YAFFS_OK;
+ struct yaffs_dev *dev = NULL;
+
+ u8 *local_buffer = NULL;
+
+ int small_increase_ok = 0;
+
+ if (!obj)
+ return YAFFS_FAIL;
+
+ if (obj->variant_type != YAFFS_OBJECT_TYPE_FILE)
+ return YAFFS_FAIL;
+
+ dev = obj->my_dev;
+
+ /* Bail out if not yaffs2 mode */
+ if (!dev->param.is_yaffs2)
+ return YAFFS_OK;
+
+ old_file_size = obj->variant.file_variant.file_size;
+
+ if (new_size <= old_file_size)
+ return YAFFS_OK;
+
+ increase = new_size - old_file_size;
+
+ if (increase < YAFFS_SMALL_HOLE_THRESHOLD * dev->data_bytes_per_chunk &&
+ yaffs_check_alloc_available(dev, YAFFS_SMALL_HOLE_THRESHOLD + 1))
+ small_hole = 1;
+ else
+ small_hole = 0;
+
+ if (small_hole)
+ local_buffer = yaffs_get_temp_buffer(dev, __LINE__);
+
+ if (local_buffer) {
+ /* fill hole with zero bytes */
+ int pos = old_file_size;
+ int this_write;
+ int written;
+ memset(local_buffer, 0, dev->data_bytes_per_chunk);
+ small_increase_ok = 1;
+
+ while (increase > 0 && small_increase_ok) {
+ this_write = increase;
+ if (this_write > dev->data_bytes_per_chunk)
+ this_write = dev->data_bytes_per_chunk;
+ written =
+ yaffs_do_file_wr(obj, local_buffer, pos, this_write,
+ 0);
+ if (written == this_write) {
+ pos += this_write;
+ increase -= this_write;
+ } else {
+ small_increase_ok = 0;
+ }
+ }
+
+ yaffs_release_temp_buffer(dev, local_buffer, __LINE__);
+
+ /* If we were out of space then reverse any chunks we've added */
+ if (!small_increase_ok)
+ yaffs_resize_file_down(obj, old_file_size);
+ }
+
+ if (!small_increase_ok &&
+ obj->parent &&
+ obj->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
+ obj->parent->obj_id != YAFFS_OBJECTID_DELETED) {
+ /* Write a hole start header with the old file size */
+ yaffs_update_oh(obj, NULL, 0, 1, 0, NULL);
+ }
+
+ return result;
+
+}
+
+struct yaffs_block_index {
+ int seq;
+ int block;
+};
+
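+/* Comparison function for sort(): order blocks by sequence number, falling
+ * back to block number when the sequence numbers are equal.
+ */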
+static int yaffs2_ybicmp(const void *a, const void *b)
+{
+ int aseq = ((struct yaffs_block_index *)a)->seq;
+ int bseq = ((struct yaffs_block_index *)b)->seq;
+ int ablock = ((struct yaffs_block_index *)a)->block;
+ int bblock = ((struct yaffs_block_index *)b)->block;
+ if (aseq == bseq)
+ return ablock - bblock;
+ else
+ return aseq - bseq;
+}
+
+int yaffs2_scan_backwards(struct yaffs_dev *dev)
+{
+ struct yaffs_ext_tags tags;
+ int blk;
+ int block_iter;
+ int start_iter;
+ int end_iter;
+ int n_to_scan = 0;
+
+ int chunk;
+ int result;
+ int c;
+ int deleted;
+ enum yaffs_block_state state;
+ struct yaffs_obj *hard_list = NULL;
+ struct yaffs_block_info *bi;
+ u32 seq_number;
+ struct yaffs_obj_hdr *oh;
+ struct yaffs_obj *in;
+ struct yaffs_obj *parent;
+ int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
+ int is_unlinked;
+ u8 *chunk_data;
+
+ int file_size;
+ int is_shrink;
+ int found_chunks;
+ int equiv_id;
+ int alloc_failed = 0;
+
+ struct yaffs_block_index *block_index = NULL;
+ int alt_block_index = 0;
+
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "yaffs2_scan_backwards starts intstartblk %d intendblk %d...",
+ dev->internal_start_block, dev->internal_end_block);
+
+ dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
+
+ block_index = kmalloc(n_blocks * sizeof(struct yaffs_block_index),
+ GFP_NOFS);
+
+ if (!block_index) {
+ block_index =
+ vmalloc(n_blocks * sizeof(struct yaffs_block_index));
+ alt_block_index = 1;
+ }
+
+ if (!block_index) {
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "yaffs2_scan_backwards() could not allocate block index!"
+ );
+ return YAFFS_FAIL;
+ }
+
+ dev->blocks_in_checkpt = 0;
+
+ chunk_data = yaffs_get_temp_buffer(dev, __LINE__);
+
+ /* Scan all the blocks to determine their state */
+ bi = dev->block_info;
+ for (blk = dev->internal_start_block; blk <= dev->internal_end_block;
+ blk++) {
+ yaffs_clear_chunk_bits(dev, blk);
+ bi->pages_in_use = 0;
+ bi->soft_del_pages = 0;
+
+ yaffs_query_init_block_state(dev, blk, &state, &seq_number);
+
+ bi->block_state = state;
+ bi->seq_number = seq_number;
+
+ if (bi->seq_number == YAFFS_SEQUENCE_CHECKPOINT_DATA)
+ bi->block_state = state = YAFFS_BLOCK_STATE_CHECKPOINT;
+ if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
+ bi->block_state = state = YAFFS_BLOCK_STATE_DEAD;
+
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG,
+ "Block scanning block %d state %d seq %d",
+ blk, state, seq_number);
+
+ if (state == YAFFS_BLOCK_STATE_CHECKPOINT) {
+ dev->blocks_in_checkpt++;
+
+ } else if (state == YAFFS_BLOCK_STATE_DEAD) {
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+ "block %d is bad", blk);
+ } else if (state == YAFFS_BLOCK_STATE_EMPTY) {
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty ");
+ dev->n_erased_blocks++;
+ dev->n_free_chunks += dev->param.chunks_per_block;
+ } else if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+
+ /* Determine the highest sequence number */
+ if (seq_number >= YAFFS_LOWEST_SEQUENCE_NUMBER &&
+ seq_number < YAFFS_HIGHEST_SEQUENCE_NUMBER) {
+
+ block_index[n_to_scan].seq = seq_number;
+ block_index[n_to_scan].block = blk;
+
+ n_to_scan++;
+
+ if (seq_number >= dev->seq_number)
+ dev->seq_number = seq_number;
+ } else {
+ /* TODO: Nasty sequence number! */
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Block scanning block %d has bad sequence number %d",
+ blk, seq_number);
+
+ }
+ }
+ bi++;
+ }
+
+ yaffs_trace(YAFFS_TRACE_SCAN, "%d blocks to be sorted...", n_to_scan);
+
+ cond_resched();
+
+ /* Sort the blocks by sequence number */
+ sort(block_index, n_to_scan, sizeof(struct yaffs_block_index),
+ yaffs2_ybicmp, NULL);
+
+ cond_resched();
+
+ yaffs_trace(YAFFS_TRACE_SCAN, "...done");
+
+ /* Now scan the blocks looking at the data. */
+ start_iter = 0;
+ end_iter = n_to_scan - 1;
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "%d blocks to scan", n_to_scan);
+
+ /* For each block.... backwards */
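+ /* Scanning from the newest block down means the most recent copy of each
+ * chunk or object header is found first; older duplicates are deleted as
+ * they are encountered below.
+ */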
+ for (block_iter = end_iter; !alloc_failed && block_iter >= start_iter;
+ block_iter--) {
+ /* Cooperative multitasking! This loop can run for so
+ long that watchdog timers expire. */
+ cond_resched();
+
+ /* get the block to scan in the correct order */
+ blk = block_index[block_iter].block;
+
+ bi = yaffs_get_block_info(dev, blk);
+
+ state = bi->block_state;
+
+ deleted = 0;
+
+ /* For each chunk in each block that needs scanning.... */
+ found_chunks = 0;
+ for (c = dev->param.chunks_per_block - 1;
+ !alloc_failed && c >= 0 &&
+ (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
+ state == YAFFS_BLOCK_STATE_ALLOCATING); c--) {
+ /* Scan backwards...
+ * Read the tags and decide what to do
+ */
+
+ chunk = blk * dev->param.chunks_per_block + c;
+
+ result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL,
+ &tags);
+
+ /* Let's have a good look at this chunk... */
+
+ if (!tags.chunk_used) {
+ /* An unassigned chunk in the block.
+ * If there are used chunks after this one, then
+ * it is a chunk that was skipped due to failing the erased
+ * check. Just skip it so that it can be deleted.
+ * But, more typically, we get here when this is an unallocated
+ * chunk and this means that either the block is empty or
+ * this is the one being allocated from.
+ */
+
+ if (found_chunks) {
+ /* This is a chunk that was skipped due to failing the erased check */
+ } else if (c == 0) {
+ /* We're looking at the first chunk in the block so the block is unused */
+ state = YAFFS_BLOCK_STATE_EMPTY;
+ dev->n_erased_blocks++;
+ } else {
+ if (state ==
+ YAFFS_BLOCK_STATE_NEEDS_SCANNING
+ || state ==
+ YAFFS_BLOCK_STATE_ALLOCATING) {
+ if (dev->seq_number ==
+ bi->seq_number) {
+ /* this is the block being allocated from */
+
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ " Allocating from %d %d",
+ blk, c);
+
+ state =
+ YAFFS_BLOCK_STATE_ALLOCATING;
+ dev->alloc_block = blk;
+ dev->alloc_page = c;
+ dev->alloc_block_finder = blk;
+ } else {
+ /* This is a partially written block that is not
+ * the current allocation block.
+ */
+
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Partially written block %d detected",
+ blk);
+ }
+ }
+ }
+
+ dev->n_free_chunks++;
+
+ } else if (tags.ecc_result == YAFFS_ECC_RESULT_UNFIXED) {
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ " Unfixed ECC in chunk(%d:%d), chunk ignored",
+ blk, c);
+
+ dev->n_free_chunks++;
+
+ } else if (tags.obj_id > YAFFS_MAX_OBJECT_ID ||
+ tags.chunk_id > YAFFS_MAX_CHUNK_ID ||
+ (tags.chunk_id > 0
+ && tags.n_bytes > dev->data_bytes_per_chunk)
+ || tags.seq_number != bi->seq_number) {
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Chunk (%d:%d) with bad tags:obj = %d, chunk_id = %d, n_bytes = %d, ignored",
+ blk, c, tags.obj_id,
+ tags.chunk_id, tags.n_bytes);
+
+ dev->n_free_chunks++;
+
+ } else if (tags.chunk_id > 0) {
+ /* chunk_id > 0 so it is a data chunk... */
+ unsigned int endpos;
+ u32 chunk_base =
+ (tags.chunk_id -
+ 1) * dev->data_bytes_per_chunk;
+
+ found_chunks = 1;
+
+ yaffs_set_chunk_bit(dev, blk, c);
+ bi->pages_in_use++;
+
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id,
+ YAFFS_OBJECT_TYPE_FILE);
+ if (!in) {
+ /* Out of memory */
+ alloc_failed = 1;
+ }
+
+ if (in &&
+ in->variant_type == YAFFS_OBJECT_TYPE_FILE
+ && chunk_base <
+ in->variant.file_variant.shrink_size) {
+ /* This has not been invalidated by a resize */
+ if (!yaffs_put_chunk_in_file
+ (in, tags.chunk_id, chunk, -1)) {
+ alloc_failed = 1;
+ }
+
+ /* File size is calculated by looking at the data chunks if we have not
+ * seen an object header yet. Stop this practice once we find an object header.
+ */
+ endpos = chunk_base + tags.n_bytes;
+
+ if (!in->valid && /* have not got an object header yet */
+ in->variant.file_variant.
+ scanned_size < endpos) {
+ in->variant.file_variant.
+ scanned_size = endpos;
+ in->variant.file_variant.
+ file_size = endpos;
+ }
+
+ } else if (in) {
+ /* This chunk has been invalidated by a resize, or a past file deletion
+ * so delete the chunk*/
+ yaffs_chunk_del(dev, chunk, 1,
+ __LINE__);
+
+ }
+ } else {
+ /* chunk_id == 0, so it is an ObjectHeader.
+ * Thus, we read in the object header and make the object
+ */
+ found_chunks = 1;
+
+ yaffs_set_chunk_bit(dev, blk, c);
+ bi->pages_in_use++;
+
+ oh = NULL;
+ in = NULL;
+
+ if (tags.extra_available) {
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id,
+ tags.extra_obj_type);
+ if (!in)
+ alloc_failed = 1;
+ }
+
+ if (!in ||
+ (!in->valid && dev->param.disable_lazy_load) ||
+ tags.extra_shadows ||
+ (!in->valid &&
+ (tags.obj_id == YAFFS_OBJECTID_ROOT ||
+ tags.obj_id == YAFFS_OBJECTID_LOSTNFOUND))) {
+
+ /* If we don't have valid info then we need to read the chunk
+ * TODO In future we can probably defer reading the chunk and
+ * living with invalid data until needed.
+ */
+
+ result = yaffs_rd_chunk_tags_nand(dev,
+ chunk,
+ chunk_data,
+ NULL);
+
+ oh = (struct yaffs_obj_hdr *)chunk_data;
+
+ if (dev->param.inband_tags) {
+ /* Fix up the header if they got corrupted by inband tags */
+ oh->shadows_obj =
+ oh->inband_shadowed_obj_id;
+ oh->is_shrink =
+ oh->inband_is_shrink;
+ }
+
+ if (!in) {
+ in = yaffs_find_or_create_by_number(dev, tags.obj_id, oh->type);
+ if (!in)
+ alloc_failed = 1;
+ }
+
+ }
+
+ if (!in) {
+ /* TODO Hoosterman we have a problem! */
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: Could not make object for object %d at chunk %d during scan",
+ tags.obj_id, chunk);
+ continue;
+ }
+
+ if (in->valid) {
+ /* We have already filled this one.
+ * We have a duplicate that will be discarded, but
+ * we first have to suck out resize info if it is a file.
+ */
+
+ if (in->variant_type == YAFFS_OBJECT_TYPE_FILE &&
+ ((oh && oh->type == YAFFS_OBJECT_TYPE_FILE) ||
+ (tags.extra_available &&
+ tags.extra_obj_type ==
+ YAFFS_OBJECT_TYPE_FILE))) {
+ u32 this_size = (oh) ?
+ oh->file_size : tags.extra_length;
+ u32 parent_obj_id = (oh) ?
+ oh->parent_obj_id : tags.extra_parent_id;
+
+ is_shrink = (oh) ?
+ oh->is_shrink : tags.extra_is_shrink;
+
+ /* If it is deleted (unlinked at start also means deleted)
+ * we treat the file size as being zeroed at this point.
+ */
+ if (parent_obj_id ==
+ YAFFS_OBJECTID_DELETED
+ || parent_obj_id ==
+ YAFFS_OBJECTID_UNLINKED) {
+ this_size = 0;
+ is_shrink = 1;
+ }
+
+ if (is_shrink &&
+ in->variant.file_variant.shrink_size >
+ this_size)
+ in->variant.file_variant.shrink_size =
+ this_size;
+
+ if (is_shrink)
+ bi->has_shrink_hdr = 1;
+
+ }
+ /* Use existing - destroy this one. */
+ yaffs_chunk_del(dev, chunk, 1,
+ __LINE__);
+
+ }
+
+ if (!in->valid && in->variant_type !=
+ (oh ? oh->type : tags.extra_obj_type))
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: Bad object type, %d != %d, for object %d at chunk %d during scan",
+ oh ?
+ oh->type : tags.extra_obj_type,
+ in->variant_type, tags.obj_id,
+ chunk);
+
+ if (!in->valid &&
+ (tags.obj_id == YAFFS_OBJECTID_ROOT ||
+ tags.obj_id ==
+ YAFFS_OBJECTID_LOSTNFOUND)) {
+ /* We only load some info, don't fiddle with directory structure */
+ in->valid = 1;
+
+ if (oh) {
+
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+ in->lazy_loaded = 0;
+ } else {
+ in->lazy_loaded = 1;
+ }
+ in->hdr_chunk = chunk;
+
+ } else if (!in->valid) {
+ /* we need to load this info */
+
+ in->valid = 1;
+ in->hdr_chunk = chunk;
+
+ if (oh) {
+ in->variant_type = oh->type;
+
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+
+ if (oh->shadows_obj > 0)
+ yaffs_handle_shadowed_obj
+ (dev,
+ oh->shadows_obj,
+ 1);
+
+ yaffs_set_obj_name_from_oh(in,
+ oh);
+ parent =
+ yaffs_find_or_create_by_number
+ (dev, oh->parent_obj_id,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+
+ file_size = oh->file_size;
+ is_shrink = oh->is_shrink;
+ equiv_id = oh->equiv_id;
+
+ } else {
+ in->variant_type =
+ tags.extra_obj_type;
+ parent =
+ yaffs_find_or_create_by_number
+ (dev, tags.extra_parent_id,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+ file_size = tags.extra_length;
+ is_shrink =
+ tags.extra_is_shrink;
+ equiv_id = tags.extra_equiv_id;
+ in->lazy_loaded = 1;
+
+ }
+ in->dirty = 0;
+
+ if (!parent)
+ alloc_failed = 1;
+
+ /* directory stuff...
+ * hook up to parent
+ */
+
+ if (parent && parent->variant_type ==
+ YAFFS_OBJECT_TYPE_UNKNOWN) {
+ /* Set up as a directory */
+ parent->variant_type =
+ YAFFS_OBJECT_TYPE_DIRECTORY;
+ INIT_LIST_HEAD(&parent->
+ variant.dir_variant.children);
+ } else if (!parent
+ || parent->variant_type !=
+ YAFFS_OBJECT_TYPE_DIRECTORY) {
+ /* Hoosterman, another problem....
+ * We're trying to use a non-directory as a directory
+ */
+
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
+ );
+ parent = dev->lost_n_found;
+ }
+
+ yaffs_add_obj_to_dir(parent, in);
+
+ is_unlinked = (parent == dev->del_dir)
+ || (parent == dev->unlinked_dir);
+
+ if (is_shrink) {
+ /* Mark the block as having a shrink header */
+ bi->has_shrink_hdr = 1;
+ }
+
+ /* Note re hardlinks.
+ * Since we might scan a hardlink before its equivalent object is scanned
+ * we put them all in a list.
+ * After scanning is complete, we should have all the objects, so we run
+ * through this list and fix up all the chains.
+ */
+
+ switch (in->variant_type) {
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* Todo got a problem */
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+
+ if (in->variant.file_variant.scanned_size <
+ file_size) {
+ /* This covers the case where the file size is greater
+ * than where the data is
+ * This will happen if the file is resized to be larger
+ * than its current data extents.
+ */
+ in->variant.file_variant.file_size =
+ file_size;
+ in->variant.file_variant.scanned_size =
+ file_size;
+ }
+
+ if (in->variant.file_variant.shrink_size >
+ file_size)
+ in->variant.file_variant.shrink_size =
+ file_size;
+
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ if (!is_unlinked) {
+ in->variant.hardlink_variant.equiv_id =
+ equiv_id;
+ in->hard_links.next =
+ (struct list_head *)hard_list;
+ hard_list = in;
+ }
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ if (oh) {
+ in->variant.symlink_variant.alias =
+ yaffs_clone_str(oh->alias);
+ if (!in->variant.symlink_variant.alias)
+ alloc_failed = 1;
+ }
+ break;
+ }
+
+ }
+
+ }
+
+ } /* End of scanning for each chunk */
+
+ if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+ /* If we got this far while scanning, then the block is fully allocated. */
+ state = YAFFS_BLOCK_STATE_FULL;
+ }
+
+ bi->block_state = state;
+
+ /* Now let's see if it was dirty */
+ if (bi->pages_in_use == 0 &&
+ !bi->has_shrink_hdr &&
+ bi->block_state == YAFFS_BLOCK_STATE_FULL) {
+ yaffs_block_became_dirty(dev, blk);
+ }
+
+ }
+
+ yaffs_skip_rest_of_block(dev);
+
+ if (alt_block_index)
+ vfree(block_index);
+ else
+ kfree(block_index);
+
+ /* Ok, we've done all the scanning.
+ * Fix up the hard link chains.
+ * We should now have scanned all the objects, now it's time to add these
+ * hardlinks.
+ */
+ yaffs_link_fixup(dev, hard_list);
+
+ yaffs_release_temp_buffer(dev, chunk_data, __LINE__);
+
+ if (alloc_failed)
+ return YAFFS_FAIL;
+
+ yaffs_trace(YAFFS_TRACE_SCAN, "yaffs2_scan_backwards ends");
+
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_yaffs2.h b/fs/yaffs2/yaffs_yaffs2.h
new file mode 100644
index 000000000000..e1a9287fc506
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs2.h
@@ -0,0 +1,39 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_YAFFS2_H__
+#define __YAFFS_YAFFS2_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_calc_oldest_dirty_seq(struct yaffs_dev *dev);
+void yaffs2_find_oldest_dirty_seq(struct yaffs_dev *dev);
+void yaffs2_clear_oldest_dirty_seq(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi);
+void yaffs2_update_oldest_dirty_seq(struct yaffs_dev *dev, unsigned block_no,
+ struct yaffs_block_info *bi);
+int yaffs_block_ok_for_gc(struct yaffs_dev *dev, struct yaffs_block_info *bi);
+u32 yaffs2_find_refresh_block(struct yaffs_dev *dev);
+int yaffs2_checkpt_required(struct yaffs_dev *dev);
+int yaffs_calc_checkpt_blocks_required(struct yaffs_dev *dev);
+
+void yaffs2_checkpt_invalidate(struct yaffs_dev *dev);
+int yaffs2_checkpt_save(struct yaffs_dev *dev);
+int yaffs2_checkpt_restore(struct yaffs_dev *dev);
+
+int yaffs2_handle_hole(struct yaffs_obj *obj, loff_t new_size);
+int yaffs2_scan_backwards(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yportenv.h b/fs/yaffs2/yportenv.h
new file mode 100644
index 000000000000..8183425448cd
--- /dev/null
+++ b/fs/yaffs2/yportenv.h
@@ -0,0 +1,70 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YPORTENV_LINUX_H__
+#define __YPORTENV_LINUX_H__
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/xattr.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/sort.h>
+#include <linux/bitops.h>
+
+#define YCHAR char
+#define YUCHAR unsigned char
+#define _Y(x) x
+
+#define YAFFS_LOSTNFOUND_NAME "lost+found"
+#define YAFFS_LOSTNFOUND_PREFIX "obj"
+
+
+#define YAFFS_ROOT_MODE 0755
+#define YAFFS_LOSTNFOUND_MODE 0700
+
+#define Y_CURRENT_TIME CURRENT_TIME.tv_sec
+#define Y_TIME_CONVERT(x) (x).tv_sec
+
+#define compile_time_assertion(assertion) \
+ ({ int x = __builtin_choose_expr(assertion, 0, (void)0); (void) x; })
+
+
+#ifndef Y_DUMP_STACK
+#define Y_DUMP_STACK() dump_stack()
+#endif
+
+#define yaffs_trace(msk, fmt, ...) do { \
+ if (yaffs_trace_mask & (msk)) \
+ printk(KERN_DEBUG "yaffs: " fmt "\n", ##__VA_ARGS__); \
+} while (0)
+
+#ifndef YBUG
+#define YBUG() do {\
+ yaffs_trace(YAFFS_TRACE_BUG,\
+ "bug " __FILE__ " %d",\
+ __LINE__);\
+ Y_DUMP_STACK();\
+} while (0)
+#endif
+
+#endif
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 701beab27aab..5cf680a98f9b 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -461,10 +461,8 @@ static inline int is_zero_pfn(unsigned long pfn)
return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}
-static inline unsigned long my_zero_pfn(unsigned long addr)
-{
- return page_to_pfn(ZERO_PAGE(addr));
-}
+#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
+
#else
static inline int is_zero_pfn(unsigned long pfn)
{
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 0f4a366f6fa6..3527fb3f75bb 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -70,7 +70,7 @@ struct drm_mm {
unsigned long scan_color;
unsigned long scan_size;
unsigned long scan_hit_start;
- unsigned scan_hit_size;
+ unsigned long scan_hit_end;
unsigned scanned_blocks;
unsigned long scan_start;
unsigned long scan_end;
diff --git a/include/linux/akm8975.h b/include/linux/akm8975.h
new file mode 100644
index 000000000000..6a7c43260018
--- /dev/null
+++ b/include/linux/akm8975.h
@@ -0,0 +1,87 @@
+/*
+ * Definitions for akm8975 compass chip.
+ */
+#ifndef AKM8975_H
+#define AKM8975_H
+
+#include <linux/ioctl.h>
+
+/*! \name AK8975 operation mode
+ \anchor AK8975_Mode
+ Defines an operation mode of the AK8975.*/
+/*! @{*/
+#define AK8975_MODE_SNG_MEASURE 0x01
+#define AK8975_MODE_SELF_TEST 0x08
+#define AK8975_MODE_FUSE_ACCESS 0x0F
+#define AK8975_MODE_POWER_DOWN 0x00
+/*! @}*/
+
+#define RBUFF_SIZE 8 /* Rx buffer size */
+
+/*! \name AK8975 register address
+\anchor AK8975_REG
+Defines a register address of the AK8975.*/
+/*! @{*/
+#define AK8975_REG_WIA 0x00
+#define AK8975_REG_INFO 0x01
+#define AK8975_REG_ST1 0x02
+#define AK8975_REG_HXL 0x03
+#define AK8975_REG_HXH 0x04
+#define AK8975_REG_HYL 0x05
+#define AK8975_REG_HYH 0x06
+#define AK8975_REG_HZL 0x07
+#define AK8975_REG_HZH 0x08
+#define AK8975_REG_ST2 0x09
+#define AK8975_REG_CNTL 0x0A
+#define AK8975_REG_RSV 0x0B
+#define AK8975_REG_ASTC 0x0C
+#define AK8975_REG_TS1 0x0D
+#define AK8975_REG_TS2 0x0E
+#define AK8975_REG_I2CDIS 0x0F
+/*! @}*/
+
+/*! \name AK8975 fuse-rom address
+\anchor AK8975_FUSE
+Defines a read-only address of the fuse ROM of the AK8975.*/
+/*! @{*/
+#define AK8975_FUSE_ASAX 0x10
+#define AK8975_FUSE_ASAY 0x11
+#define AK8975_FUSE_ASAZ 0x12
+/*! @}*/
+
+#define AKMIO 0xA1
+
+/* IOCTLs for AKM library */
+#define ECS_IOCTL_WRITE _IOW(AKMIO, 0x02, char[5])
+#define ECS_IOCTL_READ _IOWR(AKMIO, 0x03, char[5])
+#define ECS_IOCTL_GETDATA _IOR(AKMIO, 0x08, char[RBUFF_SIZE])
+#define ECS_IOCTL_SET_YPR _IOW(AKMIO, 0x0C, short[12])
+#define ECS_IOCTL_GET_OPEN_STATUS _IOR(AKMIO, 0x0D, int)
+#define ECS_IOCTL_GET_CLOSE_STATUS _IOR(AKMIO, 0x0E, int)
+#define ECS_IOCTL_GET_DELAY _IOR(AKMIO, 0x30, short)
+
+/* IOCTLs for APPs */
+#define ECS_IOCTL_APP_SET_MFLAG _IOW(AKMIO, 0x11, short)
+#define ECS_IOCTL_APP_GET_MFLAG _IOW(AKMIO, 0x12, short)
+#define ECS_IOCTL_APP_SET_AFLAG _IOW(AKMIO, 0x13, short)
+#define ECS_IOCTL_APP_GET_AFLAG _IOR(AKMIO, 0x14, short)
+#define ECS_IOCTL_APP_SET_DELAY _IOW(AKMIO, 0x18, short)
+#define ECS_IOCTL_APP_GET_DELAY ECS_IOCTL_GET_DELAY
+/* Set raw magnetic vector flag */
+#define ECS_IOCTL_APP_SET_MVFLAG _IOW(AKMIO, 0x19, short)
+/* Get raw magnetic vector flag */
+#define ECS_IOCTL_APP_GET_MVFLAG _IOR(AKMIO, 0x1A, short)
+#define ECS_IOCTL_APP_SET_TFLAG _IOR(AKMIO, 0x15, short)
+
+
+struct akm8975_platform_data {
+ int intr;
+
+ int (*init)(void);
+ void (*exit)(void);
+ int (*power_on)(void);
+ int (*power_off)(void);
+};
+
+#endif
+
diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h
index 32a89cf5ec45..9d1d9caf2611 100644
--- a/include/linux/amba/mmci.h
+++ b/include/linux/amba/mmci.h
@@ -5,6 +5,15 @@
#define AMBA_MMCI_H
#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+
+struct embedded_sdio_data {
+ struct sdio_cis cis;
+ struct sdio_cccr cccr;
+ struct sdio_embedded_func *funcs;
+ int num_funcs;
+};
/*
@@ -73,6 +82,9 @@ struct mmci_platform_data {
bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
void *dma_rx_param;
void *dma_tx_param;
+ unsigned int status_irq;
+ struct embedded_sdio_data *embedded_sdio;
+ int (*register_status_notify)(void (*callback)(int card_present, void *dev_id), void *dev_id);
};
#endif
diff --git a/include/linux/android_aid.h b/include/linux/android_aid.h
new file mode 100644
index 000000000000..0f904b3ba7f0
--- /dev/null
+++ b/include/linux/android_aid.h
@@ -0,0 +1,28 @@
+/* include/linux/android_aid.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ANDROID_AID_H
+#define _LINUX_ANDROID_AID_H
+
+/* AIDs that the kernel treats differently */
+#define AID_NET_BT_ADMIN 3001
+#define AID_NET_BT 3002
+#define AID_INET 3003
+#define AID_NET_RAW 3004
+#define AID_NET_ADMIN 3005
+#define AID_NET_BW_STATS 3006 /* read bandwidth statistics */
+#define AID_NET_BW_ACCT 3007 /* change bandwidth statistics accounting */
+
+#endif
diff --git a/include/linux/audit.h b/include/linux/audit.h
index bce729afbcf9..5a6d718adf34 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -24,6 +24,7 @@
#define _LINUX_AUDIT_H_
#include <linux/sched.h>
+#include <linux/ptrace.h>
#include <uapi/linux/audit.h>
struct audit_sig_info {
@@ -157,7 +158,8 @@ void audit_core_dumps(long signr);
static inline void audit_seccomp(unsigned long syscall, long signr, int code)
{
- if (unlikely(!audit_dummy_context()))
+ /* Force a record to be reported if a signal was delivered. */
+ if (signr || unlikely(!audit_dummy_context()))
__audit_seccomp(syscall, signr, code);
}
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 458f497738a4..13bba1796dbe 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -34,6 +34,7 @@ enum bh_state_bits {
BH_Write_EIO, /* I/O error on write */
BH_Unwritten, /* Buffer is allocated on disk but not written */
BH_Quiet, /* Buffer Error Prinks to be quiet */
+ BH_Meta, /* Is meta */
BH_PrivateStart,/* not a state bit, but the first bit available
* for private allocation by other entities
@@ -124,6 +125,7 @@ BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
+BUFFER_FNS(Meta, meta)
#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK)
#define touch_buffer(bh) mark_page_accessed(bh->b_page)
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 7d73905dcba2..8cc87802bf5c 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -254,6 +254,7 @@ struct css_set {
/* For RCU-protected deletion */
struct rcu_head rcu_head;
+ struct work_struct work;
};
/*
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 6ecb6dc2f303..cc7bddeaf553 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -22,7 +22,7 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *mask,
- bool sync, bool *contended, struct page **page);
+ bool sync, bool *contended);
extern int compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern unsigned long compaction_suitable(struct zone *zone, int order);
@@ -75,7 +75,7 @@ static inline bool compaction_restarting(struct zone *zone, int order)
#else
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *nodemask,
- bool sync, bool *contended, struct page **page)
+ bool sync, bool *contended)
{
return COMPACT_CONTINUE;
}
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index ce7a074f2519..da86e38349b5 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -212,4 +212,11 @@ static inline int disable_nonboot_cpus(void) { return 0; }
static inline void enable_nonboot_cpus(void) {}
#endif /* !CONFIG_PM_SLEEP_SMP */
+#define IDLE_START 1
+#define IDLE_END 2
+
+void idle_notifier_register(struct notifier_block *n);
+void idle_notifier_unregister(struct notifier_block *n);
+void idle_notifier_call_chain(unsigned long val);
+
#endif /* _LINUX_CPU_H_ */
diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h
index ac3bbb5b9502..1739510d8994 100644
--- a/include/linux/cpu_rmap.h
+++ b/include/linux/cpu_rmap.h
@@ -13,9 +13,11 @@
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/slab.h>
+#include <linux/kref.h>
/**
* struct cpu_rmap - CPU affinity reverse-map
+ * @refcount: kref for object
* @size: Number of objects to be reverse-mapped
* @used: Number of objects added
* @obj: Pointer to array of object pointers
@@ -23,6 +25,7 @@
* based on affinity masks
*/
struct cpu_rmap {
+ struct kref refcount;
u16 size, used;
void **obj;
struct {
@@ -33,15 +36,7 @@ struct cpu_rmap {
#define CPU_RMAP_DIST_INF 0xffff
extern struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags);
-
-/**
- * free_cpu_rmap - free CPU affinity reverse-map
- * @rmap: Reverse-map allocated with alloc_cpu_rmap(), or %NULL
- */
-static inline void free_cpu_rmap(struct cpu_rmap *rmap)
-{
- kfree(rmap);
-}
+extern int cpu_rmap_put(struct cpu_rmap *rmap);
extern int cpu_rmap_add(struct cpu_rmap *rmap, void *obj);
extern int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index a55b88eaf96a..7789de86cd9a 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -93,7 +93,9 @@ struct cpufreq_policy {
cpumask_var_t related_cpus; /* CPUs with any coordination */
unsigned int shared_type; /* ANY or ALL affected CPUs
should set cpufreq */
- unsigned int cpu; /* cpu nr of registered CPU */
+ unsigned int cpu; /* cpu nr of CPU managing this policy */
+ unsigned int last_cpu; /* cpu nr of previous CPU that managed
+ * this policy */
struct cpufreq_cpuinfo cpuinfo;/* see above */
unsigned int min; /* in kHz */
@@ -112,10 +114,11 @@ struct cpufreq_policy {
struct completion kobj_unregister;
};
-#define CPUFREQ_ADJUST (0)
-#define CPUFREQ_INCOMPATIBLE (1)
-#define CPUFREQ_NOTIFY (2)
-#define CPUFREQ_START (3)
+#define CPUFREQ_ADJUST (0)
+#define CPUFREQ_INCOMPATIBLE (1)
+#define CPUFREQ_NOTIFY (2)
+#define CPUFREQ_START (3)
+#define CPUFREQ_UPDATE_POLICY_CPU (4)
#define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
#define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */
@@ -367,6 +370,9 @@ extern struct cpufreq_governor cpufreq_gov_ondemand;
#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE)
extern struct cpufreq_governor cpufreq_gov_conservative;
#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_conservative)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE)
+extern struct cpufreq_governor cpufreq_gov_interactive;
+#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_interactive)
#endif
@@ -405,6 +411,7 @@ extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
unsigned int cpu);
+void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy);
void cpufreq_frequency_table_put_attr(unsigned int cpu);
#endif /* _LINUX_CPUFREQ_H */
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 3711b34dc4f9..24cd1037b6d6 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -126,9 +126,9 @@ struct cpuidle_driver {
struct module *owner;
int refcnt;
- unsigned int power_specified:1;
/* set to 1 to use the core cpuidle time keeping (for all states). */
unsigned int en_core_tk_irqen:1;
+ /* states array must be ordered in decreasing power consumption */
struct cpuidle_state states[CPUIDLE_STATE_MAX];
int state_count;
int safe_state_index;
diff --git a/include/linux/gpio_event.h b/include/linux/gpio_event.h
new file mode 100644
index 000000000000..2613fc5e4a93
--- /dev/null
+++ b/include/linux/gpio_event.h
@@ -0,0 +1,170 @@
+/* include/linux/gpio_event.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_GPIO_EVENT_H
+#define _LINUX_GPIO_EVENT_H
+
+#include <linux/input.h>
+
+struct gpio_event_input_devs {
+ int count;
+ struct input_dev *dev[];
+};
+enum {
+ GPIO_EVENT_FUNC_UNINIT = 0x0,
+ GPIO_EVENT_FUNC_INIT = 0x1,
+ GPIO_EVENT_FUNC_SUSPEND = 0x2,
+ GPIO_EVENT_FUNC_RESUME = 0x3,
+};
+struct gpio_event_info {
+ int (*func)(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info,
+ void **data, int func);
+ int (*event)(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info,
+ void **data, unsigned int dev, unsigned int type,
+ unsigned int code, int value); /* out events */
+ bool no_suspend;
+};
+
+struct gpio_event_platform_data {
+ const char *name;
+ struct gpio_event_info **info;
+ size_t info_count;
+ int (*power)(const struct gpio_event_platform_data *pdata, bool on);
+ const char *names[]; /* If name is NULL, names contain a NULL */
+ /* terminated list of input devices to create */
+};
+
+#define GPIO_EVENT_DEV_NAME "gpio-event"
+
+/* Key matrix */
+
+enum gpio_event_matrix_flags {
+ /* unset: drive active output low, set: drive active output high */
+ GPIOKPF_ACTIVE_HIGH = 1U << 0,
+ GPIOKPF_DEBOUNCE = 1U << 1,
+ GPIOKPF_REMOVE_SOME_PHANTOM_KEYS = 1U << 2,
+ GPIOKPF_REMOVE_PHANTOM_KEYS = GPIOKPF_REMOVE_SOME_PHANTOM_KEYS |
+ GPIOKPF_DEBOUNCE,
+ GPIOKPF_DRIVE_INACTIVE = 1U << 3,
+ GPIOKPF_LEVEL_TRIGGERED_IRQ = 1U << 4,
+ GPIOKPF_PRINT_UNMAPPED_KEYS = 1U << 16,
+ GPIOKPF_PRINT_MAPPED_KEYS = 1U << 17,
+ GPIOKPF_PRINT_PHANTOM_KEYS = 1U << 18,
+};
+
+#define MATRIX_CODE_BITS (10)
+#define MATRIX_KEY_MASK ((1U << MATRIX_CODE_BITS) - 1)
+#define MATRIX_KEY(dev, code) \
+ (((dev) << MATRIX_CODE_BITS) | (code & MATRIX_KEY_MASK))
+
+extern int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info, void **data, int func);
+struct gpio_event_matrix_info {
+ /* initialize to gpio_event_matrix_func */
+ struct gpio_event_info info;
+ /* size must be ninputs * noutputs */
+ const unsigned short *keymap;
+ unsigned int *input_gpios;
+ unsigned int *output_gpios;
+ unsigned int ninputs;
+ unsigned int noutputs;
+ /* time to wait before reading inputs after driving each output */
+ ktime_t settle_time;
+ /* time to wait before scanning the keypad a second time */
+ ktime_t debounce_delay;
+ ktime_t poll_time;
+ unsigned flags;
+};
+
+/* Directly connected inputs and outputs */
+
+enum gpio_event_direct_flags {
+ GPIOEDF_ACTIVE_HIGH = 1U << 0,
+/* GPIOEDF_USE_DOWN_IRQ = 1U << 1, */
+/* GPIOEDF_USE_IRQ = (1U << 2) | GPIOIDF_USE_DOWN_IRQ, */
+ GPIOEDF_PRINT_KEYS = 1U << 8,
+ GPIOEDF_PRINT_KEY_DEBOUNCE = 1U << 9,
+ GPIOEDF_PRINT_KEY_UNSTABLE = 1U << 10,
+};
+
+struct gpio_event_direct_entry {
+ uint32_t gpio:16;
+ uint32_t code:10;
+ uint32_t dev:6;
+};
+
+/* inputs */
+extern int gpio_event_input_func(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info, void **data, int func);
+struct gpio_event_input_info {
+ /* initialize to gpio_event_input_func */
+ struct gpio_event_info info;
+ ktime_t debounce_time;
+ ktime_t poll_time;
+ uint16_t flags;
+ uint16_t type;
+ const struct gpio_event_direct_entry *keymap;
+ size_t keymap_size;
+};
+
+/* outputs */
+extern int gpio_event_output_func(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info, void **data, int func);
+extern int gpio_event_output_event(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info, void **data,
+ unsigned int dev, unsigned int type,
+ unsigned int code, int value);
+struct gpio_event_output_info {
+ /* initialize to gpio_event_output_func and gpio_event_output_event */
+ struct gpio_event_info info;
+ uint16_t flags;
+ uint16_t type;
+ const struct gpio_event_direct_entry *keymap;
+ size_t keymap_size;
+};
+
+
+/* axes */
+
+enum gpio_event_axis_flags {
+ GPIOEAF_PRINT_UNKNOWN_DIRECTION = 1U << 16,
+ GPIOEAF_PRINT_RAW = 1U << 17,
+ GPIOEAF_PRINT_EVENT = 1U << 18,
+};
+
+extern int gpio_event_axis_func(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info, void **data, int func);
+struct gpio_event_axis_info {
+ /* initialize to gpio_event_axis_func */
+ struct gpio_event_info info;
+ uint8_t count; /* number of gpios for this axis */
+ uint8_t dev; /* device index when using multiple input devices */
+ uint8_t type; /* EV_REL or EV_ABS */
+ uint16_t code;
+ uint16_t decoded_size;
+ uint16_t (*map)(struct gpio_event_axis_info *info, uint16_t in);
+ uint32_t *gpio;
+ uint32_t flags;
+};
+#define gpio_axis_2bit_gray_map gpio_axis_4bit_gray_map
+#define gpio_axis_3bit_gray_map gpio_axis_4bit_gray_map
+uint16_t gpio_axis_4bit_gray_map(
+ struct gpio_event_axis_info *info, uint16_t in);
+uint16_t gpio_axis_5bit_singletrack_map(
+ struct gpio_event_axis_info *info, uint16_t in);
+
+#endif
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 7330a0fef0c0..5fea1028bcd7 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -637,8 +637,8 @@ struct hid_driver {
int (*input_mapped)(struct hid_device *hdev,
struct hid_input *hidinput, struct hid_field *field,
struct hid_usage *usage, unsigned long **bit, int *max);
- void (*input_configured)(struct hid_device *hdev,
- struct hid_input *hidinput);
+ int (*input_configured)(struct hid_device *hdev,
+ struct hid_input *hidinput);
void (*feature_mapping)(struct hid_device *hdev,
struct hid_field *field,
struct hid_usage *usage);
diff --git a/include/linux/if_pppolac.h b/include/linux/if_pppolac.h
new file mode 100644
index 000000000000..c06bd6c8ba26
--- /dev/null
+++ b/include/linux/if_pppolac.h
@@ -0,0 +1,33 @@
+/* include/linux/if_pppolac.h
+ *
+ * Header for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_IF_PPPOLAC_H
+#define __LINUX_IF_PPPOLAC_H
+
+#include <linux/socket.h>
+#include <linux/types.h>
+
+struct sockaddr_pppolac {
+ sa_family_t sa_family; /* AF_PPPOX */
+ unsigned int sa_protocol; /* PX_PROTO_OLAC */
+ int udp_socket;
+ struct __attribute__((packed)) {
+ __u16 tunnel, session;
+ } local, remote;
+} __attribute__((packed));
+
+#endif /* __LINUX_IF_PPPOLAC_H */
diff --git a/include/linux/if_pppopns.h b/include/linux/if_pppopns.h
new file mode 100644
index 000000000000..0cf34b4d551f
--- /dev/null
+++ b/include/linux/if_pppopns.h
@@ -0,0 +1,32 @@
+/* include/linux/if_pppopns.h
+ *
+ * Header for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_IF_PPPOPNS_H
+#define __LINUX_IF_PPPOPNS_H
+
+#include <linux/socket.h>
+#include <linux/types.h>
+
+struct sockaddr_pppopns {
+ sa_family_t sa_family; /* AF_PPPOX */
+ unsigned int sa_protocol; /* PX_PROTO_OPNS */
+ int tcp_socket;
+ __u16 local;
+ __u16 remote;
+} __attribute__((packed));
+
+#endif /* __LINUX_IF_PPPOPNS_H */
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index aff7ad8a4ea3..dccd621d6377 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -41,6 +41,25 @@ struct pptp_opt {
u32 seq_sent, seq_recv;
int ppp_flags;
};
+
+struct pppolac_opt {
+ __u32 local;
+ __u32 remote;
+ __u32 recv_sequence;
+ __u32 xmit_sequence;
+ atomic_t sequencing;
+ int (*backlog_rcv)(struct sock *sk_udp, struct sk_buff *skb);
+};
+
+struct pppopns_opt {
+ __u16 local;
+ __u16 remote;
+ __u32 recv_sequence;
+ __u32 xmit_sequence;
+ void (*data_ready)(struct sock *sk_raw, int length);
+ int (*backlog_rcv)(struct sock *sk_raw, struct sk_buff *skb);
+};
+
#include <net/sock.h>
struct pppox_sock {
@@ -51,6 +70,8 @@ struct pppox_sock {
union {
struct pppoe_opt pppoe;
struct pptp_opt pptp;
+ struct pppolac_opt lac;
+ struct pppopns_opt pns;
} proto;
__be16 num;
};
diff --git a/include/linux/init.h b/include/linux/init.h
index a799273714ac..10ed4f436458 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -93,14 +93,6 @@
#define __exit __section(.exit.text) __exitused __cold notrace
-/* Used for HOTPLUG, but that is always enabled now, so just make them noops */
-#define __devinit
-#define __devinitdata
-#define __devinitconst
-#define __devexit
-#define __devexitdata
-#define __devexitconst
-
/* Used for HOTPLUG_CPU */
#define __cpuinit __section(.cpuinit.text) __cold notrace
#define __cpuinitdata __section(.cpuinit.data)
@@ -337,18 +329,6 @@ void __init parse_early_options(char *cmdline);
#define __INITRODATA_OR_MODULE __INITRODATA
#endif /*CONFIG_MODULES*/
-/* Functions marked as __devexit may be discarded at kernel link time, depending
- on config options. Newer versions of binutils detect references from
- retained sections to discarded sections and flag an error. Pointers to
- __devexit functions must use __devexit_p(function_name), the wrapper will
- insert either the function_name or NULL, depending on the config options.
- */
-#if defined(MODULE) || defined(CONFIG_HOTPLUG)
-#define __devexit_p(x) x
-#else
-#define __devexit_p(x) NULL
-#endif
-
#ifdef MODULE
#define __exit_p(x) x
#else
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 5e4e6170f43a..5fa5afeeb759 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -268,11 +268,6 @@ struct irq_affinity_notify {
extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
-static inline void irq_run_affinity_notifiers(void)
-{
- flush_scheduled_work();
-}
-
#else /* CONFIG_SMP */
static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
diff --git a/include/linux/ion.h b/include/linux/ion.h
new file mode 100644
index 000000000000..4924a596dcdd
--- /dev/null
+++ b/include/linux/ion.h
@@ -0,0 +1,354 @@
+/*
+ * include/linux/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ION_H
+#define _LINUX_ION_H
+
+#include <linux/types.h>
+
+struct ion_handle;
+/**
+ * enum ion_heap_types - list of all possible types of heaps
+ * @ION_HEAP_TYPE_SYSTEM: memory allocated via vmalloc
+ * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
+ * @ION_HEAP_TYPE_CARVEOUT: memory allocated from a pre-reserved
+ * carveout heap, allocations are physically
+ * contiguous
+ * @ION_NUM_HEAPS: helper for iterating over heaps
+ */
+enum ion_heap_type {
+ ION_HEAP_TYPE_SYSTEM,
+ ION_HEAP_TYPE_SYSTEM_CONTIG,
+ ION_HEAP_TYPE_CARVEOUT,
+ ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
+ are at the end of this enum */
+ ION_NUM_HEAPS,
+};
+
+#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM)
+#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
+#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
+
+/**
+ * heap flags - the lower 16 bits are used by core ion, the upper 16
+ * bits are reserved for use by the heaps themselves.
+ */
+#define ION_FLAG_CACHED 1 /* mappings of this buffer should be
+ cached, ion will do cache
+ maintenance when the buffer is
+ mapped for dma */
+#define ION_FLAG_CACHED_NEEDS_SYNC 2 /* mappings of this buffer will be created
+ at mmap time, if this is set
+ caches must be managed manually */
+
+#ifdef __KERNEL__
+struct ion_device;
+struct ion_heap;
+struct ion_mapper;
+struct ion_client;
+struct ion_buffer;
+
+/* This should be removed some day when phys_addr_t's are fully
+ plumbed in the kernel, and all instances of ion_phys_addr_t should
+ be converted to phys_addr_t. For the time being many kernel interfaces
+ do not accept phys_addr_t, so those interfaces would have to change first. */
+#define ion_phys_addr_t unsigned long
+
+/**
+ * struct ion_platform_heap - defines a heap in the given platform
+ * @type: type of the heap from ion_heap_type enum
+ * @id: unique identifier for the heap; when allocating, heaps with
+ * lower ids are tried first
+ * @name: used for debug purposes
+ * @base: base address of heap in physical memory if applicable
+ * @size: size of the heap in bytes if applicable
+ *
+ * Provided by the board file.
+ */
+struct ion_platform_heap {
+ enum ion_heap_type type;
+ unsigned int id;
+ const char *name;
+ ion_phys_addr_t base;
+ size_t size;
+};
+
+/**
+ * struct ion_platform_data - array of platform heaps passed from board file
+ * @nr: number of structures in the array
+ * @heaps: array of platform_heap structures
+ *
+ * Provided by the board file in the form of platform data to a platform device.
+ */
+struct ion_platform_data {
+ int nr;
+ struct ion_platform_heap heaps[];
+};
+
+/**
+ * ion_reserve() - reserve memory for ion heaps if applicable
+ * @data: platform data specifying starting physical address and
+ * size
+ *
+ * Calls memblock reserve to set aside memory for heaps that are
+ * located at specific memory addresses or of specific sizes not
+ * managed by the kernel.
+ */
+void ion_reserve(struct ion_platform_data *data);
+
+/**
+ * ion_client_create() - allocates a client and returns it
+ * @dev: the global ion device
+ * @heap_mask: mask of heaps this client can allocate from
+ * @name: used for debugging
+ */
+struct ion_client *ion_client_create(struct ion_device *dev,
+ unsigned int heap_mask, const char *name);
+
+/**
+ * ion_client_destroy() - frees a client and all its handles
+ * @client: the client
+ *
+ * Free the provided client and all its resources including
+ * any handles it is holding.
+ */
+void ion_client_destroy(struct ion_client *client);
+
+/**
+ * ion_alloc - allocate ion memory
+ * @client: the client
+ * @len: size of the allocation
+ * @align: requested allocation alignment, lots of hardware blocks have
+ * alignment requirements of some kind
+ * @heap_mask: mask of heaps to allocate from, if multiple bits are set
+ * heaps will be tried in order from lowest to highest order bit
+ * @flags: heap flags, the low 16 bits are consumed by ion, the high 16
+ * bits are passed on to the respective heap and can be
+ * heap-specific
+ *
+ * Allocate memory in one of the heaps provided in heap mask and return
+ * an opaque handle to it.
+ */
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+ size_t align, unsigned int heap_mask,
+ unsigned int flags);
+
+/**
+ * ion_free - free a handle
+ * @client: the client
+ * @handle: the handle to free
+ *
+ * Free the provided handle.
+ */
+void ion_free(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_phys - returns the physical address and len of a handle
+ * @client: the client
+ * @handle: the handle
+ * @addr: a pointer to put the address in
+ * @len: a pointer to put the length in
+ *
+ * This function queries the heap for a particular handle to get the
+ * handle's physical address. Its output is only correct if
+ * a heap returns physically contiguous memory -- in other cases
+ * this api should not be implemented -- ion_sg_table should be used
+ * instead. Returns -EINVAL if the handle is invalid. This has
+ * no implications on the reference counting of the handle --
+ * the returned value may not be valid if the caller is not
+ * holding a reference.
+ */
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len);
+
+/**
+ * ion_map_dma - return an sg_table describing a handle
+ * @client: the client
+ * @handle: the handle
+ *
+ * This function returns the sg_table describing
+ * a particular ion handle.
+ */
+struct sg_table *ion_sg_table(struct ion_client *client,
+ struct ion_handle *handle);
+
+/**
+ * ion_map_kernel - create mapping for the given handle
+ * @client: the client
+ * @handle: handle to map
+ *
+ * Map the given handle into the kernel and return a kernel address that
+ * can be used to access this address.
+ */
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_unmap_kernel() - destroy a kernel mapping for a handle
+ * @client: the client
+ * @handle: handle to unmap
+ */
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_share_dma_buf() - given an ion client, create a dma-buf fd
+ * @client: the client
+ * @handle: the handle
+ */
+int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_import_dma_buf() - given a dma-buf fd from the ion exporter, get a handle
+ * @client: the client
+ * @fd: the dma-buf fd
+ *
+ * Given a dma-buf fd that was allocated through ion via ion_share_dma_buf,
+ * import that fd and return a handle representing it. If a dma-buf from
+ * another exporter is passed in, this function will return ERR_PTR(-EINVAL).
+ */
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
+
+#endif /* __KERNEL__ */
+
+/**
+ * DOC: Ion Userspace API
+ *
+ * Create a client by opening /dev/ion.
+ * Most operations are then handled via the following ioctls.
+ *
+ */
+
+/**
+ * struct ion_allocation_data - metadata passed from userspace for allocations
+ * @len: size of the allocation
+ * @align: required alignment of the allocation
+ * @heap_mask: mask of heaps to allocate from
+ * @flags: flags passed to heap
+ * @handle: pointer that will be populated with a cookie to use to refer
+ * to this allocation
+ *
+ * Provided by userspace as an argument to the ioctl
+ */
+struct ion_allocation_data {
+ size_t len;
+ size_t align;
+ unsigned int heap_mask;
+ unsigned int flags;
+ struct ion_handle *handle;
+};
+
+/**
+ * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
+ * @handle: a handle
+ * @fd: a file descriptor representing that handle
+ *
+ * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
+ * the handle returned from ion alloc, and the kernel returns the file
+ * descriptor to share or map in the fd field. For ION_IOC_IMPORT, userspace
+ * provides the file descriptor and the kernel returns the handle.
+ */
+struct ion_fd_data {
+ struct ion_handle *handle;
+ int fd;
+};
+
+/**
+ * struct ion_handle_data - a handle passed to/from the kernel
+ * @handle: a handle
+ */
+struct ion_handle_data {
+ struct ion_handle *handle;
+};
+
+/**
+ * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
+ * @cmd: the custom ioctl function to call
+ * @arg: additional data to pass to the custom ioctl, typically a user
+ * pointer to a predefined structure
+ *
+ * This works just like the regular cmd and arg fields of an ioctl.
+ */
+struct ion_custom_data {
+ unsigned int cmd;
+ unsigned long arg;
+};
+
+#define ION_IOC_MAGIC 'I'
+
+/**
+ * DOC: ION_IOC_ALLOC - allocate memory
+ *
+ * Takes an ion_allocation_data struct and returns it with the handle field
+ * populated with the opaque handle for the allocation.
+ */
+#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
+ struct ion_allocation_data)
+
+/**
+ * DOC: ION_IOC_FREE - free memory
+ *
+ * Takes an ion_handle_data struct and frees the handle.
+ */
+#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+
+/**
+ * DOC: ION_IOC_MAP - get a file descriptor to mmap
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle. Returns the struct with the fd field set to a file
+ * descriptor open in the current address space. This file descriptor
+ * can then be used as an argument to mmap.
+ */
+#define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle. Returns the struct with the fd field set to a file
+ * descriptor open in the current address space. This file descriptor
+ * can then be passed to another process. The corresponding opaque handle can
+ * be retrieved via ION_IOC_IMPORT.
+ */
+#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_IMPORT - imports a shared file descriptor
+ *
+ * Takes an ion_fd_data struct with the fd field populated with a valid file
+ * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
+ * field set to the corresponding opaque handle.
+ */
+#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SYNC - syncs a shared file descriptor to memory
+ *
+ * Deprecated in favor of using the dma_buf APIs correctly (syncing
+ * will happen automatically when the buffer is mapped to a device).
+ * If necessary, it should be used after touching a cached buffer from the
+ * CPU; this makes the buffer coherent in memory.
+ */
+#define ION_IOC_SYNC _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
+ *
+ * Takes the argument of the architecture specific ioctl to call and
+ * passes appropriate userdata for that ioctl
+ */
+#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
+
+#endif /* _LINUX_ION_H */
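
The DOC block and ioctls above define the /dev/ion character-device interface. Below is a minimal userspace sketch of that flow, assuming the header is exported to userspace as <linux/ion.h> and that a system heap is present; error handling is abbreviated and the code is illustrative, not part of the patch:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/ion.h>

int main(void)
{
	struct ion_allocation_data alloc;
	struct ion_fd_data share;
	struct ion_handle_data free_data;
	void *ptr;
	int ion_fd = open("/dev/ion", O_RDWR);

	if (ion_fd < 0)
		return 1;

	memset(&alloc, 0, sizeof(alloc));
	alloc.len = 4096;
	alloc.align = 4096;
	alloc.heap_mask = ION_HEAP_SYSTEM_MASK;
	alloc.flags = ION_FLAG_CACHED;
	if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0)
		goto out;

	/* Turn the opaque handle into a dma-buf fd that can be mmap'd here
	 * or passed to another process or driver. */
	share.handle = alloc.handle;
	if (ioctl(ion_fd, ION_IOC_SHARE, &share) < 0)
		goto out_free;

	ptr = mmap(NULL, alloc.len, PROT_READ | PROT_WRITE, MAP_SHARED,
		   share.fd, 0);
	if (ptr != MAP_FAILED) {
		memset(ptr, 0, alloc.len);
		munmap(ptr, alloc.len);
	}
	close(share.fd);

out_free:
	free_data.handle = alloc.handle;
	ioctl(ion_fd, ION_IOC_FREE, &free_data);
out:
	close(ion_fd);
	return 0;
}
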
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index c566927efcbd..78a16c08e3c6 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -733,4 +733,7 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
extern int do_sysinfo(struct sysinfo *info);
+/* To identify board information in panic logs, set this */
+extern char *mach_panic_string;
+
#endif
diff --git a/include/linux/keychord.h b/include/linux/keychord.h
new file mode 100644
index 000000000000..856a5850217b
--- /dev/null
+++ b/include/linux/keychord.h
@@ -0,0 +1,52 @@
+/*
+ * Key chord input driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef __LINUX_KEYCHORD_H_
+#define __LINUX_KEYCHORD_H_
+
+#include <linux/input.h>
+
+#define KEYCHORD_VERSION 1
+
+/*
+ * One or more input_keychord structs are written to /dev/keychord
+ * at once to specify the list of keychords to monitor.
+ * Reading /dev/keychord returns the id of a keychord when the
+ * keychord combination is pressed. A keychord is signalled when
+ * all of the keys in the keycode list are in the pressed state.
+ * The order in which the keys are pressed does not matter.
+ * The keychord will not be signalled if keys not in the keycode
+ * list are pressed.
+ * Keychords will not be signalled on key release events.
+ */
+struct input_keychord {
+ /* should be KEYCHORD_VERSION */
+ __u16 version;
+ /*
+ * client specified ID, returned from read()
+ * when this keychord is pressed.
+ */
+ __u16 id;
+
+ /* number of keycodes in this keychord */
+ __u16 count;
+
+ /* variable length array of keycodes */
+ __u16 keycodes[];
+};
+
+#endif /* __LINUX_KEYCHORD_H_ */
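
To make the comment above concrete, here is a hypothetical userspace sketch that registers a single keychord (power plus volume-down) and blocks until it fires; the keycode choice is arbitrary and error handling is abbreviated:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/input.h>
#include <linux/keychord.h>

int main(void)
{
	/* One keychord record followed by its two keycodes. */
	__u16 buf[sizeof(struct input_keychord) / sizeof(__u16) + 2];
	struct input_keychord *kc = (struct input_keychord *)buf;
	__u16 id;
	int fd = open("/dev/keychord", O_RDWR);

	if (fd < 0)
		return 1;

	memset(buf, 0, sizeof(buf));
	kc->version = KEYCHORD_VERSION;
	kc->id = 1;
	kc->count = 2;
	kc->keycodes[0] = KEY_POWER;
	kc->keycodes[1] = KEY_VOLUMEDOWN;

	if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
		return 1;

	/* read() blocks until every key of a registered chord is held. */
	if (read(fd, &id, sizeof(id)) == (ssize_t)sizeof(id))
		printf("keychord %d pressed\n", id);

	close(fd);
	return 0;
}
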
diff --git a/include/linux/keyreset.h b/include/linux/keyreset.h
new file mode 100644
index 000000000000..a2ac49e5b684
--- /dev/null
+++ b/include/linux/keyreset.h
@@ -0,0 +1,28 @@
+/*
+ * include/linux/keyreset.h - platform data structure for the keyreset driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_KEYRESET_H
+#define _LINUX_KEYRESET_H
+
+#define KEYRESET_NAME "keyreset"
+
+struct keyreset_platform_data {
+ int (*reset_fn)(void);
+ int *keys_up;
+ int keys_down[]; /* 0 terminated */
+};
+
+#endif /* _LINUX_KEYRESET_H */
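
A hypothetical board-file sketch of how this platform data might be wired up. The device and key choices are illustrative, and the static initialization of the zero-terminated keys_down[] flexible array relies on the GCC extension that existing Android board files use:

#include <linux/input.h>
#include <linux/keyreset.h>
#include <linux/platform_device.h>

static int example_reset(void)
{
	/* custom reset handling goes here */
	return 0;
}

static struct keyreset_platform_data example_keyreset_data = {
	.reset_fn = example_reset,
	.keys_down = {
		KEY_POWER,
		KEY_VOLUMEDOWN,
		0,	/* zero terminated */
	},
};

static struct platform_device example_keyreset_device = {
	.name = KEYRESET_NAME,
	.id = -1,
	.dev = {
		.platform_data = &example_keyreset_data,
	},
};
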
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 00e46376e28f..2bca44b0893c 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -524,14 +524,17 @@ static inline void print_irqtrace_events(struct task_struct *curr)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
+# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i)
# else
# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
+# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i)
# endif
# define rwsem_release(l, n, i) lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i) do { } while (0)
+# define rwsem_acquire_nest(l, s, t, n, i) do { } while (0)
# define rwsem_acquire_read(l, s, t, i) do { } while (0)
# define rwsem_release(l, n, i) do { } while (0)
#endif
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 63204078f72b..85fb46357127 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -455,7 +455,6 @@ void put_pages_list(struct list_head *pages);
void split_page(struct page *page, unsigned int order);
int split_free_page(struct page *page);
-int capture_free_page(struct page *page, int alloc_order, int migratetype);
/*
* Compound pages have a destructor function. Provide a
@@ -930,6 +929,7 @@ extern void pagefault_out_of_memory(void);
extern void show_free_areas(unsigned int flags);
extern bool skip_free_areas_node(unsigned int flags, int nid);
+void shmem_set_file(struct vm_area_struct *vma, struct file *file);
int shmem_zero_setup(struct vm_area_struct *);
extern int can_do_mlock(void);
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 61a10c17aabd..24d8cfd23241 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -15,6 +15,7 @@
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/fault-inject.h>
+#include <linux/wakelock.h>
#include <linux/mmc/core.h>
#include <linux/mmc/pm.h>
@@ -306,12 +307,17 @@ struct mmc_host {
int claim_cnt; /* "claim" nesting count */
struct delayed_work detect;
+ struct wake_lock detect_wake_lock;
int detect_change; /* card detect flag */
struct mmc_slot slot;
const struct mmc_bus_ops *bus_ops; /* current bus driver */
unsigned int bus_refs; /* reference counter */
+ unsigned int bus_resume_flags;
+#define MMC_BUSRESUME_MANUAL_RESUME (1 << 0)
+#define MMC_BUSRESUME_NEEDS_RESUME (1 << 1)
+
unsigned int sdio_irqs;
struct task_struct *sdio_irq_thread;
bool sdio_irq_pending;
@@ -338,6 +344,15 @@ struct mmc_host {
unsigned int actual_clock; /* Actual HC clock rate */
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ struct {
+ struct sdio_cis *cis;
+ struct sdio_cccr *cccr;
+ struct sdio_embedded_func *funcs;
+ int num_funcs;
+ } embedded_sdio_data;
+#endif
+
unsigned long private[0] ____cacheline_aligned;
};
@@ -346,6 +361,14 @@ extern int mmc_add_host(struct mmc_host *);
extern void mmc_remove_host(struct mmc_host *);
extern void mmc_free_host(struct mmc_host *);
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+extern void mmc_set_embedded_sdio_data(struct mmc_host *host,
+ struct sdio_cis *cis,
+ struct sdio_cccr *cccr,
+ struct sdio_embedded_func *funcs,
+ int num_funcs);
+#endif
+
static inline void *mmc_priv(struct mmc_host *host)
{
return (void *)host->private;
@@ -356,6 +379,18 @@ static inline void *mmc_priv(struct mmc_host *host)
#define mmc_dev(x) ((x)->parent)
#define mmc_classdev(x) (&(x)->class_dev)
#define mmc_hostname(x) (dev_name(&(x)->class_dev))
+#define mmc_bus_needs_resume(host) ((host)->bus_resume_flags & MMC_BUSRESUME_NEEDS_RESUME)
+#define mmc_bus_manual_resume(host) ((host)->bus_resume_flags & MMC_BUSRESUME_MANUAL_RESUME)
+
+static inline void mmc_set_bus_resume_policy(struct mmc_host *host, int manual)
+{
+ if (manual)
+ host->bus_resume_flags |= MMC_BUSRESUME_MANUAL_RESUME;
+ else
+ host->bus_resume_flags &= ~MMC_BUSRESUME_MANUAL_RESUME;
+}
+
+extern int mmc_resume_bus(struct mmc_host *host);
extern int mmc_suspend_host(struct mmc_host *);
extern int mmc_resume_host(struct mmc_host *);
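
The bus_resume_flags machinery above lets a host defer resuming the card until traffic is actually pending. Below is a hypothetical SDIO function driver sketch of that flow, using only the helpers added in this hunk (mmc_set_bus_resume_policy(), mmc_bus_needs_resume() and mmc_resume_bus()); the function names are illustrative:

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>

static int example_sdio_probe(struct sdio_func *func,
			      const struct sdio_device_id *id)
{
	/* Opt in to manual resume: the core marks the bus as needing
	 * resume instead of waking the card during system resume. */
	mmc_set_bus_resume_policy(func->card->host, 1);
	return 0;
}

static void example_sdio_transfer(struct sdio_func *func)
{
	struct mmc_host *host = func->card->host;

	/* Resume the bus lazily, only when traffic is actually pending. */
	if (mmc_bus_needs_resume(host))
		mmc_resume_bus(host);

	/* ... perform the SDIO transfer ... */
}
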
diff --git a/include/linux/mmc/pm.h b/include/linux/mmc/pm.h
index 4a139204c20c..6e2d6a135c7e 100644
--- a/include/linux/mmc/pm.h
+++ b/include/linux/mmc/pm.h
@@ -26,5 +26,6 @@ typedef unsigned int mmc_pm_flag_t;
#define MMC_PM_KEEP_POWER (1 << 0) /* preserve card power during suspend */
#define MMC_PM_WAKE_SDIO_IRQ (1 << 1) /* wake up host system on SDIO IRQ assertion */
+#define MMC_PM_IGNORE_PM_NOTIFY (1 << 2) /* ignore mmc pm notify */
#endif /* LINUX_MMC_PM_H */
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index 50f0bc952328..dc680c4b50d4 100644..100755
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -23,6 +23,14 @@ struct sdio_func;
typedef void (sdio_irq_handler_t)(struct sdio_func *);
/*
+ * Structure used to hold embedded SDIO device data from platform layer
+ */
+struct sdio_embedded_func {
+ uint8_t f_class;
+ uint32_t f_maxblksize;
+};
+
+/*
* SDIO function CIS tuple (unknown to the core)
*/
struct sdio_func_tuple {
@@ -130,6 +138,8 @@ extern int sdio_release_irq(struct sdio_func *func);
extern unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz);
extern u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret);
+extern u8 sdio_readb_ext(struct sdio_func *func, unsigned int addr, int *err_ret,
+ unsigned in);
extern u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret);
extern u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c599e4782d45..9ef07d0868b6 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -60,6 +60,9 @@ struct wireless_dev;
#define SET_ETHTOOL_OPS(netdev,ops) \
( (netdev)->ethtool_ops = (ops) )
+extern void netdev_set_default_ethtool_ops(struct net_device *dev,
+ const struct ethtool_ops *ops);
+
/* hardware address assignment types */
#define NET_ADDR_PERM 0 /* address is permanent (default) */
#define NET_ADDR_RANDOM 1 /* address is generated randomly */
diff --git a/include/linux/netfilter/xt_qtaguid.h b/include/linux/netfilter/xt_qtaguid.h
new file mode 100644
index 000000000000..ca60fbdec2f3
--- /dev/null
+++ b/include/linux/netfilter/xt_qtaguid.h
@@ -0,0 +1,13 @@
+#ifndef _XT_QTAGUID_MATCH_H
+#define _XT_QTAGUID_MATCH_H
+
+/* For now we just replace the xt_owner.
+ * FIXME: make iptables aware of qtaguid. */
+#include <linux/netfilter/xt_owner.h>
+
+#define XT_QTAGUID_UID XT_OWNER_UID
+#define XT_QTAGUID_GID XT_OWNER_GID
+#define XT_QTAGUID_SOCKET XT_OWNER_SOCKET
+#define xt_qtaguid_match_info xt_owner_match_info
+
+#endif /* _XT_QTAGUID_MATCH_H */
diff --git a/include/linux/netfilter/xt_quota2.h b/include/linux/netfilter/xt_quota2.h
new file mode 100644
index 000000000000..eadc6903314e
--- /dev/null
+++ b/include/linux/netfilter/xt_quota2.h
@@ -0,0 +1,25 @@
+#ifndef _XT_QUOTA_H
+#define _XT_QUOTA_H
+
+enum xt_quota_flags {
+ XT_QUOTA_INVERT = 1 << 0,
+ XT_QUOTA_GROW = 1 << 1,
+ XT_QUOTA_PACKET = 1 << 2,
+ XT_QUOTA_NO_CHANGE = 1 << 3,
+ XT_QUOTA_MASK = 0x0F,
+};
+
+struct xt_quota_counter;
+
+struct xt_quota_mtinfo2 {
+ char name[15];
+ u_int8_t flags;
+
+ /* Comparison-invariant */
+ aligned_u64 quota;
+
+ /* Used internally by the kernel */
+ struct xt_quota_counter *master __attribute__((aligned(8)));
+};
+
+#endif /* _XT_QUOTA_H */
diff --git a/include/linux/platform_data/android_battery.h b/include/linux/platform_data/android_battery.h
new file mode 100644
index 000000000000..f6c8298fd885
--- /dev/null
+++ b/include/linux/platform_data/android_battery.h
@@ -0,0 +1,47 @@
+/*
+ * android_battery.h
+ *
+ * Copyright (C) 2012 Samsung Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_ANDROID_BATTERY_H
+#define _LINUX_ANDROID_BATTERY_H
+
+enum {
+ CHARGE_SOURCE_NONE = 0,
+ CHARGE_SOURCE_AC,
+ CHARGE_SOURCE_USB,
+};
+
+struct android_bat_callbacks {
+ void (*charge_source_changed)
+ (struct android_bat_callbacks *, int);
+ void (*battery_set_full)(struct android_bat_callbacks *);
+};
+
+struct android_bat_platform_data {
+ void (*register_callbacks)(struct android_bat_callbacks *);
+ void (*unregister_callbacks)(void);
+ void (*set_charging_current) (int);
+ void (*set_charging_enable) (int);
+ int (*poll_charge_source) (void);
+ int (*get_capacity) (void);
+ int (*get_temperature) (int *);
+ int (*get_voltage_now)(void);
+ int (*get_current_now)(int *);
+
+ int temp_high_threshold;
+ int temp_high_recovery;
+ int temp_low_recovery;
+ int temp_low_threshold;
+
+ unsigned long full_charging_time;
+ unsigned long recharging_time;
+ unsigned int recharging_voltage;
+};
+
+#endif
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 1f0ab90aff00..587a0a63dc37 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -13,6 +13,7 @@
#ifndef __LINUX_POWER_SUPPLY_H__
#define __LINUX_POWER_SUPPLY_H__
+#include <linux/wakelock.h>
#include <linux/workqueue.h>
#include <linux/leds.h>
@@ -190,6 +191,9 @@ struct power_supply {
struct thermal_zone_device *tzd;
struct thermal_cooling_device *tcd;
#endif
+ spinlock_t changed_lock;
+ bool changed;
+ struct wake_lock work_wake_lock;
#ifdef CONFIG_LEDS_TRIGGERS
struct led_trigger *charging_full_trig;
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index 2ac60c9cf644..fea49b5da12a 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -123,9 +123,9 @@ __rb_change_child(struct rb_node *old, struct rb_node *new,
extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
-static __always_inline void
-rb_erase_augmented(struct rb_node *node, struct rb_root *root,
- const struct rb_augment_callbacks *augment)
+static __always_inline struct rb_node *
+__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+ const struct rb_augment_callbacks *augment)
{
struct rb_node *child = node->rb_right, *tmp = node->rb_left;
struct rb_node *parent, *rebalance;
@@ -217,6 +217,14 @@ rb_erase_augmented(struct rb_node *node, struct rb_root *root,
}
augment->propagate(tmp, NULL);
+ return rebalance;
+}
+
+static __always_inline void
+rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+ const struct rb_augment_callbacks *augment)
+{
+ struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
if (rebalance)
__rb_erase_color(rebalance, root, augment->rotate);
}
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 54bd7cd7ecbd..8da67d625e13 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -125,8 +125,17 @@ extern void downgrade_write(struct rw_semaphore *sem);
*/
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
+extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
+
+# define down_write_nest_lock(sem, nest_lock) \
+do { \
+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
+ _down_write_nest_lock(sem, &(nest_lock)->dep_map); \
+} while (0)
+
#else
# define down_read_nested(sem, subclass) down_read(sem)
+# define down_write_nest_lock(sem, nest_lock) down_write(sem)
# define down_write_nested(sem, subclass) down_write(sem)
#endif
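
A minimal sketch of the annotation added above, assuming two illustrative locks: when the writer already holds an outer lock that establishes ordering, down_write_nest_lock() tells lockdep to treat the write acquisition as nested under that lock rather than as an independent, potentially deadlocking site:

#include <linux/mutex.h>
#include <linux/rwsem.h>

static DEFINE_MUTEX(outer_lock);
static DECLARE_RWSEM(inner_sem);

static void example(void)
{
	mutex_lock(&outer_lock);
	/* Tell lockdep that inner_sem nests under outer_lock. */
	down_write_nest_lock(&inner_sem, &outer_lock);

	/* ... critical section protected by both locks ... */

	up_write(&inner_sem);
	mutex_unlock(&outer_lock);
}
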
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 206bb089c06b..b15d69803fd4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -657,6 +657,7 @@ struct signal_struct {
#endif
oom_flags_t oom_flags;
+ int oom_adj; /* OOM kill score adjustment (bit shift) */
short oom_score_adj; /* OOM kill score adjustment */
short oom_score_adj_min; /* OOM kill score adjustment min value.
* Only settable by CAP_SYS_RESOURCE. */
@@ -1020,6 +1021,12 @@ unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
bool cpus_share_cache(int this_cpu, int that_cpu);
+#ifdef CONFIG_SCHED_HMP
+struct hmp_domain {
+ struct cpumask cpus;
+ struct list_head hmp_domains;
+};
+#endif /* CONFIG_SCHED_HMP */
#else /* CONFIG_SMP */
struct sched_domain_attr;
@@ -1132,6 +1139,12 @@ struct sched_avg {
u64 last_runnable_update;
s64 decay_count;
unsigned long load_avg_contrib;
+ unsigned long load_avg_ratio;
+#ifdef CONFIG_SCHED_HMP
+ u64 hmp_last_up_migration;
+ u64 hmp_last_down_migration;
+#endif
+ u32 usage_avg_sum;
};
#ifdef CONFIG_SCHEDSTATS
@@ -1795,6 +1808,9 @@ static inline void put_task_struct(struct task_struct *t)
extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
+extern int task_free_register(struct notifier_block *n);
+extern int task_free_unregister(struct notifier_block *n);
+
/*
* Per process flags
*/
@@ -1810,6 +1826,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
#define PF_MEMALLOC 0x00000800 /* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
+#define PF_USED_ASYNC 0x00004000 /* used async_schedule*(), used by module init */
#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
#define PF_FROZEN 0x00010000 /* frozen for system suspend */
#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index c6690a2a27fb..6cf2d59dbcbe 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -61,6 +61,7 @@ struct uart_ops {
void (*pm)(struct uart_port *, unsigned int state,
unsigned int oldstate);
int (*set_wake)(struct uart_port *, unsigned int state);
+ void (*wake_peer)(struct uart_port *);
/*
* Return a string describing the type of the port
diff --git a/include/linux/sw_sync.h b/include/linux/sw_sync.h
new file mode 100644
index 000000000000..bd6f2089e77f
--- /dev/null
+++ b/include/linux/sw_sync.h
@@ -0,0 +1,58 @@
+/*
+ * include/linux/sw_sync.h
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_SW_SYNC_H
+#define _LINUX_SW_SYNC_H
+
+#include <linux/types.h>
+
+#ifdef __KERNEL__
+
+#include <linux/sync.h>
+
+struct sw_sync_timeline {
+ struct sync_timeline obj;
+
+ u32 value;
+};
+
+struct sw_sync_pt {
+ struct sync_pt pt;
+
+ u32 value;
+};
+
+struct sw_sync_timeline *sw_sync_timeline_create(const char *name);
+void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc);
+
+struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value);
+
+#endif /* __KERNEL__ */
+
+struct sw_sync_create_fence_data {
+ __u32 value;
+ char name[32];
+ __s32 fence; /* fd of new fence */
+};
+
+#define SW_SYNC_IOC_MAGIC 'W'
+
+#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
+ struct sw_sync_create_fence_data)
+#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
+
+
+#endif /* _LINUX_SW_SYNC_H */
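
A hypothetical userspace sketch of the sw_sync test interface above: create a fence at value 1 on a timeline, then signal it by incrementing the timeline counter. The /dev/sw_sync node name is provided by the sw_sync driver, not by this header, so treat it as an assumption:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/sw_sync.h>

int main(void)
{
	struct sw_sync_create_fence_data data;
	__u32 inc = 1;
	int timeline = open("/dev/sw_sync", O_RDWR);

	if (timeline < 0)
		return 1;

	memset(&data, 0, sizeof(data));
	data.value = 1;
	strncpy(data.name, "example_fence", sizeof(data.name) - 1);
	if (ioctl(timeline, SW_SYNC_IOC_CREATE_FENCE, &data) < 0)
		return 1;

	/* data.fence is now a sync fence fd; it signals once the timeline
	 * counter reaches the fence's value. */
	ioctl(timeline, SW_SYNC_IOC_INC, &inc);

	close(data.fence);
	close(timeline);
	return 0;
}
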
diff --git a/include/linux/switch.h b/include/linux/switch.h
new file mode 100644
index 000000000000..3e4c748e343a
--- /dev/null
+++ b/include/linux/switch.h
@@ -0,0 +1,53 @@
+/*
+ * Switch class driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef __LINUX_SWITCH_H__
+#define __LINUX_SWITCH_H__
+
+struct switch_dev {
+ const char *name;
+ struct device *dev;
+ int index;
+ int state;
+
+ ssize_t (*print_name)(struct switch_dev *sdev, char *buf);
+ ssize_t (*print_state)(struct switch_dev *sdev, char *buf);
+};
+
+struct gpio_switch_platform_data {
+ const char *name;
+ unsigned gpio;
+
+ /* if NULL, switch_dev.name will be printed */
+ const char *name_on;
+ const char *name_off;
+ /* if NULL, "0" or "1" will be printed */
+ const char *state_on;
+ const char *state_off;
+};
+
+extern int switch_dev_register(struct switch_dev *sdev);
+extern void switch_dev_unregister(struct switch_dev *sdev);
+
+static inline int switch_get_state(struct switch_dev *sdev)
+{
+ return sdev->state;
+}
+
+extern void switch_set_state(struct switch_dev *sdev, int state);
+
+#endif /* __LINUX_SWITCH_H__ */
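
A minimal kernel-side sketch of the switch class: register a device once, then report state changes, which the class exposes through sysfs and a change uevent. The "h2w" name is the conventional Android wired-headset switch and is used here purely for illustration:

#include <linux/switch.h>

static struct switch_dev example_headset_switch = {
	.name = "h2w",	/* conventional Android wired-headset switch name */
};

static int example_register(void)
{
	return switch_dev_register(&example_headset_switch);
}

static void example_jack_event(int inserted)
{
	/* Updates the state attribute and notifies userspace via uevent. */
	switch_set_state(&example_headset_switch, inserted ? 1 : 0);
}

static void example_unregister(void)
{
	switch_dev_unregister(&example_headset_switch);
}
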
diff --git a/include/linux/synaptics_i2c_rmi.h b/include/linux/synaptics_i2c_rmi.h
new file mode 100644
index 000000000000..5539cc520779
--- /dev/null
+++ b/include/linux/synaptics_i2c_rmi.h
@@ -0,0 +1,55 @@
+/*
+ * include/linux/synaptics_i2c_rmi.h - platform data structure for the Synaptics RMI touchscreen
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_SYNAPTICS_I2C_RMI_H
+#define _LINUX_SYNAPTICS_I2C_RMI_H
+
+#define SYNAPTICS_I2C_RMI_NAME "synaptics-rmi-ts"
+
+enum {
+ SYNAPTICS_FLIP_X = 1UL << 0,
+ SYNAPTICS_FLIP_Y = 1UL << 1,
+ SYNAPTICS_SWAP_XY = 1UL << 2,
+ SYNAPTICS_SNAP_TO_INACTIVE_EDGE = 1UL << 3,
+};
+
+struct synaptics_i2c_rmi_platform_data {
+ uint32_t version; /* Use this entry for panels with */
+ /* (major << 8 | minor) version or above. */
+ /* If non-zero another array entry follows */
+ int (*power)(int on); /* Only valid in first array entry */
+ uint32_t flags;
+ unsigned long irqflags;
+ uint32_t inactive_left; /* 0x10000 = screen width */
+ uint32_t inactive_right; /* 0x10000 = screen width */
+ uint32_t inactive_top; /* 0x10000 = screen height */
+ uint32_t inactive_bottom; /* 0x10000 = screen height */
+ uint32_t snap_left_on; /* 0x10000 = screen width */
+ uint32_t snap_left_off; /* 0x10000 = screen width */
+ uint32_t snap_right_on; /* 0x10000 = screen width */
+ uint32_t snap_right_off; /* 0x10000 = screen width */
+ uint32_t snap_top_on; /* 0x10000 = screen height */
+ uint32_t snap_top_off; /* 0x10000 = screen height */
+ uint32_t snap_bottom_on; /* 0x10000 = screen height */
+ uint32_t snap_bottom_off; /* 0x10000 = screen height */
+ uint32_t fuzz_x; /* 0x10000 = screen width */
+ uint32_t fuzz_y; /* 0x10000 = screen height */
+ int fuzz_p;
+ int fuzz_w;
+ int8_t sensitivity_adjust;
+};
+
+#endif /* _LINUX_SYNAPTICS_I2C_RMI_H */
diff --git a/include/linux/sync.h b/include/linux/sync.h
new file mode 100644
index 000000000000..38ea986dc70f
--- /dev/null
+++ b/include/linux/sync.h
@@ -0,0 +1,426 @@
+/*
+ * include/linux/sync.h
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_SYNC_H
+#define _LINUX_SYNC_H
+
+#include <linux/types.h>
+#ifdef __KERNEL__
+
+#include <linux/kref.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+struct sync_timeline;
+struct sync_pt;
+struct sync_fence;
+
+/**
+ * struct sync_timeline_ops - sync object implementation ops
+ * @driver_name: name of the implementation
+ * @dup: duplicate a sync_pt
+ * @has_signaled: returns:
+ * 1 if pt has signaled
+ * 0 if pt has not signaled
+ * <0 on error
+ * @compare: returns:
+ * 1 if b will signal before a
+ * 0 if a and b will signal at the same time
+ * -1 if a will signal before b
+ * @free_pt: called before sync_pt is freed
+ * @release_obj: called before sync_timeline is freed
+ * @print_obj: deprecated
+ * @print_pt: deprecated
+ * @fill_driver_data: write implementation specific driver data to data.
+ * should return an error if there is not enough room
+ * as specified by size. This information is returned
+ * to userspace by SYNC_IOC_FENCE_INFO.
+ * @timeline_value_str: fill str with the value of the sync_timeline's counter
+ * @pt_value_str: fill str with the value of the sync_pt
+ */
+struct sync_timeline_ops {
+ const char *driver_name;
+
+ /* required */
+ struct sync_pt *(*dup)(struct sync_pt *pt);
+
+ /* required */
+ int (*has_signaled)(struct sync_pt *pt);
+
+ /* required */
+ int (*compare)(struct sync_pt *a, struct sync_pt *b);
+
+ /* optional */
+ void (*free_pt)(struct sync_pt *sync_pt);
+
+ /* optional */
+ void (*release_obj)(struct sync_timeline *sync_timeline);
+
+ /* deprecated */
+ void (*print_obj)(struct seq_file *s,
+ struct sync_timeline *sync_timeline);
+
+ /* deprecated */
+ void (*print_pt)(struct seq_file *s, struct sync_pt *sync_pt);
+
+ /* optional */
+ int (*fill_driver_data)(struct sync_pt *syncpt, void *data, int size);
+
+ /* optional */
+ void (*timeline_value_str)(struct sync_timeline *timeline, char *str,
+ int size);
+
+ /* optional */
+ void (*pt_value_str)(struct sync_pt *pt, char *str, int size);
+};
+
+/**
+ * struct sync_timeline - sync object
+ * @kref: reference count on fence.
+ * @ops: ops that define the implementation of the sync_timeline
+ * @name: name of the sync_timeline. Useful for debugging
+ * @destroyed: set when sync_timeline is destroyed
+ * @child_list_head: list of children sync_pts for this sync_timeline
+ * @child_list_lock: lock protecting @child_list_head, destroyed, and
+ * sync_pt.status
+ * @active_list_head: list of active (unsignaled/errored) sync_pts
+ * @sync_timeline_list: membership in global sync_timeline_list
+ */
+struct sync_timeline {
+ struct kref kref;
+ const struct sync_timeline_ops *ops;
+ char name[32];
+
+ /* protected by child_list_lock */
+ bool destroyed;
+
+ struct list_head child_list_head;
+ spinlock_t child_list_lock;
+
+ struct list_head active_list_head;
+ spinlock_t active_list_lock;
+
+ struct list_head sync_timeline_list;
+};
+
+/**
+ * struct sync_pt - sync point
+ * @parent: sync_timeline to which this sync_pt belongs
+ * @child_list: membership in sync_timeline.child_list_head
+ * @active_list: membership in sync_timeline.active_list_head
+ * @signaled_list: membership in temporary signaled_list on stack
+ * @fence: sync_fence to which the sync_pt belongs
+ * @pt_list: membership in sync_fence.pt_list_head
+ * @status: 1: signaled, 0:active, <0: error
+ * @timestamp: time at which the sync_pt status transitioned from active to
+ * signaled or error.
+ */
+struct sync_pt {
+ struct sync_timeline *parent;
+ struct list_head child_list;
+
+ struct list_head active_list;
+ struct list_head signaled_list;
+
+ struct sync_fence *fence;
+ struct list_head pt_list;
+
+ /* protected by parent->active_list_lock */
+ int status;
+
+ ktime_t timestamp;
+};
+
+/**
+ * struct sync_fence - sync fence
+ * @file: file representing this fence
+ * @kref: reference count on fence.
+ * @name: name of sync_fence. Useful for debugging
+ * @pt_list_head: list of sync_pts in this fence. immutable once fence
+ * is created
+ * @waiter_list_head: list of asynchronous waiters on this fence
+ * @waiter_list_lock: lock protecting @waiter_list_head and @status
+ * @status: 1: signaled, 0:active, <0: error
+ *
+ * @wq: wait queue for fence signaling
+ * @sync_fence_list: membership in global fence list
+ */
+struct sync_fence {
+ struct file *file;
+ struct kref kref;
+ char name[32];
+
+ /* this list is immutable once the fence is created */
+ struct list_head pt_list_head;
+
+ struct list_head waiter_list_head;
+ spinlock_t waiter_list_lock; /* also protects status */
+ int status;
+
+ wait_queue_head_t wq;
+
+ struct list_head sync_fence_list;
+};
+
+struct sync_fence_waiter;
+typedef void (*sync_callback_t)(struct sync_fence *fence,
+ struct sync_fence_waiter *waiter);
+
+/**
+ * struct sync_fence_waiter - metadata for asynchronous waiter on a fence
+ * @waiter_list: membership in sync_fence.waiter_list_head
+ * @callback: function pointer to call when fence signals
+ * @callback_data: pointer to pass to @callback
+ */
+struct sync_fence_waiter {
+ struct list_head waiter_list;
+
+ sync_callback_t callback;
+};
+
+static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter,
+ sync_callback_t callback)
+{
+ waiter->callback = callback;
+}
+
+/*
+ * API for sync_timeline implementers
+ */
+
+/**
+ * sync_timeline_create() - creates a sync object
+ * @ops: specifies the implementation ops for the object
+ * @size: size to allocate for this obj
+ * @name: sync_timeline name
+ *
+ * Creates a new sync_timeline which will use the implementation specified by
+ * @ops. @size bytes will be allocated allowing for implementation specific
+ * data to be kept after the generic sync_timeline struct.
+ */
+struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
+ int size, const char *name);
+
+/**
+ * sync_timeline_destroy() - destroys a sync object
+ * @obj: sync_timeline to destroy
+ *
+ * A sync implementation should call this when the @obj is going away
+ * (i.e. module unload.) @obj won't actually be freed until all its children
+ * sync_pts are freed.
+ */
+void sync_timeline_destroy(struct sync_timeline *obj);
+
+/**
+ * sync_timeline_signal() - signal a status change on a sync_timeline
+ * @obj: sync_timeline to signal
+ *
+ * A sync implementation should call this any time one of its sync_pts
+ * has signaled or has an error condition.
+ */
+void sync_timeline_signal(struct sync_timeline *obj);
+
+/**
+ * sync_pt_create() - creates a sync pt
+ * @parent: sync_pt's parent sync_timeline
+ * @size: size to allocate for this pt
+ *
+ * Creates a new sync_pt as a child of @parent. @size bytes will be
+ * allocated allowing for implementation specific data to be kept after
+ * the generic sync_pt struct.
+ */
+struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size);
+
+/**
+ * sync_pt_free() - frees a sync pt
+ * @pt: sync_pt to free
+ *
+ * This should only be called on sync_pts which have been created but
+ * not added to a fence.
+ */
+void sync_pt_free(struct sync_pt *pt);
+
+/**
+ * sync_fence_create() - creates a sync fence
+ * @name: name of fence to create
+ * @pt: sync_pt to add to the fence
+ *
+ * Creates a fence containing @pt. Once this is called, the fence takes
+ * ownership of @pt.
+ */
+struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt);
+
+/*
+ * API for sync_fence consumers
+ */
+
+/**
+ * sync_fence_merge() - merge two fences
+ * @name: name of new fence
+ * @a: fence a
+ * @b: fence b
+ *
+ * Creates a new fence which contains copies of all the sync_pts in both
+ * @a and @b. @a and @b remain valid, independent fences.
+ */
+struct sync_fence *sync_fence_merge(const char *name,
+ struct sync_fence *a, struct sync_fence *b);
+
+/**
+ * sync_fence_fdget() - get a fence from an fd
+ * @fd: fd referencing a fence
+ *
+ * Ensures @fd references a valid fence, increments the refcount of the backing
+ * file, and returns the fence.
+ */
+struct sync_fence *sync_fence_fdget(int fd);
+
+/**
+ * sync_fence_put() - puts a reference on a sync fence
+ * @fence: fence to put
+ *
+ * Puts a reference on @fence. If this is the last reference, the fence and
+ * all its sync_pts will be freed.
+ */
+void sync_fence_put(struct sync_fence *fence);
+
+/**
+ * sync_fence_install() - installs a fence into a file descriptor
+ * @fence: fence to install
+ * @fd: file descriptor in which to install the fence
+ *
+ * Installs @fence into @fd. @fd should be acquired through get_unused_fd().
+ */
+void sync_fence_install(struct sync_fence *fence, int fd);
+
+/**
+ * sync_fence_wait_async() - registers an async wait on the fence
+ * @fence: fence to wait on
+ * @waiter: waiter callback struct
+ *
+ * Returns 1 if @fence has already signaled.
+ *
+ * Registers a callback to be called when @fence signals or has an error.
+ * @waiter should be initialized with sync_fence_waiter_init().
+ */
+int sync_fence_wait_async(struct sync_fence *fence,
+ struct sync_fence_waiter *waiter);
+
+/**
+ * sync_fence_cancel_async() - cancels an async wait
+ * @fence: fence to wait on
+ * @waiter: waiter callback struct
+ *
+ * returns 0 if waiter was removed from fence's async waiter list.
+ * returns -ENOENT if waiter was not found on fence's async waiter list.
+ *
+ * Cancels a previously registered async wait. Will fail gracefully if
+ * @waiter was never registered or if @fence has already signaled @waiter.
+ */
+int sync_fence_cancel_async(struct sync_fence *fence,
+ struct sync_fence_waiter *waiter);
+
+/**
+ * sync_fence_wait() - wait on fence
+ * @fence: fence to wait on
+ * @timeout: timeout in ms
+ *
+ * Wait for @fence to be signaled or have an error. Waits indefinitely
+ * if @timeout < 0
+ */
+int sync_fence_wait(struct sync_fence *fence, long timeout);
+
+#endif /* __KERNEL__ */
+
+/**
+ * struct sync_merge_data - data passed to merge ioctl
+ * @fd2: file descriptor of second fence
+ * @name: name of new fence
+ * @fence: returns the fd of the new fence to userspace
+ */
+struct sync_merge_data {
+ __s32 fd2; /* fd of second fence */
+ char name[32]; /* name of new fence */
+ __s32 fence; /* fd on newly created fence */
+};
+
+/**
+ * struct sync_pt_info - detailed sync_pt information
+ * @len: length of sync_pt_info including any driver_data
+ * @obj_name: name of parent sync_timeline
+ * @driver_name: name of driver implementing the parent
+ * @status: status of the sync_pt 0:active 1:signaled <0:error
+ * @timestamp_ns: timestamp of status change in nanoseconds
+ * @driver_data: any driver dependent data
+ */
+struct sync_pt_info {
+ __u32 len;
+ char obj_name[32];
+ char driver_name[32];
+ __s32 status;
+ __u64 timestamp_ns;
+
+ __u8 driver_data[0];
+};
+
+/**
+ * struct sync_fence_info_data - data returned from fence info ioctl
+ * @len: ioctl caller writes the size of the buffer it is passing in.
+ * ioctl returns length of sync_fence_data returned to userspace
+ * including pt_info.
+ * @name: name of fence
+ * @status: status of fence. 1: signaled 0:active <0:error
+ * @pt_info: a sync_pt_info struct for every sync_pt in the fence
+ */
+struct sync_fence_info_data {
+ __u32 len;
+ char name[32];
+ __s32 status;
+
+ __u8 pt_info[0];
+};
+
+#define SYNC_IOC_MAGIC '>'
+
+/**
+ * DOC: SYNC_IOC_WAIT - wait for a fence to signal
+ *
+ * pass timeout in milliseconds. Waits indefinitely if timeout < 0.
+ */
+#define SYNC_IOC_WAIT _IOW(SYNC_IOC_MAGIC, 0, __s32)
+
+/**
+ * DOC: SYNC_IOC_MERGE - merge two fences
+ *
+ * Takes a struct sync_merge_data. Creates a new fence containing copies of
+ * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the
+ * new fence's fd in sync_merge_data.fence
+ */
+#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
+
+/**
+ * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
+ *
+ * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
+ * Caller should write the size of the buffer into len. On return, len is
+ * updated to reflect the total size of the sync_fence_info_data including
+ * pt_info.
+ *
+ * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
+ * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
+ */
+#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\
+ struct sync_fence_info_data)
+
+#endif /* _LINUX_SYNC_H */
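
A hypothetical userspace sketch of the consumer-side ioctls documented above: merge two fence fds obtained from some driver and wait up to one second on the result. fd1 and fd2 are assumed to be valid fence file descriptors:

#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/sync.h>

static int wait_on_both(int fd1, int fd2)
{
	struct sync_merge_data merge;
	__s32 timeout_ms = 1000;
	int err;

	memset(&merge, 0, sizeof(merge));
	merge.fd2 = fd2;
	strncpy(merge.name, "merged", sizeof(merge.name) - 1);

	/* New fence fd containing copies of the sync_pts of fd1 and fd2. */
	if (ioctl(fd1, SYNC_IOC_MERGE, &merge) < 0)
		return -1;

	err = ioctl(merge.fence, SYNC_IOC_WAIT, &timeout_ms);
	close(merge.fence);
	return err;
}
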
diff --git a/include/linux/uid_stat.h b/include/linux/uid_stat.h
new file mode 100644
index 000000000000..6bd6c4e52d17
--- /dev/null
+++ b/include/linux/uid_stat.h
@@ -0,0 +1,29 @@
+/* include/linux/uid_stat.h
+ *
+ * Copyright (C) 2008-2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __uid_stat_h
+#define __uid_stat_h
+
+/* Contains definitions for resource tracking per uid. */
+
+#ifdef CONFIG_UID_STAT
+int uid_stat_tcp_snd(uid_t uid, int size);
+int uid_stat_tcp_rcv(uid_t uid, int size);
+#else
+#define uid_stat_tcp_snd(uid, size) do {} while (0)
+#define uid_stat_tcp_rcv(uid, size) do {} while (0)
+#endif
+
+#endif /* __uid_stat_h */
diff --git a/include/linux/usb/f_accessory.h b/include/linux/usb/f_accessory.h
new file mode 100644
index 000000000000..61ebe0aabc5b
--- /dev/null
+++ b/include/linux/usb/f_accessory.h
@@ -0,0 +1,146 @@
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_USB_F_ACCESSORY_H
+#define __LINUX_USB_F_ACCESSORY_H
+
+/* Use Google Vendor ID when in accessory mode */
+#define USB_ACCESSORY_VENDOR_ID 0x18D1
+
+
+/* Product ID to use when in accessory mode */
+#define USB_ACCESSORY_PRODUCT_ID 0x2D00
+
+/* Product ID to use when in accessory mode and adb is enabled */
+#define USB_ACCESSORY_ADB_PRODUCT_ID 0x2D01
+
+/* Indexes for strings sent by the host via ACCESSORY_SEND_STRING */
+#define ACCESSORY_STRING_MANUFACTURER 0
+#define ACCESSORY_STRING_MODEL 1
+#define ACCESSORY_STRING_DESCRIPTION 2
+#define ACCESSORY_STRING_VERSION 3
+#define ACCESSORY_STRING_URI 4
+#define ACCESSORY_STRING_SERIAL 5
+
+/* Control request for retrieving device's protocol version
+ *
+ * requestType: USB_DIR_IN | USB_TYPE_VENDOR
+ * request: ACCESSORY_GET_PROTOCOL
+ * value: 0
+ * index: 0
+ * data version number (16 bits little endian)
+ * 1 for original accessory support
+ * 2 adds HID and device to host audio support
+ */
+#define ACCESSORY_GET_PROTOCOL 51
+
+/* Control request for host to send a string to the device
+ *
+ * requestType: USB_DIR_OUT | USB_TYPE_VENDOR
+ * request: ACCESSORY_SEND_STRING
+ * value: 0
+ * index: string ID
+ * data zero terminated UTF8 string
+ *
+ * The device can later retrieve these strings via the
+ * ACCESSORY_GET_STRING_* ioctls
+ */
+#define ACCESSORY_SEND_STRING 52
+
+/* Control request for starting device in accessory mode.
+ * The host sends this after setting all its strings to the device.
+ *
+ * requestType: USB_DIR_OUT | USB_TYPE_VENDOR
+ * request: ACCESSORY_START
+ * value: 0
+ * index: 0
+ * data none
+ */
+#define ACCESSORY_START 53
+
+/* Control request for registering a HID device.
+ * Upon registering, a unique ID is sent by the accessory in the
+ * value parameter. This ID will be used for future commands for
+ * the device
+ *
+ * requestType: USB_DIR_OUT | USB_TYPE_VENDOR
+ * request: ACCESSORY_REGISTER_HID
+ * value: Accessory assigned ID for the HID device
+ * index: total length of the HID report descriptor
+ * data none
+ */
+#define ACCESSORY_REGISTER_HID 54
+
+/* Control request for unregistering a HID device.
+ *
+ * requestType: USB_DIR_OUT | USB_TYPE_VENDOR
+ * request: ACCESSORY_UNREGISTER_HID
+ * value: Accessory assigned ID for the HID device
+ * index: 0
+ * data none
+ */
+#define ACCESSORY_UNREGISTER_HID 55
+
+/* Control request for sending the HID report descriptor.
+ * If the HID descriptor is longer than the endpoint zero max packet size,
+ * the descriptor will be sent in multiple ACCESSORY_SET_HID_REPORT_DESC
+ * commands. The data for the descriptor must be sent sequentially
+ * if multiple packets are needed.
+ *
+ * requestType: USB_DIR_OUT | USB_TYPE_VENDOR
+ * request: ACCESSORY_SET_HID_REPORT_DESC
+ * value: Accessory assigned ID for the HID device
+ * index: offset of data in descriptor
+ * (needed when HID descriptor is too big for one packet)
+ * data the HID report descriptor
+ */
+#define ACCESSORY_SET_HID_REPORT_DESC 56
+
+/* Control request for sending HID events.
+ *
+ * requestType: USB_DIR_OUT | USB_TYPE_VENDOR
+ * request: ACCESSORY_SEND_HID_EVENT
+ * value: Accessory assigned ID for the HID device
+ * index: 0
+ * data the HID report for the event
+ */
+#define ACCESSORY_SEND_HID_EVENT 57
+
+/* Control request for setting the audio mode.
+ *
+ * requestType: USB_DIR_OUT | USB_TYPE_VENDOR
+ * request: ACCESSORY_SET_AUDIO_MODE
+ * value: 0 - no audio
+ * 1 - device to host, 44100 16-bit stereo PCM
+ * index: 0
+ * data none
+ */
+#define ACCESSORY_SET_AUDIO_MODE 58
+
+/* ioctls for retrieving strings set by the host */
+#define ACCESSORY_GET_STRING_MANUFACTURER _IOW('M', 1, char[256])
+#define ACCESSORY_GET_STRING_MODEL _IOW('M', 2, char[256])
+#define ACCESSORY_GET_STRING_DESCRIPTION _IOW('M', 3, char[256])
+#define ACCESSORY_GET_STRING_VERSION _IOW('M', 4, char[256])
+#define ACCESSORY_GET_STRING_URI _IOW('M', 5, char[256])
+#define ACCESSORY_GET_STRING_SERIAL _IOW('M', 6, char[256])
+/* returns 1 if there is a start request pending */
+#define ACCESSORY_IS_START_REQUESTED _IO('M', 7)
+/* returns audio mode (set via the ACCESSORY_SET_AUDIO_MODE control request) */
+#define ACCESSORY_GET_AUDIO_MODE _IO('M', 8)
+
+#endif /* __LINUX_USB_F_ACCESSORY_H */
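
A hypothetical device-side sketch of the string ioctls above, run after the host has sent its strings and ACCESSORY_START. The /dev/usb_accessory node name comes from the Android accessory gadget driver rather than this header, so treat it as an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/usb/f_accessory.h>

int main(void)
{
	char manufacturer[256], model[256];
	int fd = open("/dev/usb_accessory", O_RDWR);

	if (fd < 0)
		return 1;

	if (ioctl(fd, ACCESSORY_GET_STRING_MANUFACTURER, manufacturer) >= 0 &&
	    ioctl(fd, ACCESSORY_GET_STRING_MODEL, model) >= 0)
		printf("accessory host: %s %s\n", manufacturer, model);

	close(fd);
	return 0;
}
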
diff --git a/include/linux/usb/f_mtp.h b/include/linux/usb/f_mtp.h
new file mode 100644
index 000000000000..72a432e2fcdd
--- /dev/null
+++ b/include/linux/usb/f_mtp.h
@@ -0,0 +1,75 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_USB_F_MTP_H
+#define __LINUX_USB_F_MTP_H
+
+#include <linux/ioctl.h>
+
+#ifdef __KERNEL__
+
+struct mtp_data_header {
+ /* length of packet, including this header */
+ uint32_t length;
+ /* container type (2 for data packet) */
+ uint16_t type;
+ /* MTP command code */
+ uint16_t command;
+ /* MTP transaction ID */
+ uint32_t transaction_id;
+};
+
+#endif /* __KERNEL__ */
+
+struct mtp_file_range {
+ /* file descriptor for file to transfer */
+ int fd;
+ /* offset in file for start of transfer */
+ loff_t offset;
+ /* number of bytes to transfer */
+ int64_t length;
+ /* MTP command ID for data header,
+ * used only for MTP_SEND_FILE_WITH_HEADER
+ */
+ uint16_t command;
+ /* MTP transaction ID for data header,
+ * used only for MTP_SEND_FILE_WITH_HEADER
+ */
+ uint32_t transaction_id;
+};
+
+struct mtp_event {
+ /* size of the event */
+ size_t length;
+ /* event data to send */
+ void *data;
+};
+
+/* Sends the specified file range to the host */
+#define MTP_SEND_FILE _IOW('M', 0, struct mtp_file_range)
+/* Receives data from the host and writes it to a file.
+ * The file is created if it does not exist.
+ */
+#define MTP_RECEIVE_FILE _IOW('M', 1, struct mtp_file_range)
+/* Sends an event to the host via the interrupt endpoint */
+#define MTP_SEND_EVENT _IOW('M', 3, struct mtp_event)
+/* Sends the specified file range to the host,
+ * with a 12 byte MTP data packet header at the beginning.
+ */
+#define MTP_SEND_FILE_WITH_HEADER _IOW('M', 4, struct mtp_file_range)
+
+#endif /* __LINUX_USB_F_MTP_H */
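
User space drives the MTP function through these ioctls on the gadget's character device: the daemon hands the kernel an open file descriptor plus offset and length, and the kernel streams the data to or from the USB endpoints, optionally prefixing the 12-byte MTP data header. A rough user-space sketch, assuming the header is visible to user space and that the device node is named /dev/mtp_usb (both assumptions, not guaranteed by this patch):

/* User-space sketch (not part of this patch): send one file to the host
 * with an MTP data header. Device node, command and transaction values
 * are illustrative.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <unistd.h>
#include <linux/usb/f_mtp.h>

static int mtp_send_object(int mtp_fd, const char *path,
			   uint16_t command, uint32_t transaction_id)
{
	struct mtp_file_range mfr;
	struct stat st;
	int ret;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;
	if (fstat(fd, &st) < 0) {
		close(fd);
		return -1;
	}

	mfr.fd = fd;			/* kernel reads from this descriptor */
	mfr.offset = 0;
	mfr.length = st.st_size;
	mfr.command = command;		/* echoed in the data header */
	mfr.transaction_id = transaction_id;

	ret = ioctl(mtp_fd, MTP_SEND_FILE_WITH_HEADER, &mfr);
	close(fd);
	return ret;
}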
diff --git a/include/linux/wakelock.h b/include/linux/wakelock.h
new file mode 100644
index 000000000000..f4a698a22880
--- /dev/null
+++ b/include/linux/wakelock.h
@@ -0,0 +1,67 @@
+/* include/linux/wakelock.h
+ *
+ * Copyright (C) 2007-2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_WAKELOCK_H
+#define _LINUX_WAKELOCK_H
+
+#include <linux/ktime.h>
+#include <linux/device.h>
+
+/* A wake_lock prevents the system from entering suspend or other low power
+ * states when active. If the type is set to WAKE_LOCK_SUSPEND, the wake_lock
+ * prevents a full system suspend.
+ */
+
+enum {
+ WAKE_LOCK_SUSPEND, /* Prevent suspend */
+ WAKE_LOCK_TYPE_COUNT
+};
+
+struct wake_lock {
+ struct wakeup_source ws;
+};
+
+static inline void wake_lock_init(struct wake_lock *lock, int type,
+ const char *name)
+{
+ wakeup_source_init(&lock->ws, name);
+}
+
+static inline void wake_lock_destroy(struct wake_lock *lock)
+{
+ wakeup_source_trash(&lock->ws);
+}
+
+static inline void wake_lock(struct wake_lock *lock)
+{
+ __pm_stay_awake(&lock->ws);
+}
+
+static inline void wake_lock_timeout(struct wake_lock *lock, long timeout)
+{
+ __pm_wakeup_event(&lock->ws, jiffies_to_msecs(timeout));
+}
+
+static inline void wake_unlock(struct wake_lock *lock)
+{
+ __pm_relax(&lock->ws);
+}
+
+static inline int wake_lock_active(struct wake_lock *lock)
+{
+ return lock->ws.active;
+}
+
+#endif
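
These inlines re-express the legacy Android wakelock API on top of wakeup sources, so older drivers keep building while the suspend core only ever sees a struct wakeup_source. A minimal sketch of typical driver usage (the "example" names are illustrative):

/* Driver-side sketch (not part of this patch). */
#include <linux/jiffies.h>
#include <linux/wakelock.h>

static struct wake_lock example_wake_lock;

static void example_init(void)
{
	wake_lock_init(&example_wake_lock, WAKE_LOCK_SUSPEND, "example");
}

static void example_handle_event(void)
{
	/* keep the system out of suspend for at most two seconds */
	wake_lock_timeout(&example_wake_lock, 2 * HZ);

	/* ... process the wakeup event ... */

	wake_unlock(&example_wake_lock);
}

static void example_exit(void)
{
	wake_lock_destroy(&example_wake_lock);
}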
diff --git a/include/linux/wifi_tiwlan.h b/include/linux/wifi_tiwlan.h
new file mode 100644
index 000000000000..f07e0679fb82
--- /dev/null
+++ b/include/linux/wifi_tiwlan.h
@@ -0,0 +1,27 @@
+/* include/linux/wifi_tiwlan.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _LINUX_WIFI_TIWLAN_H_
+#define _LINUX_WIFI_TIWLAN_H_
+
+#include <linux/wlan_plat.h>
+
+#define WMPA_NUMBER_OF_SECTIONS 3
+#define WMPA_NUMBER_OF_BUFFERS 160
+#define WMPA_SECTION_HEADER 24
+#define WMPA_SECTION_SIZE_0 (WMPA_NUMBER_OF_BUFFERS * 64)
+#define WMPA_SECTION_SIZE_1 (WMPA_NUMBER_OF_BUFFERS * 256)
+#define WMPA_SECTION_SIZE_2 (WMPA_NUMBER_OF_BUFFERS * 2048)
+
+#endif
diff --git a/include/linux/wl127x-rfkill.h b/include/linux/wl127x-rfkill.h
new file mode 100644
index 000000000000..9057ec63d5d3
--- /dev/null
+++ b/include/linux/wl127x-rfkill.h
@@ -0,0 +1,35 @@
+/*
+ * Bluetooth TI wl127x rfkill power control via GPIO
+ *
+ * Copyright (C) 2009 Motorola, Inc.
+ * Copyright (C) 2008 Texas Instruments
+ * Initial code: Pavan Savoy <pavan.savoy@gmail.com> (wl127x_power.c)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef _LINUX_WL127X_RFKILL_H
+#define _LINUX_WL127X_RFKILL_H
+
+#include <linux/rfkill.h>
+
+struct wl127x_rfkill_platform_data {
+ int nshutdown_gpio;
+
+ struct rfkill *rfkill; /* for driver only */
+};
+
+#endif
diff --git a/include/linux/wlan_plat.h b/include/linux/wlan_plat.h
new file mode 100644
index 000000000000..40ec3482d1ef
--- /dev/null
+++ b/include/linux/wlan_plat.h
@@ -0,0 +1,27 @@
+/* include/linux/wlan_plat.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _LINUX_WLAN_PLAT_H_
+#define _LINUX_WLAN_PLAT_H_
+
+struct wifi_platform_data {
+ int (*set_power)(int val);
+ int (*set_reset)(int val);
+ int (*set_carddetect)(int val);
+ void *(*mem_prealloc)(int section, unsigned long size);
+ int (*get_mac_addr)(unsigned char *buf);
+ void *(*get_country_code)(char *ccode);
+};
+
+#endif
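
Board code fills in wifi_platform_data and hands it to the WLAN driver as platform data; the driver then calls back into these hooks for power, reset, card detect, preallocated buffers, a MAC address and a country code. A board-file sketch, where the "bcmdhd_wlan" device name and the empty callbacks are assumptions rather than anything mandated by this header:

/* Board-file sketch (not part of this patch). */
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/wlan_plat.h>

static int board_wifi_set_power(int on)
{
	/* toggle the WLAN power regulator / GPIO here */
	return 0;
}

static struct wifi_platform_data board_wifi_control = {
	.set_power	= board_wifi_set_power,
	.set_reset	= NULL,
	.set_carddetect	= NULL,
	.mem_prealloc	= NULL,		/* optional static buffer pool */
	.get_mac_addr	= NULL,
	.get_country_code = NULL,
};

static struct platform_device board_wifi_device = {
	.name	= "bcmdhd_wlan",
	.id	= 1,
	.dev	= {
		.platform_data = &board_wifi_control,
	},
};

static int __init board_wifi_init(void)
{
	return platform_device_register(&board_wifi_device);
}
device_initcall(board_wifi_init);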
diff --git a/include/net/activity_stats.h b/include/net/activity_stats.h
new file mode 100644
index 000000000000..10e4c1506eeb
--- /dev/null
+++ b/include/net/activity_stats.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com)
+ */
+
+#ifndef __activity_stats_h
+#define __activity_stats_h
+
+#ifdef CONFIG_NET_ACTIVITY_STATS
+void activity_stats_update(void);
+#else
+#define activity_stats_update() do { } while (0)
+#endif
+
+#endif /* __activity_stats_h */
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 45eee08157bb..7ad958abaad0 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -1586,6 +1586,9 @@ struct hci_conn_info {
__u8 out;
__u16 state;
__u32 link_mode;
+ __u32 mtu;
+ __u32 cnt;
+ __u32 pkts;
};
struct hci_dev_req {
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 014a2eaa5389..0a8d9bafa942 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -619,7 +619,7 @@ static inline void hci_conn_put(struct hci_conn *conn)
if (conn->state == BT_CONNECTED) {
timeo = conn->disc_timeout;
if (!conn->out)
- timeo *= 2;
+ timeo *= 20;
} else {
timeo = msecs_to_jiffies(10);
}
diff --git a/include/net/tcp.h b/include/net/tcp.h
index aed42c785153..cacc0d732605 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1593,6 +1593,8 @@ extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
extern int tcp_gro_complete(struct sk_buff *skb);
extern int tcp4_gro_complete(struct sk_buff *skb);
+extern int tcp_nuke_addr(struct net *net, struct sockaddr *addr);
+
#ifdef CONFIG_PROC_FS
extern int tcp4_proc_init(void);
extern void tcp4_proc_exit(void);
diff --git a/include/sound/cs4271.h b/include/sound/cs4271.h
index 6d9e15ed1dcf..dd8c48d14ed9 100644
--- a/include/sound/cs4271.h
+++ b/include/sound/cs4271.h
@@ -19,7 +19,7 @@
struct cs4271_platform_data {
int gpio_nreset; /* GPIO driving Reset pin, if any */
- int amutec_eq_bmutec:1; /* flag to enable AMUTEC=BMUTEC */
+ bool amutec_eq_bmutec; /* flag to enable AMUTEC=BMUTEC */
};
#endif /* __CS4271_H */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 769e27c774a3..bc56738cb109 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -58,8 +58,9 @@
.info = snd_soc_info_volsw_range, .get = snd_soc_get_volsw_range, \
.put = snd_soc_put_volsw_range, \
.private_value = (unsigned long)&(struct soc_mixer_control) \
- {.reg = xreg, .shift = xshift, .min = xmin,\
- .max = xmax, .platform_max = xmax, .invert = xinvert} }
+ {.reg = xreg, .rreg = xreg, .shift = xshift, \
+ .rshift = xshift, .min = xmin, .max = xmax, \
+ .platform_max = xmax, .invert = xinvert} }
#define SOC_SINGLE_TLV(xname, reg, shift, max, invert, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
@@ -88,8 +89,9 @@
.info = snd_soc_info_volsw_range, \
.get = snd_soc_get_volsw_range, .put = snd_soc_put_volsw_range, \
.private_value = (unsigned long)&(struct soc_mixer_control) \
- {.reg = xreg, .shift = xshift, .min = xmin,\
- .max = xmax, .platform_max = xmax, .invert = xinvert} }
+ {.reg = xreg, .rreg = xreg, .shift = xshift, \
+ .rshift = xshift, .min = xmin, .max = xmax, \
+ .platform_max = xmax, .invert = xinvert} }
#define SOC_DOUBLE(xname, reg, shift_left, shift_right, max, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
.info = snd_soc_info_volsw, .get = snd_soc_get_volsw, \
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 7cae2360221e..663e34a5383f 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -174,6 +174,7 @@ typedef unsigned __bitwise__ sense_reason_t;
enum tcm_sense_reason_table {
#define R(x) (__force sense_reason_t )(x)
+ TCM_NO_SENSE = R(0x00),
TCM_NON_EXISTENT_LUN = R(0x01),
TCM_UNSUPPORTED_SCSI_OPCODE = R(0x02),
TCM_INCORRECT_AMOUNT_OF_DATA = R(0x03),
diff --git a/include/trace/events/cpufreq_interactive.h b/include/trace/events/cpufreq_interactive.h
new file mode 100644
index 000000000000..b2ce6b099da6
--- /dev/null
+++ b/include/trace/events/cpufreq_interactive.h
@@ -0,0 +1,112 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cpufreq_interactive
+
+#if !defined(_TRACE_CPUFREQ_INTERACTIVE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CPUFREQ_INTERACTIVE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(set,
+ TP_PROTO(u32 cpu_id, unsigned long targfreq,
+ unsigned long actualfreq),
+ TP_ARGS(cpu_id, targfreq, actualfreq),
+
+ TP_STRUCT__entry(
+ __field( u32, cpu_id )
+ __field(unsigned long, targfreq )
+ __field(unsigned long, actualfreq )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu_id = (u32) cpu_id;
+ __entry->targfreq = targfreq;
+ __entry->actualfreq = actualfreq;
+ ),
+
+ TP_printk("cpu=%u targ=%lu actual=%lu",
+ __entry->cpu_id, __entry->targfreq,
+ __entry->actualfreq)
+);
+
+DEFINE_EVENT(set, cpufreq_interactive_setspeed,
+ TP_PROTO(u32 cpu_id, unsigned long targfreq,
+ unsigned long actualfreq),
+ TP_ARGS(cpu_id, targfreq, actualfreq)
+);
+
+DECLARE_EVENT_CLASS(loadeval,
+ TP_PROTO(unsigned long cpu_id, unsigned long load,
+ unsigned long curtarg, unsigned long curactual,
+ unsigned long newtarg),
+ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, cpu_id )
+ __field(unsigned long, load )
+ __field(unsigned long, curtarg )
+ __field(unsigned long, curactual )
+ __field(unsigned long, newtarg )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu_id = cpu_id;
+ __entry->load = load;
+ __entry->curtarg = curtarg;
+ __entry->curactual = curactual;
+ __entry->newtarg = newtarg;
+ ),
+
+ TP_printk("cpu=%lu load=%lu cur=%lu actual=%lu targ=%lu",
+ __entry->cpu_id, __entry->load, __entry->curtarg,
+ __entry->curactual, __entry->newtarg)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_target,
+ TP_PROTO(unsigned long cpu_id, unsigned long load,
+ unsigned long curtarg, unsigned long curactual,
+ unsigned long newtarg),
+ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_already,
+ TP_PROTO(unsigned long cpu_id, unsigned long load,
+ unsigned long curtarg, unsigned long curactual,
+ unsigned long newtarg),
+ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_notyet,
+ TP_PROTO(unsigned long cpu_id, unsigned long load,
+ unsigned long curtarg, unsigned long curactual,
+ unsigned long newtarg),
+ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+);
+
+TRACE_EVENT(cpufreq_interactive_boost,
+ TP_PROTO(char *s),
+ TP_ARGS(s),
+ TP_STRUCT__entry(
+ __field(char *, s)
+ ),
+ TP_fast_assign(
+ __entry->s = s;
+ ),
+ TP_printk("%s", __entry->s)
+);
+
+TRACE_EVENT(cpufreq_interactive_unboost,
+ TP_PROTO(char *s),
+ TP_ARGS(s),
+ TP_STRUCT__entry(
+ __field(char *, s)
+ ),
+ TP_fast_assign(
+ __entry->s = s;
+ ),
+ TP_printk("%s", __entry->s)
+);
+
+#endif /* _TRACE_CPUFREQ_INTERACTIVE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
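
Each DECLARE/DEFINE pair above expands into a trace_<event>() helper that the interactive governor calls at the matching decision point, and the TP_printk format is what appears in the ftrace buffer. A sketch of the governor side, where the surrounding logic is assumed (only the header above defines the events):

/* Governor-side sketch (not part of this patch). Exactly one .c file
 * defines CREATE_TRACE_POINTS before including the header.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static void example_reevaluate_speed(unsigned int cpu, unsigned long load,
				     unsigned long cur_target,
				     unsigned long cur_actual,
				     unsigned long new_target)
{
	/* load evaluation picked a new target frequency */
	trace_cpufreq_interactive_target(cpu, load, cur_target,
					 cur_actual, new_target);

	/* ... request the frequency change from cpufreq ... */

	/* the transition completed */
	trace_cpufreq_interactive_setspeed(cpu, new_target, cur_actual);
}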
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 0c9783841a30..243c677c8434 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -238,6 +238,25 @@ DEFINE_EVENT(clock, clock_set_rate,
TP_ARGS(name, state, cpu_id)
);
+TRACE_EVENT(clock_set_parent,
+
+ TP_PROTO(const char *name, const char *parent_name),
+
+ TP_ARGS(name, parent_name),
+
+ TP_STRUCT__entry(
+ __string( name, name )
+ __string( parent_name, parent_name )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ __assign_str(parent_name, parent_name);
+ ),
+
+ TP_printk("%s parent=%s", __get_str(name), __get_str(parent_name))
+);
+
/*
* The power domain events are used for power domains transitions
*/
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 5a8671e8a67f..501aa32eb2f0 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -430,6 +430,159 @@ TRACE_EVENT(sched_pi_setprio,
__entry->oldprio, __entry->newprio)
);
+/*
+ * Tracepoint for showing tracked load contribution.
+ */
+TRACE_EVENT(sched_task_load_contrib,
+
+ TP_PROTO(struct task_struct *tsk, unsigned long load_contrib),
+
+ TP_ARGS(tsk, load_contrib),
+
+ TP_STRUCT__entry(
+ __array(char, comm, TASK_COMM_LEN)
+ __field(pid_t, pid)
+ __field(unsigned long, load_contrib)
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->load_contrib = load_contrib;
+ ),
+
+ TP_printk("comm=%s pid=%d load_contrib=%lu",
+ __entry->comm, __entry->pid,
+ __entry->load_contrib)
+);
+
+/*
+ * Tracepoint for showing tracked task runnable ratio [0..1023].
+ */
+TRACE_EVENT(sched_task_runnable_ratio,
+
+ TP_PROTO(struct task_struct *tsk, unsigned long ratio),
+
+ TP_ARGS(tsk, ratio),
+
+ TP_STRUCT__entry(
+ __array(char, comm, TASK_COMM_LEN)
+ __field(pid_t, pid)
+ __field(unsigned long, ratio)
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->ratio = ratio;
+ ),
+
+ TP_printk("comm=%s pid=%d ratio=%lu",
+ __entry->comm, __entry->pid,
+ __entry->ratio)
+);
+
+/*
+ * Tracepoint for showing tracked rq runnable ratio [0..1023].
+ */
+TRACE_EVENT(sched_rq_runnable_ratio,
+
+ TP_PROTO(int cpu, unsigned long ratio),
+
+ TP_ARGS(cpu, ratio),
+
+ TP_STRUCT__entry(
+ __field(int, cpu)
+ __field(unsigned long, ratio)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->ratio = ratio;
+ ),
+
+ TP_printk("cpu=%d ratio=%lu",
+ __entry->cpu,
+ __entry->ratio)
+);
+
+/*
+ * Tracepoint for showing tracked rq runnable load.
+ */
+TRACE_EVENT(sched_rq_runnable_load,
+
+ TP_PROTO(int cpu, u64 load),
+
+ TP_ARGS(cpu, load),
+
+ TP_STRUCT__entry(
+ __field(int, cpu)
+ __field(u64, load)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->load = load;
+ ),
+
+ TP_printk("cpu=%d load=%llu",
+ __entry->cpu,
+ __entry->load)
+);
+
+/*
+ * Tracepoint for showing tracked task cpu usage ratio [0..1023].
+ */
+TRACE_EVENT(sched_task_usage_ratio,
+
+ TP_PROTO(struct task_struct *tsk, unsigned long ratio),
+
+ TP_ARGS(tsk, ratio),
+
+ TP_STRUCT__entry(
+ __array(char, comm, TASK_COMM_LEN)
+ __field(pid_t, pid)
+ __field(unsigned long, ratio)
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->ratio = ratio;
+ ),
+
+ TP_printk("comm=%s pid=%d ratio=%lu",
+ __entry->comm, __entry->pid,
+ __entry->ratio)
+);
+
+/*
+ * Tracepoint for HMP (CONFIG_SCHED_HMP) task migrations.
+ */
+TRACE_EVENT(sched_hmp_migrate,
+
+ TP_PROTO(struct task_struct *tsk, int dest, int force),
+
+ TP_ARGS(tsk, dest, force),
+
+ TP_STRUCT__entry(
+ __array(char, comm, TASK_COMM_LEN)
+ __field(pid_t, pid)
+ __field(int, dest)
+ __field(int, force)
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->dest = dest;
+ __entry->force = force;
+ ),
+
+ TP_printk("comm=%s pid=%d dest=%d force=%d",
+ __entry->comm, __entry->pid,
+ __entry->dest, __entry->force)
+);
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
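
These HMP events are emitted from the fair scheduler as the per-entity load-tracking figures are updated and when the HMP policy moves a task between clusters. The call sites are not part of this hunk, so the sketch below only illustrates the intended use of the event prototypes:

/* Scheduler-side sketch (not part of this patch). */
#include <linux/sched.h>
#include <trace/events/sched.h>

static void example_report_load(struct task_struct *p, int cpu,
				unsigned long load_contrib,
				unsigned long runnable_ratio)
{
	trace_sched_task_load_contrib(p, load_contrib);
	trace_sched_task_runnable_ratio(p, runnable_ratio);
	trace_sched_rq_runnable_ratio(cpu, runnable_ratio);
}

static void example_up_migrate(struct task_struct *p, int dest_cpu)
{
	/* force=1 marks a migration forced by the HMP policy itself */
	trace_sched_hmp_migrate(p, dest_cpu, 1);
}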
diff --git a/include/trace/events/sync.h b/include/trace/events/sync.h
new file mode 100644
index 000000000000..f31bc63ca65d
--- /dev/null
+++ b/include/trace/events/sync.h
@@ -0,0 +1,82 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sync
+
+#if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SYNC_H
+
+#include <linux/sync.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(sync_timeline,
+ TP_PROTO(struct sync_timeline *timeline),
+
+ TP_ARGS(timeline),
+
+ TP_STRUCT__entry(
+ __string(name, timeline->name)
+ __array(char, value, 32)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, timeline->name);
+ if (timeline->ops->timeline_value_str) {
+ timeline->ops->timeline_value_str(timeline,
+ __entry->value,
+ sizeof(__entry->value));
+ } else {
+ __entry->value[0] = '\0';
+ }
+ ),
+
+ TP_printk("name=%s value=%s", __get_str(name), __entry->value)
+);
+
+TRACE_EVENT(sync_wait,
+ TP_PROTO(struct sync_fence *fence, int begin),
+
+ TP_ARGS(fence, begin),
+
+ TP_STRUCT__entry(
+ __string(name, fence->name)
+ __field(s32, status)
+ __field(u32, begin)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, fence->name);
+ __entry->status = fence->status;
+ __entry->begin = begin;
+ ),
+
+ TP_printk("%s name=%s state=%d", __entry->begin ? "begin" : "end",
+ __get_str(name), __entry->status)
+);
+
+TRACE_EVENT(sync_pt,
+ TP_PROTO(struct sync_pt *pt),
+
+ TP_ARGS(pt),
+
+ TP_STRUCT__entry(
+ __string(timeline, pt->parent->name)
+ __array(char, value, 32)
+ ),
+
+ TP_fast_assign(
+ __assign_str(timeline, pt->parent->name);
+ if (pt->parent->ops->pt_value_str) {
+ pt->parent->ops->pt_value_str(pt,
+ __entry->value,
+ sizeof(__entry->value));
+ } else {
+ __entry->value[0] = '\0';
+ }
+ ),
+
+ TP_printk("name=%s value=%s", __get_str(timeline), __entry->value)
+ );
+
+#endif /* if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 4e67194fd2c3..bf8de2f3b81d 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -168,6 +168,8 @@ header-y += if_plip.h
header-y += if_ppp.h
header-y += if_pppol2tp.h
header-y += if_pppox.h
+header-y += if_pppolac.h
+header-y += if_pppopns.h
header-y += if_slip.h
header-y += if_team.h
header-y += if_tun.h
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 76352ac45f24..9f096f1c0907 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -26,7 +26,6 @@
#include <linux/types.h>
#include <linux/elf-em.h>
-#include <linux/ptrace.h>
/* The netlink messages for the audit system is divided into blocks:
* 1000 - 1099 are for commanding the audit system
@@ -106,6 +105,7 @@
#define AUDIT_MMAP 1323 /* Record showing descriptor and flags in mmap */
#define AUDIT_NETFILTER_PKT 1324 /* Packets traversing netfilter chains */
#define AUDIT_NETFILTER_CFG 1325 /* Netfilter chain modifications */
+#define AUDIT_SECCOMP 1326 /* Secure Computing event */
#define AUDIT_AVC 1400 /* SE Linux avc denial or grant */
#define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */
diff --git a/include/uapi/linux/if_pppox.h b/include/uapi/linux/if_pppox.h
index 0b46fd57c8f6..be570cc28003 100644
--- a/include/uapi/linux/if_pppox.h
+++ b/include/uapi/linux/if_pppox.h
@@ -23,6 +23,7 @@
#include <linux/socket.h>
#include <linux/if_ether.h>
#include <linux/if_pppol2tp.h>
+#include <linux/if_pppolac.h>
/* For user-space programs to pick up these definitions
* which they wouldn't get otherwise without defining __KERNEL__
@@ -56,7 +57,9 @@ struct pptp_addr {
#define PX_PROTO_OE 0 /* Currently just PPPoE */
#define PX_PROTO_OL2TP 1 /* Now L2TP also */
#define PX_PROTO_PPTP 2
-#define PX_MAX_PROTO 3
+#define PX_PROTO_OLAC 3
+#define PX_PROTO_OPNS 4
+#define PX_MAX_PROTO 5
struct sockaddr_pppox {
__kernel_sa_family_t sa_family; /* address family, AF_PPPOX */
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index 935119c698ac..440e7b4cee49 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -153,6 +153,9 @@ struct input_keymap_entry {
#define EVIOCGRAB _IOW('E', 0x90, int) /* Grab/Release device */
+#define EVIOCGSUSPENDBLOCK _IOR('E', 0x91, int) /* get suspend block enable */
+#define EVIOCSSUSPENDBLOCK _IOW('E', 0x91, int) /* set suspend block enable */
+
#define EVIOCSCLOCKID _IOW('E', 0xa0, int) /* Set clockid to be used for timestamps */
/*
diff --git a/include/uapi/linux/msdos_fs.h b/include/uapi/linux/msdos_fs.h
index 996719f82e28..a0c20024d8de 100644
--- a/include/uapi/linux/msdos_fs.h
+++ b/include/uapi/linux/msdos_fs.h
@@ -102,6 +102,7 @@ struct __fat_dirent {
/* <linux/videotext.h> has used 0x72 ('r') in collision, so skip a few */
#define FAT_IOCTL_GET_ATTRIBUTES _IOR('r', 0x10, __u32)
#define FAT_IOCTL_SET_ATTRIBUTES _IOW('r', 0x11, __u32)
+#define VFAT_IOCTL_GET_VOLUME_ID _IOR('r', 0x12, __u32)
struct fat_boot_sector {
__u8 ignored[3]; /* Boot strap short or near jump */
@@ -139,6 +140,17 @@ struct fat_boot_fsinfo {
__le32 reserved2[4];
};
+struct fat_boot_bsx {
+ __u8 drive; /* drive number */
+ __u8 reserved1;
+ __u8 signature; /* extended boot signature */
+ __u8 vol_id[4]; /* volume ID */
+ __u8 vol_label[11]; /* volume label */
+ __u8 type[8]; /* file system type */
+};
+#define FAT16_BSX_OFFSET 36 /* offset of fat_boot_bsx in FAT12 and FAT16 */
+#define FAT32_BSX_OFFSET 64 /* offset of fat_boot_bsx in FAT32 */
+
struct msdos_dir_entry {
__u8 name[MSDOS_NAME];/* name and extension */
__u8 attr; /* attribute bits */
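
The ioctl returns the 32-bit volume serial number stored in the fat_boot_bsx block, so user space can identify a card without parsing the boot sector itself. A small sketch, where the mount point is only an example:

/* User-space sketch (not part of this patch). */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/msdos_fs.h>

int main(void)
{
	__u32 id;
	int fd = open("/mnt/sdcard", O_RDONLY | O_DIRECTORY);

	if (fd < 0 || ioctl(fd, VFAT_IOCTL_GET_VOLUME_ID, &id) < 0) {
		perror("VFAT_IOCTL_GET_VOLUME_ID");
		return 1;
	}
	printf("%04X-%04X\n", id >> 16, id & 0xffff);
	close(fd);
	return 0;
}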
diff --git a/include/uapi/linux/netfilter/xt_IDLETIMER.h b/include/uapi/linux/netfilter/xt_IDLETIMER.h
index 208ae9387331..faaa28b3d061 100644
--- a/include/uapi/linux/netfilter/xt_IDLETIMER.h
+++ b/include/uapi/linux/netfilter/xt_IDLETIMER.h
@@ -4,6 +4,7 @@
* Header file for Xtables timer target module.
*
* Copyright (C) 2004, 2010 Nokia Corporation
+ *
* Written by Timo Teras <ext-timo.teras@nokia.com>
*
* Converted to x_tables and forward-ported to 2.6.34
@@ -32,12 +33,19 @@
#include <linux/types.h>
#define MAX_IDLETIMER_LABEL_SIZE 28
+#define NLMSG_MAX_SIZE 64
+
+#define NL_EVENT_TYPE_INACTIVE 0
+#define NL_EVENT_TYPE_ACTIVE 1
struct idletimer_tg_info {
__u32 timeout;
char label[MAX_IDLETIMER_LABEL_SIZE];
+ /* Use netlink messages for notification in addition to sysfs */
+ __u8 send_nl_msg;
+
/* for kernel module internal use only */
struct idletimer_tg *timer __attribute__((aligned(8)));
};
diff --git a/include/uapi/linux/netfilter/xt_socket.h b/include/uapi/linux/netfilter/xt_socket.h
index 26d7217bd4f1..63594564831c 100644
--- a/include/uapi/linux/netfilter/xt_socket.h
+++ b/include/uapi/linux/netfilter/xt_socket.h
@@ -11,4 +11,10 @@ struct xt_socket_mtinfo1 {
__u8 flags;
};
+void xt_socket_put_sk(struct sock *sk);
+struct sock *xt_socket_get4_sk(const struct sk_buff *skb,
+ struct xt_action_param *par);
+struct sock *xt_socket_get6_sk(const struct sk_buff *skb,
+ struct xt_action_param *par);
+
#endif /* _XT_SOCKET_H */
diff --git a/include/uapi/linux/oom.h b/include/uapi/linux/oom.h
index b29272d621ce..0cb16fc8bcfe 100644
--- a/include/uapi/linux/oom.h
+++ b/include/uapi/linux/oom.h
@@ -2,6 +2,17 @@
#define _UAPI__INCLUDE_LINUX_OOM_H
/*
+ * /proc/<pid>/oom_adj is deprecated, see
+ * Documentation/feature-removal-schedule.txt.
+ *
+ * /proc/<pid>/oom_adj set to -17 protects from the oom-killer
+ */
+#define OOM_DISABLE (-17)
+/* inclusive */
+#define OOM_ADJUST_MIN (-16)
+#define OOM_ADJUST_MAX 15
+
+/*
* /proc/<pid>/oom_score_adj set to OOM_SCORE_ADJ_MIN disables oom killing for
* pid.
*/
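
With both knobs present, the /proc interface keeps them consistent by linearly rescaling the legacy [-16, 15] oom_adj range onto the [-1000, 1000] oom_score_adj range, with OOM_DISABLE mapping straight to OOM_SCORE_ADJ_MIN. A sketch of that conversion, assuming the conventional fs/proc scaling:

/* Sketch (not part of this patch): legacy oom_adj -> oom_score_adj. */
static int oom_adj_to_score_adj(int oom_adj)
{
	if (oom_adj == OOM_DISABLE)		/* -17 */
		return OOM_SCORE_ADJ_MIN;	/* -1000 */
	/* e.g. oom_adj = 15 -> 15 * 1000 / 17 = 882 */
	return oom_adj * OOM_SCORE_ADJ_MAX / -OOM_DISABLE;
}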
diff --git a/include/uapi/linux/sockios.h b/include/uapi/linux/sockios.h
index 7997a506ad41..f7ffe36db03c 100644
--- a/include/uapi/linux/sockios.h
+++ b/include/uapi/linux/sockios.h
@@ -65,6 +65,7 @@
#define SIOCDIFADDR 0x8936 /* delete PA address */
#define SIOCSIFHWBROADCAST 0x8937 /* set hardware broadcast addr */
#define SIOCGIFCOUNT 0x8938 /* get number of devices */
+#define SIOCKILLADDR 0x8939 /* kill sockets with this local addr */
#define SIOCGIFBR 0x8940 /* Bridging support */
#define SIOCSIFBR 0x8941 /* Set bridging options */
diff --git a/init/Kconfig b/init/Kconfig
index 7d30240e5bfe..6715f94bfd2d 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1182,7 +1182,7 @@ config CC_OPTIMIZE_FOR_SIZE
Enabling this option will pass "-Os" instead of "-O2" to gcc
resulting in a smaller kernel.
- If unsure, say Y.
+ If unsure, say N.
config SYSCTL
bool
@@ -1190,6 +1190,12 @@ config SYSCTL
config ANON_INODES
bool
+config PANIC_TIMEOUT
+ int "Default panic timeout"
+ default 0
+ help
+	  Set the default panic timeout, in seconds. A value of 0 means
+	  the kernel waits indefinitely after a panic instead of rebooting.
+
menuconfig EXPERT
bool "Configure standard kernel features (expert users)"
# Unhide debug options, to make the on-by-default options visible
diff --git a/kernel/async.c b/kernel/async.c
index 9d3118384858..a1d585c351d6 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -196,6 +196,9 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct a
atomic_inc(&entry_count);
spin_unlock_irqrestore(&async_lock, flags);
+ /* mark that this task has queued an async job, used by module init */
+ current->flags |= PF_USED_ASYNC;
+
/* schedule for execution */
queue_work(system_unbound_wq, &entry->work);
diff --git a/kernel/audit.c b/kernel/audit.c
index 40414e9143db..d596e5355f15 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -272,6 +272,8 @@ static int audit_log_config_change(char *function_name, int new, int old,
int rc = 0;
ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
+ if (unlikely(!ab))
+ return rc;
audit_log_format(ab, "%s=%d old=%d auid=%u ses=%u", function_name, new,
old, from_kuid(&init_user_ns, loginuid), sessionid);
if (sid) {
@@ -619,6 +621,8 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type,
}
*ab = audit_log_start(NULL, GFP_KERNEL, msg_type);
+ if (unlikely(!*ab))
+ return rc;
audit_log_format(*ab, "pid=%d uid=%u auid=%u ses=%u",
task_tgid_vnr(current),
from_kuid(&init_user_ns, current_uid()),
@@ -1097,6 +1101,23 @@ static inline void audit_get_stamp(struct audit_context *ctx,
}
}
+/*
+ * Wait for auditd to drain the queue a little
+ */
+static void wait_for_auditd(unsigned long sleep_time)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&audit_backlog_wait, &wait);
+
+ if (audit_backlog_limit &&
+ skb_queue_len(&audit_skb_queue) > audit_backlog_limit)
+ schedule_timeout(sleep_time);
+
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&audit_backlog_wait, &wait);
+}
+
/* Obtain an audit buffer. This routine does locking to obtain the
* audit buffer, but then no locking is required for calls to
* audit_log_*format. If the tsk is a task that is currently in a
@@ -1142,20 +1163,13 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
while (audit_backlog_limit
&& skb_queue_len(&audit_skb_queue) > audit_backlog_limit + reserve) {
- if (gfp_mask & __GFP_WAIT && audit_backlog_wait_time
- && time_before(jiffies, timeout_start + audit_backlog_wait_time)) {
+ if (gfp_mask & __GFP_WAIT && audit_backlog_wait_time) {
+ unsigned long sleep_time;
- /* Wait for auditd to drain the queue a little */
- DECLARE_WAITQUEUE(wait, current);
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&audit_backlog_wait, &wait);
-
- if (audit_backlog_limit &&
- skb_queue_len(&audit_skb_queue) > audit_backlog_limit)
- schedule_timeout(timeout_start + audit_backlog_wait_time - jiffies);
-
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&audit_backlog_wait, &wait);
+ sleep_time = timeout_start + audit_backlog_wait_time -
+ jiffies;
+ if ((long)sleep_time > 0)
+ wait_for_auditd(sleep_time);
continue;
}
if (audit_rate_check() && printk_ratelimit())
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index e81175ef25f8..642a89c4f3d6 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -449,11 +449,26 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
return 0;
}
+static void audit_log_remove_rule(struct audit_krule *rule)
+{
+ struct audit_buffer *ab;
+
+ ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
+ if (unlikely(!ab))
+ return;
+ audit_log_format(ab, "op=");
+ audit_log_string(ab, "remove rule");
+ audit_log_format(ab, " dir=");
+ audit_log_untrustedstring(ab, rule->tree->pathname);
+ audit_log_key(ab, rule->filterkey);
+ audit_log_format(ab, " list=%d res=1", rule->listnr);
+ audit_log_end(ab);
+}
+
static void kill_rules(struct audit_tree *tree)
{
struct audit_krule *rule, *next;
struct audit_entry *entry;
- struct audit_buffer *ab;
list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
entry = container_of(rule, struct audit_entry, rule);
@@ -461,14 +476,7 @@ static void kill_rules(struct audit_tree *tree)
list_del_init(&rule->rlist);
if (rule->tree) {
/* not a half-baked one */
- ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
- audit_log_format(ab, "op=");
- audit_log_string(ab, "remove rule");
- audit_log_format(ab, " dir=");
- audit_log_untrustedstring(ab, rule->tree->pathname);
- audit_log_key(ab, rule->filterkey);
- audit_log_format(ab, " list=%d res=1", rule->listnr);
- audit_log_end(ab);
+ audit_log_remove_rule(rule);
rule->tree = NULL;
list_del_rcu(&entry->list);
list_del(&entry->rule.list);
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 4a599f699adc..22831c4d369c 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -240,6 +240,8 @@ static void audit_watch_log_rule_change(struct audit_krule *r, struct audit_watc
if (audit_enabled) {
struct audit_buffer *ab;
ab = audit_log_start(NULL, GFP_NOFS, AUDIT_CONFIG_CHANGE);
+ if (unlikely(!ab))
+ return;
audit_log_format(ab, "auid=%u ses=%u op=",
from_kuid(&init_user_ns, audit_get_loginuid(current)),
audit_get_sessionid(current));
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 7f19f23d38a3..f9fc54bbe06f 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1144,7 +1144,6 @@ static void audit_log_rule_change(kuid_t loginuid, u32 sessionid, u32 sid,
* audit_receive_filter - apply all rules to the specified message type
* @type: audit message type
* @pid: target pid for netlink audit messages
- * @uid: target uid for netlink audit messages
* @seq: netlink audit message sequence (serial) number
* @data: payload data
* @datasz: size of payload data
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index e37e6a12c5e3..a371f857a0a9 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1464,14 +1464,14 @@ static void show_special(struct audit_context *context, int *call_panic)
audit_log_end(ab);
ab = audit_log_start(context, GFP_KERNEL,
AUDIT_IPC_SET_PERM);
+ if (unlikely(!ab))
+ return;
audit_log_format(ab,
"qbytes=%lx ouid=%u ogid=%u mode=%#ho",
context->ipc.qbytes,
context->ipc.perm_uid,
context->ipc.perm_gid,
context->ipc.perm_mode);
- if (!ab)
- return;
}
break; }
case AUDIT_MQ_OPEN: {
@@ -2675,7 +2675,7 @@ void __audit_mmap_fd(int fd, int flags)
context->type = AUDIT_MMAP;
}
-static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
+static void audit_log_task(struct audit_buffer *ab)
{
kuid_t auid, uid;
kgid_t gid;
@@ -2693,6 +2693,11 @@ static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
audit_log_task_context(ab);
audit_log_format(ab, " pid=%d comm=", current->pid);
audit_log_untrustedstring(ab, current->comm);
+}
+
+static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
+{
+ audit_log_task(ab);
audit_log_format(ab, " reason=");
audit_log_string(ab, reason);
audit_log_format(ab, " sig=%ld", signr);
@@ -2715,6 +2720,8 @@ void audit_core_dumps(long signr)
return;
ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
+ if (unlikely(!ab))
+ return;
audit_log_abend(ab, "memory violation", signr);
audit_log_end(ab);
}
@@ -2723,8 +2730,11 @@ void __audit_seccomp(unsigned long syscall, long signr, int code)
{
struct audit_buffer *ab;
- ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
- audit_log_abend(ab, "seccomp", signr);
+ ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_SECCOMP);
+ if (unlikely(!ab))
+ return;
+ audit_log_task(ab);
+ audit_log_format(ab, " sig=%ld", signr);
audit_log_format(ab, " syscall=%ld", syscall);
audit_log_format(ab, " compat=%d", is_compat_task());
audit_log_format(ab, " ip=0x%lx", KSTK_EIP(current));
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 3046a503242c..17b99f0e7896 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -731,3 +731,23 @@ void init_cpu_online(const struct cpumask *src)
{
cpumask_copy(to_cpumask(cpu_online_bits), src);
}
+
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
+
+void idle_notifier_register(struct notifier_block *n)
+{
+ atomic_notifier_chain_register(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_register);
+
+void idle_notifier_unregister(struct notifier_block *n)
+{
+ atomic_notifier_chain_unregister(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_unregister);
+
+void idle_notifier_call_chain(unsigned long val)
+{
+ atomic_notifier_call_chain(&idle_notifier, val, NULL);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_call_chain);
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 9a61738cefc8..4a5cf036c07b 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -85,6 +85,10 @@ static int kgdb_use_con;
bool dbg_is_early = true;
/* Next cpu to become the master debug core */
int dbg_switch_cpu;
+/* Flag for entering kdb when a panic occurs */
+static bool break_on_panic = true;
+/* Flag for entering kdb when an exception occurs */
+static bool break_on_exception = true;
/* Use kdb or gdbserver mode */
int dbg_kdb_mode = 1;
@@ -99,6 +103,8 @@ early_param("kgdbcon", opt_kgdb_con);
module_param(kgdb_use_con, int, 0644);
module_param(kgdbreboot, int, 0644);
+module_param(break_on_panic, bool, 0644);
+module_param(break_on_exception, bool, 0644);
/*
* Holds information about breakpoints in a kernel. These breakpoints are
@@ -677,6 +683,9 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
if (arch_kgdb_ops.enable_nmi)
arch_kgdb_ops.enable_nmi(0);
+ if (unlikely(signo != SIGTRAP && !break_on_exception))
+ return 1;
+
ks->cpu = raw_smp_processor_id();
ks->ex_vector = evector;
ks->signo = signo;
@@ -783,6 +792,9 @@ static int kgdb_panic_event(struct notifier_block *self,
unsigned long val,
void *data)
{
+ if (!break_on_panic)
+ return NOTIFY_DONE;
+
if (dbg_kdb_mode)
kdb_printf("PANIC: %s\n", (char *)data);
kgdb_breakpoint();
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 14ff4849262c..4b0fb2fb779c 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -216,7 +216,7 @@ static char *kdb_read(char *buffer, size_t bufsize)
int i;
int diag, dtab_count;
int key;
-
+ static int last_crlf;
diag = kdbgetintenv("DTABCOUNT", &dtab_count);
if (diag)
@@ -237,6 +237,9 @@ poll_again:
return buffer;
if (key != 9)
tab = 0;
+ if (key != 10 && key != 13)
+ last_crlf = 0;
+
switch (key) {
case 8: /* backspace */
if (cp > buffer) {
@@ -254,7 +257,12 @@ poll_again:
*cp = tmp;
}
break;
- case 13: /* enter */
+ case 10: /* new line */
+ case 13: /* carriage return */
+ /* handle \n after \r */
+ if (last_crlf && last_crlf != key)
+ break;
+ last_crlf = key;
*lastchar++ = '\n';
*lastchar++ = '\0';
if (!KDB_STATE(KGDB_TRANS)) {
diff --git a/kernel/fork.c b/kernel/fork.c
index 65ca6d27f24e..63bdc82a34ec 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -197,6 +197,9 @@ struct kmem_cache *vm_area_cachep;
/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;
+/* Notifier list called when a task struct is freed */
+static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
+
static void account_kernel_stack(struct thread_info *ti, int account)
{
struct zone *zone = page_zone(virt_to_page(ti));
@@ -230,6 +233,18 @@ static inline void put_signal_struct(struct signal_struct *sig)
free_signal_struct(sig);
}
+int task_free_register(struct notifier_block *n)
+{
+ return atomic_notifier_chain_register(&task_free_notifier, n);
+}
+EXPORT_SYMBOL(task_free_register);
+
+int task_free_unregister(struct notifier_block *n)
+{
+ return atomic_notifier_chain_unregister(&task_free_notifier, n);
+}
+EXPORT_SYMBOL(task_free_unregister);
+
void __put_task_struct(struct task_struct *tsk)
{
WARN_ON(!tsk->exit_state);
@@ -241,6 +256,7 @@ void __put_task_struct(struct task_struct *tsk)
delayacct_tsk_free(tsk);
put_signal_struct(tsk->signal);
+ atomic_notifier_call_chain(&task_free_notifier, 0, tsk);
if (!profile_handoff_task(tsk))
free_task(tsk);
}
@@ -695,7 +711,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
mm = get_task_mm(task);
if (mm && mm != current->mm &&
- !ptrace_may_access(task, mode)) {
+ !ptrace_may_access(task, mode) &&
+ !capable(CAP_SYS_RESOURCE)) {
mmput(mm);
mm = ERR_PTR(-EACCES);
}
@@ -1064,6 +1081,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
init_rwsem(&sig->group_rwsem);
#endif
+ sig->oom_adj = current->signal->oom_adj;
sig->oom_score_adj = current->signal->oom_score_adj;
sig->oom_score_adj_min = current->signal->oom_score_adj_min;
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 192a302d6cfd..473b2b6eccb5 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -23,10 +23,27 @@
static struct lock_class_key irq_desc_lock_class;
#if defined(CONFIG_SMP)
+static int __init irq_affinity_setup(char *str)
+{
+ zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
+ cpulist_parse(str, irq_default_affinity);
+ /*
+ * Set at least the boot cpu. We don't want to end up with
+	 * bug reports caused by random command line masks
+ */
+ cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
+ return 1;
+}
+__setup("irqaffinity=", irq_affinity_setup);
+
static void __init init_irq_default_affinity(void)
{
- alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
- cpumask_setall(irq_default_affinity);
+#ifdef CONFIG_CPUMASK_OFFSTACK
+ if (!irq_default_affinity)
+ zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
+#endif
+ if (cpumask_empty(irq_default_affinity))
+ cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index cb228bf21760..20d471a04dbc 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -109,8 +109,13 @@ int check_wakeup_irqs(void)
* can abort suspend.
*/
if (irqd_is_wakeup_set(&desc->irq_data)) {
- if (desc->depth == 1 && desc->istate & IRQS_PENDING)
+ if (desc->depth == 1 && desc->istate & IRQS_PENDING) {
+ pr_info("Wakeup IRQ %d %s pending, suspend aborted\n",
+ irq,
+ desc->action && desc->action->name ?
+ desc->action->name : "");
return -EBUSY;
+ }
continue;
}
/*
diff --git a/kernel/module.c b/kernel/module.c
index 250092c1d57d..b10b048367e1 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3013,6 +3013,12 @@ static int do_init_module(struct module *mod)
{
int ret = 0;
+ /*
+ * We want to find out whether @mod uses async during init. Clear
+ * PF_USED_ASYNC. async_schedule*() will set it.
+ */
+ current->flags &= ~PF_USED_ASYNC;
+
blocking_notifier_call_chain(&module_notify_list,
MODULE_STATE_COMING, mod);
@@ -3058,8 +3064,25 @@ static int do_init_module(struct module *mod)
blocking_notifier_call_chain(&module_notify_list,
MODULE_STATE_LIVE, mod);
- /* We need to finish all async code before the module init sequence is done */
- async_synchronize_full();
+ /*
+ * We need to finish all async code before the module init sequence
+ * is done. This has potential to deadlock. For example, a newly
+ * detected block device can trigger request_module() of the
+ * default iosched from async probing task. Once userland helper
+ * reaches here, async_synchronize_full() will wait on the async
+ * task waiting on request_module() and deadlock.
+ *
+	 * This deadlock is avoided by performing async_synchronize_full()
+	 * iff module init queued any async jobs. This isn't a full
+	 * solution as it will deadlock the same way if module loading from
+ * async jobs nests more than once; however, due to the various
+ * constraints, this hack seems to be the best option for now.
+ * Please refer to the following thread for details.
+ *
+ * http://thread.gmane.org/gmane.linux.kernel/1420814
+ */
+ if (current->flags & PF_USED_ASYNC)
+ async_synchronize_full();
mutex_lock(&module_mutex);
/* Drop initial reference. */
diff --git a/kernel/panic.c b/kernel/panic.c
index e1b2822fff97..aee7ae48f7df 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -27,13 +27,19 @@
#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18
+/* Machine specific panic information string */
+char *mach_panic_string;
+
int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
static unsigned long tainted_mask;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
-int panic_timeout;
+#ifndef CONFIG_PANIC_TIMEOUT
+#define CONFIG_PANIC_TIMEOUT 0
+#endif
+int panic_timeout = CONFIG_PANIC_TIMEOUT;
EXPORT_SYMBOL_GPL(panic_timeout);
ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
@@ -383,6 +389,11 @@ late_initcall(init_oops_id);
void print_oops_end_marker(void)
{
init_oops_id();
+
+ if (mach_panic_string)
+ printk(KERN_WARNING "Board Information: %s\n",
+ mach_panic_string);
+
printk(KERN_WARNING "---[ end trace %016llx ]---\n",
(unsigned long long)oops_id);
}
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 5dfdc9ea180b..8789befa54a1 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -18,6 +18,21 @@ config SUSPEND_FREEZER
Turning OFF this setting is NOT recommended! If in doubt, say Y.
+config HAS_WAKELOCK
+ bool
+ default n
+
+config WAKELOCK
+ bool
+ default n
+
+config FB_EARLYSUSPEND
+ bool "Sysfs compat interface for earlysuspend"
+ ---help---
+	  Register an early suspend handler that uses sysfs to tell
+	  user-space when it should stop drawing to the screen, waits for
+	  it to do so, and notifies user-space again when drawing may resume.
+
config HIBERNATE_CALLBACKS
bool
@@ -274,3 +289,10 @@ config PM_GENERIC_DOMAINS_RUNTIME
config CPU_PM
bool
depends on SUSPEND || CPU_IDLE
+
+config SUSPEND_TIME
+ bool "Log time spent in suspend"
+ ---help---
+ Prints the time spent in suspend in the kernel log, and
+ keeps statistics on the time spent in suspend in
+ /sys/kernel/debug/suspend_time
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 29472bff11ef..12ca37ea7035 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -11,5 +11,7 @@ obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o \
block_io.o
obj-$(CONFIG_PM_AUTOSLEEP) += autosleep.o
obj-$(CONFIG_PM_WAKELOCKS) += wakelock.o
+obj-$(CONFIG_FB_EARLYSUSPEND) += fbearlysuspend.o
+obj-$(CONFIG_SUSPEND_TIME) += suspend_time.o
obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o
diff --git a/kernel/power/fbearlysuspend.c b/kernel/power/fbearlysuspend.c
new file mode 100644
index 000000000000..8dca551c9cd6
--- /dev/null
+++ b/kernel/power/fbearlysuspend.c
@@ -0,0 +1,116 @@
+/* kernel/power/fbearlysuspend.c
+ *
+ * Copyright (C) 2005-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/wait.h>
+
+#include "power.h"
+
+static wait_queue_head_t fb_state_wq;
+static DEFINE_SPINLOCK(fb_state_lock);
+static enum {
+ FB_STATE_STOPPED_DRAWING,
+ FB_STATE_REQUEST_STOP_DRAWING,
+ FB_STATE_DRAWING_OK,
+} fb_state;
+
+
+
+static ssize_t wait_for_fb_sleep_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ char *s = buf;
+ int ret;
+
+ ret = wait_event_interruptible(fb_state_wq,
+ fb_state != FB_STATE_DRAWING_OK);
+ if (ret && fb_state == FB_STATE_DRAWING_OK)
+ return ret;
+ else
+ s += sprintf(buf, "sleeping");
+ return s - buf;
+}
+
+static ssize_t wait_for_fb_wake_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ char *s = buf;
+ int ret;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&fb_state_lock, irq_flags);
+ if (fb_state == FB_STATE_REQUEST_STOP_DRAWING) {
+ fb_state = FB_STATE_STOPPED_DRAWING;
+ wake_up(&fb_state_wq);
+ }
+ spin_unlock_irqrestore(&fb_state_lock, irq_flags);
+
+ ret = wait_event_interruptible(fb_state_wq,
+ fb_state == FB_STATE_DRAWING_OK);
+ if (ret && fb_state != FB_STATE_DRAWING_OK)
+ return ret;
+ else
+ s += sprintf(buf, "awake");
+
+ return s - buf;
+}
+
+#define power_ro_attr(_name) \
+static struct kobj_attribute _name##_attr = { \
+ .attr = { \
+ .name = __stringify(_name), \
+ .mode = 0444, \
+ }, \
+ .show = _name##_show, \
+ .store = NULL, \
+}
+
+power_ro_attr(wait_for_fb_sleep);
+power_ro_attr(wait_for_fb_wake);
+
+static struct attribute *g[] = {
+ &wait_for_fb_sleep_attr.attr,
+ &wait_for_fb_wake_attr.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = g,
+};
+
+static int __init android_power_init(void)
+{
+ int ret;
+
+ init_waitqueue_head(&fb_state_wq);
+ fb_state = FB_STATE_DRAWING_OK;
+
+ ret = sysfs_create_group(power_kobj, &attr_group);
+ if (ret) {
+ pr_err("android_power_init: sysfs_create_group failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit android_power_exit(void)
+{
+ sysfs_remove_group(power_kobj, &attr_group);
+}
+
+module_init(android_power_init);
+module_exit(android_power_exit);
+
diff --git a/kernel/power/suspend_time.c b/kernel/power/suspend_time.c
new file mode 100644
index 000000000000..d2a65da9f22c
--- /dev/null
+++ b/kernel/power/suspend_time.c
@@ -0,0 +1,111 @@
+/*
+ * debugfs file to track time spent in suspend
+ *
+ * Copyright (c) 2011, Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/syscore_ops.h>
+#include <linux/time.h>
+
+static struct timespec suspend_time_before;
+static unsigned int time_in_suspend_bins[32];
+
+#ifdef CONFIG_DEBUG_FS
+static int suspend_time_debug_show(struct seq_file *s, void *data)
+{
+ int bin;
+ seq_printf(s, "time (secs) count\n");
+ seq_printf(s, "------------------\n");
+ for (bin = 0; bin < 32; bin++) {
+ if (time_in_suspend_bins[bin] == 0)
+ continue;
+ seq_printf(s, "%4d - %4d %4u\n",
+ bin ? 1 << (bin - 1) : 0, 1 << bin,
+ time_in_suspend_bins[bin]);
+ }
+ return 0;
+}
+
+static int suspend_time_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, suspend_time_debug_show, NULL);
+}
+
+static const struct file_operations suspend_time_debug_fops = {
+ .open = suspend_time_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init suspend_time_debug_init(void)
+{
+ struct dentry *d;
+
+ d = debugfs_create_file("suspend_time", 0755, NULL, NULL,
+ &suspend_time_debug_fops);
+ if (!d) {
+ pr_err("Failed to create suspend_time debug file\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+late_initcall(suspend_time_debug_init);
+#endif
+
+static int suspend_time_syscore_suspend(void)
+{
+ read_persistent_clock(&suspend_time_before);
+
+ return 0;
+}
+
+static void suspend_time_syscore_resume(void)
+{
+ struct timespec after;
+
+ read_persistent_clock(&after);
+
+ after = timespec_sub(after, suspend_time_before);
+
+ time_in_suspend_bins[fls(after.tv_sec)]++;
+
+ pr_info("Suspended for %lu.%03lu seconds\n", after.tv_sec,
+ after.tv_nsec / NSEC_PER_MSEC);
+}
+
+static struct syscore_ops suspend_time_syscore_ops = {
+ .suspend = suspend_time_syscore_suspend,
+ .resume = suspend_time_syscore_resume,
+};
+
+static int suspend_time_syscore_init(void)
+{
+ register_syscore_ops(&suspend_time_syscore_ops);
+
+ return 0;
+}
+
+static void suspend_time_syscore_exit(void)
+{
+ unregister_syscore_ops(&suspend_time_syscore_ops);
+}
+module_init(suspend_time_syscore_init);
+module_exit(suspend_time_syscore_exit);
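
The histogram above is indexed with fls() of the suspend duration in seconds, so bin n counts suspends of roughly 2^(n-1) to 2^n seconds: a 75-second suspend has fls(75) = 7 and is counted in the "64 - 128" row of the debugfs output, while a 3-second suspend (fls(3) = 2) lands in the "2 - 4" row.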
diff --git a/kernel/rwsem.c b/kernel/rwsem.c
index 6850f53e02d8..b3c6c3fcd847 100644
--- a/kernel/rwsem.c
+++ b/kernel/rwsem.c
@@ -116,6 +116,16 @@ void down_read_nested(struct rw_semaphore *sem, int subclass)
EXPORT_SYMBOL(down_read_nested);
+void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
+{
+ might_sleep();
+ rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
+
+ LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
+}
+
+EXPORT_SYMBOL(_down_write_nest_lock);
+
void down_write_nested(struct rw_semaphore *sem, int subclass)
{
might_sleep();
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 257002c13bb0..84b2c8bc1ec1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1558,6 +1558,10 @@ static void __sched_fork(struct task_struct *p)
#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
p->se.avg.runnable_avg_period = 0;
p->se.avg.runnable_avg_sum = 0;
+#ifdef CONFIG_SCHED_HMP
+ p->se.avg.hmp_last_up_migration = 0;
+ p->se.avg.hmp_last_down_migration = 0;
+#endif
#endif
#ifdef CONFIG_SCHEDSTATS
memset(&p->se.statistics, 0, sizeof(p->se.statistics));
@@ -7004,13 +7008,24 @@ static inline int preempt_count_equals(int preempt_offset)
return (nested == preempt_offset);
}
+static int __might_sleep_init_called;
+int __init __might_sleep_init(void)
+{
+ __might_sleep_init_called = 1;
+ return 0;
+}
+early_initcall(__might_sleep_init);
+
void __might_sleep(const char *file, int line, int preempt_offset)
{
static unsigned long prev_jiffy; /* ratelimiting */
rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
- system_state != SYSTEM_RUNNING || oops_in_progress)
+ oops_in_progress)
+ return;
+ if (system_state != SYSTEM_RUNNING &&
+ (!__might_sleep_init_called || system_state != SYSTEM_BOOTING))
return;
if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
return;
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 2cd3c1b4e582..b9d54d0d7bb0 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -94,6 +94,7 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
#ifdef CONFIG_SMP
P(se->avg.runnable_avg_sum);
P(se->avg.runnable_avg_period);
+ P(se->avg.usage_avg_sum);
P(se->avg.load_avg_contrib);
P(se->avg.decay_count);
#endif
@@ -230,6 +231,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
cfs_rq->tg_runnable_contrib);
SEQ_printf(m, " .%-30s: %d\n", "tg->runnable_avg",
atomic_read(&cfs_rq->tg->runnable_avg));
+ SEQ_printf(m, " .%-30s: %d\n", "tg->usage_avg",
+ atomic_read(&cfs_rq->tg->usage_avg));
#endif
print_cfs_group_stats(m, cpu, cfs_rq->tg);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5eea8707234a..9ba5a99def6c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -31,9 +31,20 @@
#include <linux/task_work.h>
#include <trace/events/sched.h>
+#ifdef CONFIG_HMP_VARIABLE_SCALE
+#include <linux/sysfs.h>
+#include <linux/vmalloc.h>
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
+/* Include the cpufreq header so a notifier can be registered and the
+ * frequency-invariant load scaling can track the current CPU frequency.
+ */
+#include <linux/cpufreq.h>
+#endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */
+#endif /* CONFIG_HMP_VARIABLE_SCALE */
#include "sched.h"
+
/*
* Targeted preemption latency for CPU-bound tasks:
* (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
@@ -1200,8 +1211,95 @@ static u32 __compute_runnable_contrib(u64 n)
return contrib + runnable_avg_yN_sum[n];
}
-/*
- * We can represent the historical contribution to runnable average as the
+#ifdef CONFIG_HMP_VARIABLE_SCALE
+
+#define HMP_VARIABLE_SCALE_SHIFT 16ULL
+struct hmp_global_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj,
+ struct attribute *attr, char *buf);
+ ssize_t (*store)(struct kobject *a, struct attribute *b,
+ const char *c, size_t count);
+ int *value;
+ int (*to_sysfs)(int);
+ int (*from_sysfs)(int);
+};
+
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
+#define HMP_DATA_SYSFS_MAX 4
+#else
+#define HMP_DATA_SYSFS_MAX 3
+#endif
+
+struct hmp_data_struct {
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
+ int freqinvar_load_scale_enabled;
+#endif
+ int multiplier; /* used to scale the time delta */
+ struct attribute_group attr_group;
+ struct attribute *attributes[HMP_DATA_SYSFS_MAX + 1];
+ struct hmp_global_attr attr[HMP_DATA_SYSFS_MAX];
+} hmp_data;
+
+static u64 hmp_variable_scale_convert(u64 delta);
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
+/* Frequency-Invariant Load Modification:
+ * Loads are calculated as in PJT's patch; however, we also scale the current
+ * contribution in line with the frequency of the CPU that the task was
+ * executed on.
+ * In this version, we use a simple linear scale derived from the maximum
+ * frequency reported by CPUFreq. As an example:
+ *
+ * Consider that we ran a task for 100% of the previous interval.
+ *
+ * Our CPU was under asynchronous frequency control through one of the
+ * CPUFreq governors.
+ *
+ * The CPUFreq governor reports that it is able to scale the CPU between
+ * 500MHz and 1GHz.
+ *
+ * During the period, the CPU was running at 1GHz.
+ *
+ * In this case, our load contribution for that period is calculated as
+ * 1 * (number_of_active_microseconds)
+ *
+ * This results in our task being able to accumulate maximum load as normal.
+ *
+ *
+ * Consider now that our CPU was executing at 500MHz.
+ *
+ * We now scale the load contribution such that it is calculated as
+ * 0.5 * (number_of_active_microseconds)
+ *
+ * Our task can only record 50% maximum load during this period.
+ *
+ * This represents the task consuming 50% of the CPU's *possible* compute
+ * capacity. However the task did consume 100% of the CPU's *available*
+ * compute capacity which is the value seen by the CPUFreq governor and
+ * user-side CPU Utilization tools.
+ *
+ * Restricting tracked load to be scaled by the CPU's frequency accurately
+ * represents the consumption of possible compute capacity and allows the
+ * HMP migration's simple threshold migration strategy to interact more
+ * predictably with CPUFreq's asynchronous compute capacity changes.
+ */
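/*
 * Illustrative arithmetic for the scheme above: curr_scale is kept on a
 * 1024-based scale (see SCHED_FREQSCALE_SHIFT below). Running at 500MHz on
 * a CPU whose maximum is 1GHz gives curr_scale ~= 512, so a fully-busy
 * 1024us window contributes (1024 * 512) >> 10 = 512us of tracked load,
 * i.e. the 50% of possible compute capacity described above.
 */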
+#define SCHED_FREQSCALE_SHIFT 10
+struct cpufreq_extents {
+ u32 curr_scale;
+ u32 min;
+ u32 max;
+ u32 flags;
+};
+/* Flag set when the governor in use only allows one frequency.
+ * Disables scaling.
+ */
+#define SCHED_LOAD_FREQINVAR_SINGLEFREQ 0x01
+
+static struct cpufreq_extents freq_scale[CONFIG_NR_CPUS];
+#endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */
+#endif /* CONFIG_HMP_VARIABLE_SCALE */
+
+/* We can represent the historical contribution to runnable average as the
* coefficients of a geometric series. To do this we sub-divide our runnable
* history into segments of approximately 1ms (1024us); label the segment that
* occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
@@ -1230,13 +1328,24 @@ static u32 __compute_runnable_contrib(u64 n)
*/
static __always_inline int __update_entity_runnable_avg(u64 now,
struct sched_avg *sa,
- int runnable)
+ int runnable,
+ int running,
+ int cpu)
{
u64 delta, periods;
u32 runnable_contrib;
int delta_w, decayed = 0;
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
+ u64 scaled_delta;
+ u32 scaled_runnable_contrib;
+ int scaled_delta_w;
+ u32 curr_scale = 1024;
+#endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */
delta = now - sa->last_runnable_update;
+#ifdef CONFIG_HMP_VARIABLE_SCALE
+ delta = hmp_variable_scale_convert(delta);
+#endif
/*
* This should only happen when time goes backwards, which it
* unfortunately does during sched clock init when we swap over to TSC.
@@ -1255,6 +1364,12 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
return 0;
sa->last_runnable_update = now;
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
+ /* retrieve scale factor for load */
+ if (hmp_data.freqinvar_load_scale_enabled)
+ curr_scale = freq_scale[cpu].curr_scale;
+#endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */
+
/* delta_w is the amount already accumulated against our next period */
delta_w = sa->runnable_avg_period % 1024;
if (delta + delta_w >= 1024) {
@@ -1267,8 +1382,20 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
* period and accrue it.
*/
delta_w = 1024 - delta_w;
+ /* scale runnable time if necessary */
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
+ scaled_delta_w = (delta_w * curr_scale)
+ >> SCHED_FREQSCALE_SHIFT;
+ if (runnable)
+ sa->runnable_avg_sum += scaled_delta_w;
+ if (running)
+ sa->usage_avg_sum += scaled_delta_w;
+#else
if (runnable)
sa->runnable_avg_sum += delta_w;
+ if (running)
+ sa->usage_avg_sum += delta_w;
+#endif /* #ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */
sa->runnable_avg_period += delta_w;
delta -= delta_w;
@@ -1276,22 +1403,49 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
/* Figure out how many additional periods this update spans */
periods = delta / 1024;
delta %= 1024;
-
+ /* decay the load we have accumulated so far */
sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
periods + 1);
sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
periods + 1);
-
+ sa->usage_avg_sum = decay_load(sa->usage_avg_sum, periods + 1);
+ /* add the contribution from this period */
/* Efficiently calculate \sum (1..n_period) 1024*y^i */
runnable_contrib = __compute_runnable_contrib(periods);
+ /* Apply load scaling if necessary.
+ * Note that multiplying the whole series is the same as
+ * multiplying each term.
+ */
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
+ scaled_runnable_contrib = (runnable_contrib * curr_scale)
+ >> SCHED_FREQSCALE_SHIFT;
+ if (runnable)
+ sa->runnable_avg_sum += scaled_runnable_contrib;
+ if (running)
+ sa->usage_avg_sum += scaled_runnable_contrib;
+#else
if (runnable)
sa->runnable_avg_sum += runnable_contrib;
+ if (running)
+ sa->usage_avg_sum += runnable_contrib;
+#endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */
sa->runnable_avg_period += runnable_contrib;
}
/* Remainder of delta accrued against u_0` */
+ /* scale if necessary */
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
+ scaled_delta = ((delta * curr_scale) >> SCHED_FREQSCALE_SHIFT);
+ if (runnable)
+ sa->runnable_avg_sum += scaled_delta;
+ if (running)
+ sa->usage_avg_sum += scaled_delta;
+#else
if (runnable)
sa->runnable_avg_sum += delta;
+ if (running)
+ sa->usage_avg_sum += delta;
+#endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */
sa->runnable_avg_period += delta;
return decayed;
@@ -1337,16 +1491,28 @@ static inline void __update_tg_runnable_avg(struct sched_avg *sa,
struct cfs_rq *cfs_rq)
{
struct task_group *tg = cfs_rq->tg;
- long contrib;
+ long contrib, usage_contrib;
/* The fraction of a cpu used by this cfs_rq */
contrib = div_u64(sa->runnable_avg_sum << NICE_0_SHIFT,
sa->runnable_avg_period + 1);
contrib -= cfs_rq->tg_runnable_contrib;
- if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
+ usage_contrib = div_u64(sa->usage_avg_sum << NICE_0_SHIFT,
+ sa->runnable_avg_period + 1);
+ usage_contrib -= cfs_rq->tg_usage_contrib;
+
+ /*
+ * contrib/usage at this point represent deltas, only update if they
+ * are substantive.
+ */
+ if ((abs(contrib) > cfs_rq->tg_runnable_contrib / 64) ||
+ (abs(usage_contrib) > cfs_rq->tg_usage_contrib / 64)) {
atomic_add(contrib, &tg->runnable_avg);
cfs_rq->tg_runnable_contrib += contrib;
+
+ atomic_add(usage_contrib, &tg->usage_avg);
+ cfs_rq->tg_usage_contrib += usage_contrib;
}
}
@@ -1407,6 +1573,11 @@ static inline void __update_task_entity_contrib(struct sched_entity *se)
contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
contrib /= (se->avg.runnable_avg_period + 1);
se->avg.load_avg_contrib = scale_load(contrib);
+ trace_sched_task_load_contrib(task_of(se), se->avg.load_avg_contrib);
+ contrib = se->avg.runnable_avg_sum * scale_load_down(NICE_0_LOAD);
+ contrib /= (se->avg.runnable_avg_period + 1);
+ se->avg.load_avg_ratio = scale_load(contrib);
+ trace_sched_task_runnable_ratio(task_of(se), se->avg.load_avg_ratio);
}
/* Compute the current contribution to load_avg by se, return any delta */
@@ -1442,7 +1613,11 @@ static inline void update_entity_load_avg(struct sched_entity *se,
struct cfs_rq *cfs_rq = cfs_rq_of(se);
long contrib_delta;
u64 now;
+ int cpu = -1; /* not used in normal case */
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
+ cpu = cfs_rq->rq->cpu;
+#endif
/*
* For a group entity we need to use their owned cfs_rq_clock_task() in
* case they are the parent of a throttled hierarchy.
@@ -1452,7 +1627,8 @@ static inline void update_entity_load_avg(struct sched_entity *se,
else
now = cfs_rq_clock_task(group_cfs_rq(se));
- if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
+ if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq,
+ cfs_rq->curr == se, cpu))
return;
contrib_delta = __update_entity_load_avg_contrib(se);
@@ -1496,8 +1672,19 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
{
- __update_entity_runnable_avg(rq->clock_task, &rq->avg, runnable);
+ u32 contrib;
+ int cpu = -1; /* not used in normal case */
+
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
+ cpu = rq->cpu;
+#endif
+ __update_entity_runnable_avg(rq->clock_task, &rq->avg, runnable,
+ runnable, cpu);
__update_tg_runnable_avg(&rq->avg, &rq->cfs);
+ contrib = rq->avg.runnable_avg_sum * scale_load_down(1024);
+ contrib /= (rq->avg.runnable_avg_period + 1);
+ trace_sched_rq_runnable_ratio(cpu_of(rq), scale_load(contrib));
+ trace_sched_rq_runnable_load(cpu_of(rq), rq->cfs.runnable_load_avg);
}
/* Add the load generated by se into cfs_rq's child load-average */
@@ -1866,6 +2053,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
*/
update_stats_wait_end(cfs_rq, se);
__dequeue_entity(cfs_rq, se);
+ update_entity_load_avg(se, 1);
}
update_stats_curr_start(cfs_rq, se);
@@ -3301,6 +3489,387 @@ done:
return target;
}
+#ifdef CONFIG_SCHED_HMP
+/*
+ * Heterogenous multiprocessor (HMP) optimizations
+ *
+ * The cpu types are distinguished using a list of hmp_domains
+ * which each represent one cpu type using a cpumask.
+ * The list is assumed ordered by compute capacity with the
+ * fastest domain first.
+ */
+DEFINE_PER_CPU(struct hmp_domain *, hmp_cpu_domain);
+
+extern void __init arch_get_hmp_domains(struct list_head *hmp_domains_list);
+
+/* Setup hmp_domains */
+static int __init hmp_cpu_mask_setup(void)
+{
+ char buf[64];
+ struct hmp_domain *domain;
+ struct list_head *pos;
+ int dc, cpu;
+
+ pr_debug("Initializing HMP scheduler:\n");
+
+ /* Initialize hmp_domains using platform code */
+ arch_get_hmp_domains(&hmp_domains);
+ if (list_empty(&hmp_domains)) {
+ pr_debug("HMP domain list is empty!\n");
+ return 0;
+ }
+
+ /* Print hmp_domains */
+ dc = 0;
+ list_for_each(pos, &hmp_domains) {
+ domain = list_entry(pos, struct hmp_domain, hmp_domains);
+ cpulist_scnprintf(buf, 64, &domain->cpus);
+ pr_debug(" HMP domain %d: %s\n", dc, buf);
+
+ for_each_cpu_mask(cpu, domain->cpus) {
+ per_cpu(hmp_cpu_domain, cpu) = domain;
+ }
+ dc++;
+ }
+
+ return 1;
+}
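/*
 * arch_get_hmp_domains() is provided by platform code (for ARM it lives in
 * the architecture topology code). A minimal sketch, assuming a two-cluster
 * big.LITTLE system described by the CONFIG_HMP_FAST_CPU_MASK and
 * CONFIG_HMP_SLOW_CPU_MASK cpulist strings and the struct hmp_domain layout
 * used above (a cpumask plus a list node), might look like:
 *
 *	static struct hmp_domain hmp_fast_domain, hmp_slow_domain;
 *
 *	void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
 *	{
 *		cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, &hmp_fast_domain.cpus);
 *		cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, &hmp_slow_domain.cpus);
 *		// the list must be ordered fastest first, so add the slow
 *		// domain and then prepend the fast one
 *		list_add(&hmp_slow_domain.hmp_domains, hmp_domains_list);
 *		list_add(&hmp_fast_domain.hmp_domains, hmp_domains_list);
 *	}
 */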
+
+/*
+ * Migration thresholds should be in the range [0..1023]
+ * hmp_up_threshold: min. load required for migrating tasks to a faster cpu
+ * hmp_down_threshold: max. load allowed for tasks migrating to a slower cpu
+ * The default values (512, 256) offer good responsiveness, but may need
+ * tweaking to suit particular needs.
+ *
+ * hmp_up_prio: only up-migrate tasks with high priority (<hmp_up_prio)
+ * hmp_next_up_threshold: Delay before next up migration (1024 ~= 1 ms)
+ * hmp_next_down_threshold: Delay before next down migration (1024 ~= 1 ms)
+ */
+unsigned int hmp_up_threshold = 512;
+unsigned int hmp_down_threshold = 256;
+#ifdef CONFIG_SCHED_HMP_PRIO_FILTER
+unsigned int hmp_up_prio = NICE_TO_PRIO(CONFIG_SCHED_HMP_PRIO_FILTER_VAL);
+#endif
+unsigned int hmp_next_up_threshold = 4096;
+unsigned int hmp_next_down_threshold = 4096;
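/*
 * Illustrative numbers: load_avg_ratio is on a 0..1024 scale, so with the
 * defaults a task becomes eligible for up migration once its ratio exceeds
 * 512 (~50% busy) and for down migration once it drops below 256 (~25%).
 * The hmp_next_*_threshold values are compared against a time delta shifted
 * right by 10 bits (roughly microseconds), so 4096 corresponds to a ~4ms
 * back-off between successive migrations of the same task.
 */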
+
+static unsigned int hmp_up_migration(int cpu, struct sched_entity *se);
+static unsigned int hmp_down_migration(int cpu, struct sched_entity *se);
+
+/* Check if cpu is in fastest hmp_domain */
+static inline unsigned int hmp_cpu_is_fastest(int cpu)
+{
+ struct list_head *pos;
+
+ pos = &hmp_cpu_domain(cpu)->hmp_domains;
+ return pos == hmp_domains.next;
+}
+
+/* Check if cpu is in slowest hmp_domain */
+static inline unsigned int hmp_cpu_is_slowest(int cpu)
+{
+ struct list_head *pos;
+
+ pos = &hmp_cpu_domain(cpu)->hmp_domains;
+ return list_is_last(pos, &hmp_domains);
+}
+
+/* Next (slower) hmp_domain relative to cpu */
+static inline struct hmp_domain *hmp_slower_domain(int cpu)
+{
+ struct list_head *pos;
+
+ pos = &hmp_cpu_domain(cpu)->hmp_domains;
+ return list_entry(pos->next, struct hmp_domain, hmp_domains);
+}
+
+/* Previous (faster) hmp_domain relative to cpu */
+static inline struct hmp_domain *hmp_faster_domain(int cpu)
+{
+ struct list_head *pos;
+
+ pos = &hmp_cpu_domain(cpu)->hmp_domains;
+ return list_entry(pos->prev, struct hmp_domain, hmp_domains);
+}
+
+/*
+ * Selects a cpu in previous (faster) hmp_domain
+ * Note that cpumask_any_and() returns the first cpu in the cpumask
+ */
+static inline unsigned int hmp_select_faster_cpu(struct task_struct *tsk,
+ int cpu)
+{
+ return cpumask_any_and(&hmp_faster_domain(cpu)->cpus,
+ tsk_cpus_allowed(tsk));
+}
+
+/*
+ * Selects a cpu in next (slower) hmp_domain
+ * Note that cpumask_any_and() returns the first cpu in the cpumask
+ */
+static inline unsigned int hmp_select_slower_cpu(struct task_struct *tsk,
+ int cpu)
+{
+ return cpumask_any_and(&hmp_slower_domain(cpu)->cpus,
+ tsk_cpus_allowed(tsk));
+}
+
+static inline void hmp_next_up_delay(struct sched_entity *se, int cpu)
+{
+ struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
+
+ se->avg.hmp_last_up_migration = cfs_rq_clock_task(cfs_rq);
+ se->avg.hmp_last_down_migration = 0;
+}
+
+static inline void hmp_next_down_delay(struct sched_entity *se, int cpu)
+{
+ struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
+
+ se->avg.hmp_last_down_migration = cfs_rq_clock_task(cfs_rq);
+ se->avg.hmp_last_up_migration = 0;
+}
+
+#ifdef CONFIG_HMP_VARIABLE_SCALE
+/*
+ * Heterogenous multiprocessor (HMP) optimizations
+ *
+ * These functions allow changing the growth rate of the load_avg_ratio;
+ * by default it goes from 0 to 0.5 in LOAD_AVG_PERIOD = 32ms.
+ * This can now be changed via /sys/kernel/hmp/load_avg_period_ms.
+ *
+ * These functions also allow changing the up and down thresholds of HMP
+ * via /sys/kernel/hmp/{up,down}_threshold.
+ * Both must be between 0 and 1023. The values compared against the
+ * load_avg_ratio are up_threshold/1024 and down_threshold/1024.
+ *
+ * For instance, if load_avg_period_ms = 64 and up_threshold = 512, an idle
+ * task with a load of 0 will reach the threshold after 64ms of busy looping.
+ *
+ * Changing load_avg_period_ms has the same effect as changing the
+ * default scaling factor Y=1002/1024 in the load_avg_ratio computation to
+ * (1002/1024.0)^(LOAD_AVG_PERIOD/load_avg_period_ms), but the latter
+ * could trigger overflows.
+ * For instance, with Y = 1023/1024 in __update_task_entity_contrib()
+ * "contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);"
+ * could overflow for a weight > 2^12 even though the load_avg_contrib
+ * should still fit in a 32-bit result. That does not happen when the delta
+ * time is instead multiplied by 1/22 and load_avg_period_ms is set to 706.
+ */
+
+/*
+ * Scaling the delta time increases or decreases the growth rate of the
+ * per-entity load_avg_ratio.
+ * The scale factor hmp_data.multiplier is a fixed-point
+ * number: (32-HMP_VARIABLE_SCALE_SHIFT).HMP_VARIABLE_SCALE_SHIFT
+ */
+static u64 hmp_variable_scale_convert(u64 delta)
+{
+ u64 high = delta >> 32ULL;
+ u64 low = delta & 0xffffffffULL;
+ low *= hmp_data.multiplier;
+ high *= hmp_data.multiplier;
+ return (low >> HMP_VARIABLE_SCALE_SHIFT)
+ + (high << (32ULL - HMP_VARIABLE_SCALE_SHIFT));
+}
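/*
 * Illustrative example: hmp_data.multiplier is initialised to
 * (LOAD_AVG_PERIOD << HMP_VARIABLE_SCALE_SHIFT) / load_avg_period_ms.
 * With LOAD_AVG_PERIOD = 32 and load_avg_period_ms = 64 the multiplier is
 * (32 << 16) / 64 = 32768, i.e. 0.5 in 16.16 fixed point, so every delta
 * is halved and load_avg_ratio ramps at half its default speed.
 */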
+
+static ssize_t hmp_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ ssize_t ret = 0;
+ struct hmp_global_attr *hmp_attr =
+ container_of(attr, struct hmp_global_attr, attr);
+ int temp = *(hmp_attr->value);
+ if (hmp_attr->to_sysfs != NULL)
+ temp = hmp_attr->to_sysfs(temp);
+ ret = sprintf(buf, "%d\n", temp);
+ return ret;
+}
+
+static ssize_t hmp_store(struct kobject *a, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ int temp;
+ ssize_t ret = count;
+ struct hmp_global_attr *hmp_attr =
+ container_of(attr, struct hmp_global_attr, attr);
+ char *str = vmalloc(count + 1);
+ if (str == NULL)
+ return -ENOMEM;
+ memcpy(str, buf, count);
+ str[count] = 0;
+ if (sscanf(str, "%d", &temp) < 1)
+ ret = -EINVAL;
+ else {
+ if (hmp_attr->from_sysfs != NULL)
+ temp = hmp_attr->from_sysfs(temp);
+ if (temp < 0)
+ ret = -EINVAL;
+ else
+ *(hmp_attr->value) = temp;
+ }
+ vfree(str);
+ return ret;
+}
+
+static int hmp_period_tofrom_sysfs(int value)
+{
+ return (LOAD_AVG_PERIOD << HMP_VARIABLE_SCALE_SHIFT) / value;
+}
+
+/* max value for threshold is 1024 */
+static int hmp_theshold_from_sysfs(int value)
+{
+ if (value > 1024)
+ return -1;
+ return value;
+}
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
+/* freqinvar control is only 0,1 off/on */
+static int hmp_freqinvar_from_sysfs(int value)
+{
+ if (value < 0 || value > 1)
+ return -1;
+ return value;
+}
+#endif
+static void hmp_attr_add(
+ const char *name,
+ int *value,
+ int (*to_sysfs)(int),
+ int (*from_sysfs)(int))
+{
+ int i = 0;
+ while (hmp_data.attributes[i] != NULL) {
+ i++;
+ if (i >= HMP_DATA_SYSFS_MAX)
+ return;
+ }
+ hmp_data.attr[i].attr.mode = 0644;
+ hmp_data.attr[i].show = hmp_show;
+ hmp_data.attr[i].store = hmp_store;
+ hmp_data.attr[i].attr.name = name;
+ hmp_data.attr[i].value = value;
+ hmp_data.attr[i].to_sysfs = to_sysfs;
+ hmp_data.attr[i].from_sysfs = from_sysfs;
+ hmp_data.attributes[i] = &hmp_data.attr[i].attr;
+ hmp_data.attributes[i + 1] = NULL;
+}
+
+static int hmp_attr_init(void)
+{
+ int ret;
+ memset(&hmp_data, 0, sizeof(hmp_data));
+ /* by default load_avg_period_ms == LOAD_AVG_PERIOD
+ * meaning no change
+ */
+ hmp_data.multiplier = hmp_period_tofrom_sysfs(LOAD_AVG_PERIOD);
+
+ hmp_attr_add("load_avg_period_ms",
+ &hmp_data.multiplier,
+ hmp_period_tofrom_sysfs,
+ hmp_period_tofrom_sysfs);
+ hmp_attr_add("up_threshold",
+ &hmp_up_threshold,
+ NULL,
+ hmp_theshold_from_sysfs);
+ hmp_attr_add("down_threshold",
+ &hmp_down_threshold,
+ NULL,
+ hmp_theshold_from_sysfs);
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
+ /* default frequency-invariant scaling ON */
+ hmp_data.freqinvar_load_scale_enabled = 1;
+ hmp_attr_add("frequency_invariant_load_scale",
+ &hmp_data.freqinvar_load_scale_enabled,
+ NULL,
+ hmp_freqinvar_from_sysfs);
+#endif
+ hmp_data.attr_group.name = "hmp";
+ hmp_data.attr_group.attrs = hmp_data.attributes;
+ ret = sysfs_create_group(kernel_kobj,
+ &hmp_data.attr_group);
+ return 0;
+}
+late_initcall(hmp_attr_init);
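/*
 * Once the group is registered, the tunables appear under /sys/kernel/hmp/:
 * load_avg_period_ms, up_threshold, down_threshold and, when
 * frequency-invariant scaling is built in, frequency_invariant_load_scale.
 * For example, writing 64 to load_avg_period_ms halves the ramp rate of
 * load_avg_ratio as described above.
 */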
+#endif /* CONFIG_HMP_VARIABLE_SCALE */
+
+static inline unsigned int hmp_domain_min_load(struct hmp_domain *hmpd,
+ int *min_cpu)
+{
+ int cpu;
+ int min_load = INT_MAX;
+ int min_cpu_temp = NR_CPUS;
+
+ for_each_cpu_mask(cpu, hmpd->cpus) {
+ if (cpu_rq(cpu)->cfs.tg_load_contrib < min_load) {
+ min_load = cpu_rq(cpu)->cfs.tg_load_contrib;
+ min_cpu_temp = cpu;
+ }
+ }
+
+ if (min_cpu)
+ *min_cpu = min_cpu_temp;
+
+ return min_load;
+}
+
+/*
+ * Calculate the task starvation.
+ * This is the ratio of actual running time to runnable time.
+ * If the two are equal the task is getting the cpu time it needs or
+ * it is alone on the cpu and the cpu is fully utilized.
+ */
+static inline unsigned int hmp_task_starvation(struct sched_entity *se)
+{
+ u32 starvation;
+
+ starvation = se->avg.usage_avg_sum * scale_load_down(NICE_0_LOAD);
+ starvation /= (se->avg.runnable_avg_sum + 1);
+
+ return scale_load(starvation);
+}
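/*
 * Illustrative values: if usage_avg_sum == runnable_avg_sum the task ran
 * whenever it was runnable and the result is ~NICE_0_LOAD (1024); if it
 * ran for only half of its runnable time the result is ~512.
 * hmp_offload_down() below only considers offloading when the result is
 * 768 or less, i.e. the task spends at least ~25% of its runnable time
 * waiting.
 */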
+
+static inline unsigned int hmp_offload_down(int cpu, struct sched_entity *se)
+{
+ int min_usage;
+ int dest_cpu = NR_CPUS;
+
+ if (hmp_cpu_is_slowest(cpu))
+ return NR_CPUS;
+
+ /* Is the current domain fully loaded? */
+ /* load < ~94% */
+ min_usage = hmp_domain_min_load(hmp_cpu_domain(cpu), NULL);
+ if (min_usage < NICE_0_LOAD-64)
+ return NR_CPUS;
+
+ /* Is the cpu oversubscribed? */
+ /* load < ~194% */
+ if (cpu_rq(cpu)->cfs.tg_load_contrib < 2*NICE_0_LOAD-64)
+ return NR_CPUS;
+
+ /* Is the task alone on the cpu? */
+ if (cpu_rq(cpu)->cfs.nr_running < 2)
+ return NR_CPUS;
+
+ /* Is the task actually starving? */
+ if (hmp_task_starvation(se) > 768) /* <25% waiting */
+ return NR_CPUS;
+
+ /* Does the slower domain have spare cycles? */
+ min_usage = hmp_domain_min_load(hmp_slower_domain(cpu), &dest_cpu);
+ /* load > 50% */
+ if (min_usage > NICE_0_LOAD/2)
+ return NR_CPUS;
+
+ if (cpumask_test_cpu(dest_cpu, &hmp_slower_domain(cpu)->cpus))
+ return dest_cpu;
+ return NR_CPUS;
+}
+#endif /* CONFIG_SCHED_HMP */
+
/*
* sched_balance_self: balance the current task (running on cpu) in domains
* that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
@@ -3399,6 +3968,24 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
unlock:
rcu_read_unlock();
+#ifdef CONFIG_SCHED_HMP
+ if (hmp_up_migration(prev_cpu, &p->se)) {
+ new_cpu = hmp_select_faster_cpu(p, prev_cpu);
+ hmp_next_up_delay(&p->se, new_cpu);
+ trace_sched_hmp_migrate(p, new_cpu, 0);
+ return new_cpu;
+ }
+ if (hmp_down_migration(prev_cpu, &p->se)) {
+ new_cpu = hmp_select_slower_cpu(p, prev_cpu);
+ hmp_next_down_delay(&p->se, new_cpu);
+ trace_sched_hmp_migrate(p, new_cpu, 0);
+ return new_cpu;
+ }
+ /* Make sure that the task stays in its previous hmp domain */
+ if (!cpumask_test_cpu(new_cpu, &hmp_cpu_domain(prev_cpu)->cpus))
+ return prev_cpu;
+#endif
+
return new_cpu;
}
@@ -3925,7 +4512,6 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
* 1) task is cache cold, or
* 2) too many balance attempts have failed.
*/
-
tsk_cache_hot = task_hot(p, env->src_rq->clock_task, env->sd);
if (!tsk_cache_hot ||
env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
@@ -5659,6 +6245,286 @@ need_kick:
static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
#endif
+#ifdef CONFIG_SCHED_HMP
+/* Check if task should migrate to a faster cpu */
+static unsigned int hmp_up_migration(int cpu, struct sched_entity *se)
+{
+ struct task_struct *p = task_of(se);
+ struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
+ u64 now;
+
+ if (hmp_cpu_is_fastest(cpu))
+ return 0;
+
+#ifdef CONFIG_SCHED_HMP_PRIO_FILTER
+ /* Filter by task priority */
+ if (p->prio >= hmp_up_prio)
+ return 0;
+#endif
+
+ /* Let the task load settle before doing another up migration */
+ now = cfs_rq_clock_task(cfs_rq);
+ if (((now - se->avg.hmp_last_up_migration) >> 10)
+ < hmp_next_up_threshold)
+ return 0;
+
+ if (se->avg.load_avg_ratio > hmp_up_threshold) {
+ /* Target domain load < ~94% */
+ if (hmp_domain_min_load(hmp_faster_domain(cpu), NULL)
+ > NICE_0_LOAD-64)
+ return 0;
+ if (cpumask_intersects(&hmp_faster_domain(cpu)->cpus,
+ tsk_cpus_allowed(p)))
+ return 1;
+ }
+ return 0;
+}
+
+/* Check if task should migrate to a slower cpu */
+static unsigned int hmp_down_migration(int cpu, struct sched_entity *se)
+{
+ struct task_struct *p = task_of(se);
+ struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
+ u64 now;
+
+ if (hmp_cpu_is_slowest(cpu))
+ return 0;
+
+#ifdef CONFIG_SCHED_HMP_PRIO_FILTER
+ /* Filter by task priority */
+ if ((p->prio >= hmp_up_prio) &&
+ cpumask_intersects(&hmp_slower_domain(cpu)->cpus,
+ tsk_cpus_allowed(p))) {
+ return 1;
+ }
+#endif
+
+ /* Let the task load settle before doing another down migration */
+ now = cfs_rq_clock_task(cfs_rq);
+ if (((now - se->avg.hmp_last_down_migration) >> 10)
+ < hmp_next_down_threshold)
+ return 0;
+
+ if (cpumask_intersects(&hmp_slower_domain(cpu)->cpus,
+ tsk_cpus_allowed(p))
+ && se->avg.load_avg_ratio < hmp_down_threshold) {
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * hmp_can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
+ * Ideally this function should be merged with can_migrate_task() to avoid
+ * redundant code.
+ */
+static int hmp_can_migrate_task(struct task_struct *p, struct lb_env *env)
+{
+ int tsk_cache_hot = 0;
+
+ /*
+ * We do not migrate tasks that are:
+ * 1) running (obviously), or
+ * 2) cannot be migrated to this CPU due to cpus_allowed
+ */
+ if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
+ schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
+ return 0;
+ }
+ env->flags &= ~LBF_ALL_PINNED;
+
+ if (task_running(env->src_rq, p)) {
+ schedstat_inc(p, se.statistics.nr_failed_migrations_running);
+ return 0;
+ }
+
+ /*
+ * Aggressive migration if:
+ * 1) task is cache cold, or
+ * 2) too many balance attempts have failed.
+ */
+
+ tsk_cache_hot = task_hot(p, env->src_rq->clock_task, env->sd);
+ if (!tsk_cache_hot ||
+ env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
+#ifdef CONFIG_SCHEDSTATS
+ if (tsk_cache_hot) {
+ schedstat_inc(env->sd, lb_hot_gained[env->idle]);
+ schedstat_inc(p, se.statistics.nr_forced_migrations);
+ }
+#endif
+ return 1;
+ }
+
+ return 1;
+}
+
+/*
+ * move_specific_task tries to move a specific task.
+ * Returns 1 if successful and 0 otherwise.
+ * Called with both runqueues locked.
+ */
+static int move_specific_task(struct lb_env *env, struct task_struct *pm)
+{
+ struct task_struct *p, *n;
+
+ list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
+ if (throttled_lb_pair(task_group(p), env->src_rq->cpu,
+ env->dst_cpu))
+ continue;
+
+ if (!hmp_can_migrate_task(p, env))
+ continue;
+ /* Check if we found the right task */
+ if (p != pm)
+ continue;
+
+ move_task(p, env);
+ /*
+ * Right now, this is only the third place move_task()
+ * is called, so we can safely collect move_task()
+ * stats here rather than inside move_task().
+ */
+ schedstat_inc(env->sd, lb_gained[env->idle]);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * hmp_active_task_migration_cpu_stop is run by cpu stopper and used to
+ * migrate a specific task from one runqueue to another.
+ * hmp_force_up_migration uses this to push a currently running task
+ * off a runqueue.
+ * Based on active_load_balance_cpu_stop() and can potentially be merged.
+ */
+static int hmp_active_task_migration_cpu_stop(void *data)
+{
+ struct rq *busiest_rq = data;
+ struct task_struct *p = busiest_rq->migrate_task;
+ int busiest_cpu = cpu_of(busiest_rq);
+ int target_cpu = busiest_rq->push_cpu;
+ struct rq *target_rq = cpu_rq(target_cpu);
+ struct sched_domain *sd;
+
+ raw_spin_lock_irq(&busiest_rq->lock);
+ /* make sure the requested cpu hasn't gone down in the meantime */
+ if (unlikely(busiest_cpu != smp_processor_id() ||
+ !busiest_rq->active_balance)) {
+ goto out_unlock;
+ }
+ /* Is there any task to move? */
+ if (busiest_rq->nr_running <= 1)
+ goto out_unlock;
+ /* Task has migrated meanwhile, abort forced migration */
+ if (task_rq(p) != busiest_rq)
+ goto out_unlock;
+ /*
+ * This condition is "impossible", if it occurs
+ * we need to fix it. Originally reported by
+ * Bjorn Helgaas on a 128-cpu setup.
+ */
+ BUG_ON(busiest_rq == target_rq);
+
+ /* move a task from busiest_rq to target_rq */
+ double_lock_balance(busiest_rq, target_rq);
+
+ /* Search for an sd spanning us and the target CPU. */
+ rcu_read_lock();
+ for_each_domain(target_cpu, sd) {
+ if (cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
+ break;
+ }
+
+ if (likely(sd)) {
+ struct lb_env env = {
+ .sd = sd,
+ .dst_cpu = target_cpu,
+ .dst_rq = target_rq,
+ .src_cpu = busiest_rq->cpu,
+ .src_rq = busiest_rq,
+ .idle = CPU_IDLE,
+ };
+
+ schedstat_inc(sd, alb_count);
+
+ if (move_specific_task(&env, p))
+ schedstat_inc(sd, alb_pushed);
+ else
+ schedstat_inc(sd, alb_failed);
+ }
+ rcu_read_unlock();
+ double_unlock_balance(busiest_rq, target_rq);
+out_unlock:
+ busiest_rq->active_balance = 0;
+ raw_spin_unlock_irq(&busiest_rq->lock);
+ return 0;
+}
+
+static DEFINE_SPINLOCK(hmp_force_migration);
+
+/*
+ * hmp_force_up_migration checks runqueues for tasks that need to
+ * be actively migrated to a faster cpu.
+ */
+static void hmp_force_up_migration(int this_cpu)
+{
+ int cpu;
+ struct sched_entity *curr;
+ struct rq *target;
+ unsigned long flags;
+ unsigned int force;
+ struct task_struct *p;
+
+ if (!spin_trylock(&hmp_force_migration))
+ return;
+ for_each_online_cpu(cpu) {
+ force = 0;
+ target = cpu_rq(cpu);
+ raw_spin_lock_irqsave(&target->lock, flags);
+ curr = target->cfs.curr;
+ if (!curr || !entity_is_task(curr)) {
+ raw_spin_unlock_irqrestore(&target->lock, flags);
+ continue;
+ }
+ p = task_of(curr);
+ if (hmp_up_migration(cpu, curr)) {
+ if (!target->active_balance) {
+ target->active_balance = 1;
+ target->push_cpu = hmp_select_faster_cpu(p, cpu);
+ target->migrate_task = p;
+ force = 1;
+ trace_sched_hmp_migrate(p, target->push_cpu, 1);
+ hmp_next_up_delay(&p->se, target->push_cpu);
+ }
+ }
+ if (!force && !target->active_balance) {
+ /*
+ * For now we just check the currently running task.
+ * Selecting the lightest task for offloading will
+ * require extensive book keeping.
+ */
+ target->push_cpu = hmp_offload_down(cpu, curr);
+ if (target->push_cpu < NR_CPUS) {
+ target->active_balance = 1;
+ target->migrate_task = p;
+ force = 1;
+ trace_sched_hmp_migrate(p, target->push_cpu, 2);
+ hmp_next_down_delay(&p->se, target->push_cpu);
+ }
+ }
+ raw_spin_unlock_irqrestore(&target->lock, flags);
+ if (force)
+ stop_one_cpu_nowait(cpu_of(target),
+ hmp_active_task_migration_cpu_stop,
+ target, &target->active_balance_work);
+ }
+ spin_unlock(&hmp_force_migration);
+}
+#else
+static void hmp_force_up_migration(int this_cpu) { }
+#endif /* CONFIG_SCHED_HMP */
+
/*
* run_rebalance_domains is triggered when needed from the scheduler tick.
* Also triggered for nohz idle balancing (with nohz_balancing_kick set).
@@ -5670,6 +6536,8 @@ static void run_rebalance_domains(struct softirq_action *h)
enum cpu_idle_type idle = this_rq->idle_balance ?
CPU_IDLE : CPU_NOT_IDLE;
+ hmp_force_up_migration(this_cpu);
+
rebalance_domains(this_cpu, idle);
/*
@@ -6169,6 +7037,139 @@ __init void init_sched_fair_class(void)
zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
cpu_notifier(sched_ilb_notifier, 0);
#endif
+
+#ifdef CONFIG_SCHED_HMP
+ hmp_cpu_mask_setup();
+#endif
#endif /* SMP */
}
+
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
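/*
 * Note on the scale calculation below: callers pass max already shifted
 * down by SCHED_FREQSCALE_SHIFT (see cpufreq_policy_callback), so
 * curr / max approximates (curr << SCHED_FREQSCALE_SHIFT) / policy->max
 * and yields a value on the 0..1024 scale used by the load-tracking code,
 * e.g. 500MHz out of a 1GHz maximum gives roughly 512.
 */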
+static u32 cpufreq_calc_scale(u32 min, u32 max, u32 curr)
+{
+ u32 result = curr / max;
+ return result;
+}
+
+/* Called when the CPU Frequency is changed.
+ * Once for each CPU.
+ */
+static int cpufreq_callback(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct cpufreq_freqs *freq = data;
+ int cpu = freq->cpu;
+ struct cpufreq_extents *extents;
+
+ if (freq->flags & CPUFREQ_CONST_LOOPS)
+ return NOTIFY_OK;
+
+ if (val != CPUFREQ_POSTCHANGE)
+ return NOTIFY_OK;
+
+ /* if dynamic load scale is disabled, set the load scale to 1.0 */
+ if (!hmp_data.freqinvar_load_scale_enabled) {
+ freq_scale[cpu].curr_scale = 1024;
+ return NOTIFY_OK;
+ }
+
+ extents = &freq_scale[cpu];
+ if (extents->flags & SCHED_LOAD_FREQINVAR_SINGLEFREQ) {
+ /* If our governor was recognised as a single-freq governor,
+ * use 1.0
+ */
+ extents->curr_scale = 1024;
+ } else {
+ extents->curr_scale = cpufreq_calc_scale(extents->min,
+ extents->max, freq->new);
+ }
+
+ return NOTIFY_OK;
+}
+
+/* Called when the CPUFreq governor is changed.
+ * Only called for the CPUs whose policy is actually changed by
+ * userspace.
+ */
+static int cpufreq_policy_callback(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct cpufreq_policy *policy = data;
+ struct cpufreq_extents *extents;
+ int cpu, singleFreq = 0;
+ static const char performance_governor[] = "performance";
+ static const char powersave_governor[] = "powersave";
+
+ if (event == CPUFREQ_START)
+ return 0;
+
+ if (event != CPUFREQ_INCOMPATIBLE)
+ return 0;
+
+ /* CPUFreq governors do not accurately report the range of
+ * CPU Frequencies they will choose from.
+ * We recognise performance and powersave governors as
+ * single-frequency only.
+ */
+ if (!strncmp(policy->governor->name, performance_governor,
+ strlen(performance_governor)) ||
+ !strncmp(policy->governor->name, powersave_governor,
+ strlen(powersave_governor)))
+ singleFreq = 1;
+
+ /* Make sure that all CPUs impacted by this policy are
+ * updated since we will only get a notification when the
+ * user explicitly changes the policy on a CPU.
+ */
+ for_each_cpu(cpu, policy->cpus) {
+ extents = &freq_scale[cpu];
+ extents->max = policy->max >> SCHED_FREQSCALE_SHIFT;
+ extents->min = policy->min >> SCHED_FREQSCALE_SHIFT;
+ if (!hmp_data.freqinvar_load_scale_enabled) {
+ extents->curr_scale = 1024;
+ } else if (singleFreq) {
+ extents->flags |= SCHED_LOAD_FREQINVAR_SINGLEFREQ;
+ extents->curr_scale = 1024;
+ } else {
+ extents->flags &= ~SCHED_LOAD_FREQINVAR_SINGLEFREQ;
+ extents->curr_scale = cpufreq_calc_scale(extents->min,
+ extents->max, policy->cur);
+ }
+ }
+
+ return 0;
+}
+
+static struct notifier_block cpufreq_notifier = {
+ .notifier_call = cpufreq_callback,
+};
+static struct notifier_block cpufreq_policy_notifier = {
+ .notifier_call = cpufreq_policy_callback,
+};
+
+static int __init register_sched_cpufreq_notifier(void)
+{
+ int ret = 0;
+
+ /* init safe defaults since there are no policies at registration */
+ for (ret = 0; ret < CONFIG_NR_CPUS; ret++) {
+ /* safe defaults */
+ freq_scale[ret].max = 1024;
+ freq_scale[ret].min = 1024;
+ freq_scale[ret].curr_scale = 1024;
+ }
+
+ pr_info("sched: registering cpufreq notifiers for scale-invariant loads\n");
+ ret = cpufreq_register_notifier(&cpufreq_policy_notifier,
+ CPUFREQ_POLICY_NOTIFIER);
+
+ if (ret != -EINVAL)
+ ret = cpufreq_register_notifier(&cpufreq_notifier,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ return ret;
+}
+
+core_initcall(register_sched_cpufreq_notifier);
+#endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index fc886441436a..6a95904661e0 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -113,7 +113,7 @@ struct task_group {
atomic_t load_weight;
atomic64_t load_avg;
- atomic_t runnable_avg;
+ atomic_t runnable_avg, usage_avg;
#endif
#ifdef CONFIG_RT_GROUP_SCHED
@@ -243,7 +243,7 @@ struct cfs_rq {
#endif /* CONFIG_FAIR_GROUP_SCHED */
/* These always depend on CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_FAIR_GROUP_SCHED
- u32 tg_runnable_contrib;
+ u32 tg_runnable_contrib, tg_usage_contrib;
u64 tg_load_contrib;
#endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -425,6 +425,9 @@ struct rq {
int active_balance;
int push_cpu;
struct cpu_stop_work active_balance_work;
+#ifdef CONFIG_SCHED_HMP
+ struct task_struct *migrate_task;
+#endif
/* cpu of this runqueue: */
int cpu;
int online;
@@ -547,6 +550,12 @@ DECLARE_PER_CPU(int, sd_llc_id);
extern int group_balance_cpu(struct sched_group *sg);
+#ifdef CONFIG_SCHED_HMP
+static LIST_HEAD(hmp_domains);
+DECLARE_PER_CPU(struct hmp_domain *, hmp_cpu_domain);
+#define hmp_cpu_domain(cpu) (per_cpu(hmp_cpu_domain, (cpu)))
+#endif /* CONFIG_SCHED_HMP */
+
#endif /* CONFIG_SMP */
#include "stats.h"
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c88878db491e..419f7e4a6f77 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -105,6 +105,7 @@ extern unsigned int core_pipe_limit;
#endif
extern int pid_max;
extern int min_free_kbytes;
+extern int min_free_order_shift;
extern int pid_max_min, pid_max_max;
extern int sysctl_drop_caches;
extern int percpu_pagelist_fraction;
@@ -1250,6 +1251,13 @@ static struct ctl_table vm_table[] = {
.extra1 = &zero,
},
{
+ .procname = "min_free_order_shift",
+ .data = &min_free_order_shift,
+ .maxlen = sizeof(min_free_order_shift),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec
+ },
+ {
.procname = "percpu_pagelist_fraction",
.data = &percpu_pagelist_fraction,
.maxlen = sizeof(percpu_pagelist_fraction),
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e5125677efa0..3c13e46d7d24 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2899,6 +2899,8 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
+ buf[cnt] = 0;
+
trace_set_options(buf);
*ppos += cnt;
@@ -3452,7 +3454,7 @@ static int tracing_wait_pipe(struct file *filp)
return -EINTR;
/*
- * We block until we read something and tracing is enabled.
+ * We block until we read something and tracing is disabled.
* We still block if tracing is disabled, but we have never
* read anything. This allows a user to cat this file, and
* then enable tracing. But after we have read something,
@@ -3460,7 +3462,7 @@ static int tracing_wait_pipe(struct file *filp)
*
* iter->pos will be 0 if we haven't read anything.
*/
- if (tracing_is_enabled() && iter->pos)
+ if (!tracing_is_enabled() && iter->pos)
break;
}
@@ -4815,10 +4817,17 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
return ret;
if (buffer) {
- if (val)
+ mutex_lock(&trace_types_lock);
+ if (val) {
ring_buffer_record_on(buffer);
- else
+ if (current_trace->start)
+ current_trace->start(tr);
+ } else {
ring_buffer_record_off(buffer);
+ if (current_trace->stop)
+ current_trace->stop(tr);
+ }
+ mutex_unlock(&trace_types_lock);
}
(*ppos)++;
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 67604e599384..aaf8baf86357 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -725,8 +725,9 @@ config DEBUG_LOCKING_API_SELFTESTS
mutexes and rwsems.
config STACKTRACE
- bool
+ bool "Stacktrace"
depends on STACKTRACE_SUPPORT
+ default y
config DEBUG_STACK_USAGE
bool "Stack utilization instrumentation"
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
index 145dec5267c9..5fbed5caba6e 100644
--- a/lib/cpu_rmap.c
+++ b/lib/cpu_rmap.c
@@ -45,6 +45,7 @@ struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
if (!rmap)
return NULL;
+ kref_init(&rmap->refcount);
rmap->obj = (void **)((char *)rmap + obj_offset);
/* Initially assign CPUs to objects on a rota, since we have
@@ -63,6 +64,35 @@ struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
}
EXPORT_SYMBOL(alloc_cpu_rmap);
+/**
+ * cpu_rmap_release - internal reclaiming helper called from kref_put
+ * @ref: kref to struct cpu_rmap
+ */
+static void cpu_rmap_release(struct kref *ref)
+{
+ struct cpu_rmap *rmap = container_of(ref, struct cpu_rmap, refcount);
+ kfree(rmap);
+}
+
+/**
+ * cpu_rmap_get - internal helper to get new ref on a cpu_rmap
+ * @rmap: reverse-map allocated with alloc_cpu_rmap()
+ */
+static inline void cpu_rmap_get(struct cpu_rmap *rmap)
+{
+ kref_get(&rmap->refcount);
+}
+
+/**
+ * cpu_rmap_put - release ref on a cpu_rmap
+ * @rmap: reverse-map allocated with alloc_cpu_rmap()
+ */
+int cpu_rmap_put(struct cpu_rmap *rmap)
+{
+ return kref_put(&rmap->refcount, cpu_rmap_release);
+}
+EXPORT_SYMBOL(cpu_rmap_put);
+
/* Reevaluate nearest object for given CPU, comparing with the given
* neighbours at the given distance.
*/
@@ -197,8 +227,7 @@ struct irq_glue {
* free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs
* @rmap: Reverse-map allocated with alloc_irq_cpu_map(), or %NULL
*
- * Must be called in process context, before freeing the IRQs, and
- * without holding any locks required by global workqueue items.
+ * Must be called in process context, before freeing the IRQs.
*/
void free_irq_cpu_rmap(struct cpu_rmap *rmap)
{
@@ -212,12 +241,18 @@ void free_irq_cpu_rmap(struct cpu_rmap *rmap)
glue = rmap->obj[index];
irq_set_affinity_notifier(glue->notify.irq, NULL);
}
- irq_run_affinity_notifiers();
- kfree(rmap);
+ cpu_rmap_put(rmap);
}
EXPORT_SYMBOL(free_irq_cpu_rmap);
+/**
+ * irq_cpu_rmap_notify - callback for IRQ subsystem when IRQ affinity updated
+ * @notify: struct irq_affinity_notify passed by irq/manage.c
+ * @mask: cpu mask for new SMP affinity
+ *
+ * This is executed in workqueue context.
+ */
static void
irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
{
@@ -230,10 +265,16 @@ irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
pr_warning("irq_cpu_rmap_notify: update failed: %d\n", rc);
}
+/**
+ * irq_cpu_rmap_release - reclaiming callback for IRQ subsystem
+ * @ref: kref to struct irq_affinity_notify passed by irq/manage.c
+ */
static void irq_cpu_rmap_release(struct kref *ref)
{
struct irq_glue *glue =
container_of(ref, struct irq_glue, notify.kref);
+
+ cpu_rmap_put(glue->rmap);
kfree(glue);
}
@@ -258,10 +299,13 @@ int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
glue->notify.notify = irq_cpu_rmap_notify;
glue->notify.release = irq_cpu_rmap_release;
glue->rmap = rmap;
+ cpu_rmap_get(rmap);
glue->index = cpu_rmap_add(rmap, glue);
rc = irq_set_affinity_notifier(irq, &glue->notify);
- if (rc)
+ if (rc) {
+ cpu_rmap_put(glue->rmap);
kfree(glue);
+ }
return rc;
}
EXPORT_SYMBOL(irq_cpu_rmap_add);
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 4f56a11d67fa..c0e31fe2fabf 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -194,8 +194,12 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
}
}
-__always_inline void
-__rb_erase_color(struct rb_node *parent, struct rb_root *root,
+/*
+ * Inline version for rb_erase() use - we want to be able to inline
+ * and eliminate the dummy_rotate callback there
+ */
+static __always_inline void
+____rb_erase_color(struct rb_node *parent, struct rb_root *root,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;
@@ -355,6 +359,13 @@ __rb_erase_color(struct rb_node *parent, struct rb_root *root,
}
}
}
+
+/* Non-inline version for rb_erase_augmented() use */
+void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
+ void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+ ____rb_erase_color(parent, root, augment_rotate);
+}
EXPORT_SYMBOL(__rb_erase_color);
/*
@@ -380,7 +391,10 @@ EXPORT_SYMBOL(rb_insert_color);
void rb_erase(struct rb_node *node, struct rb_root *root)
{
- rb_erase_augmented(node, root, &dummy_callbacks);
+ struct rb_node *rebalance;
+ rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
+ if (rebalance)
+ ____rb_erase_color(rebalance, root, dummy_rotate);
}
EXPORT_SYMBOL(rb_erase);
diff --git a/linaro/configs/android.conf b/linaro/configs/android.conf
new file mode 100644
index 000000000000..aca22831b22b
--- /dev/null
+++ b/linaro/configs/android.conf
@@ -0,0 +1,32 @@
+CONFIG_IPV6=y
+# CONFIG_IPV6_SIT is not set
+CONFIG_PANIC_TIMEOUT=0
+CONFIG_HAS_WAKELOCK=y
+CONFIG_WAKELOCK=y
+CONFIG_USER_WAKELOCK=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_DM_CRYPT=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_ANDROID_PARANOID_NETWORK=y
+CONFIG_NET_ACTIVITY_STATS=y
+CONFIG_FB_EARLYSUSPEND=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+CONFIG_USB_G_ANDROID=y
+CONFIG_SWITCH=y
+CONFIG_STAGING=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOGGER=y
+CONFIG_ANDROID_RAM_CONSOLE=y
+CONFIG_ANDROID_TIMED_OUTPUT=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ANDROID_INTF_ALARM_DEV=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_FUSE_FS=y
diff --git a/linaro/configs/arndale.conf b/linaro/configs/arndale.conf
new file mode 100644
index 000000000000..711f9be3192a
--- /dev/null
+++ b/linaro/configs/arndale.conf
@@ -0,0 +1,67 @@
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_ARCH_EXYNOS=y
+CONFIG_S3C_LOWLEVEL_UART_PORT=2
+# CONFIG_ARCH_EXYNOS4 is not set
+CONFIG_ARCH_EXYNOS5=y
+CONFIG_NR_CPUS=2
+CONFIG_PREEMPT=y
+CONFIG_HIGHMEM=y
+# CONFIG_COMPACTION is not set
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
+CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init= mem=256M"
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AX88796=y
+CONFIG_AX88796_93CX6=y
+CONFIG_SMC91X=y
+CONFIG_SMC911X=y
+CONFIG_SMSC911X=y
+CONFIG_USB_USBNET=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_I2C=y
+CONFIG_I2C_S3C2410=y
+CONFIG_THERMAL=y
+CONFIG_CPU_THERMAL=y
+CONFIG_EXYNOS_THERMAL=y
+CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_S5M8767=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_S5P=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_EXYNOS=y
+CONFIG_MMC=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_IDMAC=y
+CONFIG_MMC_DW_EXYNOS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_INFO=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_LL=y
+CONFIG_DEBUG_S3C_UART2=y
+CONFIG_EARLY_PRINTK=y
diff --git a/linaro/configs/big-LITTLE-MP.conf b/linaro/configs/big-LITTLE-MP.conf
new file mode 100644
index 000000000000..8cc2be049a41
--- /dev/null
+++ b/linaro/configs/big-LITTLE-MP.conf
@@ -0,0 +1,13 @@
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+CONFIG_NO_HZ=y
+CONFIG_SCHED_MC=y
+CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE=y
+CONFIG_SCHED_HMP=y
+CONFIG_HMP_FAST_CPU_MASK=""
+CONFIG_HMP_SLOW_CPU_MASK=""
+CONFIG_HMP_VARIABLE_SCALE=y
+CONFIG_HMP_FREQUENCY_INVARIANT_SCALE=y
+CONFIG_SCHED_HMP_PRIO_FILTER=y
+CONFIG_SCHED_HMP_PRIO_FILTER_VAL=5
diff --git a/linaro/configs/highbank.conf b/linaro/configs/highbank.conf
new file mode 100644
index 000000000000..4a6fe0977596
--- /dev/null
+++ b/linaro/configs/highbank.conf
@@ -0,0 +1,39 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_ARCH_HIGHBANK=y
+CONFIG_ARM_ERRATA_754322=y
+CONFIG_SMP=y
+CONFIG_SCHED_MC=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_CMDLINE="console=ttyAMA0"
+CONFIG_CPU_IDLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_NET=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_SATA_HIGHBANK=y
+CONFIG_NETDEVICES=y
+CONFIG_NET_CALXEDA_XGMAC=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_IPMI_HANDLER=y
+CONFIG_IPMI_SI=y
+CONFIG_I2C=y
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_SPI=y
+CONFIG_SPI_PL022=y
+CONFIG_GPIO_PL061=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_EDAC=y
+CONFIG_EDAC_MM_EDAC=y
+CONFIG_EDAC_HIGHBANK_MC=y
+CONFIG_EDAC_HIGHBANK_L2=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_DMADEVICES=y
+CONFIG_PL330_DMA=y
diff --git a/linaro/configs/imx5.conf b/linaro/configs/imx5.conf
new file mode 100644
index 000000000000..0196c572bb14
--- /dev/null
+++ b/linaro/configs/imx5.conf
@@ -0,0 +1,105 @@
+CONFIG_ARCH_MXC=y
+CONFIG_MXC_PWM=y
+CONFIG_SOC_IMX6Q=y
+CONFIG_HIGHMEM=y
+CONFIG_COMPACTION=y
+CONFIG_CMDLINE="noinitrd console=ttymxc0,115200 root=/dev/nfs nfsroot=192.168.0.101:/shared/nfs ip=dhcp"
+CONFIG_CFG80211=y
+CONFIG_MAC80211=y
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SMC91X=y
+CONFIG_MARVELL_PHY=y
+CONFIG_DAVICOM_PHY=y
+CONFIG_QSEMI_PHY=y
+CONFIG_LXT_PHY=y
+CONFIG_CICADA_PHY=y
+CONFIG_VITESSE_PHY=y
+CONFIG_SMSC_PHY=y
+CONFIG_BROADCOM_PHY=y
+CONFIG_ICPLUS_PHY=y
+CONFIG_REALTEK_PHY=y
+CONFIG_NATIONAL_PHY=y
+CONFIG_STE10XP=y
+CONFIG_LSI_ET1011C_PHY=y
+CONFIG_MDIO_BITBANG=y
+CONFIG_MDIO_GPIO=y
+# CONFIG_WLAN is not set
+CONFIG_INPUT_FF_MEMLESS=y
+CONFIG_INPUT_POLLDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_EVBUG=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_MPR121=y
+CONFIG_MOUSE_PS2=y
+CONFIG_MOUSE_PS2_ELANTECH=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_SERIO_SERPORT=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_IMX=y
+CONFIG_SERIAL_IMX_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_I2C=y
+# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_CHARDEV=y
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_I2C_ALGOBIT=y
+CONFIG_I2C_ALGOPCF=y
+CONFIG_I2C_ALGOPCA=y
+CONFIG_I2C_IMX=y
+CONFIG_SPI=y
+CONFIG_SPI_IMX=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_POWER_SUPPLY_DEBUG=y
+CONFIG_TEST_POWER=y
+CONFIG_MFD_MC13XXX=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_MC13892=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_VIDEO_DEV=y
+CONFIG_USB_VIDEO_CLASS=y
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_BACKLIGHT_PWM=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x16=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_ARM is not set
+# CONFIG_SND_SPI is not set
+# CONFIG_SND_USB is not set
+CONFIG_SND_SOC=y
+CONFIG_SND_IMX_SOC=y
+CONFIG_HIDRAW=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB=y
+# CONFIG_USB_OTG_WHITELIST is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_MXC=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_GADGET=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_ESDHC_IMX=y
+CONFIG_NEW_LEDS=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_INTF_DEV_UIE_EMUL=y
+CONFIG_DMADEVICES=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
diff --git a/linaro/configs/linaro-base.conf b/linaro/configs/linaro-base.conf
new file mode 100644
index 000000000000..23638c3d214b
--- /dev/null
+++ b/linaro/configs/linaro-base.conf
@@ -0,0 +1,90 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+CONFIG_HOTPLUG=y
+CONFIG_PERF_EVENTS=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SMP=y
+CONFIG_SCHED_MC=y
+CONFIG_SCHED_SMT=y
+CONFIG_THUMB2_KERNEL=y
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_IDLE=y
+CONFIG_BINFMT_MISC=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_INET_LRO is not set
+CONFIG_NETFILTER=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_NAND=y
+CONFIG_NETDEVICES=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_BTRFS_FS=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_LZO=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_KEYS=y
+CONFIG_CRYPTO_MICHAEL_MIC=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRC_T10DIF=y
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC7=y
+CONFIG_HW_PERF_EVENTS=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_ENABLE_DEFAULT_TRACERS=y
+CONFIG_PROC_DEVICETREE=y
diff --git a/linaro/configs/null.conf b/linaro/configs/null.conf
new file mode 100644
index 000000000000..1f71535af70b
--- /dev/null
+++ b/linaro/configs/null.conf
@@ -0,0 +1 @@
+### null config just for testing multiple config frags
diff --git a/linaro/configs/omap4.conf b/linaro/configs/omap4.conf
new file mode 100644
index 000000000000..1cb9960dbb2b
--- /dev/null
+++ b/linaro/configs/omap4.conf
@@ -0,0 +1,204 @@
+CONFIG_TASKSTATS=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_PERF_COUNTERS=y
+CONFIG_DEBUG_PERF_USE_VMALLOC=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_EFI_PARTITION=y
+CONFIG_ARCH_OMAP=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_OMAP_SMARTREFLEX=y
+CONFIG_OMAP_RESET_CLOCKS=y
+CONFIG_OMAP5_SEVM_PALMAS=y
+CONFIG_OMAP_MUX_DEBUG=y
+CONFIG_OMAP_SCM_DEV=y
+CONFIG_OMAP_MBOX_FWK=y
+# CONFIG_ARCH_OMAP2 is not set
+# CONFIG_ARCH_OMAP3 is not set
+# CONFIG_MACH_OMAP_4430SDP is not set
+CONFIG_SECURE_REGS=y
+CONFIG_OMAP4_ERRATA_I688=y
+CONFIG_ARM_THUMBEE=y
+CONFIG_PL310_ERRATA_753970=y
+CONFIG_PL310_ERRATA_769419=y
+CONFIG_NR_CPUS=2
+CONFIG_PREEMPT=y
+CONFIG_HIGHMEM=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="console=ttyO2,115200n8 root=/dev/mmcblk0p2 rootwait earlyprintk earlycon=ttyO2.115200n8"
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_PM_DEBUG=y
+CONFIG_TCP_CONG_ADVANCED=y
+# CONFIG_TCP_CONG_BIC is not set
+# CONFIG_TCP_CONG_WESTWOOD is not set
+# CONFIG_TCP_CONG_HTCP is not set
+CONFIG_BT=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_HIDP=m
+CONFIG_BT_WILINK=m
+CONFIG_CFG80211=m
+CONFIG_NL80211_TESTMODE=y
+CONFIG_LIB80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_LEDS=y
+CONFIG_MAC80211_DEBUGFS=y
+CONFIG_RFKILL=y
+CONFIG_RFKILL_INPUT=y
+CONFIG_RFKILL_GPIO=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BMP085=m
+CONFIG_TI_ST=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_ATA=y
+# CONFIG_SATA_PMP is not set
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_SMSC95XX=y
+CONFIG_WL_TI=y
+CONFIG_WL12XX=m
+CONFIG_WLCORE_SDIO=m
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_OMAP4=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_QUANTUM_OBP=m
+CONFIG_INPUT_TSL2771=m
+CONFIG_INPUT_PALMAS_PWRBUTTON=y
+CONFIG_INPUT_MPU6050_GYRO=m
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_HW_RANDOM=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_GPIO=y
+CONFIG_GPIO_TWL4030=y
+CONFIG_SENSORS_OMAP_TEMP_SENSOR=y
+CONFIG_SENSORS_TMP102=y
+CONFIG_OMAP4460PLUS_SCM=y
+CONFIG_OMAP4460PLUS_TEMP_SENSOR=y
+CONFIG_TWL6040_CORE=y
+CONFIG_MFD_PALMAS=y
+CONFIG_MFD_PALMAS_GPADC=y
+CONFIG_MFD_PALMAS_RESOURCE=y
+CONFIG_REGULATOR_DUMMY=y
+CONFIG_REGULATOR_TWL4030=y
+CONFIG_REGULATOR_PALMAS=y
+CONFIG_DRM=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_OMAP2_VRAM_SIZE=16
+CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS=y
+CONFIG_OMAP2_DSS_RFBI=y
+CONFIG_OMAP2_DSS_DSI=y
+CONFIG_PANEL_GENERIC_DPI=y
+CONFIG_PANEL_TFP410=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_LOGO=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_VERBOSE_PRINTK=y
+CONFIG_SND_DEBUG=y
+CONFIG_SND_DEBUG_VERBOSE=y
+# CONFIG_SND_ARM is not set
+CONFIG_SND_SOC=y
+CONFIG_SND_OMAP_SOC=y
+CONFIG_SND_OMAP_SOC_OMAP_ABE_TWL6040=y
+CONFIG_SND_OMAP_SOC_SDP4430=y
+CONFIG_SND_OMAP_SOC_OMAP_HDMI=y
+CONFIG_HIDRAW=y
+CONFIG_USB_HIDDEV=y
+CONFIG_HID_APPLE=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_DWC3=m
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_MUSB_HDRC=m
+CONFIG_USB_MUSB_OMAP2PLUS=m
+CONFIG_USB_STORAGE=y
+CONFIG_USB_GADGET=m
+CONFIG_USB_OMAP=m
+CONFIG_USB_GADGET_MUSB_HDRC=m
+CONFIG_USB_ZERO=m
+CONFIG_USB_MASS_STORAGE=m
+CONFIG_TWL4030_USB=y
+CONFIG_TWL6030_USB=y
+CONFIG_MMC=y
+CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_MMC_BLOCK_MINORS=16
+CONFIG_MMC_OMAP_HS=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_TWL4030=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_OMAP=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_STAGING=y
+CONFIG_DRM_OMAP=y
+CONFIG_THERMAL_FRAMEWORK=y
+CONFIG_OMAP_THERMAL=y
+CONFIG_OMAP_DIE_GOVERNOR=y
+CONFIG_OMAP_GPU_GOVERNOR=y
+CONFIG_DRM_OMAP_DCE=m
+CONFIG_HWSPINLOCK_OMAP=y
+CONFIG_OMAP_IOMMU=y
+CONFIG_OMAP_IOVMM=y
+CONFIG_OMAP_REMOTEPROC_IPU=y
+CONFIG_OMAP_REMOTEPROC_DSP=y
+CONFIG_OMAP_RPMSG_RESMGR=m
+CONFIG_RPMSG_OMX=m
+CONFIG_VIRT_DRIVERS=y
+CONFIG_STM_FW=y
+CONFIG_STM_ARM=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_FANOTIFY=y
+CONFIG_VFAT_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFS_USE_LEGACY_DNS=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_EVENT_POWER_TRACING_DEPRECATED is not set
+CONFIG_FUNCTION_TRACER=y
+# CONFIG_ARM_UNWIND is not set
+CONFIG_DEBUG_LL=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_PCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_LIBCRC32C=y
+# CONFIG_CPU_IDLE is not set
diff --git a/linaro/configs/origen.conf b/linaro/configs/origen.conf
new file mode 100644
index 000000000000..0a1cec8c0ab0
--- /dev/null
+++ b/linaro/configs/origen.conf
@@ -0,0 +1,88 @@
+CONFIG_ARCH_EXYNOS=y
+CONFIG_S3C_LOWLEVEL_UART_PORT=2
+CONFIG_S3C24XX_PWM=y
+CONFIG_MACH_SMDKC210=y
+CONFIG_MACH_ARMLEX4210=y
+CONFIG_MACH_UNIVERSAL_C210=y
+CONFIG_MACH_NURI=y
+CONFIG_MACH_ORIGEN=y
+CONFIG_MACH_SMDK4412=y
+CONFIG_MACH_EXYNOS4_DT=y
+CONFIG_NR_CPUS=2
+CONFIG_AEABI=y
+CONFIG_CMDLINE="root=/dev/mmcblk0p1 rw rootwait console=ttySAC2,115200"
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_IDLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_PM_RUNTIME=y
+CONFIG_RFKILL=y
+CONFIG_RFKILL_GPIO=y
+CONFIG_CMA=y
+CONFIG_CMA_SIZE_MBYTES=32
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CFG80211=y
+CONFIG_USB_PEGASUS=y
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_DM9601=y
+CONFIG_USB_NET_MCS7830=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_I2C=y
+CONFIG_I2C_S3C2410=y
+CONFIG_THERMAL=y
+CONFIG_CPU_THERMAL=y
+CONFIG_SENSORS_EXYNOS4_TMU=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_MFD_MAX8997=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_MAX8997=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_VIDEO_SAMSUNG_S5P_FIMC=y
+CONFIG_VIDEO_S5P_FIMC=y
+CONFIG_VIDEO_SAMSUNG_S5P_TV=y
+CONFIG_VIDEO_SAMSUNG_S5P_HDMI=y
+CONFIG_VIDEO_SAMSUNG_S5P_SDO=y
+CONFIG_VIDEO_SAMSUNG_S5P_MIXER=y
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_SAMSUNG_S5P_G2D=y
+CONFIG_VIDEO_SAMSUNG_S5P_JPEG=y
+CONFIG_VIDEO_SAMSUNG_S5P_MFC=y
+CONFIG_FB=y
+CONFIG_FB_S3C=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_LCD_PLATFORM=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_PWM=y
+CONFIG_LOGO=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_S5P=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_EXYNOS=y
+CONFIG_MMC=y
+CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_S3C=y
+CONFIG_MMC_SDHCI_S3C_DMA=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_S3C=y
+CONFIG_DEBUG_S3C_UART2=y
diff --git a/linaro/configs/u8500.conf b/linaro/configs/u8500.conf
new file mode 100644
index 000000000000..61c1f232de80
--- /dev/null
+++ b/linaro/configs/u8500.conf
@@ -0,0 +1,118 @@
+CONFIG_EXPERIMENTAL=y
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_ARCH_U8500=y
+CONFIG_MACH_HREFV60=y
+CONFIG_MACH_SNOWBALL=y
+CONFIG_MACH_UX500_DT=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=2
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_CMDLINE="root=/dev/ram0 console=ttyAMA2,115200n8"
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_PM_RUNTIME=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IPV6=y
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_IPV6_SIT=m
+CONFIG_NETFILTER=y
+CONFIG_PHONET=y
+# CONFIG_WIRELESS is not set
+CONFIG_CAIF=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_SENSORS_BH1780=y
+CONFIG_NETDEVICES=y
+CONFIG_SMSC911X=y
+CONFIG_SMSC_PHY=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_NOMADIK=y
+CONFIG_KEYBOARD_STMPE=y
+CONFIG_KEYBOARD_TC3589X=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_BU21013=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_AB8500_PONKEY=y
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_NOMADIK=y
+CONFIG_SPI=y
+CONFIG_SPI_PL022=y
+CONFIG_GPIO_STMPE=y
+CONFIG_GPIO_TC3589X=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_AB8500_BM=y
+CONFIG_AB8500_BATTERY_THERM_ON_BATCTRL=y
+CONFIG_MFD_STMPE=y
+CONFIG_MFD_TC3589X=y
+CONFIG_AB8500_CORE=y
+CONFIG_REGULATOR_AB8500=y
+CONFIG_USB_GADGET=y
+CONFIG_AB8500_USB=y
+CONFIG_MMC=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_ARMMMCI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_LM3530=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_LP5521=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_AB8500=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_DMADEVICES=y
+CONFIG_STE_DMA40=y
+CONFIG_STAGING=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=y
+CONFIG_HSEM_U8500=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CONFIGFS_FS=m
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_FTRACE is not set
+CONFIG_DEBUG_USER=y
diff --git a/linaro/configs/ubuntu-minimal.conf b/linaro/configs/ubuntu-minimal.conf
new file mode 100644
index 000000000000..2c6a13eb46c9
--- /dev/null
+++ b/linaro/configs/ubuntu-minimal.conf
@@ -0,0 +1,24 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
+CONFIG_SECCOMP=y
+CONFIG_CC_STACKPROTECTOR=y
+CONFIG_SYN_COOKIES=y
+CONFIG_IPV6=y
+CONFIG_NETLABEL=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_DEVKMEM is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_STRICT_DEVMEM=y
+CONFIG_SECURITY=y
+CONFIG_LSM_MMAP_MIN_ADDR=0
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_SECURITY_APPARMOR=y
+CONFIG_DEFAULT_SECURITY_APPARMOR=y
diff --git a/linaro/configs/ubuntu.conf b/linaro/configs/ubuntu.conf
new file mode 100644
index 000000000000..b65be649bb18
--- /dev/null
+++ b/linaro/configs/ubuntu.conf
@@ -0,0 +1,2129 @@
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_DEVKMEM is not set
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_INIT_PASS_ALL_PARAMS=y
+CONFIG_DEBUG_RODATA=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_KERNEL_GZIP=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_FHANDLE=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_AUDIT=y
+CONFIG_AUDITSYSCALL=y
+CONFIG_AUDIT_WATCH=y
+CONFIG_AUDIT_TREE=y
+# CONFIG_AUDIT_LOGINUID_IMMUTABLE is not set
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_CHIP=y
+CONFIG_IRQ_DOMAIN=y
+# CONFIG_CGROUPS is not set
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_NAMESPACES=y
+CONFIG_UTS_NS=y
+CONFIG_IPC_NS=y
+CONFIG_USER_NS=y
+CONFIG_PID_NS=y
+CONFIG_NET_NS=y
+# CONFIG_SCHED_AUTOGROUP is not set
+CONFIG_RELAY=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_RD_XZ=y
+CONFIG_RD_LZO=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EXPERT=y
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_PERF_EVENTS=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_TRACEPOINTS=y
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_KRETPROBES=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+CONFIG_BLK_DEV_BSG=y
+CONFIG_BLK_DEV_BSGLIB=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MSDOS_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_LDM_PARTITION=y
+CONFIG_EFI_PARTITION=y
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_CFQ=y
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_FREEZER=y
+CONFIG_TICK_ONESHOT=y
+CONFIG_VMSPLIT_3G=y
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_HZ=128
+CONFIG_AEABI=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
+CONFIG_CLEANCACHE=y
+CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_LEDS=y
+CONFIG_ALIGNMENT_TRAP=y
+CONFIG_SECCOMP=y
+CONFIG_CC_STACKPROTECTOR=y
+CONFIG_USE_OF=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_KEXEC=y
+CONFIG_ATAGS_PROC=y
+CONFIG_CRASH_DUMP=y
+CONFIG_CPU_FREQ_TABLE=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_AOUT=m
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_PM_SLEEP=y
+CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
+CONFIG_PM_OPP=y
+CONFIG_PM_CLK=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM=y
+CONFIG_XFRM_IPCOMP=m
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_FIB_TRIE_STATS=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_ROUTE_CLASSID=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_TUNNEL=m
+CONFIG_INET_TUNNEL=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_DIAG=m
+CONFIG_INET_TCP_DIAG=m
+CONFIG_INET_UDP_DIAG=m
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_BIC=m
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_TCP_CONG_WESTWOOD=m
+CONFIG_TCP_CONG_HTCP=m
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_VEGAS=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_TCP_CONG_ILLINOIS=m
+CONFIG_DEFAULT_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_TUNNEL=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_SIT_6RD=y
+CONFIG_IPV6_NDISC_NODETYPE=y
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETLABEL=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER_ADVANCED=y
+CONFIG_BRIDGE_NETFILTER=y
+CONFIG_NETFILTER_NETLINK=m
+CONFIG_NETFILTER_NETLINK_ACCT=m
+CONFIG_NETFILTER_NETLINK_QUEUE=m
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_MARK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_ZONES=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_GRE=m
+CONFIG_NF_CT_PROTO_SCTP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_BROADCAST=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XTABLES=m
+CONFIG_NETFILTER_XT_MARK=m
+CONFIG_NETFILTER_XT_CONNMARK=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_AUDIT=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HL=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LED=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_RATEEST=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ECN=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_HL=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SCTP=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_MAX=256
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_TAB_BITS=12
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_AH_ESP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_PROTO_SCTP=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_IP_VS_SH_TAB_BITS=8
+CONFIG_IP_VS_FTP=m
+CONFIG_IP_VS_NFCT=y
+CONFIG_IP_VS_PE_SIP=m
+CONFIG_NF_DEFRAG_IPV4=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_NF_NAT=m
+CONFIG_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_NF_NAT_SNMP_BASIC=m
+CONFIG_NF_NAT_PROTO_DCCP=m
+CONFIG_NF_NAT_PROTO_GRE=m
+CONFIG_NF_NAT_PROTO_UDPLITE=m
+CONFIG_NF_NAT_PROTO_SCTP=m
+CONFIG_NF_NAT_FTP=m
+CONFIG_NF_NAT_IRC=m
+CONFIG_NF_NAT_TFTP=m
+CONFIG_NF_NAT_AMANDA=m
+CONFIG_NF_NAT_PPTP=m
+CONFIG_NF_NAT_H323=m
+CONFIG_NF_NAT_SIP=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_DEFRAG_IPV6=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_QUEUE=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_DECNET_NF_GRABULATOR=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_ULOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_DCCP=m
+CONFIG_INET_DCCP_DIAG=m
+CONFIG_IP_DCCP_CCID3=y
+CONFIG_IP_DCCP_TFRC_LIB=y
+CONFIG_NET_DCCPPROBE=m
+CONFIG_IP_SCTP=m
+CONFIG_NET_SCTPPROBE=m
+CONFIG_SCTP_HMAC_MD5=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_TIPC=m
+CONFIG_ATM=m
+CONFIG_ATM_CLIP=m
+CONFIG_ATM_LANE=m
+CONFIG_ATM_MPOA=m
+CONFIG_ATM_BR2684=m
+CONFIG_ATM_BR2684_IPFILTER=y
+CONFIG_L2TP=m
+CONFIG_L2TP_DEBUGFS=m
+CONFIG_STP=m
+CONFIG_GARP=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_IGMP_SNOOPING=y
+CONFIG_NET_DSA=y
+CONFIG_NET_DSA_TAG_DSA=y
+CONFIG_NET_DSA_TAG_EDSA=y
+CONFIG_NET_DSA_TAG_TRAILER=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_DECNET=m
+CONFIG_LLC=m
+CONFIG_LLC2=m
+CONFIG_IPX=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_X25=m
+CONFIG_LAPB=m
+CONFIG_WAN_ROUTER=m
+CONFIG_PHONET=m
+CONFIG_IEEE802154=m
+CONFIG_IEEE802154_6LOWPAN=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_ATM=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_CHOKE=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_CLS=y
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_STACK=32
+CONFIG_NET_EMATCH_CMP=m
+CONFIG_NET_EMATCH_NBYTE=m
+CONFIG_NET_EMATCH_U32=m
+CONFIG_NET_EMATCH_META=m
+CONFIG_NET_EMATCH_TEXT=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_SCH_FIFO=y
+CONFIG_DCB=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_BATMAN_ADV=m
+CONFIG_OPENVSWITCH=m
+CONFIG_BQL=y
+CONFIG_BPF_JIT=y
+CONFIG_NET_PKTGEN=m
+CONFIG_NET_TCPPROBE=m
+CONFIG_HAMRADIO=y
+CONFIG_AX25=m
+CONFIG_AX25_DAMA_SLAVE=y
+CONFIG_NETROM=m
+CONFIG_ROSE=m
+CONFIG_MKISS=m
+CONFIG_6PACK=m
+CONFIG_BPQETHER=m
+CONFIG_BAYCOM_SER_FDX=m
+CONFIG_BAYCOM_SER_HDX=m
+CONFIG_BAYCOM_PAR=m
+CONFIG_BAYCOM_EPP=m
+CONFIG_YAM=m
+CONFIG_CAN=m
+CONFIG_CAN_RAW=m
+CONFIG_CAN_BCM=m
+CONFIG_CAN_GW=m
+CONFIG_CAN_VCAN=m
+CONFIG_CAN_SLCAN=m
+CONFIG_CAN_DEV=m
+CONFIG_CAN_CALC_BITTIMING=y
+CONFIG_CAN_MCP251X=m
+CONFIG_CAN_SJA1000=m
+CONFIG_CAN_SJA1000_ISA=m
+CONFIG_CAN_SJA1000_PLATFORM=m
+CONFIG_CAN_C_CAN=m
+CONFIG_CAN_C_CAN_PLATFORM=m
+CONFIG_CAN_CC770=m
+CONFIG_CAN_CC770_ISA=m
+CONFIG_CAN_CC770_PLATFORM=m
+CONFIG_CAN_EMS_USB=m
+CONFIG_CAN_ESD_USB2=m
+CONFIG_CAN_PEAK_USB=m
+CONFIG_CAN_SOFTING=m
+CONFIG_IRDA=m
+CONFIG_IRLAN=m
+CONFIG_IRNET=m
+CONFIG_IRCOMM=m
+CONFIG_IRDA_ULTRA=y
+CONFIG_IRDA_CACHE_LAST_LSAP=y
+CONFIG_IRDA_FAST_RR=y
+CONFIG_IRDA_DEBUG=y
+CONFIG_IRTTY_SIR=m
+CONFIG_DONGLE=y
+CONFIG_ESI_DONGLE=m
+CONFIG_ACTISYS_DONGLE=m
+CONFIG_TEKRAM_DONGLE=m
+CONFIG_TOIM3232_DONGLE=m
+CONFIG_LITELINK_DONGLE=m
+CONFIG_MA600_DONGLE=m
+CONFIG_GIRBIL_DONGLE=m
+CONFIG_MCP2120_DONGLE=m
+CONFIG_OLD_BELKIN_DONGLE=m
+CONFIG_ACT200L_DONGLE=m
+CONFIG_KINGSUN_DONGLE=m
+CONFIG_KSDAZZLE_DONGLE=m
+CONFIG_KS959_DONGLE=m
+CONFIG_USB_IRDA=m
+CONFIG_SIGMATEL_FIR=m
+CONFIG_MCS_FIR=m
+CONFIG_BT=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_CMTP=m
+CONFIG_BT_HIDP=m
+CONFIG_BT_HCIBTUSB=m
+CONFIG_BT_HCIBTSDIO=m
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_ATH3K=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIBCM203X=m
+CONFIG_BT_HCIBPA10X=m
+CONFIG_BT_HCIBFUSB=m
+CONFIG_BT_HCIVHCI=m
+CONFIG_BT_MRVL=m
+CONFIG_BT_MRVL_SDIO=m
+CONFIG_BT_ATH3K=m
+CONFIG_BT_WILINK=m
+CONFIG_AF_RXRPC=m
+CONFIG_RXKAD=m
+CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
+CONFIG_WEXT_SPY=y
+CONFIG_WEXT_PRIV=y
+CONFIG_CFG80211_REG_DEBUG=y
+CONFIG_CFG80211_DEFAULT_PS=y
+CONFIG_CFG80211_DEBUGFS=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+CONFIG_LIB80211_CRYPT_WEP=m
+CONFIG_LIB80211_CRYPT_CCMP=m
+CONFIG_LIB80211_CRYPT_TKIP=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_MESH=y
+CONFIG_MAC80211_LEDS=y
+CONFIG_MAC80211_DEBUGFS=y
+CONFIG_MAC80211_DEBUG_MENU=y
+CONFIG_WIMAX=m
+CONFIG_WIMAX_DEBUG_LEVEL=8
+CONFIG_RFKILL=y
+CONFIG_RFKILL_LEDS=y
+CONFIG_RFKILL_INPUT=y
+CONFIG_RFKILL_REGULATOR=m
+CONFIG_RFKILL_GPIO=m
+CONFIG_NET_9P=m
+CONFIG_CAIF=m
+CONFIG_CAIF_NETDEV=m
+CONFIG_CAIF_USB=m
+CONFIG_CEPH_LIB=m
+CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y
+CONFIG_NFC=m
+CONFIG_NFC_NCI=m
+CONFIG_PN544_NFC=m
+CONFIG_NFC_PN533=m
+CONFIG_NFC_WILINK=m
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+CONFIG_REGMAP=y
+CONFIG_REGMAP_I2C=y
+CONFIG_REGMAP_SPI=y
+CONFIG_SPI=y
+CONFIG_DMA_SHARED_BUFFER=y
+CONFIG_PROC_EVENTS=y
+CONFIG_MTD_REDBOOT_PARTS=m
+CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
+CONFIG_MTD_AFS_PARTS=m
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_AR7_PARTS=m
+CONFIG_HAVE_MTD_OTP=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_FTL=m
+CONFIG_NFTL=m
+CONFIG_NFTL_RW=y
+CONFIG_INFTL=m
+CONFIG_RFD_FTL=m
+CONFIG_SSFDC=m
+CONFIG_SM_FTL=m
+CONFIG_MTD_SWAP=m
+CONFIG_MTD_JEDECPROBE=m
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+CONFIG_MTD_CFI_AMDSTD=m
+CONFIG_MTD_CFI_STAA=m
+CONFIG_MTD_RAM=m
+CONFIG_MTD_ROM=m
+CONFIG_MTD_ABSENT=m
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=m
+CONFIG_MTD_PHYSMAP_OF=m
+CONFIG_MTD_IMPA7=m
+CONFIG_MTD_GPIO_ADDR=m
+CONFIG_MTD_PLATRAM=m
+CONFIG_MTD_LATCH_ADDR=m
+CONFIG_MTD_DATAFLASH=m
+CONFIG_MTD_DATAFLASH_OTP=y
+CONFIG_MTD_M25P80=m
+CONFIG_M25PXX_USE_FAST_READ=y
+CONFIG_MTD_SST25L=m
+CONFIG_MTD_SLRAM=m
+CONFIG_MTD_PHRAM=m
+CONFIG_MTD_MTDRAM=m
+CONFIG_MTDRAM_TOTAL_SIZE=4096
+CONFIG_MTDRAM_ERASE_SIZE=128
+CONFIG_MTD_BLOCK2MTD=m
+CONFIG_MTD_DOC2000=m
+CONFIG_MTD_DOC2001=m
+CONFIG_MTD_DOC2001PLUS=m
+CONFIG_MTD_DOCG3=m
+CONFIG_BCH_CONST_M=14
+CONFIG_BCH_CONST_T=4
+CONFIG_MTD_ONENAND=m
+CONFIG_MTD_DOCPROBE=m
+CONFIG_MTD_DOCECC=m
+CONFIG_MTD_DOCPROBE_ADDRESS=0x0
+CONFIG_MTD_NAND_ECC=y
+CONFIG_MTD_NAND_BCH=y
+CONFIG_MTD_NAND_ECC_BCH=y
+CONFIG_MTD_NAND_GPIO=m
+CONFIG_MTD_NAND_IDS=y
+CONFIG_MTD_NAND_DISKONCHIP=m
+CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
+CONFIG_MTD_NAND_DOCG4=m
+CONFIG_MTD_NAND_PLATFORM=m
+CONFIG_MTD_ALAUDA=m
+CONFIG_MTD_ONENAND_GENERIC=m
+CONFIG_MTD_ONENAND_2X_PROGRAM=y
+CONFIG_MTD_ONENAND_SIM=m
+CONFIG_MTD_LPDDR=m
+CONFIG_MTD_QINFO_PROBE=m
+CONFIG_DTC=y
+CONFIG_OF=y
+CONFIG_OF_FLATTREE=y
+CONFIG_OF_EARLY_FLATTREE=y
+CONFIG_OF_ADDRESS=y
+CONFIG_OF_IRQ=y
+CONFIG_OF_DEVICE=y
+CONFIG_OF_GPIO=y
+CONFIG_OF_I2C=y
+CONFIG_OF_NET=y
+CONFIG_OF_SPI=y
+CONFIG_OF_MDIO=y
+CONFIG_OF_MTD=y
+CONFIG_PARPORT=m
+CONFIG_PARPORT_AX88796=m
+CONFIG_PARPORT_1284=y
+CONFIG_PARPORT_NOT_PC=y
+CONFIG_BLK_DEV=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_UB=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+CONFIG_ATA_OVER_ETH=m
+CONFIG_MG_DISK=m
+CONFIG_MG_DISK_RES=0
+CONFIG_BLK_DEV_RBD=m
+CONFIG_SENSORS_LIS3LV02D=m
+CONFIG_AD525X_DPOT=m
+CONFIG_AD525X_DPOT_I2C=m
+CONFIG_AD525X_DPOT_SPI=m
+CONFIG_ICS932S401=m
+CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_APDS9802ALS=m
+CONFIG_ISL29003=m
+CONFIG_ISL29020=m
+CONFIG_SENSORS_TSL2550=m
+CONFIG_SENSORS_BH1780=m
+CONFIG_SENSORS_BH1770=m
+CONFIG_SENSORS_APDS990X=m
+CONFIG_HMC6352=m
+CONFIG_DS1682=m
+CONFIG_USB_SWITCH_FSA9480=m
+CONFIG_C2PORT=m
+CONFIG_EEPROM_AT24=m
+CONFIG_EEPROM_AT25=m
+CONFIG_EEPROM_LEGACY=m
+CONFIG_EEPROM_MAX6875=m
+CONFIG_EEPROM_93XX46=m
+CONFIG_IWMC3200TOP=m
+CONFIG_SENSORS_LIS3_SPI=m
+CONFIG_SENSORS_LIS3_I2C=m
+CONFIG_SCSI_MOD=y
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI_DMA=y
+CONFIG_SCSI_TGT=m
+CONFIG_SCSI_NETLINK=y
+CONFIG_SCSI_PROC_FS=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_ENCLOSURE=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_WAIT_SCAN=m
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_SCSI_FC_TGT_ATTRS=y
+CONFIG_SCSI_ISCSI_ATTRS=m
+CONFIG_SCSI_SAS_ATTRS=m
+CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_SAS_HOST_SMP=y
+CONFIG_SCSI_SRP_ATTRS=m
+CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_LOWLEVEL=y
+CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
+CONFIG_LIBFC=m
+CONFIG_LIBFCOE=m
+CONFIG_SCSI_DEBUG=m
+CONFIG_SCSI_DH=y
+CONFIG_SCSI_DH_RDAC=m
+CONFIG_SCSI_DH_HP_SW=m
+CONFIG_SCSI_DH_EMC=m
+CONFIG_SCSI_DH_ALUA=m
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_AUTODETECT=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_DM_BUFIO=m
+CONFIG_DM_PERSISTENT_DATA=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
+CONFIG_LOOPBACK_TARGET=m
+CONFIG_TCM_FC=m
+CONFIG_ISCSI_TARGET=m
+CONFIG_NET_CORE=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_MII=y
+CONFIG_IEEE802154_DRIVERS=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_NETPOLL=y
+CONFIG_NET_POLL_CONTROLLER=y
+CONFIG_TUN=y
+CONFIG_VETH=m
+CONFIG_ATM_DRIVERS=y
+CONFIG_ATM_DUMMY=m
+CONFIG_ATM_TCP=m
+CONFIG_CAIF_TTY=m
+CONFIG_CAIF_SPI_SLAVE=m
+CONFIG_CAIF_HSI=m
+CONFIG_ETHERNET=y
+CONFIG_B44=m
+CONFIG_CS89x0=m
+CONFIG_CS89x0_PLATFORM=y
+CONFIG_DM9000=m
+CONFIG_DNET=m
+CONFIG_MDIO_BITBANG=m
+CONFIG_MDIO_GPIO=m
+CONFIG_PLIP=m
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOATM=m
+CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_SLIP=m
+CONFIG_SLHC=y
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+CONFIG_USB_CATC=m
+CONFIG_USB_KAWETH=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_NET_AX8817X=m
+CONFIG_USB_NET_CDCETHER=m
+CONFIG_USB_NET_CDC_EEM=m
+CONFIG_USB_NET_CDC_NCM=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_GL620A=m
+CONFIG_USB_NET_NET1080=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_USB_NET_RNDIS_HOST=m
+CONFIG_USB_NET_CDC_SUBSET=m
+CONFIG_USB_BELKIN=y
+CONFIG_USB_ARMLINUX=y
+CONFIG_USB_NET_ZAURUS=m
+CONFIG_USB_NET_CX82310_ETH=m
+CONFIG_USB_NET_KALMIA=m
+CONFIG_USB_NET_QMI_WWAN=m
+CONFIG_USB_HSO=m
+CONFIG_USB_NET_INT51X1=m
+CONFIG_USB_CDC_PHONET=m
+CONFIG_USB_IPHETH=m
+CONFIG_USB_SIERRA_NET=m
+CONFIG_USB_VL600=m
+CONFIG_WLAN=y
+CONFIG_LIBERTAS=m
+CONFIG_LIBERTAS_THINFIRM=m
+CONFIG_LIBERTAS_THINFIRM_USB=m
+CONFIG_AT76C50X_USB=m
+CONFIG_USB_ZD1201=m
+CONFIG_USB_NET_RNDIS_WLAN=m
+CONFIG_RTL8187=m
+CONFIG_RTL8187_LEDS=y
+CONFIG_ATH_COMMON=m
+CONFIG_ATH9K_HW=m
+CONFIG_ATH9K_COMMON=m
+CONFIG_ATH9K_BTCOEX_SUPPORT=y
+CONFIG_ATH9K=m
+CONFIG_ATH9K_AHB=y
+CONFIG_ATH9K_DEBUGFS=y
+CONFIG_ATH9K_RATE_CONTROL=y
+CONFIG_ATH9K_HTC=m
+CONFIG_ATH9K_HTC_DEBUGFS=y
+CONFIG_CARL9170=m
+CONFIG_CARL9170_LEDS=y
+CONFIG_CARL9170_WPC=y
+CONFIG_CARL9170_HWRNG=y
+CONFIG_B43=m
+CONFIG_B43_BCMA=y
+# CONFIG_B43_BCMA_EXTRA is not set
+CONFIG_B43_SSB=y
+CONFIG_B43_BCMA_PIO=y
+CONFIG_B43_PIO=y
+CONFIG_B43_PHY_N=y
+CONFIG_B43_PHY_LP=y
+CONFIG_B43_PHY_HT=y
+CONFIG_B43_LEDS=y
+CONFIG_B43_HWRNG=y
+CONFIG_B43LEGACY=m
+CONFIG_B43LEGACY_LEDS=y
+CONFIG_B43LEGACY_HWRNG=y
+CONFIG_B43LEGACY_DEBUG=y
+CONFIG_B43LEGACY_DMA=y
+CONFIG_B43LEGACY_PIO=y
+CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
+CONFIG_BRCMUTIL=m
+CONFIG_BRCMSMAC=m
+CONFIG_BRCMFMAC=m
+CONFIG_BRCMFMAC_SDIO=y
+CONFIG_BRCMFMAC_USB=y
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_IWM=m
+CONFIG_IWM_TRACING=y
+CONFIG_LIBERTAS_SPI=m
+CONFIG_P54_COMMON=m
+CONFIG_P54_USB=m
+CONFIG_P54_SPI=m
+CONFIG_P54_LEDS=y
+CONFIG_RT2X00=m
+CONFIG_RT2500USB=m
+CONFIG_RT73USB=m
+CONFIG_RT2800USB=m
+CONFIG_RT2800USB_RT33XX=y
+CONFIG_RT2800_LIB=m
+CONFIG_RT2X00_LIB_USB=m
+CONFIG_RT2X00_LIB=m
+CONFIG_RT2X00_LIB_FIRMWARE=y
+CONFIG_RT2X00_LIB_CRYPTO=y
+CONFIG_RT2X00_LIB_LEDS=y
+CONFIG_RT2X00_LIB_DEBUGFS=y
+CONFIG_RTL8192CU=m
+CONFIG_RTLWIFI=m
+CONFIG_RTL8192C_COMMON=m
+CONFIG_WL1251=m
+CONFIG_WL1251_SPI=m
+CONFIG_WL1251_SDIO=m
+CONFIG_WL12XX_MENU=m
+CONFIG_WL12XX=m
+CONFIG_WL12XX_SPI=m
+CONFIG_WL12XX_SDIO=m
+CONFIG_WL12XX_PLATFORM_DATA=y
+CONFIG_ZD1211RW=m
+CONFIG_MWIFIEX=m
+CONFIG_MWIFIEX_SDIO=m
+CONFIG_WAN=y
+CONFIG_HDLC=m
+CONFIG_HDLC_RAW=m
+CONFIG_HDLC_RAW_ETH=m
+CONFIG_HDLC_CISCO=m
+CONFIG_HDLC_FR=m
+CONFIG_HDLC_PPP=m
+CONFIG_HDLC_X25=m
+CONFIG_DLCI=m
+CONFIG_DLCI_MAX=8
+CONFIG_WAN_ROUTER_DRIVERS=m
+CONFIG_LAPBETHER=m
+CONFIG_ISDN=y
+CONFIG_ISDN_I4L=m
+CONFIG_ISDN_PPP=y
+CONFIG_ISDN_PPP_VJ=y
+CONFIG_ISDN_MPP=y
+CONFIG_IPPP_FILTER=y
+CONFIG_ISDN_PPP_BSDCOMP=m
+CONFIG_ISDN_AUDIO=y
+CONFIG_ISDN_TTY_FAX=y
+CONFIG_ISDN_X25=y
+CONFIG_ISDN_DIVERSION=m
+CONFIG_ISDN_DRV_HISAX=m
+CONFIG_ISDN_CAPI=m
+CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y
+CONFIG_CAPI_TRACE=y
+CONFIG_ISDN_CAPI_MIDDLEWARE=y
+CONFIG_ISDN_CAPI_CAPI20=m
+CONFIG_ISDN_CAPI_CAPIDRV=m
+CONFIG_CAPI_AVM=y
+CONFIG_CAPI_EICON=y
+CONFIG_ISDN_DRV_GIGASET=m
+CONFIG_GIGASET_I4L=y
+CONFIG_GIGASET_BASE=m
+CONFIG_GIGASET_M105=m
+CONFIG_GIGASET_M101=m
+CONFIG_MISDN=m
+CONFIG_MISDN_DSP=m
+CONFIG_MISDN_L1OIP=m
+CONFIG_MISDN_HFCUSB=m
+CONFIG_INPUT=y
+CONFIG_INPUT_FF_MEMLESS=m
+CONFIG_INPUT_POLLDEV=m
+CONFIG_INPUT_SPARSEKMAP=m
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+CONFIG_INPUT_EVBUG=m
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ADP5588=m
+CONFIG_KEYBOARD_ADP5589=m
+CONFIG_KEYBOARD_ATKBD=y
+CONFIG_KEYBOARD_QT1070=m
+CONFIG_KEYBOARD_LKKBD=m
+CONFIG_KEYBOARD_TCA6416=m
+CONFIG_KEYBOARD_TCA8418=m
+CONFIG_KEYBOARD_MATRIX=m
+CONFIG_KEYBOARD_LM8323=m
+CONFIG_KEYBOARD_MAX7359=m
+CONFIG_KEYBOARD_MCS=m
+CONFIG_KEYBOARD_MPR121=m
+CONFIG_KEYBOARD_NEWTON=m
+CONFIG_KEYBOARD_OPENCORES=m
+CONFIG_KEYBOARD_SAMSUNG=m
+CONFIG_KEYBOARD_STOWAWAY=m
+CONFIG_KEYBOARD_SUNKBD=m
+CONFIG_KEYBOARD_STMPE=m
+CONFIG_KEYBOARD_XTKBD=m
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=m
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+CONFIG_MOUSE_PS2_ELANTECH=y
+CONFIG_MOUSE_PS2_SENTELIC=y
+CONFIG_MOUSE_SERIAL=m
+CONFIG_MOUSE_APPLETOUCH=m
+CONFIG_MOUSE_BCM5974=m
+CONFIG_MOUSE_VSXXXAA=m
+CONFIG_MOUSE_GPIO=m
+CONFIG_MOUSE_SYNAPTICS_I2C=m
+CONFIG_MOUSE_SYNAPTICS_USB=m
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_ANALOG=m
+CONFIG_JOYSTICK_INTERACT=m
+CONFIG_JOYSTICK_SIDEWINDER=m
+CONFIG_JOYSTICK_WARRIOR=m
+CONFIG_JOYSTICK_MAGELLAN=m
+CONFIG_JOYSTICK_GAMECON=m
+CONFIG_JOYSTICK_TURBOGRAFX=m
+CONFIG_JOYSTICK_JOYDUMP=m
+CONFIG_JOYSTICK_XPAD=m
+CONFIG_JOYSTICK_XPAD_FF=y
+CONFIG_JOYSTICK_XPAD_LEDS=y
+CONFIG_JOYSTICK_WALKERA0701=m
+CONFIG_INPUT_TABLET=y
+CONFIG_TABLET_USB_ACECAD=m
+CONFIG_TABLET_USB_AIPTEK=m
+CONFIG_TABLET_USB_GTCO=m
+CONFIG_TABLET_USB_HANWANG=m
+CONFIG_TABLET_USB_KBTAB=m
+CONFIG_TABLET_USB_WACOM=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_88PM860X_ONKEY=m
+CONFIG_INPUT_AD714X=m
+CONFIG_INPUT_AD714X_I2C=m
+CONFIG_INPUT_AD714X_SPI=m
+CONFIG_INPUT_BMA150=m
+CONFIG_INPUT_MMA8450=m
+CONFIG_INPUT_MPU3050=m
+CONFIG_INPUT_GP2A=m
+CONFIG_INPUT_GPIO_TILT_POLLED=m
+CONFIG_INPUT_ATI_REMOTE2=m
+CONFIG_INPUT_KEYSPAN_REMOTE=m
+CONFIG_INPUT_KXTJ9=m
+CONFIG_INPUT_POWERMATE=m
+CONFIG_INPUT_YEALINK=m
+CONFIG_INPUT_CM109=m
+CONFIG_INPUT_TWL4030_VIBRA=m
+CONFIG_INPUT_TWL6040_VIBRA=m
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_PCF8574=m
+CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
+CONFIG_INPUT_ADXL34X=m
+CONFIG_INPUT_ADXL34X_I2C=m
+CONFIG_INPUT_ADXL34X_SPI=m
+CONFIG_INPUT_CMA3000=m
+CONFIG_INPUT_CMA3000_I2C=m
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=m
+CONFIG_SERIO_PARKBD=m
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_RAW=m
+CONFIG_SERIO_ALTERA_PS2=m
+CONFIG_SERIO_PS2MULT=m
+CONFIG_GAMEPORT=m
+CONFIG_GAMEPORT_NS558=m
+CONFIG_GAMEPORT_L4=m
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_VT_CONSOLE_SLEEP=y
+CONFIG_HW_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_N_HDLC=m
+CONFIG_TRACE_ROUTER=m
+CONFIG_TRACE_SINK=m
+CONFIG_STALDRV=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_RUNTIME_UARTS=32
+CONFIG_SERIAL_8250_DW=m
+CONFIG_SERIAL_MAX3100=m
+CONFIG_SERIAL_MAX3107=m
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_CONSOLE_POLL=y
+CONFIG_SERIAL_OF_PLATFORM=m
+CONFIG_SERIAL_TIMBERDALE=m
+CONFIG_SERIAL_ALTERA_JTAGUART=m
+CONFIG_SERIAL_ALTERA_UART=m
+CONFIG_SERIAL_ALTERA_UART_MAXPORTS=4
+CONFIG_SERIAL_ALTERA_UART_BAUDRATE=115200
+CONFIG_SERIAL_XILINX_PS_UART=m
+CONFIG_TTY_PRINTK=y
+CONFIG_PRINTER=m
+CONFIG_PPDEV=m
+CONFIG_HVC_DRIVER=y
+CONFIG_HVC_DCC=y
+CONFIG_IPMI_HANDLER=m
+CONFIG_IPMI_DEVICE_INTERFACE=m
+CONFIG_IPMI_SI=m
+CONFIG_IPMI_WATCHDOG=m
+CONFIG_IPMI_POWEROFF=m
+CONFIG_HW_RANDOM_TIMERIOMEM=m
+CONFIG_NVRAM=m
+CONFIG_RAW_DRIVER=m
+CONFIG_MAX_RAW_DEVS=256
+CONFIG_RAMOOPS=m
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_MUX=m
+CONFIG_I2C_MUX_GPIO=m
+CONFIG_I2C_MUX_PCA9541=m
+CONFIG_I2C_MUX_PCA954x=m
+CONFIG_I2C_SMBUS=m
+CONFIG_I2C_ALGOBIT=m
+CONFIG_I2C_ALGOPCF=m
+CONFIG_I2C_ALGOPCA=m
+CONFIG_I2C_DESIGNWARE_PLATFORM=m
+CONFIG_I2C_GPIO=m
+CONFIG_I2C_OCORES=m
+CONFIG_I2C_PCA_PLATFORM=m
+CONFIG_I2C_SIMTEC=m
+CONFIG_I2C_XILINX=m
+CONFIG_I2C_DIOLAN_U2C=m
+CONFIG_I2C_PARPORT=m
+CONFIG_I2C_PARPORT_LIGHT=m
+CONFIG_I2C_TAOS_EVM=m
+CONFIG_I2C_TINY_USB=m
+CONFIG_I2C_STUB=m
+CONFIG_SPI_MASTER=y
+CONFIG_SPI_BITBANG=m
+CONFIG_SPI_BUTTERFLY=m
+CONFIG_SPI_GPIO=m
+CONFIG_SPI_LM70_LLP=m
+CONFIG_SPI_OC_TINY=m
+CONFIG_SPI_DESIGNWARE=m
+CONFIG_SPI_TLE62X0=m
+CONFIG_HSI=m
+CONFIG_HSI_BOARDINFO=y
+CONFIG_HSI_CHAR=m
+CONFIG_PPS=m
+CONFIG_PPS_CLIENT_PARPORT=m
+CONFIG_PPS_CLIENT_GPIO=m
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_GENERIC=m
+CONFIG_GPIO_GENERIC_PLATFORM=m
+CONFIG_POWER_SUPPLY=y
+CONFIG_TEST_POWER=m
+CONFIG_HWMON=y
+CONFIG_HWMON_VID=m
+CONFIG_THERMAL=y
+CONFIG_THERMAL_HWMON=y
+CONFIG_WATCHDOG_CORE=y
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_SSB_POSSIBLE=y
+CONFIG_SSB=m
+CONFIG_SSB_BLOCKIO=y
+CONFIG_SSB_SDIOHOST_POSSIBLE=y
+CONFIG_SSB_SDIOHOST=y
+CONFIG_BCMA_POSSIBLE=y
+CONFIG_BCMA=m
+CONFIG_BCMA_BLOCKIO=y
+CONFIG_MFD_CORE=y
+CONFIG_MFD_88PM860X=y
+CONFIG_MFD_SM501=m
+CONFIG_HTC_EGPIO=y
+CONFIG_HTC_PASIC3=m
+CONFIG_HTC_I2CPLD=y
+CONFIG_MFD_STMPE=y
+CONFIG_STMPE_I2C=y
+CONFIG_STMPE_SPI=y
+CONFIG_MFD_WL1273_CORE=m
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_VIRTUAL_CONSUMER=m
+CONFIG_REGULATOR_USERSPACE_CONSUMER=m
+CONFIG_REGULATOR_GPIO=m
+CONFIG_DVB_CORE=m
+CONFIG_DVB_NET=y
+CONFIG_VIDEO_MEDIA=m
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_VIDEO_DEV=m
+CONFIG_RC_CORE=m
+CONFIG_LIRC=m
+CONFIG_LIRC_SERIAL_TRANSMITTER=y
+CONFIG_RC_MAP=m
+CONFIG_IR_NEC_DECODER=m
+CONFIG_IR_JVC_DECODER=m
+CONFIG_IR_SONY_DECODER=m
+CONFIG_IR_SANYO_DECODER=m
+CONFIG_IR_LIRC_CODEC=m
+CONFIG_RC_ATI_REMOTE=m
+CONFIG_IR_IMON=m
+CONFIG_IR_MCEUSB=m
+CONFIG_IR_REDRAT3=m
+CONFIG_IR_STREAMZAP=m
+CONFIG_RC_LOOPBACK=m
+CONFIG_IR_GPIO_CIR=m
+CONFIG_MEDIA_ATTACH=y
+CONFIG_MEDIA_TUNER=m
+CONFIG_MEDIA_TUNER_SIMPLE=m
+CONFIG_VIDEOBUF_GEN=m
+CONFIG_VIDEOBUF_VMALLOC=m
+CONFIG_VIDEOBUF_DMA_CONTIG=m
+CONFIG_VIDEOBUF_DVB=m
+CONFIG_VIDEO_TVEEPROM=m
+CONFIG_VIDEO_TUNER=m
+CONFIG_V4L2_MEM2MEM_DEV=m
+CONFIG_VIDEOBUF2_DMA_CONTIG=m
+CONFIG_VIDEO_CAPTURE_DRIVERS=y
+CONFIG_VIDEO_IR_I2C=m
+CONFIG_VIDEO_TVAUDIO=m
+CONFIG_V4L_USB_DRIVERS=y
+CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
+CONFIG_USB_GSPCA=m
+CONFIG_USB_M5602=m
+CONFIG_USB_STV06XX=m
+CONFIG_USB_GL860=m
+CONFIG_USB_GSPCA_BENQ=m
+CONFIG_USB_GSPCA_CONEX=m
+CONFIG_USB_GSPCA_CPIA1=m
+CONFIG_USB_GSPCA_ETOMS=m
+CONFIG_USB_GSPCA_FINEPIX=m
+CONFIG_USB_GSPCA_JEILINJ=m
+CONFIG_USB_GSPCA_JL2005BCD=m
+CONFIG_USB_GSPCA_KINECT=m
+CONFIG_USB_GSPCA_KONICA=m
+CONFIG_USB_GSPCA_MARS=m
+CONFIG_USB_GSPCA_MR97310A=m
+CONFIG_USB_GSPCA_NW80X=m
+CONFIG_USB_GSPCA_OV519=m
+CONFIG_USB_GSPCA_OV534=m
+CONFIG_USB_GSPCA_OV534_9=m
+CONFIG_USB_GSPCA_PAC207=m
+CONFIG_USB_GSPCA_PAC7302=m
+CONFIG_USB_GSPCA_PAC7311=m
+CONFIG_USB_GSPCA_SE401=m
+CONFIG_USB_GSPCA_SN9C2028=m
+CONFIG_USB_GSPCA_SN9C20X=m
+CONFIG_USB_GSPCA_SONIXB=m
+CONFIG_USB_GSPCA_SONIXJ=m
+CONFIG_USB_GSPCA_SPCA500=m
+CONFIG_USB_GSPCA_SPCA501=m
+CONFIG_USB_GSPCA_SPCA505=m
+CONFIG_USB_GSPCA_SPCA506=m
+CONFIG_USB_GSPCA_SPCA508=m
+CONFIG_USB_GSPCA_SPCA561=m
+CONFIG_USB_GSPCA_SPCA1528=m
+CONFIG_USB_GSPCA_SQ905=m
+CONFIG_USB_GSPCA_SQ905C=m
+CONFIG_USB_GSPCA_SQ930X=m
+CONFIG_USB_GSPCA_STK014=m
+CONFIG_USB_GSPCA_STV0680=m
+CONFIG_USB_GSPCA_SUNPLUS=m
+CONFIG_USB_GSPCA_T613=m
+CONFIG_USB_GSPCA_TOPRO=m
+CONFIG_USB_GSPCA_TV8532=m
+CONFIG_USB_GSPCA_VC032X=m
+CONFIG_USB_GSPCA_VICAM=m
+CONFIG_USB_GSPCA_XIRLINK_CIT=m
+CONFIG_USB_GSPCA_ZC3XX=m
+CONFIG_VIDEO_PVRUSB2=m
+CONFIG_VIDEO_PVRUSB2_SYSFS=y
+CONFIG_VIDEO_PVRUSB2_DVB=y
+CONFIG_VIDEO_HDPVR=m
+CONFIG_VIDEO_EM28XX=m
+CONFIG_VIDEO_EM28XX_ALSA=m
+CONFIG_VIDEO_EM28XX_DVB=m
+CONFIG_VIDEO_EM28XX_RC=y
+CONFIG_VIDEO_TLG2300=m
+CONFIG_VIDEO_CX231XX=m
+CONFIG_VIDEO_CX231XX_RC=y
+CONFIG_VIDEO_CX231XX_ALSA=m
+CONFIG_VIDEO_CX231XX_DVB=m
+CONFIG_VIDEO_TM6000=m
+CONFIG_VIDEO_TM6000_ALSA=m
+CONFIG_VIDEO_TM6000_DVB=m
+CONFIG_VIDEO_USBVISION=m
+CONFIG_USB_PWC=m
+CONFIG_USB_PWC_INPUT_EVDEV=y
+CONFIG_VIDEO_CPIA2=m
+CONFIG_USB_ZR364XX=m
+CONFIG_USB_STKWEBCAM=m
+CONFIG_USB_S2255=m
+CONFIG_V4L_ISA_PARPORT_DRIVERS=y
+CONFIG_VIDEO_BWQCAM=m
+CONFIG_VIDEO_CQCAM=m
+CONFIG_VIDEO_W9966=m
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_VIDEO_TIMBERDALE=m
+CONFIG_SOC_CAMERA=m
+CONFIG_SOC_CAMERA_PLATFORM=m
+CONFIG_VIDEO_SH_MOBILE_CSI2=m
+CONFIG_VIDEO_SH_MOBILE_CEU=m
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_MEM2MEM_TESTDEV=m
+CONFIG_RADIO_ADAPTERS=y
+CONFIG_RADIO_SI470X=y
+CONFIG_USB_SI470X=m
+CONFIG_I2C_SI470X=m
+CONFIG_USB_MR800=m
+CONFIG_USB_DSBR=m
+CONFIG_I2C_SI4713=m
+CONFIG_RADIO_SI4713=m
+CONFIG_USB_KEENE=m
+CONFIG_RADIO_WL1273=m
+CONFIG_RADIO_WL128X=m
+CONFIG_DVB_MAX_ADAPTERS=8
+CONFIG_DVB_DYNAMIC_MINORS=y
+CONFIG_DVB_CAPTURE_DRIVERS=y
+CONFIG_TTPCI_EEPROM=m
+CONFIG_DVB_USB=m
+CONFIG_SMS_SIANO_MDTV=m
+CONFIG_SMS_USB_DRV=m
+CONFIG_SMS_SDIO_DRV=m
+CONFIG_DVB_B2C2_FLEXCOP=m
+CONFIG_DVB_B2C2_FLEXCOP_USB=m
+CONFIG_DVB_FE_CUSTOMISE=y
+CONFIG_DVB_PLL=m
+CONFIG_DRM=m
+CONFIG_DRM_USB=m
+CONFIG_DRM_KMS_HELPER=m
+CONFIG_DRM_LOAD_EDID_FIRMWARE=y
+CONFIG_DRM_I2C_CH7006=m
+CONFIG_DRM_I2C_SIL164=m
+CONFIG_DRM_UDL=m
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB_SYS_FILLRECT=m
+CONFIG_FB_SYS_COPYAREA=m
+CONFIG_FB_SYS_IMAGEBLIT=m
+CONFIG_FB_SYS_FOPS=m
+CONFIG_FB_DEFERRED_IO=y
+CONFIG_FB_UVESA=m
+CONFIG_FB_S1D13XXX=m
+CONFIG_FB_TMIO=m
+CONFIG_FB_TMIO_ACCELL=y
+CONFIG_FB_SM501=m
+CONFIG_FB_SMSCUFX=m
+CONFIG_FB_UDL=m
+CONFIG_FB_METRONOME=m
+CONFIG_FB_BROADSHEET=m
+CONFIG_PANEL_LGPHILIPS_LB035Q02=m
+CONFIG_PANEL_SHARP_LS037V7DW01=y
+CONFIG_PANEL_NEC_NL8048HL11_01B=m
+CONFIG_PANEL_PICODLP=m
+CONFIG_PANEL_TPO_TD043MTEA1=y
+CONFIG_LCD_L4F00242T03=m
+CONFIG_LCD_LMS283GF05=m
+CONFIG_LCD_LTV350QV=m
+CONFIG_LCD_ILI9320=m
+CONFIG_LCD_TDO24M=m
+CONFIG_LCD_VGG2432A4=m
+CONFIG_LCD_S6E63M0=m
+CONFIG_LCD_LD9040=m
+CONFIG_LCD_AMS369FG06=m
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_ATMEL_PWM=m
+CONFIG_BACKLIGHT_GENERIC=m
+CONFIG_BACKLIGHT_PWM=m
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_FONT_ACORN_8x8=y
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
+CONFIG_SND_JACK=y
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_PCM_OSS_PLUGINS=y
+CONFIG_SND_HRTIMER=m
+CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_VERBOSE_PROCFS=y
+CONFIG_SND_VMASTER=y
+CONFIG_SND_RAWMIDI_SEQ=m
+CONFIG_SND_MPU401_UART=m
+CONFIG_SND_DRIVERS=y
+CONFIG_SND_DUMMY=m
+CONFIG_SND_ALOOP=m
+CONFIG_SND_VIRMIDI=m
+CONFIG_SND_MTPAV=m
+CONFIG_SND_MTS64=m
+CONFIG_SND_SERIAL_U16550=m
+CONFIG_SND_MPU401=m
+CONFIG_SND_PORTMAN2X4=m
+CONFIG_SND_SPI=y
+CONFIG_SND_USB=y
+CONFIG_SND_USB_UA101=m
+CONFIG_SND_USB_CAIAQ=m
+CONFIG_SND_USB_CAIAQ_INPUT=y
+CONFIG_SND_USB_6FIRE=m
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=m
+CONFIG_HIDRAW=y
+CONFIG_USB_HID=m
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_KBD=m
+CONFIG_USB_MOUSE=m
+CONFIG_HID_A4TECH=m
+CONFIG_HID_ACRUX=m
+CONFIG_HID_ACRUX_FF=y
+CONFIG_HID_APPLE=m
+CONFIG_HID_BELKIN=m
+CONFIG_HID_CHERRY=m
+CONFIG_HID_CHICONY=m
+CONFIG_HID_PRODIKEYS=m
+CONFIG_HID_CYPRESS=m
+CONFIG_HID_DRAGONRISE=m
+CONFIG_DRAGONRISE_FF=y
+CONFIG_HID_EMS_FF=m
+CONFIG_HID_ELECOM=m
+CONFIG_HID_EZKEY=m
+CONFIG_HID_HOLTEK=m
+CONFIG_HOLTEK_FF=y
+CONFIG_HID_KEYTOUCH=m
+CONFIG_HID_KYE=m
+CONFIG_HID_UCLOGIC=m
+CONFIG_HID_WALTOP=m
+CONFIG_HID_GYRATION=m
+CONFIG_HID_TWINHAN=m
+CONFIG_HID_KENSINGTON=m
+CONFIG_HID_LCPOWER=m
+CONFIG_HID_LOGITECH=m
+CONFIG_HID_LOGITECH_DJ=m
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGIG940_FF=y
+CONFIG_LOGIWHEELS_FF=y
+CONFIG_HID_MAGICMOUSE=m
+CONFIG_HID_MICROSOFT=m
+CONFIG_HID_MONTEREY=m
+CONFIG_HID_MULTITOUCH=m
+CONFIG_HID_NTRIG=m
+CONFIG_HID_ORTEK=m
+CONFIG_HID_PANTHERLORD=m
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=m
+CONFIG_HID_PICOLCD=m
+CONFIG_HID_PICOLCD_FB=y
+CONFIG_HID_PICOLCD_BACKLIGHT=y
+CONFIG_HID_PICOLCD_LCD=y
+CONFIG_HID_PICOLCD_LEDS=y
+CONFIG_HID_PRIMAX=m
+CONFIG_HID_ROCCAT=m
+CONFIG_HID_SAITEK=m
+CONFIG_HID_SAMSUNG=m
+CONFIG_HID_SONY=m
+CONFIG_HID_SPEEDLINK=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_HID_GREENASIA=m
+CONFIG_GREENASIA_FF=y
+CONFIG_HID_SMARTJOYPLUS=m
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_HID_TIVO=m
+CONFIG_HID_TOPSEED=m
+CONFIG_HID_THRUSTMASTER=m
+CONFIG_THRUSTMASTER_FF=y
+CONFIG_HID_WACOM=m
+CONFIG_HID_WACOM_POWER_SUPPLY=y
+CONFIG_HID_WIIMOTE=m
+CONFIG_HID_WIIMOTE_EXT=y
+CONFIG_HID_ZEROPLUS=m
+CONFIG_ZEROPLUS_FF=y
+CONFIG_HID_ZYDACRON=m
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_COMMON=y
+CONFIG_USB_OTG=y
+CONFIG_USB_WUSB_CBAF=m
+CONFIG_USB_C67X00_HCD=m
+CONFIG_USB_OXU210HP_HCD=m
+CONFIG_USB_ISP116X_HCD=m
+CONFIG_USB_ISP1760_HCD=m
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+CONFIG_USB_U132_HCD=m
+CONFIG_USB_SL811_HCD=m
+CONFIG_USB_SL811_HCD_ISO=y
+CONFIG_USB_R8A66597_HCD=m
+CONFIG_USB_RENESAS_USBHS_HCD=m
+CONFIG_USB_RENESAS_USBHS=m
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_TMC=m
+CONFIG_USB_STORAGE_REALTEK=m
+CONFIG_REALTEK_AUTOPM=y
+CONFIG_USB_STORAGE_DATAFAB=m
+CONFIG_USB_STORAGE_FREECOM=m
+CONFIG_USB_STORAGE_ISD200=m
+CONFIG_USB_STORAGE_USBAT=m
+CONFIG_USB_STORAGE_SDDR09=m
+CONFIG_USB_STORAGE_SDDR55=m
+CONFIG_USB_STORAGE_JUMPSHOT=m
+CONFIG_USB_STORAGE_ALAUDA=m
+CONFIG_USB_STORAGE_ONETOUCH=m
+CONFIG_USB_STORAGE_KARMA=m
+CONFIG_USB_STORAGE_CYPRESS_ATACB=m
+CONFIG_USB_STORAGE_ENE_UB6250=m
+CONFIG_USB_UAS=m
+CONFIG_USB_MDC800=m
+CONFIG_USB_MICROTEK=m
+CONFIG_USB_USS720=m
+CONFIG_USB_SERIAL=m
+CONFIG_USB_EZUSB=y
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_CP210X=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_QUALCOMM=m
+CONFIG_USB_SERIAL_SPCP8X5=m
+CONFIG_USB_SERIAL_HP4X=m
+CONFIG_USB_SERIAL_TI=m
+CONFIG_USB_SERIAL_DEBUG=m
+CONFIG_USB_EMI62=m
+CONFIG_USB_EMI26=m
+CONFIG_USB_ADUTUX=m
+CONFIG_USB_SEVSEG=m
+CONFIG_USB_RIO500=m
+CONFIG_USB_LEGOTOWER=m
+CONFIG_USB_LCD=m
+CONFIG_USB_LED=m
+CONFIG_USB_CYPRESS_CY7C63=m
+CONFIG_USB_CYTHERM=m
+CONFIG_USB_IDMOUSE=m
+CONFIG_USB_FTDI_ELAN=m
+CONFIG_USB_APPLEDISPLAY=m
+CONFIG_USB_SISUSBVGA=m
+CONFIG_USB_LD=m
+CONFIG_USB_TRANCEVIBRATOR=m
+CONFIG_USB_IOWARRIOR=m
+CONFIG_USB_ISIGHTFW=m
+CONFIG_USB_YUREX=m
+CONFIG_USB_ATM=m
+CONFIG_USB_SPEEDTOUCH=m
+CONFIG_USB_CXACRU=m
+CONFIG_USB_UEAGLEATM=m
+CONFIG_USB_XUSBATM=m
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
+CONFIG_USB_ZERO=m
+CONFIG_USB_AUDIO=m
+CONFIG_GADGET_UAC1=y
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+CONFIG_USB_G_NCM=m
+CONFIG_USB_GADGETFS=m
+CONFIG_USB_FUNCTIONFS=m
+CONFIG_USB_FUNCTIONFS_ETH=y
+CONFIG_USB_FUNCTIONFS_RNDIS=y
+CONFIG_USB_FUNCTIONFS_GENERIC=y
+CONFIG_USB_MASS_STORAGE=m
+CONFIG_USB_G_SERIAL=m
+CONFIG_USB_MIDI_GADGET=m
+CONFIG_USB_G_PRINTER=m
+CONFIG_USB_CDC_COMPOSITE=m
+CONFIG_USB_G_NOKIA=m
+CONFIG_USB_G_ACM_MS=m
+CONFIG_USB_G_MULTI=m
+CONFIG_USB_G_MULTI_RNDIS=y
+CONFIG_USB_G_MULTI_CDC=y
+CONFIG_USB_G_HID=m
+CONFIG_USB_G_DBGP=m
+CONFIG_USB_G_DBGP_SERIAL=y
+CONFIG_USB_G_WEBCAM=m
+CONFIG_USB_OTG_UTILS=y
+CONFIG_USB_GPIO_VBUS=y
+CONFIG_USB_ULPI=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_MINORS=8
+CONFIG_MMC_BLOCK_BOUNCE=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=m
+CONFIG_MMC_SDHCI_PXAV3=m
+CONFIG_MMC_SDHCI_PXAV2=m
+CONFIG_MMC_SPI=m
+CONFIG_MMC_TMIO_CORE=m
+CONFIG_MMC_TMIO=m
+CONFIG_MMC_DW=m
+CONFIG_MMC_DW_PLTFM=m
+CONFIG_MMC_VUB300=m
+CONFIG_MMC_USHC=m
+CONFIG_MEMSTICK=m
+CONFIG_MSPRO_BLOCK=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_PCA9532=m
+CONFIG_LEDS_PCA9532_GPIO=y
+CONFIG_LEDS_GPIO=m
+CONFIG_LEDS_LP3944=m
+CONFIG_LEDS_LP5521=m
+CONFIG_LEDS_LP5523=m
+CONFIG_LEDS_PWM=m
+CONFIG_LEDS_REGULATOR=m
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=m
+CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+CONFIG_LEDS_TRIGGER_GPIO=m
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+CONFIG_DMADEVICES=y
+CONFIG_DW_DMAC=m
+CONFIG_TIMB_DMA=m
+CONFIG_DMA_ENGINE=y
+CONFIG_NET_DMA=y
+CONFIG_ASYNC_TX_DMA=y
+CONFIG_AUXDISPLAY=y
+CONFIG_UIO=m
+CONFIG_UIO_PDRV=m
+CONFIG_UIO_PDRV_GENIRQ=m
+CONFIG_STAGING=y
+CONFIG_USBIP_CORE=m
+CONFIG_USBIP_VHCI_HCD=m
+CONFIG_USBIP_HOST=m
+CONFIG_W35UND=m
+CONFIG_PRISM2_USB=m
+CONFIG_ECHO=m
+CONFIG_ASUS_OLED=m
+CONFIG_PANEL=m
+CONFIG_PANEL_PARPORT=0
+CONFIG_PANEL_PROFILE=5
+CONFIG_RTLLIB=m
+CONFIG_RTLLIB_CRYPTO_CCMP=m
+CONFIG_RTLLIB_CRYPTO_TKIP=m
+CONFIG_RTLLIB_CRYPTO_WEP=m
+CONFIG_R8712U=m
+CONFIG_RTS5139=m
+CONFIG_TRANZPORT=m
+CONFIG_LINE6_USB=m
+CONFIG_USB_SERIAL_QUATECH2=m
+CONFIG_USB_SERIAL_QUATECH_USB2=m
+CONFIG_IIO=m
+CONFIG_IIO_ST_HWMON=m
+CONFIG_IIO_BUFFER=y
+CONFIG_IIO_SW_RING=m
+CONFIG_IIO_KFIFO_BUF=m
+CONFIG_IIO_TRIGGER=y
+CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
+CONFIG_KXSD9=m
+CONFIG_SCA3000=m
+CONFIG_IIO_PERIODIC_RTC_TRIGGER=m
+CONFIG_IIO_GPIO_TRIGGER=m
+CONFIG_IIO_SYSFS_TRIGGER=m
+CONFIG_IIO_SIMPLE_DUMMY=m
+CONFIG_FB_SM7XX=m
+CONFIG_USB_ENESTORAGE=m
+CONFIG_BCM_WIMAX=m
+CONFIG_FT1000=m
+CONFIG_FT1000_USB=m
+CONFIG_SPEAKUP=m
+CONFIG_SPEAKUP_SYNTH_SPKOUT=m
+CONFIG_SPEAKUP_SYNTH_TXPRT=m
+CONFIG_SPEAKUP_SYNTH_DUMMY=m
+CONFIG_STAGING_MEDIA=y
+CONFIG_DVB_AS102=m
+CONFIG_EASYCAP=m
+CONFIG_LIRC_STAGING=y
+CONFIG_LIRC_IGORPLUGUSB=m
+CONFIG_LIRC_IMON=m
+CONFIG_LIRC_PARALLEL=m
+CONFIG_LIRC_SASEM=m
+CONFIG_LIRC_SERIAL=m
+CONFIG_LIRC_SIR=m
+CONFIG_LIRC_TTUSBIR=m
+CONFIG_LIRC_ZILOG=m
+CONFIG_PHONE=m
+CONFIG_USB_WPAN_HCD=m
+CONFIG_CLKDEV_LOOKUP=y
+CONFIG_IOMMU_SUPPORT=y
+CONFIG_VIRT_DRIVERS=y
+CONFIG_PM_DEVFREQ=y
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_DEFAULTS_TO_ORDERED=y
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS_XATTR=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_JBD=y
+CONFIG_JBD2=y
+CONFIG_FS_MBCACHE=y
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_JFS_STATISTICS=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_XFS_RT=y
+CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
+CONFIG_OCFS2_FS=m
+CONFIG_OCFS2_FS_O2CB=m
+CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
+CONFIG_OCFS2_FS_STATS=y
+CONFIG_OCFS2_DEBUG_MASKLOG=y
+CONFIG_NILFS2_FS=m
+CONFIG_FS_POSIX_ACL=y
+CONFIG_EXPORTFS=y
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_PRINT_QUOTA_WARNING=y
+CONFIG_QFMT_V1=m
+CONFIG_QUOTACTL=y
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=m
+CONFIG_GENERIC_ACL=y
+CONFIG_FSCACHE=m
+CONFIG_FSCACHE_STATS=y
+CONFIG_FSCACHE_HISTOGRAM=y
+CONFIG_CACHEFILES=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+CONFIG_FAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+CONFIG_NTFS_FS=m
+CONFIG_PROC_FS=y
+CONFIG_PROC_VMCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_CONFIGFS_FS=m
+CONFIG_MISC_FILESYSTEMS=y
+CONFIG_ADFS_FS=m
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_RTIME=y
+CONFIG_JFFS2_CMODE_FAVOURLZO=y
+CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_ZLIB=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+CONFIG_VXFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
+CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_ROMFS_BACKED_BY_BLOCK=y
+CONFIG_ROMFS_ON_BLOCK=y
+CONFIG_PSTORE=y
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_FSCACHE=y
+CONFIG_NFS_USE_KERNEL_DNS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V2_ACL=y
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=m
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=m
+CONFIG_SUNRPC_GSS=m
+CONFIG_RPCSEC_GSS_KRB5=m
+CONFIG_CEPH_FS=m
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_UPCALL=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_CIFS_DFS_UPCALL=y
+CONFIG_NCP_FS=m
+CONFIG_NCPFS_PACKET_SIGNING=y
+CONFIG_NCPFS_IOCTL_LOCKING=y
+CONFIG_NCPFS_STRONG=y
+CONFIG_NCPFS_NFS_NS=y
+CONFIG_NCPFS_OS2_NS=y
+CONFIG_NCPFS_NLS=y
+CONFIG_NCPFS_EXTRAS=y
+# CONFIG_CODA_FS is not set
+CONFIG_AFS_FS=m
+CONFIG_9P_FS=m
+CONFIG_9P_FS_POSIX_ACL=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+CONFIG_DLM=m
+CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
+CONFIG_FRAME_WARN=1024
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+CONFIG_STACKTRACE=y
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_BOOT_PRINTK_DELAY=y
+CONFIG_NOP_TRACER=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_EVENT_POWER_TRACING_DEPRECATED=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_TRACING=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+CONFIG_BRANCH_PROFILE_NONE=y
+CONFIG_KPROBE_EVENT=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_KGDB=y
+CONFIG_KGDB_SERIAL_CONSOLE=y
+CONFIG_KGDB_KDB=y
+CONFIG_KDB_KEYBOARD=y
+CONFIG_TEST_KSTRTOX=m
+CONFIG_STRICT_DEVMEM=y
+CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_LL=y
+CONFIG_DEBUG_LL_UART_NONE=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_ENCRYPTED_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITYFS=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_PATH=y
+CONFIG_LSM_MMAP_MIN_ADDR=0
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_SECURITY_SELINUX_DEVELOP=y
+CONFIG_SECURITY_SELINUX_AVC_STATS=y
+CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
+CONFIG_SECURITY_SMACK=y
+CONFIG_SECURITY_TOMOYO=y
+CONFIG_SECURITY_TOMOYO_MAX_ACCEPT_ENTRY=2048
+CONFIG_SECURITY_TOMOYO_MAX_AUDIT_LOG=1024
+CONFIG_SECURITY_TOMOYO_POLICY_LOADER="/sbin/tomoyo-init"
+CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER="/sbin/init"
+CONFIG_SECURITY_APPARMOR=y
+CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE=1
+CONFIG_SECURITY_YAMA=y
+CONFIG_INTEGRITY=y
+CONFIG_INTEGRITY_SIGNATURE=y
+CONFIG_EVM=y
+CONFIG_DEFAULT_SECURITY_APPARMOR=y
+CONFIG_DEFAULT_SECURITY="apparmor"
+CONFIG_XOR_BLOCKS=m
+CONFIG_ASYNC_CORE=m
+CONFIG_ASYNC_MEMCPY=m
+CONFIG_ASYNC_XOR=m
+CONFIG_ASYNC_PQ=m
+CONFIG_ASYNC_RAID6_RECOV=m
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=m
+CONFIG_CRYPTO_PCOMP2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
+CONFIG_CRYPTO_GF128MUL=m
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_WORKQUEUE=y
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_AUTHENC=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_SEQIV=m
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CTR=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_CRC32C=y
+CONFIG_CRYPTO_GHASH=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_BLOWFISH_COMMON=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_ZLIB=m
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_USER_API=m
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_HW=y
+CONFIG_BINARY_PRINTF=y
+CONFIG_RAID6_PQ=m
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_IO=y
+CONFIG_CRC16=y
+CONFIG_CRC32=y
+CONFIG_CRC32_SLICEBY8=y
+CONFIG_CRC8=m
+CONFIG_AUDIT_GENERIC=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_XZ_DEC=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_BCJ=y
+CONFIG_XZ_DEC_TEST=m
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_DECOMPRESS_XZ=y
+CONFIG_DECOMPRESS_LZO=y
+CONFIG_REED_SOLOMON=y
+CONFIG_REED_SOLOMON_ENC8=y
+CONFIG_REED_SOLOMON_DEC8=y
+CONFIG_REED_SOLOMON_DEC16=y
+CONFIG_BCH=y
+CONFIG_BCH_CONST_PARAMS=y
+CONFIG_TEXTSEARCH=y
+CONFIG_TEXTSEARCH_KMP=m
+CONFIG_TEXTSEARCH_BM=m
+CONFIG_TEXTSEARCH_FSM=m
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_DQL=y
+CONFIG_NLATTR=y
+CONFIG_LRU_CACHE=m
+CONFIG_AVERAGE=y
+CONFIG_CLZ_TAB=y
+CONFIG_CORDIC=m
+CONFIG_MPILIB=y
+CONFIG_SIGNATURE=y
diff --git a/linaro/configs/vexpress.conf b/linaro/configs/vexpress.conf
new file mode 100644
index 000000000000..6a06e8b38064
--- /dev/null
+++ b/linaro/configs/vexpress.conf
@@ -0,0 +1,35 @@
+CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_VEXPRESS_CA9X4=y
+CONFIG_ARM_ARCH_TIMER=y
+CONFIG_CMDLINE="console=ttyAMA0,38400n8 root=/dev/mmcblk0p2 rootwait mmci.fmax=4000000"
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_SMSC911X=y
+CONFIG_SMC91X=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_SERIO_AMBAKMI=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_FB_ARMHDLCD=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_ARMAACI=y
+CONFIG_USB=y
+CONFIG_USB_ISP1760_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_ARMMMCI=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
diff --git a/linaro/configs/vexpress64.conf b/linaro/configs/vexpress64.conf
new file mode 100644
index 000000000000..d75f24710003
--- /dev/null
+++ b/linaro/configs/vexpress64.conf
@@ -0,0 +1,94 @@
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_PLAT_VEXPRESS=y
+CONFIG_SMP=y
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_CMDLINE="console=ttyAMA0"
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_LRO is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_BLK_DEV=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_SCSI=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+CONFIG_SMC91X=y
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_SERIO_I8042 is not set
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_AMBAKMI=y
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_VIRTIO=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_FTRACE is not set
+CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_DEBUG_ERRORS=y
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 1324cd74faec..b93376c39b61 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -185,10 +185,23 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
while (start < end) {
unsigned long *map, idx, vec;
+ unsigned shift;
map = bdata->node_bootmem_map;
idx = start - bdata->node_min_pfn;
+ shift = idx & (BITS_PER_LONG - 1);
+ /*
+ * vec holds at most BITS_PER_LONG map bits,
+ * bit 0 corresponds to start.
+ */
vec = ~map[idx / BITS_PER_LONG];
+
+ if (shift) {
+ vec >>= shift;
+ if (end - start >= BITS_PER_LONG)
+ vec |= ~map[idx / BITS_PER_LONG + 1] <<
+ (BITS_PER_LONG - shift);
+ }
/*
* If we have a properly aligned and fully unreserved
* BITS_PER_LONG block of pages in front of us, free
@@ -201,19 +214,18 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
count += BITS_PER_LONG;
start += BITS_PER_LONG;
} else {
- unsigned long off = 0;
+ unsigned long cur = start;
- vec >>= start & (BITS_PER_LONG - 1);
- while (vec) {
+ start = ALIGN(start + 1, BITS_PER_LONG);
+ while (vec && cur != start) {
if (vec & 1) {
- page = pfn_to_page(start + off);
+ page = pfn_to_page(cur);
__free_pages_bootmem(page, 0);
count++;
}
vec >>= 1;
- off++;
+ ++cur;
}
- start = ALIGN(start + 1, BITS_PER_LONG);
}
}
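
The hunk above lets free_all_bootmem_core() cope with a node whose start pfn is not aligned to BITS_PER_LONG by splicing the free-page mask out of two bitmap words. Read in isolation, the splice looks like the following userspace sketch (the helper name and parameters are illustrative, not part of the patch):

    #include <limits.h>
    #define BITS_PER_LONG (sizeof(long) * CHAR_BIT)

    /* map: bootmem bitmap, 1 = reserved; idx: page index of "start" relative
     * to node_min_pfn; remaining: pages left until "end". */
    static unsigned long splice_vec(const unsigned long *map,
                                    unsigned long idx, unsigned long remaining)
    {
            unsigned shift = idx & (BITS_PER_LONG - 1);
            unsigned long vec = ~map[idx / BITS_PER_LONG];

            if (shift) {
                    vec >>= shift;
                    if (remaining >= BITS_PER_LONG)
                            vec |= ~map[idx / BITS_PER_LONG + 1]
                                   << (BITS_PER_LONG - shift);
            }
            return vec;     /* bit 0 now corresponds to "start", 1 = free */
    }
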
diff --git a/mm/compaction.c b/mm/compaction.c
index 6b807e466497..c62bd063d766 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -816,6 +816,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
static int compact_finished(struct zone *zone,
struct compact_control *cc)
{
+ unsigned int order;
unsigned long watermark;
if (fatal_signal_pending(current))
@@ -850,22 +851,16 @@ static int compact_finished(struct zone *zone,
return COMPACT_CONTINUE;
/* Direct compactor: Is a suitable page free? */
- if (cc->page) {
- /* Was a suitable page captured? */
- if (*cc->page)
+ for (order = cc->order; order < MAX_ORDER; order++) {
+ struct free_area *area = &zone->free_area[order];
+
+ /* Job done if page is free of the right migratetype */
+ if (!list_empty(&area->free_list[cc->migratetype]))
+ return COMPACT_PARTIAL;
+
+ /* Job done if allocation would set block type */
+ if (cc->order >= pageblock_order && area->nr_free)
return COMPACT_PARTIAL;
- } else {
- unsigned int order;
- for (order = cc->order; order < MAX_ORDER; order++) {
- struct free_area *area = &zone->free_area[cc->order];
- /* Job done if page is free of the right migratetype */
- if (!list_empty(&area->free_list[cc->migratetype]))
- return COMPACT_PARTIAL;
-
- /* Job done if allocation would set block type */
- if (cc->order >= pageblock_order && area->nr_free)
- return COMPACT_PARTIAL;
- }
}
return COMPACT_CONTINUE;
@@ -921,60 +916,6 @@ unsigned long compaction_suitable(struct zone *zone, int order)
return COMPACT_CONTINUE;
}
-static void compact_capture_page(struct compact_control *cc)
-{
- unsigned long flags;
- int mtype, mtype_low, mtype_high;
-
- if (!cc->page || *cc->page)
- return;
-
- /*
- * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP
- * regardless of the migratetype of the freelist is is captured from.
- * This is fine because the order for a high-order MIGRATE_MOVABLE
- * allocation is typically at least a pageblock size and overall
- * fragmentation is not impaired. Other allocation types must
- * capture pages from their own migratelist because otherwise they
- * could pollute other pageblocks like MIGRATE_MOVABLE with
- * difficult to move pages and making fragmentation worse overall.
- */
- if (cc->migratetype == MIGRATE_MOVABLE) {
- mtype_low = 0;
- mtype_high = MIGRATE_PCPTYPES;
- } else {
- mtype_low = cc->migratetype;
- mtype_high = cc->migratetype + 1;
- }
-
- /* Speculatively examine the free lists without zone lock */
- for (mtype = mtype_low; mtype < mtype_high; mtype++) {
- int order;
- for (order = cc->order; order < MAX_ORDER; order++) {
- struct page *page;
- struct free_area *area;
- area = &(cc->zone->free_area[order]);
- if (list_empty(&area->free_list[mtype]))
- continue;
-
- /* Take the lock and attempt capture of the page */
- if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc))
- return;
- if (!list_empty(&area->free_list[mtype])) {
- page = list_entry(area->free_list[mtype].next,
- struct page, lru);
- if (capture_free_page(page, cc->order, mtype)) {
- spin_unlock_irqrestore(&cc->zone->lock,
- flags);
- *cc->page = page;
- return;
- }
- }
- spin_unlock_irqrestore(&cc->zone->lock, flags);
- }
- }
-}
-
static int compact_zone(struct zone *zone, struct compact_control *cc)
{
int ret;
@@ -1054,9 +995,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
goto out;
}
}
-
- /* Capture a page now if it is a suitable size */
- compact_capture_page(cc);
}
out:
@@ -1069,8 +1007,7 @@ out:
static unsigned long compact_zone_order(struct zone *zone,
int order, gfp_t gfp_mask,
- bool sync, bool *contended,
- struct page **page)
+ bool sync, bool *contended)
{
unsigned long ret;
struct compact_control cc = {
@@ -1080,7 +1017,6 @@ static unsigned long compact_zone_order(struct zone *zone,
.migratetype = allocflags_to_migratetype(gfp_mask),
.zone = zone,
.sync = sync,
- .page = page,
};
INIT_LIST_HEAD(&cc.freepages);
INIT_LIST_HEAD(&cc.migratepages);
@@ -1110,7 +1046,7 @@ int sysctl_extfrag_threshold = 500;
*/
unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *nodemask,
- bool sync, bool *contended, struct page **page)
+ bool sync, bool *contended)
{
enum zone_type high_zoneidx = gfp_zone(gfp_mask);
int may_enter_fs = gfp_mask & __GFP_FS;
@@ -1136,7 +1072,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
int status;
status = compact_zone_order(zone, order, gfp_mask, sync,
- contended, page);
+ contended);
rc = max(status, rc);
/* If a normal allocation would succeed, stop compacting */
@@ -1192,7 +1128,6 @@ int compact_pgdat(pg_data_t *pgdat, int order)
struct compact_control cc = {
.order = order,
.sync = false,
- .page = NULL,
};
return __compact_pgdat(pgdat, &cc);
@@ -1203,14 +1138,13 @@ static int compact_node(int nid)
struct compact_control cc = {
.order = -1,
.sync = true,
- .page = NULL,
};
return __compact_pgdat(NODE_DATA(nid), &cc);
}
/* Compact all nodes in the system */
-static int compact_nodes(void)
+static void compact_nodes(void)
{
int nid;
@@ -1219,8 +1153,6 @@ static int compact_nodes(void)
for_each_online_node(nid)
compact_node(nid);
-
- return COMPACT_COMPLETE;
}
/* The written value is actually unused, all memory is compacted */
@@ -1231,7 +1163,7 @@ int sysctl_compaction_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
if (write)
- return compact_nodes();
+ compact_nodes();
return 0;
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9e894edc7811..6001ee6347a9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1819,9 +1819,19 @@ int split_huge_page(struct page *page)
BUG_ON(is_huge_zero_pfn(page_to_pfn(page)));
BUG_ON(!PageAnon(page));
- anon_vma = page_lock_anon_vma_read(page);
+
+ /*
+ * The caller does not necessarily hold an mmap_sem that would prevent
+ * the anon_vma disappearing, so we first take a reference to it
+ * and then lock the anon_vma for write. This is similar to
+ * page_lock_anon_vma_read except the write lock is taken to serialise
+ * against parallel split or collapse operations.
+ */
+ anon_vma = page_get_anon_vma(page);
if (!anon_vma)
goto out;
+ anon_vma_lock_write(anon_vma);
+
ret = 0;
if (!PageCompound(page))
goto out_unlock;
@@ -1832,7 +1842,8 @@ int split_huge_page(struct page *page)
BUG_ON(PageCompound(page));
out_unlock:
- page_unlock_anon_vma_read(anon_vma);
+ anon_vma_unlock(anon_vma);
+ put_anon_vma(anon_vma);
out:
return ret;
}
diff --git a/mm/internal.h b/mm/internal.h
index d597f94cc205..9ba21100ebf3 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -135,7 +135,6 @@ struct compact_control {
int migratetype; /* MOVABLE, RECLAIMABLE etc */
struct zone *zone;
bool contended; /* True if a lock was contended */
- struct page **page; /* Page captured of requested size */
};
unsigned long
diff --git a/mm/memblock.c b/mm/memblock.c
index 625905523c2a..88adc8afb610 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -314,7 +314,8 @@ static void __init_memblock memblock_merge_regions(struct memblock_type *type)
}
this->size += next->size;
- memmove(next, next + 1, (type->cnt - (i + 1)) * sizeof(*next));
+ /* move entries forward from next + 1, whose index is i + 2 */
+ memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
type->cnt--;
}
}
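
A quick check of the new memmove() length: with type->cnt == 5 and i == 1, region 2 (next) is merged into region 1 and the two entries at indices 3 and 4 must slide down one slot, i.e. cnt - (i + 2) = 2 entries; the old cnt - (i + 1) = 3 read one struct past the end of the array. A minimal userspace sketch (simplified region type, hypothetical helper name):

    #include <string.h>

    struct region { unsigned long base, size; };

    static void merge_with_next(struct region *regions, int *cnt, int i)
    {
            struct region *next = &regions[i + 1];

            regions[i].size += next->size;
            /* slide down everything from next + 1, whose index is i + 2 */
            memmove(next, next + 1, (*cnt - (i + 2)) * sizeof(*next));
            (*cnt)--;
    }
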
diff --git a/mm/migrate.c b/mm/migrate.c
index 3b676b0c5c3e..c38778610aa8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1679,9 +1679,21 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
page_xchg_last_nid(new_page, page_last_nid(page));
isolated = numamigrate_isolate_page(pgdat, page);
- if (!isolated) {
+
+ /*
+ * Failing to isolate or a GUP pin prevents migration. The expected
+ * page count is 2: 1 for anonymous pages without a mapping and 1
+ * for the caller's pin. If the page was isolated, the page will
+ * need to be put back on the LRU.
+ */
+ if (!isolated || page_count(page) != 2) {
count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
put_page(new_page);
+ if (isolated) {
+ putback_lru_page(page);
+ isolated = 0;
+ goto out;
+ }
goto out_keep_locked;
}
diff --git a/mm/mmap.c b/mm/mmap.c
index f54b235f29a9..35730ee9d515 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2886,7 +2886,7 @@ static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
* The LSB of head.next can't change from under us
* because we hold the mm_all_locks_mutex.
*/
- down_write(&anon_vma->root->rwsem);
+ down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
/*
* We can safely modify head.next after taking the
* anon_vma->root->rwsem. If some other vma in this mm shares
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bc6cc0e913bd..842d734d0466 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -195,6 +195,7 @@ static char * const zone_names[MAX_NR_ZONES] = {
};
int min_free_kbytes = 1024;
+int min_free_order_shift = 1;
static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
@@ -1384,14 +1385,8 @@ void split_page(struct page *page, unsigned int order)
set_page_refcounted(page + i);
}
-/*
- * Similar to the split_page family of functions except that the page
- * required at the given order and being isolated now to prevent races
- * with parallel allocators
- */
-int capture_free_page(struct page *page, int alloc_order, int migratetype)
+static int __isolate_free_page(struct page *page, unsigned int order)
{
- unsigned int order;
unsigned long watermark;
struct zone *zone;
int mt;
@@ -1399,7 +1394,6 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
BUG_ON(!PageBuddy(page));
zone = page_zone(page);
- order = page_order(page);
mt = get_pageblock_migratetype(page);
if (mt != MIGRATE_ISOLATE) {
@@ -1408,7 +1402,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
return 0;
- __mod_zone_freepage_state(zone, -(1UL << alloc_order), mt);
+ __mod_zone_freepage_state(zone, -(1UL << order), mt);
}
/* Remove page from free list */
@@ -1416,11 +1410,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
zone->free_area[order].nr_free--;
rmv_page_order(page);
- if (alloc_order != order)
- expand(zone, page, alloc_order, order,
- &zone->free_area[order], migratetype);
-
- /* Set the pageblock if the captured page is at least a pageblock */
+ /* Set the pageblock if the isolated page is at least a pageblock */
if (order >= pageblock_order - 1) {
struct page *endpage = page + (1 << order) - 1;
for (; page < endpage; page += pageblock_nr_pages) {
@@ -1431,7 +1421,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
}
}
- return 1UL << alloc_order;
+ return 1UL << order;
}
/*
@@ -1449,10 +1439,9 @@ int split_free_page(struct page *page)
unsigned int order;
int nr_pages;
- BUG_ON(!PageBuddy(page));
order = page_order(page);
- nr_pages = capture_free_page(page, order, 0);
+ nr_pages = __isolate_free_page(page, order);
if (!nr_pages)
return 0;
@@ -1642,7 +1631,7 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
free_pages -= z->free_area[o].nr_free << o;
/* Require fewer higher order pages to be free */
- min >>= 1;
+ min >>= min_free_order_shift;
if (free_pages <= min)
return false;
@@ -2136,8 +2125,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
bool *contended_compaction, bool *deferred_compaction,
unsigned long *did_some_progress)
{
- struct page *page = NULL;
-
if (!order)
return NULL;
@@ -2149,16 +2136,12 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
current->flags |= PF_MEMALLOC;
*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
nodemask, sync_migration,
- contended_compaction, &page);
+ contended_compaction);
current->flags &= ~PF_MEMALLOC;
- /* If compaction captured a page, prep and use it */
- if (page) {
- prep_new_page(page, order, gfp_mask);
- goto got_page;
- }
-
if (*did_some_progress != COMPACT_SKIPPED) {
+ struct page *page;
+
/* Page migration frees to the PCP lists but we want merging */
drain_pages(get_cpu());
put_cpu();
@@ -2168,7 +2151,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
alloc_flags & ~ALLOC_NO_WATERMARKS,
preferred_zone, migratetype);
if (page) {
-got_page:
preferred_zone->compact_blockskip_flush = false;
preferred_zone->compact_considered = 0;
preferred_zone->compact_defer_shift = 0;
@@ -5604,7 +5586,7 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
pfn &= (PAGES_PER_SECTION-1);
return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
- pfn = pfn - zone->zone_start_pfn;
+ pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}
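
The pfn_to_bitidx() fix at the end of this file is easiest to check with numbers. A standalone sketch, assuming pageblock_order == 10 (so pageblock_nr_pages == 1024), NR_PAGEBLOCK_BITS == 4 and a zone starting at the unaligned pfn 1053:

    #include <stdio.h>

    #define PAGEBLOCK_ORDER    10UL
    #define PAGEBLOCK_NR_PAGES (1UL << PAGEBLOCK_ORDER)
    #define NR_PAGEBLOCK_BITS  4UL   /* assumed, per pageblock-flags.h */

    int main(void)
    {
            unsigned long zone_start_pfn = 1053, pfn;

            for (pfn = 2047; pfn <= 2048; pfn++) {
                    unsigned long old_idx =
                            ((pfn - zone_start_pfn) >> PAGEBLOCK_ORDER)
                            * NR_PAGEBLOCK_BITS;
                    unsigned long base = zone_start_pfn
                                         & ~(PAGEBLOCK_NR_PAGES - 1);
                    unsigned long new_idx =
                            ((pfn - base) >> PAGEBLOCK_ORDER)
                            * NR_PAGEBLOCK_BITS;
                    printf("pfn %lu: old %lu fixed %lu\n",
                           pfn, old_idx, new_idx);
            }
            return 0;
    }

pfns 2047 and 2048 sit in different pageblocks, yet the old formula maps both to bit index 0; with the zone start rounded down they map to 0 and 4 as expected.
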
diff --git a/mm/shmem.c b/mm/shmem.c
index 5dd56f6efdbd..5cd37f43d3a7 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2929,6 +2929,14 @@ put_memory:
}
EXPORT_SYMBOL_GPL(shmem_file_setup);
+void shmem_set_file(struct vm_area_struct *vma, struct file *file)
+{
+ if (vma->vm_file)
+ fput(vma->vm_file);
+ vma->vm_file = file;
+ vma->vm_ops = &shmem_vm_ops;
+}
+
/**
* shmem_zero_setup - setup a shared anonymous mapping
* @vma: the vma to be mmapped is prepared by do_mmap_pgoff
@@ -2942,10 +2950,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
if (IS_ERR(file))
return PTR_ERR(file);
- if (vma->vm_file)
- fput(vma->vm_file);
- vma->vm_file = file;
- vma->vm_ops = &shmem_vm_ops;
+ shmem_set_file(vma, file);
return 0;
}
diff --git a/net/Kconfig b/net/Kconfig
index 30b48f523135..9927a000f630 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -81,6 +81,20 @@ source "net/netlabel/Kconfig"
endif # if INET
+config ANDROID_PARANOID_NETWORK
+ bool "Only allow certain groups to create sockets"
+ default n
+ help
+ Restrict creation of IPv4/IPv6 and Bluetooth sockets to processes in the appropriate Android groups (AID_INET, AID_NET_BT*).
+
+config NET_ACTIVITY_STATS
+ bool "Network activity statistics tracking"
+ default n
+ help
+ Network activity statistics are useful for tracking wireless
+ modem activity on 2G, 3G, 4G wireless networks. Counts number of
+ transmissions and groups them in specified time buckets.
+
config NETWORK_SECMARK
bool "Security Marking"
help
@@ -220,7 +234,7 @@ source "net/batman-adv/Kconfig"
source "net/openvswitch/Kconfig"
config RPS
- boolean
+ boolean "RPS"
depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
default y
diff --git a/net/Makefile b/net/Makefile
index 4f4ee083064c..2f8855ef5345 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -70,3 +70,4 @@ obj-$(CONFIG_CEPH_LIB) += ceph/
obj-$(CONFIG_BATMAN_ADV) += batman-adv/
obj-$(CONFIG_NFC) += nfc/
obj-$(CONFIG_OPENVSWITCH) += openvswitch/
+obj-$(CONFIG_NET_ACTIVITY_STATS) += activity_stats.o
diff --git a/net/activity_stats.c b/net/activity_stats.c
new file mode 100644
index 000000000000..8a3e93470069
--- /dev/null
+++ b/net/activity_stats.c
@@ -0,0 +1,115 @@
+/* net/activity_stats.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com)
+ */
+
+#include <linux/proc_fs.h>
+#include <linux/suspend.h>
+#include <net/net_namespace.h>
+
+/*
+ * Track transmission rates in buckets (power of 2).
+ * 1,2,4,8...512 seconds.
+ *
+ * Buckets represent the count of network transmissions at least
+ * N seconds apart, where N is 1 << bucket index.
+ */
+#define BUCKET_MAX 10
+
+/* Track network activity frequency */
+static unsigned long activity_stats[BUCKET_MAX];
+static ktime_t last_transmit;
+static ktime_t suspend_time;
+static DEFINE_SPINLOCK(activity_lock);
+
+void activity_stats_update(void)
+{
+ int i;
+ unsigned long flags;
+ ktime_t now;
+ s64 delta;
+
+ spin_lock_irqsave(&activity_lock, flags);
+ now = ktime_get();
+ delta = ktime_to_ns(ktime_sub(now, last_transmit));
+
+ for (i = BUCKET_MAX - 1; i >= 0; i--) {
+ /*
+ * Check if the time delta between network activity is within the
+ * minimum bucket range.
+ */
+ if (delta < (1000000000ULL << i))
+ continue;
+
+ activity_stats[i]++;
+ last_transmit = now;
+ break;
+ }
+ spin_unlock_irqrestore(&activity_lock, flags);
+}
+
+static int activity_stats_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int i;
+ int len;
+ char *p = page;
+
+ /* Only print if offset is 0, or we have enough buffer space */
+ if (off || count < (30 * BUCKET_MAX + 22))
+ return -ENOMEM;
+
+ len = snprintf(p, count, "Min Bucket(sec) Count\n");
+ count -= len;
+ p += len;
+
+ for (i = 0; i < BUCKET_MAX; i++) {
+ len = snprintf(p, count, "%15d %lu\n", 1 << i, activity_stats[i]);
+ count -= len;
+ p += len;
+ }
+ *eof = 1;
+
+ return p - page;
+}
+
+static int activity_stats_notifier(struct notifier_block *nb,
+ unsigned long event, void *dummy)
+{
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ suspend_time = ktime_get_real();
+ break;
+
+ case PM_POST_SUSPEND:
+ suspend_time = ktime_sub(ktime_get_real(), suspend_time);
+ last_transmit = ktime_sub(last_transmit, suspend_time);
+ }
+
+ return 0;
+}
+
+static struct notifier_block activity_stats_notifier_block = {
+ .notifier_call = activity_stats_notifier,
+};
+
+static int __init activity_stats_init(void)
+{
+ create_proc_read_entry("activity", S_IRUGO,
+ init_net.proc_net_stat, activity_stats_read_proc, NULL);
+ return register_pm_notifier(&activity_stats_notifier_block);
+}
+
+subsys_initcall(activity_stats_init);
+
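
The update path above credits a transmission to the highest bucket whose span (1 << i seconds) the idle gap has reached, and leaves everything untouched for gaps under one second. The same selection as a standalone sketch (nanosecond delta, as in the kernel code; the helper name is illustrative):

    #define BUCKET_MAX 10

    /* Returns the bucket index for an idle gap of delta_ns nanoseconds,
     * or -1 when the gap is under one second and no bucket is charged. */
    static int activity_bucket(long long delta_ns)
    {
            int i;

            for (i = BUCKET_MAX - 1; i >= 0; i--)
                    if (delta_ns >= (1000000000LL << i))
                            return i;
            return -1;
    }

For example, a 3 s gap lands in bucket 1 (the ">= 2 s" bucket) and a 700 s gap in bucket 9 (">= 512 s").
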
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 5355df63d39b..ca21d11fe159 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -30,6 +30,15 @@
#include <net/bluetooth/bluetooth.h>
#include <linux/proc_fs.h>
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+#endif
+
+#ifndef CONFIG_BT_SOCK_DEBUG
+#undef BT_DBG
+#define BT_DBG(D...)
+#endif
+
#define VERSION "2.16"
/* Bluetooth sockets */
@@ -112,11 +121,40 @@ int bt_sock_unregister(int proto)
}
EXPORT_SYMBOL(bt_sock_unregister);
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+static inline int current_has_bt_admin(void)
+{
+ return (!current_euid() || in_egroup_p(AID_NET_BT_ADMIN));
+}
+
+static inline int current_has_bt(void)
+{
+ return (current_has_bt_admin() || in_egroup_p(AID_NET_BT));
+}
+# else
+static inline int current_has_bt_admin(void)
+{
+ return 1;
+}
+
+static inline int current_has_bt(void)
+{
+ return 1;
+}
+#endif
+
static int bt_sock_create(struct net *net, struct socket *sock, int proto,
int kern)
{
int err;
+ if (proto == BTPROTO_RFCOMM || proto == BTPROTO_SCO ||
+ proto == BTPROTO_L2CAP) {
+ if (!current_has_bt())
+ return -EPERM;
+ } else if (!current_has_bt_admin())
+ return -EPERM;
+
if (net != &init_net)
return -EAFNOSUPPORT;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 25bfce0666eb..5080262ab785 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -884,6 +884,15 @@ int hci_get_conn_list(void __user *arg)
(ci + n)->out = c->out;
(ci + n)->state = c->state;
(ci + n)->link_mode = c->link_mode;
+ if (c->type == SCO_LINK) {
+ (ci + n)->mtu = hdev->sco_mtu;
+ (ci + n)->cnt = hdev->sco_cnt;
+ (ci + n)->pkts = hdev->sco_pkts;
+ } else {
+ (ci + n)->mtu = hdev->acl_mtu;
+ (ci + n)->cnt = hdev->acl_cnt;
+ (ci + n)->pkts = hdev->acl_pkts;
+ }
if (++n >= req.conn_num)
break;
}
@@ -920,6 +929,15 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
ci.out = conn->out;
ci.state = conn->state;
ci.link_mode = conn->link_mode;
+ if (req.type == SCO_LINK) {
+ ci.mtu = hdev->sco_mtu;
+ ci.cnt = hdev->sco_cnt;
+ ci.pkts = hdev->sco_pkts;
+ } else {
+ ci.mtu = hdev->acl_mtu;
+ ci.cnt = hdev->acl_cnt;
+ ci.pkts = hdev->acl_pkts;
+ }
}
hci_dev_unlock(hdev);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 705078a0cc39..8e57b29dbd1a 100644..100755
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -2047,6 +2047,15 @@ unlock:
hci_conn_check_pending(hdev);
}
+static inline bool is_sco_active(struct hci_dev *hdev)
+{
+ if (hci_conn_hash_lookup_state(hdev, SCO_LINK, BT_CONNECTED) ||
+ (hci_conn_hash_lookup_state(hdev, ESCO_LINK,
+ BT_CONNECTED)))
+ return true;
+ return false;
+}
+
void hci_conn_accept(struct hci_conn *conn, int mask)
{
struct hci_dev *hdev = conn->hdev;
@@ -2129,7 +2138,8 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
bacpy(&cp.bdaddr, &ev->bdaddr);
- if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
+ if (lmp_rswitch_capable(hdev) && ((mask & HCI_LM_MASTER)
+ || is_sco_active(hdev)))
cp.role = 0x00; /* Become master */
else
cp.role = 0x01; /* Remain slave */
@@ -3306,6 +3316,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
hci_conn_add_sysfs(conn);
break;
+ case 0x10: /* Connection Accept Timeout */
case 0x11: /* Unsupported Feature or Parameter Value */
case 0x1c: /* SCO interval rejected */
case 0x1a: /* Unsupported Remote Feature */
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 201fdf737209..c496b0df9556 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -448,7 +448,6 @@ static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
switch (d->state) {
case BT_CONNECT:
- case BT_CONFIG:
if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
set_bit(RFCOMM_AUTH_REJECT, &d->flags);
rfcomm_schedule();
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 7c78e2640190..76c534e4b406 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -40,16 +40,17 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
}
#endif
- u64_stats_update_begin(&brstats->syncp);
- brstats->tx_packets++;
- brstats->tx_bytes += skb->len;
- u64_stats_update_end(&brstats->syncp);
-
BR_INPUT_SKB_CB(skb)->brdev = dev;
skb_reset_mac_header(skb);
skb_pull(skb, ETH_HLEN);
+ u64_stats_update_begin(&brstats->syncp);
+ brstats->tx_packets++;
+ /* Exclude ETH_HLEN from byte stats for consistency with Rx chain */
+ brstats->tx_bytes += skb->len;
+ u64_stats_update_end(&brstats->syncp);
+
if (is_broadcast_ether_addr(dest))
br_flood_deliver(br, skb);
else if (is_multicast_ether_addr(dest)) {
diff --git a/net/core/dev.c b/net/core/dev.c
index 515473ee52cb..f64e439b4a00 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6121,6 +6121,14 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
static const struct ethtool_ops default_ethtool_ops;
+void netdev_set_default_ethtool_ops(struct net_device *dev,
+ const struct ethtool_ops *ops)
+{
+ if (dev->ethtool_ops == &default_ethtool_ops)
+ dev->ethtool_ops = ops;
+}
+EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
+
/**
* alloc_netdev_mqs - allocate network device
* @sizeof_priv: size of private data to allocate space for
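
netdev_set_default_ethtool_ops() above only replaces the core's default_ethtool_ops placeholder, so a caller can install fallback ops without clobbering ones a lower driver already provided. A minimal sketch (my_ethtool_ops and my_setup are hypothetical):

    #include <linux/netdevice.h>
    #include <linux/ethtool.h>

    static const struct ethtool_ops my_ethtool_ops = {
            .get_link = ethtool_op_get_link,
    };

    static void my_setup(struct net_device *dev)
    {
            /* takes effect only while dev->ethtool_ops still points at the
             * core's default_ethtool_ops, i.e. nobody set real ops yet */
            netdev_set_default_ethtool_ops(dev, &my_ethtool_ops);
    }
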
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 15ca63ec604e..146bf8d1bf11 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -14,6 +14,7 @@ obj-y := route.o inetpeer.o protocol.o \
inet_fragment.o ping.o
obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o
+obj-$(CONFIG_SYSFS) += sysfs_net_ipv4.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
obj-$(CONFIG_IP_MROUTE) += ipmr.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 24b384b7903e..d64ce0df06eb 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -119,6 +119,19 @@
#include <linux/mroute.h>
#endif
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+
+static inline int current_has_network(void)
+{
+ return in_egroup_p(AID_INET) || capable(CAP_NET_RAW);
+}
+#else
+static inline int current_has_network(void)
+{
+ return 1;
+}
+#endif
/* The inetsw table contains everything that inet_create needs to
* build a new socket.
@@ -278,6 +291,7 @@ static inline int inet_netns_ok(struct net *net, __u8 protocol)
return ipprot->netns_ok;
}
+
/*
* Create an inet socket.
*/
@@ -294,6 +308,9 @@ static int inet_create(struct net *net, struct socket *sock, int protocol,
int try_loading_module = 0;
int err;
+ if (!current_has_network())
+ return -EACCES;
+
if (unlikely(!inet_ehash_secret))
if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
build_ehash_secret();
@@ -919,6 +936,7 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
case SIOCSIFPFLAGS:
case SIOCGIFPFLAGS:
case SIOCSIFFLAGS:
+ case SIOCKILLADDR:
err = devinet_ioctl(net, cmd, (void __user *)arg);
break;
default:
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index a8e4f2665d5e..8961198608d4 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -59,6 +59,7 @@
#include <net/arp.h>
#include <net/ip.h>
+#include <net/tcp.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/rtnetlink.h>
@@ -731,6 +732,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
case SIOCSIFBRDADDR: /* Set the broadcast address */
case SIOCSIFDSTADDR: /* Set the destination address */
case SIOCSIFNETMASK: /* Set the netmask for the interface */
+ case SIOCKILLADDR: /* Nuke all sockets on this address */
ret = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
goto out;
@@ -782,7 +784,8 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
}
ret = -EADDRNOTAVAIL;
- if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS)
+ if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS
+ && cmd != SIOCKILLADDR)
goto done;
switch (cmd) {
@@ -908,6 +911,9 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
inet_insert_ifa(ifa);
}
break;
+ case SIOCKILLADDR: /* Nuke all connections on this address */
+ ret = tcp_nuke_addr(net, (struct sockaddr *) sin);
+ break;
}
done:
rtnl_unlock();
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 3c9d20880283..d9c4f113d709 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -590,7 +590,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
case IP_TTL:
if (optlen < 1)
goto e_inval;
- if (val != -1 && (val < 0 || val > 255))
+ if (val != -1 && (val < 1 || val > 255))
goto e_inval;
inet->uc_ttl = val;
break;
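
With the tightened IP_TTL check, 0 is now rejected while -1 (reset to the default TTL) and 1..255 remain valid; a small userspace sketch of the three cases:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_DGRAM, 0);
            int ttl;

            ttl = 64;       /* 1..255: accepted */
            if (setsockopt(fd, IPPROTO_IP, IP_TTL, &ttl, sizeof(ttl)) < 0)
                    perror("IP_TTL=64");

            ttl = 0;        /* rejected with EINVAL after this change */
            if (setsockopt(fd, IPPROTO_IP, IP_TTL, &ttl, sizeof(ttl)) < 0)
                    perror("IP_TTL=0");

            ttl = -1;       /* still allowed: fall back to the default */
            if (setsockopt(fd, IPPROTO_IP, IP_TTL, &ttl, sizeof(ttl)) < 0)
                    perror("IP_TTL=-1");
            return 0;
    }
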
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index d8d6f2a5bf12..829ffa33c0ea 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -123,6 +123,18 @@ config IP_NF_TARGET_REJECT
To compile it as a module, choose M here. If unsure, say N.
+config IP_NF_TARGET_REJECT_SKERR
+ bool "Force socket error when rejecting with icmp*"
+ depends on IP_NF_TARGET_REJECT
+ default n
+ help
+ This option enables turning a "--reject-with icmp*" into a matching
+ socket error also.
+ The REJECT target normally allows sending an ICMP message. But it
+ leaves the local socket unaware of any ingress rejects.
+
+ If unsure, say N.
+
config IP_NF_TARGET_ULOG
tristate "ULOG target support"
default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 04b18c1ac345..452e8a587c34 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -129,6 +129,14 @@ static void send_reset(struct sk_buff *oldskb, int hook)
static inline void send_unreach(struct sk_buff *skb_in, int code)
{
icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
+#ifdef CONFIG_IP_NF_TARGET_REJECT_SKERR
+ if (skb_in->sk) {
+ skb_in->sk->sk_err = icmp_err_convert[code].errno;
+ skb_in->sk->sk_error_report(skb_in->sk);
+ pr_debug("ipt_REJECT: sk_err=%d for skb=%p sk=%p\n",
+ skb_in->sk->sk_err, skb_in, skb_in->sk);
+ }
+#endif
}
static unsigned int
diff --git a/net/ipv4/sysfs_net_ipv4.c b/net/ipv4/sysfs_net_ipv4.c
new file mode 100644
index 000000000000..0cbbf10026a6
--- /dev/null
+++ b/net/ipv4/sysfs_net_ipv4.c
@@ -0,0 +1,88 @@
+/*
+ * net/ipv4/sysfs_net_ipv4.c
+ *
+ * sysfs-based networking knobs (so we can, unlike with sysctl, control perms)
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/init.h>
+#include <net/tcp.h>
+
+#define CREATE_IPV4_FILE(_name, _var) \
+static ssize_t _name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+{ \
+ return sprintf(buf, "%d\n", _var); \
+} \
+static ssize_t _name##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ int val, ret; \
+ ret = sscanf(buf, "%d", &val); \
+ if (ret != 1) \
+ return -EINVAL; \
+ if (val < 0) \
+ return -EINVAL; \
+ _var = val; \
+ return count; \
+} \
+static struct kobj_attribute _name##_attr = \
+ __ATTR(_name, 0644, _name##_show, _name##_store)
+
+CREATE_IPV4_FILE(tcp_wmem_min, sysctl_tcp_wmem[0]);
+CREATE_IPV4_FILE(tcp_wmem_def, sysctl_tcp_wmem[1]);
+CREATE_IPV4_FILE(tcp_wmem_max, sysctl_tcp_wmem[2]);
+
+CREATE_IPV4_FILE(tcp_rmem_min, sysctl_tcp_rmem[0]);
+CREATE_IPV4_FILE(tcp_rmem_def, sysctl_tcp_rmem[1]);
+CREATE_IPV4_FILE(tcp_rmem_max, sysctl_tcp_rmem[2]);
+
+static struct attribute *ipv4_attrs[] = {
+ &tcp_wmem_min_attr.attr,
+ &tcp_wmem_def_attr.attr,
+ &tcp_wmem_max_attr.attr,
+ &tcp_rmem_min_attr.attr,
+ &tcp_rmem_def_attr.attr,
+ &tcp_rmem_max_attr.attr,
+ NULL
+};
+
+static struct attribute_group ipv4_attr_group = {
+ .attrs = ipv4_attrs,
+};
+
+static __init int sysfs_ipv4_init(void)
+{
+ struct kobject *ipv4_kobject;
+ int ret;
+
+ ipv4_kobject = kobject_create_and_add("ipv4", kernel_kobj);
+ if (!ipv4_kobject)
+ return -ENOMEM;
+
+ ret = sysfs_create_group(ipv4_kobject, &ipv4_attr_group);
+ if (ret) {
+ kobject_put(ipv4_kobject);
+ return ret;
+ }
+
+ return 0;
+}
+
+subsys_initcall(sysfs_ipv4_init);
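
For reference, each CREATE_IPV4_FILE() use expands to a show/store pair plus a kobj_attribute; CREATE_IPV4_FILE(tcp_wmem_min, sysctl_tcp_wmem[0]) becomes roughly:

    static ssize_t tcp_wmem_min_show(struct kobject *kobj,
                                     struct kobj_attribute *attr, char *buf)
    {
            return sprintf(buf, "%d\n", sysctl_tcp_wmem[0]);
    }

    static ssize_t tcp_wmem_min_store(struct kobject *kobj,
                                      struct kobj_attribute *attr,
                                      const char *buf, size_t count)
    {
            int val, ret;

            ret = sscanf(buf, "%d", &val);
            if (ret != 1 || val < 0)
                    return -EINVAL;
            sysctl_tcp_wmem[0] = val;
            return count;
    }

    static struct kobj_attribute tcp_wmem_min_attr =
            __ATTR(tcp_wmem_min, 0644, tcp_wmem_min_show, tcp_wmem_min_store);

Since the attribute group is created under kernel_kobj, the files appear as /sys/kernel/ipv4/tcp_{r,w}mem_{min,def,max}.
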
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1ca253635f7a..e74021c527cb 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -268,12 +268,16 @@
#include <linux/crypto.h>
#include <linux/time.h>
#include <linux/slab.h>
+#include <linux/uid_stat.h>
#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
+#include <net/ip6_route.h>
+#include <net/ipv6.h>
+#include <net/transp_v6.h>
#include <net/netdma.h>
#include <net/sock.h>
@@ -1224,6 +1228,9 @@ out:
if (copied)
tcp_push(sk, flags, mss_now, tp->nonagle);
release_sock(sk);
+
+ if (copied > 0)
+ uid_stat_tcp_snd(current_uid(), copied);
return copied + copied_syn;
do_fault:
@@ -1428,12 +1435,12 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)
}
#endif
-static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
+static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
struct sk_buff *skb;
u32 offset;
- skb_queue_walk(&sk->sk_receive_queue, skb) {
+ while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
offset = seq - TCP_SKB_CB(skb)->seq;
if (tcp_hdr(skb)->syn)
offset--;
@@ -1441,6 +1448,11 @@ static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
*off = offset;
return skb;
}
+ /* This looks weird, but this can happen if TCP collapsing
+ * split a fat GRO packet, while we released socket lock
+ * in skb_splice_bits()
+ */
+ sk_eat_skb(sk, skb, false);
}
return NULL;
}
@@ -1482,7 +1494,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
break;
}
used = recv_actor(desc, skb, offset, len);
- if (used < 0) {
+ if (used <= 0) {
if (!copied)
copied = used;
break;
@@ -1520,8 +1532,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
tcp_rcv_space_adjust(sk);
/* Clean up data we have read: This will do ACK frames. */
- if (copied > 0)
+ if (copied > 0) {
+ tcp_recv_skb(sk, seq, &offset);
tcp_cleanup_rbuf(sk, copied);
+ uid_stat_tcp_rcv(current_uid(), copied);
+ }
return copied;
}
EXPORT_SYMBOL(tcp_read_sock);
@@ -1925,6 +1940,9 @@ skip_copy:
tcp_cleanup_rbuf(sk, copied);
release_sock(sk);
+
+ if (copied > 0)
+ uid_stat_tcp_rcv(current_uid(), copied);
return copied;
out:
@@ -1933,6 +1951,8 @@ out:
recv_urg:
err = tcp_recv_urg(sk, msg, len, flags);
+ if (err > 0)
+ uid_stat_tcp_rcv(current_uid(), err);
goto out;
recv_sndq:
@@ -3657,3 +3677,107 @@ void __init tcp_init(void)
tcp_secret_secondary = &tcp_secret_two;
tcp_tasklet_init();
}
+
+static int tcp_is_local(struct net *net, __be32 addr) {
+ struct rtable *rt;
+ struct flowi4 fl4 = { .daddr = addr };
+ rt = ip_route_output_key(net, &fl4);
+ if (IS_ERR_OR_NULL(rt))
+ return 0;
+ return rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK);
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static int tcp_is_local6(struct net *net, struct in6_addr *addr) {
+ struct rt6_info *rt6 = rt6_lookup(net, addr, addr, 0, 0);
+ return rt6 && rt6->dst.dev && (rt6->dst.dev->flags & IFF_LOOPBACK);
+}
+#endif
+
+/*
+ * tcp_nuke_addr - destroy all sockets on the given local address
+ * if local address is the unspecified address (0.0.0.0 or ::), destroy all
+ * sockets with local addresses that are not configured.
+ */
+int tcp_nuke_addr(struct net *net, struct sockaddr *addr)
+{
+ int family = addr->sa_family;
+ unsigned int bucket;
+
+ struct in_addr *in;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ struct in6_addr *in6;
+#endif
+ if (family == AF_INET) {
+ in = &((struct sockaddr_in *)addr)->sin_addr;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ } else if (family == AF_INET6) {
+ in6 = &((struct sockaddr_in6 *)addr)->sin6_addr;
+#endif
+ } else {
+ return -EAFNOSUPPORT;
+ }
+
+ for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
+ struct hlist_nulls_node *node;
+ struct sock *sk;
+ spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);
+
+restart:
+ spin_lock_bh(lock);
+ sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
+ struct inet_sock *inet = inet_sk(sk);
+
+ if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
+ continue;
+ if (sock_flag(sk, SOCK_DEAD))
+ continue;
+
+ if (family == AF_INET) {
+ __be32 s4 = inet->inet_rcv_saddr;
+ if (s4 == LOOPBACK4_IPV6)
+ continue;
+
+ if (in->s_addr != s4 &&
+ !(in->s_addr == INADDR_ANY &&
+ !tcp_is_local(net, s4)))
+ continue;
+ }
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ if (family == AF_INET6) {
+ struct in6_addr *s6;
+ if (!inet->pinet6)
+ continue;
+
+ s6 = &inet->pinet6->rcv_saddr;
+ if (ipv6_addr_type(s6) == IPV6_ADDR_MAPPED)
+ continue;
+
+ if (!ipv6_addr_equal(in6, s6) &&
+ !(ipv6_addr_equal(in6, &in6addr_any) &&
+ !tcp_is_local6(net, s6)))
+ continue;
+ }
+#endif
+
+ sock_hold(sk);
+ spin_unlock_bh(lock);
+
+ local_bh_disable();
+ bh_lock_sock(sk);
+ sk->sk_err = ETIMEDOUT;
+ sk->sk_error_report(sk);
+
+ tcp_done(sk);
+ bh_unlock_sock(sk);
+ local_bh_enable();
+ sock_put(sk);
+
+ goto restart;
+ }
+ spin_unlock_bh(lock);
+ }
+
+ return 0;
+}
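
SIOCKILLADDR reaches tcp_nuke_addr() through devinet_ioctl() (or inet6_killaddr_ioctl() for IPv6), so user space invokes it like the other SIOCSIF* calls, passing the address in a struct ifreq against an existing interface. A hedged sketch; the ioctl value and interface name are assumptions, and CAP_NET_ADMIN is required:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <net/if.h>

    #ifndef SIOCKILLADDR
    #define SIOCKILLADDR 0x8939 /* assumed value from the Android sockios.h patch */
    #endif

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_DGRAM, 0);
            struct ifreq ifr;
            struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "rmnet0", IFNAMSIZ - 1); /* illustrative name */
            sin->sin_family = AF_INET;
            /* Passing INADDR_ANY would instead nuke sockets bound to local
             * addresses that are no longer configured, per the comment above. */
            inet_pton(AF_INET, "10.0.0.2", &sin->sin_addr);

            if (ioctl(fd, SIOCKILLADDR, &ifr) < 0)
                    perror("SIOCKILLADDR");
            return 0;
    }
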
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a28e4db8a952..18f97ca76b00 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5543,7 +5543,7 @@ slow_path:
if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
goto csum_error;
- if (!th->ack)
+ if (!th->ack && !th->rst)
goto discard;
/*
@@ -5988,7 +5988,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
goto discard;
}
- if (!th->ack)
+ if (!th->ack && !th->rst)
goto discard;
if (!tcp_validate_incoming(sk, skb, th, 0))
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 408cac4ae00a..420e56326384 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -154,6 +154,11 @@ static void addrconf_type_change(struct net_device *dev,
unsigned long event);
static int addrconf_ifdown(struct net_device *dev, int how);
+static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
+ int plen,
+ const struct net_device *dev,
+ u32 flags, u32 noflags);
+
static void addrconf_dad_start(struct inet6_ifaddr *ifp);
static void addrconf_dad_timer(unsigned long data);
static void addrconf_dad_completed(struct inet6_ifaddr *ifp);
@@ -250,12 +255,6 @@ static inline bool addrconf_qdisc_ok(const struct net_device *dev)
return !qdisc_tx_is_noop(dev);
}
-/* Check if a route is valid prefix route */
-static inline int addrconf_is_prefix_route(const struct rt6_info *rt)
-{
- return (rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0;
-}
-
static void addrconf_del_timer(struct inet6_ifaddr *ifp)
{
if (del_timer(&ifp->timer))
@@ -941,17 +940,15 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
if ((ifp->flags & IFA_F_PERMANENT) && onlink < 1) {
struct in6_addr prefix;
struct rt6_info *rt;
- struct net *net = dev_net(ifp->idev->dev);
- struct flowi6 fl6 = {};
ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len);
- fl6.flowi6_oif = ifp->idev->dev->ifindex;
- fl6.daddr = prefix;
- rt = (struct rt6_info *)ip6_route_lookup(net, &fl6,
- RT6_LOOKUP_F_IFACE);
- if (rt != net->ipv6.ip6_null_entry &&
- addrconf_is_prefix_route(rt)) {
+ rt = addrconf_get_prefix_route(&prefix,
+ ifp->prefix_len,
+ ifp->idev->dev,
+ 0, RTF_GATEWAY | RTF_DEFAULT);
+
+ if (rt) {
if (onlink == 0) {
ip6_del_rt(rt);
rt = NULL;
@@ -1877,7 +1874,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
continue;
if ((rt->rt6i_flags & flags) != flags)
continue;
- if ((noflags != 0) && ((rt->rt6i_flags & flags) != 0))
+ if ((rt->rt6i_flags & noflags) != 0)
continue;
dst_hold(&rt->dst);
break;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index b043c60429bd..ec3d58d477f9 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -63,6 +63,20 @@
#include <asm/uaccess.h>
#include <linux/mroute6.h>
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+
+static inline int current_has_network(void)
+{
+ return in_egroup_p(AID_INET) || capable(CAP_NET_RAW);
+}
+#else
+static inline int current_has_network(void)
+{
+ return 1;
+}
+#endif
+
MODULE_AUTHOR("Cast of dozens");
MODULE_DESCRIPTION("IPv6 protocol stack for Linux");
MODULE_LICENSE("GPL");
@@ -109,6 +123,9 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
int try_loading_module = 0;
int err;
+ if (!current_has_network())
+ return -EACCES;
+
if (sock->type != SOCK_RAW &&
sock->type != SOCK_DGRAM &&
!inet_ehash_secret)
@@ -478,6 +495,21 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
}
EXPORT_SYMBOL(inet6_getname);
+int inet6_killaddr_ioctl(struct net *net, void __user *arg) {
+ struct in6_ifreq ireq;
+ struct sockaddr_in6 sin6;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EACCES;
+
+ if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
+ return -EFAULT;
+
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_addr = ireq.ifr6_addr;
+ return tcp_nuke_addr(net, (struct sockaddr *) &sin6);
+}
+
int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
@@ -501,6 +533,8 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
return addrconf_del_ifaddr(net, (void __user *) arg);
case SIOCSIFDSTADDR:
return addrconf_set_dstaddr(net, (void __user *) arg);
+ case SIOCKILLADDR:
+ return inet6_killaddr_ioctl(net, (void __user *) arg);
default:
if (!sk->sk_prot->ioctl)
return -ENOIOCTLCMD;
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index c72532a60d88..2c721eb2c2a2 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -153,6 +153,18 @@ config IP6_NF_TARGET_REJECT
To compile it as a module, choose M here. If unsure, say N.
+config IP6_NF_TARGET_REJECT_SKERR
+ bool "Force socket error when rejecting with icmp*"
+ depends on IP6_NF_TARGET_REJECT
+ default n
+ help
+ This option enables turning a "--reject-with icmp*" into a matching
+ socket error also.
+ The REJECT target normally allows sending an ICMP message. But it
+ leaves the local socket unaware of any ingress rejects.
+
+ If unsure, say N.
+
config IP6_NF_MANGLE
tristate "Packet mangling"
default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 029623dbd411..944560b4f6bf 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -178,6 +178,15 @@ send_unreach(struct net *net, struct sk_buff *skb_in, unsigned char code,
skb_in->dev = net->loopback_dev;
icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
+#ifdef CONFIG_IP6_NF_TARGET_REJECT_SKERR
+ if (skb_in->sk) {
+ icmpv6_err_convert(ICMPV6_DEST_UNREACH, code,
+ &skb_in->sk->sk_err);
+ skb_in->sk->sk_error_report(skb_in->sk);
+ pr_debug("ip6t_REJECT: sk_err=%d for skb=%p sk=%p\n",
+ skb_in->sk->sk_err, skb_in, skb_in->sk);
+ }
+#endif
}
static unsigned int
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 3ad1f9db5f8b..df082508362d 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1806,7 +1806,7 @@ static void iucv_external_interrupt(struct ext_code ext_code,
struct iucv_irq_data *p;
struct iucv_irq_list *work;
- kstat_cpu(smp_processor_id()).irqs[EXTINT_IUC]++;
+ inc_irq_stat(IRQEXT_IUC);
p = iucv_irq_data[smp_processor_id()];
if (p->ippathid >= iucv_max_pathid) {
WARN_ON(p->ippathid >= iucv_max_pathid);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 5c61677487cf..47e0aca614b7 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1009,6 +1009,8 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
if (old_probe_resp)
kfree_rcu(old_probe_resp, rcu_head);
+ list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+ sta_info_flush(local, vlan);
sta_info_flush(local, sdata);
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 53f03120db55..80e55527504b 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -4,6 +4,7 @@
#include <linux/nl80211.h>
#include <linux/export.h>
+#include <linux/rtnetlink.h>
#include <net/cfg80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
@@ -197,6 +198,15 @@ static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
ctx = container_of(conf, struct ieee80211_chanctx, conf);
+ if (sdata->vif.type == NL80211_IFTYPE_AP) {
+ struct ieee80211_sub_if_data *vlan;
+
+ /* for the VLAN list */
+ ASSERT_RTNL();
+ list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+ rcu_assign_pointer(vlan->vif.chanctx_conf, NULL);
+ }
+
ieee80211_unassign_vif_chanctx(sdata, ctx);
if (ctx->refcount == 0)
ieee80211_free_chanctx(local, ctx);
@@ -316,6 +326,15 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
goto out;
}
+ if (sdata->vif.type == NL80211_IFTYPE_AP) {
+ struct ieee80211_sub_if_data *vlan;
+
+ /* for the VLAN list */
+ ASSERT_RTNL();
+ list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+ rcu_assign_pointer(vlan->vif.chanctx_conf, &ctx->conf);
+ }
+
ieee80211_recalc_smps_chanctx(local, ctx);
out:
mutex_unlock(&local->chanctx_mtx);
@@ -331,6 +350,25 @@ void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
mutex_unlock(&sdata->local->chanctx_mtx);
}
+void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_sub_if_data *ap;
+ struct ieee80211_chanctx_conf *conf;
+
+ if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->bss))
+ return;
+
+ ap = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap);
+
+ mutex_lock(&local->chanctx_mtx);
+
+ conf = rcu_dereference_protected(ap->vif.chanctx_conf,
+ lockdep_is_held(&local->chanctx_mtx));
+ rcu_assign_pointer(sdata->vif.chanctx_conf, conf);
+ mutex_unlock(&local->chanctx_mtx);
+}
+
void ieee80211_iter_chan_contexts_atomic(
struct ieee80211_hw *hw,
void (*iter)(struct ieee80211_hw *hw,
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 8881fc77fb13..6b7644e818d8 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -703,8 +703,8 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
sdata_info(sdata,
"No active IBSS STAs - trying to scan for other IBSS networks with same SSID (merge)\n");
- ieee80211_request_internal_scan(sdata,
- ifibss->ssid, ifibss->ssid_len, NULL);
+ ieee80211_request_ibss_scan(sdata, ifibss->ssid, ifibss->ssid_len,
+ NULL);
}
static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
@@ -802,9 +802,8 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
IEEE80211_SCAN_INTERVAL)) {
sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
- ieee80211_request_internal_scan(sdata,
- ifibss->ssid, ifibss->ssid_len,
- ifibss->fixed_channel ? ifibss->channel : NULL);
+ ieee80211_request_ibss_scan(sdata, ifibss->ssid,
+ ifibss->ssid_len, chan);
} else {
int interval = IEEE80211_SCAN_INTERVAL;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 42d0d0267730..8563b9a5cac3 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -92,8 +92,6 @@ struct ieee80211_bss {
u32 device_ts;
- u8 dtim_period;
-
bool wmm_used;
bool uapsd_supported;
@@ -140,7 +138,6 @@ enum ieee80211_bss_corrupt_data_flags {
/**
* enum ieee80211_valid_data_flags - BSS valid data flags
- * @IEEE80211_BSS_VALID_DTIM: DTIM data was gathered from non-corrupt IE
* @IEEE80211_BSS_VALID_WMM: WMM/UAPSD data was gathered from non-corrupt IE
* @IEEE80211_BSS_VALID_RATES: Supported rates were gathered from non-corrupt IE
* @IEEE80211_BSS_VALID_ERP: ERP flag was gathered from non-corrupt IE
@@ -151,7 +148,6 @@ enum ieee80211_bss_corrupt_data_flags {
* beacon/probe response.
*/
enum ieee80211_bss_valid_data_flags {
- IEEE80211_BSS_VALID_DTIM = BIT(0),
IEEE80211_BSS_VALID_WMM = BIT(1),
IEEE80211_BSS_VALID_RATES = BIT(2),
IEEE80211_BSS_VALID_ERP = BIT(3)
@@ -440,6 +436,7 @@ struct ieee80211_if_managed {
unsigned long timers_running; /* used for quiesce/restart */
bool powersave; /* powersave requested for this iface */
bool broken_ap; /* AP is broken -- turn off powersave */
+ u8 dtim_period;
enum ieee80211_smps_mode req_smps, /* requested smps mode */
driver_smps_mode; /* smps mode request */
@@ -773,6 +770,10 @@ struct ieee80211_sub_if_data {
u32 mntr_flags;
} u;
+ spinlock_t cleanup_stations_lock;
+ struct list_head cleanup_stations;
+ struct work_struct cleanup_stations_wk;
+
#ifdef CONFIG_MAC80211_DEBUGFS
struct {
struct dentry *dir;
@@ -1329,9 +1330,9 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
/* scan/BSS handling */
void ieee80211_scan_work(struct work_struct *work);
-int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
- const u8 *ssid, u8 ssid_len,
- struct ieee80211_channel *chan);
+int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
+ const u8 *ssid, u8 ssid_len,
+ struct ieee80211_channel *chan);
int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
struct cfg80211_scan_request *req);
void ieee80211_scan_cancel(struct ieee80211_local *local);
@@ -1628,6 +1629,7 @@ ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
const struct cfg80211_chan_def *chandef,
enum ieee80211_chanctx_mode mode);
void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata);
+void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata);
void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
struct ieee80211_chanctx *chanctx);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 09a80b55cf5a..8be854e86cd9 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -207,17 +207,8 @@ void ieee80211_recalc_idle(struct ieee80211_local *local)
static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
{
- int meshhdrlen;
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
- meshhdrlen = (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) ? 5 : 0;
-
- /* FIX: what would be proper limits for MTU?
- * This interface uses 802.3 frames. */
- if (new_mtu < 256 ||
- new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) {
+ if (new_mtu < 256 || new_mtu > IEEE80211_MAX_DATA_LEN)
return -EINVAL;
- }
dev->mtu = new_mtu;
return 0;
@@ -586,11 +577,13 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP_VLAN:
- /* no need to tell driver, but set carrier */
- if (rtnl_dereference(sdata->bss->beacon))
+ /* no need to tell driver, but set carrier and chanctx */
+ if (rtnl_dereference(sdata->bss->beacon)) {
+ ieee80211_vif_vlan_copy_chanctx(sdata);
netif_carrier_on(dev);
- else
+ } else {
netif_carrier_off(dev);
+ }
break;
case NL80211_IFTYPE_MONITOR:
if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
@@ -839,6 +832,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP_VLAN:
list_del(&sdata->u.vlan.list);
+ rcu_assign_pointer(sdata->vif.chanctx_conf, NULL);
/* no need to tell driver */
break;
case NL80211_IFTYPE_MONITOR:
@@ -865,20 +859,11 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
cancel_work_sync(&sdata->work);
/*
* When we get here, the interface is marked down.
- * Call rcu_barrier() to wait both for the RX path
+ * Call synchronize_rcu() to wait for the RX path
* should it be using the interface and enqueuing
- * frames at this very time on another CPU, and
- * for the sta free call_rcu callbacks.
- */
- rcu_barrier();
-
- /*
- * free_sta_rcu() enqueues a work for the actual
- * sta cleanup, so we need to flush it while
- * sdata is still valid.
+ * frames at this very time on another CPU.
*/
- flush_workqueue(local->workqueue);
-
+ synchronize_rcu();
skb_queue_purge(&sdata->skb_queue);
/*
@@ -1498,6 +1483,15 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
mutex_unlock(&local->iflist_mtx);
}
+static void ieee80211_cleanup_sdata_stas_wk(struct work_struct *wk)
+{
+ struct ieee80211_sub_if_data *sdata;
+
+ sdata = container_of(wk, struct ieee80211_sub_if_data, cleanup_stations_wk);
+
+ ieee80211_cleanup_sdata_stas(sdata);
+}
+
int ieee80211_if_add(struct ieee80211_local *local, const char *name,
struct wireless_dev **new_wdev, enum nl80211_iftype type,
struct vif_params *params)
@@ -1573,6 +1567,10 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
INIT_LIST_HEAD(&sdata->key_list);
+ spin_lock_init(&sdata->cleanup_stations_lock);
+ INIT_LIST_HEAD(&sdata->cleanup_stations);
+ INIT_WORK(&sdata->cleanup_stations_wk, ieee80211_cleanup_sdata_stas_wk);
+
for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
struct ieee80211_supported_band *sband;
sband = local->hw.wiphy->bands[i];
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 1bf03f9ff3ba..649ad513547f 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -163,7 +163,7 @@ int mesh_rmc_init(struct ieee80211_sub_if_data *sdata)
return -ENOMEM;
sdata->u.mesh.rmc->idx_mask = RMC_BUCKETS - 1;
for (i = 0; i < RMC_BUCKETS; i++)
- INIT_LIST_HEAD(&sdata->u.mesh.rmc->bucket[i].list);
+ INIT_LIST_HEAD(&sdata->u.mesh.rmc->bucket[i]);
return 0;
}
@@ -177,7 +177,7 @@ void mesh_rmc_free(struct ieee80211_sub_if_data *sdata)
return;
for (i = 0; i < RMC_BUCKETS; i++)
- list_for_each_entry_safe(p, n, &rmc->bucket[i].list, list) {
+ list_for_each_entry_safe(p, n, &rmc->bucket[i], list) {
list_del(&p->list);
kmem_cache_free(rm_cache, p);
}
@@ -210,7 +210,7 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
/* Don't care about endianness since only match matters */
memcpy(&seqnum, &mesh_hdr->seqnum, sizeof(mesh_hdr->seqnum));
idx = le32_to_cpu(mesh_hdr->seqnum) & rmc->idx_mask;
- list_for_each_entry_safe(p, n, &rmc->bucket[idx].list, list) {
+ list_for_each_entry_safe(p, n, &rmc->bucket[idx], list) {
++entries;
if (time_after(jiffies, p->exp_time) ||
(entries == RMC_QUEUE_MAX_LEN)) {
@@ -229,7 +229,7 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
p->seqnum = seqnum;
p->exp_time = jiffies + RMC_TIMEOUT;
memcpy(p->sa, sa, ETH_ALEN);
- list_add(&p->list, &rmc->bucket[idx].list);
+ list_add(&p->list, &rmc->bucket[idx]);
return 0;
}
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 7c9215fb2ac8..84c28c6101cd 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -184,7 +184,7 @@ struct rmc_entry {
};
struct mesh_rmc {
- struct rmc_entry bucket[RMC_BUCKETS];
+ struct list_head bucket[RMC_BUCKETS];
u32 idx_mask;
};
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 7753a9ca98a6..a3552929a21d 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1074,12 +1074,8 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
if (beaconint_us > latency) {
local->ps_sdata = NULL;
} else {
- struct ieee80211_bss *bss;
int maxslp = 1;
- u8 dtimper;
-
- bss = (void *)found->u.mgd.associated->priv;
- dtimper = bss->dtim_period;
+ u8 dtimper = found->u.mgd.dtim_period;
/* If the TIM IE is invalid, pretend the value is 1 */
if (!dtimper)
@@ -1410,10 +1406,17 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
ieee80211_led_assoc(local, 1);
- if (local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD)
- bss_conf->dtim_period = bss->dtim_period;
- else
+ if (local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD) {
+ /*
+ * If the AP is buggy we may get here with no DTIM period
+ * known, so assume it's 1, which is the only safe assumption
+ * in that case; if the TIM IE is broken, powersave probably
+ * just won't work at all.
+ */
+ bss_conf->dtim_period = sdata->u.mgd.dtim_period ?: 1;
+ } else {
bss_conf->dtim_period = 0;
+ }
bss_conf->assoc = 1;
@@ -1562,6 +1565,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
sdata->u.mgd.timers_running = 0;
+ sdata->vif.bss_conf.dtim_period = 0;
+
ifmgd->flags = 0;
ieee80211_vif_release_channel(sdata);
}
@@ -2373,11 +2378,18 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel *channel;
bool need_ps = false;
- if (sdata->u.mgd.associated &&
- ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) {
- bss = (void *)sdata->u.mgd.associated->priv;
+ if ((sdata->u.mgd.associated &&
+ ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) ||
+ (sdata->u.mgd.assoc_data &&
+ ether_addr_equal(mgmt->bssid,
+ sdata->u.mgd.assoc_data->bss->bssid))) {
/* not previously set so we may need to recalc */
- need_ps = !bss->dtim_period;
+ need_ps = sdata->u.mgd.associated && !sdata->u.mgd.dtim_period;
+
+ if (elems->tim && !elems->parse_error) {
+ struct ieee80211_tim_ie *tim_ie = elems->tim;
+ sdata->u.mgd.dtim_period = tim_ie->dtim_period;
+ }
}
if (elems->ds_params && elems->ds_params_len == 1)
@@ -3896,20 +3908,41 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
/* kick off associate process */
ifmgd->assoc_data = assoc_data;
+ ifmgd->dtim_period = 0;
err = ieee80211_prep_connection(sdata, req->bss, true);
if (err)
goto err_clear;
- if (!bss->dtim_period &&
- sdata->local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD) {
- /*
- * Wait up to one beacon interval ...
- * should this be more if we miss one?
- */
- sdata_info(sdata, "waiting for beacon from %pM\n",
- ifmgd->bssid);
- assoc_data->timeout = TU_TO_EXP_TIME(req->bss->beacon_interval);
+ if (sdata->local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD) {
+ const struct cfg80211_bss_ies *beacon_ies;
+
+ rcu_read_lock();
+ beacon_ies = rcu_dereference(req->bss->beacon_ies);
+ if (!beacon_ies) {
+ /*
+ * Wait up to one beacon interval ...
+ * should this be more if we miss one?
+ */
+ sdata_info(sdata, "waiting for beacon from %pM\n",
+ ifmgd->bssid);
+ assoc_data->timeout =
+ TU_TO_EXP_TIME(req->bss->beacon_interval);
+ } else {
+ const u8 *tim_ie = cfg80211_find_ie(WLAN_EID_TIM,
+ beacon_ies->data,
+ beacon_ies->len);
+ if (tim_ie && tim_ie[1] >=
+ sizeof(struct ieee80211_tim_ie)) {
+ const struct ieee80211_tim_ie *tim;
+ tim = (void *)(tim_ie + 2);
+ ifmgd->dtim_period = tim->dtim_period;
+ }
+ assoc_data->have_beacon = true;
+ assoc_data->sent_assoc = false;
+ assoc_data->timeout = jiffies;
+ }
+ rcu_read_unlock();
} else {
assoc_data->have_beacon = true;
assoc_data->sent_assoc = false;
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 8ed83dcc149f..d59fc6818b1c 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -113,18 +113,6 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
bss->valid_data |= IEEE80211_BSS_VALID_ERP;
}
- if (elems->tim && (!elems->parse_error ||
- !(bss->valid_data & IEEE80211_BSS_VALID_DTIM))) {
- struct ieee80211_tim_ie *tim_ie = elems->tim;
- bss->dtim_period = tim_ie->dtim_period;
- if (!elems->parse_error)
- bss->valid_data |= IEEE80211_BSS_VALID_DTIM;
- }
-
- /* If the beacon had no TIM IE, or it was invalid, use 1 */
- if (beacon && !bss->dtim_period)
- bss->dtim_period = 1;
-
/* replace old supported rates if we get new values */
if (!elems->parse_error ||
!(bss->valid_data & IEEE80211_BSS_VALID_RATES)) {
@@ -832,9 +820,9 @@ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
return res;
}
-int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
- const u8 *ssid, u8 ssid_len,
- struct ieee80211_channel *chan)
+int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
+ const u8 *ssid, u8 ssid_len,
+ struct ieee80211_channel *chan)
{
struct ieee80211_local *local = sdata->local;
int ret = -EBUSY;
@@ -848,22 +836,36 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
/* fill internal scan request */
if (!chan) {
- int i, nchan = 0;
+ int i, max_n;
+ int n_ch = 0;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
if (!local->hw.wiphy->bands[band])
continue;
- for (i = 0;
- i < local->hw.wiphy->bands[band]->n_channels;
- i++) {
- local->int_scan_req->channels[nchan] =
+
+ max_n = local->hw.wiphy->bands[band]->n_channels;
+ for (i = 0; i < max_n; i++) {
+ struct ieee80211_channel *tmp_ch =
&local->hw.wiphy->bands[band]->channels[i];
- nchan++;
+
+ if (tmp_ch->flags & (IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_DISABLED))
+ continue;
+
+ local->int_scan_req->channels[n_ch] = tmp_ch;
+ n_ch++;
}
}
- local->int_scan_req->n_channels = nchan;
+ if (WARN_ON_ONCE(n_ch == 0))
+ goto unlock;
+
+ local->int_scan_req->n_channels = n_ch;
} else {
+ if (WARN_ON_ONCE(chan->flags & (IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_DISABLED)))
+ goto unlock;
+
local->int_scan_req->channels[0] = chan;
local->int_scan_req->n_channels = 1;
}
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index f3e502502fee..ca9fde198188 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -91,9 +91,8 @@ static int sta_info_hash_del(struct ieee80211_local *local,
return -ENOENT;
}
-static void free_sta_work(struct work_struct *wk)
+static void cleanup_single_sta(struct sta_info *sta)
{
- struct sta_info *sta = container_of(wk, struct sta_info, free_sta_wk);
int ac, i;
struct tid_ampdu_tx *tid_tx;
struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -153,11 +152,35 @@ static void free_sta_work(struct work_struct *wk)
sta_info_free(local, sta);
}
+void ieee80211_cleanup_sdata_stas(struct ieee80211_sub_if_data *sdata)
+{
+ struct sta_info *sta;
+
+ spin_lock_bh(&sdata->cleanup_stations_lock);
+ while (!list_empty(&sdata->cleanup_stations)) {
+ sta = list_first_entry(&sdata->cleanup_stations,
+ struct sta_info, list);
+ list_del(&sta->list);
+ spin_unlock_bh(&sdata->cleanup_stations_lock);
+
+ cleanup_single_sta(sta);
+
+ spin_lock_bh(&sdata->cleanup_stations_lock);
+ }
+
+ spin_unlock_bh(&sdata->cleanup_stations_lock);
+}
+
static void free_sta_rcu(struct rcu_head *h)
{
struct sta_info *sta = container_of(h, struct sta_info, rcu_head);
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
- ieee80211_queue_work(&sta->local->hw, &sta->free_sta_wk);
+ spin_lock(&sdata->cleanup_stations_lock);
+ list_add_tail(&sta->list, &sdata->cleanup_stations);
+ spin_unlock(&sdata->cleanup_stations_lock);
+
+ ieee80211_queue_work(&sdata->local->hw, &sdata->cleanup_stations_wk);
}
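For orientation, the control flow produced by the hunks above can be summarised as below; this is an editorial sketch of what the patch already does, not extra code (the call_rcu() site that schedules free_sta_rcu() lives elsewhere in sta_info.c and is assumed here):

    /*
     * Sketch of the new teardown path:
     *   free_sta_rcu()                RCU callback, presumably via call_rcu()
     *     -> moves the sta onto sdata->cleanup_stations
     *     -> queues sdata->cleanup_stations_wk
     *   ieee80211_cleanup_sdata_stas_wk() / sta_info_flush()
     *     -> ieee80211_cleanup_sdata_stas() drains the list and calls
     *        cleanup_single_sta() for each entry while sdata is still valid.
     */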
/* protected by RCU */
@@ -310,7 +333,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
spin_lock_init(&sta->lock);
INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
- INIT_WORK(&sta->free_sta_wk, free_sta_work);
INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
mutex_init(&sta->ampdu_mlme.mtx);
@@ -862,7 +884,7 @@ void sta_info_init(struct ieee80211_local *local)
void sta_info_stop(struct ieee80211_local *local)
{
- del_timer(&local->sta_cleanup);
+ del_timer_sync(&local->sta_cleanup);
sta_info_flush(local, NULL);
}
@@ -891,6 +913,20 @@ int sta_info_flush(struct ieee80211_local *local,
}
mutex_unlock(&local->sta_mtx);
+ rcu_barrier();
+
+ if (sdata) {
+ ieee80211_cleanup_sdata_stas(sdata);
+ cancel_work_sync(&sdata->cleanup_stations_wk);
+ } else {
+ mutex_lock(&local->iflist_mtx);
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ ieee80211_cleanup_sdata_stas(sdata);
+ cancel_work_sync(&sdata->cleanup_stations_wk);
+ }
+ mutex_unlock(&local->iflist_mtx);
+ }
+
return ret;
}
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 1489bca9ea97..37c1889afd3a 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -299,7 +299,6 @@ struct sta_info {
spinlock_t lock;
struct work_struct drv_unblock_wk;
- struct work_struct free_sta_wk;
u16 listen_interval;
@@ -563,4 +562,6 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta);
void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta);
void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta);
+void ieee80211_cleanup_sdata_stas(struct ieee80211_sub_if_data *sdata);
+
#endif /* STA_INFO_H */
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 49e96df5fbc4..bf5c7c9efb83 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -1067,6 +1067,8 @@ config NETFILTER_XT_MATCH_OWNER
based on who created the socket: the user or group. It is also
possible to check whether a socket actually exists.
+ Conflicts with the '"quota, tag, owner" match' (NETFILTER_XT_MATCH_QTAGUID).
+
config NETFILTER_XT_MATCH_POLICY
tristate 'IPsec "policy" match support'
depends on XFRM
@@ -1100,6 +1102,22 @@ config NETFILTER_XT_MATCH_PKTTYPE
To compile it as a module, choose M here. If unsure, say N.
+config NETFILTER_XT_MATCH_QTAGUID
+ bool '"quota, tag, owner" match and stats support'
+ depends on NETFILTER_XT_MATCH_SOCKET
+ depends on NETFILTER_XT_MATCH_OWNER=n
+ help
+ This option replaces the `owner' match. In addition to matching
+ on uid, it keeps stats based on a tag assigned to a socket.
+ The full tag consists of a UID and an accounting tag.
+ The tags are assignable to sockets from user space (e.g. a download
+ manager can assign the socket to another UID for accounting).
+ Stats and control are done via /proc/net/xt_qtaguid/.
+ It replaces `owner' since it takes the same arguments, though
+ ideally the iptables tool would recognize it in its own right.
+
+ If unsure, say `N'.
+
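As a rough illustration of the /proc interface mentioned in the help text above, a userspace reader could look like the sketch below. The "stats" node name is an assumption; only the xt_qtaguid directory name is established by this hunk and the module source further down.

    #include <stdio.h>

    /* Hedged sketch: dump the qtaguid per-tag counters, assuming the
     * module exposes them as /proc/net/xt_qtaguid/stats. */
    int main(void)
    {
            char line[512];
            FILE *f = fopen("/proc/net/xt_qtaguid/stats", "r");

            if (!f) {
                    perror("xt_qtaguid stats");
                    return 1;
            }
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);    /* one counter line per iface/tag/set entry */
            fclose(f);
            return 0;
    }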
config NETFILTER_XT_MATCH_QUOTA
tristate '"quota" match support'
depends on NETFILTER_ADVANCED
@@ -1110,6 +1128,30 @@ config NETFILTER_XT_MATCH_QUOTA
If you want to compile it as a module, say M here and read
<file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
+config NETFILTER_XT_MATCH_QUOTA2
+ tristate '"quota2" match support'
+ depends on NETFILTER_ADVANCED
+ help
+ This option adds a `quota2' match, which matches against a byte
+ counter correctly, rather than per CPU.
+ It allows naming the quotas.
+ This is based on http://xtables-addons.git.sourceforge.net
+
+ If you want to compile it as a module, say M here and read
+ <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
+
+config NETFILTER_XT_MATCH_QUOTA2_LOG
+ bool '"quota2" Netfilter LOG support'
+ depends on NETFILTER_XT_MATCH_QUOTA2
+ depends on IP_NF_TARGET_ULOG=n # not yes, not module, just no
+ default n
+ help
+ This option allows `quota2' to log ONCE when a quota limit
+ is crossed. It logs via NETLINK using the NETLINK_NFLOG family.
+ It logs similarly to how ipt_ULOG would, but without any payload data.
+
+ If unsure, say `N'.
+
config NETFILTER_XT_MATCH_RATEEST
tristate '"rateest" match support'
depends on NETFILTER_ADVANCED
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 32596978df1d..115b4fe90448 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -124,7 +124,9 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_OWNER) += xt_owner.o
obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o
obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o
obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_QTAGUID) += xt_qtaguid_print.o xt_qtaguid.o
obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA) += xt_quota.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA2) += xt_quota2.o
obj-$(CONFIG_NETFILTER_XT_MATCH_RATEEST) += xt_rateest.o
obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o
obj-$(CONFIG_NETFILTER_XT_MATCH_RECENT) += xt_recent.o
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index f407ebc13481..df91e26f55f2 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -5,6 +5,7 @@
* After timer expires a kevent will be sent.
*
* Copyright (C) 2004, 2010 Nokia Corporation
+ *
* Written by Timo Teras <ext-timo.teras@nokia.com>
*
* Converted to x_tables and reworked for upstream inclusion
@@ -38,8 +39,10 @@
#include <linux/netfilter/xt_IDLETIMER.h>
#include <linux/kdev_t.h>
#include <linux/kobject.h>
+#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/sysfs.h>
+#include <net/net_namespace.h>
struct idletimer_tg_attr {
struct attribute attr;
@@ -56,6 +59,8 @@ struct idletimer_tg {
struct idletimer_tg_attr attr;
unsigned int refcnt;
+ bool send_nl_msg;
+ bool active;
};
static LIST_HEAD(idletimer_tg_list);
@@ -63,6 +68,32 @@ static DEFINE_MUTEX(list_mutex);
static struct kobject *idletimer_tg_kobj;
+static void notify_netlink_uevent(const char *iface, struct idletimer_tg *timer)
+{
+ char iface_msg[NLMSG_MAX_SIZE];
+ char state_msg[NLMSG_MAX_SIZE];
+ char *envp[] = { iface_msg, state_msg, NULL };
+ int res;
+
+ res = snprintf(iface_msg, NLMSG_MAX_SIZE, "INTERFACE=%s",
+ iface);
+ if (NLMSG_MAX_SIZE <= res) {
+ pr_err("message too long (%d)", res);
+ return;
+ }
+ res = snprintf(state_msg, NLMSG_MAX_SIZE, "STATE=%s",
+ timer->active ? "active" : "inactive");
+ if (NLMSG_MAX_SIZE <= res) {
+ pr_err("message too long (%d)", res);
+ return;
+ }
+ pr_debug("putting nlmsg: <%s> <%s>\n", iface_msg, state_msg);
+ kobject_uevent_env(idletimer_tg_kobj, KOBJ_CHANGE, envp);
+}
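For illustration, the environment a uevent listener receives from the function above is just the two strings built into envp; "wlan0" below is a made-up timer label:

    /* Hypothetical payload passed to kobject_uevent_env() when the timer
     * named after "wlan0" (made-up label) has just expired: */
    char *envp[] = { "INTERFACE=wlan0", "STATE=inactive", NULL };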
+
static
struct idletimer_tg *__idletimer_tg_find_by_label(const char *label)
{
@@ -83,6 +114,7 @@ static ssize_t idletimer_tg_show(struct kobject *kobj, struct attribute *attr,
{
struct idletimer_tg *timer;
unsigned long expires = 0;
+ unsigned long now = jiffies;
mutex_lock(&list_mutex);
@@ -92,11 +124,15 @@ static ssize_t idletimer_tg_show(struct kobject *kobj, struct attribute *attr,
mutex_unlock(&list_mutex);
- if (time_after(expires, jiffies))
+ if (time_after(expires, now))
return sprintf(buf, "%u\n",
- jiffies_to_msecs(expires - jiffies) / 1000);
+ jiffies_to_msecs(expires - now) / 1000);
- return sprintf(buf, "0\n");
+ if (timer->send_nl_msg)
+ return sprintf(buf, "0 %d\n",
+ jiffies_to_msecs(now - expires) / 1000);
+ else
+ return sprintf(buf, "0\n");
}
static void idletimer_tg_work(struct work_struct *work)
@@ -105,6 +141,9 @@ static void idletimer_tg_work(struct work_struct *work)
work);
sysfs_notify(idletimer_tg_kobj, NULL, timer->attr.attr.name);
+
+ if (timer->send_nl_msg)
+ notify_netlink_uevent(timer->attr.attr.name, timer);
}
static void idletimer_tg_expired(unsigned long data)
@@ -113,6 +152,7 @@ static void idletimer_tg_expired(unsigned long data)
pr_debug("timer %s expired\n", timer->attr.attr.name);
+ timer->active = false;
schedule_work(&timer->work);
}
@@ -145,6 +185,8 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
setup_timer(&info->timer->timer, idletimer_tg_expired,
(unsigned long) info->timer);
info->timer->refcnt = 1;
+ info->timer->send_nl_msg = (info->send_nl_msg != 0);
+ info->timer->active = true;
mod_timer(&info->timer->timer,
msecs_to_jiffies(info->timeout * 1000) + jiffies);
@@ -168,14 +210,24 @@ static unsigned int idletimer_tg_target(struct sk_buff *skb,
const struct xt_action_param *par)
{
const struct idletimer_tg_info *info = par->targinfo;
+ unsigned long now = jiffies;
pr_debug("resetting timer %s, timeout period %u\n",
info->label, info->timeout);
BUG_ON(!info->timer);
+ info->timer->active = true;
+
+ if (time_before(info->timer->timer.expires, now)) {
+ schedule_work(&info->timer->work);
+ pr_debug("Starting timer %s (Expired, Jiffies): %lu, %lu\n",
+ info->label, info->timer->timer.expires, now);
+ }
+
+ /* TODO: Avoid modifying timers on each packet */
mod_timer(&info->timer->timer,
- msecs_to_jiffies(info->timeout * 1000) + jiffies);
+ msecs_to_jiffies(info->timeout * 1000) + now);
return XT_CONTINUE;
}
@@ -184,8 +236,9 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
{
struct idletimer_tg_info *info = par->targinfo;
int ret;
+ unsigned long now = jiffies;
- pr_debug("checkentry targinfo%s\n", info->label);
+ pr_debug("checkentry targinfo %s\n", info->label);
if (info->timeout == 0) {
pr_debug("timeout value is zero\n");
@@ -204,8 +257,16 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
info->timer = __idletimer_tg_find_by_label(info->label);
if (info->timer) {
info->timer->refcnt++;
+ info->timer->active = true;
+
+ if (time_before(info->timer->timer.expires, now)) {
+ schedule_work(&info->timer->work);
+ pr_debug("Starting Checkentry timer (Expired, Jiffies): %lu, %lu\n",
+ info->timer->timer.expires, now);
+ }
+
mod_timer(&info->timer->timer,
- msecs_to_jiffies(info->timeout * 1000) + jiffies);
+ msecs_to_jiffies(info->timeout * 1000) + now);
pr_debug("increased refcnt of timer %s to %u\n",
info->label, info->timer->refcnt);
@@ -219,6 +280,7 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
}
mutex_unlock(&list_mutex);
+
return 0;
}
@@ -240,7 +302,7 @@ static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
kfree(info->timer);
} else {
pr_debug("decreased refcnt of timer %s to %u\n",
- info->label, info->timer->refcnt);
+ info->label, info->timer->refcnt);
}
mutex_unlock(&list_mutex);
@@ -248,6 +310,7 @@ static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
static struct xt_target idletimer_tg __read_mostly = {
.name = "IDLETIMER",
+ .revision = 1,
.family = NFPROTO_UNSPEC,
.target = idletimer_tg_target,
.targetsize = sizeof(struct idletimer_tg_info),
@@ -313,3 +376,4 @@ MODULE_DESCRIPTION("Xtables: idle time monitor");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("ipt_IDLETIMER");
MODULE_ALIAS("ip6t_IDLETIMER");
+MODULE_ALIAS("arpt_IDLETIMER");
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
new file mode 100644
index 000000000000..223fa8be8e85
--- /dev/null
+++ b/net/netfilter/xt_qtaguid.c
@@ -0,0 +1,2982 @@
+/*
+ * Kernel iptables module to track stats for packets based on user tags.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * There are run-time debug flags enabled via the debug_mask module param, or
+ * via the DEFAULT_DEBUG_MASK. See xt_qtaguid_internal.h.
+ */
+#define DEBUG
+
+#include <linux/file.h>
+#include <linux/inetdevice.h>
+#include <linux/module.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_qtaguid.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <net/addrconf.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#endif
+
+#include <linux/netfilter/xt_socket.h>
+#include "xt_qtaguid_internal.h"
+#include "xt_qtaguid_print.h"
+
+/*
+ * We only use the xt_socket funcs within a similar context to avoid unexpected
+ * return values.
+ */
+#define XT_SOCKET_SUPPORTED_HOOKS \
+ ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN))
+
+
+static const char *module_procdirname = "xt_qtaguid";
+static struct proc_dir_entry *xt_qtaguid_procdir;
+
+static unsigned int proc_iface_perms = S_IRUGO;
+module_param_named(iface_perms, proc_iface_perms, uint, S_IRUGO | S_IWUSR);
+
+static struct proc_dir_entry *xt_qtaguid_stats_file;
+static unsigned int proc_stats_perms = S_IRUGO;
+module_param_named(stats_perms, proc_stats_perms, uint, S_IRUGO | S_IWUSR);
+
+static struct proc_dir_entry *xt_qtaguid_ctrl_file;
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+static unsigned int proc_ctrl_perms = S_IRUGO | S_IWUGO;
+#else
+static unsigned int proc_ctrl_perms = S_IRUGO | S_IWUSR;
+#endif
+module_param_named(ctrl_perms, proc_ctrl_perms, uint, S_IRUGO | S_IWUSR);
+
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+static gid_t proc_stats_readall_gid = AID_NET_BW_STATS;
+static gid_t proc_ctrl_write_gid = AID_NET_BW_ACCT;
+#else
+/* 0 means, don't limit anybody */
+static gid_t proc_stats_readall_gid;
+static gid_t proc_ctrl_write_gid;
+#endif
+module_param_named(stats_readall_gid, proc_stats_readall_gid, uint,
+ S_IRUGO | S_IWUSR);
+module_param_named(ctrl_write_gid, proc_ctrl_write_gid, uint,
+ S_IRUGO | S_IWUSR);
+
+/*
+ * Limit the number of active tags (via socket tags) for a given UID.
+ * Multiple processes could share the UID.
+ */
+static int max_sock_tags = DEFAULT_MAX_SOCK_TAGS;
+module_param(max_sock_tags, int, S_IRUGO | S_IWUSR);
+
+/*
+ * After the kernel has initialized this module, it is still possible
+ * to make it passive.
+ * Setting passive to Y:
+ * - the iface stats handling will not act on notifications.
+ * - iptables matches will never match.
+ * - ctrl commands silently succeed.
+ * - stats are always empty.
+ * This is mostly useful when a bug is suspected.
+ */
+static bool module_passive;
+module_param_named(passive, module_passive, bool, S_IRUGO | S_IWUSR);
+
+/*
+ * Control how qtaguid data is tracked per proc/uid.
+ * Setting tag_tracking_passive to Y:
+ * - don't create proc specific structs to track tags
+ * - don't check whether active tag stats exceed the configured limits.
+ * - don't clean up socket tags on process exits.
+ * This is mostly useful when a bug is suspected.
+ */
+static bool qtu_proc_handling_passive;
+module_param_named(tag_tracking_passive, qtu_proc_handling_passive, bool,
+ S_IRUGO | S_IWUSR);
+
+#define QTU_DEV_NAME "xt_qtaguid"
+
+uint qtaguid_debug_mask = DEFAULT_DEBUG_MASK;
+module_param_named(debug_mask, qtaguid_debug_mask, uint, S_IRUGO | S_IWUSR);
+
+/*---------------------------------------------------------------------------*/
+static const char *iface_stat_procdirname = "iface_stat";
+static struct proc_dir_entry *iface_stat_procdir;
+/*
+ * The iface_stat_all* files will go away once userspace gets used to the
+ * new fields that have a format line.
+ */
+static const char *iface_stat_all_procfilename = "iface_stat_all";
+static struct proc_dir_entry *iface_stat_all_procfile;
+static const char *iface_stat_fmt_procfilename = "iface_stat_fmt";
+static struct proc_dir_entry *iface_stat_fmt_procfile;
+
+
+/*
+ * Ordering of locks:
+ * outer locks:
+ * iface_stat_list_lock
+ * sock_tag_list_lock
+ * inner locks:
+ * uid_tag_data_tree_lock
+ * tag_counter_set_list_lock
+ * Notice how sock_tag_list_lock is held sometimes when uid_tag_data_tree_lock
+ * is acquired.
+ *
+ * Call tree with all lock holders as of 2012-04-27:
+ *
+ * iface_stat_fmt_proc_read()
+ * iface_stat_list_lock
+ * (struct iface_stat)
+ *
+ * qtaguid_ctrl_proc_read()
+ * sock_tag_list_lock
+ * (sock_tag_tree)
+ * (struct proc_qtu_data->sock_tag_list)
+ * prdebug_full_state()
+ * sock_tag_list_lock
+ * (sock_tag_tree)
+ * uid_tag_data_tree_lock
+ * (uid_tag_data_tree)
+ * (proc_qtu_data_tree)
+ * iface_stat_list_lock
+ *
+ * qtaguid_stats_proc_read()
+ * iface_stat_list_lock
+ * struct iface_stat->tag_stat_list_lock
+ *
+ * qtudev_open()
+ * uid_tag_data_tree_lock
+ *
+ * qtudev_release()
+ * sock_tag_data_list_lock
+ * uid_tag_data_tree_lock
+ * prdebug_full_state()
+ * sock_tag_list_lock
+ * uid_tag_data_tree_lock
+ * iface_stat_list_lock
+ *
+ * iface_netdev_event_handler()
+ * iface_stat_create()
+ * iface_stat_list_lock
+ * iface_stat_update()
+ * iface_stat_list_lock
+ *
+ * iface_inetaddr_event_handler()
+ * iface_stat_create()
+ * iface_stat_list_lock
+ * iface_stat_update()
+ * iface_stat_list_lock
+ *
+ * iface_inet6addr_event_handler()
+ * iface_stat_create_ipv6()
+ * iface_stat_list_lock
+ * iface_stat_update()
+ * iface_stat_list_lock
+ *
+ * qtaguid_mt()
+ * account_for_uid()
+ * if_tag_stat_update()
+ * get_sock_stat()
+ * sock_tag_list_lock
+ * struct iface_stat->tag_stat_list_lock
+ * tag_stat_update()
+ * get_active_counter_set()
+ * tag_counter_set_list_lock
+ * tag_stat_update()
+ * get_active_counter_set()
+ * tag_counter_set_list_lock
+ *
+ *
+ * qtaguid_ctrl_parse()
+ * ctrl_cmd_delete()
+ * sock_tag_list_lock
+ * tag_counter_set_list_lock
+ * iface_stat_list_lock
+ * struct iface_stat->tag_stat_list_lock
+ * uid_tag_data_tree_lock
+ * ctrl_cmd_counter_set()
+ * tag_counter_set_list_lock
+ * ctrl_cmd_tag()
+ * sock_tag_list_lock
+ * (sock_tag_tree)
+ * get_tag_ref()
+ * uid_tag_data_tree_lock
+ * (uid_tag_data_tree)
+ * uid_tag_data_tree_lock
+ * (proc_qtu_data_tree)
+ * ctrl_cmd_untag()
+ * sock_tag_list_lock
+ * uid_tag_data_tree_lock
+ *
+ */
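A minimal sketch of the acquisition order this comment prescribes, purely illustrative (the helper name is invented):

    static void example_walk_tags_locked(void)
    {
            spin_lock_bh(&sock_tag_list_lock);      /* outer lock */
            spin_lock_bh(&uid_tag_data_tree_lock);  /* inner lock */
            /* ... walk sock_tag_tree and uid_tag_data_tree here ... */
            spin_unlock_bh(&uid_tag_data_tree_lock);
            spin_unlock_bh(&sock_tag_list_lock);
    }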
+static LIST_HEAD(iface_stat_list);
+static DEFINE_SPINLOCK(iface_stat_list_lock);
+
+static struct rb_root sock_tag_tree = RB_ROOT;
+static DEFINE_SPINLOCK(sock_tag_list_lock);
+
+static struct rb_root tag_counter_set_tree = RB_ROOT;
+static DEFINE_SPINLOCK(tag_counter_set_list_lock);
+
+static struct rb_root uid_tag_data_tree = RB_ROOT;
+static DEFINE_SPINLOCK(uid_tag_data_tree_lock);
+
+static struct rb_root proc_qtu_data_tree = RB_ROOT;
+/* No proc_qtu_data_tree_lock; use uid_tag_data_tree_lock */
+
+static struct qtaguid_event_counts qtu_events;
+/*----------------------------------------------*/
+static bool can_manipulate_uids(void)
+{
+ /* root pwnd */
+ return unlikely(!current_fsuid()) || unlikely(!proc_ctrl_write_gid)
+ || in_egroup_p(proc_ctrl_write_gid);
+}
+
+static bool can_impersonate_uid(uid_t uid)
+{
+ return uid == current_fsuid() || can_manipulate_uids();
+}
+
+static bool can_read_other_uid_stats(uid_t uid)
+{
+ /* root pwnd */
+ return unlikely(!current_fsuid()) || uid == current_fsuid()
+ || unlikely(!proc_stats_readall_gid)
+ || in_egroup_p(proc_stats_readall_gid);
+}
+
+static inline void dc_add_byte_packets(struct data_counters *counters, int set,
+ enum ifs_tx_rx direction,
+ enum ifs_proto ifs_proto,
+ int bytes,
+ int packets)
+{
+ counters->bpc[set][direction][ifs_proto].bytes += bytes;
+ counters->bpc[set][direction][ifs_proto].packets += packets;
+}
+
+static inline uint64_t dc_sum_bytes(struct data_counters *counters,
+ int set,
+ enum ifs_tx_rx direction)
+{
+ return counters->bpc[set][direction][IFS_TCP].bytes
+ + counters->bpc[set][direction][IFS_UDP].bytes
+ + counters->bpc[set][direction][IFS_PROTO_OTHER].bytes;
+}
+
+static inline uint64_t dc_sum_packets(struct data_counters *counters,
+ int set,
+ enum ifs_tx_rx direction)
+{
+ return counters->bpc[set][direction][IFS_TCP].packets
+ + counters->bpc[set][direction][IFS_UDP].packets
+ + counters->bpc[set][direction][IFS_PROTO_OTHER].packets;
+}
+
+static struct tag_node *tag_node_tree_search(struct rb_root *root, tag_t tag)
+{
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ struct tag_node *data = rb_entry(node, struct tag_node, node);
+ int result;
+ RB_DEBUG("qtaguid: tag_node_tree_search(0x%llx): "
+ " node=%p data=%p\n", tag, node, data);
+ result = tag_compare(tag, data->tag);
+ RB_DEBUG("qtaguid: tag_node_tree_search(0x%llx): "
+ " data.tag=0x%llx (uid=%u) res=%d\n",
+ tag, data->tag, get_uid_from_tag(data->tag), result);
+ if (result < 0)
+ node = node->rb_left;
+ else if (result > 0)
+ node = node->rb_right;
+ else
+ return data;
+ }
+ return NULL;
+}
+
+static void tag_node_tree_insert(struct tag_node *data, struct rb_root *root)
+{
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+ /* Figure out where to put new node */
+ while (*new) {
+ struct tag_node *this = rb_entry(*new, struct tag_node,
+ node);
+ int result = tag_compare(data->tag, this->tag);
+ RB_DEBUG("qtaguid: %s(): tag=0x%llx"
+ " (uid=%u)\n", __func__,
+ this->tag,
+ get_uid_from_tag(this->tag));
+ parent = *new;
+ if (result < 0)
+ new = &((*new)->rb_left);
+ else if (result > 0)
+ new = &((*new)->rb_right);
+ else
+ BUG();
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&data->node, parent, new);
+ rb_insert_color(&data->node, root);
+}
+
+static void tag_stat_tree_insert(struct tag_stat *data, struct rb_root *root)
+{
+ tag_node_tree_insert(&data->tn, root);
+}
+
+static struct tag_stat *tag_stat_tree_search(struct rb_root *root, tag_t tag)
+{
+ struct tag_node *node = tag_node_tree_search(root, tag);
+ if (!node)
+ return NULL;
+ return rb_entry(&node->node, struct tag_stat, tn.node);
+}
+
+static void tag_counter_set_tree_insert(struct tag_counter_set *data,
+ struct rb_root *root)
+{
+ tag_node_tree_insert(&data->tn, root);
+}
+
+static struct tag_counter_set *tag_counter_set_tree_search(struct rb_root *root,
+ tag_t tag)
+{
+ struct tag_node *node = tag_node_tree_search(root, tag);
+ if (!node)
+ return NULL;
+ return rb_entry(&node->node, struct tag_counter_set, tn.node);
+
+}
+
+static void tag_ref_tree_insert(struct tag_ref *data, struct rb_root *root)
+{
+ tag_node_tree_insert(&data->tn, root);
+}
+
+static struct tag_ref *tag_ref_tree_search(struct rb_root *root, tag_t tag)
+{
+ struct tag_node *node = tag_node_tree_search(root, tag);
+ if (!node)
+ return NULL;
+ return rb_entry(&node->node, struct tag_ref, tn.node);
+}
+
+static struct sock_tag *sock_tag_tree_search(struct rb_root *root,
+ const struct sock *sk)
+{
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ struct sock_tag *data = rb_entry(node, struct sock_tag,
+ sock_node);
+ if (sk < data->sk)
+ node = node->rb_left;
+ else if (sk > data->sk)
+ node = node->rb_right;
+ else
+ return data;
+ }
+ return NULL;
+}
+
+static void sock_tag_tree_insert(struct sock_tag *data, struct rb_root *root)
+{
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+ /* Figure out where to put new node */
+ while (*new) {
+ struct sock_tag *this = rb_entry(*new, struct sock_tag,
+ sock_node);
+ parent = *new;
+ if (data->sk < this->sk)
+ new = &((*new)->rb_left);
+ else if (data->sk > this->sk)
+ new = &((*new)->rb_right);
+ else
+ BUG();
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&data->sock_node, parent, new);
+ rb_insert_color(&data->sock_node, root);
+}
+
+static void sock_tag_tree_erase(struct rb_root *st_to_free_tree)
+{
+ struct rb_node *node;
+ struct sock_tag *st_entry;
+
+ node = rb_first(st_to_free_tree);
+ while (node) {
+ st_entry = rb_entry(node, struct sock_tag, sock_node);
+ node = rb_next(node);
+ CT_DEBUG("qtaguid: %s(): "
+ "erase st: sk=%p tag=0x%llx (uid=%u)\n", __func__,
+ st_entry->sk,
+ st_entry->tag,
+ get_uid_from_tag(st_entry->tag));
+ rb_erase(&st_entry->sock_node, st_to_free_tree);
+ sockfd_put(st_entry->socket);
+ kfree(st_entry);
+ }
+}
+
+static struct proc_qtu_data *proc_qtu_data_tree_search(struct rb_root *root,
+ const pid_t pid)
+{
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ struct proc_qtu_data *data = rb_entry(node,
+ struct proc_qtu_data,
+ node);
+ if (pid < data->pid)
+ node = node->rb_left;
+ else if (pid > data->pid)
+ node = node->rb_right;
+ else
+ return data;
+ }
+ return NULL;
+}
+
+static void proc_qtu_data_tree_insert(struct proc_qtu_data *data,
+ struct rb_root *root)
+{
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+ /* Figure out where to put new node */
+ while (*new) {
+ struct proc_qtu_data *this = rb_entry(*new,
+ struct proc_qtu_data,
+ node);
+ parent = *new;
+ if (data->pid < this->pid)
+ new = &((*new)->rb_left);
+ else if (data->pid > this->pid)
+ new = &((*new)->rb_right);
+ else
+ BUG();
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&data->node, parent, new);
+ rb_insert_color(&data->node, root);
+}
+
+static void uid_tag_data_tree_insert(struct uid_tag_data *data,
+ struct rb_root *root)
+{
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+ /* Figure out where to put new node */
+ while (*new) {
+ struct uid_tag_data *this = rb_entry(*new,
+ struct uid_tag_data,
+ node);
+ parent = *new;
+ if (data->uid < this->uid)
+ new = &((*new)->rb_left);
+ else if (data->uid > this->uid)
+ new = &((*new)->rb_right);
+ else
+ BUG();
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&data->node, parent, new);
+ rb_insert_color(&data->node, root);
+}
+
+static struct uid_tag_data *uid_tag_data_tree_search(struct rb_root *root,
+ uid_t uid)
+{
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ struct uid_tag_data *data = rb_entry(node,
+ struct uid_tag_data,
+ node);
+ if (uid < data->uid)
+ node = node->rb_left;
+ else if (uid > data->uid)
+ node = node->rb_right;
+ else
+ return data;
+ }
+ return NULL;
+}
+
+/*
+ * Allocates a new uid_tag_data struct if needed.
+ * Returns a pointer to the found or allocated uid_tag_data.
+ * Returns a PTR_ERR on failures, and lock is not held.
+ * If found_res is not NULL:
+ *   sets *found_res to true if an existing entry was found,
+ *   and to false if a new entry was allocated.
+ */
+struct uid_tag_data *get_uid_data(uid_t uid, bool *found_res)
+{
+ struct uid_tag_data *utd_entry;
+
+ /* Look for top level uid_tag_data for the UID */
+ utd_entry = uid_tag_data_tree_search(&uid_tag_data_tree, uid);
+ DR_DEBUG("qtaguid: get_uid_data(%u) utd=%p\n", uid, utd_entry);
+
+ if (found_res)
+ *found_res = (utd_entry != NULL);
+ if (utd_entry)
+ return utd_entry;
+
+ utd_entry = kzalloc(sizeof(*utd_entry), GFP_ATOMIC);
+ if (!utd_entry) {
+ pr_err("qtaguid: get_uid_data(%u): "
+ "tag data alloc failed\n", uid);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ utd_entry->uid = uid;
+ utd_entry->tag_ref_tree = RB_ROOT;
+ uid_tag_data_tree_insert(utd_entry, &uid_tag_data_tree);
+ DR_DEBUG("qtaguid: get_uid_data(%u) new utd=%p\n", uid, utd_entry);
+ return utd_entry;
+}
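A hedged usage sketch, mirroring how lookup_tag_ref() below and get_tag_ref() call this under uid_tag_data_tree_lock; the wrapper name is invented:

    static int example_touch_uid(uid_t uid)
    {
            bool found_existing;
            struct uid_tag_data *utd;

            spin_lock_bh(&uid_tag_data_tree_lock);
            utd = get_uid_data(uid, &found_existing);
            if (IS_ERR(utd)) {
                    spin_unlock_bh(&uid_tag_data_tree_lock);
                    return PTR_ERR(utd);    /* -ENOMEM from the kzalloc() path */
            }
            /* found_existing is true if the UID already had an entry */
            spin_unlock_bh(&uid_tag_data_tree_lock);
            return 0;
    }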
+
+/* Never returns NULL. Either PTR_ERR or a valid ptr. */
+static struct tag_ref *new_tag_ref(tag_t new_tag,
+ struct uid_tag_data *utd_entry)
+{
+ struct tag_ref *tr_entry;
+ int res;
+
+ if (utd_entry->num_active_tags + 1 > max_sock_tags) {
+ pr_info("qtaguid: new_tag_ref(0x%llx): "
+ "tag ref alloc quota exceeded. max=%d\n",
+ new_tag, max_sock_tags);
+ res = -EMFILE;
+ goto err_res;
+
+ }
+
+ tr_entry = kzalloc(sizeof(*tr_entry), GFP_ATOMIC);
+ if (!tr_entry) {
+ pr_err("qtaguid: new_tag_ref(0x%llx): "
+ "tag ref alloc failed\n",
+ new_tag);
+ res = -ENOMEM;
+ goto err_res;
+ }
+ tr_entry->tn.tag = new_tag;
+ /* tr_entry->num_sock_tags handled by caller */
+ utd_entry->num_active_tags++;
+ tag_ref_tree_insert(tr_entry, &utd_entry->tag_ref_tree);
+ DR_DEBUG("qtaguid: new_tag_ref(0x%llx): "
+ " inserted new tag ref %p\n",
+ new_tag, tr_entry);
+ return tr_entry;
+
+err_res:
+ return ERR_PTR(res);
+}
+
+static struct tag_ref *lookup_tag_ref(tag_t full_tag,
+ struct uid_tag_data **utd_res)
+{
+ struct uid_tag_data *utd_entry;
+ struct tag_ref *tr_entry;
+ bool found_utd;
+ uid_t uid = get_uid_from_tag(full_tag);
+
+ DR_DEBUG("qtaguid: lookup_tag_ref(tag=0x%llx (uid=%u))\n",
+ full_tag, uid);
+
+ utd_entry = get_uid_data(uid, &found_utd);
+ if (IS_ERR_OR_NULL(utd_entry)) {
+ if (utd_res)
+ *utd_res = utd_entry;
+ return NULL;
+ }
+
+ tr_entry = tag_ref_tree_search(&utd_entry->tag_ref_tree, full_tag);
+ if (utd_res)
+ *utd_res = utd_entry;
+ DR_DEBUG("qtaguid: lookup_tag_ref(0x%llx) utd_entry=%p tr_entry=%p\n",
+ full_tag, utd_entry, tr_entry);
+ return tr_entry;
+}
+
+/* Never returns NULL. Either PTR_ERR or a valid ptr. */
+static struct tag_ref *get_tag_ref(tag_t full_tag,
+ struct uid_tag_data **utd_res)
+{
+ struct uid_tag_data *utd_entry;
+ struct tag_ref *tr_entry;
+
+ DR_DEBUG("qtaguid: get_tag_ref(0x%llx)\n",
+ full_tag);
+ spin_lock_bh(&uid_tag_data_tree_lock);
+ tr_entry = lookup_tag_ref(full_tag, &utd_entry);
+ BUG_ON(IS_ERR_OR_NULL(utd_entry));
+ if (!tr_entry)
+ tr_entry = new_tag_ref(full_tag, utd_entry);
+
+ spin_unlock_bh(&uid_tag_data_tree_lock);
+ if (utd_res)
+ *utd_res = utd_entry;
+ DR_DEBUG("qtaguid: get_tag_ref(0x%llx) utd=%p tr=%p\n",
+ full_tag, utd_entry, tr_entry);
+ return tr_entry;
+}
+
+/* Checks and maybe frees the UID Tag Data entry */
+static void put_utd_entry(struct uid_tag_data *utd_entry)
+{
+ /* Are we done with the UID tag data entry? */
+ if (RB_EMPTY_ROOT(&utd_entry->tag_ref_tree) &&
+ !utd_entry->num_pqd) {
+ DR_DEBUG("qtaguid: %s(): "
+ "erase utd_entry=%p uid=%u "
+ "by pid=%u tgid=%u uid=%u\n", __func__,
+ utd_entry, utd_entry->uid,
+ current->pid, current->tgid, current_fsuid());
+ BUG_ON(utd_entry->num_active_tags);
+ rb_erase(&utd_entry->node, &uid_tag_data_tree);
+ kfree(utd_entry);
+ } else {
+ DR_DEBUG("qtaguid: %s(): "
+ "utd_entry=%p still has %d tags %d proc_qtu_data\n",
+ __func__, utd_entry, utd_entry->num_active_tags,
+ utd_entry->num_pqd);
+ BUG_ON(!(utd_entry->num_active_tags ||
+ utd_entry->num_pqd));
+ }
+}
+
+/*
+ * If no sock_tags are using this tag_ref,
+ * decrements refcount of utd_entry, removes tr_entry
+ * from utd_entry->tag_ref_tree and frees.
+ */
+static void free_tag_ref_from_utd_entry(struct tag_ref *tr_entry,
+ struct uid_tag_data *utd_entry)
+{
+ DR_DEBUG("qtaguid: %s(): %p tag=0x%llx (uid=%u)\n", __func__,
+ tr_entry, tr_entry->tn.tag,
+ get_uid_from_tag(tr_entry->tn.tag));
+ if (!tr_entry->num_sock_tags) {
+ BUG_ON(!utd_entry->num_active_tags);
+ utd_entry->num_active_tags--;
+ rb_erase(&tr_entry->tn.node, &utd_entry->tag_ref_tree);
+ DR_DEBUG("qtaguid: %s(): erased %p\n", __func__, tr_entry);
+ kfree(tr_entry);
+ }
+}
+
+static void put_tag_ref_tree(tag_t full_tag, struct uid_tag_data *utd_entry)
+{
+ struct rb_node *node;
+ struct tag_ref *tr_entry;
+ tag_t acct_tag;
+
+ DR_DEBUG("qtaguid: %s(tag=0x%llx (uid=%u))\n", __func__,
+ full_tag, get_uid_from_tag(full_tag));
+ acct_tag = get_atag_from_tag(full_tag);
+ node = rb_first(&utd_entry->tag_ref_tree);
+ while (node) {
+ tr_entry = rb_entry(node, struct tag_ref, tn.node);
+ node = rb_next(node);
+ if (!acct_tag || tr_entry->tn.tag == full_tag)
+ free_tag_ref_from_utd_entry(tr_entry, utd_entry);
+ }
+}
+
+static int read_proc_u64(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len;
+ uint64_t value;
+ char *p = page;
+ uint64_t *iface_entry = data;
+
+ if (!data)
+ return 0;
+
+ value = *iface_entry;
+ p += sprintf(p, "%llu\n", value);
+ len = (p - page) - off;
+ *eof = (len <= count) ? 1 : 0;
+ *start = page + off;
+ return len;
+}
+
+static int read_proc_bool(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len;
+ bool value;
+ char *p = page;
+ bool *bool_entry = data;
+
+ if (!data)
+ return 0;
+
+ value = *bool_entry;
+ p += sprintf(p, "%u\n", value);
+ len = (p - page) - off;
+ *eof = (len <= count) ? 1 : 0;
+ *start = page + off;
+ return len;
+}
+
+static int get_active_counter_set(tag_t tag)
+{
+ int active_set = 0;
+ struct tag_counter_set *tcs;
+
+ MT_DEBUG("qtaguid: get_active_counter_set(tag=0x%llx)"
+ " (uid=%u)\n",
+ tag, get_uid_from_tag(tag));
+ /* For now we only handle UID tags for active sets */
+ tag = get_utag_from_tag(tag);
+ spin_lock_bh(&tag_counter_set_list_lock);
+ tcs = tag_counter_set_tree_search(&tag_counter_set_tree, tag);
+ if (tcs)
+ active_set = tcs->active_set;
+ spin_unlock_bh(&tag_counter_set_list_lock);
+ return active_set;
+}
+
+/*
+ * Find the entry for tracking the specified interface.
+ * Caller must hold iface_stat_list_lock
+ */
+static struct iface_stat *get_iface_entry(const char *ifname)
+{
+ struct iface_stat *iface_entry;
+
+ /* Reject a NULL device name; there is nothing to look up */
+ if (ifname == NULL) {
+ pr_info("qtaguid: iface_stat: get() NULL device name\n");
+ return NULL;
+ }
+
+ /* Iterate over interfaces */
+ list_for_each_entry(iface_entry, &iface_stat_list, list) {
+ if (!strcmp(ifname, iface_entry->ifname))
+ goto done;
+ }
+ iface_entry = NULL;
+done:
+ return iface_entry;
+}
+
+static int iface_stat_fmt_proc_read(char *page, char **num_items_returned,
+ off_t items_to_skip, int char_count,
+ int *eof, void *data)
+{
+ char *outp = page;
+ int item_index = 0;
+ int len;
+ int fmt = (int)data; /* The data is just 1 (old) or 2 (uses fmt) */
+ struct iface_stat *iface_entry;
+ struct rtnl_link_stats64 dev_stats, *stats;
+ struct rtnl_link_stats64 no_dev_stats = {0};
+
+ if (unlikely(module_passive)) {
+ *eof = 1;
+ return 0;
+ }
+
+ CT_DEBUG("qtaguid:proc iface_stat_fmt "
+ "pid=%u tgid=%u uid=%u "
+ "page=%p *num_items_returned=%p off=%ld "
+ "char_count=%d *eof=%d\n",
+ current->pid, current->tgid, current_fsuid(),
+ page, *num_items_returned,
+ items_to_skip, char_count, *eof);
+
+ if (*eof)
+ return 0;
+
+ if (fmt == 2 && item_index++ >= items_to_skip) {
+ len = snprintf(outp, char_count,
+ "ifname "
+ "total_skb_rx_bytes total_skb_rx_packets "
+ "total_skb_tx_bytes total_skb_tx_packets\n"
+ );
+ if (len >= char_count) {
+ *outp = '\0';
+ return outp - page;
+ }
+ outp += len;
+ char_count -= len;
+ (*num_items_returned)++;
+ }
+
+ /*
+ * This lock will prevent iface_stat_update() from changing active,
+ * and in turn prevent an interface from unregistering itself.
+ */
+ spin_lock_bh(&iface_stat_list_lock);
+ list_for_each_entry(iface_entry, &iface_stat_list, list) {
+ if (item_index++ < items_to_skip)
+ continue;
+
+ if (iface_entry->active) {
+ stats = dev_get_stats(iface_entry->net_dev,
+ &dev_stats);
+ } else {
+ stats = &no_dev_stats;
+ }
+ /*
+ * If the meaning of the data changes, then update the fmtX
+ * string.
+ */
+ if (fmt == 1) {
+ len = snprintf(
+ outp, char_count,
+ "%s %d "
+ "%llu %llu %llu %llu "
+ "%llu %llu %llu %llu\n",
+ iface_entry->ifname,
+ iface_entry->active,
+ iface_entry->totals_via_dev[IFS_RX].bytes,
+ iface_entry->totals_via_dev[IFS_RX].packets,
+ iface_entry->totals_via_dev[IFS_TX].bytes,
+ iface_entry->totals_via_dev[IFS_TX].packets,
+ stats->rx_bytes, stats->rx_packets,
+ stats->tx_bytes, stats->tx_packets
+ );
+ } else {
+ len = snprintf(
+ outp, char_count,
+ "%s "
+ "%llu %llu %llu %llu\n",
+ iface_entry->ifname,
+ iface_entry->totals_via_skb[IFS_RX].bytes,
+ iface_entry->totals_via_skb[IFS_RX].packets,
+ iface_entry->totals_via_skb[IFS_TX].bytes,
+ iface_entry->totals_via_skb[IFS_TX].packets
+ );
+ }
+ if (len >= char_count) {
+ spin_unlock_bh(&iface_stat_list_lock);
+ *outp = '\0';
+ return outp - page;
+ }
+ outp += len;
+ char_count -= len;
+ (*num_items_returned)++;
+ }
+ spin_unlock_bh(&iface_stat_list_lock);
+
+ *eof = 1;
+ return outp - page;
+}
+
+static void iface_create_proc_worker(struct work_struct *work)
+{
+ struct proc_dir_entry *proc_entry;
+ struct iface_stat_work *isw = container_of(work, struct iface_stat_work,
+ iface_work);
+ struct iface_stat *new_iface = isw->iface_entry;
+
+ /* iface_entries are not deleted, so safe to manipulate. */
+ proc_entry = proc_mkdir(new_iface->ifname, iface_stat_procdir);
+ if (IS_ERR_OR_NULL(proc_entry)) {
+ pr_err("qtaguid: iface_stat: create_proc(): alloc failed.\n");
+ kfree(isw);
+ return;
+ }
+
+ new_iface->proc_ptr = proc_entry;
+
+ create_proc_read_entry("tx_bytes", proc_iface_perms, proc_entry,
+ read_proc_u64,
+ &new_iface->totals_via_dev[IFS_TX].bytes);
+ create_proc_read_entry("rx_bytes", proc_iface_perms, proc_entry,
+ read_proc_u64,
+ &new_iface->totals_via_dev[IFS_RX].bytes);
+ create_proc_read_entry("tx_packets", proc_iface_perms, proc_entry,
+ read_proc_u64,
+ &new_iface->totals_via_dev[IFS_TX].packets);
+ create_proc_read_entry("rx_packets", proc_iface_perms, proc_entry,
+ read_proc_u64,
+ &new_iface->totals_via_dev[IFS_RX].packets);
+ create_proc_read_entry("active", proc_iface_perms, proc_entry,
+ read_proc_bool, &new_iface->active);
+
+ IF_DEBUG("qtaguid: iface_stat: create_proc(): done "
+ "entry=%p dev=%s\n", new_iface, new_iface->ifname);
+ kfree(isw);
+}
+
+/*
+ * Will set the entry's active state, and
+ * update the net_dev accordingly also.
+ */
+static void _iface_stat_set_active(struct iface_stat *entry,
+ struct net_device *net_dev,
+ bool activate)
+{
+ if (activate) {
+ entry->net_dev = net_dev;
+ entry->active = true;
+ IF_DEBUG("qtaguid: %s(%s): "
+ "enable tracking. rfcnt=%d\n", __func__,
+ entry->ifname,
+ percpu_read(*net_dev->pcpu_refcnt));
+ } else {
+ entry->active = false;
+ entry->net_dev = NULL;
+ IF_DEBUG("qtaguid: %s(%s): "
+ "disable tracking. rfcnt=%d\n", __func__,
+ entry->ifname,
+ percpu_read(*net_dev->pcpu_refcnt));
+
+ }
+}
+
+/* Caller must hold iface_stat_list_lock */
+static struct iface_stat *iface_alloc(struct net_device *net_dev)
+{
+ struct iface_stat *new_iface;
+ struct iface_stat_work *isw;
+
+ new_iface = kzalloc(sizeof(*new_iface), GFP_ATOMIC);
+ if (new_iface == NULL) {
+ pr_err("qtaguid: iface_stat: create(%s): "
+ "iface_stat alloc failed\n", net_dev->name);
+ return NULL;
+ }
+ new_iface->ifname = kstrdup(net_dev->name, GFP_ATOMIC);
+ if (new_iface->ifname == NULL) {
+ pr_err("qtaguid: iface_stat: create(%s): "
+ "ifname alloc failed\n", net_dev->name);
+ kfree(new_iface);
+ return NULL;
+ }
+ spin_lock_init(&new_iface->tag_stat_list_lock);
+ new_iface->tag_stat_tree = RB_ROOT;
+ _iface_stat_set_active(new_iface, net_dev, true);
+
+ /*
+ * ipv6 notifier chains are atomic :(
+ * No create_proc_read_entry() for you!
+ */
+ isw = kmalloc(sizeof(*isw), GFP_ATOMIC);
+ if (!isw) {
+ pr_err("qtaguid: iface_stat: create(%s): "
+ "work alloc failed\n", new_iface->ifname);
+ _iface_stat_set_active(new_iface, net_dev, false);
+ kfree(new_iface->ifname);
+ kfree(new_iface);
+ return NULL;
+ }
+ isw->iface_entry = new_iface;
+ INIT_WORK(&isw->iface_work, iface_create_proc_worker);
+ schedule_work(&isw->iface_work);
+ list_add(&new_iface->list, &iface_stat_list);
+ return new_iface;
+}
+
+static void iface_check_stats_reset_and_adjust(struct net_device *net_dev,
+ struct iface_stat *iface)
+{
+ struct rtnl_link_stats64 dev_stats, *stats;
+ bool stats_rewound;
+
+ stats = dev_get_stats(net_dev, &dev_stats);
+ /* No empty packets */
+ stats_rewound =
+ (stats->rx_bytes < iface->last_known[IFS_RX].bytes)
+ || (stats->tx_bytes < iface->last_known[IFS_TX].bytes);
+
+ IF_DEBUG("qtaguid: %s(%s): iface=%p netdev=%p "
+ "bytes rx/tx=%llu/%llu "
+ "active=%d last_known=%d "
+ "stats_rewound=%d\n", __func__,
+ net_dev ? net_dev->name : "?",
+ iface, net_dev,
+ stats->rx_bytes, stats->tx_bytes,
+ iface->active, iface->last_known_valid, stats_rewound);
+
+ if (iface->active && iface->last_known_valid && stats_rewound) {
+ pr_warn_once("qtaguid: iface_stat: %s(%s): "
+ "iface reset its stats unexpectedly\n", __func__,
+ net_dev->name);
+
+ iface->totals_via_dev[IFS_TX].bytes +=
+ iface->last_known[IFS_TX].bytes;
+ iface->totals_via_dev[IFS_TX].packets +=
+ iface->last_known[IFS_TX].packets;
+ iface->totals_via_dev[IFS_RX].bytes +=
+ iface->last_known[IFS_RX].bytes;
+ iface->totals_via_dev[IFS_RX].packets +=
+ iface->last_known[IFS_RX].packets;
+ iface->last_known_valid = false;
+ IF_DEBUG("qtaguid: %s(%s): iface=%p "
+ "used last known bytes rx/tx=%llu/%llu\n", __func__,
+ iface->ifname, iface, iface->last_known[IFS_RX].bytes,
+ iface->last_known[IFS_TX].bytes);
+ }
+}
+
+/*
+ * Create a new entry for tracking the specified interface.
+ * Do nothing if the entry already exists.
+ * Called when an interface is configured with a valid IP address.
+ */
+static void iface_stat_create(struct net_device *net_dev,
+ struct in_ifaddr *ifa)
+{
+ struct in_device *in_dev = NULL;
+ const char *ifname;
+ struct iface_stat *entry;
+ __be32 ipaddr = 0;
+ struct iface_stat *new_iface;
+
+ IF_DEBUG("qtaguid: iface_stat: create(%s): ifa=%p netdev=%p\n",
+ net_dev ? net_dev->name : "?",
+ ifa, net_dev);
+ if (!net_dev) {
+ pr_err("qtaguid: iface_stat: create(): no net dev\n");
+ return;
+ }
+
+ ifname = net_dev->name;
+ if (!ifa) {
+ in_dev = in_dev_get(net_dev);
+ if (!in_dev) {
+ pr_err("qtaguid: iface_stat: create(%s): no inet dev\n",
+ ifname);
+ return;
+ }
+ IF_DEBUG("qtaguid: iface_stat: create(%s): in_dev=%p\n",
+ ifname, in_dev);
+ for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
+ IF_DEBUG("qtaguid: iface_stat: create(%s): "
+ "ifa=%p ifa_label=%s\n",
+ ifname, ifa,
+ ifa->ifa_label ? ifa->ifa_label : "(null)");
+ if (ifa->ifa_label && !strcmp(ifname, ifa->ifa_label))
+ break;
+ }
+ }
+
+ if (!ifa) {
+ IF_DEBUG("qtaguid: iface_stat: create(%s): no matching IP\n",
+ ifname);
+ goto done_put;
+ }
+ ipaddr = ifa->ifa_local;
+
+ spin_lock_bh(&iface_stat_list_lock);
+ entry = get_iface_entry(ifname);
+ if (entry != NULL) {
+ bool activate = !ipv4_is_loopback(ipaddr);
+ IF_DEBUG("qtaguid: iface_stat: create(%s): entry=%p\n",
+ ifname, entry);
+ iface_check_stats_reset_and_adjust(net_dev, entry);
+ _iface_stat_set_active(entry, net_dev, activate);
+ IF_DEBUG("qtaguid: %s(%s): "
+ "tracking now %d on ip=%pI4\n", __func__,
+ entry->ifname, activate, &ipaddr);
+ goto done_unlock_put;
+ } else if (ipv4_is_loopback(ipaddr)) {
+ IF_DEBUG("qtaguid: iface_stat: create(%s): "
+ "ignore loopback dev. ip=%pI4\n", ifname, &ipaddr);
+ goto done_unlock_put;
+ }
+
+ new_iface = iface_alloc(net_dev);
+ IF_DEBUG("qtaguid: iface_stat: create(%s): done "
+ "entry=%p ip=%pI4\n", ifname, new_iface, &ipaddr);
+done_unlock_put:
+ spin_unlock_bh(&iface_stat_list_lock);
+done_put:
+ if (in_dev)
+ in_dev_put(in_dev);
+}
+
+static void iface_stat_create_ipv6(struct net_device *net_dev,
+ struct inet6_ifaddr *ifa)
+{
+ struct in_device *in_dev;
+ const char *ifname;
+ struct iface_stat *entry;
+ struct iface_stat *new_iface;
+ int addr_type;
+
+ IF_DEBUG("qtaguid: iface_stat: create6(): ifa=%p netdev=%p->name=%s\n",
+ ifa, net_dev, net_dev ? net_dev->name : "");
+ if (!net_dev) {
+ pr_err("qtaguid: iface_stat: create6(): no net dev!\n");
+ return;
+ }
+ ifname = net_dev->name;
+
+ in_dev = in_dev_get(net_dev);
+ if (!in_dev) {
+ pr_err("qtaguid: iface_stat: create6(%s): no inet dev\n",
+ ifname);
+ return;
+ }
+
+ IF_DEBUG("qtaguid: iface_stat: create6(%s): in_dev=%p\n",
+ ifname, in_dev);
+
+ if (!ifa) {
+ IF_DEBUG("qtaguid: iface_stat: create6(%s): no matching IP\n",
+ ifname);
+ goto done_put;
+ }
+ addr_type = ipv6_addr_type(&ifa->addr);
+
+ spin_lock_bh(&iface_stat_list_lock);
+ entry = get_iface_entry(ifname);
+ if (entry != NULL) {
+ bool activate = !(addr_type & IPV6_ADDR_LOOPBACK);
+ IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
+ ifname, entry);
+ iface_check_stats_reset_and_adjust(net_dev, entry);
+ _iface_stat_set_active(entry, net_dev, activate);
+ IF_DEBUG("qtaguid: %s(%s): "
+ "tracking now %d on ip=%pI6c\n", __func__,
+ entry->ifname, activate, &ifa->addr);
+ goto done_unlock_put;
+ } else if (addr_type & IPV6_ADDR_LOOPBACK) {
+ IF_DEBUG("qtaguid: %s(%s): "
+ "ignore loopback dev. ip=%pI6c\n", __func__,
+ ifname, &ifa->addr);
+ goto done_unlock_put;
+ }
+
+ new_iface = iface_alloc(net_dev);
+ IF_DEBUG("qtaguid: iface_stat: create6(%s): done "
+ "entry=%p ip=%pI6c\n", ifname, new_iface, &ifa->addr);
+
+done_unlock_put:
+ spin_unlock_bh(&iface_stat_list_lock);
+done_put:
+ in_dev_put(in_dev);
+}
+
+static struct sock_tag *get_sock_stat_nl(const struct sock *sk)
+{
+ MT_DEBUG("qtaguid: get_sock_stat_nl(sk=%p)\n", sk);
+ return sock_tag_tree_search(&sock_tag_tree, sk);
+}
+
+static struct sock_tag *get_sock_stat(const struct sock *sk)
+{
+ struct sock_tag *sock_tag_entry;
+ MT_DEBUG("qtaguid: get_sock_stat(sk=%p)\n", sk);
+ if (!sk)
+ return NULL;
+ spin_lock_bh(&sock_tag_list_lock);
+ sock_tag_entry = get_sock_stat_nl(sk);
+ spin_unlock_bh(&sock_tag_list_lock);
+ return sock_tag_entry;
+}
+
+static int ipx_proto(const struct sk_buff *skb,
+ struct xt_action_param *par)
+{
+ int thoff, tproto;
+
+ switch (par->family) {
+ case NFPROTO_IPV6:
+ tproto = ipv6_find_hdr(skb, &thoff, -1, NULL);
+ if (tproto < 0)
+ MT_DEBUG("%s(): transport header not found in ipv6"
+ " skb=%p\n", __func__, skb);
+ break;
+ case NFPROTO_IPV4:
+ tproto = ip_hdr(skb)->protocol;
+ break;
+ default:
+ tproto = IPPROTO_RAW;
+ }
+ return tproto;
+}
+
+static void
+data_counters_update(struct data_counters *dc, int set,
+ enum ifs_tx_rx direction, int proto, int bytes)
+{
+ switch (proto) {
+ case IPPROTO_TCP:
+ dc_add_byte_packets(dc, set, direction, IFS_TCP, bytes, 1);
+ break;
+ case IPPROTO_UDP:
+ dc_add_byte_packets(dc, set, direction, IFS_UDP, bytes, 1);
+ break;
+ case IPPROTO_IP:
+ default:
+ dc_add_byte_packets(dc, set, direction, IFS_PROTO_OTHER, bytes,
+ 1);
+ break;
+ }
+}
+
+/*
+ * Update stats for the specified interface. Do nothing if the entry
+ * does not exist (when a device was never configured with an IP address).
+ * Called when a device is being unregistered.
+ */
+static void iface_stat_update(struct net_device *net_dev, bool stash_only)
+{
+ struct rtnl_link_stats64 dev_stats, *stats;
+ struct iface_stat *entry;
+
+ stats = dev_get_stats(net_dev, &dev_stats);
+ spin_lock_bh(&iface_stat_list_lock);
+ entry = get_iface_entry(net_dev->name);
+ if (entry == NULL) {
+ IF_DEBUG("qtaguid: iface_stat: update(%s): not tracked\n",
+ net_dev->name);
+ spin_unlock_bh(&iface_stat_list_lock);
+ return;
+ }
+
+ IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
+ net_dev->name, entry);
+ if (!entry->active) {
+ IF_DEBUG("qtaguid: %s(%s): already disabled\n", __func__,
+ net_dev->name);
+ spin_unlock_bh(&iface_stat_list_lock);
+ return;
+ }
+
+ if (stash_only) {
+ entry->last_known[IFS_TX].bytes = stats->tx_bytes;
+ entry->last_known[IFS_TX].packets = stats->tx_packets;
+ entry->last_known[IFS_RX].bytes = stats->rx_bytes;
+ entry->last_known[IFS_RX].packets = stats->rx_packets;
+ entry->last_known_valid = true;
+ IF_DEBUG("qtaguid: %s(%s): "
+ "dev stats stashed rx/tx=%llu/%llu\n", __func__,
+ net_dev->name, stats->rx_bytes, stats->tx_bytes);
+ spin_unlock_bh(&iface_stat_list_lock);
+ return;
+ }
+ entry->totals_via_dev[IFS_TX].bytes += stats->tx_bytes;
+ entry->totals_via_dev[IFS_TX].packets += stats->tx_packets;
+ entry->totals_via_dev[IFS_RX].bytes += stats->rx_bytes;
+ entry->totals_via_dev[IFS_RX].packets += stats->rx_packets;
+ /* We don't need the last_known[] anymore */
+ entry->last_known_valid = false;
+ _iface_stat_set_active(entry, net_dev, false);
+ IF_DEBUG("qtaguid: %s(%s): "
+ "disable tracking. rx/tx=%llu/%llu\n", __func__,
+ net_dev->name, stats->rx_bytes, stats->tx_bytes);
+ spin_unlock_bh(&iface_stat_list_lock);
+}
+
+/*
+ * Update stats for the specified interface from the skb.
+ * Do nothing if the entry does not exist
+ * (when a device was never configured with an IP address).
+ * Called for each skb.
+ */
+static void iface_stat_update_from_skb(const struct sk_buff *skb,
+ struct xt_action_param *par)
+{
+ struct iface_stat *entry;
+ const struct net_device *el_dev;
+ enum ifs_tx_rx direction = par->in ? IFS_RX : IFS_TX;
+ int bytes = skb->len;
+
+ if (!skb->dev) {
+ MT_DEBUG("qtaguid[%d]: no skb->dev\n", par->hooknum);
+ el_dev = par->in ? : par->out;
+ } else {
+ const struct net_device *other_dev;
+ el_dev = skb->dev;
+ other_dev = par->in ? : par->out;
+ if (el_dev != other_dev) {
+ MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs "
+ "par->(in/out)=%p %s\n",
+ par->hooknum, el_dev, el_dev->name, other_dev,
+ other_dev->name);
+ }
+ }
+
+ if (unlikely(!el_dev)) {
+ pr_err("qtaguid[%d]: %s(): no par->in/out?!!\n",
+ par->hooknum, __func__);
+ BUG();
+ } else if (unlikely(!el_dev->name)) {
+ pr_err("qtaguid[%d]: %s(): no dev->name?!!\n",
+ par->hooknum, __func__);
+ BUG();
+ } else {
+ int proto = ipx_proto(skb, par);
+ MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d\n",
+ par->hooknum, el_dev->name, el_dev->type,
+ par->family, proto);
+ }
+
+ spin_lock_bh(&iface_stat_list_lock);
+ entry = get_iface_entry(el_dev->name);
+ if (entry == NULL) {
+ IF_DEBUG("qtaguid: iface_stat: %s(%s): not tracked\n",
+ __func__, el_dev->name);
+ spin_unlock_bh(&iface_stat_list_lock);
+ return;
+ }
+
+ IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
+ el_dev->name, entry);
+
+ entry->totals_via_skb[direction].bytes += bytes;
+ entry->totals_via_skb[direction].packets++;
+ spin_unlock_bh(&iface_stat_list_lock);
+}
+
+static void tag_stat_update(struct tag_stat *tag_entry,
+ enum ifs_tx_rx direction, int proto, int bytes)
+{
+ int active_set;
+ active_set = get_active_counter_set(tag_entry->tn.tag);
+ MT_DEBUG("qtaguid: tag_stat_update(tag=0x%llx (uid=%u) set=%d "
+ "dir=%d proto=%d bytes=%d)\n",
+ tag_entry->tn.tag, get_uid_from_tag(tag_entry->tn.tag),
+ active_set, direction, proto, bytes);
+ data_counters_update(&tag_entry->counters, active_set, direction,
+ proto, bytes);
+ if (tag_entry->parent_counters)
+ data_counters_update(tag_entry->parent_counters, active_set,
+ direction, proto, bytes);
+}
+
+/*
+ * Create a new entry for tracking the specified {acct_tag,uid_tag} within
+ * the interface.
+ * iface_entry->tag_stat_list_lock should be held.
+ */
+static struct tag_stat *create_if_tag_stat(struct iface_stat *iface_entry,
+ tag_t tag)
+{
+ struct tag_stat *new_tag_stat_entry = NULL;
+ IF_DEBUG("qtaguid: iface_stat: %s(): ife=%p tag=0x%llx"
+ " (uid=%u)\n", __func__,
+ iface_entry, tag, get_uid_from_tag(tag));
+ new_tag_stat_entry = kzalloc(sizeof(*new_tag_stat_entry), GFP_ATOMIC);
+ if (!new_tag_stat_entry) {
+ pr_err("qtaguid: iface_stat: tag stat alloc failed\n");
+ goto done;
+ }
+ new_tag_stat_entry->tn.tag = tag;
+ tag_stat_tree_insert(new_tag_stat_entry, &iface_entry->tag_stat_tree);
+done:
+ return new_tag_stat_entry;
+}
+
+static void if_tag_stat_update(const char *ifname, uid_t uid,
+ const struct sock *sk, enum ifs_tx_rx direction,
+ int proto, int bytes)
+{
+ struct tag_stat *tag_stat_entry;
+ tag_t tag, acct_tag;
+ tag_t uid_tag;
+ struct data_counters *uid_tag_counters;
+ struct sock_tag *sock_tag_entry;
+ struct iface_stat *iface_entry;
+ struct tag_stat *new_tag_stat = NULL;
+ MT_DEBUG("qtaguid: if_tag_stat_update(ifname=%s "
+ "uid=%u sk=%p dir=%d proto=%d bytes=%d)\n",
+ ifname, uid, sk, direction, proto, bytes);
+
+
+ iface_entry = get_iface_entry(ifname);
+ if (!iface_entry) {
+ pr_err("qtaguid: iface_stat: stat_update() %s not found\n",
+ ifname);
+ return;
+ }
+ /* It is ok to process data when an iface_entry is inactive */
+
+ MT_DEBUG("qtaguid: iface_stat: stat_update() dev=%s entry=%p\n",
+ ifname, iface_entry);
+
+ /*
+ * Look for a tagged sock.
+ * It will have an acct_uid.
+ */
+ sock_tag_entry = get_sock_stat(sk);
+ if (sock_tag_entry) {
+ tag = sock_tag_entry->tag;
+ acct_tag = get_atag_from_tag(tag);
+ uid_tag = get_utag_from_tag(tag);
+ } else {
+ acct_tag = make_atag_from_value(0);
+ tag = combine_atag_with_uid(acct_tag, uid);
+ uid_tag = make_tag_from_uid(uid);
+ }
+ MT_DEBUG("qtaguid: iface_stat: stat_update(): "
+ " looking for tag=0x%llx (uid=%u) in ife=%p\n",
+ tag, get_uid_from_tag(tag), iface_entry);
+ /* Loop over tag list under this interface for {acct_tag,uid_tag} */
+ spin_lock_bh(&iface_entry->tag_stat_list_lock);
+
+ tag_stat_entry = tag_stat_tree_search(&iface_entry->tag_stat_tree,
+ tag);
+ if (tag_stat_entry) {
+ /*
+ * Updating the {acct_tag, uid_tag} entry handles both stats:
+ * {0, uid_tag} will also get updated.
+ */
+ tag_stat_update(tag_stat_entry, direction, proto, bytes);
+ spin_unlock_bh(&iface_entry->tag_stat_list_lock);
+ return;
+ }
+
+ /* Loop over tag list under this interface for {0,uid_tag} */
+ tag_stat_entry = tag_stat_tree_search(&iface_entry->tag_stat_tree,
+ uid_tag);
+ if (!tag_stat_entry) {
+ /* Here: the base uid_tag did not exist */
+ /*
+ * No parent counters. So
+ * - No {0, uid_tag} stats and no {acc_tag, uid_tag} stats.
+ */
+ new_tag_stat = create_if_tag_stat(iface_entry, uid_tag);
+ if (!new_tag_stat)
+ goto unlock;
+ uid_tag_counters = &new_tag_stat->counters;
+ } else {
+ uid_tag_counters = &tag_stat_entry->counters;
+ }
+
+ if (acct_tag) {
+ /* Create the child {acct_tag, uid_tag} and hook up parent. */
+ new_tag_stat = create_if_tag_stat(iface_entry, tag);
+ if (!new_tag_stat)
+ goto unlock;
+ new_tag_stat->parent_counters = uid_tag_counters;
+ } else {
+ /*
+ * For new_tag_stat to be still NULL here would require:
+ * {0, uid_tag} exists
+ * and {acct_tag, uid_tag} doesn't exist
+ * AND acct_tag == 0.
+ * Impossible. This reassures us that new_tag_stat
+ * below will always be assigned.
+ */
+ BUG_ON(!new_tag_stat);
+ }
+ tag_stat_update(new_tag_stat, direction, proto, bytes);
+unlock:
+ spin_unlock_bh(&iface_entry->tag_stat_list_lock);
+}
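+
+/*
+ * Illustrative sketch of the billing done above (no extra logic, just an
+ * example with assumed values): for a socket tagged by uid 10003 with
+ * acct_tag value 2, if_tag_stat_update() bills the same bytes to
+ *   {acct_tag=0x200000000, uid_tag=10003}  via tag_stat.counters, and
+ *   {acct_tag=0x0,         uid_tag=10003}  via tag_stat.parent_counters,
+ * so per-acct-tag stats and the per-uid totals stay in sync without a
+ * second tree lookup on the data path.
+ */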
+
+static int iface_netdev_event_handler(struct notifier_block *nb,
+ unsigned long event, void *ptr) {
+ struct net_device *dev = ptr;
+
+ if (unlikely(module_passive))
+ return NOTIFY_DONE;
+
+ IF_DEBUG("qtaguid: iface_stat: netdev_event(): "
+ "ev=0x%lx/%s netdev=%p->name=%s\n",
+ event, netdev_evt_str(event), dev, dev ? dev->name : "");
+
+ switch (event) {
+ case NETDEV_UP:
+ iface_stat_create(dev, NULL);
+ atomic64_inc(&qtu_events.iface_events);
+ break;
+ case NETDEV_DOWN:
+ case NETDEV_UNREGISTER:
+ iface_stat_update(dev, event == NETDEV_DOWN);
+ atomic64_inc(&qtu_events.iface_events);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static int iface_inet6addr_event_handler(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct inet6_ifaddr *ifa = ptr;
+ struct net_device *dev;
+
+ if (unlikely(module_passive))
+ return NOTIFY_DONE;
+
+ IF_DEBUG("qtaguid: iface_stat: inet6addr_event(): "
+ "ev=0x%lx/%s ifa=%p\n",
+ event, netdev_evt_str(event), ifa);
+
+ switch (event) {
+ case NETDEV_UP:
+ BUG_ON(!ifa || !ifa->idev);
+ dev = (struct net_device *)ifa->idev->dev;
+ iface_stat_create_ipv6(dev, ifa);
+ atomic64_inc(&qtu_events.iface_events);
+ break;
+ case NETDEV_DOWN:
+ case NETDEV_UNREGISTER:
+ BUG_ON(!ifa || !ifa->idev);
+ dev = (struct net_device *)ifa->idev->dev;
+ iface_stat_update(dev, event == NETDEV_DOWN);
+ atomic64_inc(&qtu_events.iface_events);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static int iface_inetaddr_event_handler(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct in_ifaddr *ifa = ptr;
+ struct net_device *dev;
+
+ if (unlikely(module_passive))
+ return NOTIFY_DONE;
+
+ IF_DEBUG("qtaguid: iface_stat: inetaddr_event(): "
+ "ev=0x%lx/%s ifa=%p\n",
+ event, netdev_evt_str(event), ifa);
+
+ switch (event) {
+ case NETDEV_UP:
+ BUG_ON(!ifa || !ifa->ifa_dev);
+ dev = ifa->ifa_dev->dev;
+ iface_stat_create(dev, ifa);
+ atomic64_inc(&qtu_events.iface_events);
+ break;
+ case NETDEV_DOWN:
+ case NETDEV_UNREGISTER:
+ BUG_ON(!ifa || !ifa->ifa_dev);
+ dev = ifa->ifa_dev->dev;
+ iface_stat_update(dev, event == NETDEV_DOWN);
+ atomic64_inc(&qtu_events.iface_events);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block iface_netdev_notifier_blk = {
+ .notifier_call = iface_netdev_event_handler,
+};
+
+static struct notifier_block iface_inetaddr_notifier_blk = {
+ .notifier_call = iface_inetaddr_event_handler,
+};
+
+static struct notifier_block iface_inet6addr_notifier_blk = {
+ .notifier_call = iface_inet6addr_event_handler,
+};
+
+static int __init iface_stat_init(struct proc_dir_entry *parent_procdir)
+{
+ int err;
+
+ iface_stat_procdir = proc_mkdir(iface_stat_procdirname, parent_procdir);
+ if (!iface_stat_procdir) {
+ pr_err("qtaguid: iface_stat: init failed to create proc entry\n");
+ err = -1;
+ goto err;
+ }
+
+ iface_stat_all_procfile = create_proc_entry(iface_stat_all_procfilename,
+ proc_iface_perms,
+ parent_procdir);
+ if (!iface_stat_all_procfile) {
+ pr_err("qtaguid: iface_stat: init "
+ " failed to create stat_old proc entry\n");
+ err = -1;
+ goto err_zap_entry;
+ }
+ iface_stat_all_procfile->read_proc = iface_stat_fmt_proc_read;
+ iface_stat_all_procfile->data = (void *)1; /* fmt1 */
+
+ iface_stat_fmt_procfile = create_proc_entry(iface_stat_fmt_procfilename,
+ proc_iface_perms,
+ parent_procdir);
+ if (!iface_stat_fmt_procfile) {
+ pr_err("qtaguid: iface_stat: init "
+ " failed to create stat_all proc entry\n");
+ err = -1;
+ goto err_zap_all_stats_entry;
+ }
+ iface_stat_fmt_procfile->read_proc = iface_stat_fmt_proc_read;
+ iface_stat_fmt_procfile->data = (void *)2; /* fmt2 */
+
+
+ err = register_netdevice_notifier(&iface_netdev_notifier_blk);
+ if (err) {
+ pr_err("qtaguid: iface_stat: init "
+ "failed to register dev event handler\n");
+ goto err_zap_all_stats_entries;
+ }
+ err = register_inetaddr_notifier(&iface_inetaddr_notifier_blk);
+ if (err) {
+ pr_err("qtaguid: iface_stat: init "
+ "failed to register ipv4 dev event handler\n");
+ goto err_unreg_nd;
+ }
+
+ err = register_inet6addr_notifier(&iface_inet6addr_notifier_blk);
+ if (err) {
+ pr_err("qtaguid: iface_stat: init "
+ "failed to register ipv6 dev event handler\n");
+ goto err_unreg_ip4_addr;
+ }
+ return 0;
+
+err_unreg_ip4_addr:
+ unregister_inetaddr_notifier(&iface_inetaddr_notifier_blk);
+err_unreg_nd:
+ unregister_netdevice_notifier(&iface_netdev_notifier_blk);
+err_zap_all_stats_entries:
+ remove_proc_entry(iface_stat_fmt_procfilename, parent_procdir);
+err_zap_all_stats_entry:
+ remove_proc_entry(iface_stat_all_procfilename, parent_procdir);
+err_zap_entry:
+ remove_proc_entry(iface_stat_procdirname, parent_procdir);
+err:
+ return err;
+}
+
+static struct sock *qtaguid_find_sk(const struct sk_buff *skb,
+ struct xt_action_param *par)
+{
+ struct sock *sk;
+ unsigned int hook_mask = (1 << par->hooknum);
+
+ MT_DEBUG("qtaguid: find_sk(skb=%p) hooknum=%d family=%d\n", skb,
+ par->hooknum, par->family);
+
+ /*
+ * Let's not abuse the xt_socket_get*_sk(), or else it will
+ * return garbage SKs.
+ */
+ if (!(hook_mask & XT_SOCKET_SUPPORTED_HOOKS))
+ return NULL;
+
+ switch (par->family) {
+ case NFPROTO_IPV6:
+ sk = xt_socket_get6_sk(skb, par);
+ break;
+ case NFPROTO_IPV4:
+ sk = xt_socket_get4_sk(skb, par);
+ break;
+ default:
+ return NULL;
+ }
+
+ /*
+ * There seem to be issues with the file ptr for TCP_TIME_WAIT SKs.
+ * http://kerneltrap.org/mailarchive/linux-netdev/2010/10/21/6287959
+ * Not fixed in 3.0-r3 :(
+ */
+ if (sk) {
+ MT_DEBUG("qtaguid: %p->sk_proto=%u "
+ "->sk_state=%d\n", sk, sk->sk_protocol, sk->sk_state);
+ if (sk->sk_state == TCP_TIME_WAIT) {
+ xt_socket_put_sk(sk);
+ sk = NULL;
+ }
+ }
+ return sk;
+}
+
+static void account_for_uid(const struct sk_buff *skb,
+ const struct sock *alternate_sk, uid_t uid,
+ struct xt_action_param *par)
+{
+ const struct net_device *el_dev;
+
+ if (!skb->dev) {
+ MT_DEBUG("qtaguid[%d]: no skb->dev\n", par->hooknum);
+ el_dev = par->in ? : par->out;
+ } else {
+ const struct net_device *other_dev;
+ el_dev = skb->dev;
+ other_dev = par->in ? : par->out;
+ if (el_dev != other_dev) {
+ MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs "
+ "par->(in/out)=%p %s\n",
+ par->hooknum, el_dev, el_dev->name, other_dev,
+ other_dev->name);
+ }
+ }
+
+ if (unlikely(!el_dev)) {
+ pr_info("qtaguid[%d]: no par->in/out?!!\n", par->hooknum);
+ } else if (unlikely(!el_dev->name)) {
+ pr_info("qtaguid[%d]: no dev->name?!!\n", par->hooknum);
+ } else {
+ int proto = ipx_proto(skb, par);
+ MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d\n",
+ par->hooknum, el_dev->name, el_dev->type,
+ par->family, proto);
+
+ if_tag_stat_update(el_dev->name, uid,
+ skb->sk ? skb->sk : alternate_sk,
+ par->in ? IFS_RX : IFS_TX,
+ proto, skb->len);
+ }
+}
+
+static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_qtaguid_match_info *info = par->matchinfo;
+ const struct file *filp;
+ bool got_sock = false;
+ struct sock *sk;
+ uid_t sock_uid;
+ bool res;
+
+ if (unlikely(module_passive))
+ return (info->match ^ info->invert) == 0;
+
+ MT_DEBUG("qtaguid[%d]: entered skb=%p par->in=%p/out=%p fam=%d\n",
+ par->hooknum, skb, par->in, par->out, par->family);
+
+ atomic64_inc(&qtu_events.match_calls);
+ if (skb == NULL) {
+ res = (info->match ^ info->invert) == 0;
+ goto ret_res;
+ }
+
+ switch (par->hooknum) {
+ case NF_INET_PRE_ROUTING:
+ case NF_INET_POST_ROUTING:
+ atomic64_inc(&qtu_events.match_calls_prepost);
+ iface_stat_update_from_skb(skb, par);
+ /*
+ * We are done in pre/post. The skb will get processed
+ * further later.
+ */
+ res = (info->match ^ info->invert);
+ goto ret_res;
+ break;
+ /* default: Fall through and do UID related work */
+ }
+
+ sk = skb->sk;
+ if (sk == NULL) {
+ /*
+ * A missing sk->sk_socket happens when packets are in-flight
+ * and the matching socket is already closed and gone.
+ */
+ sk = qtaguid_find_sk(skb, par);
+ /*
+ * If we got the socket from the find_sk(), we will need to put
+ * it back, as nf_tproxy_get_sock_v4() got it.
+ */
+ got_sock = sk;
+ if (sk)
+ atomic64_inc(&qtu_events.match_found_sk_in_ct);
+ else
+ atomic64_inc(&qtu_events.match_found_no_sk_in_ct);
+ } else {
+ atomic64_inc(&qtu_events.match_found_sk);
+ }
+ MT_DEBUG("qtaguid[%d]: sk=%p got_sock=%d fam=%d proto=%d\n",
+ par->hooknum, sk, got_sock, par->family, ipx_proto(skb, par));
+ if (sk != NULL) {
+ MT_DEBUG("qtaguid[%d]: sk=%p->sk_socket=%p->file=%p\n",
+ par->hooknum, sk, sk->sk_socket,
+ sk->sk_socket ? sk->sk_socket->file : (void *)-1LL);
+ filp = sk->sk_socket ? sk->sk_socket->file : NULL;
+ MT_DEBUG("qtaguid[%d]: filp...uid=%u\n",
+ par->hooknum, filp ? filp->f_cred->fsuid : -1);
+ }
+
+ if (sk == NULL || sk->sk_socket == NULL) {
+ /*
+ * Here, the qtaguid_find_sk() using connection tracking
+ * couldn't find the owner, so for now we just count them
+ * against the system.
+ */
+ /*
+ * TODO: unhack how to force just accounting.
+ * For now we only do iface stats when the uid-owner is not
+ * requested.
+ */
+ if (!(info->match & XT_QTAGUID_UID))
+ account_for_uid(skb, sk, 0, par);
+ MT_DEBUG("qtaguid[%d]: leaving (sk?sk->sk_socket)=%p\n",
+ par->hooknum,
+ sk ? sk->sk_socket : NULL);
+ res = (info->match ^ info->invert) == 0;
+ atomic64_inc(&qtu_events.match_no_sk);
+ goto put_sock_ret_res;
+ } else if (info->match & info->invert & XT_QTAGUID_SOCKET) {
+ res = false;
+ goto put_sock_ret_res;
+ }
+ filp = sk->sk_socket->file;
+ if (filp == NULL) {
+ MT_DEBUG("qtaguid[%d]: leaving filp=NULL\n", par->hooknum);
+ account_for_uid(skb, sk, 0, par);
+ res = ((info->match ^ info->invert) &
+ (XT_QTAGUID_UID | XT_QTAGUID_GID)) == 0;
+ atomic64_inc(&qtu_events.match_no_sk_file);
+ goto put_sock_ret_res;
+ }
+ sock_uid = filp->f_cred->fsuid;
+ /*
+ * TODO: unhack how to force just accounting.
+ * For now we only do iface stats when the uid-owner is not requested
+ */
+ if (!(info->match & XT_QTAGUID_UID))
+ account_for_uid(skb, sk, sock_uid, par);
+
+ /*
+ * The following two tests fail the match when:
+ * id not in range AND no inverted condition requested
+ * or id in range AND inverted condition requested
+ * Thus (!a && b) || (a && !b) == a ^ b
+ */
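+ /*
+ * Worked example (values hypothetical): with uid_min=10000,
+ * uid_max=10099 and XT_QTAGUID_UID not set in info->invert,
+ * fsuid=10050 gives in_range(1) ^ not_inverted(1) == 0, so the
+ * test below does not fire and matching continues; fsuid=0 gives
+ * 0 ^ 1 == 1 and the match fails. With the invert bit set, the
+ * two outcomes swap.
+ */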
+ if (info->match & XT_QTAGUID_UID)
+ if ((filp->f_cred->fsuid >= info->uid_min &&
+ filp->f_cred->fsuid <= info->uid_max) ^
+ !(info->invert & XT_QTAGUID_UID)) {
+ MT_DEBUG("qtaguid[%d]: leaving uid not matching\n",
+ par->hooknum);
+ res = false;
+ goto put_sock_ret_res;
+ }
+ if (info->match & XT_QTAGUID_GID)
+ if ((filp->f_cred->fsgid >= info->gid_min &&
+ filp->f_cred->fsgid <= info->gid_max) ^
+ !(info->invert & XT_QTAGUID_GID)) {
+ MT_DEBUG("qtaguid[%d]: leaving gid not matching\n",
+ par->hooknum);
+ res = false;
+ goto put_sock_ret_res;
+ }
+
+ MT_DEBUG("qtaguid[%d]: leaving matched\n", par->hooknum);
+ res = true;
+
+put_sock_ret_res:
+ if (got_sock)
+ xt_socket_put_sk(sk);
+ret_res:
+ MT_DEBUG("qtaguid[%d]: left %d\n", par->hooknum, res);
+ return res;
+}
+
+#ifdef DDEBUG
+/* This function is not in xt_qtaguid_print.c because of locks visibility */
+static void prdebug_full_state(int indent_level, const char *fmt, ...)
+{
+ va_list args;
+ char *fmt_buff;
+ char *buff;
+
+ if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+ return;
+
+ fmt_buff = kasprintf(GFP_ATOMIC,
+ "qtaguid: %s(): %s {\n", __func__, fmt);
+ BUG_ON(!fmt_buff);
+ va_start(args, fmt);
+ buff = kvasprintf(GFP_ATOMIC,
+ fmt_buff, args);
+ BUG_ON(!buff);
+ pr_debug("%s", buff);
+ kfree(fmt_buff);
+ kfree(buff);
+ va_end(args);
+
+ spin_lock_bh(&sock_tag_list_lock);
+ prdebug_sock_tag_tree(indent_level, &sock_tag_tree);
+ spin_unlock_bh(&sock_tag_list_lock);
+
+ spin_lock_bh(&sock_tag_list_lock);
+ spin_lock_bh(&uid_tag_data_tree_lock);
+ prdebug_uid_tag_data_tree(indent_level, &uid_tag_data_tree);
+ prdebug_proc_qtu_data_tree(indent_level, &proc_qtu_data_tree);
+ spin_unlock_bh(&uid_tag_data_tree_lock);
+ spin_unlock_bh(&sock_tag_list_lock);
+
+ spin_lock_bh(&iface_stat_list_lock);
+ prdebug_iface_stat_list(indent_level, &iface_stat_list);
+ spin_unlock_bh(&iface_stat_list_lock);
+
+ pr_debug("qtaguid: %s(): }\n", __func__);
+}
+#else
+static void prdebug_full_state(int indent_level, const char *fmt, ...) {}
+#endif
+
+/*
+ * Procfs reader to get all active socket tags using style "1)" as described in
+ * fs/proc/generic.c
+ */
+static int qtaguid_ctrl_proc_read(char *page, char **num_items_returned,
+ off_t items_to_skip, int char_count, int *eof,
+ void *data)
+{
+ char *outp = page;
+ int len;
+ uid_t uid;
+ struct rb_node *node;
+ struct sock_tag *sock_tag_entry;
+ int item_index = 0;
+ int indent_level = 0;
+ long f_count;
+
+ if (unlikely(module_passive)) {
+ *eof = 1;
+ return 0;
+ }
+
+ if (*eof)
+ return 0;
+
+ CT_DEBUG("qtaguid: proc ctrl pid=%u tgid=%u uid=%u "
+ "page=%p off=%ld char_count=%d *eof=%d\n",
+ current->pid, current->tgid, current_fsuid(),
+ page, items_to_skip, char_count, *eof);
+
+ spin_lock_bh(&sock_tag_list_lock);
+ for (node = rb_first(&sock_tag_tree);
+ node;
+ node = rb_next(node)) {
+ if (item_index++ < items_to_skip)
+ continue;
+ sock_tag_entry = rb_entry(node, struct sock_tag, sock_node);
+ uid = get_uid_from_tag(sock_tag_entry->tag);
+ CT_DEBUG("qtaguid: proc_read(): sk=%p tag=0x%llx (uid=%u) "
+ "pid=%u\n",
+ sock_tag_entry->sk,
+ sock_tag_entry->tag,
+ uid,
+ sock_tag_entry->pid
+ );
+ f_count = atomic_long_read(
+ &sock_tag_entry->socket->file->f_count);
+ len = snprintf(outp, char_count,
+ "sock=%p tag=0x%llx (uid=%u) pid=%u "
+ "f_count=%lu\n",
+ sock_tag_entry->sk,
+ sock_tag_entry->tag, uid,
+ sock_tag_entry->pid, f_count);
+ if (len >= char_count) {
+ spin_unlock_bh(&sock_tag_list_lock);
+ *outp = '\0';
+ return outp - page;
+ }
+ outp += len;
+ char_count -= len;
+ (*num_items_returned)++;
+ }
+ spin_unlock_bh(&sock_tag_list_lock);
+
+ if (item_index++ >= items_to_skip) {
+ len = snprintf(outp, char_count,
+ "events: sockets_tagged=%llu "
+ "sockets_untagged=%llu "
+ "counter_set_changes=%llu "
+ "delete_cmds=%llu "
+ "iface_events=%llu "
+ "match_calls=%llu "
+ "match_calls_prepost=%llu "
+ "match_found_sk=%llu "
+ "match_found_sk_in_ct=%llu "
+ "match_found_no_sk_in_ct=%llu "
+ "match_no_sk=%llu "
+ "match_no_sk_file=%llu\n",
+ atomic64_read(&qtu_events.sockets_tagged),
+ atomic64_read(&qtu_events.sockets_untagged),
+ atomic64_read(&qtu_events.counter_set_changes),
+ atomic64_read(&qtu_events.delete_cmds),
+ atomic64_read(&qtu_events.iface_events),
+ atomic64_read(&qtu_events.match_calls),
+ atomic64_read(&qtu_events.match_calls_prepost),
+ atomic64_read(&qtu_events.match_found_sk),
+ atomic64_read(&qtu_events.match_found_sk_in_ct),
+ atomic64_read(
+ &qtu_events.match_found_no_sk_in_ct),
+ atomic64_read(&qtu_events.match_no_sk),
+ atomic64_read(&qtu_events.match_no_sk_file));
+ if (len >= char_count) {
+ *outp = '\0';
+ return outp - page;
+ }
+ outp += len;
+ char_count -= len;
+ (*num_items_returned)++;
+ }
+
+ /* Count the following as part of the last item_index */
+ if (item_index > items_to_skip) {
+ prdebug_full_state(indent_level, "proc ctrl");
+ }
+
+ *eof = 1;
+ return outp - page;
+}
+
+/*
+ * Delete socket tags and stat tags associated with a given
+ * accounting tag and uid.
+ */
+static int ctrl_cmd_delete(const char *input)
+{
+ char cmd;
+ uid_t uid;
+ uid_t entry_uid;
+ tag_t acct_tag;
+ tag_t tag;
+ int res, argc;
+ struct iface_stat *iface_entry;
+ struct rb_node *node;
+ struct sock_tag *st_entry;
+ struct rb_root st_to_free_tree = RB_ROOT;
+ struct tag_stat *ts_entry;
+ struct tag_counter_set *tcs_entry;
+ struct tag_ref *tr_entry;
+ struct uid_tag_data *utd_entry;
+
+ argc = sscanf(input, "%c %llu %u", &cmd, &acct_tag, &uid);
+ CT_DEBUG("qtaguid: ctrl_delete(%s): argc=%d cmd=%c "
+ "user_tag=0x%llx uid=%u\n", input, argc, cmd,
+ acct_tag, uid);
+ if (argc < 2) {
+ res = -EINVAL;
+ goto err;
+ }
+ if (!valid_atag(acct_tag)) {
+ pr_info("qtaguid: ctrl_delete(%s): invalid tag\n", input);
+ res = -EINVAL;
+ goto err;
+ }
+ if (argc < 3) {
+ uid = current_fsuid();
+ } else if (!can_impersonate_uid(uid)) {
+ pr_info("qtaguid: ctrl_delete(%s): "
+ "insufficient priv from pid=%u tgid=%u uid=%u\n",
+ input, current->pid, current->tgid, current_fsuid());
+ res = -EPERM;
+ goto err;
+ }
+
+ tag = combine_atag_with_uid(acct_tag, uid);
+ CT_DEBUG("qtaguid: ctrl_delete(%s): "
+ "looking for tag=0x%llx (uid=%u)\n",
+ input, tag, uid);
+
+ /* Delete socket tags */
+ spin_lock_bh(&sock_tag_list_lock);
+ node = rb_first(&sock_tag_tree);
+ while (node) {
+ st_entry = rb_entry(node, struct sock_tag, sock_node);
+ entry_uid = get_uid_from_tag(st_entry->tag);
+ node = rb_next(node);
+ if (entry_uid != uid)
+ continue;
+
+ CT_DEBUG("qtaguid: ctrl_delete(%s): st tag=0x%llx (uid=%u)\n",
+ input, st_entry->tag, entry_uid);
+
+ if (!acct_tag || st_entry->tag == tag) {
+ rb_erase(&st_entry->sock_node, &sock_tag_tree);
+ /* Can't sockfd_put() within spinlock, do it later. */
+ sock_tag_tree_insert(st_entry, &st_to_free_tree);
+ tr_entry = lookup_tag_ref(st_entry->tag, NULL);
+ BUG_ON(tr_entry->num_sock_tags <= 0);
+ tr_entry->num_sock_tags--;
+ /*
+ * TODO: remove this if, and start failing.
+ * This is a hack: elsewhere we tolerate apps that did not open
+ * /dev/xt_qtaguid (the "if (IS_ERR_OR_NULL(pqd_entry))" checks),
+ * so the list linkage may never have been set up.
+ */
+ if (st_entry->list.next && st_entry->list.prev)
+ list_del(&st_entry->list);
+ }
+ }
+ spin_unlock_bh(&sock_tag_list_lock);
+
+ sock_tag_tree_erase(&st_to_free_tree);
+
+ /* Delete tag counter-sets */
+ spin_lock_bh(&tag_counter_set_list_lock);
+ /* Counter sets are only on the uid tag, not full tag */
+ tcs_entry = tag_counter_set_tree_search(&tag_counter_set_tree, tag);
+ if (tcs_entry) {
+ CT_DEBUG("qtaguid: ctrl_delete(%s): "
+ "erase tcs: tag=0x%llx (uid=%u) set=%d\n",
+ input,
+ tcs_entry->tn.tag,
+ get_uid_from_tag(tcs_entry->tn.tag),
+ tcs_entry->active_set);
+ rb_erase(&tcs_entry->tn.node, &tag_counter_set_tree);
+ kfree(tcs_entry);
+ }
+ spin_unlock_bh(&tag_counter_set_list_lock);
+
+ /*
+ * If acct_tag is 0, then all entries belonging to uid are
+ * erased.
+ */
+ spin_lock_bh(&iface_stat_list_lock);
+ list_for_each_entry(iface_entry, &iface_stat_list, list) {
+ spin_lock_bh(&iface_entry->tag_stat_list_lock);
+ node = rb_first(&iface_entry->tag_stat_tree);
+ while (node) {
+ ts_entry = rb_entry(node, struct tag_stat, tn.node);
+ entry_uid = get_uid_from_tag(ts_entry->tn.tag);
+ node = rb_next(node);
+
+ CT_DEBUG("qtaguid: ctrl_delete(%s): "
+ "ts tag=0x%llx (uid=%u)\n",
+ input, ts_entry->tn.tag, entry_uid);
+
+ if (entry_uid != uid)
+ continue;
+ if (!acct_tag || ts_entry->tn.tag == tag) {
+ CT_DEBUG("qtaguid: ctrl_delete(%s): "
+ "erase ts: %s 0x%llx %u\n",
+ input, iface_entry->ifname,
+ get_atag_from_tag(ts_entry->tn.tag),
+ entry_uid);
+ rb_erase(&ts_entry->tn.node,
+ &iface_entry->tag_stat_tree);
+ kfree(ts_entry);
+ }
+ }
+ spin_unlock_bh(&iface_entry->tag_stat_list_lock);
+ }
+ spin_unlock_bh(&iface_stat_list_lock);
+
+ /* Cleanup the uid_tag_data */
+ spin_lock_bh(&uid_tag_data_tree_lock);
+ node = rb_first(&uid_tag_data_tree);
+ while (node) {
+ utd_entry = rb_entry(node, struct uid_tag_data, node);
+ entry_uid = utd_entry->uid;
+ node = rb_next(node);
+
+ CT_DEBUG("qtaguid: ctrl_delete(%s): "
+ "utd uid=%u\n",
+ input, entry_uid);
+
+ if (entry_uid != uid)
+ continue;
+ /*
+ * Go over the tag_refs and free those that have no
+ * sock_tags using them.
+ */
+ put_tag_ref_tree(tag, utd_entry);
+ put_utd_entry(utd_entry);
+ }
+ spin_unlock_bh(&uid_tag_data_tree_lock);
+
+ atomic64_inc(&qtu_events.delete_cmds);
+ res = 0;
+
+err:
+ return res;
+}
+
+static int ctrl_cmd_counter_set(const char *input)
+{
+ char cmd;
+ uid_t uid = 0;
+ tag_t tag;
+ int res, argc;
+ struct tag_counter_set *tcs;
+ int counter_set;
+
+ argc = sscanf(input, "%c %d %u", &cmd, &counter_set, &uid);
+ CT_DEBUG("qtaguid: ctrl_counterset(%s): argc=%d cmd=%c "
+ "set=%d uid=%u\n", input, argc, cmd,
+ counter_set, uid);
+ if (argc != 3) {
+ res = -EINVAL;
+ goto err;
+ }
+ if (counter_set < 0 || counter_set >= IFS_MAX_COUNTER_SETS) {
+ pr_info("qtaguid: ctrl_counterset(%s): invalid counter_set range\n",
+ input);
+ res = -EINVAL;
+ goto err;
+ }
+ if (!can_manipulate_uids()) {
+ pr_info("qtaguid: ctrl_counterset(%s): "
+ "insufficient priv from pid=%u tgid=%u uid=%u\n",
+ input, current->pid, current->tgid, current_fsuid());
+ res = -EPERM;
+ goto err;
+ }
+
+ tag = make_tag_from_uid(uid);
+ spin_lock_bh(&tag_counter_set_list_lock);
+ tcs = tag_counter_set_tree_search(&tag_counter_set_tree, tag);
+ if (!tcs) {
+ tcs = kzalloc(sizeof(*tcs), GFP_ATOMIC);
+ if (!tcs) {
+ spin_unlock_bh(&tag_counter_set_list_lock);
+ pr_err("qtaguid: ctrl_counterset(%s): "
+ "failed to alloc counter set\n",
+ input);
+ res = -ENOMEM;
+ goto err;
+ }
+ tcs->tn.tag = tag;
+ tag_counter_set_tree_insert(tcs, &tag_counter_set_tree);
+ CT_DEBUG("qtaguid: ctrl_counterset(%s): added tcs tag=0x%llx "
+ "(uid=%u) set=%d\n",
+ input, tag, get_uid_from_tag(tag), counter_set);
+ }
+ tcs->active_set = counter_set;
+ spin_unlock_bh(&tag_counter_set_list_lock);
+ atomic64_inc(&qtu_events.counter_set_changes);
+ res = 0;
+
+err:
+ return res;
+}
+
+static int ctrl_cmd_tag(const char *input)
+{
+ char cmd;
+ int sock_fd = 0;
+ uid_t uid = 0;
+ tag_t acct_tag = make_atag_from_value(0);
+ tag_t full_tag;
+ struct socket *el_socket;
+ int res, argc;
+ struct sock_tag *sock_tag_entry;
+ struct tag_ref *tag_ref_entry;
+ struct uid_tag_data *uid_tag_data_entry;
+ struct proc_qtu_data *pqd_entry;
+
+ /* Unassigned args will get defaulted later. */
+ argc = sscanf(input, "%c %d %llu %u", &cmd, &sock_fd, &acct_tag, &uid);
+ CT_DEBUG("qtaguid: ctrl_tag(%s): argc=%d cmd=%c sock_fd=%d "
+ "acct_tag=0x%llx uid=%u\n", input, argc, cmd, sock_fd,
+ acct_tag, uid);
+ if (argc < 2) {
+ res = -EINVAL;
+ goto err;
+ }
+ el_socket = sockfd_lookup(sock_fd, &res); /* This locks the file */
+ if (!el_socket) {
+ pr_info("qtaguid: ctrl_tag(%s): failed to lookup"
+ " sock_fd=%d err=%d pid=%u tgid=%u uid=%u\n",
+ input, sock_fd, res, current->pid, current->tgid,
+ current_fsuid());
+ goto err;
+ }
+ CT_DEBUG("qtaguid: ctrl_tag(%s): socket->...->f_count=%ld ->sk=%p\n",
+ input, atomic_long_read(&el_socket->file->f_count),
+ el_socket->sk);
+ if (argc < 3) {
+ acct_tag = make_atag_from_value(0);
+ } else if (!valid_atag(acct_tag)) {
+ pr_info("qtaguid: ctrl_tag(%s): invalid tag\n", input);
+ res = -EINVAL;
+ goto err_put;
+ }
+ CT_DEBUG("qtaguid: ctrl_tag(%s): "
+ "pid=%u tgid=%u uid=%u euid=%u fsuid=%u "
+ "in_group=%d in_egroup=%d\n",
+ input, current->pid, current->tgid, current_uid(),
+ current_euid(), current_fsuid(),
+ in_group_p(proc_ctrl_write_gid),
+ in_egroup_p(proc_ctrl_write_gid));
+ if (argc < 4) {
+ uid = current_fsuid();
+ } else if (!can_impersonate_uid(uid)) {
+ pr_info("qtaguid: ctrl_tag(%s): "
+ "insufficient priv from pid=%u tgid=%u uid=%u\n",
+ input, current->pid, current->tgid, current_fsuid());
+ res = -EPERM;
+ goto err_put;
+ }
+ full_tag = combine_atag_with_uid(acct_tag, uid);
+
+ spin_lock_bh(&sock_tag_list_lock);
+ sock_tag_entry = get_sock_stat_nl(el_socket->sk);
+ tag_ref_entry = get_tag_ref(full_tag, &uid_tag_data_entry);
+ if (IS_ERR(tag_ref_entry)) {
+ res = PTR_ERR(tag_ref_entry);
+ spin_unlock_bh(&sock_tag_list_lock);
+ goto err_put;
+ }
+ tag_ref_entry->num_sock_tags++;
+ if (sock_tag_entry) {
+ struct tag_ref *prev_tag_ref_entry;
+
+ CT_DEBUG("qtaguid: ctrl_tag(%s): retag for sk=%p "
+ "st@%p ...->f_count=%ld\n",
+ input, el_socket->sk, sock_tag_entry,
+ atomic_long_read(&el_socket->file->f_count));
+ /*
+ * This is a re-tagging, so release the sock_fd that was
+ * locked at the time of the 1st tagging.
+ * There is still the ref from this call's sockfd_lookup() so
+ * it can be done within the spinlock.
+ */
+ sockfd_put(sock_tag_entry->socket);
+ prev_tag_ref_entry = lookup_tag_ref(sock_tag_entry->tag,
+ &uid_tag_data_entry);
+ BUG_ON(IS_ERR_OR_NULL(prev_tag_ref_entry));
+ BUG_ON(prev_tag_ref_entry->num_sock_tags <= 0);
+ prev_tag_ref_entry->num_sock_tags--;
+ sock_tag_entry->tag = full_tag;
+ } else {
+ CT_DEBUG("qtaguid: ctrl_tag(%s): newtag for sk=%p\n",
+ input, el_socket->sk);
+ sock_tag_entry = kzalloc(sizeof(*sock_tag_entry),
+ GFP_ATOMIC);
+ if (!sock_tag_entry) {
+ pr_err("qtaguid: ctrl_tag(%s): "
+ "socket tag alloc failed\n",
+ input);
+ spin_unlock_bh(&sock_tag_list_lock);
+ res = -ENOMEM;
+ goto err_tag_unref_put;
+ }
+ sock_tag_entry->sk = el_socket->sk;
+ sock_tag_entry->socket = el_socket;
+ sock_tag_entry->pid = current->tgid;
+ sock_tag_entry->tag = combine_atag_with_uid(acct_tag,
+ uid);
+ spin_lock_bh(&uid_tag_data_tree_lock);
+ pqd_entry = proc_qtu_data_tree_search(
+ &proc_qtu_data_tree, current->tgid);
+ /*
+ * TODO: remove if, and start failing.
+ * At first, we want to catch user-space code that is not
+ * opening the /dev/xt_qtaguid.
+ */
+ if (IS_ERR_OR_NULL(pqd_entry))
+ pr_warn_once(
+ "qtaguid: %s(): "
+ "User space forgot to open /dev/xt_qtaguid? "
+ "pid=%u tgid=%u uid=%u\n", __func__,
+ current->pid, current->tgid,
+ current_fsuid());
+ else
+ list_add(&sock_tag_entry->list,
+ &pqd_entry->sock_tag_list);
+ spin_unlock_bh(&uid_tag_data_tree_lock);
+
+ sock_tag_tree_insert(sock_tag_entry, &sock_tag_tree);
+ atomic64_inc(&qtu_events.sockets_tagged);
+ }
+ spin_unlock_bh(&sock_tag_list_lock);
+ /* We keep the ref to the socket (file) until it is untagged */
+ CT_DEBUG("qtaguid: ctrl_tag(%s): done st@%p ...->f_count=%ld\n",
+ input, sock_tag_entry,
+ atomic_long_read(&el_socket->file->f_count));
+ return 0;
+
+err_tag_unref_put:
+ BUG_ON(tag_ref_entry->num_sock_tags <= 0);
+ tag_ref_entry->num_sock_tags--;
+ free_tag_ref_from_utd_entry(tag_ref_entry, uid_tag_data_entry);
+err_put:
+ CT_DEBUG("qtaguid: ctrl_tag(%s): done. ...->f_count=%ld\n",
+ input, atomic_long_read(&el_socket->file->f_count) - 1);
+ /* Release the sock_fd that was grabbed by sockfd_lookup(). */
+ sockfd_put(el_socket);
+ return res;
+
+err:
+ CT_DEBUG("qtaguid: ctrl_tag(%s): done.\n", input);
+ return res;
+}
+
+static int ctrl_cmd_untag(const char *input)
+{
+ char cmd;
+ int sock_fd = 0;
+ struct socket *el_socket;
+ int res, argc;
+ struct sock_tag *sock_tag_entry;
+ struct tag_ref *tag_ref_entry;
+ struct uid_tag_data *utd_entry;
+ struct proc_qtu_data *pqd_entry;
+
+ argc = sscanf(input, "%c %d", &cmd, &sock_fd);
+ CT_DEBUG("qtaguid: ctrl_untag(%s): argc=%d cmd=%c sock_fd=%d\n",
+ input, argc, cmd, sock_fd);
+ if (argc < 2) {
+ res = -EINVAL;
+ goto err;
+ }
+ el_socket = sockfd_lookup(sock_fd, &res); /* This locks the file */
+ if (!el_socket) {
+ pr_info("qtaguid: ctrl_untag(%s): failed to lookup"
+ " sock_fd=%d err=%d pid=%u tgid=%u uid=%u\n",
+ input, sock_fd, res, current->pid, current->tgid,
+ current_fsuid());
+ goto err;
+ }
+ CT_DEBUG("qtaguid: ctrl_untag(%s): socket->...->f_count=%ld ->sk=%p\n",
+ input, atomic_long_read(&el_socket->file->f_count),
+ el_socket->sk);
+ spin_lock_bh(&sock_tag_list_lock);
+ sock_tag_entry = get_sock_stat_nl(el_socket->sk);
+ if (!sock_tag_entry) {
+ spin_unlock_bh(&sock_tag_list_lock);
+ res = -EINVAL;
+ goto err_put;
+ }
+ /*
+ * The socket already belongs to the current process
+ * so it can do whatever it wants to it.
+ */
+ rb_erase(&sock_tag_entry->sock_node, &sock_tag_tree);
+
+ tag_ref_entry = lookup_tag_ref(sock_tag_entry->tag, &utd_entry);
+ BUG_ON(!tag_ref_entry);
+ BUG_ON(tag_ref_entry->num_sock_tags <= 0);
+ spin_lock_bh(&uid_tag_data_tree_lock);
+ pqd_entry = proc_qtu_data_tree_search(
+ &proc_qtu_data_tree, current->tgid);
+ /*
+ * TODO: remove if, and start failing.
+ * At first, we want to catch user-space code that is not
+ * opening the /dev/xt_qtaguid.
+ */
+ if (IS_ERR_OR_NULL(pqd_entry))
+ pr_warn_once("qtaguid: %s(): "
+ "User space forgot to open /dev/xt_qtaguid? "
+ "pid=%u tgid=%u uid=%u\n", __func__,
+ current->pid, current->tgid, current_fsuid());
+ else
+ list_del(&sock_tag_entry->list);
+ spin_unlock_bh(&uid_tag_data_tree_lock);
+ /*
+ * We don't free tag_ref from the utd_entry here,
+ * only during a cmd_delete().
+ */
+ tag_ref_entry->num_sock_tags--;
+ spin_unlock_bh(&sock_tag_list_lock);
+ /*
+ * Release the sock_fd that was grabbed at tag time,
+ * and once more for the sockfd_lookup() here.
+ */
+ sockfd_put(sock_tag_entry->socket);
+ CT_DEBUG("qtaguid: ctrl_untag(%s): done. st@%p ...->f_count=%ld\n",
+ input, sock_tag_entry,
+ atomic_long_read(&el_socket->file->f_count) - 1);
+ sockfd_put(el_socket);
+
+ kfree(sock_tag_entry);
+ atomic64_inc(&qtu_events.sockets_untagged);
+
+ return 0;
+
+err_put:
+ CT_DEBUG("qtaguid: ctrl_untag(%s): done. socket->...->f_count=%ld\n",
+ input, atomic_long_read(&el_socket->file->f_count) - 1);
+ /* Release the sock_fd that was grabbed by sockfd_lookup(). */
+ sockfd_put(el_socket);
+ return res;
+
+err:
+ CT_DEBUG("qtaguid: ctrl_untag(%s): done.\n", input);
+ return res;
+}
+
+static int qtaguid_ctrl_parse(const char *input, int count)
+{
+ char cmd;
+ int res;
+
+ CT_DEBUG("qtaguid: ctrl(%s): pid=%u tgid=%u uid=%u\n",
+ input, current->pid, current->tgid, current_fsuid());
+
+ cmd = input[0];
+ /* Collect params for commands */
+ switch (cmd) {
+ case 'd':
+ res = ctrl_cmd_delete(input);
+ break;
+
+ case 's':
+ res = ctrl_cmd_counter_set(input);
+ break;
+
+ case 't':
+ res = ctrl_cmd_tag(input);
+ break;
+
+ case 'u':
+ res = ctrl_cmd_untag(input);
+ break;
+
+ default:
+ res = -EINVAL;
+ goto err;
+ }
+ if (!res)
+ res = count;
+err:
+ CT_DEBUG("qtaguid: ctrl(%s): res=%d\n", input, res);
+ return res;
+}
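+
+/*
+ * For reference, the formats accepted above (derived from the sscanf()
+ * calls in the ctrl_cmd_*() handlers; acct_tag is the already-shifted
+ * 64-bit value checked by valid_atag()):
+ *   "t <sock_fd> [<acct_tag> [<uid>]]"  tag the socket behind sock_fd
+ *   "u <sock_fd>"                       untag that socket
+ *   "s <counter_set> <uid>"             select the uid's active counter set
+ *   "d <acct_tag> [<uid>]"              delete matching stats; acct_tag 0
+ *                                       deletes all of the uid's stats
+ */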
+
+#define MAX_QTAGUID_CTRL_INPUT_LEN 255
+static int qtaguid_ctrl_proc_write(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ char input_buf[MAX_QTAGUID_CTRL_INPUT_LEN];
+
+ if (unlikely(module_passive))
+ return count;
+
+ if (count >= MAX_QTAGUID_CTRL_INPUT_LEN)
+ return -EINVAL;
+
+ if (copy_from_user(input_buf, buffer, count))
+ return -EFAULT;
+
+ input_buf[count] = '\0';
+ return qtaguid_ctrl_parse(input_buf, count);
+}
+
+struct proc_print_info {
+ char *outp;
+ char **num_items_returned;
+ struct iface_stat *iface_entry;
+ struct tag_stat *ts_entry;
+ int item_index;
+ int items_to_skip;
+ int char_count;
+};
+
+static int pp_stats_line(struct proc_print_info *ppi, int cnt_set)
+{
+ int len;
+ struct data_counters *cnts;
+
+ if (!ppi->item_index) {
+ if (ppi->item_index++ < ppi->items_to_skip)
+ return 0;
+ len = snprintf(ppi->outp, ppi->char_count,
+ "idx iface acct_tag_hex uid_tag_int cnt_set "
+ "rx_bytes rx_packets "
+ "tx_bytes tx_packets "
+ "rx_tcp_bytes rx_tcp_packets "
+ "rx_udp_bytes rx_udp_packets "
+ "rx_other_bytes rx_other_packets "
+ "tx_tcp_bytes tx_tcp_packets "
+ "tx_udp_bytes tx_udp_packets "
+ "tx_other_bytes tx_other_packets\n");
+ } else {
+ tag_t tag = ppi->ts_entry->tn.tag;
+ uid_t stat_uid = get_uid_from_tag(tag);
+ /* Detailed tags are not available to everybody */
+ if (get_atag_from_tag(tag)
+ && !can_read_other_uid_stats(stat_uid)) {
+ CT_DEBUG("qtaguid: stats line: "
+ "%s 0x%llx %u: insufficient priv "
+ "from pid=%u tgid=%u uid=%u\n",
+ ppi->iface_entry->ifname,
+ get_atag_from_tag(tag), stat_uid,
+ current->pid, current->tgid, current_fsuid());
+ return 0;
+ }
+ if (ppi->item_index++ < ppi->items_to_skip)
+ return 0;
+ cnts = &ppi->ts_entry->counters;
+ len = snprintf(
+ ppi->outp, ppi->char_count,
+ "%d %s 0x%llx %u %u "
+ "%llu %llu "
+ "%llu %llu "
+ "%llu %llu "
+ "%llu %llu "
+ "%llu %llu "
+ "%llu %llu "
+ "%llu %llu "
+ "%llu %llu\n",
+ ppi->item_index,
+ ppi->iface_entry->ifname,
+ get_atag_from_tag(tag),
+ stat_uid,
+ cnt_set,
+ dc_sum_bytes(cnts, cnt_set, IFS_RX),
+ dc_sum_packets(cnts, cnt_set, IFS_RX),
+ dc_sum_bytes(cnts, cnt_set, IFS_TX),
+ dc_sum_packets(cnts, cnt_set, IFS_TX),
+ cnts->bpc[cnt_set][IFS_RX][IFS_TCP].bytes,
+ cnts->bpc[cnt_set][IFS_RX][IFS_TCP].packets,
+ cnts->bpc[cnt_set][IFS_RX][IFS_UDP].bytes,
+ cnts->bpc[cnt_set][IFS_RX][IFS_UDP].packets,
+ cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].bytes,
+ cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].packets,
+ cnts->bpc[cnt_set][IFS_TX][IFS_TCP].bytes,
+ cnts->bpc[cnt_set][IFS_TX][IFS_TCP].packets,
+ cnts->bpc[cnt_set][IFS_TX][IFS_UDP].bytes,
+ cnts->bpc[cnt_set][IFS_TX][IFS_UDP].packets,
+ cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].bytes,
+ cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].packets);
+ }
+ return len;
+}
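+
+/*
+ * Example of one resulting stats line (hypothetical numbers), following
+ * the header printed above:
+ *   2 wlan0 0x0 10003 0 10240 12 2048 16 8192 8 2048 4 0 0 1024 10 1024 6 0 0
+ * i.e. idx, iface, acct_tag_hex, uid_tag_int, cnt_set, then the rx/tx byte
+ * and packet totals followed by the per-protocol tcp/udp/other breakdown in
+ * the column order of the header.
+ */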
+
+static bool pp_sets(struct proc_print_info *ppi)
+{
+ int len;
+ int counter_set;
+ for (counter_set = 0; counter_set < IFS_MAX_COUNTER_SETS;
+ counter_set++) {
+ len = pp_stats_line(ppi, counter_set);
+ if (len >= ppi->char_count) {
+ *ppi->outp = '\0';
+ return false;
+ }
+ if (len) {
+ ppi->outp += len;
+ ppi->char_count -= len;
+ (*ppi->num_items_returned)++;
+ }
+ }
+ return true;
+}
+
+/*
+ * Procfs reader to get all tag stats using style "1)" as described in
+ * fs/proc/generic.c
+ * Groups all protocols tx/rx bytes.
+ */
+static int qtaguid_stats_proc_read(char *page, char **num_items_returned,
+ off_t items_to_skip, int char_count, int *eof,
+ void *data)
+{
+ struct proc_print_info ppi;
+ int len;
+
+ ppi.outp = page;
+ ppi.item_index = 0;
+ ppi.char_count = char_count;
+ ppi.num_items_returned = num_items_returned;
+ ppi.items_to_skip = items_to_skip;
+
+ if (unlikely(module_passive)) {
+ len = pp_stats_line(&ppi, 0);
+ /* The header should always be shorter than the buffer. */
+ BUG_ON(len >= ppi.char_count);
+ (*num_items_returned)++;
+ *eof = 1;
+ return len;
+ }
+
+ CT_DEBUG("qtaguid:proc stats pid=%u tgid=%u uid=%u "
+ "page=%p *num_items_returned=%p off=%ld "
+ "char_count=%d *eof=%d\n",
+ current->pid, current->tgid, current_fsuid(),
+ page, *num_items_returned,
+ items_to_skip, char_count, *eof);
+
+ if (*eof)
+ return 0;
+
+ /* The idx is there to help debug when things go belly up. */
+ len = pp_stats_line(&ppi, 0);
+ /* Don't advance the outp unless the whole line was printed */
+ if (len >= ppi.char_count) {
+ *ppi.outp = '\0';
+ return ppi.outp - page;
+ }
+ if (len) {
+ ppi.outp += len;
+ ppi.char_count -= len;
+ (*num_items_returned)++;
+ }
+
+ spin_lock_bh(&iface_stat_list_lock);
+ list_for_each_entry(ppi.iface_entry, &iface_stat_list, list) {
+ struct rb_node *node;
+ spin_lock_bh(&ppi.iface_entry->tag_stat_list_lock);
+ for (node = rb_first(&ppi.iface_entry->tag_stat_tree);
+ node;
+ node = rb_next(node)) {
+ ppi.ts_entry = rb_entry(node, struct tag_stat, tn.node);
+ if (!pp_sets(&ppi)) {
+ spin_unlock_bh(
+ &ppi.iface_entry->tag_stat_list_lock);
+ spin_unlock_bh(&iface_stat_list_lock);
+ return ppi.outp - page;
+ }
+ }
+ spin_unlock_bh(&ppi.iface_entry->tag_stat_list_lock);
+ }
+ spin_unlock_bh(&iface_stat_list_lock);
+
+ *eof = 1;
+ return ppi.outp - page;
+}
+
+/*------------------------------------------*/
+static int qtudev_open(struct inode *inode, struct file *file)
+{
+ struct uid_tag_data *utd_entry;
+ struct proc_qtu_data *pqd_entry;
+ struct proc_qtu_data *new_pqd_entry;
+ int res;
+ bool utd_entry_found;
+
+ if (unlikely(qtu_proc_handling_passive))
+ return 0;
+
+ DR_DEBUG("qtaguid: qtudev_open(): pid=%u tgid=%u uid=%u\n",
+ current->pid, current->tgid, current_fsuid());
+
+ spin_lock_bh(&uid_tag_data_tree_lock);
+
+ /* Look for existing uid data, or alloc one. */
+ utd_entry = get_uid_data(current_fsuid(), &utd_entry_found);
+ if (IS_ERR_OR_NULL(utd_entry)) {
+ res = PTR_ERR(utd_entry);
+ goto err_unlock;
+ }
+
+ /* Look for existing PID based proc_data */
+ pqd_entry = proc_qtu_data_tree_search(&proc_qtu_data_tree,
+ current->tgid);
+ if (pqd_entry) {
+ pr_err("qtaguid: qtudev_open(): %u/%u %u "
+ "%s already opened\n",
+ current->pid, current->tgid, current_fsuid(),
+ QTU_DEV_NAME);
+ res = -EBUSY;
+ goto err_unlock_free_utd;
+ }
+
+ new_pqd_entry = kzalloc(sizeof(*new_pqd_entry), GFP_ATOMIC);
+ if (!new_pqd_entry) {
+ pr_err("qtaguid: qtudev_open(): %u/%u %u: "
+ "proc data alloc failed\n",
+ current->pid, current->tgid, current_fsuid());
+ res = -ENOMEM;
+ goto err_unlock_free_utd;
+ }
+ new_pqd_entry->pid = current->tgid;
+ INIT_LIST_HEAD(&new_pqd_entry->sock_tag_list);
+ new_pqd_entry->parent_tag_data = utd_entry;
+ utd_entry->num_pqd++;
+
+ proc_qtu_data_tree_insert(new_pqd_entry,
+ &proc_qtu_data_tree);
+
+ spin_unlock_bh(&uid_tag_data_tree_lock);
+ DR_DEBUG("qtaguid: tracking data for uid=%u in pqd=%p\n",
+ current_fsuid(), new_pqd_entry);
+ file->private_data = new_pqd_entry;
+ return 0;
+
+err_unlock_free_utd:
+ if (!utd_entry_found) {
+ rb_erase(&utd_entry->node, &uid_tag_data_tree);
+ kfree(utd_entry);
+ }
+err_unlock:
+ spin_unlock_bh(&uid_tag_data_tree_lock);
+ return res;
+}
+
+static int qtudev_release(struct inode *inode, struct file *file)
+{
+ struct proc_qtu_data *pqd_entry = file->private_data;
+ struct uid_tag_data *utd_entry = pqd_entry->parent_tag_data;
+ struct sock_tag *st_entry;
+ struct rb_root st_to_free_tree = RB_ROOT;
+ struct list_head *entry, *next;
+ struct tag_ref *tr;
+
+ if (unlikely(qtu_proc_handling_passive))
+ return 0;
+
+ /*
+ * Do not trust current->pid; it might just be a kworker cleaning
+ * up after a dead proc.
+ */
+ DR_DEBUG("qtaguid: qtudev_release(): "
+ "pid=%u tgid=%u uid=%u "
+ "pqd_entry=%p->pid=%u utd_entry=%p->active_tags=%d\n",
+ current->pid, current->tgid, pqd_entry->parent_tag_data->uid,
+ pqd_entry, pqd_entry->pid, utd_entry,
+ utd_entry->num_active_tags);
+
+ spin_lock_bh(&sock_tag_list_lock);
+ spin_lock_bh(&uid_tag_data_tree_lock);
+
+ list_for_each_safe(entry, next, &pqd_entry->sock_tag_list) {
+ st_entry = list_entry(entry, struct sock_tag, list);
+ DR_DEBUG("qtaguid: %s(): "
+ "erase sock_tag=%p->sk=%p pid=%u tgid=%u uid=%u\n",
+ __func__,
+ st_entry, st_entry->sk,
+ current->pid, current->tgid,
+ pqd_entry->parent_tag_data->uid);
+
+ utd_entry = uid_tag_data_tree_search(
+ &uid_tag_data_tree,
+ get_uid_from_tag(st_entry->tag));
+ BUG_ON(IS_ERR_OR_NULL(utd_entry));
+ DR_DEBUG("qtaguid: %s(): "
+ "looking for tag=0x%llx in utd_entry=%p\n", __func__,
+ st_entry->tag, utd_entry);
+ tr = tag_ref_tree_search(&utd_entry->tag_ref_tree,
+ st_entry->tag);
+ BUG_ON(!tr);
+ BUG_ON(tr->num_sock_tags <= 0);
+ tr->num_sock_tags--;
+ free_tag_ref_from_utd_entry(tr, utd_entry);
+
+ rb_erase(&st_entry->sock_node, &sock_tag_tree);
+ list_del(&st_entry->list);
+ /* Can't sockfd_put() within spinlock, do it later. */
+ sock_tag_tree_insert(st_entry, &st_to_free_tree);
+
+ /*
+ * Try to free the utd_entry if no other proc_qtu_data is
+ * using it (num_pqd is 0) and it doesn't have active tags
+ * (num_active_tags is 0).
+ */
+ put_utd_entry(utd_entry);
+ }
+
+ rb_erase(&pqd_entry->node, &proc_qtu_data_tree);
+ BUG_ON(pqd_entry->parent_tag_data->num_pqd < 1);
+ pqd_entry->parent_tag_data->num_pqd--;
+ put_utd_entry(pqd_entry->parent_tag_data);
+ kfree(pqd_entry);
+ file->private_data = NULL;
+
+ spin_unlock_bh(&uid_tag_data_tree_lock);
+ spin_unlock_bh(&sock_tag_list_lock);
+
+
+ sock_tag_tree_erase(&st_to_free_tree);
+
+ prdebug_full_state(0, "%s(): pid=%u tgid=%u", __func__,
+ current->pid, current->tgid);
+ return 0;
+}
+
+/*------------------------------------------*/
+static const struct file_operations qtudev_fops = {
+ .owner = THIS_MODULE,
+ .open = qtudev_open,
+ .release = qtudev_release,
+};
+
+static struct miscdevice qtu_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = QTU_DEV_NAME,
+ .fops = &qtudev_fops,
+ /* How sad it doesn't allow for defaults: .mode = S_IRUGO | S_IWUSR */
+};
+
+/*------------------------------------------*/
+static int __init qtaguid_proc_register(struct proc_dir_entry **res_procdir)
+{
+ int ret;
+ *res_procdir = proc_mkdir(module_procdirname, init_net.proc_net);
+ if (!*res_procdir) {
+ pr_err("qtaguid: failed to create proc/.../xt_qtaguid\n");
+ ret = -ENOMEM;
+ goto no_dir;
+ }
+
+ xt_qtaguid_ctrl_file = create_proc_entry("ctrl", proc_ctrl_perms,
+ *res_procdir);
+ if (!xt_qtaguid_ctrl_file) {
+ pr_err("qtaguid: failed to create xt_qtaguid/ctrl "
+ " file\n");
+ ret = -ENOMEM;
+ goto no_ctrl_entry;
+ }
+ xt_qtaguid_ctrl_file->read_proc = qtaguid_ctrl_proc_read;
+ xt_qtaguid_ctrl_file->write_proc = qtaguid_ctrl_proc_write;
+
+ xt_qtaguid_stats_file = create_proc_entry("stats", proc_stats_perms,
+ *res_procdir);
+ if (!xt_qtaguid_stats_file) {
+ pr_err("qtaguid: failed to create xt_qtaguid/stats "
+ "file\n");
+ ret = -ENOMEM;
+ goto no_stats_entry;
+ }
+ xt_qtaguid_stats_file->read_proc = qtaguid_stats_proc_read;
+ /*
+ * TODO: add support for counter hacking
+ * xt_qtaguid_stats_file->write_proc = qtaguid_stats_proc_write;
+ */
+ return 0;
+
+no_stats_entry:
+ remove_proc_entry("ctrl", *res_procdir);
+no_ctrl_entry:
+ remove_proc_entry("xt_qtaguid", NULL);
+no_dir:
+ return ret;
+}
+
+static struct xt_match qtaguid_mt_reg __read_mostly = {
+ /*
+ * This module masquerades as the "owner" module so that iptables
+ * tools can deal with it.
+ */
+ .name = "owner",
+ .revision = 1,
+ .family = NFPROTO_UNSPEC,
+ .match = qtaguid_mt,
+ .matchsize = sizeof(struct xt_qtaguid_match_info),
+ .me = THIS_MODULE,
+};
+
+static int __init qtaguid_mt_init(void)
+{
+ if (qtaguid_proc_register(&xt_qtaguid_procdir)
+ || iface_stat_init(xt_qtaguid_procdir)
+ || xt_register_match(&qtaguid_mt_reg)
+ || misc_register(&qtu_device))
+ return -1;
+ return 0;
+}
+
+/*
+ * TODO: allow unloading of the module.
+ * For now stats are permanent.
+ * Kconfig forces 'y/n' and never an 'm'.
+ */
+
+module_init(qtaguid_mt_init);
+MODULE_AUTHOR("jpa <jpa@google.com>");
+MODULE_DESCRIPTION("Xtables: socket owner+tag matching and associated stats");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_owner");
+MODULE_ALIAS("ip6t_owner");
+MODULE_ALIAS("ipt_qtaguid");
+MODULE_ALIAS("ip6t_qtaguid");
diff --git a/net/netfilter/xt_qtaguid_internal.h b/net/netfilter/xt_qtaguid_internal.h
new file mode 100644
index 000000000000..d79f8383abf4
--- /dev/null
+++ b/net/netfilter/xt_qtaguid_internal.h
@@ -0,0 +1,333 @@
+/*
+ * Kernel iptables module to track stats for packets based on user tags.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __XT_QTAGUID_INTERNAL_H__
+#define __XT_QTAGUID_INTERNAL_H__
+
+#include <linux/types.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock_types.h>
+#include <linux/workqueue.h>
+
+/* Iface handling */
+#define IDEBUG_MASK (1<<0)
+/* Iptable Matching. Per packet. */
+#define MDEBUG_MASK (1<<1)
+/* Red-black tree handling. Per packet. */
+#define RDEBUG_MASK (1<<2)
+/* procfs ctrl/stats handling */
+#define CDEBUG_MASK (1<<3)
+/* dev and resource tracking */
+#define DDEBUG_MASK (1<<4)
+
+/* E.g. (IDEBUG_MASK | CDEBUG_MASK | DDEBUG_MASK) */
+#define DEFAULT_DEBUG_MASK 0
+
+/*
+ * (Un)Define these *DEBUG to compile out/in the pr_debug calls.
+ * All undef: text size ~ 0x3030; all def: ~ 0x4404.
+ */
+#define IDEBUG
+#define MDEBUG
+#define RDEBUG
+#define CDEBUG
+#define DDEBUG
+
+#define MSK_DEBUG(mask, ...) do { \
+ if (unlikely(qtaguid_debug_mask & (mask))) \
+ pr_debug(__VA_ARGS__); \
+ } while (0)
+#ifdef IDEBUG
+#define IF_DEBUG(...) MSK_DEBUG(IDEBUG_MASK, __VA_ARGS__)
+#else
+#define IF_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef MDEBUG
+#define MT_DEBUG(...) MSK_DEBUG(MDEBUG_MASK, __VA_ARGS__)
+#else
+#define MT_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef RDEBUG
+#define RB_DEBUG(...) MSK_DEBUG(RDEBUG_MASK, __VA_ARGS__)
+#else
+#define RB_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef CDEBUG
+#define CT_DEBUG(...) MSK_DEBUG(CDEBUG_MASK, __VA_ARGS__)
+#else
+#define CT_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef DDEBUG
+#define DR_DEBUG(...) MSK_DEBUG(DDEBUG_MASK, __VA_ARGS__)
+#else
+#define DR_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+
+extern uint qtaguid_debug_mask;
+
+/*---------------------------------------------------------------------------*/
+/*
+ * Tags:
+ *
+ * They represent what the data usage counters will be tracked against.
+ * By default a tag is just based on the UID.
+ * The UID is used as the base for policing, and cannot be ignored.
+ * So a tag will always at least represent a UID (uid_tag).
+ *
+ * A tag can be augmented with an "accounting tag" which is associated
+ * with a UID.
+ * User space can set the acct_tag portion of the tag which is then used
+ * with sockets: all data belonging to that socket will be counted against the
+ * tag. The policing is then based on the tag's uid_tag portion,
+ * and stats are collected for the acct_tag portion separately.
+ *
+ * There could be
+ * a: {acct_tag=1, uid_tag=10003}
+ * b: {acct_tag=2, uid_tag=10003}
+ * c: {acct_tag=3, uid_tag=10003}
+ * d: {acct_tag=0, uid_tag=10003}
+ * a, b, and c represent tags associated with specific sockets.
+ * d is for the totals for that uid, including all untagged traffic.
+ * Typically d is used with policing/quota rules.
+ *
+ * We want tag_t big enough to distinguish uid_t and acct_tag.
+ * It might become a struct if needed.
+ * Nothing should be using it as an int.
+ */
+typedef uint64_t tag_t; /* Only used via accessors */
+
+#define TAG_UID_MASK 0xFFFFFFFFULL
+#define TAG_ACCT_MASK (~0xFFFFFFFFULL)
+
+static inline int tag_compare(tag_t t1, tag_t t2)
+{
+ return t1 < t2 ? -1 : t1 == t2 ? 0 : 1;
+}
+
+static inline tag_t combine_atag_with_uid(tag_t acct_tag, uid_t uid)
+{
+ return acct_tag | uid;
+}
+static inline tag_t make_tag_from_uid(uid_t uid)
+{
+ return uid;
+}
+static inline uid_t get_uid_from_tag(tag_t tag)
+{
+ return tag & TAG_UID_MASK;
+}
+static inline tag_t get_utag_from_tag(tag_t tag)
+{
+ return tag & TAG_UID_MASK;
+}
+static inline tag_t get_atag_from_tag(tag_t tag)
+{
+ return tag & TAG_ACCT_MASK;
+}
+
+static inline bool valid_atag(tag_t tag)
+{
+ return !(tag & TAG_UID_MASK);
+}
+static inline tag_t make_atag_from_value(uint32_t value)
+{
+ return (uint64_t)value << 32;
+}
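+
+/*
+ * Example of composing/decomposing a tag with the accessors above
+ * (illustration only):
+ *   tag_t atag = make_atag_from_value(2);            == 0x0000000200000000
+ *   tag_t tag  = combine_atag_with_uid(atag, 10003); == 0x0000000200002713
+ *   get_uid_from_tag(tag)  == 10003
+ *   get_atag_from_tag(tag) == atag
+ *   get_utag_from_tag(tag) == make_tag_from_uid(10003)
+ */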
+/*---------------------------------------------------------------------------*/
+
+/*
+ * Maximum number of socket tags that a UID is allowed to have active.
+ * Multiple processes belonging to the same UID contribute towards this limit.
+ * Special UIDs that can impersonate a UID also contribute (e.g. download
+ * manager, ...)
+ */
+#define DEFAULT_MAX_SOCK_TAGS 1024
+
+/*
+ * For now we only track 2 sets of counters.
+ * The default set is 0.
+ * Userspace can activate another set for a given uid being tracked.
+ */
+#define IFS_MAX_COUNTER_SETS 2
+
+enum ifs_tx_rx {
+ IFS_TX,
+ IFS_RX,
+ IFS_MAX_DIRECTIONS
+};
+
+/* For now, TCP, UDP, the rest */
+enum ifs_proto {
+ IFS_TCP,
+ IFS_UDP,
+ IFS_PROTO_OTHER,
+ IFS_MAX_PROTOS
+};
+
+struct byte_packet_counters {
+ uint64_t bytes;
+ uint64_t packets;
+};
+
+struct data_counters {
+ struct byte_packet_counters bpc[IFS_MAX_COUNTER_SETS][IFS_MAX_DIRECTIONS][IFS_MAX_PROTOS];
+};
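+
+/*
+ * Counters are addressed as bpc[counter_set][direction][protocol], e.g.
+ * (illustration only):
+ *   dc->bpc[0][IFS_RX][IFS_TCP].bytes    rx TCP bytes, default set 0
+ *   dc->bpc[1][IFS_TX][IFS_UDP].packets  tx UDP packets, alternate set 1
+ */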
+
+/* Generic tag based nodes used as a base for rb_tree ops */
+struct tag_node {
+ struct rb_node node;
+ tag_t tag;
+};
+
+struct tag_stat {
+ struct tag_node tn;
+ struct data_counters counters;
+ /*
+ * If this tag is acct_tag based, we need to count against the
+ * matching parent uid_tag.
+ */
+ struct data_counters *parent_counters;
+};
+
+struct iface_stat {
+ struct list_head list; /* in iface_stat_list */
+ char *ifname;
+ bool active;
+ /* net_dev is only valid for active iface_stat */
+ struct net_device *net_dev;
+
+ struct byte_packet_counters totals_via_dev[IFS_MAX_DIRECTIONS];
+ struct byte_packet_counters totals_via_skb[IFS_MAX_DIRECTIONS];
+ /*
+ * We keep the last_known, because some devices reset their counters
+ * just before NETDEV_UP, while some will reset just before
+ * NETDEV_REGISTER (which is more normal).
+ * So now, if the device didn't do a NETDEV_UNREGISTER and we see
+ * its current dev stats smaller than what was previously known, we
+ * assume an UNREGISTER and just use the last_known.
+ */
+ struct byte_packet_counters last_known[IFS_MAX_DIRECTIONS];
+ /* last_known is usable when last_known_valid is true */
+ bool last_known_valid;
+
+ struct proc_dir_entry *proc_ptr;
+
+ struct rb_root tag_stat_tree;
+ spinlock_t tag_stat_list_lock;
+};
+
+/* This is needed to create proc_dir_entries from atomic context. */
+struct iface_stat_work {
+ struct work_struct iface_work;
+ struct iface_stat *iface_entry;
+};
+
+/*
+ * Track the tag that this socket is transferring data for, not necessarily
+ * the uid that owns the socket.
+ * This is the tag against which tag_stat.counters will be billed.
+ * These structs need to be looked up by sock and pid.
+ */
+struct sock_tag {
+ struct rb_node sock_node;
+ struct sock *sk; /* Only used as a number, never dereferenced */
+ /* The socket is needed for sockfd_put() */
+ struct socket *socket;
+ /* Used to associate with a given pid */
+ struct list_head list; /* in proc_qtu_data.sock_tag_list */
+ pid_t pid;
+
+ tag_t tag;
+};
+
+struct qtaguid_event_counts {
+ /* Various successful events */
+ atomic64_t sockets_tagged;
+ atomic64_t sockets_untagged;
+ atomic64_t counter_set_changes;
+ atomic64_t delete_cmds;
+ atomic64_t iface_events; /* Number of NETDEV_* events handled */
+
+ atomic64_t match_calls; /* Number of times iptables called mt */
+ /* Number of times iptables called mt from pre or post routing hooks */
+ atomic64_t match_calls_prepost;
+ /*
+ * match_found_sk_*: numbers related to the netfilter matching
+ * function finding a sock for the sk_buff.
+ * Total skbs processed is sum(match_found*).
+ */
+ atomic64_t match_found_sk; /* An sk was already in the sk_buff. */
+ /* The connection tracker had or didn't have the sk. */
+ atomic64_t match_found_sk_in_ct;
+ atomic64_t match_found_no_sk_in_ct;
+ /*
+ * No sk could be found. No apparent owner. Could happen with
+ * unsolicited traffic.
+ */
+ atomic64_t match_no_sk;
+ /*
+ * The file ptr in the sk_socket wasn't there.
+ * This might happen for traffic while the socket is being closed.
+ */
+ atomic64_t match_no_sk_file;
+};
+
+/* Track the set active_set for the given tag. */
+struct tag_counter_set {
+ struct tag_node tn;
+ int active_set;
+};
+
+/*----------------------------------------------*/
+/*
+ * The qtu uid data is used to track resources that are created directly or
+ * indirectly by processes (uid tracked).
+ * It is shared by the processes with the same uid.
+ * Some of the resources are counted to prevent further rogue allocations,
+ * some will need freeing once the owner process (uid) exits.
+ */
+struct uid_tag_data {
+ struct rb_node node;
+ uid_t uid;
+
+ /*
+ * For the uid, how many accounting tags have been set.
+ */
+ int num_active_tags;
+ /* Track the number of proc_qtu_data that reference it */
+ int num_pqd;
+ struct rb_root tag_ref_tree;
+ /* No tag_node_tree_lock; use uid_tag_data_tree_lock */
+};
+
+struct tag_ref {
+ struct tag_node tn;
+
+ /*
+ * This tracks the number of active sockets that have a tag on them
+ * which matches this tag_ref.tn.tag.
+ * A tag ref can live on after the sockets are untagged.
+ * A tag ref can only be removed during a tag delete command.
+ */
+ int num_sock_tags;
+};
+
+struct proc_qtu_data {
+ struct rb_node node;
+ pid_t pid;
+
+ struct uid_tag_data *parent_tag_data;
+
+ /* Tracks the sock_tags that need freeing upon this proc's death */
+ struct list_head sock_tag_list;
+ /* No spinlock_t sock_tag_list_lock; use the global one. */
+};
+
+/*----------------------------------------------*/
+#endif /* ifndef __XT_QTAGUID_INTERNAL_H__ */
diff --git a/net/netfilter/xt_qtaguid_print.c b/net/netfilter/xt_qtaguid_print.c
new file mode 100644
index 000000000000..8cbd8e42bcc4
--- /dev/null
+++ b/net/netfilter/xt_qtaguid_print.c
@@ -0,0 +1,564 @@
+/*
+ * Pretty printing Support for iptables xt_qtaguid module.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Most of the functions in this file just waste time if DEBUG is not defined.
+ * The matching xt_qtaguid_print.h will static inline empty funcs if the needed
+ * debug flags are not defined.
+ * Those funcs that fail to allocate memory will panic as there is no need to
+ * hobble along just pretending to do the requested work.
+ */
+
+#define DEBUG
+
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/net.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/spinlock_types.h>
+
+
+#include "xt_qtaguid_internal.h"
+#include "xt_qtaguid_print.h"
+
+#ifdef DDEBUG
+
+static void _bug_on_err_or_null(void *ptr)
+{
+ if (IS_ERR_OR_NULL(ptr)) {
+ pr_err("qtaguid: kmalloc failed\n");
+ BUG();
+ }
+}
+
+char *pp_tag_t(tag_t *tag)
+{
+ char *res;
+
+ if (!tag)
+ res = kasprintf(GFP_ATOMIC, "tag_t@null{}");
+ else
+ res = kasprintf(GFP_ATOMIC,
+ "tag_t@%p{tag=0x%llx, uid=%u}",
+ tag, *tag, get_uid_from_tag(*tag));
+ _bug_on_err_or_null(res);
+ return res;
+}
+
+char *pp_data_counters(struct data_counters *dc, bool showValues)
+{
+ char *res;
+
+ if (!dc)
+ res = kasprintf(GFP_ATOMIC, "data_counters@null{}");
+ else if (showValues)
+ res = kasprintf(
+ GFP_ATOMIC, "data_counters@%p{"
+ "set0{"
+ "rx{"
+ "tcp{b=%llu, p=%llu}, "
+ "udp{b=%llu, p=%llu},"
+ "other{b=%llu, p=%llu}}, "
+ "tx{"
+ "tcp{b=%llu, p=%llu}, "
+ "udp{b=%llu, p=%llu},"
+ "other{b=%llu, p=%llu}}}, "
+ "set1{"
+ "rx{"
+ "tcp{b=%llu, p=%llu}, "
+ "udp{b=%llu, p=%llu},"
+ "other{b=%llu, p=%llu}}, "
+ "tx{"
+ "tcp{b=%llu, p=%llu}, "
+ "udp{b=%llu, p=%llu},"
+ "other{b=%llu, p=%llu}}}}",
+ dc,
+ dc->bpc[0][IFS_RX][IFS_TCP].bytes,
+ dc->bpc[0][IFS_RX][IFS_TCP].packets,
+ dc->bpc[0][IFS_RX][IFS_UDP].bytes,
+ dc->bpc[0][IFS_RX][IFS_UDP].packets,
+ dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].bytes,
+ dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].packets,
+ dc->bpc[0][IFS_TX][IFS_TCP].bytes,
+ dc->bpc[0][IFS_TX][IFS_TCP].packets,
+ dc->bpc[0][IFS_TX][IFS_UDP].bytes,
+ dc->bpc[0][IFS_TX][IFS_UDP].packets,
+ dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].bytes,
+ dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].packets,
+ dc->bpc[1][IFS_RX][IFS_TCP].bytes,
+ dc->bpc[1][IFS_RX][IFS_TCP].packets,
+ dc->bpc[1][IFS_RX][IFS_UDP].bytes,
+ dc->bpc[1][IFS_RX][IFS_UDP].packets,
+ dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].bytes,
+ dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].packets,
+ dc->bpc[1][IFS_TX][IFS_TCP].bytes,
+ dc->bpc[1][IFS_TX][IFS_TCP].packets,
+ dc->bpc[1][IFS_TX][IFS_UDP].bytes,
+ dc->bpc[1][IFS_TX][IFS_UDP].packets,
+ dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].bytes,
+ dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].packets);
+ else
+ res = kasprintf(GFP_ATOMIC, "data_counters@%p{...}", dc);
+ _bug_on_err_or_null(res);
+ return res;
+}
+
+char *pp_tag_node(struct tag_node *tn)
+{
+ char *tag_str;
+ char *res;
+
+ if (!tn) {
+ res = kasprintf(GFP_ATOMIC, "tag_node@null{}");
+ _bug_on_err_or_null(res);
+ return res;
+ }
+ tag_str = pp_tag_t(&tn->tag);
+ res = kasprintf(GFP_ATOMIC,
+ "tag_node@%p{tag=%s}",
+ tn, tag_str);
+ _bug_on_err_or_null(res);
+ kfree(tag_str);
+ return res;
+}
+
+char *pp_tag_ref(struct tag_ref *tr)
+{
+ char *tn_str;
+ char *res;
+
+ if (!tr) {
+ res = kasprintf(GFP_ATOMIC, "tag_ref@null{}");
+ _bug_on_err_or_null(res);
+ return res;
+ }
+ tn_str = pp_tag_node(&tr->tn);
+ res = kasprintf(GFP_ATOMIC,
+ "tag_ref@%p{%s, num_sock_tags=%d}",
+ tr, tn_str, tr->num_sock_tags);
+ _bug_on_err_or_null(res);
+ kfree(tn_str);
+ return res;
+}
+
+char *pp_tag_stat(struct tag_stat *ts)
+{
+ char *tn_str;
+ char *counters_str;
+ char *parent_counters_str;
+ char *res;
+
+ if (!ts) {
+ res = kasprintf(GFP_ATOMIC, "tag_stat@null{}");
+ _bug_on_err_or_null(res);
+ return res;
+ }
+ tn_str = pp_tag_node(&ts->tn);
+ counters_str = pp_data_counters(&ts->counters, true);
+ parent_counters_str = pp_data_counters(ts->parent_counters, false);
+ res = kasprintf(GFP_ATOMIC,
+ "tag_stat@%p{%s, counters=%s, parent_counters=%s}",
+ ts, tn_str, counters_str, parent_counters_str);
+ _bug_on_err_or_null(res);
+ kfree(tn_str);
+ kfree(counters_str);
+ kfree(parent_counters_str);
+ return res;
+}
+
+char *pp_iface_stat(struct iface_stat *is)
+{
+ char *res;
+ if (!is)
+ res = kasprintf(GFP_ATOMIC, "iface_stat@null{}");
+ else
+ res = kasprintf(GFP_ATOMIC, "iface_stat@%p{"
+ "list=list_head{...}, "
+ "ifname=%s, "
+ "total_dev={rx={bytes=%llu, "
+ "packets=%llu}, "
+ "tx={bytes=%llu, "
+ "packets=%llu}}, "
+ "total_skb={rx={bytes=%llu, "
+ "packets=%llu}, "
+ "tx={bytes=%llu, "
+ "packets=%llu}}, "
+ "last_known_valid=%d, "
+ "last_known={rx={bytes=%llu, "
+ "packets=%llu}, "
+ "tx={bytes=%llu, "
+ "packets=%llu}}, "
+ "active=%d, "
+ "net_dev=%p, "
+ "proc_ptr=%p, "
+ "tag_stat_tree=rb_root{...}}",
+ is,
+ is->ifname,
+ is->totals_via_dev[IFS_RX].bytes,
+ is->totals_via_dev[IFS_RX].packets,
+ is->totals_via_dev[IFS_TX].bytes,
+ is->totals_via_dev[IFS_TX].packets,
+ is->totals_via_skb[IFS_RX].bytes,
+ is->totals_via_skb[IFS_RX].packets,
+ is->totals_via_skb[IFS_TX].bytes,
+ is->totals_via_skb[IFS_TX].packets,
+ is->last_known_valid,
+ is->last_known[IFS_RX].bytes,
+ is->last_known[IFS_RX].packets,
+ is->last_known[IFS_TX].bytes,
+ is->last_known[IFS_TX].packets,
+ is->active,
+ is->net_dev,
+ is->proc_ptr);
+ _bug_on_err_or_null(res);
+ return res;
+}
+
+char *pp_sock_tag(struct sock_tag *st)
+{
+ char *tag_str;
+ char *res;
+
+ if (!st) {
+ res = kasprintf(GFP_ATOMIC, "sock_tag@null{}");
+ _bug_on_err_or_null(res);
+ return res;
+ }
+ tag_str = pp_tag_t(&st->tag);
+ res = kasprintf(GFP_ATOMIC, "sock_tag@%p{"
+ "sock_node=rb_node{...}, "
+ "sk=%p socket=%p (f_count=%lu), list=list_head{...}, "
+ "pid=%u, tag=%s}",
+ st, st->sk, st->socket, atomic_long_read(
+ &st->socket->file->f_count),
+ st->pid, tag_str);
+ _bug_on_err_or_null(res);
+ kfree(tag_str);
+ return res;
+}
+
+char *pp_uid_tag_data(struct uid_tag_data *utd)
+{
+ char *res;
+
+ if (!utd)
+ res = kasprintf(GFP_ATOMIC, "uid_tag_data@null{}");
+ else
+ res = kasprintf(GFP_ATOMIC, "uid_tag_data@%p{"
+ "uid=%u, num_active_acct_tags=%d, "
+ "num_pqd=%d, "
+ "tag_node_tree=rb_root{...}, "
+ "proc_qtu_data_tree=rb_root{...}}",
+ utd, utd->uid,
+ utd->num_active_tags, utd->num_pqd);
+ _bug_on_err_or_null(res);
+ return res;
+}
+
+char *pp_proc_qtu_data(struct proc_qtu_data *pqd)
+{
+ char *parent_tag_data_str;
+ char *res;
+
+ if (!pqd) {
+ res = kasprintf(GFP_ATOMIC, "proc_qtu_data@null{}");
+ _bug_on_err_or_null(res);
+ return res;
+ }
+ parent_tag_data_str = pp_uid_tag_data(pqd->parent_tag_data);
+ res = kasprintf(GFP_ATOMIC, "proc_qtu_data@%p{"
+ "node=rb_node{...}, pid=%u, "
+ "parent_tag_data=%s, "
+ "sock_tag_list=list_head{...}}",
+ pqd, pqd->pid, parent_tag_data_str
+ );
+ _bug_on_err_or_null(res);
+ kfree(parent_tag_data_str);
+ return res;
+}
+
+/*------------------------------------------*/
+void prdebug_sock_tag_tree(int indent_level,
+ struct rb_root *sock_tag_tree)
+{
+ struct rb_node *node;
+ struct sock_tag *sock_tag_entry;
+ char *str;
+
+ if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+ return;
+
+ if (RB_EMPTY_ROOT(sock_tag_tree)) {
+ str = "sock_tag_tree=rb_root{}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ return;
+ }
+
+ str = "sock_tag_tree=rb_root{";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ indent_level++;
+ for (node = rb_first(sock_tag_tree);
+ node;
+ node = rb_next(node)) {
+ sock_tag_entry = rb_entry(node, struct sock_tag, sock_node);
+ str = pp_sock_tag(sock_tag_entry);
+ pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
+ kfree(str);
+ }
+ indent_level--;
+ str = "}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
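The dump helpers above and below indent nested structures by printing the indent level itself in a field indent_level*2 characters wide ("%*d"). A tiny stand-alone sketch of that printf idiom, shown in user space only to make the output visible:

	#include <stdio.h>

	int main(void)
	{
		int indent_level;

		/* "%*d" right-aligns the level number in a field indent_level*2 wide. */
		for (indent_level = 0; indent_level < 3; indent_level++)
			printf("%*d: entry at depth %d\n",
			       indent_level * 2, indent_level, indent_level);
		return 0;
	}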
+
+void prdebug_sock_tag_list(int indent_level,
+ struct list_head *sock_tag_list)
+{
+ struct sock_tag *sock_tag_entry;
+ char *str;
+
+ if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+ return;
+
+ if (list_empty(sock_tag_list)) {
+ str = "sock_tag_list=list_head{}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ return;
+ }
+
+ str = "sock_tag_list=list_head{";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ indent_level++;
+ list_for_each_entry(sock_tag_entry, sock_tag_list, list) {
+ str = pp_sock_tag(sock_tag_entry);
+ pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
+ kfree(str);
+ }
+ indent_level--;
+ str = "}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_proc_qtu_data_tree(int indent_level,
+ struct rb_root *proc_qtu_data_tree)
+{
+ char *str;
+ struct rb_node *node;
+ struct proc_qtu_data *proc_qtu_data_entry;
+
+ if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+ return;
+
+ if (RB_EMPTY_ROOT(proc_qtu_data_tree)) {
+ str = "proc_qtu_data_tree=rb_root{}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ return;
+ }
+
+ str = "proc_qtu_data_tree=rb_root{";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ indent_level++;
+ for (node = rb_first(proc_qtu_data_tree);
+ node;
+ node = rb_next(node)) {
+ proc_qtu_data_entry = rb_entry(node,
+ struct proc_qtu_data,
+ node);
+ str = pp_proc_qtu_data(proc_qtu_data_entry);
+ pr_debug("%*d: %s,\n", indent_level*2, indent_level,
+ str);
+ kfree(str);
+ indent_level++;
+ prdebug_sock_tag_list(indent_level,
+ &proc_qtu_data_entry->sock_tag_list);
+ indent_level--;
+
+ }
+ indent_level--;
+ str = "}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree)
+{
+ char *str;
+ struct rb_node *node;
+ struct tag_ref *tag_ref_entry;
+
+ if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+ return;
+
+ if (RB_EMPTY_ROOT(tag_ref_tree)) {
+ str = "tag_ref_tree{}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ return;
+ }
+
+ str = "tag_ref_tree{";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ indent_level++;
+ for (node = rb_first(tag_ref_tree);
+ node;
+ node = rb_next(node)) {
+ tag_ref_entry = rb_entry(node,
+ struct tag_ref,
+ tn.node);
+ str = pp_tag_ref(tag_ref_entry);
+ pr_debug("%*d: %s,\n", indent_level*2, indent_level,
+ str);
+ kfree(str);
+ }
+ indent_level--;
+ str = "}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_uid_tag_data_tree(int indent_level,
+ struct rb_root *uid_tag_data_tree)
+{
+ char *str;
+ struct rb_node *node;
+ struct uid_tag_data *uid_tag_data_entry;
+
+ if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+ return;
+
+ if (RB_EMPTY_ROOT(uid_tag_data_tree)) {
+ str = "uid_tag_data_tree=rb_root{}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ return;
+ }
+
+ str = "uid_tag_data_tree=rb_root{";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ indent_level++;
+ for (node = rb_first(uid_tag_data_tree);
+ node;
+ node = rb_next(node)) {
+ uid_tag_data_entry = rb_entry(node, struct uid_tag_data,
+ node);
+ str = pp_uid_tag_data(uid_tag_data_entry);
+ pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
+ kfree(str);
+ if (!RB_EMPTY_ROOT(&uid_tag_data_entry->tag_ref_tree)) {
+ indent_level++;
+ prdebug_tag_ref_tree(indent_level,
+ &uid_tag_data_entry->tag_ref_tree);
+ indent_level--;
+ }
+ }
+ indent_level--;
+ str = "}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_tag_stat_tree(int indent_level,
+ struct rb_root *tag_stat_tree)
+{
+ char *str;
+ struct rb_node *node;
+ struct tag_stat *ts_entry;
+
+ if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+ return;
+
+ if (RB_EMPTY_ROOT(tag_stat_tree)) {
+ str = "tag_stat_tree{}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ return;
+ }
+
+ str = "tag_stat_tree{";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ indent_level++;
+ for (node = rb_first(tag_stat_tree);
+ node;
+ node = rb_next(node)) {
+ ts_entry = rb_entry(node, struct tag_stat, tn.node);
+ str = pp_tag_stat(ts_entry);
+ pr_debug("%*d: %s\n", indent_level*2, indent_level,
+ str);
+ kfree(str);
+ }
+ indent_level--;
+ str = "}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_iface_stat_list(int indent_level,
+ struct list_head *iface_stat_list)
+{
+ char *str;
+ struct iface_stat *iface_entry;
+
+ if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+ return;
+
+ if (list_empty(iface_stat_list)) {
+ str = "iface_stat_list=list_head{}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ return;
+ }
+
+ str = "iface_stat_list=list_head{";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ indent_level++;
+ list_for_each_entry(iface_entry, iface_stat_list, list) {
+ str = pp_iface_stat(iface_entry);
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ kfree(str);
+
+ spin_lock_bh(&iface_entry->tag_stat_list_lock);
+ if (!RB_EMPTY_ROOT(&iface_entry->tag_stat_tree)) {
+ indent_level++;
+ prdebug_tag_stat_tree(indent_level,
+ &iface_entry->tag_stat_tree);
+ indent_level--;
+ }
+ spin_unlock_bh(&iface_entry->tag_stat_list_lock);
+ }
+ indent_level--;
+ str = "}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+#endif /* ifdef DDEBUG */
+/*------------------------------------------*/
+static const char * const netdev_event_strings[] = {
+ "netdev_unknown",
+ "NETDEV_UP",
+ "NETDEV_DOWN",
+ "NETDEV_REBOOT",
+ "NETDEV_CHANGE",
+ "NETDEV_REGISTER",
+ "NETDEV_UNREGISTER",
+ "NETDEV_CHANGEMTU",
+ "NETDEV_CHANGEADDR",
+ "NETDEV_GOING_DOWN",
+ "NETDEV_CHANGENAME",
+ "NETDEV_FEAT_CHANGE",
+ "NETDEV_BONDING_FAILOVER",
+ "NETDEV_PRE_UP",
+ "NETDEV_PRE_TYPE_CHANGE",
+ "NETDEV_POST_TYPE_CHANGE",
+ "NETDEV_POST_INIT",
+ "NETDEV_UNREGISTER_BATCH",
+ "NETDEV_RELEASE",
+ "NETDEV_NOTIFY_PEERS",
+ "NETDEV_JOIN",
+};
+
+const char *netdev_evt_str(int netdev_event)
+{
+ if (netdev_event < 0
+ || netdev_event >= ARRAY_SIZE(netdev_event_strings))
+ return "bad event num";
+ return netdev_event_strings[netdev_event];
+}
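netdev_evt_str() maps the numeric NETDEV_* notifier events to the names in the table above. A short sketch of the intended call site, assuming a standard netdevice notifier callback (the callback itself is hypothetical and relies on the usual notifier headers):

	static int sketch_netdev_event(struct notifier_block *nb,
				       unsigned long event, void *ptr)
	{
		/* Turn the numeric NETDEV_* event into a readable name. */
		pr_debug("qtaguid: netdev event %s (%lu)\n",
			 netdev_evt_str(event), event);
		return NOTIFY_DONE;
	}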
diff --git a/net/netfilter/xt_qtaguid_print.h b/net/netfilter/xt_qtaguid_print.h
new file mode 100644
index 000000000000..b63871a0be5a
--- /dev/null
+++ b/net/netfilter/xt_qtaguid_print.h
@@ -0,0 +1,120 @@
+/*
+ * Pretty printing Support for iptables xt_qtaguid module.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __XT_QTAGUID_PRINT_H__
+#define __XT_QTAGUID_PRINT_H__
+
+#include "xt_qtaguid_internal.h"
+
+#ifdef DDEBUG
+
+char *pp_tag_t(tag_t *tag);
+char *pp_data_counters(struct data_counters *dc, bool showValues);
+char *pp_tag_node(struct tag_node *tn);
+char *pp_tag_ref(struct tag_ref *tr);
+char *pp_tag_stat(struct tag_stat *ts);
+char *pp_iface_stat(struct iface_stat *is);
+char *pp_sock_tag(struct sock_tag *st);
+char *pp_uid_tag_data(struct uid_tag_data *qtd);
+char *pp_proc_qtu_data(struct proc_qtu_data *pqd);
+
+/*------------------------------------------*/
+void prdebug_sock_tag_list(int indent_level,
+ struct list_head *sock_tag_list);
+void prdebug_sock_tag_tree(int indent_level,
+ struct rb_root *sock_tag_tree);
+void prdebug_proc_qtu_data_tree(int indent_level,
+ struct rb_root *proc_qtu_data_tree);
+void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree);
+void prdebug_uid_tag_data_tree(int indent_level,
+ struct rb_root *uid_tag_data_tree);
+void prdebug_tag_stat_tree(int indent_level,
+ struct rb_root *tag_stat_tree);
+void prdebug_iface_stat_list(int indent_level,
+ struct list_head *iface_stat_list);
+
+#else
+
+/*------------------------------------------*/
+static inline char *pp_tag_t(tag_t *tag)
+{
+ return NULL;
+}
+static inline char *pp_data_counters(struct data_counters *dc, bool showValues)
+{
+ return NULL;
+}
+static inline char *pp_tag_node(struct tag_node *tn)
+{
+ return NULL;
+}
+static inline char *pp_tag_ref(struct tag_ref *tr)
+{
+ return NULL;
+}
+static inline char *pp_tag_stat(struct tag_stat *ts)
+{
+ return NULL;
+}
+static inline char *pp_iface_stat(struct iface_stat *is)
+{
+ return NULL;
+}
+static inline char *pp_sock_tag(struct sock_tag *st)
+{
+ return NULL;
+}
+static inline char *pp_uid_tag_data(struct uid_tag_data *qtd)
+{
+ return NULL;
+}
+static inline char *pp_proc_qtu_data(struct proc_qtu_data *pqd)
+{
+ return NULL;
+}
+
+/*------------------------------------------*/
+static inline
+void prdebug_sock_tag_list(int indent_level,
+ struct list_head *sock_tag_list)
+{
+}
+static inline
+void prdebug_sock_tag_tree(int indent_level,
+ struct rb_root *sock_tag_tree)
+{
+}
+static inline
+void prdebug_proc_qtu_data_tree(int indent_level,
+ struct rb_root *proc_qtu_data_tree)
+{
+}
+static inline
+void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree)
+{
+}
+static inline
+void prdebug_uid_tag_data_tree(int indent_level,
+ struct rb_root *uid_tag_data_tree)
+{
+}
+static inline
+void prdebug_tag_stat_tree(int indent_level,
+ struct rb_root *tag_stat_tree)
+{
+}
+static inline
+void prdebug_iface_stat_list(int indent_level,
+ struct list_head *iface_stat_list)
+{
+}
+#endif
+/*------------------------------------------*/
+const char *netdev_evt_str(int netdev_event);
+#endif /* ifndef __XT_QTAGUID_PRINT_H__ */
diff --git a/net/netfilter/xt_quota2.c b/net/netfilter/xt_quota2.c
new file mode 100644
index 000000000000..8163f370d04b
--- /dev/null
+++ b/net/netfilter/xt_quota2.c
@@ -0,0 +1,390 @@
+/*
+ * xt_quota2 - enhanced xt_quota that can count upwards and in packets
+ * as a minimal accounting match.
+ * by Jan Engelhardt <jengelh@medozas.de>, 2008
+ *
+ * Originally based on xt_quota.c:
+ * netfilter module to enforce network quotas
+ * Sam Johnston <samj@samj.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License; either
+ * version 2 of the License, as published by the Free Software Foundation.
+ */
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_quota2.h>
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+#include <linux/netfilter_ipv4/ipt_ULOG.h>
+#endif
+
+#include <net/netlink.h>
+
+/**
+ * @lock: lock to protect quota writers from each other
+ */
+struct xt_quota_counter {
+ u_int64_t quota;
+ spinlock_t lock;
+ struct list_head list;
+ atomic_t ref;
+ char name[sizeof(((struct xt_quota_mtinfo2 *)NULL)->name)];
+ struct proc_dir_entry *procfs_entry;
+};
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+/* Harald's favorite number +1 :D From ipt_ULOG.C */
+static unsigned int qlog_nl_event = 112;
+module_param_named(event_num, qlog_nl_event, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(event_num,
+ "Event number for NETLINK_NFLOG message. 0 disables log. "
+ "111 is what ipt_ULOG uses.");
+static struct sock *nflognl;
+#endif
+
+static LIST_HEAD(counter_list);
+static DEFINE_SPINLOCK(counter_list_lock);
+
+static struct proc_dir_entry *proc_xt_quota;
+static unsigned int quota_list_perms = S_IRUGO | S_IWUSR;
+static unsigned int quota_list_uid = 0;
+static unsigned int quota_list_gid = 0;
+module_param_named(perms, quota_list_perms, uint, S_IRUGO | S_IWUSR);
+module_param_named(uid, quota_list_uid, uint, S_IRUGO | S_IWUSR);
+module_param_named(gid, quota_list_gid, uint, S_IRUGO | S_IWUSR);
+
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+static void quota2_log(unsigned int hooknum,
+ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const char *prefix)
+{
+ ulog_packet_msg_t *pm;
+ struct sk_buff *log_skb;
+ size_t size;
+ struct nlmsghdr *nlh;
+
+ if (!qlog_nl_event)
+ return;
+
+ size = NLMSG_SPACE(sizeof(*pm));
+ size = max(size, (size_t)NLMSG_GOODSIZE);
+ log_skb = alloc_skb(size, GFP_ATOMIC);
+ if (!log_skb) {
+ pr_err("xt_quota2: cannot alloc skb for logging\n");
+ return;
+ }
+
+ /* nlmsg_put() returns NULL if there is no room left in the skb */
+ nlh = nlmsg_put(log_skb, /*pid*/0, /*seq*/0, qlog_nl_event,
+ sizeof(*pm), 0);
+
+ if (!nlh) {
+ pr_debug("xt_quota2: error during nlmsg_put\n");
+ kfree_skb(log_skb);
+ return;
+ }
+
+ pm = nlmsg_data(nlh);
+ if (skb->tstamp.tv64 == 0)
+ __net_timestamp((struct sk_buff *)skb);
+ pm->data_len = 0;
+ pm->hook = hooknum;
+ if (prefix != NULL)
+ strlcpy(pm->prefix, prefix, sizeof(pm->prefix));
+ else
+ *(pm->prefix) = '\0';
+ if (in)
+ strlcpy(pm->indev_name, in->name, sizeof(pm->indev_name));
+ else
+ pm->indev_name[0] = '\0';
+
+ if (out)
+ strlcpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
+ else
+ pm->outdev_name[0] = '\0';
+
+ NETLINK_CB(log_skb).dst_group = 1;
+ pr_debug("throwing 1 packets to netlink group 1\n");
+ netlink_broadcast(nflognl, log_skb, 0, 1, GFP_ATOMIC);
+}
+#else
+static void quota2_log(unsigned int hooknum,
+ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const char *prefix)
+{
+}
+#endif /* if+else CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG */
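quota2_log() above broadcasts a ulog_packet_msg_t to NETLINK_NFLOG multicast group 1 whenever a named quota runs out. A minimal user-space sketch of a listener for those messages, using only standard netlink sockets; payload parsing is omitted and the program is illustrative, not part of the patch:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>

	int main(void)
	{
		struct sockaddr_nl addr;
		char buf[4096];
		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_NFLOG);

		if (fd < 0)
			return 1;
		memset(&addr, 0, sizeof(addr));
		addr.nl_family = AF_NETLINK;
		addr.nl_groups = 1;	/* group used by quota2_log() above */
		if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
			return 1;
		for (;;) {
			ssize_t n = recv(fd, buf, sizeof(buf), 0);

			if (n <= 0)
				break;
			printf("quota2 log message: %zd bytes\n", n);
		}
		close(fd);
		return 0;
	}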
+
+static int quota_proc_read(char *page, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ struct xt_quota_counter *e = data;
+ int ret;
+
+ spin_lock_bh(&e->lock);
+ ret = snprintf(page, PAGE_SIZE, "%llu\n", e->quota);
+ spin_unlock_bh(&e->lock);
+ return ret;
+}
+
+static int quota_proc_write(struct file *file, const char __user *input,
+ unsigned long size, void *data)
+{
+ struct xt_quota_counter *e = data;
+ char buf[sizeof("18446744073709551616")];
+
+ if (size > sizeof(buf) - 1)
+ size = sizeof(buf) - 1;
+ if (copy_from_user(buf, input, size) != 0)
+ return -EFAULT;
+ buf[size] = '\0';
+
+ spin_lock_bh(&e->lock);
+ e->quota = simple_strtoull(buf, NULL, 0);
+ spin_unlock_bh(&e->lock);
+ return size;
+}
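Named counters appear as files under /proc/net/xt_quota/ (the directory created by proc_mkdir() in quota_mt2_init() further below); reading a file returns the remaining quota printed by quota_proc_read() above, and writing replaces it. A hedged user-space sketch, where the counter name "example" is made up:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long quota;
		FILE *f = fopen("/proc/net/xt_quota/example", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%llu", &quota) == 1)
			printf("remaining quota: %llu bytes\n", quota);
		fclose(f);
		return 0;
	}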
+
+static struct xt_quota_counter *
+q2_new_counter(const struct xt_quota_mtinfo2 *q, bool anon)
+{
+ struct xt_quota_counter *e;
+ unsigned int size;
+
+ /* Do not need all the procfs things for anonymous counters. */
+ size = anon ? offsetof(typeof(*e), list) : sizeof(*e);
+ e = kmalloc(size, GFP_KERNEL);
+ if (e == NULL)
+ return NULL;
+
+ e->quota = q->quota;
+ spin_lock_init(&e->lock);
+ if (!anon) {
+ INIT_LIST_HEAD(&e->list);
+ atomic_set(&e->ref, 1);
+ strlcpy(e->name, q->name, sizeof(e->name));
+ }
+ return e;
+}
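The anonymous branch above allocates only the head of struct xt_quota_counter, because anonymous counters never touch the list linkage, refcount, name, or procfs entry. A hedged sketch of why offsetof() gives a sufficient size there; the helper is illustrative only:

	static struct xt_quota_counter *sketch_anon_counter(u_int64_t quota)
	{
		struct xt_quota_counter *e;

		/* Only the fields that precede 'list' (quota, lock) are used. */
		e = kmalloc(offsetof(struct xt_quota_counter, list), GFP_KERNEL);
		if (e == NULL)
			return NULL;

		e->quota = quota;
		spin_lock_init(&e->lock);
		return e;
	}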
+
+/**
+ * q2_get_counter - get ref to counter or create new
+ * @q: quota match info holding the counter name
+ */
+static struct xt_quota_counter *
+q2_get_counter(const struct xt_quota_mtinfo2 *q)
+{
+ struct proc_dir_entry *p;
+ struct xt_quota_counter *e = NULL;
+ struct xt_quota_counter *new_e;
+
+ if (*q->name == '\0')
+ return q2_new_counter(q, true);
+
+ /* No need to hold a lock while getting a new counter */
+ new_e = q2_new_counter(q, false);
+ if (new_e == NULL)
+ goto out;
+
+ spin_lock_bh(&counter_list_lock);
+ list_for_each_entry(e, &counter_list, list)
+ if (strcmp(e->name, q->name) == 0) {
+ atomic_inc(&e->ref);
+ spin_unlock_bh(&counter_list_lock);
+ kfree(new_e);
+ pr_debug("xt_quota2: old counter name=%s", e->name);
+ return e;
+ }
+ e = new_e;
+ pr_debug("xt_quota2: new_counter name=%s", e->name);
+ list_add_tail(&e->list, &counter_list);
+ /* The entry having a refcount of 1 is not directly destructible.
+ * This func has not yet returned the new entry, thus iptables
+ * has no reference it could use to destroy this entry.
+ * For another rule to destroy it, this func would first have to be
+ * re-invoked and acquire a new ref for the same named quota.
+ * Nobody will access the e->procfs_entry either.
+ * So release the lock. */
+ spin_unlock_bh(&counter_list_lock);
+
+ /* create_proc_entry() is not spin_lock happy */
+ p = e->procfs_entry = create_proc_entry(e->name, quota_list_perms,
+ proc_xt_quota);
+
+ if (IS_ERR_OR_NULL(p)) {
+ spin_lock_bh(&counter_list_lock);
+ list_del(&e->list);
+ spin_unlock_bh(&counter_list_lock);
+ goto out;
+ }
+ p->data = e;
+ p->read_proc = quota_proc_read;
+ p->write_proc = quota_proc_write;
+ p->uid = quota_list_uid;
+ p->gid = quota_list_gid;
+ return e;
+
+ out:
+ kfree(e);
+ return NULL;
+}
+
+static int quota_mt2_check(const struct xt_mtchk_param *par)
+{
+ struct xt_quota_mtinfo2 *q = par->matchinfo;
+
+ pr_debug("xt_quota2: check() flags=0x%04x", q->flags);
+
+ if (q->flags & ~XT_QUOTA_MASK)
+ return -EINVAL;
+
+ q->name[sizeof(q->name)-1] = '\0';
+ if (*q->name == '.' || strchr(q->name, '/') != NULL) {
+ printk(KERN_ERR "xt_quota.3: illegal name\n");
+ return -EINVAL;
+ }
+
+ q->master = q2_get_counter(q);
+ if (q->master == NULL) {
+ printk(KERN_ERR "xt_quota.3: memory alloc failure\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void quota_mt2_destroy(const struct xt_mtdtor_param *par)
+{
+ struct xt_quota_mtinfo2 *q = par->matchinfo;
+ struct xt_quota_counter *e = q->master;
+
+ if (*q->name == '\0') {
+ kfree(e);
+ return;
+ }
+
+ spin_lock_bh(&counter_list_lock);
+ if (!atomic_dec_and_test(&e->ref)) {
+ spin_unlock_bh(&counter_list_lock);
+ return;
+ }
+
+ list_del(&e->list);
+ remove_proc_entry(e->name, proc_xt_quota);
+ spin_unlock_bh(&counter_list_lock);
+ kfree(e);
+}
+
+static bool
+quota_mt2(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ struct xt_quota_mtinfo2 *q = (void *)par->matchinfo;
+ struct xt_quota_counter *e = q->master;
+ bool ret = q->flags & XT_QUOTA_INVERT;
+
+ spin_lock_bh(&e->lock);
+ if (q->flags & XT_QUOTA_GROW) {
+ /*
+ * While no_change is pointless in "grow" mode, we will
+ * implement it here simply to have a consistent behavior.
+ */
+ if (!(q->flags & XT_QUOTA_NO_CHANGE)) {
+ e->quota += (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len;
+ }
+ ret = true;
+ } else {
+ if (e->quota >= skb->len) {
+ if (!(q->flags & XT_QUOTA_NO_CHANGE))
+ e->quota -= (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len;
+ ret = !ret;
+ } else {
+ /* We are transitioning, log that fact. */
+ if (e->quota) {
+ quota2_log(par->hooknum,
+ skb,
+ par->in,
+ par->out,
+ q->name);
+ }
+ /* we do not allow even small packets from now on */
+ e->quota = 0;
+ }
+ }
+ spin_unlock_bh(&e->lock);
+ return ret;
+}
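In the non-grow branch above, a packet matches while the remaining quota still covers its length; the first packet that does not fit zeroes the quota, so nothing matches afterwards. A small worked sketch with made-up numbers (a 1000-byte quota and three 400-byte packets):

	static unsigned int sketch_countdown_matches(void)
	{
		u_int64_t quota = 1000;
		unsigned int pkt_len = 400, i, matched = 0;

		for (i = 0; i < 3; i++) {
			if (quota >= pkt_len) {
				quota -= pkt_len;	/* 600 after #1, 200 after #2 */
				matched++;
			} else {
				quota = 0;		/* hard cutoff, as in quota_mt2() */
			}
		}
		return matched;				/* 2 with these numbers */
	}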
+
+static struct xt_match quota_mt2_reg[] __read_mostly = {
+ {
+ .name = "quota2",
+ .revision = 3,
+ .family = NFPROTO_IPV4,
+ .checkentry = quota_mt2_check,
+ .match = quota_mt2,
+ .destroy = quota_mt2_destroy,
+ .matchsize = sizeof(struct xt_quota_mtinfo2),
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "quota2",
+ .revision = 3,
+ .family = NFPROTO_IPV6,
+ .checkentry = quota_mt2_check,
+ .match = quota_mt2,
+ .destroy = quota_mt2_destroy,
+ .matchsize = sizeof(struct xt_quota_mtinfo2),
+ .me = THIS_MODULE,
+ },
+};
+
+static int __init quota_mt2_init(void)
+{
+ int ret;
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+ struct netlink_kernel_cfg cfg = {
+ .groups = 1,
+ };
+#endif
+ pr_debug("xt_quota2: init()");
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+ nflognl = netlink_kernel_create(&init_net, NETLINK_NFLOG, &cfg);
+ if (!nflognl)
+ return -ENOMEM;
+#endif
+
+ proc_xt_quota = proc_mkdir("xt_quota", init_net.proc_net);
+ if (proc_xt_quota == NULL)
+ return -EACCES;
+
+ ret = xt_register_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
+ if (ret < 0)
+ remove_proc_entry("xt_quota", init_net.proc_net);
+ pr_debug("xt_quota2: init() %d", ret);
+ return ret;
+}
+
+static void __exit quota_mt2_exit(void)
+{
+ xt_unregister_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
+ remove_proc_entry("xt_quota", init_net.proc_net);
+}
+
+module_init(quota_mt2_init);
+module_exit(quota_mt2_exit);
+MODULE_DESCRIPTION("Xtables: countdown quota match; up counter");
+MODULE_AUTHOR("Sam Johnston <samj@samj.net>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_quota2");
+MODULE_ALIAS("ip6t_quota2");
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 63b2bdb59e95..160227fe9d50 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -35,7 +35,7 @@
#include <net/netfilter/nf_conntrack.h>
#endif
-static void
+void
xt_socket_put_sk(struct sock *sk)
{
if (sk->sk_state == TCP_TIME_WAIT)
@@ -43,6 +43,7 @@ xt_socket_put_sk(struct sock *sk)
else
sock_put(sk);
}
+EXPORT_SYMBOL(xt_socket_put_sk);
static int
extract_icmp4_fields(const struct sk_buff *skb,
@@ -101,9 +102,8 @@ extract_icmp4_fields(const struct sk_buff *skb,
return 0;
}
-static bool
-socket_match(const struct sk_buff *skb, struct xt_action_param *par,
- const struct xt_socket_mtinfo1 *info)
+struct sock*
+xt_socket_get4_sk(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct iphdr *iph = ip_hdr(skb);
struct udphdr _hdr, *hp = NULL;
@@ -120,7 +120,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
hp = skb_header_pointer(skb, ip_hdrlen(skb),
sizeof(_hdr), &_hdr);
if (hp == NULL)
- return false;
+ return NULL;
protocol = iph->protocol;
saddr = iph->saddr;
@@ -131,9 +131,9 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
} else if (iph->protocol == IPPROTO_ICMP) {
if (extract_icmp4_fields(skb, &protocol, &saddr, &daddr,
&sport, &dport))
- return false;
+ return NULL;
} else {
- return false;
+ return NULL;
}
#ifdef XT_SOCKET_HAVE_CONNTRACK
@@ -157,6 +157,23 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol,
saddr, daddr, sport, dport, par->in, NFT_LOOKUP_ANY);
+
+ pr_debug("proto %hhu %pI4:%hu -> %pI4:%hu (orig %pI4:%hu) sock %p\n",
+ protocol, &saddr, ntohs(sport),
+ &daddr, ntohs(dport),
+ &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
+
+ return sk;
+}
+EXPORT_SYMBOL(xt_socket_get4_sk);
+
+static bool
+socket_match(const struct sk_buff *skb, struct xt_action_param *par,
+ const struct xt_socket_mtinfo1 *info)
+{
+ struct sock *sk;
+
+ sk = xt_socket_get4_sk(skb, par);
if (sk != NULL) {
bool wildcard;
bool transparent = true;
@@ -179,11 +196,6 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
sk = NULL;
}
- pr_debug("proto %hhu %pI4:%hu -> %pI4:%hu (orig %pI4:%hu) sock %p\n",
- protocol, &saddr, ntohs(sport),
- &daddr, ntohs(dport),
- &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
-
return (sk != NULL);
}
@@ -255,8 +267,8 @@ extract_icmp6_fields(const struct sk_buff *skb,
return 0;
}
-static bool
-socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
+struct sock*
+xt_socket_get6_sk(const struct sk_buff *skb, struct xt_action_param *par)
{
struct ipv6hdr *iph = ipv6_hdr(skb);
struct udphdr _hdr, *hp = NULL;
@@ -276,7 +288,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
hp = skb_header_pointer(skb, thoff,
sizeof(_hdr), &_hdr);
if (hp == NULL)
- return false;
+ return NULL;
saddr = &iph->saddr;
sport = hp->source;
@@ -286,13 +298,30 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
} else if (tproto == IPPROTO_ICMPV6) {
if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr,
&sport, &dport))
- return false;
+ return NULL;
} else {
- return false;
+ return NULL;
}
sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto,
saddr, daddr, sport, dport, par->in, NFT_LOOKUP_ANY);
+ pr_debug("proto %hhd %pI6:%hu -> %pI6:%hu "
+ "(orig %pI6:%hu) sock %p\n",
+ tproto, saddr, ntohs(sport),
+ daddr, ntohs(dport),
+ &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
+ return sk;
+}
+EXPORT_SYMBOL(xt_socket_get6_sk);
+
+static bool
+socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ struct sock *sk;
+ const struct xt_socket_mtinfo1 *info;
+
+ info = (struct xt_socket_mtinfo1 *) par->matchinfo;
+ sk = xt_socket_get6_sk(skb, par);
if (sk != NULL) {
bool wildcard;
bool transparent = true;
@@ -315,12 +344,6 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
sk = NULL;
}
- pr_debug("proto %hhd %pI6:%hu -> %pI6:%hu "
- "(orig %pI6:%hu) sock %p\n",
- tproto, saddr, ntohs(sport),
- daddr, ntohs(dport),
- &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
-
return (sk != NULL);
}
#endif
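The xt_socket.c changes above export the IPv4/IPv6 socket lookup and the matching put helper so other netfilter code (the qtaguid match added earlier in this series) can reuse them. A hedged sketch of a consumer, using only the functions exported above; the wrapper name is hypothetical:

	static bool sketch_skb_has_local_socket(const struct sk_buff *skb,
						struct xt_action_param *par)
	{
		struct sock *sk = xt_socket_get4_sk(skb, par);

		if (!sk)
			return false;
		/* Every successful lookup must be balanced with a put. */
		xt_socket_put_sk(sk);
		return true;
	}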
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index 78efe895b663..8e12c8a2b82b 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -10,6 +10,11 @@ menuconfig RFKILL
To compile this driver as a module, choose M here: the
module will be called rfkill.
+config RFKILL_PM
+ bool "Power off on suspend"
+ depends on RFKILL && PM
+ default y
+
# LED trigger support
config RFKILL_LEDS
bool
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 9b9be5279f5d..47982afe2d75 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -790,6 +790,7 @@ void rfkill_pause_polling(struct rfkill *rfkill)
}
EXPORT_SYMBOL(rfkill_pause_polling);
+#ifdef CONFIG_RFKILL_PM
void rfkill_resume_polling(struct rfkill *rfkill)
{
BUG_ON(!rfkill);
@@ -824,14 +825,17 @@ static int rfkill_resume(struct device *dev)
return 0;
}
+#endif
static struct class rfkill_class = {
.name = "rfkill",
.dev_release = rfkill_release,
.dev_attrs = rfkill_dev_attrs,
.dev_uevent = rfkill_dev_uevent,
+#ifdef CONFIG_RFKILL_PM
.suspend = rfkill_suspend,
.resume = rfkill_resume,
+#endif
};
bool rfkill_blocked(struct rfkill *rfkill)
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 1915ffe598e3..507b5e84fbdb 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -555,7 +555,7 @@ EXPORT_SYMBOL_GPL(rpc_clone_client);
* rpc_clone_client_set_auth - Clone an RPC client structure and set its auth
*
* @clnt: RPC client whose parameters are copied
- * @auth: security flavor for new client
+ * @flavor: security flavor for new client
*
* Returns a fresh RPC client or an ERR_PTR.
*/
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index b4133bd13915..bfa31714581f 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -972,8 +972,7 @@ static void rpc_async_release(struct work_struct *work)
static void rpc_release_resources_task(struct rpc_task *task)
{
- if (task->tk_rqstp)
- xprt_release(task);
+ xprt_release(task);
if (task->tk_msg.rpc_cred) {
put_rpccred(task->tk_msg.rpc_cred);
task->tk_msg.rpc_cred = NULL;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index bd462a532acf..33811db8788a 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1136,10 +1136,18 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
void xprt_release(struct rpc_task *task)
{
struct rpc_xprt *xprt;
- struct rpc_rqst *req;
+ struct rpc_rqst *req = task->tk_rqstp;
- if (!(req = task->tk_rqstp))
+ if (req == NULL) {
+ if (task->tk_client) {
+ rcu_read_lock();
+ xprt = rcu_dereference(task->tk_client->cl_xprt);
+ if (xprt->snd_task == task)
+ xprt_release_write(xprt, task);
+ rcu_read_unlock();
+ }
return;
+ }
xprt = req->rq_xprt;
if (task->tk_ops->rpc_count_stats != NULL)
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 16d08b399210..4c602d100480 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -166,3 +166,14 @@ config LIB80211_DEBUG
from lib80211.
If unsure, say N.
+
+config CFG80211_ALLOW_RECONNECT
+ bool "Allow reconnect while already connected"
+ depends on CFG80211
+ default n
+ help
+ The cfg80211 stack doesn't allow connecting if you are already
+ connected. This option allows making a new connection in that case.
+
+ Select this option ONLY for wlan drivers that are specifically
+ built for such purposes.
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 14d990400354..b677eab55b68 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -866,8 +866,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
/* allow mac80211 to determine the timeout */
wdev->ps_timeout = -1;
- if (!dev->ethtool_ops)
- dev->ethtool_ops = &cfg80211_ethtool_ops;
+ netdev_set_default_ethtool_ops(dev, &cfg80211_ethtool_ops);
if ((wdev->iftype == NL80211_IFTYPE_STATION ||
wdev->iftype == NL80211_IFTYPE_P2P_CLIENT ||
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 01592d7d4789..e0114003b016 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -19,7 +19,7 @@
#include "wext-compat.h"
#include "rdev-ops.h"
-#define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ)
+#define IEEE80211_SCAN_RESULT_EXPIRE (3 * HZ)
static void bss_release(struct kref *ref)
{
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index f2431e41a373..d288ee895a60 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -700,8 +700,10 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
return;
+#ifndef CONFIG_CFG80211_ALLOW_RECONNECT
if (wdev->sme_state != CFG80211_SME_CONNECTED)
return;
+#endif
if (wdev->current_bss) {
cfg80211_unhold_bss(wdev->current_bss);
@@ -778,10 +780,14 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev,
ASSERT_WDEV_LOCK(wdev);
+#ifndef CONFIG_CFG80211_ALLOW_RECONNECT
if (wdev->sme_state != CFG80211_SME_IDLE)
return -EALREADY;
if (WARN_ON(wdev->connect_keys)) {
+#else
+ if (wdev->connect_keys) {
+#endif
kfree(wdev->connect_keys);
wdev->connect_keys = NULL;
}
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index acb86507828a..ed79ca3da88d 100644
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -12,6 +12,43 @@
set -e
+# Attempt to find the correct Debian architecture
+forcearch=""
+debarch=""
+case "$UTS_MACHINE" in
+i386|ia64|alpha)
+ debarch="$UTS_MACHINE" ;;
+x86_64)
+ debarch=amd64 ;;
+sparc*)
+ debarch=sparc ;;
+s390*)
+ debarch=s390 ;;
+ppc*)
+ debarch=powerpc ;;
+parisc*)
+ debarch=hppa ;;
+mips*)
+ debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y .config && echo el) ;;
+arm*)
+ debarch=arm$(grep -q CONFIG_AEABI=y .config && echo el) ;;
+*)
+ echo "" >&2
+ echo "** ** ** WARNING ** ** **" >&2
+ echo "" >&2
+ echo "Your architecture doesn't have it's equivalent" >&2
+ echo "Debian userspace architecture defined!" >&2
+ echo "Falling back to using your current userspace instead!" >&2
+ echo "Please add support for $UTS_MACHINE to ${0} ..." >&2
+ echo "" >&2
+esac
+if [ -n "$KBUILD_DEBARCH" ] ; then
+ debarch="$KBUILD_DEBARCH"
+fi
+if [ -n "$debarch" ] ; then
+ forcearch="-DArchitecture=$debarch"
+fi
+
create_package() {
local pname="$1" pdir="$2"
@@ -25,42 +62,6 @@ create_package() {
chown -R root:root "$pdir"
chmod -R go-w "$pdir"
- # Attempt to find the correct Debian architecture
- local forcearch="" debarch=""
- case "$UTS_MACHINE" in
- i386|ia64|alpha)
- debarch="$UTS_MACHINE" ;;
- x86_64)
- debarch=amd64 ;;
- sparc*)
- debarch=sparc ;;
- s390*)
- debarch=s390 ;;
- ppc*)
- debarch=powerpc ;;
- parisc*)
- debarch=hppa ;;
- mips*)
- debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y .config && echo el) ;;
- arm*)
- debarch=arm$(grep -q CONFIG_AEABI=y .config && echo el) ;;
- *)
- echo "" >&2
- echo "** ** ** WARNING ** ** **" >&2
- echo "" >&2
- echo "Your architecture doesn't have it's equivalent" >&2
- echo "Debian userspace architecture defined!" >&2
- echo "Falling back to using your current userspace instead!" >&2
- echo "Please add support for $UTS_MACHINE to ${0} ..." >&2
- echo "" >&2
- esac
- if [ -n "$KBUILD_DEBARCH" ] ; then
- debarch="$KBUILD_DEBARCH"
- fi
- if [ -n "$debarch" ] ; then
- forcearch="-DArchitecture=$debarch"
- fi
-
# Create the package
dpkg-gencontrol -isp $forcearch -p$pname -P"$pdir"
dpkg --build "$pdir" ..
@@ -252,15 +253,14 @@ mkdir -p "$destdir"
(cd $objtree; tar -c -f - -T "$objtree/debian/hdrobjfiles") | (cd $destdir; tar -xf -)
ln -sf "/usr/src/linux-headers-$version" "$kernel_headers_dir/lib/modules/$version/build"
rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles"
-arch=$(dpkg --print-architecture)
cat <<EOF >> debian/control
Package: $kernel_headers_packagename
Provides: linux-headers, linux-headers-2.6
-Architecture: $arch
-Description: Linux kernel headers for $KERNELRELEASE on $arch
- This package provides kernel header files for $KERNELRELEASE on $arch
+Architecture: $debarch
+Description: Linux kernel headers for $KERNELRELEASE on $debarch
+ This package provides kernel header files for $KERNELRELEASE on $debarch
.
This is useful for people who need to build external modules
EOF
@@ -281,6 +281,12 @@ EOF
create_package "$fwpackagename" "$fwdir"
fi
+# Copy device tree files if generated
+stat arch/$ARCH/boot/dts/*.dtb && {
+ mkdir -p "$tmpdir/lib/firmware/$version/device-tree"
+ cp arch/$ARCH/boot/dts/*.dtb "$tmpdir/lib/firmware/$version/device-tree"
+}
+
cat <<EOF >> debian/control
Package: $libc_headers_packagename
diff --git a/security/commoncap.c b/security/commoncap.c
index 7ee08c756d6b..e4bfa20fe1e5 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -31,6 +31,10 @@
#include <linux/binfmts.h>
#include <linux/personality.h>
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+#endif
+
/*
* If a non-root user executes a setuid-root binary in
* !secure(SECURE_NOROOT) mode, then we raise capabilities.
@@ -78,6 +82,13 @@ int cap_capable(const struct cred *cred, struct user_namespace *targ_ns,
{
struct user_namespace *ns = targ_ns;
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+ if (cap == CAP_NET_RAW && in_egroup_p(AID_NET_RAW))
+ return 0;
+ if (cap == CAP_NET_ADMIN && in_egroup_p(AID_NET_ADMIN))
+ return 0;
+#endif
+
/* See if cred has the capability in the target user namespace
* by examining the target user namespace and all of the target
* user namespace's parents.
diff --git a/sound/arm/pxa2xx-ac97-lib.c b/sound/arm/pxa2xx-ac97-lib.c
index 6fc0ae90e5b1..fff7753e35c1 100644
--- a/sound/arm/pxa2xx-ac97-lib.c
+++ b/sound/arm/pxa2xx-ac97-lib.c
@@ -18,6 +18,7 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/io.h>
+#include <linux/gpio.h>
#include <sound/ac97_codec.h>
#include <sound/pxa2xx-lib.h>
@@ -148,6 +149,8 @@ static inline void pxa_ac97_warm_pxa27x(void)
static inline void pxa_ac97_cold_pxa27x(void)
{
+ unsigned int timeout;
+
GCR &= GCR_COLD_RST; /* clear everything but nCRST */
GCR &= ~GCR_COLD_RST; /* then assert nCRST */
@@ -157,8 +160,10 @@ static inline void pxa_ac97_cold_pxa27x(void)
clk_enable(ac97conf_clk);
udelay(5);
clk_disable(ac97conf_clk);
- GCR = GCR_COLD_RST;
- udelay(50);
+ GCR = GCR_COLD_RST | GCR_WARM_RST;
+ timeout = 100; /* wait for the codec-ready bit to be set */
+ while (!((GSR | gsr_bits) & (GSR_PCR | GSR_SCR)) && timeout--)
+ mdelay(1);
}
#endif
@@ -340,8 +345,21 @@ int pxa2xx_ac97_hw_probe(struct platform_device *dev)
}
if (cpu_is_pxa27x()) {
- /* Use GPIO 113 as AC97 Reset on Bulverde */
+ /*
+ * This gpio is needed for a work-around to a bug in the ac97
+ * controller during warm reset. The direction and level are set
+ * here so that it is an output driven high when switching from
+ * AC97_nRESET alt function to generic gpio.
+ */
+ ret = gpio_request_one(reset_gpio, GPIOF_OUT_INIT_HIGH,
+ "pxa27x ac97 reset");
+ if (ret < 0) {
+ pr_err("%s: gpio_request_one() failed: %d\n",
+ __func__, ret);
+ goto err_conf;
+ }
pxa27x_assert_ac97reset(reset_gpio, 0);
+
ac97conf_clk = clk_get(&dev->dev, "AC97CONFCLK");
if (IS_ERR(ac97conf_clk)) {
ret = PTR_ERR(ac97conf_clk);
@@ -384,6 +402,8 @@ EXPORT_SYMBOL_GPL(pxa2xx_ac97_hw_probe);
void pxa2xx_ac97_hw_remove(struct platform_device *dev)
{
+ if (cpu_is_pxa27x())
+ gpio_free(reset_gpio);
GCR |= GCR_ACLINK_OFF;
free_irq(IRQ_AC97, NULL);
if (ac97conf_clk) {
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index cca87277baf0..0b6aebacc56b 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -573,9 +573,12 @@ enum {
#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */
/* quirks for Intel PCH */
-#define AZX_DCAPS_INTEL_PCH \
+#define AZX_DCAPS_INTEL_PCH_NOPM \
(AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_BUFSIZE | \
- AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_PM_RUNTIME)
+ AZX_DCAPS_COUNT_LPIB_DELAY)
+
+#define AZX_DCAPS_INTEL_PCH \
+ (AZX_DCAPS_INTEL_PCH_NOPM | AZX_DCAPS_PM_RUNTIME)
/* quirks for ATI SB / AMD Hudson */
#define AZX_DCAPS_PRESET_ATI_SB \
@@ -3586,13 +3589,13 @@ static void azx_remove(struct pci_dev *pci)
static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
/* CPT */
{ PCI_DEVICE(0x8086, 0x1c20),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
/* PBG */
{ PCI_DEVICE(0x8086, 0x1d20),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
/* Panther Point */
{ PCI_DEVICE(0x8086, 0x1e20),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
/* Lynx Point */
{ PCI_DEVICE(0x8086, 0x8c20),
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 60890bfecc19..dd798c3196ff 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -558,24 +558,12 @@ static int conexant_build_controls(struct hda_codec *codec)
return 0;
}
-#ifdef CONFIG_PM
-static int conexant_suspend(struct hda_codec *codec)
-{
- snd_hda_shutup_pins(codec);
- return 0;
-}
-#endif
-
static const struct hda_codec_ops conexant_patch_ops = {
.build_controls = conexant_build_controls,
.build_pcms = conexant_build_pcms,
.init = conexant_init,
.free = conexant_free,
.set_power_state = conexant_set_power,
-#ifdef CONFIG_PM
- .suspend = conexant_suspend,
-#endif
- .reboot_notify = snd_hda_shutup_pins,
};
#ifdef CONFIG_SND_HDA_INPUT_BEEP
@@ -4405,10 +4393,6 @@ static const struct hda_codec_ops cx_auto_patch_ops = {
.init = cx_auto_init,
.free = conexant_free,
.unsol_event = snd_hda_jack_unsol_event,
-#ifdef CONFIG_PM
- .suspend = conexant_suspend,
-#endif
- .reboot_notify = snd_hda_shutup_pins,
};
/*
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index b6c21ea187ca..807a2aa1ff38 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1502,7 +1502,7 @@ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
ctl_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
substream = snd_pcm_chmap_substream(info, ctl_idx);
if (!substream || !substream->runtime)
- return -EBADFD;
+ return 0; /* just to avoid an error from alsactl restore */
switch (substream->runtime->status->state) {
case SNDRV_PCM_STATE_OPEN:
case SNDRV_PCM_STATE_SETUP:
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 71ae23dd7103..f5196277b6e9 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5817,6 +5817,9 @@ enum {
ALC269_TYPE_ALC269VB,
ALC269_TYPE_ALC269VC,
ALC269_TYPE_ALC269VD,
+ ALC269_TYPE_ALC280,
+ ALC269_TYPE_ALC282,
+ ALC269_TYPE_ALC284,
};
/*
@@ -5833,10 +5836,13 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
switch (spec->codec_variant) {
case ALC269_TYPE_ALC269VA:
case ALC269_TYPE_ALC269VC:
+ case ALC269_TYPE_ALC280:
+ case ALC269_TYPE_ALC284:
ssids = alc269va_ssids;
break;
case ALC269_TYPE_ALC269VB:
case ALC269_TYPE_ALC269VD:
+ case ALC269_TYPE_ALC282:
ssids = alc269_ssids;
break;
default:
@@ -6400,7 +6406,8 @@ static int patch_alc269(struct hda_codec *codec)
alc_auto_parse_customize_define(codec);
- if (codec->vendor_id == 0x10ec0269) {
+ switch (codec->vendor_id) {
+ case 0x10ec0269:
spec->codec_variant = ALC269_TYPE_ALC269VA;
switch (alc_get_coef0(codec) & 0x00f0) {
case 0x0010:
@@ -6425,6 +6432,20 @@ static int patch_alc269(struct hda_codec *codec)
goto error;
spec->init_hook = alc269_fill_coef;
alc269_fill_coef(codec);
+ break;
+
+ case 0x10ec0280:
+ case 0x10ec0290:
+ spec->codec_variant = ALC269_TYPE_ALC280;
+ break;
+ case 0x10ec0282:
+ case 0x10ec0283:
+ spec->codec_variant = ALC269_TYPE_ALC282;
+ break;
+ case 0x10ec0284:
+ case 0x10ec0292:
+ spec->codec_variant = ALC269_TYPE_ALC284;
+ break;
}
/* automatic parse from the BIOS config */
@@ -7129,6 +7150,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
{ .id = 0x10ec0280, .name = "ALC280", .patch = patch_alc269 },
{ .id = 0x10ec0282, .name = "ALC282", .patch = patch_alc269 },
{ .id = 0x10ec0283, .name = "ALC283", .patch = patch_alc269 },
+ { .id = 0x10ec0284, .name = "ALC284", .patch = patch_alc269 },
{ .id = 0x10ec0290, .name = "ALC290", .patch = patch_alc269 },
{ .id = 0x10ec0292, .name = "ALC292", .patch = patch_alc269 },
{ .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 6e02e064d7b4..223c3d9cc69e 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -441,6 +441,7 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
*/
/* status */
#define HDSPM_AES32_wcLock 0x0200000
+#define HDSPM_AES32_wcSync 0x0100000
#define HDSPM_AES32_wcFreq_bit 22
/* (status >> HDSPM_AES32_wcFreq_bit) & 0xF gives WC frequency (cf function
HDSPM_bit2freq */
@@ -3467,10 +3468,12 @@ static int hdspm_wc_sync_check(struct hdspm *hdspm)
switch (hdspm->io_type) {
case AES32:
status = hdspm_read(hdspm, HDSPM_statusRegister);
- if (status & HDSPM_wcSync)
- return 2;
- else if (status & HDSPM_wcLock)
- return 1;
+ if (status & HDSPM_AES32_wcLock) {
+ if (status & HDSPM_AES32_wcSync)
+ return 2;
+ else
+ return 1;
+ }
return 0;
break;
@@ -4658,6 +4661,7 @@ snd_hdspm_proc_read_aes32(struct snd_info_entry * entry,
unsigned int status;
unsigned int status2;
unsigned int timecode;
+ unsigned int wcLock, wcSync;
int pref_syncref;
char *autosync_ref;
int x;
@@ -4751,8 +4755,11 @@ snd_hdspm_proc_read_aes32(struct snd_info_entry * entry,
snd_iprintf(buffer, "--- Status:\n");
+ wcLock = status & HDSPM_AES32_wcLock;
+ wcSync = wcLock && (status & HDSPM_AES32_wcSync);
+
snd_iprintf(buffer, "Word: %s Frequency: %d\n",
- (status & HDSPM_AES32_wcLock) ? "Sync " : "No Lock",
+ (wcLock) ? (wcSync ? "Sync " : "Lock ") : "No Lock",
HDSPM_bit2freq((status >> HDSPM_AES32_wcFreq_bit) & 0xF));
for (x = 0; x < 8; x++) {
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index adf397b9d0e6..1d8bb5917594 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -446,15 +446,9 @@ static int arizona_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
case SND_SOC_DAIFMT_DSP_A:
mode = 0;
break;
- case SND_SOC_DAIFMT_DSP_B:
- mode = 1;
- break;
case SND_SOC_DAIFMT_I2S:
mode = 2;
break;
- case SND_SOC_DAIFMT_LEFT_J:
- mode = 3;
- break;
default:
arizona_aif_err(dai, "Unsupported DAI format %d\n",
fmt & SND_SOC_DAIFMT_FORMAT_MASK);
@@ -714,7 +708,8 @@ static int arizona_hw_params(struct snd_pcm_substream *substream,
snd_soc_update_bits(codec, ARIZONA_ASYNC_SAMPLE_RATE_1,
ARIZONA_ASYNC_SAMPLE_RATE_MASK, sr_val);
snd_soc_update_bits(codec, base + ARIZONA_AIF_RATE_CTRL,
- ARIZONA_AIF1_RATE_MASK, 8);
+ ARIZONA_AIF1_RATE_MASK,
+ 8 << ARIZONA_AIF1_RATE_SHIFT);
break;
default:
arizona_aif_err(dai, "Invalid clock %d\n", dai_priv->clk);
diff --git a/sound/soc/codecs/arizona.h b/sound/soc/codecs/arizona.h
index 41dae1ed3b71..4deebeb07177 100644
--- a/sound/soc/codecs/arizona.h
+++ b/sound/soc/codecs/arizona.h
@@ -34,15 +34,15 @@
#define ARIZONA_FLL_SRC_MCLK1 0
#define ARIZONA_FLL_SRC_MCLK2 1
-#define ARIZONA_FLL_SRC_SLIMCLK 2
-#define ARIZONA_FLL_SRC_FLL1 3
-#define ARIZONA_FLL_SRC_FLL2 4
-#define ARIZONA_FLL_SRC_AIF1BCLK 5
-#define ARIZONA_FLL_SRC_AIF2BCLK 6
-#define ARIZONA_FLL_SRC_AIF3BCLK 7
-#define ARIZONA_FLL_SRC_AIF1LRCLK 8
-#define ARIZONA_FLL_SRC_AIF2LRCLK 9
-#define ARIZONA_FLL_SRC_AIF3LRCLK 10
+#define ARIZONA_FLL_SRC_SLIMCLK 3
+#define ARIZONA_FLL_SRC_FLL1 4
+#define ARIZONA_FLL_SRC_FLL2 5
+#define ARIZONA_FLL_SRC_AIF1BCLK 8
+#define ARIZONA_FLL_SRC_AIF2BCLK 9
+#define ARIZONA_FLL_SRC_AIF3BCLK 10
+#define ARIZONA_FLL_SRC_AIF1LRCLK 12
+#define ARIZONA_FLL_SRC_AIF2LRCLK 13
+#define ARIZONA_FLL_SRC_AIF3LRCLK 14
#define ARIZONA_MIXER_VOL_MASK 0x00FE
#define ARIZONA_MIXER_VOL_SHIFT 1
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index 4f1127935fdf..ac8742a1f25a 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -474,16 +474,16 @@ static int cs4271_probe(struct snd_soc_codec *codec)
struct cs4271_platform_data *cs4271plat = codec->dev->platform_data;
int ret;
int gpio_nreset = -EINVAL;
- int amutec_eq_bmutec = 0;
+ bool amutec_eq_bmutec = false;
#ifdef CONFIG_OF
if (of_match_device(cs4271_dt_ids, codec->dev)) {
gpio_nreset = of_get_named_gpio(codec->dev->of_node,
"reset-gpio", 0);
- if (!of_get_property(codec->dev->of_node,
+ if (of_get_property(codec->dev->of_node,
"cirrus,amutec-eq-bmutec", NULL))
- amutec_eq_bmutec = 1;
+ amutec_eq_bmutec = true;
}
#endif
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index 99bb1c69499e..9811a5478c87 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -737,7 +737,7 @@ static const struct cs42l52_clk_para clk_map_table[] = {
static int cs42l52_get_clk(int mclk, int rate)
{
- int i, ret = 0;
+ int i, ret = -EINVAL;
u_int mclk1, mclk2 = 0;
for (i = 0; i < ARRAY_SIZE(clk_map_table); i++) {
@@ -749,8 +749,6 @@ static int cs42l52_get_clk(int mclk, int rate)
}
}
}
- if (ret > ARRAY_SIZE(clk_map_table))
- return -EINVAL;
return ret;
}
diff --git a/sound/soc/codecs/lm49453.c b/sound/soc/codecs/lm49453.c
index d75257d40a49..e19490cfb3a8 100644
--- a/sound/soc/codecs/lm49453.c
+++ b/sound/soc/codecs/lm49453.c
@@ -111,9 +111,9 @@ static struct reg_default lm49453_reg_defs[] = {
{ 101, 0x00 },
{ 102, 0x00 },
{ 103, 0x01 },
- { 105, 0x01 },
- { 106, 0x00 },
- { 107, 0x01 },
+ { 104, 0x01 },
+ { 105, 0x00 },
+ { 106, 0x01 },
{ 107, 0x00 },
{ 108, 0x00 },
{ 109, 0x00 },
@@ -163,56 +163,25 @@ static struct reg_default lm49453_reg_defs[] = {
{ 184, 0x00 },
{ 185, 0x00 },
{ 186, 0x00 },
- { 189, 0x00 },
+ { 187, 0x00 },
{ 188, 0x00 },
- { 194, 0x00 },
- { 195, 0x00 },
- { 196, 0x00 },
- { 197, 0x00 },
- { 200, 0x00 },
- { 201, 0x00 },
- { 202, 0x00 },
- { 203, 0x00 },
- { 204, 0x00 },
- { 205, 0x00 },
- { 208, 0x00 },
+ { 189, 0x00 },
+ { 208, 0x06 },
{ 209, 0x00 },
- { 210, 0x00 },
- { 211, 0x00 },
- { 213, 0x00 },
- { 214, 0x00 },
- { 215, 0x00 },
- { 216, 0x00 },
- { 217, 0x00 },
- { 218, 0x00 },
- { 219, 0x00 },
+ { 210, 0x08 },
+ { 211, 0x54 },
+ { 212, 0x14 },
+ { 213, 0x0d },
+ { 214, 0x0d },
+ { 215, 0x14 },
+ { 216, 0x60 },
{ 221, 0x00 },
{ 222, 0x00 },
+ { 223, 0x00 },
{ 224, 0x00 },
- { 225, 0x00 },
- { 226, 0x00 },
- { 227, 0x00 },
- { 228, 0x00 },
- { 229, 0x00 },
- { 230, 0x13 },
- { 231, 0x00 },
- { 232, 0x80 },
- { 233, 0x0C },
- { 234, 0xDD },
- { 235, 0x00 },
- { 236, 0x04 },
- { 237, 0x00 },
- { 238, 0x00 },
- { 239, 0x00 },
- { 240, 0x00 },
- { 241, 0x00 },
- { 242, 0x00 },
- { 243, 0x00 },
- { 244, 0x00 },
- { 245, 0x00 },
{ 248, 0x00 },
{ 249, 0x00 },
- { 254, 0x00 },
+ { 250, 0x00 },
{ 255, 0x00 },
};
@@ -525,36 +494,41 @@ SOC_DAPM_SINGLE("Port2_2 Switch", LM49453_P0_PORT2_TX2_REG, 7, 1, 0),
};
/* TLV Declarations */
-static const DECLARE_TLV_DB_SCALE(digital_tlv, -7650, 150, 1);
-static const DECLARE_TLV_DB_SCALE(port_tlv, 0, 600, 0);
+static const DECLARE_TLV_DB_SCALE(adc_dac_tlv, -7650, 150, 1);
+static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 200, 1);
+static const DECLARE_TLV_DB_SCALE(port_tlv, -1800, 600, 0);
+static const DECLARE_TLV_DB_SCALE(stn_tlv, -7200, 150, 0);
static const struct snd_kcontrol_new lm49453_sidetone_mixer_controls[] = {
/* Sidetone supports mono only */
SOC_DAPM_SINGLE_TLV("Sidetone ADCL Volume", LM49453_P0_STN_VOL_ADCL_REG,
- 0, 0x3F, 0, digital_tlv),
+ 0, 0x3F, 0, stn_tlv),
SOC_DAPM_SINGLE_TLV("Sidetone ADCR Volume", LM49453_P0_STN_VOL_ADCR_REG,
- 0, 0x3F, 0, digital_tlv),
+ 0, 0x3F, 0, stn_tlv),
SOC_DAPM_SINGLE_TLV("Sidetone DMIC1L Volume", LM49453_P0_STN_VOL_DMIC1L_REG,
- 0, 0x3F, 0, digital_tlv),
+ 0, 0x3F, 0, stn_tlv),
SOC_DAPM_SINGLE_TLV("Sidetone DMIC1R Volume", LM49453_P0_STN_VOL_DMIC1R_REG,
- 0, 0x3F, 0, digital_tlv),
+ 0, 0x3F, 0, stn_tlv),
SOC_DAPM_SINGLE_TLV("Sidetone DMIC2L Volume", LM49453_P0_STN_VOL_DMIC2L_REG,
- 0, 0x3F, 0, digital_tlv),
+ 0, 0x3F, 0, stn_tlv),
SOC_DAPM_SINGLE_TLV("Sidetone DMIC2R Volume", LM49453_P0_STN_VOL_DMIC2R_REG,
- 0, 0x3F, 0, digital_tlv),
+ 0, 0x3F, 0, stn_tlv),
};
static const struct snd_kcontrol_new lm49453_snd_controls[] = {
/* mic1 and mic2 supports mono only */
- SOC_SINGLE_TLV("Mic1 Volume", LM49453_P0_ADC_LEVELL_REG, 0, 6,
- 0, digital_tlv),
- SOC_SINGLE_TLV("Mic2 Volume", LM49453_P0_ADC_LEVELR_REG, 0, 6,
- 0, digital_tlv),
+ SOC_SINGLE_TLV("Mic1 Volume", LM49453_P0_MICL_REG, 0, 15, 0, mic_tlv),
+ SOC_SINGLE_TLV("Mic2 Volume", LM49453_P0_MICR_REG, 0, 15, 0, mic_tlv),
+
+ SOC_SINGLE_TLV("ADCL Volume", LM49453_P0_ADC_LEVELL_REG, 0, 63,
+ 0, adc_dac_tlv),
+ SOC_SINGLE_TLV("ADCR Volume", LM49453_P0_ADC_LEVELR_REG, 0, 63,
+ 0, adc_dac_tlv),
SOC_DOUBLE_R_TLV("DMIC1 Volume", LM49453_P0_DMIC1_LEVELL_REG,
- LM49453_P0_DMIC1_LEVELR_REG, 0, 6, 0, digital_tlv),
+ LM49453_P0_DMIC1_LEVELR_REG, 0, 63, 0, adc_dac_tlv),
SOC_DOUBLE_R_TLV("DMIC2 Volume", LM49453_P0_DMIC2_LEVELL_REG,
- LM49453_P0_DMIC2_LEVELR_REG, 0, 6, 0, digital_tlv),
+ LM49453_P0_DMIC2_LEVELR_REG, 0, 63, 0, adc_dac_tlv),
SOC_DAPM_ENUM("Mic2Mode", lm49453_mic2mode_enum),
SOC_DAPM_ENUM("DMIC12 SRC", lm49453_dmic12_cfg_enum),
@@ -569,16 +543,16 @@ static const struct snd_kcontrol_new lm49453_snd_controls[] = {
2, 1, 0),
SOC_DOUBLE_R_TLV("DAC HP Volume", LM49453_P0_DAC_HP_LEVELL_REG,
- LM49453_P0_DAC_HP_LEVELR_REG, 0, 6, 0, digital_tlv),
+ LM49453_P0_DAC_HP_LEVELR_REG, 0, 63, 0, adc_dac_tlv),
SOC_DOUBLE_R_TLV("DAC LO Volume", LM49453_P0_DAC_LO_LEVELL_REG,
- LM49453_P0_DAC_LO_LEVELR_REG, 0, 6, 0, digital_tlv),
+ LM49453_P0_DAC_LO_LEVELR_REG, 0, 63, 0, adc_dac_tlv),
SOC_DOUBLE_R_TLV("DAC LS Volume", LM49453_P0_DAC_LS_LEVELL_REG,
- LM49453_P0_DAC_LS_LEVELR_REG, 0, 6, 0, digital_tlv),
+ LM49453_P0_DAC_LS_LEVELR_REG, 0, 63, 0, adc_dac_tlv),
SOC_DOUBLE_R_TLV("DAC HA Volume", LM49453_P0_DAC_HA_LEVELL_REG,
- LM49453_P0_DAC_HA_LEVELR_REG, 0, 6, 0, digital_tlv),
+ LM49453_P0_DAC_HA_LEVELR_REG, 0, 63, 0, adc_dac_tlv),
SOC_SINGLE_TLV("EP Volume", LM49453_P0_DAC_LS_LEVELL_REG,
- 0, 6, 0, digital_tlv),
+ 0, 63, 0, adc_dac_tlv),
SOC_SINGLE_TLV("PORT1_1_RX_LVL Volume", LM49453_P0_PORT1_RX_LVL1_REG,
0, 3, 0, port_tlv),
@@ -1218,7 +1192,7 @@ static int lm49453_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
}
snd_soc_update_bits(codec, LM49453_P0_AUDIO_PORT1_BASIC_REG,
- LM49453_AUDIO_PORT1_BASIC_FMT_MASK|BIT(1)|BIT(5),
+ LM49453_AUDIO_PORT1_BASIC_FMT_MASK|BIT(0)|BIT(5),
(aif_val | mode | clk_phase));
snd_soc_write(codec, LM49453_P0_AUDIO_PORT1_RX_MSB_REG, clk_shift);
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index cb1675cd8e1c..92bbfec9b107 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -401,7 +401,7 @@ static const struct snd_kcontrol_new sgtl5000_snd_controls[] = {
5, 1, 0),
SOC_SINGLE_TLV("Mic Volume", SGTL5000_CHIP_MIC_CTRL,
- 0, 4, 0, mic_gain_tlv),
+ 0, 3, 0, mic_gain_tlv),
};
/* mute the codec used by alsa core */
@@ -1344,7 +1344,7 @@ static int sgtl5000_probe(struct snd_soc_codec *codec)
SGTL5000_HP_ZCD_EN |
SGTL5000_ADC_ZCD_EN);
- snd_soc_write(codec, SGTL5000_CHIP_MIC_CTRL, 0);
+ snd_soc_write(codec, SGTL5000_CHIP_MIC_CTRL, 2);
/*
* disable DAP
diff --git a/sound/soc/codecs/sta529.c b/sound/soc/codecs/sta529.c
index ab355c4f0b2d..40c07be9b581 100644
--- a/sound/soc/codecs/sta529.c
+++ b/sound/soc/codecs/sta529.c
@@ -74,9 +74,10 @@
SNDRV_PCM_FMTBIT_S32_LE)
#define S2PC_VALUE 0x98
#define CLOCK_OUT 0x60
-#define LEFT_J_DATA_FORMAT 0x10
-#define I2S_DATA_FORMAT 0x12
-#define RIGHT_J_DATA_FORMAT 0x14
+#define DATA_FORMAT_MSK 0x0E
+#define LEFT_J_DATA_FORMAT 0x00
+#define I2S_DATA_FORMAT 0x02
+#define RIGHT_J_DATA_FORMAT 0x04
#define CODEC_MUTE_VAL 0x80
#define POWER_CNTLMSAK 0x40
@@ -289,7 +290,7 @@ static int sta529_set_dai_fmt(struct snd_soc_dai *codec_dai, u32 fmt)
return -EINVAL;
}
- snd_soc_update_bits(codec, STA529_S2PCFG0, 0x0D, mode);
+ snd_soc_update_bits(codec, STA529_S2PCFG0, DATA_FORMAT_MSK, mode);
return 0;
}
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index 1cbe88f01d63..12bcae63a7f0 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -209,9 +209,9 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue)
ret = wm2000_read(i2c, WM2000_REG_SPEECH_CLARITY);
if (wm2000->speech_clarity)
- ret &= ~WM2000_SPEECH_CLARITY;
- else
ret |= WM2000_SPEECH_CLARITY;
+ else
+ ret &= ~WM2000_SPEECH_CLARITY;
wm2000_write(i2c, WM2000_REG_SPEECH_CLARITY, ret);
wm2000_write(i2c, WM2000_REG_SYS_START0, 0x33);
diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
index afcf31df77e0..e6cefe1ac677 100644
--- a/sound/soc/codecs/wm2200.c
+++ b/sound/soc/codecs/wm2200.c
@@ -1566,15 +1566,9 @@ static int wm2200_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
case SND_SOC_DAIFMT_DSP_A:
fmt_val = 0;
break;
- case SND_SOC_DAIFMT_DSP_B:
- fmt_val = 1;
- break;
case SND_SOC_DAIFMT_I2S:
fmt_val = 2;
break;
- case SND_SOC_DAIFMT_LEFT_J:
- fmt_val = 3;
- break;
default:
dev_err(codec->dev, "Unsupported DAI format %d\n",
fmt & SND_SOC_DAIFMT_FORMAT_MASK);
@@ -1626,7 +1620,7 @@ static int wm2200_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
WM2200_AIF1TX_LRCLK_MSTR | WM2200_AIF1TX_LRCLK_INV,
lrclk);
snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_5,
- WM2200_AIF1_FMT_MASK << 1, fmt_val << 1);
+ WM2200_AIF1_FMT_MASK, fmt_val);
return 0;
}
diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c
index 5a5f36936235..54397a508073 100644
--- a/sound/soc/codecs/wm5100.c
+++ b/sound/soc/codecs/wm5100.c
@@ -1279,15 +1279,9 @@ static int wm5100_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
case SND_SOC_DAIFMT_DSP_A:
mask = 0;
break;
- case SND_SOC_DAIFMT_DSP_B:
- mask = 1;
- break;
case SND_SOC_DAIFMT_I2S:
mask = 2;
break;
- case SND_SOC_DAIFMT_LEFT_J:
- mask = 3;
- break;
default:
dev_err(codec->dev, "Unsupported DAI format %d\n",
fmt & SND_SOC_DAIFMT_FORMAT_MASK);
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index 688ade080589..7a9048dad1cd 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -36,6 +36,9 @@
struct wm5102_priv {
struct arizona_priv core;
struct arizona_fll fll[2];
+
+ unsigned int spk_ena:2;
+ unsigned int spk_ena_pending:1;
};
static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
@@ -787,6 +790,47 @@ ARIZONA_MIXER_CONTROLS("AIF3TX1", ARIZONA_AIF3TX1MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("AIF3TX2", ARIZONA_AIF3TX2MIX_INPUT_1_SOURCE),
};
+static int wm5102_spk_ev(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
+ struct wm5102_priv *wm5102 = snd_soc_codec_get_drvdata(codec);
+
+ if (arizona->rev < 1)
+ return 0;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (!wm5102->spk_ena) {
+ snd_soc_write(codec, 0x4f5, 0x25a);
+ wm5102->spk_ena_pending = true;
+ }
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ if (wm5102->spk_ena_pending) {
+ msleep(75);
+ snd_soc_write(codec, 0x4f5, 0xda);
+ wm5102->spk_ena_pending = false;
+ wm5102->spk_ena++;
+ }
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ wm5102->spk_ena--;
+ if (!wm5102->spk_ena)
+ snd_soc_write(codec, 0x4f5, 0x25a);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ if (!wm5102->spk_ena)
+ snd_soc_write(codec, 0x4f5, 0x0da);
+ break;
+ }
+
+ return 0;
+}
+
+
ARIZONA_MIXER_ENUMS(EQ1, ARIZONA_EQ1MIX_INPUT_1_SOURCE);
ARIZONA_MIXER_ENUMS(EQ2, ARIZONA_EQ2MIX_INPUT_1_SOURCE);
ARIZONA_MIXER_ENUMS(EQ3, ARIZONA_EQ3MIX_INPUT_1_SOURCE);
@@ -1034,10 +1078,10 @@ SND_SOC_DAPM_PGA_E("OUT3L", ARIZONA_OUTPUT_ENABLES_1,
ARIZONA_OUT3L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT4L", ARIZONA_OUTPUT_ENABLES_1,
- ARIZONA_OUT4L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+ ARIZONA_OUT4L_ENA_SHIFT, 0, NULL, 0, wm5102_spk_ev,
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT4R", ARIZONA_OUTPUT_ENABLES_1,
- ARIZONA_OUT4R_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+ ARIZONA_OUT4R_ENA_SHIFT, 0, NULL, 0, wm5102_spk_ev,
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT5L", ARIZONA_OUTPUT_ENABLES_1,
ARIZONA_OUT5L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index ffc89fab96fb..7b198c38f3ef 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -169,6 +169,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
const struct wm_adsp_region *mem;
const char *region_name;
char *file, *text;
+ void *buf;
unsigned int reg;
int regions = 0;
int ret, offset, type, sizes;
@@ -322,8 +323,18 @@ static int wm_adsp_load(struct wm_adsp *dsp)
}
if (reg) {
- ret = regmap_raw_write(regmap, reg, region->data,
+ buf = kmemdup(region->data, le32_to_cpu(region->len),
+ GFP_KERNEL);
+ if (!buf) {
+ adsp_err(dsp, "Out of memory\n");
+ return -ENOMEM;
+ }
+
+ ret = regmap_raw_write(regmap, reg, buf,
le32_to_cpu(region->len));
+
+ kfree(buf);
+
if (ret != 0) {
adsp_err(dsp,
"%s.%d: Failed to write %d bytes at %d in %s: %d\n",
@@ -359,6 +370,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
const char *region_name;
int ret, pos, blocks, type, offset, reg;
char *file;
+ void *buf;
file = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (file == NULL)
@@ -426,6 +438,13 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
}
if (reg) {
+ buf = kmemdup(blk->data, le32_to_cpu(blk->len),
+ GFP_KERNEL);
+ if (!buf) {
+ adsp_err(dsp, "Out of memory\n");
+ return -ENOMEM;
+ }
+
 ret = regmap_raw_write(regmap, reg, buf,
le32_to_cpu(blk->len));
if (ret != 0) {
@@ -433,6 +452,8 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
"%s.%d: Failed to write to %x in %s\n",
file, blocks, reg, region_name);
}
+
+ kfree(buf);
}
pos += le32_to_cpu(blk->len) + sizeof(*blk);
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 91d592ff67b7..2370063b5824 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1255,6 +1255,8 @@ static int soc_post_component_init(struct snd_soc_card *card,
INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].fe_clients);
ret = device_add(rtd->dev);
if (ret < 0) {
+ /* calling put_device() here to free the rtd->dev */
+ put_device(rtd->dev);
dev_err(card->dev,
"ASoC: failed to register runtime device: %d\n", ret);
return ret;
@@ -1554,7 +1556,7 @@ static void soc_remove_aux_dev(struct snd_soc_card *card, int num)
/* unregister the rtd device */
if (rtd->dev_registered) {
device_remove_file(rtd->dev, &dev_attr_codec_reg);
- device_del(rtd->dev);
+ device_unregister(rtd->dev);
rtd->dev_registered = 0;
}
@@ -2917,7 +2919,7 @@ int snd_soc_info_volsw_range(struct snd_kcontrol *kcontrol,
platform_max = mc->platform_max;
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
- uinfo->count = 1;
+ uinfo->count = snd_soc_volsw_is_stereo(mc) ? 2 : 1;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = platform_max - min;
@@ -2941,12 +2943,14 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
(struct soc_mixer_control *)kcontrol->private_value;
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
unsigned int reg = mc->reg;
+ unsigned int rreg = mc->rreg;
unsigned int shift = mc->shift;
int min = mc->min;
int max = mc->max;
unsigned int mask = (1 << fls(max)) - 1;
unsigned int invert = mc->invert;
unsigned int val, val_mask;
+ int ret;
val = ((ucontrol->value.integer.value[0] + min) & mask);
if (invert)
@@ -2954,7 +2958,21 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
val_mask = mask << shift;
val = val << shift;
- return snd_soc_update_bits_locked(codec, reg, val_mask, val);
+ ret = snd_soc_update_bits_locked(codec, reg, val_mask, val);
+ if (ret != 0)
+ return ret;
+
+ if (snd_soc_volsw_is_stereo(mc)) {
+ val = ((ucontrol->value.integer.value[1] + min) & mask);
+ if (invert)
+ val = max - val;
+ val_mask = mask << shift;
+ val = val << shift;
+
+ ret = snd_soc_update_bits_locked(codec, rreg, val_mask, val);
+ }
+
+ return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_put_volsw_range);
@@ -2974,6 +2992,7 @@ int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol,
(struct soc_mixer_control *)kcontrol->private_value;
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
unsigned int reg = mc->reg;
+ unsigned int rreg = mc->rreg;
unsigned int shift = mc->shift;
int min = mc->min;
int max = mc->max;
@@ -2988,6 +3007,16 @@ int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol,
ucontrol->value.integer.value[0] =
ucontrol->value.integer.value[0] - min;
+ if (snd_soc_volsw_is_stereo(mc)) {
+ ucontrol->value.integer.value[1] =
+ (snd_soc_read(codec, rreg) >> shift) & mask;
+ if (invert)
+ ucontrol->value.integer.value[1] =
+ max - ucontrol->value.integer.value[1];
+ ucontrol->value.integer.value[1] =
+ ucontrol->value.integer.value[1] - min;
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_get_volsw_range);
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index d7711fce119b..cf191e6aebbe 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1243,6 +1243,7 @@ static int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) &&
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED) &&
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
continue;
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index e71fe55cebef..0e2ed3d05c45 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -179,6 +179,15 @@ static struct usbmix_name_map audigy2nx_map[] = {
{ 0 } /* terminator */
};
+static struct usbmix_selector_map c400_selectors[] = {
+ {
+ .id = 0x80,
+ .count = 2,
+ .names = (const char*[]) {"Internal", "SPDIF"}
+ },
+ { 0 } /* terminator */
+};
+
static struct usbmix_selector_map audigy2nx_selectors[] = {
{
.id = 14, /* Capture Source */
@@ -367,6 +376,10 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
.map = hercules_usb51_map,
},
{
+ .id = USB_ID(0x0763, 0x2030),
+ .selector_map = c400_selectors,
+ },
+ {
.id = USB_ID(0x08bb, 0x2702),
.map = linex_map,
.ignore_ctl_error = 1,
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 0422b1360af3..15520de1df56 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -1206,7 +1206,7 @@ static int snd_c400_create_mixer(struct usb_mixer_interface *mixer)
 * are valid they present mono controls as L and R channels of
* stereo. So we provide a good mixer here.
*/
-struct std_mono_table ebox44_table[] = {
+static struct std_mono_table ebox44_table[] = {
{
.unitid = 4,
.control = 1,
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index c6593101c049..d82e378d37cb 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -511,6 +511,16 @@ static int configure_sync_endpoint(struct snd_usb_substream *subs)
struct snd_usb_substream *sync_subs =
&subs->stream->substream[subs->direction ^ 1];
+ if (subs->sync_endpoint->type != SND_USB_ENDPOINT_TYPE_DATA ||
+ !subs->stream)
+ return snd_usb_endpoint_set_params(subs->sync_endpoint,
+ subs->pcm_format,
+ subs->channels,
+ subs->period_bytes,
+ subs->cur_rate,
+ subs->cur_audiofmt,
+ NULL);
+
/* Try to find the best matching audioformat. */
list_for_each_entry(fp, &sync_subs->fmt_list, list) {
int score = match_endpoint_audioformats(fp, subs->cur_audiofmt,
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 78e845ec65da..64d25a7a4d59 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -2289,7 +2289,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
.rate_table = (unsigned int[]) {
44100, 48000, 88200, 96000
},
- .clock = 0x81,
+ .clock = 0x80,
}
},
/* Capture */
@@ -2315,7 +2315,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
.rate_table = (unsigned int[]) {
44100, 48000, 88200, 96000
},
- .clock = 0x81,
+ .clock = 0x80,
}
},
/* MIDI */
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index acc12f004c23..2c971858d6b7 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -387,11 +387,13 @@ static int snd_usb_fasttrackpro_boot_quirk(struct usb_device *dev)
* rules
*/
err = usb_driver_set_configuration(dev, 2);
- if (err < 0) {
+ if (err < 0)
snd_printdd("error usb_driver_set_configuration: %d\n",
err);
- return -ENODEV;
- }
+ /* Always return an error, so that we stop creating a device
+ that will just be destroyed and recreated with a new
+ configuration */
+ return -ENODEV;
} else
snd_printk(KERN_INFO "usb-audio: Fast Track Pro config OK\n");
@@ -859,6 +861,17 @@ void snd_usb_endpoint_start_quirk(struct snd_usb_endpoint *ep)
if ((le16_to_cpu(ep->chip->dev->descriptor.idVendor) == 0x23ba) &&
ep->type == SND_USB_ENDPOINT_TYPE_SYNC)
ep->skip_packets = 4;
+
+ /*
+ * M-Audio Fast Track C400 - when packets are not skipped, real world
+ * latency varies by approx. +/- 50 frames (at 96KHz) each time the
+ * stream is (re)started. When skipping packets 16 at endpoint start
+ * up, the real world latency is stable within +/- 1 frame (also
+ * across power cycles).
+ */
+ if (ep->chip->usb_id == USB_ID(0x0763, 0x2030) &&
+ ep->type == SND_USB_ENDPOINT_TYPE_DATA)
+ ep->skip_packets = 16;
}
void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
diff --git a/tools/gator/Makefile b/tools/gator/Makefile
new file mode 100644
index 000000000000..4ab73b0ebedb
--- /dev/null
+++ b/tools/gator/Makefile
@@ -0,0 +1,4 @@
+all:
+ $(MAKE) -C daemon $@
+clean:
+ $(MAKE) -C daemon $@
diff --git a/tools/gator/daemon/Android.mk b/tools/gator/daemon/Android.mk
new file mode 100644
index 000000000000..b8cc7ff3129c
--- /dev/null
+++ b/tools/gator/daemon/Android.mk
@@ -0,0 +1,39 @@
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+XML_H := $(shell cd $(LOCAL_PATH) && make events_xml.h configuration_xml.h)
+
+LOCAL_CFLAGS += -Wall -O3 -mthumb-interwork -fno-exceptions
+
+LOCAL_SRC_FILES := \
+ CapturedXML.cpp \
+ Child.cpp \
+ Collector.cpp \
+ ConfigurationXML.cpp \
+ Fifo.cpp \
+ LocalCapture.cpp \
+ Logging.cpp \
+ main.cpp \
+ OlySocket.cpp \
+ OlyUtility.cpp \
+ Sender.cpp \
+ SessionData.cpp \
+ SessionXML.cpp \
+ StreamlineSetup.cpp \
+ mxml/mxml-attr.c \
+ mxml/mxml-entity.c \
+ mxml/mxml-file.c \
+ mxml/mxml-get.c \
+ mxml/mxml-index.c \
+ mxml/mxml-node.c \
+ mxml/mxml-private.c \
+ mxml/mxml-search.c \
+ mxml/mxml-set.c \
+ mxml/mxml-string.c
+
+LOCAL_C_INCLUDES := $(LOCAL_PATH)
+
+LOCAL_MODULE := gatord
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_EXECUTABLE)
diff --git a/tools/gator/daemon/CapturedXML.cpp b/tools/gator/daemon/CapturedXML.cpp
new file mode 100644
index 000000000000..cc218d7818c5
--- /dev/null
+++ b/tools/gator/daemon/CapturedXML.cpp
@@ -0,0 +1,150 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <dirent.h>
+#include "SessionData.h"
+#include "CapturedXML.h"
+#include "Logging.h"
+#include "OlyUtility.h"
+
+CapturedXML::CapturedXML() {
+}
+
+CapturedXML::~CapturedXML() {
+}
+
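+// Build the captured.xml tree: protocol/target attributes plus a <counter> element for each enabled performance counter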
+mxml_node_t* CapturedXML::getTree(bool includeTime) {
+ bool perfCounters = false;
+ mxml_node_t *xml;
+ mxml_node_t *captured;
+ mxml_node_t *target;
+ mxml_node_t *counters;
+ mxml_node_t *counter;
+ int x;
+
+ for (x=0; x<MAX_PERFORMANCE_COUNTERS; x++) {
+ if (gSessionData->mPerfCounterEnabled[x]) {
+ perfCounters = true;
+ break;
+ }
+ }
+
+ xml = mxmlNewXML("1.0");
+
+ captured = mxmlNewElement(xml, "captured");
+ mxmlElementSetAttr(captured, "version", "1");
+ mxmlElementSetAttrf(captured, "protocol", "%d", PROTOCOL_VERSION);
+ if (includeTime) { // Send the following only after the capture is complete
+ if (time(NULL) > 1267000000) { // If the time is reasonable (after Feb 23, 2010)
+ mxmlElementSetAttrf(captured, "created", "%lu", time(NULL)); // Valid until the year 2038
+ }
+ }
+
+ target = mxmlNewElement(captured, "target");
+ mxmlElementSetAttr(target, "name", gSessionData->mCoreName);
+ mxmlElementSetAttrf(target, "sample_rate", "%d", gSessionData->mSampleRate);
+ mxmlElementSetAttrf(target, "cores", "%d", gSessionData->mCores);
+
+ if (perfCounters) {
+ counters = mxmlNewElement(captured, "counters");
+ for (x = 0; x < MAX_PERFORMANCE_COUNTERS; x++) {
+ if (gSessionData->mPerfCounterEnabled[x]) {
+ counter = mxmlNewElement(counters, "counter");
+ mxmlElementSetAttr(counter, "title", gSessionData->mPerfCounterTitle[x]);
+ mxmlElementSetAttr(counter, "name", gSessionData->mPerfCounterName[x]);
+ mxmlElementSetAttrf(counter, "color", "0x%08x", gSessionData->mPerfCounterColor[x]);
+ mxmlElementSetAttrf(counter, "key", "0x%08x", gSessionData->mPerfCounterKey[x]);
+ mxmlElementSetAttr(counter, "type", gSessionData->mPerfCounterType[x]);
+ mxmlElementSetAttrf(counter, "event", "0x%08x", gSessionData->mPerfCounterEvent[x]);
+ if (gSessionData->mPerfCounterPerCPU[x]) {
+ mxmlElementSetAttr(counter, "per_cpu", "yes");
+ }
+ if (gSessionData->mPerfCounterCount[x] > 0) {
+ mxmlElementSetAttrf(counter, "count", "%d", gSessionData->mPerfCounterCount[x]);
+ }
+ if (strlen(gSessionData->mPerfCounterDisplay[x]) > 0) {
+ mxmlElementSetAttr(counter, "display", gSessionData->mPerfCounterDisplay[x]);
+ }
+ if (strlen(gSessionData->mPerfCounterUnits[x]) > 0) {
+ mxmlElementSetAttr(counter, "units", gSessionData->mPerfCounterUnits[x]);
+ }
+ if (gSessionData->mPerfCounterAverageSelection[x]) {
+ mxmlElementSetAttr(counter, "average_selection", "yes");
+ }
+ mxmlElementSetAttr(counter, "description", gSessionData->mPerfCounterDescription[x]);
+ }
+ }
+ }
+
+ return xml;
+}
+
+char* CapturedXML::getXML(bool includeTime) {
+ char* xml_string;
+ mxml_node_t *xml = getTree(includeTime);
+ xml_string = mxmlSaveAllocString(xml, mxmlWhitespaceCB);
+ mxmlDelete(xml);
+ return xml_string;
+}
+
+void CapturedXML::write(char* path) {
+ char *file = (char*)malloc(PATH_MAX);
+
+ // Set full path
+ snprintf(file, PATH_MAX, "%s/captured.xml", path);
+
+ char* xml = getXML(true);
+ if (util->writeToDisk(file, xml) < 0) {
+ logg->logError(__FILE__, __LINE__, "Error writing %s\nPlease verify the path.", file);
+ handleException();
+ }
+
+ free(xml);
+ free(file);
+}
+
+// whitespace callback utility function used with mini-xml
+const char * mxmlWhitespaceCB(mxml_node_t *node, int loc) {
+ const char *name;
+
+ name = mxmlGetElement(node);
+
+ if (loc == MXML_WS_BEFORE_OPEN) {
+ // Single indentation
+ if (!strcmp(name, "target") || !strcmp(name, "counters"))
+ return("\n ");
+
+ // Double indentation
+ if (!strcmp(name, "counter"))
+ return("\n ");
+
+ // Avoid a carriage return on the first line of the xml file
+ if (!strncmp(name, "?xml", 4))
+ return(NULL);
+
+ // Default - no indentation
+ return("\n");
+ }
+
+ if (loc == MXML_WS_BEFORE_CLOSE) {
+ // No indentation
+ if (!strcmp(name, "captured"))
+ return("\n");
+
+ // Single indentation
+ if (!strcmp(name, "counters"))
+ return("\n ");
+
+ // Default - no carriage return
+ return(NULL);
+ }
+
+ return(NULL);
+}
diff --git a/tools/gator/daemon/CapturedXML.h b/tools/gator/daemon/CapturedXML.h
new file mode 100644
index 000000000000..3cac57697009
--- /dev/null
+++ b/tools/gator/daemon/CapturedXML.h
@@ -0,0 +1,27 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CAPTURED_XML_H__
+#define __CAPTURED_XML_H__
+
+#include "mxml/mxml.h"
+
+class CapturedXML {
+public:
+ CapturedXML();
+ ~CapturedXML();
+ char* getXML(bool includeTime); // the string should be freed by the caller
+ void write(char* path);
+private:
+ mxml_node_t* getTree(bool includeTime);
+};
+
+const char * mxmlWhitespaceCB(mxml_node_t *node, int where);
+
+#endif //__CAPTURED_XML_H__
diff --git a/tools/gator/daemon/Child.cpp b/tools/gator/daemon/Child.cpp
new file mode 100644
index 000000000000..b97e0db1e3cc
--- /dev/null
+++ b/tools/gator/daemon/Child.cpp
@@ -0,0 +1,315 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <signal.h>
+#include <sys/syscall.h>
+#include <sys/resource.h>
+#include <unistd.h>
+#include <sys/prctl.h>
+#include "Logging.h"
+#include "CapturedXML.h"
+#include "SessionData.h"
+#include "Child.h"
+#include "LocalCapture.h"
+#include "Collector.h"
+#include "Sender.h"
+#include "OlyUtility.h"
+#include "StreamlineSetup.h"
+#include "ConfigurationXML.h"
+
+static sem_t haltPipeline, senderThreadStarted, startProfile; // Shared by Child and spawned threads
+static Fifo* collectorFifo = NULL; // Shared by Child.cpp and spawned threads
+static Sender* sender = NULL; // Shared by Child.cpp and spawned threads
+Collector* collector = NULL;
+Child* child = NULL; // shared by Child.cpp and main.cpp
+
+extern void cleanUp();
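+// Central error handler: send the last error to Streamline when a socket is connected, then terminate the child process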
+void handleException() {
+ if (child && child->numExceptions++ > 0) {
+ // it is possible one of the below functions itself can cause an exception, thus allow only one exception
+ logg->logMessage("Received multiple exceptions, terminating the child");
+ exit(1);
+ }
+ fprintf(stderr, "%s", logg->getLastError());
+
+ if (child && child->socket) {
+ if (sender) {
+ // send the error, regardless of the command sent by Streamline
+ sender->writeData(logg->getLastError(), strlen(logg->getLastError()), RESPONSE_ERROR);
+
+ // cannot close the socket before Streamline issues the command, so wait for the command before exiting
+ if (gSessionData->mWaitingOnCommand) {
+ char discard;
+ child->socket->receiveNBytes(&discard, 1);
+ }
+
+ // this indirectly calls close socket which will ensure the data has been sent
+ delete sender;
+ }
+ }
+
+ if (gSessionData->mLocalCapture)
+ cleanUp();
+
+ exit(1);
+}
+
+// CTRL C Signal Handler for child process
+void child_handler(int signum) {
+ static bool beenHere = false;
+ if (beenHere == true) {
+ logg->logMessage("Gator is being forced to shut down.");
+ exit(1);
+ }
+ beenHere = true;
+ logg->logMessage("Gator is shutting down.");
+ if (signum == SIGALRM || !collector) {
+ exit(1);
+ } else {
+ child->endSession();
+ alarm(5); // Safety net in case endSession does not complete within 5 seconds
+ }
+}
+
+void* durationThread(void* pVoid) {
+ prctl(PR_SET_NAME, (unsigned long)&"gatord-duration", 0, 0, 0);
+ sem_wait(&startProfile);
+ if (gSessionData->mSessionIsActive) {
+ // Time out after duration seconds
+ // Add a second for host-side filtering
+ sleep(gSessionData->mDuration + 1);
+ if (gSessionData->mSessionIsActive) {
+ logg->logMessage("Duration expired.");
+ child->endSession();
+ }
+ }
+ logg->logMessage("Exit duration thread");
+ return 0;
+}
+
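+// Thread that blocks on the control socket until Streamline sends APC_STOP or PING, or the socket is disconnected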
+void* stopThread(void* pVoid) {
+ int length;
+ char type;
+ OlySocket* socket = child->socket;
+
+ prctl(PR_SET_NAME, (unsigned long)&"gatord-stopper", 0, 0, 0);
+ while (gSessionData->mSessionIsActive) {
+ // This thread will stall until the APC_STOP or PING command is received over the socket or the socket is disconnected
+ const int result = socket->receiveNBytes(&type, sizeof(type));
+ if (result == -1) {
+ child->endSession();
+ } else if (result > 0) {
+ if ((type != COMMAND_APC_STOP) && (type != COMMAND_PING)) {
+ logg->logMessage("INVESTIGATE: Received unknown command type %d", type);
+ } else {
+ // verify a length of zero
+ if (socket->receiveNBytes((char*)&length, sizeof(length)) < 0) {
+ break;
+ }
+
+ if (length == 0) {
+ if (type == COMMAND_APC_STOP) {
+ logg->logMessage("Stop command received.");
+ child->endSession();
+ } else {
+ // Ping is used to make sure gator is alive and requires an ACK as the response
+ logg->logMessage("Ping command received.");
+ sender->writeData(NULL, 0, RESPONSE_ACK);
+ }
+ } else {
+ logg->logMessage("INVESTIGATE: Received stop command but with length = %d", length);
+ }
+ }
+ }
+ }
+
+ logg->logMessage("Exit stop thread");
+ return 0;
+}
+
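+// Thread that drains the collector fifo and streams each block as RESPONSE_APC_DATA until an empty read signals end of capture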
+void* senderThread(void* pVoid) {
+ int length;
+ char* data;
+ char end_sequence[] = {RESPONSE_APC_DATA, 0, 0, 0, 0};
+
+ sem_post(&senderThreadStarted);
+ prctl(PR_SET_NAME, (unsigned long)&"gatord-sender", 0, 0, 0);
+ sem_wait(&haltPipeline);
+
+ do {
+ data = collectorFifo->read(&length);
+ sender->writeData(data, length, RESPONSE_APC_DATA);
+ } while (length > 0);
+
+ // write end-of-capture sequence
+ if (!gSessionData->mLocalCapture) {
+ sender->writeData(end_sequence, sizeof(end_sequence), RESPONSE_APC_DATA);
+ }
+
+ logg->logMessage("Exit sender thread");
+ return 0;
+}
+
+Child::Child() {
+ initialization();
+ gSessionData->mLocalCapture = true;
+}
+
+Child::Child(OlySocket* sock, int conn) {
+ initialization();
+ socket = sock;
+ mNumConnections = conn;
+}
+
+Child::~Child() {
+}
+
+void Child::initialization() {
+ // Set up different handlers for signals
+ gSessionData->mSessionIsActive = true;
+ signal(SIGINT, child_handler);
+ signal(SIGTERM, child_handler);
+ signal(SIGABRT, child_handler);
+ signal(SIGALRM, child_handler);
+ socket = NULL;
+ numExceptions = 0;
+ mNumConnections = 0;
+
+ // Initialize semaphores
+ sem_init(&senderThreadStarted, 0, 0);
+ sem_init(&startProfile, 0, 0);
+}
+
+void Child::endSession() {
+ gSessionData->mSessionIsActive = false;
+ collector->stop();
+ sem_post(&haltPipeline);
+}
+
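+// Child main loop: create the sender, collector and fifo, spawn the helper threads, then read driver data until the session ends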
+void Child::run() {
+ char* collectBuffer;
+ int bytesCollected = 0;
+ LocalCapture* localCapture = NULL;
+ pthread_t durationThreadID, stopThreadID, senderThreadID;
+
+ prctl(PR_SET_NAME, (unsigned long)&"gatord-child", 0, 0, 0);
+
+ // Disable line wrapping when generating xml files; carriage returns and indentation to be added manually
+ mxmlSetWrapMargin(0);
+
+ // Instantiate the Sender - must be done first, after which error messages can be sent
+ sender = new Sender(socket);
+
+ if (mNumConnections > 1) {
+ logg->logError(__FILE__, __LINE__, "Session already in progress");
+ handleException();
+ }
+
+ // Populate gSessionData with the configuration
+ new ConfigurationXML();
+
+ // Set up the driver; must be done after gSessionData->mPerfCounterType[] is populated
+ collector = new Collector();
+
+ // Start up and parse session xml
+ if (socket) {
+ // Respond to Streamline requests
+ StreamlineSetup ss(socket);
+ } else {
+ char* xmlString;
+ xmlString = util->readFromDisk(gSessionData->mSessionXMLPath);
+ if (xmlString == 0) {
+ logg->logError(__FILE__, __LINE__, "Unable to read session xml file: %s", gSessionData->mSessionXMLPath);
+ handleException();
+ }
+ gSessionData->parseSessionXML(xmlString);
+ localCapture = new LocalCapture();
+ localCapture->createAPCDirectory(gSessionData->mTargetPath);
+ localCapture->copyImages(gSessionData->mImages);
+ localCapture->write(xmlString);
+ sender->createDataFile(gSessionData->mAPCDir);
+ free(xmlString);
+ }
+
+ // Write configuration into the driver
+ collector->setupPerfCounters();
+
+ // Create user-space buffers, add 5 to the size to account for the 1-byte type and 4-byte length
+ logg->logMessage("Created %d MB collector buffer with a %d-byte ragged end", gSessionData->mTotalBufferSize, collector->getBufferSize());
+ collectorFifo = new Fifo(collector->getBufferSize() + 5, gSessionData->mTotalBufferSize* 1024 * 1024);
+
+ // Get the initial pointer to the collect buffer
+ collectBuffer = collectorFifo->start();
+
+ // Sender thread shall be halted until it is signaled for one shot mode
+ sem_init(&haltPipeline, 0, gSessionData->mOneShot ? 0 : 2);
+
+ // Create the duration, stop, and sender threads
+ bool thread_creation_success = true;
+ if (gSessionData->mDuration > 0 && pthread_create(&durationThreadID, NULL, durationThread, NULL)) {
+ thread_creation_success = false;
+ } else if (socket && pthread_create(&stopThreadID, NULL, stopThread, NULL)) {
+ thread_creation_success = false;
+ } else if (pthread_create(&senderThreadID, NULL, senderThread, NULL)){
+ thread_creation_success = false;
+ }
+
+ if (!thread_creation_success) {
+ logg->logError(__FILE__, __LINE__, "Failed to create gator threads");
+ handleException();
+ }
+
+ // Wait until thread has started
+ sem_wait(&senderThreadStarted);
+
+ // Start profiling
+ logg->logMessage("********** Profiling started **********");
+ collector->start();
+ sem_post(&startProfile);
+
+ // Collect Data
+ do {
+ // This command will stall until data is received from the driver
+ bytesCollected = collector->collect(collectBuffer);
+
+ // In one shot mode, stop collection once all the buffers are filled
+ if (gSessionData->mOneShot && gSessionData->mSessionIsActive) {
+ if (bytesCollected == -1 || collectorFifo->willFill(bytesCollected)) {
+ logg->logMessage("One shot");
+ endSession();
+ }
+ }
+ collectBuffer = collectorFifo->write(bytesCollected);
+ } while (bytesCollected > 0);
+ logg->logMessage("Exit collect data loop");
+
+ // Wait for the other threads to exit
+ pthread_join(senderThreadID, NULL);
+
+ // Shutting down the connection should break the stop thread which is stalling on the socket recv() function
+ if (socket) {
+ logg->logMessage("Waiting on stop thread");
+ socket->shutdownConnection();
+ pthread_join(stopThreadID, NULL);
+ }
+
+ // Write the captured xml file
+ if (gSessionData->mLocalCapture) {
+ CapturedXML capturedXML;
+ capturedXML.write(gSessionData->mAPCDir);
+ }
+
+ logg->logMessage("Profiling ended.");
+
+ delete collectorFifo;
+ delete sender;
+ delete collector;
+ delete localCapture;
+}
diff --git a/tools/gator/daemon/Child.h b/tools/gator/daemon/Child.h
new file mode 100644
index 000000000000..612ca7cf3e2c
--- /dev/null
+++ b/tools/gator/daemon/Child.h
@@ -0,0 +1,31 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CHILD_H__
+#define __CHILD_H__
+
+#include <pthread.h>
+#include "Fifo.h"
+#include "OlySocket.h"
+
+class Child {
+public:
+ Child();
+ Child(OlySocket* sock, int numConnections);
+ ~Child();
+ void run();
+ OlySocket *socket;
+ void endSession();
+ int numExceptions;
+private:
+ int mNumConnections;
+
+ void initialization();
+};
+
+#endif //__CHILD_H__
diff --git a/tools/gator/daemon/Collector.cpp b/tools/gator/daemon/Collector.cpp
new file mode 100644
index 000000000000..2e4af0cc7175
--- /dev/null
+++ b/tools/gator/daemon/Collector.cpp
@@ -0,0 +1,292 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <fcntl.h>
+#include <malloc.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/time.h>
+#include "Collector.h"
+#include "SessionData.h"
+#include "Logging.h"
+#include "Sender.h"
+
+// Driver initialization independent of session settings
+Collector::Collector() {
+ char text[sizeof(gSessionData->mPerfCounterType[0]) + 30]; // sufficiently large to hold all /dev/gator/events/<types>/<file>
+
+ mBufferFD = 0;
+
+ checkVersion();
+
+ int enable = -1;
+ if (readIntDriver("/dev/gator/enable", &enable) != 0 || enable != 0) {
+ logg->logError(__FILE__, __LINE__, "Driver already enabled, possibly a session is already in progress.");
+ handleException();
+ }
+
+ readIntDriver("/dev/gator/cpu_cores", &gSessionData->mCores);
+ if (gSessionData->mCores == 0) {
+ gSessionData->mCores = 1;
+ }
+
+ mBufferSize = 0;
+ if (readIntDriver("/dev/gator/buffer_size", &mBufferSize) || mBufferSize <= 0) {
+ logg->logError(__FILE__, __LINE__, "Unable to read the driver buffer size");
+ handleException();
+ }
+
+ getCoreName();
+
+ enablePerfCounters();
+
+ // Read unchanging keys from driver which are created at insmod'ing of gator.ko
+ for (int i = 0; i < MAX_PERFORMANCE_COUNTERS; i++) {
+ if (gSessionData->mPerfCounterEnabled[i]) {
+ snprintf(text, sizeof(text), "/dev/gator/events/%s/key", gSessionData->mPerfCounterType[i]);
+ readIntDriver(text, &gSessionData->mPerfCounterKey[i]);
+ }
+ }
+}
+
+Collector::~Collector() {
+ // Write zero for safety, as a zero should have already been written
+ writeDriver("/dev/gator/enable", "0");
+
+ // Calls event_buffer_release in the driver
+ if (mBufferFD) {
+ close(mBufferFD);
+ }
+}
+
+#include <dirent.h>
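+// Disable every event exposed under /dev/gator/events, then re-enable only the counters requested in the configuration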
+void Collector::enablePerfCounters() {
+ char text[sizeof(gSessionData->mPerfCounterType[0]) + 30]; // sufficiently large to hold all /dev/gator/events/<types>/enabled
+
+ // Initialize all perf counters in the driver, i.e. set enabled to zero
+ struct dirent *ent;
+ DIR* dir = opendir("/dev/gator/events");
+ if (dir) {
+ while ((ent = readdir(dir)) != NULL) {
+ // skip hidden files, current dir, and parent dir
+ if (ent->d_name[0] == '.')
+ continue;
+ snprintf(text, sizeof(text), "/dev/gator/events/%s/enabled", ent->d_name);
+ writeDriver(text, 0);
+ }
+ closedir (dir);
+ }
+
+ for (int i=0; i<MAX_PERFORMANCE_COUNTERS; i++) {
+ if (!gSessionData->mPerfCounterEnabled[i]) {
+ continue;
+ }
+ snprintf(text, sizeof(text), "/dev/gator/events/%s/enabled", gSessionData->mPerfCounterType[i]);
+ if (writeReadDriver(text, &gSessionData->mPerfCounterEnabled[i])) {
+ // Disable those events that don't exist on this hardware platform even though they exist in configuration.xml
+ gSessionData->mPerfCounterEnabled[i] = 0;
+ continue;
+ }
+ }
+}
+
+void Collector::setupPerfCounters() {
+ char base[sizeof(gSessionData->mPerfCounterType[0]) + 20]; // sufficiently large to hold all /dev/gator/events/<types>
+ char text[sizeof(gSessionData->mPerfCounterType[0]) + 30]; // sufficiently large to hold all /dev/gator/events/<types>/<file>
+
+ for (int i=0; i<MAX_PERFORMANCE_COUNTERS; i++) {
+ if (!gSessionData->mPerfCounterEnabled[i]) {
+ continue;
+ }
+ snprintf(base, sizeof(base), "/dev/gator/events/%s", gSessionData->mPerfCounterType[i]);
+ snprintf(text, sizeof(text), "%s/event", base);
+ writeDriver(text, gSessionData->mPerfCounterEvent[i]);
+ if (gSessionData->mPerfCounterEBSCapable[i]) {
+ snprintf(text, sizeof(text), "%s/count", base);
+ if (access(text, F_OK) == 0) {
+ if (writeReadDriver(text, &gSessionData->mPerfCounterCount[i]) && gSessionData->mPerfCounterCount[i] > 0) {
+ logg->logError(__FILE__, __LINE__, "Cannot enable EBS for %s:%s with a count of %d\n", gSessionData->mPerfCounterTitle[i], gSessionData->mPerfCounterName[i], gSessionData->mPerfCounterCount[i]);
+ handleException();
+ }
+ } else if (gSessionData->mPerfCounterCount[i] > 0) {
+ logg->logError(__FILE__, __LINE__, "Event Based Sampling is only supported with kernel versions 3.0.0 and higher with CONFIG_PERF_EVENTS=y, and CONFIG_HW_PERF_EVENTS=y\n");
+ handleException();
+ }
+ }
+ }
+}
+
+void Collector::checkVersion() {
+ int driver_version = 0;
+
+ if (readIntDriver("/dev/gator/version", &driver_version) == -1) {
+ logg->logError(__FILE__, __LINE__, "Error reading gator driver version");
+ handleException();
+ }
+
+ // Verify the driver version matches the daemon version
+ if (driver_version != PROTOCOL_VERSION) {
+ if ((driver_version > PROTOCOL_DEV) || (PROTOCOL_VERSION > PROTOCOL_DEV)) {
+ // One of the mismatched versions is development version
+ logg->logError(__FILE__, __LINE__,
+ "DEVELOPMENT BUILD MISMATCH: gator driver version \"%d\" is not in sync with gator daemon version \"%d\".\n"
+ ">> The following must be synchronized from engineering repository:\n"
+ ">> * gator driver\n"
+ ">> * gator daemon\n"
+ ">> * Streamline", driver_version, PROTOCOL_VERSION);
+ handleException();
+ } else {
+ // Release version mismatch
+ logg->logError(__FILE__, __LINE__,
+ "gator driver version \"%d\" is different than gator daemon version \"%d\".\n"
+ ">> Please upgrade the driver and daemon to the latest versions.", driver_version, PROTOCOL_VERSION);
+ handleException();
+ }
+ }
+}
+
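+// Write the backtrace depth, tick rate and response type to the driver, open the buffer, and enable profiling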
+void Collector::start() {
+ // Set the maximum backtrace depth
+ if (writeReadDriver("/dev/gator/backtrace_depth", &gSessionData->mBacktraceDepth)) {
+ logg->logError(__FILE__, __LINE__, "Unable to set the driver backtrace depth");
+ handleException();
+ }
+
+ // open the buffer which calls userspace_buffer_open() in the driver
+ mBufferFD = open("/dev/gator/buffer", O_RDONLY);
+ if (mBufferFD < 0) {
+ logg->logError(__FILE__, __LINE__, "The gator driver did not set up properly. Please view the linux console or dmesg log for more information on the failure.");
+ handleException();
+ }
+
+ // set the tick rate of the profiling timer
+ if (writeReadDriver("/dev/gator/tick", &gSessionData->mSampleRate) != 0) {
+ logg->logError(__FILE__, __LINE__, "Unable to set the driver tick");
+ handleException();
+ }
+
+ // notify the kernel of the response type
+ int response_type = gSessionData->mLocalCapture ? 0 : RESPONSE_APC_DATA;
+ if (writeDriver("/dev/gator/response_type", response_type)) {
+ logg->logError(__FILE__, __LINE__, "Unable to write the response type");
+ handleException();
+ }
+
+ logg->logMessage("Start the driver");
+
+ // This command makes the driver start profiling by calling gator_op_start() in the driver
+ if (writeDriver("/dev/gator/enable", "1") != 0) {
+ logg->logError(__FILE__, __LINE__, "The gator driver did not start properly. Please view the linux console or dmesg log for more information on the failure.");
+ handleException();
+ }
+
+ lseek(mBufferFD, 0, SEEK_SET);
+}
+
+// These commands should cause the read() function in collect() to return
+void Collector::stop() {
+ // This will stop the driver from profiling
+ if (writeDriver("/dev/gator/enable", "0") != 0) {
+ logg->logMessage("Stopping kernel failed");
+ }
+}
+
+int Collector::collect(char* buffer) {
+ // Calls event_buffer_read in the driver
+ int bytesRead;
+
+ errno = 0;
+ bytesRead = read(mBufferFD, buffer, mBufferSize);
+
+ // If read() returned due to an interrupt signal, re-read to obtain the last bit of collected data
+ if (bytesRead == -1 && errno == EINTR) {
+ bytesRead = read(mBufferFD, buffer, mBufferSize);
+ }
+
+ // return the total bytes written
+ logg->logMessage("Driver read of %d bytes", bytesRead);
+ return bytesRead;
+}
+
+void Collector::getCoreName() {
+ char temp[256]; // arbitrarily large amount
+ strcpy(gSessionData->mCoreName, "unknown");
+
+ FILE* f = fopen("/proc/cpuinfo", "r");
+ if (f == NULL) {
+ logg->logMessage("Error opening /proc/cpuinfo\n"
+ "The core name in the captured xml file will be 'unknown'.");
+ return;
+ }
+
+ while (fgets(temp, sizeof(temp), f)) {
+ if (strlen(temp) > 0) {
+ temp[strlen(temp) - 1] = 0; // Replace the line feed with a null
+ }
+
+ if (strstr(temp, "Hardware") != 0) {
+ char* position = strchr(temp, ':');
+ if (position == NULL || (unsigned int)(position - temp) + 2 >= strlen(temp)) {
+ logg->logMessage("Unknown format of /proc/cpuinfo\n"
+ "The core name in the captured xml file will be 'unknown'.");
+ return;
+ }
+ strncpy(gSessionData->mCoreName, (char*)((long)position + 2), sizeof(gSessionData->mCoreName));
+ gSessionData->mCoreName[sizeof(gSessionData->mCoreName) - 1] = 0; // strncpy does not guarantee a null-terminated string
+ fclose(f);
+ return;
+ }
+ }
+
+ logg->logMessage("Could not determine core name from /proc/cpuinfo\n"
+ "The core name in the captured xml file will be 'unknown'.");
+ fclose(f);
+}
+
+int Collector::readIntDriver(const char* fullpath, int* value) {
+ FILE* file = fopen(fullpath, "r");
+ if (file == NULL) {
+ return -1;
+ }
+ if (fscanf(file, "%u", value) != 1) {
+ fclose(file);
+ logg->logMessage("Invalid value in file %s", fullpath);
+ return -1;
+ }
+ fclose(file);
+ return 0;
+}
+
+int Collector::writeDriver(const char* path, int value) {
+ char data[40]; // Sufficiently large to hold any integer
+ snprintf(data, sizeof(data), "%d", value);
+ return writeDriver(path, data);
+}
+
+int Collector::writeDriver(const char* fullpath, const char* data) {
+ int fd = open(fullpath, O_WRONLY);
+ if (fd < 0) {
+ return -1;
+ }
+ if (write(fd, data, strlen(data)) < 0) {
+ close(fd);
+ logg->logMessage("Opened but could not write to %s", fullpath);
+ return -1;
+ }
+ close(fd);
+ return 0;
+}
+
+int Collector::writeReadDriver(const char* path, int* value) {
+ if (writeDriver(path, *value) || readIntDriver(path, value)) {
+ return -1;
+ }
+ return 0;
+}
diff --git a/tools/gator/daemon/Collector.h b/tools/gator/daemon/Collector.h
new file mode 100644
index 000000000000..5977a725ecc3
--- /dev/null
+++ b/tools/gator/daemon/Collector.h
@@ -0,0 +1,37 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __COLLECTOR_H__
+#define __COLLECTOR_H__
+
+#include <stdio.h>
+
+class Collector {
+public:
+ Collector();
+ ~Collector();
+ void start();
+ void stop();
+ int collect(char* buffer);
+ void enablePerfCounters();
+ void setupPerfCounters();
+ int getBufferSize() {return mBufferSize;}
+private:
+ int mBufferSize;
+ int mBufferFD;
+
+ void checkVersion();
+ void getCoreName();
+
+ int readIntDriver(const char* path, int* value);
+ int writeDriver(const char* path, int value);
+ int writeDriver(const char* path, const char* data);
+ int writeReadDriver(const char* path, int* value);
+};
+
+#endif //__COLLECTOR_H__
diff --git a/tools/gator/daemon/ConfigurationXML.cpp b/tools/gator/daemon/ConfigurationXML.cpp
new file mode 100644
index 000000000000..14c280923ca3
--- /dev/null
+++ b/tools/gator/daemon/ConfigurationXML.cpp
@@ -0,0 +1,188 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <string.h>
+#include <stdlib.h>
+#include <dirent.h>
+#include "ConfigurationXML.h"
+#include "Logging.h"
+#include "OlyUtility.h"
+#include "SessionData.h"
+
+static const char* ATTR_COUNTER = "counter";
+static const char* ATTR_REVISION = "revision";
+static const char* ATTR_TITLE = "title";
+static const char* ATTR_NAME = "name";
+static const char* ATTR_EVENT = "event";
+static const char* ATTR_COUNT = "count";
+static const char* ATTR_PER_CPU = "per_cpu";
+static const char* ATTR_DESCRIPTION = "description";
+static const char* ATTR_EBS = "supports_event_based_sampling";
+static const char* ATTR_DISPLAY = "display";
+static const char* ATTR_UNITS = "units";
+static const char* ATTR_AVERAGE_SELECTION = "average_selection";
+
+ConfigurationXML::ConfigurationXML() {
+ const char * configuration_xml;
+ unsigned int configuration_xml_len;
+ getDefaultConfigurationXml(configuration_xml, configuration_xml_len);
+
+ mIndex = 0;
+ char* path = (char*)malloc(PATH_MAX);
+
+ if (gSessionData->mConfigurationXMLPath) {
+ strncpy(path, gSessionData->mConfigurationXMLPath, PATH_MAX);
+ } else {
+ if (util->getApplicationFullPath(path, PATH_MAX) != 0) {
+ logg->logMessage("Unable to determine the full path of gatord, the cwd will be used");
+ }
+ strncat(path, "configuration.xml", PATH_MAX - strlen(path) - 1);
+ }
+ mConfigurationXML = util->readFromDisk(path);
+
+ if (mConfigurationXML == NULL) {
+ logg->logMessage("Unable to locate configuration.xml, using default in binary");
+ // null-terminate configuration_xml
+ mConfigurationXML = (char*)malloc(configuration_xml_len + 1);
+ memcpy(mConfigurationXML, (const void*)configuration_xml, configuration_xml_len);
+ mConfigurationXML[configuration_xml_len] = 0;
+ }
+
+ // disable all counters prior to parsing the configuration xml
+ for (int i = 0; i < MAX_PERFORMANCE_COUNTERS; i++) {
+ gSessionData->mPerfCounterEnabled[i] = 0;
+ }
+
+ // clear counter overflow
+ gSessionData->mCounterOverflow = false;
+
+ int ret = parse(mConfigurationXML);
+ if (ret == 1) {
+ // remove configuration.xml on disk to use the default
+ if (remove(path) != 0) {
+ logg->logError(__FILE__, __LINE__, "Invalid configuration.xml file detected and unable to delete it. To resolve, delete configuration.xml on disk");
+ handleException();
+ }
+ logg->logMessage("Invalid configuration.xml file detected and removed");
+ }
+
+ validate();
+
+ free(path);
+}
+
+ConfigurationXML::~ConfigurationXML() {
+ if (mConfigurationXML) {
+ free((void*)mConfigurationXML);
+ }
+}
+
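+// Parse the configuration xml: check the <configurations> revision, then pass each child element to configurationTag()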
+int ConfigurationXML::parse(const char* configurationXML) {
+ mxml_node_t *tree, *node;
+ int ret;
+
+ tree = mxmlLoadString(NULL, configurationXML, MXML_NO_CALLBACK);
+
+ node = mxmlGetFirstChild(tree);
+ while (node && mxmlGetType(node) != MXML_ELEMENT)
+ node = mxmlWalkNext(node, tree, MXML_NO_DESCEND);
+
+ ret = configurationsTag(node);
+
+ node = mxmlGetFirstChild(node);
+ while (node) {
+ if (mxmlGetType(node) != MXML_ELEMENT) {
+ node = mxmlWalkNext(node, tree, MXML_NO_DESCEND);
+ continue;
+ }
+ configurationTag(node);
+ node = mxmlWalkNext(node, tree, MXML_NO_DESCEND);
+ }
+
+ mxmlDelete(tree);
+
+ return ret;
+}
+
+void ConfigurationXML::validate(void) {
+ for (int i = 0; i < MAX_PERFORMANCE_COUNTERS; i++) {
+ if (gSessionData->mPerfCounterEnabled[i]) {
+ if (strcmp(gSessionData->mPerfCounterType[i], "") == 0) {
+ logg->logError(__FILE__, __LINE__, "Invalid required attribute in configuration.xml:\n counter=\"%s\"\n title=\"%s\"\n name=\"%s\"\n event=%d\n", gSessionData->mPerfCounterType[i], gSessionData->mPerfCounterTitle[i], gSessionData->mPerfCounterName[i], gSessionData->mPerfCounterEvent[i]);
+ handleException();
+ }
+
+ // iterate through the remaining enabled performance counters
+ for (int j = i + 1; j < MAX_PERFORMANCE_COUNTERS; j++) {
+ if (gSessionData->mPerfCounterEnabled[j]) {
+ // check if the types are the same
+ if (strcmp(gSessionData->mPerfCounterType[i], gSessionData->mPerfCounterType[j]) == 0) {
+ logg->logError(__FILE__, __LINE__, "Duplicate performance counter type in configuration.xml: %s", gSessionData->mPerfCounterType[i]);
+ handleException();
+ }
+ }
+ }
+ }
+ }
+}
+
+#define CONFIGURATION_REVISION 1
+int ConfigurationXML::configurationsTag(mxml_node_t *node) {
+ const char* revision_string;
+
+ revision_string = mxmlElementGetAttr(node, ATTR_REVISION);
+ if (!revision_string) {
+ return 1; //revision issue;
+ }
+
+ int revision = strtol(revision_string, NULL, 10);
+ if (revision < CONFIGURATION_REVISION) {
+ return 1; // revision issue
+ }
+
+ return 0;
+}
+
+void ConfigurationXML::configurationTag(mxml_node_t *node) {
+ // handle all other performance counters
+ if (mIndex >= MAX_PERFORMANCE_COUNTERS) {
+ gSessionData->mCounterOverflow = true;
+ return;
+ }
+
+ // read attributes
+ if (mxmlElementGetAttr(node, ATTR_COUNTER)) strncpy(gSessionData->mPerfCounterType[mIndex], mxmlElementGetAttr(node, ATTR_COUNTER), sizeof(gSessionData->mPerfCounterType[mIndex]));
+ if (mxmlElementGetAttr(node, ATTR_TITLE)) strncpy(gSessionData->mPerfCounterTitle[mIndex], mxmlElementGetAttr(node, ATTR_TITLE), sizeof(gSessionData->mPerfCounterTitle[mIndex]));
+ if (mxmlElementGetAttr(node, ATTR_NAME)) strncpy(gSessionData->mPerfCounterName[mIndex], mxmlElementGetAttr(node, ATTR_NAME), sizeof(gSessionData->mPerfCounterName[mIndex]));
+ if (mxmlElementGetAttr(node, ATTR_DESCRIPTION)) strncpy(gSessionData->mPerfCounterDescription[mIndex], mxmlElementGetAttr(node, ATTR_DESCRIPTION), sizeof(gSessionData->mPerfCounterDescription[mIndex]));
+ if (mxmlElementGetAttr(node, ATTR_EVENT)) gSessionData->mPerfCounterEvent[mIndex] = strtol(mxmlElementGetAttr(node, ATTR_EVENT), NULL, 16);
+ if (mxmlElementGetAttr(node, ATTR_COUNT)) gSessionData->mPerfCounterCount[mIndex] = strtol(mxmlElementGetAttr(node, ATTR_COUNT), NULL, 10);
+ if (mxmlElementGetAttr(node, ATTR_PER_CPU)) gSessionData->mPerfCounterPerCPU[mIndex] = util->stringToBool(mxmlElementGetAttr(node, ATTR_PER_CPU), false);
+ if (mxmlElementGetAttr(node, ATTR_EBS)) gSessionData->mPerfCounterEBSCapable[mIndex] = util->stringToBool(mxmlElementGetAttr(node, ATTR_EBS), false);
+ if (mxmlElementGetAttr(node, ATTR_DISPLAY)) strncpy(gSessionData->mPerfCounterDisplay[mIndex], mxmlElementGetAttr(node, ATTR_DISPLAY), sizeof(gSessionData->mPerfCounterDisplay[mIndex]));
+ if (mxmlElementGetAttr(node, ATTR_UNITS)) strncpy(gSessionData->mPerfCounterUnits[mIndex], mxmlElementGetAttr(node, ATTR_UNITS), sizeof(gSessionData->mPerfCounterUnits[mIndex]));
+ if (mxmlElementGetAttr(node, ATTR_AVERAGE_SELECTION)) gSessionData->mPerfCounterAverageSelection[mIndex] = util->stringToBool(mxmlElementGetAttr(node, ATTR_AVERAGE_SELECTION), false);
+ gSessionData->mPerfCounterEnabled[mIndex] = true;
+
+ // strncpy does not guarantee a null-terminated string
+ gSessionData->mPerfCounterType[mIndex][sizeof(gSessionData->mPerfCounterType[mIndex]) - 1] = 0;
+ gSessionData->mPerfCounterTitle[mIndex][sizeof(gSessionData->mPerfCounterTitle[mIndex]) - 1] = 0;
+ gSessionData->mPerfCounterName[mIndex][sizeof(gSessionData->mPerfCounterName[mIndex]) - 1] = 0;
+ gSessionData->mPerfCounterDescription[mIndex][sizeof(gSessionData->mPerfCounterDescription[mIndex]) - 1] = 0;
+ gSessionData->mPerfCounterDisplay[mIndex][sizeof(gSessionData->mPerfCounterDisplay[mIndex]) - 1] = 0;
+ gSessionData->mPerfCounterUnits[mIndex][sizeof(gSessionData->mPerfCounterUnits[mIndex]) - 1] = 0;
+
+ // update counter index
+ mIndex++;
+}
+
+void ConfigurationXML::getDefaultConfigurationXml(const char * & xml, unsigned int & len) {
+#include "configuration_xml.h" // defines and initializes char configuration_xml[] and int configuration_xml_len
+ xml = (const char *)configuration_xml;
+ len = configuration_xml_len;
+}
diff --git a/tools/gator/daemon/ConfigurationXML.h b/tools/gator/daemon/ConfigurationXML.h
new file mode 100644
index 000000000000..66ad587320ae
--- /dev/null
+++ b/tools/gator/daemon/ConfigurationXML.h
@@ -0,0 +1,32 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef COUNTERS_H
+#define COUNTERS_H
+
+#include "mxml/mxml.h"
+
+class ConfigurationXML {
+public:
+ static void getDefaultConfigurationXml(const char * & xml, unsigned int & len);
+
+ ConfigurationXML();
+ ~ConfigurationXML();
+ const char* getConfigurationXML() {return mConfigurationXML;}
+ void validate(void);
+
+private:
+ char* mConfigurationXML;
+ int mIndex;
+
+ int parse(const char* xmlFile);
+ int configurationsTag(mxml_node_t *node);
+ void configurationTag(mxml_node_t *node);
+};
+
+#endif // COUNTERS_H
diff --git a/tools/gator/daemon/Fifo.cpp b/tools/gator/daemon/Fifo.cpp
new file mode 100644
index 000000000000..4a2745299663
--- /dev/null
+++ b/tools/gator/daemon/Fifo.cpp
@@ -0,0 +1,125 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include "Fifo.h"
+#include "Logging.h"
+
+// bufferSize is the amount of data to be filled
+// singleBufferSize is the maximum size that may be filled during a single write
+// (bufferSize + singleBufferSize) will be allocated
+Fifo::Fifo(int singleBufferSize, int bufferSize) {
+ mWrite = mRead = mReadCommit = mRaggedEnd = 0;
+ mWrapThreshold = bufferSize;
+ mSingleBufferSize = singleBufferSize;
+ mBuffer = (char*)valloc(bufferSize + singleBufferSize);
+ mEnd = false;
+
+ if (mBuffer == NULL) {
+ logg->logError(__FILE__, __LINE__, "failed to allocate %d bytes", bufferSize + singleBufferSize);
+ handleException();
+ }
+
+ if (sem_init(&mWaitForSpaceSem, 0, 0) || sem_init(&mWaitForDataSem, 0, 0)) {
+ logg->logError(__FILE__, __LINE__, "sem_init() failed");
+ handleException();
+ }
+}
+
+Fifo::~Fifo() {
+ free(mBuffer);
+ sem_destroy(&mWaitForSpaceSem);
+ sem_destroy(&mWaitForDataSem);
+}
+
+int Fifo::numBytesFilled() const {
+ return mWrite - mRead + mRaggedEnd;
+}
+
+char* Fifo::start() const {
+ return mBuffer;
+}
+
+bool Fifo::isEmpty() const {
+ return mRead == mWrite && mRaggedEnd == 0;
+}
+
+bool Fifo::isFull() const {
+ return willFill(0);
+}
+
+// Determines if the buffer will fill assuming 'additional' bytes will be added to the buffer
+// 'full' means there is less than singleBufferSize bytes available contiguously; it does not mean there are zero bytes available
+bool Fifo::willFill(int additional) const {
+ if (mWrite > mRead) {
+ if (numBytesFilled() + additional < mWrapThreshold) {
+ return false;
+ }
+ } else {
+ if (numBytesFilled() + additional < mWrapThreshold - mSingleBufferSize) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// This function will stall until contiguous singleBufferSize bytes are available
+char* Fifo::write(int length) {
+ if (length <= 0) {
+ length = 0;
+ mEnd = true;
+ }
+
+ // update the write pointer
+ mWrite += length;
+
+ // handle the wrap-around
+ if (mWrite >= mWrapThreshold) {
+ mRaggedEnd = mWrite;
+ mWrite = 0;
+ }
+
+ // send a notification that data is ready
+ sem_post(&mWaitForDataSem);
+
+ // wait for space
+ while (isFull()) {
+ sem_wait(&mWaitForSpaceSem);
+ }
+
+ return &mBuffer[mWrite];
+}
+
+// This function will stall until data is available
+char* Fifo::read(int *const length) {
+ // update the read pointer now that the data has been handled
+ mRead = mReadCommit;
+
+ // handle the wrap-around
+ if (mRead >= mWrapThreshold) {
+ mRaggedEnd = mRead = mReadCommit = 0;
+ }
+
+ // send a notification that data is free (space is available)
+ sem_post(&mWaitForSpaceSem);
+
+ // wait for data
+ while (isEmpty() && !mEnd) {
+ sem_wait(&mWaitForDataSem);
+ }
+
+ // obtain the length
+ do {
+ mReadCommit = mRaggedEnd ? mRaggedEnd : mWrite;
+ *length = mReadCommit - mRead;
+ } while (*length < 0); // plugs race condition without using semaphores
+
+ return &mBuffer[mRead];
+}
diff --git a/tools/gator/daemon/Fifo.h b/tools/gator/daemon/Fifo.h
new file mode 100644
index 000000000000..e2a5d94c68d2
--- /dev/null
+++ b/tools/gator/daemon/Fifo.h
@@ -0,0 +1,33 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __FIFO_H__
+#define __FIFO_H__
+
+#include <semaphore.h>
+
+class Fifo {
+public:
+ Fifo(int singleBufferSize, int totalBufferSize);
+ ~Fifo();
+ int numBytesFilled() const;
+ bool isEmpty() const;
+ bool isFull() const;
+ bool willFill(int additional) const;
+ char* start() const;
+ char* write(int length);
+ char* read(int *const length);
+
+private:
+ int mSingleBufferSize, mWrite, mRead, mReadCommit, mRaggedEnd, mWrapThreshold;
+ sem_t mWaitForSpaceSem, mWaitForDataSem;
+ char* mBuffer;
+ bool mEnd;
+};
+
+#endif //__FIFO_H__
diff --git a/tools/gator/daemon/LocalCapture.cpp b/tools/gator/daemon/LocalCapture.cpp
new file mode 100644
index 000000000000..2dd3affa6084
--- /dev/null
+++ b/tools/gator/daemon/LocalCapture.cpp
@@ -0,0 +1,128 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <dirent.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include "LocalCapture.h"
+#include "SessionData.h"
+#include "Logging.h"
+#include "OlyUtility.h"
+
+LocalCapture::LocalCapture() {}
+
+LocalCapture::~LocalCapture() {}
+
+void LocalCapture::createAPCDirectory(char* target_path) {
+ gSessionData->mAPCDir = createUniqueDirectory(target_path, ".apc");
+ if ((removeDirAndAllContents(gSessionData->mAPCDir) != 0 || mkdir(gSessionData->mAPCDir, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH) != 0)) {
+ logg->logError(__FILE__, __LINE__, "Unable to create directory %s", gSessionData->mAPCDir);
+ handleException();
+ }
+}
+
+void LocalCapture::write(char* string) {
+ char* file = (char*)malloc(PATH_MAX);
+
+ // Set full path
+ snprintf(file, PATH_MAX, "%s/session.xml", gSessionData->mAPCDir);
+
+ // Write the file
+ if (util->writeToDisk(file, string) < 0) {
+ logg->logError(__FILE__, __LINE__, "Error writing %s\nPlease verify the path.", file);
+ handleException();
+ }
+
+ free(file);
+}
+
+char* LocalCapture::createUniqueDirectory(const char* initialPath, const char* ending) {
+ char* output;
+ char* path = (char*)malloc(PATH_MAX);
+
+ // Ensure the path is an absolute path, i.e. starts with a slash
+ if (initialPath == 0 || strlen(initialPath) == 0) {
+ logg->logError(__FILE__, __LINE__, "Missing -o command line option required for a local capture.");
+ handleException();
+ } else if (initialPath[0] != '/') {
+ if (getcwd(path, PATH_MAX) == 0) {
+ logg->logMessage("Unable to retrieve the current working directory");
+ }
+ strncat(path, "/", PATH_MAX - strlen(path) - 1);
+ strncat(path, initialPath, PATH_MAX - strlen(path) - 1);
+ } else {
+ strncpy(path, initialPath, PATH_MAX);
+ path[PATH_MAX - 1] = 0; // strncpy does not guarantee a null-terminated string
+ }
+
+ // Add ending if it is not already there
+ if (strcmp(&path[strlen(path) - strlen(ending)], ending) != 0) {
+ strncat(path, ending, PATH_MAX - strlen(path) - 1);
+ }
+
+ output = strdup(path);
+
+ free(path);
+ return output;
+}
+
+int LocalCapture::removeDirAndAllContents(char* path) {
+ int error = 0;
+ struct stat mFileInfo;
+ // Does the path exist?
+ if (stat(path, &mFileInfo) == 0) {
+ // Is it a directory?
+ if (mFileInfo.st_mode & S_IFDIR) {
+ DIR * dir = opendir(path);
+ if (dir == NULL) {
+ return -1;
+ }
+ dirent* entry = readdir(dir);
+ while (entry) {
+ if (strcmp(entry->d_name, ".") != 0 && strcmp(entry->d_name, "..") != 0) {
+ char* newpath = (char*)malloc(strlen(path) + strlen(entry->d_name) + 2);
+ sprintf(newpath, "%s/%s", path, entry->d_name);
+ error = removeDirAndAllContents(newpath);
+ free(newpath);
+ if (error) {
+ break;
+ }
+ }
+ entry = readdir(dir);
+ }
+ closedir(dir);
+ if (error == 0) {
+ error = rmdir(path);
+ }
+ } else {
+ error = remove(path);
+ }
+ }
+ return error;
+}
+
+void LocalCapture::copyImages(ImageLinkList* ptr) {
+ char* dstfilename = (char*)malloc(PATH_MAX);
+
+ while (ptr) {
+ strncpy(dstfilename, gSessionData->mAPCDir, PATH_MAX);
+ dstfilename[PATH_MAX - 1] = 0; // strncpy does not guarantee a null-terminated string
+ if (gSessionData->mAPCDir[strlen(gSessionData->mAPCDir) - 1] != '/') {
+ strncat(dstfilename, "/", PATH_MAX - strlen(dstfilename) - 1);
+ }
+ strncat(dstfilename, util->getFilePart(ptr->path), PATH_MAX - strlen(dstfilename) - 1);
+ if (util->copyFile(ptr->path, dstfilename)) {
+ logg->logMessage("copied file %s to %s", ptr->path, dstfilename);
+ } else {
+ logg->logMessage("copy of file %s to %s failed", ptr->path, dstfilename);
+ }
+
+ ptr = ptr->next;
+ }
+ free(dstfilename);
+}
diff --git a/tools/gator/daemon/LocalCapture.h b/tools/gator/daemon/LocalCapture.h
new file mode 100644
index 000000000000..40f7883237a4
--- /dev/null
+++ b/tools/gator/daemon/LocalCapture.h
@@ -0,0 +1,26 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LOCAL_CAPTURE_H__
+#define __LOCAL_CAPTURE_H__
+
+#include "SessionXML.h"
+
+class LocalCapture {
+public:
+ LocalCapture();
+ ~LocalCapture();
+ void write(char* string);
+ void copyImages(ImageLinkList* ptr);
+ void createAPCDirectory(char* target_path);
+private:
+ char* createUniqueDirectory(const char* path, const char* ending);
+ int removeDirAndAllContents(char* path);
+};
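+
+// Typical use, as an illustrative sketch (the session XML string is an example input):
+//
+//   LocalCapture capture;
+//   capture.createAPCDirectory(gSessionData->mTargetPath);
+//   capture.write(sessionXmlString);            // writes session.xml into the .apc directory
+//   capture.copyImages(gSessionData->mImages);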
+
+#endif //__LOCAL_CAPTURE_H__
diff --git a/tools/gator/daemon/Logging.cpp b/tools/gator/daemon/Logging.cpp
new file mode 100644
index 000000000000..3e6f8a388a46
--- /dev/null
+++ b/tools/gator/daemon/Logging.cpp
@@ -0,0 +1,79 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include "OlyUtility.h"
+
+#ifdef WIN32
+#define MUTEX_INIT() mLoggingMutex = CreateMutex(NULL, false, NULL);
+#define MUTEX_LOCK() WaitForSingleObject(mLoggingMutex, 0xFFFFFFFF);
+#define MUTEX_UNLOCK() ReleaseMutex(mLoggingMutex);
+#define snprintf _snprintf
+#else
+#include <pthread.h>
+#define MUTEX_INIT() pthread_mutex_init(&mLoggingMutex, NULL)
+#define MUTEX_LOCK() pthread_mutex_lock(&mLoggingMutex)
+#define MUTEX_UNLOCK() pthread_mutex_unlock(&mLoggingMutex)
+#endif
+
+#include "Logging.h"
+
+// Global thread-safe logging
+Logging* logg = NULL;
+
+Logging::Logging(bool debug) {
+ mDebug = debug;
+ MUTEX_INIT();
+
+ strcpy(mErrBuf, "Unknown Error");
+ strcpy(mLogBuf, "Unknown Message");
+}
+
+Logging::~Logging() {
+}
+
+void Logging::logError(const char* file, int line, const char* fmt, ...) {
+ va_list args;
+
+ MUTEX_LOCK();
+ if (mDebug) {
+ snprintf(mErrBuf, sizeof(mErrBuf), "ERROR[%s:%d]: ", file, line);
+ } else {
+ mErrBuf[0] = 0;
+ }
+
+ va_start(args, fmt);
+ vsnprintf(mErrBuf + strlen(mErrBuf), sizeof(mErrBuf) - 2 - strlen(mErrBuf), fmt, args); // subtract 2 for \n and \0
+ va_end(args);
+
+ if (strlen(mErrBuf) > 0) {
+ strcat(mErrBuf, "\n");
+ }
+ MUTEX_UNLOCK();
+}
+
+void Logging::logMessage(const char* fmt, ...) {
+ if (mDebug) {
+ va_list args;
+
+ MUTEX_LOCK();
+ strcpy(mLogBuf, "INFO: ");
+
+ va_start(args, fmt);
+ vsnprintf(mLogBuf + strlen(mLogBuf), sizeof(mLogBuf) - 2 - strlen(mLogBuf), fmt, args); // subtract 2 for \n and \0
+ va_end(args);
+ strcat(mLogBuf, "\n");
+
+ fprintf(stdout, "%s", mLogBuf);
+ fflush(stdout);
+ MUTEX_UNLOCK();
+ }
+}
diff --git a/tools/gator/daemon/Logging.h b/tools/gator/daemon/Logging.h
new file mode 100644
index 000000000000..37f7dd3320d6
--- /dev/null
+++ b/tools/gator/daemon/Logging.h
@@ -0,0 +1,47 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LOGGING_H__
+#define __LOGGING_H__
+
+#include <stdio.h>
+#include <string.h>
+#include <limits.h>
+#ifdef WIN32
+#include <windows.h>
+#else
+#include <pthread.h>
+#endif
+
+#define DRIVER_ERROR "\n Driver issue:\n >> gator.ko must be built against the current kernel version & configuration\n >> gator.ko should be co-located with gatord in the same directory\n >> OR insmod gator.ko prior to launching gatord"
+
+class Logging {
+public:
+ Logging(bool debug);
+ ~Logging();
+ void logError(const char* file, int line, const char* fmt, ...);
+ void logMessage(const char* fmt, ...);
+ char* getLastError() {return mErrBuf;}
+ char* getLastMessage() {return mLogBuf;}
+
+private:
+ char mErrBuf[4096]; // Arbitrarily large buffer to hold a string
+ char mLogBuf[4096]; // Arbitrarily large buffer to hold a string
+ bool mDebug;
+#ifdef WIN32
+ HANDLE mLoggingMutex;
+#else
+ pthread_mutex_t mLoggingMutex;
+#endif
+};
+
+extern Logging* logg;
+
+extern void handleException();
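+
+// Typical use, as an illustrative sketch: the daemon is expected to create the global
+// instance at startup and to provide handleException(), which is declared but not defined here.
+//
+//   logg = new Logging(true /* debug */);
+//   logg->logMessage("gatord initialized");
+//   logg->logError(__FILE__, __LINE__, "unable to open %s", path);
+//   handleException();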
+
+#endif //__LOGGING_H__
diff --git a/tools/gator/daemon/Makefile b/tools/gator/daemon/Makefile
new file mode 100644
index 000000000000..95d18093e2b8
--- /dev/null
+++ b/tools/gator/daemon/Makefile
@@ -0,0 +1,58 @@
+#
+# Makefile for ARM Streamline - Gator Daemon
+#
+
+# Uncomment and define CROSS_COMPILE if it is not already defined
+# CROSS_COMPILE=/path/to/cross-compiler/arm-linux-gnueabihf-
+# NOTE: This toolchain uses the hardfloat abi by default. For non-hardfloat
+# targets it is necessary to add options
+# '-marm -march=armv4t -mfloat-abi=soft'.
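+# Example invocation (the toolchain path is illustrative):
+#   make CROSS_COMPILE=/opt/arm/bin/arm-linux-gnueabihf- -j4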
+
+CPP = $(CROSS_COMPILE)g++
+GCC = $(CROSS_COMPILE)gcc
+
+# -g produces debugging information
+# -O3 maximum optimization
+# -O0 no optimization, used for debugging
+# -Wall enables most warnings
+# -Werror treats warnings as errors
+# -std=c++0x is the planned new c++ standard
+# -std=c++98 is the 1998 c++ standard
+# -mthumb-interwork is required for interworking to ARM or Thumb stdlibc
+CFLAGS = -O3 -Wall -mthumb-interwork -fno-exceptions
+CXXFLAGS = -fno-rtti
+ifeq ($(WERROR),1)
+ CFLAGS += -Werror
+endif
+# -s strips the binary of debug info
+LDFLAGS = -s
+TARGET = gatord
+C_SRC = $(wildcard mxml/*.c)
+CPP_SRC = $(wildcard *.cpp)
+
+all: $(TARGET)
+
+events.xml: events_header.xml $(wildcard events-*.xml) events_footer.xml
+ cat $^ > $@
+
+StreamlineSetup.cpp: events_xml.h
+ConfigurationXML.cpp: configuration_xml.h
+
+%_xml.h: %.xml escape
+ ./escape $< > $@
+
+%.o: %.c *.h
+ $(GCC) -c $(CFLAGS) -o $@ $<
+
+%.o: %.cpp *.h
+ $(CPP) -c $(CFLAGS) $(CXXFLAGS) -o $@ $<
+
+$(TARGET): $(CPP_SRC:%.cpp=%.o) $(C_SRC:%.c=%.o)
+ $(CPP) $(LDFLAGS) -o $@ $^ -lc -lrt -lpthread
+ rm -f events_xml.h configuration_xml.h
+
+escape: escape.c
+ gcc $^ -o $@
+
+clean:
+ rm -f *.o mxml/*.o $(TARGET) escape events.xml events_xml.h configuration_xml.h
diff --git a/tools/gator/daemon/OlySocket.cpp b/tools/gator/daemon/OlySocket.cpp
new file mode 100644
index 000000000000..6128dc0c4fb7
--- /dev/null
+++ b/tools/gator/daemon/OlySocket.cpp
@@ -0,0 +1,259 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <stdio.h>
+#ifdef WIN32
+#include <Winsock2.h>
+#else
+#include <netinet/in.h>
+#include <sys/socket.h>
+#include <unistd.h>
+#include <netdb.h>
+#endif
+#include "OlySocket.h"
+#include "Logging.h"
+
+#ifdef WIN32
+#define CLOSE_SOCKET(x) closesocket(x)
+#define SHUTDOWN_RX_TX SD_BOTH
+#define snprintf _snprintf
+#else
+#define CLOSE_SOCKET(x) close(x)
+#define SHUTDOWN_RX_TX SHUT_RDWR
+#endif
+
+OlySocket::OlySocket(int port, bool multiple) {
+#ifdef WIN32
+ WSADATA wsaData;
+ if (WSAStartup(0x0202, &wsaData) != 0) {
+ logg->logError(__FILE__, __LINE__, "Windows socket initialization failed");
+ handleException();
+ }
+#endif
+
+ if (multiple) {
+ createServerSocket(port);
+ } else {
+ createSingleServerConnection(port);
+ }
+}
+
+OlySocket::OlySocket(int port, char* host) {
+ mFDServer = 0;
+ createClientSocket(host, port);
+}
+
+OlySocket::~OlySocket() {
+ if (mSocketID > 0) {
+ CLOSE_SOCKET(mSocketID);
+ }
+}
+
+void OlySocket::shutdownConnection() {
+ // Shutdown is primarily used to unblock other threads that are blocking on send/receive functions
+ shutdown(mSocketID, SHUTDOWN_RX_TX);
+}
+
+void OlySocket::closeSocket() {
+ // Used for closing an accepted socket but keeping the server socket active
+ if (mSocketID > 0) {
+ CLOSE_SOCKET(mSocketID);
+ mSocketID = -1;
+ }
+}
+
+void OlySocket::closeServerSocket() {
+ if (CLOSE_SOCKET(mFDServer) != 0) {
+ logg->logError(__FILE__, __LINE__, "Failed to close server socket.");
+ handleException();
+ }
+ mFDServer = 0;
+}
+
+void OlySocket::createClientSocket(char* hostname, int portno) {
+#ifdef WIN32
+ // TODO: Implement for Windows
+#else
+ char buf[32];
+ struct addrinfo hints, *res, *res0;
+
+ snprintf(buf, sizeof(buf), "%d", portno);
+ mSocketID = -1;
+ memset((void*)&hints, 0, sizeof(hints));
+ hints.ai_family = PF_UNSPEC;
+ hints.ai_socktype = SOCK_STREAM;
+
+ if (getaddrinfo(hostname, buf, &hints, &res0)) {
+ logg->logError(__FILE__, __LINE__, "Client socket failed to get address info for %s", hostname);
+ handleException();
+ }
+ for (res=res0; res!=NULL; res = res->ai_next) {
+ if ( res->ai_family != PF_INET || res->ai_socktype != SOCK_STREAM ) {
+ continue;
+ }
+ mSocketID = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
+ if (mSocketID < 0) {
+ continue;
+ }
+ if (connect(mSocketID, res->ai_addr, res->ai_addrlen) < 0) {
+ close(mSocketID);
+ mSocketID = -1;
+ }
+ if (mSocketID > 0) {
+ break;
+ }
+ }
+ freeaddrinfo(res0);
+ if (mSocketID <= 0) {
+ logg->logError(__FILE__, __LINE__, "Could not connect to client socket. Ensure ARM Streamline is running.");
+ handleException();
+ }
+#endif
+}
+
+void OlySocket::createSingleServerConnection(int port) {
+ createServerSocket(port);
+
+ mSocketID = acceptConnection();
+ closeServerSocket();
+}
+
+void OlySocket::createServerSocket(int port) {
+ // Create socket
+ mFDServer = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
+ if (mFDServer < 0) {
+ logg->logError(__FILE__, __LINE__, "Error creating server socket");
+ handleException();
+ }
+
+ // Enable address reuse; an alternative would be to create the server socket once and close it only when the object is destroyed
+ int on = 1;
+ if (setsockopt(mFDServer, SOL_SOCKET, SO_REUSEADDR, (const char*)&on, sizeof(on)) != 0) {
+ logg->logError(__FILE__, __LINE__, "Setting server socket options failed");
+ handleException();
+ }
+
+ // Create sockaddr_in structure, ensuring non-populated fields are zero
+ struct sockaddr_in sockaddr;
+ memset((void*)&sockaddr, 0, sizeof(struct sockaddr_in));
+ sockaddr.sin_family = AF_INET;
+ sockaddr.sin_port = htons(port);
+ sockaddr.sin_addr.s_addr = INADDR_ANY;
+
+ // Bind the socket to an address
+ if (bind(mFDServer, (const struct sockaddr*)&sockaddr, sizeof(sockaddr)) < 0) {
+ logg->logError(__FILE__, __LINE__, "Binding of server socket failed.\nIs an instance already running?");
+ handleException();
+ }
+
+ // Listen for connections on this socket
+ if (listen(mFDServer, 1) < 0) {
+ logg->logError(__FILE__, __LINE__, "Listening of server socket failed");
+ handleException();
+ }
+}
+
+// mSocketID is always set to the most recently accepted connection
+// The user of this class should maintain the different socket connections, e.g. by forking the process
+int OlySocket::acceptConnection() {
+ if (mFDServer <= 0) {
+ logg->logError(__FILE__, __LINE__, "Attempting multiple connections on a single connection server socket or attempting to accept on a client socket");
+ handleException();
+ }
+
+ // Accept a connection, note that this call blocks until a client connects
+ mSocketID = accept(mFDServer, NULL, NULL);
+ if (mSocketID < 0) {
+ logg->logError(__FILE__, __LINE__, "Socket acceptance failed");
+ handleException();
+ }
+ return mSocketID;
+}
+
+void OlySocket::send(char* buffer, int size) {
+ if (size <= 0 || buffer == NULL) {
+ return;
+ }
+
+ while (size > 0) {
+ int n = ::send(mSocketID, buffer, size, 0);
+ if (n < 0) {
+ logg->logError(__FILE__, __LINE__, "Socket send error");
+ handleException();
+ }
+ size -= n;
+ buffer += n;
+ }
+}
+
+// Returns the number of bytes received
+int OlySocket::receive(char* buffer, int size) {
+ if (size <= 0 || buffer == NULL) {
+ return 0;
+ }
+
+ int bytes = recv(mSocketID, buffer, size, 0);
+ if (bytes < 0) {
+ logg->logError(__FILE__, __LINE__, "Socket receive error");
+ handleException();
+ } else if (bytes == 0) {
+ logg->logMessage("Socket disconnected");
+ return -1;
+ }
+ return bytes;
+}
+
+// Receive exactly size bytes of data. Note, this function will block until all bytes are received
+int OlySocket::receiveNBytes(char* buffer, int size) {
+ int bytes = 0;
+ while (size > 0 && buffer != NULL) {
+ bytes = recv(mSocketID, buffer, size, 0);
+ if (bytes < 0) {
+ logg->logError(__FILE__, __LINE__, "Socket receive error");
+ handleException();
+ } else if (bytes == 0) {
+ logg->logMessage("Socket disconnected");
+ return -1;
+ }
+ buffer += bytes;
+ size -= bytes;
+ }
+ return bytes;
+}
+
+// Receive data until a carriage return, line feed, or null is encountered, or the buffer fills
+int OlySocket::receiveString(char* buffer, int size) {
+ int bytes_received = 0;
+ bool found = false;
+
+ if (buffer == 0) {
+ return 0;
+ }
+
+ while (!found && bytes_received < size) {
+ // Receive a single character
+ int bytes = recv(mSocketID, &buffer[bytes_received], 1, 0);
+ if (bytes < 0) {
+ logg->logError(__FILE__, __LINE__, "Socket receive error");
+ handleException();
+ } else if (bytes == 0) {
+ logg->logMessage("Socket disconnected");
+ return -1;
+ }
+
+ // Replace carriage returns and line feeds with zero
+ if (buffer[bytes_received] == '\n' || buffer[bytes_received] == '\r' || buffer[bytes_received] == '\0') {
+ buffer[bytes_received] = '\0';
+ found = true;
+ }
+
+ bytes_received++;
+ }
+
+ return bytes_received;
+}
diff --git a/tools/gator/daemon/OlySocket.h b/tools/gator/daemon/OlySocket.h
new file mode 100644
index 000000000000..b306908c001a
--- /dev/null
+++ b/tools/gator/daemon/OlySocket.h
@@ -0,0 +1,36 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __OLY_SOCKET_H__
+#define __OLY_SOCKET_H__
+
+#include <string.h>
+
+class OlySocket {
+public:
+ OlySocket(int port, bool multipleConnections = false);
+ OlySocket(int port, char* hostname);
+ ~OlySocket();
+ int acceptConnection();
+ void closeSocket();
+ void closeServerSocket();
+ void shutdownConnection();
+ void send(char* buffer, int size);
+ void sendString(const char* string) {send((char*)string, strlen(string));}
+ int receive(char* buffer, int size);
+ int receiveNBytes(char* buffer, int size);
+ int receiveString(char* buffer, int size);
+ int getSocketID() {return mSocketID;}
+private:
+ int mSocketID, mFDServer;
+ void createClientSocket(char* hostname, int port);
+ void createSingleServerConnection(int port);
+ void createServerSocket(int port);
+};
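+
+// Typical use, as an illustrative sketch (the port and hostname values are examples):
+//
+//   OlySocket server(8080, true);                 // listening server socket
+//   int fd = server.acceptConnection();           // blocks until a client connects
+//   server.sendString("hello\n");
+//
+//   OlySocket client(8080, (char*)"localhost");   // client connection to a running host
+//   char buf[64];
+//   int n = client.receiveString(buf, sizeof(buf));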
+
+#endif //__OLY_SOCKET_H__
diff --git a/tools/gator/daemon/OlyUtility.cpp b/tools/gator/daemon/OlyUtility.cpp
new file mode 100644
index 000000000000..df4fe221a39a
--- /dev/null
+++ b/tools/gator/daemon/OlyUtility.cpp
@@ -0,0 +1,228 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+
+#if defined(WIN32)
+#include <windows.h>
+#elif defined(__linux__)
+#include <unistd.h>
+#elif defined(DARWIN)
+#include <mach-o/dyld.h>
+#endif
+
+#include "OlyUtility.h"
+
+OlyUtility* util = NULL;
+
+bool OlyUtility::stringToBool(const char* string, bool defValue) {
+ char value[32];
+
+ if (string == NULL) {
+ return defValue;
+ }
+
+ strncpy(value, string, sizeof(value));
+ if (value[0] == 0) {
+ return defValue;
+ }
+ value[sizeof(value) - 1] = 0; // strncpy does not guarantee a null-terminated string
+
+ // Convert to lowercase
+ int i = 0;
+ while (value[i]) {
+ value[i] = tolower(value[i]);
+ i++;
+ }
+
+ if (strcmp(value, "true") == 0 || strcmp(value, "yes") == 0 || strcmp(value, "1") == 0 || strcmp(value, "on") == 0) {
+ return true;
+ } else if (strcmp(value, "false") == 0 || strcmp(value, "no") == 0 || strcmp(value, "0") == 0 || strcmp(value, "off") == 0) {
+ return false;
+ } else {
+ return defValue;
+ }
+}
+
+void OlyUtility::stringToLower(char* string) {
+ if (string == NULL) {
+ return;
+ }
+
+ while (*string) {
+ *string = tolower(*string);
+ string++;
+ }
+}
+
+// Modifies fullpath with the path part including the trailing path separator
+int OlyUtility::getApplicationFullPath(char* fullpath, int sizeOfPath) {
+ memset(fullpath, 0, sizeOfPath);
+#if defined(WIN32)
+ int length = GetModuleFileName(NULL, fullpath, sizeOfPath);
+#elif defined(__linux__)
+ int length = readlink("/proc/self/exe", fullpath, sizeOfPath);
+#elif defined(DARWIN)
+ uint32_t length_u = (uint32_t)sizeOfPath;
+ int length = sizeOfPath;
+ if (_NSGetExecutablePath(fullpath, &length_u) == 0) {
+ length = strlen(fullpath);
+ }
+#endif
+
+ if (length < 0 || length == sizeOfPath) {
+ return -1;
+ }
+
+ fullpath[length] = 0;
+ fullpath = getPathPart(fullpath);
+
+ return 0;
+}
+
+char* OlyUtility::readFromDisk(const char* file, unsigned int *size, bool appendNull) {
+ // Open the file
+ FILE* pFile = fopen(file, "rb");
+ if (pFile==NULL) {
+ return NULL;
+ }
+
+ // Obtain file size
+ fseek(pFile , 0 , SEEK_END);
+ unsigned int lSize = ftell(pFile);
+ rewind(pFile);
+
+ // Allocate memory to contain the whole file
+ char* buffer = (char*)malloc(lSize + (int)appendNull);
+ if (buffer == NULL) {
+ fclose(pFile);
+ return NULL;
+ }
+
+ // Copy the file into the buffer
+ if (fread(buffer, 1, lSize, pFile) != lSize) {
+ free(buffer);
+ fclose(pFile);
+ return NULL;
+ }
+
+ // Terminate
+ fclose(pFile);
+
+ if (appendNull) {
+ buffer[lSize] = 0;
+ }
+
+ if (size) {
+ *size = lSize;
+ }
+
+ return buffer;
+}
+
+int OlyUtility::writeToDisk(const char* path, const char* data) {
+ // Open the file
+ FILE* pFile = fopen(path, "wb");
+ if (pFile == NULL) {
+ return -1;
+ }
+
+ // Write the data to disk
+ if (fwrite(data, 1, strlen(data), pFile) != strlen(data)) {
+ fclose(pFile);
+ return -1;
+ }
+
+ // Terminate
+ fclose(pFile);
+ return 0;
+}
+
+int OlyUtility::appendToDisk(const char* path, const char* data) {
+ // Open the file
+ FILE* pFile = fopen(path, "a");
+ if (pFile == NULL) {
+ return -1;
+ }
+
+ // Write the data to disk
+ if (fwrite(data, 1, strlen(data), pFile) != strlen(data)) {
+ fclose(pFile);
+ return -1;
+ }
+
+ // Terminate
+ fclose(pFile);
+ return 0;
+}
+
+/**
+ * Copies the srcFile into dstFile in 1kB chunks.
+ * The dstFile will be overwritten if it exists.
+ * 0 is returned on an error; otherwise 1.
+ */
+#define TRANSFER_SIZE 1024
+int OlyUtility::copyFile(const char* srcFile, const char* dstFile) {
+ FILE * f_src = fopen(srcFile,"rb");
+ if (!f_src) {
+ return 0;
+ }
+ FILE * f_dst = fopen(dstFile,"wb");
+ if (!f_dst) {
+ fclose(f_src);
+ return 0;
+ }
+ char* buffer = (char*)malloc(TRANSFER_SIZE);
+ while (!feof(f_src)) {
+ int num_bytes_read = fread(buffer, 1, TRANSFER_SIZE, f_src);
+ if (num_bytes_read < TRANSFER_SIZE && !feof(f_src)) {
+ free(buffer);
+ fclose(f_src);
+ fclose(f_dst);
+ return 0;
+ }
+ int num_bytes_written = fwrite(buffer, 1, num_bytes_read, f_dst);
+ if (num_bytes_written != num_bytes_read) {
+ free(buffer);
+ fclose(f_src);
+ fclose(f_dst);
+ return 0;
+ }
+ }
+ fclose(f_src);
+ fclose(f_dst);
+ free(buffer);
+ return 1;
+}
+
+const char* OlyUtility::getFilePart(const char* path) {
+ const char* last_sep = strrchr(path, PATH_SEPARATOR);
+
+ // in case path is not a full path
+ if (last_sep == NULL) {
+ return path;
+ }
+
+ return last_sep + 1;
+}
+
+// getPathPart may modify the contents of path
+// returns the path including the trailing path separator
+char* OlyUtility::getPathPart(char* path) {
+ char* last_sep = strrchr(path, PATH_SEPARATOR);
+
+ // in case path is not a full path
+ if (last_sep == NULL) {
+ return 0;
+ }
+ last_sep++;
+ *last_sep = 0;
+
+ return (path);
+}
diff --git a/tools/gator/daemon/OlyUtility.h b/tools/gator/daemon/OlyUtility.h
new file mode 100644
index 000000000000..424c583fbe5e
--- /dev/null
+++ b/tools/gator/daemon/OlyUtility.h
@@ -0,0 +1,40 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OLY_UTILITY_H
+#define OLY_UTILITY_H
+
+#ifdef WIN32
+#define PATH_SEPARATOR '\\'
+#define CAIMAN_PATH_MAX MAX_PATH
+#define snprintf _snprintf
+#else
+#include <limits.h>
+#define PATH_SEPARATOR '/'
+#define CAIMAN_PATH_MAX PATH_MAX
+#endif
+
+class OlyUtility {
+public:
+ OlyUtility() {};
+ ~OlyUtility() {};
+ bool stringToBool(const char* string, bool defValue);
+ void stringToLower(char* string);
+ int getApplicationFullPath(char* path, int sizeOfPath);
+ char* readFromDisk(const char* file, unsigned int *size = NULL, bool appendNull = true);
+ int writeToDisk(const char* path, const char* file);
+ int appendToDisk(const char* path, const char* file);
+ int copyFile(const char* srcFile, const char* dstFile);
+ const char* getFilePart(const char* path);
+ char* getPathPart(char* path);
+private:
+};
+
+extern OlyUtility* util;
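+
+// Examples, illustrative only (the paths are not real):
+//
+//   util->writeToDisk("/tmp/example.xml", xmlString);       // 0 on success, -1 on error
+//   util->copyFile("/tmp/example.xml", "/tmp/copy.xml");    // 1 on success, 0 on error
+//   const char* name = util->getFilePart("/tmp/copy.xml");  // "copy.xml"
+//   bool on = util->stringToBool("yes", false);             // true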
+
+#endif // OLY_UTILITY_H
diff --git a/tools/gator/daemon/Sender.cpp b/tools/gator/daemon/Sender.cpp
new file mode 100644
index 000000000000..54f2207a2559
--- /dev/null
+++ b/tools/gator/daemon/Sender.cpp
@@ -0,0 +1,109 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <string.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <sys/types.h>
+#include <arpa/inet.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include "Sender.h"
+#include "Logging.h"
+#include "SessionData.h"
+
+Sender::Sender(OlySocket* socket) {
+ mDataFile = NULL;
+ mDataSocket = NULL;
+
+ // Set up the socket connection
+ if (socket) {
+ char streamline[64] = {0};
+ mDataSocket = socket;
+
+ // Receive magic sequence - can wait forever
+ // Streamline will send data prior to the magic sequence for legacy support, which should be ignored for v4+
+ while (strcmp("STREAMLINE", streamline) != 0) {
+ if (mDataSocket->receiveString(streamline, sizeof(streamline)) == -1) {
+ logg->logError(__FILE__, __LINE__, "Socket disconnected");
+ handleException();
+ }
+ }
+
+ // Send magic sequence - must be done first, after which error messages can be sent
+ char magic[32];
+ snprintf(magic, 32, "GATOR %i\n", PROTOCOL_VERSION);
+ mDataSocket->send(magic, strlen(magic));
+
+ gSessionData->mWaitingOnCommand = true;
+ logg->logMessage("Completed magic sequence");
+ }
+
+ pthread_mutex_init(&mSendMutex, NULL);
+}
+
+Sender::~Sender() {
+ delete mDataSocket;
+ mDataSocket = NULL;
+ if (mDataFile) {
+ fclose(mDataFile);
+ }
+}
+
+void Sender::createDataFile(char* apcDir) {
+ if (apcDir == NULL) {
+ return;
+ }
+
+ mDataFileName = (char*)malloc(strlen(apcDir) + 12);
+ sprintf(mDataFileName, "%s/0000000000", apcDir);
+ mDataFile = fopen(mDataFileName, "wb");
+ if (!mDataFile) {
+ logg->logError(__FILE__, __LINE__, "Failed to open binary file: %s", mDataFileName);
+ handleException();
+ }
+}
+
+void Sender::writeData(const char* data, int length, int type) {
+ if (length < 0 || (data == NULL && length > 0)) {
+ return;
+ }
+
+ // Multiple threads call writeData()
+ pthread_mutex_lock(&mSendMutex);
+
+ // Send data over the socket connection
+ if (mDataSocket) {
+ // Start alarm
+ alarm(8);
+
+ // Send data over the socket, sending the type and size first
+ logg->logMessage("Sending data with length %d", length);
+ if (type != RESPONSE_APC_DATA) {
+ // type and length already added by the Collector for apc data
+ mDataSocket->send((char*)&type, 1);
+ mDataSocket->send((char*)&length, sizeof(length));
+ }
+ mDataSocket->send((char*)data, length);
+
+ // Stop alarm
+ alarm(0);
+ }
+
+ // Write data to disk as long as it is not meta data
+ if (mDataFile && type == RESPONSE_APC_DATA) {
+ logg->logMessage("Writing data with length %d", length);
+ // Send data to the data file
+ if (fwrite(data, 1, length, mDataFile) != (unsigned int)length) {
+ logg->logError(__FILE__, __LINE__, "Failed writing binary file %s", mDataFileName);
+ handleException();
+ }
+ }
+
+ pthread_mutex_unlock(&mSendMutex);
+}
diff --git a/tools/gator/daemon/Sender.h b/tools/gator/daemon/Sender.h
new file mode 100644
index 000000000000..ceab343af9fa
--- /dev/null
+++ b/tools/gator/daemon/Sender.h
@@ -0,0 +1,37 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SENDER_H__
+#define __SENDER_H__
+
+#include <stdio.h>
+#include <pthread.h>
+#include "OlySocket.h"
+
+enum {
+ RESPONSE_XML = 1,
+ RESPONSE_APC_DATA = 3,
+ RESPONSE_ACK = 4,
+ RESPONSE_NAK = 5,
+ RESPONSE_ERROR = 0xFF
+};
+
+class Sender {
+public:
+ Sender(OlySocket* socket);
+ ~Sender();
+ void writeData(const char* data, int length, int type);
+ void createDataFile(char* apcDir);
+private:
+ OlySocket* mDataSocket;
+ FILE* mDataFile;
+ char* mDataFileName;
+ pthread_mutex_t mSendMutex;
+};
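+
+// Typical use, as an illustrative sketch ('socket', 'buf' and 'length' are example names):
+//
+//   Sender sender(socket);                        // pass NULL for a local (file only) capture
+//   sender.createDataFile(gSessionData->mAPCDir);
+//   sender.writeData(buf, length, RESPONSE_APC_DATA);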
+
+#endif //__SENDER_H__
diff --git a/tools/gator/daemon/SessionData.cpp b/tools/gator/daemon/SessionData.cpp
new file mode 100644
index 000000000000..53a3ea682b72
--- /dev/null
+++ b/tools/gator/daemon/SessionData.cpp
@@ -0,0 +1,100 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <string.h>
+#include "SessionData.h"
+#include "SessionXML.h"
+#include "Logging.h"
+
+SessionData* gSessionData = NULL;
+
+SessionData::SessionData() {
+ initialize();
+}
+
+SessionData::~SessionData() {
+}
+
+void SessionData::initialize() {
+ mWaitingOnCommand = false;
+ mSessionIsActive = false;
+ mLocalCapture = false;
+ mOneShot = false;
+ strcpy(mCoreName, "unknown");
+ mConfigurationXMLPath = NULL;
+ mSessionXMLPath = NULL;
+ mEventsXMLPath = NULL;
+ mTargetPath = NULL;
+ mAPCDir = NULL;
+ mSampleRate = 0;
+ mDuration = 0;
+ mBacktraceDepth = 0;
+ mTotalBufferSize = 0;
+ mCores = 1;
+
+ initializeCounters();
+}
+
+void SessionData::initializeCounters() {
+ // PMU Counters
+ for (int i = 0; i < MAX_PERFORMANCE_COUNTERS; i++) {
+ mPerfCounterType[i][0] = 0;
+ mPerfCounterTitle[i][0] = 0;
+ mPerfCounterName[i][0] = 0;
+ mPerfCounterDescription[i][0] = 0;
+ mPerfCounterDisplay[i][0] = 0;
+ mPerfCounterUnits[i][0] = 0;
+ mPerfCounterEnabled[i] = 0;
+ mPerfCounterEvent[i] = 0;
+ mPerfCounterColor[i] = 0;
+ mPerfCounterKey[i] = 0;
+ mPerfCounterCount[i] = 0;
+ mPerfCounterPerCPU[i] = false;
+ mPerfCounterEBSCapable[i] = false;
+ mPerfCounterAverageSelection[i] = false;
+ }
+}
+
+void SessionData::parseSessionXML(char* xmlString) {
+ SessionXML session(xmlString);
+ session.parse();
+
+ // Set session data values
+ if (strcmp(session.parameters.sample_rate, "high") == 0) {
+ gSessionData->mSampleRate = 10000;
+ } else if (strcmp(session.parameters.sample_rate, "normal") == 0) {
+ gSessionData->mSampleRate = 1000;
+ } else if (strcmp(session.parameters.sample_rate, "low") == 0) {
+ gSessionData->mSampleRate = 100;
+ } else if (strcmp(session.parameters.sample_rate, "none") == 0) {
+ gSessionData->mSampleRate = 0;
+ } else {
+ logg->logError(__FILE__, __LINE__, "Invalid sample rate (%s) in session xml.", session.parameters.sample_rate);
+ handleException();
+ }
+ gSessionData->mBacktraceDepth = session.parameters.call_stack_unwinding == true ? 128 : 0;
+ gSessionData->mDuration = session.parameters.duration;
+
+ // Determine buffer size (in MB) based on buffer mode
+ gSessionData->mOneShot = true;
+ if (strcmp(session.parameters.buffer_mode, "streaming") == 0) {
+ gSessionData->mOneShot = false;
+ gSessionData->mTotalBufferSize = 1;
+ } else if (strcmp(session.parameters.buffer_mode, "small") == 0) {
+ gSessionData->mTotalBufferSize = 1;
+ } else if (strcmp(session.parameters.buffer_mode, "normal") == 0) {
+ gSessionData->mTotalBufferSize = 4;
+ } else if (strcmp(session.parameters.buffer_mode, "large") == 0) {
+ gSessionData->mTotalBufferSize = 16;
+ } else {
+ logg->logError(__FILE__, __LINE__, "Invalid value for buffer mode in session xml.");
+ handleException();
+ }
+
+ gSessionData->mImages = session.parameters.images;
+}
diff --git a/tools/gator/daemon/SessionData.h b/tools/gator/daemon/SessionData.h
new file mode 100644
index 000000000000..e0e0b7ad8a29
--- /dev/null
+++ b/tools/gator/daemon/SessionData.h
@@ -0,0 +1,71 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SESSION_DATA_H
+#define SESSION_DATA_H
+
+#define MAX_PERFORMANCE_COUNTERS 50
+#define MAX_STRING_LEN 80
+#define MAX_DESCRIPTION_LEN 400
+
+#define PROTOCOL_VERSION 12
+#define PROTOCOL_DEV 1000 // Differentiates development versions (timestamp) from release versions
+
+struct ImageLinkList {
+ char* path;
+ struct ImageLinkList *next;
+};
+
+class SessionData {
+public:
+ SessionData();
+ ~SessionData();
+ void initialize();
+ void initializeCounters();
+ void parseSessionXML(char* xmlString);
+
+ char mCoreName[MAX_STRING_LEN];
+ struct ImageLinkList *mImages;
+ char* mConfigurationXMLPath;
+ char* mSessionXMLPath;
+ char* mEventsXMLPath;
+ char* mTargetPath;
+ char* mAPCDir;
+
+ bool mWaitingOnCommand;
+ bool mSessionIsActive;
+ bool mLocalCapture;
+ bool mOneShot; // halt processing of the driver data until profiling is complete or the buffer is filled
+
+ int mBacktraceDepth;
+ int mTotalBufferSize; // number of MB to use for the entire collection buffer
+ int mSampleRate;
+ int mDuration;
+ int mCores;
+
+ // PMU Counters
+ bool mCounterOverflow;
+ char mPerfCounterType[MAX_PERFORMANCE_COUNTERS][MAX_STRING_LEN];
+ char mPerfCounterTitle[MAX_PERFORMANCE_COUNTERS][MAX_STRING_LEN];
+ char mPerfCounterName[MAX_PERFORMANCE_COUNTERS][MAX_STRING_LEN];
+ char mPerfCounterDescription[MAX_PERFORMANCE_COUNTERS][MAX_DESCRIPTION_LEN];
+ char mPerfCounterDisplay[MAX_PERFORMANCE_COUNTERS][MAX_STRING_LEN];
+ char mPerfCounterUnits[MAX_PERFORMANCE_COUNTERS][MAX_STRING_LEN];
+ int mPerfCounterEnabled[MAX_PERFORMANCE_COUNTERS];
+ int mPerfCounterEvent[MAX_PERFORMANCE_COUNTERS];
+ int mPerfCounterColor[MAX_PERFORMANCE_COUNTERS];
+ int mPerfCounterCount[MAX_PERFORMANCE_COUNTERS];
+ int mPerfCounterKey[MAX_PERFORMANCE_COUNTERS];
+ bool mPerfCounterPerCPU[MAX_PERFORMANCE_COUNTERS];
+ bool mPerfCounterEBSCapable[MAX_PERFORMANCE_COUNTERS];
+ bool mPerfCounterAverageSelection[MAX_PERFORMANCE_COUNTERS];
+};
+
+extern SessionData* gSessionData;
+
+#endif // SESSION_DATA_H
diff --git a/tools/gator/daemon/SessionXML.cpp b/tools/gator/daemon/SessionXML.cpp
new file mode 100644
index 000000000000..b2b6c3083c05
--- /dev/null
+++ b/tools/gator/daemon/SessionXML.cpp
@@ -0,0 +1,105 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <string.h>
+#include <stdlib.h>
+#include <limits.h>
+#include "SessionXML.h"
+#include "Logging.h"
+#include "OlyUtility.h"
+
+static const char* TAG_SESSION = "session";
+static const char* TAG_IMAGE = "image";
+
+static const char* ATTR_VERSION = "version";
+static const char* ATTR_CALL_STACK_UNWINDING = "call_stack_unwinding";
+static const char* ATTR_BUFFER_MODE = "buffer_mode";
+static const char* ATTR_SAMPLE_RATE = "sample_rate";
+static const char* ATTR_DURATION = "duration";
+static const char* ATTR_PATH = "path";
+
+SessionXML::SessionXML(const char* str) {
+ parameters.buffer_mode[0] = 0;
+ parameters.sample_rate[0] = 0;
+ parameters.duration = 0;
+ parameters.call_stack_unwinding = false;
+ parameters.images = NULL;
+ mPath = 0;
+ mSessionXML = (char*)str;
+ logg->logMessage(mSessionXML);
+}
+
+SessionXML::~SessionXML() {
+ if (mPath != 0) {
+ free(mSessionXML);
+ }
+}
+
+void SessionXML::parse() {
+ mxml_node_t *tree;
+ mxml_node_t *node;
+
+ tree = mxmlLoadString(NULL, mSessionXML, MXML_NO_CALLBACK);
+ node = mxmlFindElement(tree, tree, TAG_SESSION, NULL, NULL, MXML_DESCEND);
+
+ if (node) {
+ sessionTag(tree, node);
+ mxmlDelete(tree);
+ return;
+ }
+
+ logg->logError(__FILE__, __LINE__, "No session tag found in the session.xml file");
+ handleException();
+}
+
+void SessionXML::sessionTag(mxml_node_t *tree, mxml_node_t *node) {
+ int version = 0;
+ if (mxmlElementGetAttr(node, ATTR_VERSION)) version = strtol(mxmlElementGetAttr(node, ATTR_VERSION), NULL, 10);
+ if (version != 1) {
+ logg->logError(__FILE__, __LINE__, "Invalid session.xml version: %d", version);
+ handleException();
+ }
+
+ // copy to pre-allocated strings
+ if (mxmlElementGetAttr(node, ATTR_BUFFER_MODE)) {
+ strncpy(parameters.buffer_mode, mxmlElementGetAttr(node, ATTR_BUFFER_MODE), sizeof(parameters.buffer_mode));
+ parameters.buffer_mode[sizeof(parameters.buffer_mode) - 1] = 0; // strncpy does not guarantee a null-terminated string
+ }
+ if (mxmlElementGetAttr(node, ATTR_SAMPLE_RATE)) {
+ strncpy(parameters.sample_rate, mxmlElementGetAttr(node, ATTR_SAMPLE_RATE), sizeof(parameters.sample_rate));
+ parameters.sample_rate[sizeof(parameters.sample_rate) - 1] = 0; // strncpy does not guarantee a null-terminated string
+ }
+
+ // integers/bools
+ parameters.call_stack_unwinding = util->stringToBool(mxmlElementGetAttr(node, ATTR_CALL_STACK_UNWINDING), false);
+ if (mxmlElementGetAttr(node, ATTR_DURATION)) parameters.duration = strtol(mxmlElementGetAttr(node, ATTR_DURATION), NULL, 10);
+
+ // parse subtags
+ node = mxmlGetFirstChild(node);
+ while (node) {
+ if (mxmlGetType(node) != MXML_ELEMENT) {
+ node = mxmlWalkNext(node, tree, MXML_NO_DESCEND);
+ continue;
+ }
+ if (strcmp(TAG_IMAGE, mxmlGetElement(node)) == 0) {
+ sessionImage(node);
+ }
+ node = mxmlWalkNext(node, tree, MXML_NO_DESCEND);
+ }
+}
+
+void SessionXML::sessionImage(mxml_node_t *node) {
+ int length = strlen(mxmlElementGetAttr(node, ATTR_PATH));
+ struct ImageLinkList *image;
+
+ image = (struct ImageLinkList *)malloc(sizeof(struct ImageLinkList));
+ image->path = (char*)malloc(length + 1);
+ strcpy(image->path, mxmlElementGetAttr(node, ATTR_PATH));
+ image->next = parameters.images;
+ parameters.images = image;
+}
diff --git a/tools/gator/daemon/SessionXML.h b/tools/gator/daemon/SessionXML.h
new file mode 100644
index 000000000000..f7a5641e498a
--- /dev/null
+++ b/tools/gator/daemon/SessionXML.h
@@ -0,0 +1,36 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SESSION_XML_H
+#define SESSION_XML_H
+
+#include "mxml/mxml.h"
+#include "SessionData.h"
+
+struct ConfigParameters {
+ char buffer_mode[64]; // buffer mode: "streaming", "small", "normal", or "large"; defines oneshot and buffer size
+ char sample_rate[64]; // capture mode: "high", "normal", "low", or "none"
+ int duration; // length of profile in seconds
+ bool call_stack_unwinding; // whether stack unwinding is performed
+ struct ImageLinkList *images; // linked list of image strings
+};
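+
+// Example of the session XML this class parses (the attribute values shown are illustrative;
+// version must be 1, and the recognised buffer_mode and sample_rate values are listed above):
+//
+//   <session version="1" call_stack_unwinding="yes" buffer_mode="streaming"
+//            sample_rate="normal" duration="10">
+//     <image path="/system/lib/libexample.so"/>
+//   </session>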
+
+class SessionXML {
+public:
+ SessionXML(const char* str);
+ ~SessionXML();
+ void parse();
+ ConfigParameters parameters;
+private:
+ char* mSessionXML;
+ char* mPath;
+ void sessionTag(mxml_node_t *tree, mxml_node_t *node);
+ void sessionImage(mxml_node_t *node);
+};
+
+#endif // SESSION_XML_H
diff --git a/tools/gator/daemon/StreamlineSetup.cpp b/tools/gator/daemon/StreamlineSetup.cpp
new file mode 100644
index 000000000000..d13cf1d2568c
--- /dev/null
+++ b/tools/gator/daemon/StreamlineSetup.cpp
@@ -0,0 +1,326 @@
+/**
+ * Copyright (C) ARM Limited 2011-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <dirent.h>
+#include <sys/types.h>
+#include <arpa/inet.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include "Sender.h"
+#include "Logging.h"
+#include "OlyUtility.h"
+#include "SessionData.h"
+#include "CapturedXML.h"
+#include "StreamlineSetup.h"
+#include "ConfigurationXML.h"
+
+static const char* TAG_SESSION = "session";
+static const char* TAG_REQUEST = "request";
+static const char* TAG_CONFIGURATIONS = "configurations";
+
+static const char* ATTR_TYPE = "type";
+static const char* VALUE_EVENTS = "events";
+static const char* VALUE_CONFIGURATION = "configuration";
+static const char* VALUE_COUNTERS = "counters";
+static const char* VALUE_SESSION = "session";
+static const char* VALUE_CAPTURED = "captured";
+static const char* VALUE_DEFAULTS = "defaults";
+
+StreamlineSetup::StreamlineSetup(OlySocket* s) {
+ bool ready = false;
+ char* data = NULL;
+ int type;
+
+ mSocket = s;
+ mSessionXML = NULL;
+
+ // Receive commands from Streamline (master)
+ while (!ready) {
+ // receive command over socket
+ gSessionData->mWaitingOnCommand = true;
+ data = readCommand(&type);
+
+ // parse and handle data
+ switch (type) {
+ case COMMAND_REQUEST_XML:
+ handleRequest(data);
+ break;
+ case COMMAND_DELIVER_XML:
+ handleDeliver(data);
+ break;
+ case COMMAND_APC_START:
+ logg->logMessage("Received apc start request");
+ ready = true;
+ break;
+ case COMMAND_APC_STOP:
+ logg->logMessage("Received apc stop request before apc start request");
+ exit(0);
+ break;
+ case COMMAND_DISCONNECT:
+ logg->logMessage("Received disconnect command");
+ exit(0);
+ break;
+ case COMMAND_PING:
+ logg->logMessage("Received ping command");
+ sendData(NULL, 0, RESPONSE_ACK);
+ break;
+ default:
+ logg->logError(__FILE__, __LINE__, "Target error: Unknown command type, %d", type);
+ handleException();
+ }
+
+ free(data);
+ }
+
+ if (gSessionData->mCounterOverflow) {
+ logg->logError(__FILE__, __LINE__, "Exceeded maximum number of %d performance counters", MAX_PERFORMANCE_COUNTERS);
+ handleException();
+ }
+}
+
+StreamlineSetup::~StreamlineSetup() {
+ if (mSessionXML) {
+ free(mSessionXML);
+ }
+}
+
+char* StreamlineSetup::readCommand(int* command) {
+ char type;
+ char* data;
+ int response, length;
+
+ // receive type
+ response = mSocket->receiveNBytes(&type, sizeof(type));
+
+ // After receiving a single byte, we are no longer waiting on a command
+ gSessionData->mWaitingOnCommand = false;
+
+ if (response < 0) {
+ logg->logError(__FILE__, __LINE__, "Target error: Unexpected socket disconnect");
+ handleException();
+ }
+
+ // receive length
+ response = mSocket->receiveNBytes((char*)&length, sizeof(length));
+ if (response < 0) {
+ logg->logError(__FILE__, __LINE__, "Target error: Unexpected socket disconnect");
+ handleException();
+ }
+
+ // add artificial limit
+ if ((length < 0) || length > 1024 * 1024) {
+ logg->logError(__FILE__, __LINE__, "Target error: Invalid length received, %d", length);
+ handleException();
+ }
+
+ // allocate memory to contain the xml file, size of zero returns a zero size object
+ data = (char*)calloc(length + 1, 1);
+ if (data == NULL) {
+ logg->logError(__FILE__, __LINE__, "Unable to allocate memory for xml");
+ handleException();
+ }
+
+ // receive data
+ response = mSocket->receiveNBytes(data, length);
+ if (response < 0) {
+ logg->logError(__FILE__, __LINE__, "Target error: Unexpected socket disconnect");
+ handleException();
+ }
+
+ // null terminate the data for string parsing
+ if (length > 0) {
+ data[length] = 0;
+ }
+
+ *command = type;
+ return data;
+}
+
+void StreamlineSetup::handleRequest(char* xml) {
+ mxml_node_t *tree, *node;
+ const char * attr = NULL;
+
+ tree = mxmlLoadString(NULL, xml, MXML_NO_CALLBACK);
+ node = mxmlFindElement(tree, tree, TAG_REQUEST, ATTR_TYPE, NULL, MXML_DESCEND_FIRST);
+ if (node) {
+ attr = mxmlElementGetAttr(node, ATTR_TYPE);
+ }
+ if (attr && strcmp(attr, VALUE_EVENTS) == 0) {
+ sendEvents();
+ logg->logMessage("Sent events xml response");
+ } else if (attr && strcmp(attr, VALUE_CONFIGURATION) == 0) {
+ sendConfiguration();
+ logg->logMessage("Sent configuration xml response");
+ } else if (attr && strcmp(attr, VALUE_COUNTERS) == 0) {
+ sendCounters();
+ logg->logMessage("Sent counters xml response");
+ } else if (attr && strcmp(attr, VALUE_SESSION) == 0) {
+ sendData(mSessionXML, strlen(mSessionXML), RESPONSE_XML);
+ logg->logMessage("Sent session xml response");
+ } else if (attr && strcmp(attr, VALUE_CAPTURED) == 0) {
+ CapturedXML capturedXML;
+ char* capturedText = capturedXML.getXML(false);
+ sendData(capturedText, strlen(capturedText), RESPONSE_XML);
+ free(capturedText);
+ logg->logMessage("Sent captured xml response");
+ } else if (attr && strcmp(attr, VALUE_DEFAULTS) == 0) {
+ sendDefaults();
+ logg->logMessage("Sent default configuration xml response");
+ } else {
+ char error[] = "Unknown request";
+ sendData(error, strlen(error), RESPONSE_NAK);
+ logg->logMessage("Received unknown request:\n%s", xml);
+ }
+
+ mxmlDelete(tree);
+}
+
+void StreamlineSetup::handleDeliver(char* xml) {
+ mxml_node_t *tree;
+
+ // Determine xml type
+ tree = mxmlLoadString(NULL, xml, MXML_NO_CALLBACK);
+ if (mxmlFindElement(tree, tree, TAG_SESSION, NULL, NULL, MXML_DESCEND_FIRST)) {
+ // Session XML
+ gSessionData->parseSessionXML(xml);
+
+ // Save xml
+ mSessionXML = strdup(xml);
+ if (mSessionXML == NULL) {
+ logg->logError(__FILE__, __LINE__, "malloc failed for size %d", strlen(xml) + 1);
+ handleException();
+ }
+ sendData(NULL, 0, RESPONSE_ACK);
+ logg->logMessage("Received session xml");
+ } else if (mxmlFindElement(tree, tree, TAG_CONFIGURATIONS, NULL, NULL, MXML_DESCEND_FIRST)) {
+ // Configuration XML
+ writeConfiguration(xml);
+ sendData(NULL, 0, RESPONSE_ACK);
+ logg->logMessage("Received configuration xml");
+ } else {
+ // Unknown XML
+ logg->logMessage("Received unknown XML delivery type");
+ sendData(NULL, 0, RESPONSE_NAK);
+ }
+
+ mxmlDelete(tree);
+}
+
+void StreamlineSetup::sendData(const char* data, int length, int type) {
+ mSocket->send((char*)&type, 1);
+ mSocket->send((char*)&length, sizeof(length));
+ mSocket->send((char*)data, length);
+}
+
+void StreamlineSetup::sendEvents() {
+#include "events_xml.h" // defines and initializes char events_xml[] and int events_xml_len
+ char* path = (char*)malloc(PATH_MAX);
+ char* buffer;
+ unsigned int size = 0;
+
+ if (gSessionData->mEventsXMLPath) {
+ strncpy(path, gSessionData->mEventsXMLPath, PATH_MAX);
+ } else {
+ util->getApplicationFullPath(path, PATH_MAX);
+ strncat(path, "events.xml", PATH_MAX - strlen(path) - 1);
+ }
+ buffer = util->readFromDisk(path, &size);
+ if (buffer == NULL) {
+ logg->logMessage("Unable to locate events.xml, using default");
+ buffer = (char*)events_xml;
+ size = events_xml_len;
+ }
+
+ sendData(buffer, size, RESPONSE_XML);
+ if (buffer != (char*)events_xml) {
+ free(buffer);
+ }
+ free(path);
+}
+
+void StreamlineSetup::sendConfiguration() {
+ ConfigurationXML xml;
+
+ const char* string = xml.getConfigurationXML();
+ sendData(string, strlen(string), RESPONSE_XML);
+}
+
+void StreamlineSetup::sendDefaults() {
+ // Send the config built into the binary
+ const char* xml;
+ unsigned int size;
+ ConfigurationXML::getDefaultConfigurationXml(xml, size);
+
+ // Artificial size restriction
+ if (size > 1024*1024) {
+ logg->logError(__FILE__, __LINE__, "Corrupt default configuration file");
+ handleException();
+ }
+
+ sendData(xml, size, RESPONSE_XML);
+}
+
+void StreamlineSetup::sendCounters() {
+ struct dirent *ent;
+ mxml_node_t *xml;
+ mxml_node_t *counters;
+ mxml_node_t *counter;
+
+ // counters.xml is simply a file listing of /dev/gator/events
+ DIR* dir = opendir("/dev/gator/events");
+ if (dir == NULL) {
+ logg->logError(__FILE__, __LINE__, "Cannot create counters.xml since unable to read /dev/gator/events");
+ handleException();
+ }
+
+ xml = mxmlNewXML("1.0");
+ counters = mxmlNewElement(xml, "counters");
+ while ((ent = readdir(dir)) != NULL) {
+ // skip hidden files, current dir, and parent dir
+ if (ent->d_name[0] == '.')
+ continue;
+ counter = mxmlNewElement(counters, "counter");
+ mxmlElementSetAttr(counter, "name", ent->d_name);
+ }
+ closedir (dir);
+
+ char* string = mxmlSaveAllocString(xml, mxmlWhitespaceCB);
+ sendString(string, RESPONSE_XML);
+
+ free(string);
+ mxmlDelete(xml);
+}
+
+void StreamlineSetup::writeConfiguration(char* xml) {
+ char* path = (char*)malloc(PATH_MAX);
+
+ if (gSessionData->mConfigurationXMLPath) {
+ strncpy(path, gSessionData->mConfigurationXMLPath, PATH_MAX);
+ } else {
+ util->getApplicationFullPath(path, PATH_MAX);
+ strncat(path, "configuration.xml", PATH_MAX - strlen(path) - 1);
+ }
+
+ if (util->writeToDisk(path, xml) < 0) {
+ logg->logError(__FILE__, __LINE__, "Error writing %s\nPlease verify write permissions to this path.", path);
+ handleException();
+ }
+
+ // Re-populate gSessionData with the configuration, as it has now changed
+ new ConfigurationXML();
+ free(path);
+
+ if (gSessionData->mCounterOverflow) {
+ logg->logError(__FILE__, __LINE__, "Exceeded maximum number of %d performance counters", MAX_PERFORMANCE_COUNTERS);
+ handleException();
+ }
+}
diff --git a/tools/gator/daemon/StreamlineSetup.h b/tools/gator/daemon/StreamlineSetup.h
new file mode 100644
index 000000000000..8086fe2d1bbe
--- /dev/null
+++ b/tools/gator/daemon/StreamlineSetup.h
@@ -0,0 +1,45 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __STREAMLINE_SETUP_H__
+#define __STREAMLINE_SETUP_H__
+
+#include "OlySocket.h"
+
+// Commands from Streamline
+enum {
+ COMMAND_REQUEST_XML = 0,
+ COMMAND_DELIVER_XML = 1,
+ COMMAND_APC_START = 2,
+ COMMAND_APC_STOP = 3,
+ COMMAND_DISCONNECT = 4,
+ COMMAND_PING = 5
+};
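+
+// Wire format, as implemented by readCommand() and sendData() in StreamlineSetup.cpp:
+// a single byte carrying the command or response type, then the payload length as a
+// raw int in host byte order, then 'length' bytes of payload.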
+
+class StreamlineSetup {
+public:
+ StreamlineSetup(OlySocket *socket);
+ ~StreamlineSetup();
+private:
+ int mNumConnections;
+ OlySocket* mSocket;
+ char* mSessionXML;
+
+ char* readCommand(int*);
+ void handleRequest(char* xml);
+ void handleDeliver(char* xml);
+ void sendData(const char* data, int length, int type);
+ void sendString(const char* string, int type) {sendData(string, strlen(string), type);}
+ void sendEvents();
+ void sendConfiguration();
+ void sendDefaults();
+ void sendCounters();
+ void writeConfiguration(char* xml);
+};
+
+#endif //__STREAMLINE_SETUP_H__
diff --git a/tools/gator/daemon/configuration.xml b/tools/gator/daemon/configuration.xml
new file mode 100644
index 000000000000..fbdef31921b6
--- /dev/null
+++ b/tools/gator/daemon/configuration.xml
@@ -0,0 +1,47 @@
+<?xml version="1.0" encoding='UTF-8'?>
+<configurations revision="1">
+ <configuration counter="ARM_ARM11_ccnt" title="Clock" name="Cycles" per_cpu="yes" display="hertz" units="Hz" average_selection="yes" description="The number of core clock cycles"/>
+ <configuration counter="ARM_ARM11_cnt0" event="0x7" title="Instruction" name="Executed" per_cpu="yes" description="Instructions executed"/>
+ <configuration counter="ARM_ARM11_cnt1" event="0xb" title="Cache" name="Data miss" per_cpu="yes" description="Data cache miss, not including Cache Operations"/>
+ <configuration counter="ARM_ARM11MPCore_ccnt" title="Clock" name="Cycles" per_cpu="yes" display="hertz" units="Hz" average_selection="yes" description="The number of core clock cycles"/>
+ <configuration counter="ARM_ARM11MPCore_cnt0" event="0x08" title="Core" name="Instructions" per_cpu="yes" description="Instructions executed"/>
+ <configuration counter="ARM_ARM11MPCore_cnt1" event="0x0b" title="Cache" name="Data read miss" per_cpu="yes" description="Data cache miss, not including Cache Operations"/>
+ <configuration counter="ARM_Cortex-A5_ccnt" title="Clock" name="Cycles" per_cpu="yes" supports_event_based_sampling="yes" display="hertz" units="Hz" average_selection="yes" description="The number of core clock cycles"/>
+ <configuration counter="ARM_Cortex-A5_cnt0" event="0x8" title="Instruction" name="Executed" per_cpu="yes" supports_event_based_sampling="yes" description="Instruction architecturally executed"/>
+ <configuration counter="ARM_Cortex-A5_cnt1" event="0x1" title="Cache" name="Instruction refill" per_cpu="yes" supports_event_based_sampling="yes" description="Instruction fetch that causes a refill of at least the level of instruction or unified cache closest to the processor"/>
+ <configuration counter="ARM_Cortex-A7_ccnt" title="Clock" name="Cycles" per_cpu="yes" supports_event_based_sampling="yes" display="hertz" units="Hz" average_selection="yes" description="The number of core clock cycles"/>
+ <configuration counter="ARM_Cortex-A7_cnt0" event="0x08" title="Instruction" name="Executed" per_cpu="yes" supports_event_based_sampling="yes" description="Instruction architecturally executed"/>
+ <configuration counter="ARM_Cortex-A7_cnt1" event="0x10" title="Branch" name="Mispredicted" per_cpu="yes" supports_event_based_sampling="yes" description="Branch mispredicted or not predicted"/>
+ <configuration counter="ARM_Cortex-A7_cnt2" event="0x16" title="Cache" name="L2 data access" per_cpu="yes" supports_event_based_sampling="yes" description="Level 2 data cache access"/>
+ <configuration counter="ARM_Cortex-A8_ccnt" title="Clock" name="Cycles" per_cpu="yes" supports_event_based_sampling="yes" display="hertz" units="Hz" average_selection="yes" description="The number of core clock cycles"/>
+ <configuration counter="ARM_Cortex-A8_cnt0" event="0x8" title="Instruction" name="Executed" per_cpu="yes" supports_event_based_sampling="yes" description="Instruction architecturally executed"/>
+ <configuration counter="ARM_Cortex-A8_cnt1" event="0x44" title="Cache" name="L2 miss" per_cpu="yes" supports_event_based_sampling="yes" description="Any cacheable miss in the L2 cache"/>
+ <configuration counter="ARM_Cortex-A8_cnt2" event="0x43" title="Cache" name="L1 miss" per_cpu="yes" supports_event_based_sampling="yes" description="Any accesses to the L2 cache"/>
+ <configuration counter="ARM_Cortex-A8_cnt3" event="0x10" title="Branch" name="Mispredicted" per_cpu="yes" supports_event_based_sampling="yes" description="Branch mispredicted or not predicted"/>
+ <configuration counter="ARM_Cortex-A9_ccnt" title="Clock" name="Cycles" per_cpu="yes" supports_event_based_sampling="yes" display="hertz" units="Hz" average_selection="yes" description="The number of core clock cycles"/>
+ <configuration counter="ARM_Cortex-A9_cnt0" event="0x68" title="Instruction" name="Executed" per_cpu="yes" supports_event_based_sampling="yes" description="Counts the number of instructions going through the Register Renaming stage. This number is an approximate number of the total number of instructions speculatively executed, and even more approximate of the total number of instructions architecturally executed"/>
+ <configuration counter="ARM_Cortex-A9_cnt1" event="0x06" title="Instruction" name="Memory read" per_cpu="yes" supports_event_based_sampling="yes" description="Memory-reading instruction architecturally executed"/>
+ <configuration counter="ARM_Cortex-A9_cnt2" event="0x07" title="Instruction" name="Memory write" per_cpu="yes" supports_event_based_sampling="yes" description="Memory-writing instruction architecturally executed"/>
+ <configuration counter="ARM_Cortex-A9_cnt3" event="0x03" title="Cache" name="Data refill" per_cpu="yes" supports_event_based_sampling="yes" description="Memory Read or Write operation that causes a refill of at least the level of data or unified cache closest to the processor"/>
+ <configuration counter="ARM_Cortex-A9_cnt4" event="0x04" title="Cache" name="Data access" per_cpu="yes" supports_event_based_sampling="yes" description="Memory Read or Write operation that causes a cache access to at least the level of data or unified cache closest to the processor"/>
+ <configuration counter="ARM_Cortex-A15_ccnt" title="Clock" name="Cycles" per_cpu="yes" supports_event_based_sampling="yes" display="hertz" units="Hz" average_selection="yes" description="The number of core clock cycles"/>
+ <configuration counter="ARM_Cortex-A15_cnt0" event="0x8" title="Instruction" name="Executed" per_cpu="yes" supports_event_based_sampling="yes" description="Instruction architecturally executed"/>
+ <configuration counter="ARM_Cortex-A15_cnt1" event="0x16" title="Cache" name="L2 data access" per_cpu="yes" supports_event_based_sampling="yes" description="Level 2 data cache access"/>
+ <configuration counter="ARM_Cortex-A15_cnt2" event="0x10" title="Branch" name="Mispredicted" per_cpu="yes" supports_event_based_sampling="yes" description="Branch mispredicted or not predicted"/>
+ <configuration counter="ARM_Cortex-A15_cnt3" event="0x19" title="Bus" name="Access" per_cpu="yes" supports_event_based_sampling="yes" description=""/>
+ <configuration counter="Scorpion_ccnt" title="Clock" name="Cycles" per_cpu="yes" supports_event_based_sampling="yes" display="hertz" units="Hz" average_selection="yes" description="The number of core clock cycles"/>
+ <configuration counter="Scorpion_cnt0" event="0x08" title="Instruction" name="Executed" per_cpu="yes" supports_event_based_sampling="yes" description="Instruction architecturally executed"/>
+ <configuration counter="Scorpion_cnt1" event="0x10" title="Branch" name="Mispredicted" per_cpu="yes" supports_event_based_sampling="yes" description="Branch mispredicted or not predicted"/>
+ <configuration counter="ScorpionMP_ccnt" title="Clock" name="Cycles" per_cpu="yes" supports_event_based_sampling="yes" display="hertz" units="Hz" average_selection="yes" description="The number of core clock cycles"/>
+ <configuration counter="ScorpionMP_cnt0" event="0x08" title="Instruction" name="Executed" per_cpu="yes" supports_event_based_sampling="yes" description="Instruction architecturally executed"/>
+ <configuration counter="ScorpionMP_cnt1" event="0x10" title="Branch" name="Mispredicted" per_cpu="yes" supports_event_based_sampling="yes" description="Branch mispredicted or not predicted"/>
+ <configuration counter="Krait_ccnt" title="Clock" name="Cycles" per_cpu="yes" supports_event_based_sampling="yes" display="hertz" units="Hz" average_selection="yes" description="The number of core clock cycles"/>
+ <configuration counter="Krait_cnt0" event="0x08" title="Instruction" name="Executed" per_cpu="yes" supports_event_based_sampling="yes" description="Instruction architecturally executed"/>
+ <configuration counter="Krait_cnt1" event="0x10" title="Branch" name="Mispredicted" per_cpu="yes" supports_event_based_sampling="yes" description="Branch mispredicted or not predicted"/>
+ <configuration counter="Linux_block_rq_wr" title="Disk IO" name="Write" units="B" description="Disk IO Bytes Written"/>
+ <configuration counter="Linux_block_rq_rd" title="Disk IO" name="Read" units="B" description="Disk IO Bytes Read"/>
+ <configuration counter="Linux_meminfo_memused" title="Memory" name="Used" display="maximum" units="B" average_selection="yes" description="Total used memory size"/>
+ <configuration counter="Linux_meminfo_memfree" title="Memory" name="Free" display="minimum" units="B" average_selection="yes" description="Available memory size"/>
+ <configuration counter="Linux_power_cpu_freq" title="Clock" name="Frequency" per_cpu="yes" display="maximum" units="Hz" average_selection="yes" description="Frequency setting of the CPU"/>
+ <configuration counter="L2C-310_cnt0" event="0x1" title="L2 Cache" name="CO" description="Eviction, CastOUT, of a line from the L2 cache"/>
+</configurations>
diff --git a/tools/gator/daemon/escape.c b/tools/gator/daemon/escape.c
new file mode 100644
index 000000000000..c0f47f1e08b1
--- /dev/null
+++ b/tools/gator/daemon/escape.c
@@ -0,0 +1,63 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+
+static void print_escaped_path(char *path) { /* print 'path' as a valid C identifier */
+ if (isdigit(*path)) {
+ printf("__"); /* identifiers must not begin with a digit */
+ }
+ for (; *path != '\0'; ++path) {
+ printf("%c", isalnum(*path) ? *path : '_'); /* map non-alphanumeric characters to '_' */
+ }
+}
+
+int main(int argc, char *argv[]) {
+ int i;
+ char *path;
+ FILE *in = NULL;
+ int ch;
+ unsigned int len = 0;
+
+ for (i = 1; i < argc && argv[i][0] == '-'; ++i) ;
+ if (i == argc) {
+ fprintf(stderr, "Usage: %s <filename>\n", argv[0]);
+ return EXIT_FAILURE;
+ }
+ path = argv[i];
+
+ errno = 0;
+ if ((in = fopen(path, "r")) == NULL) {
+ fprintf(stderr, "Unable to open '%s': %s\n", path, strerror(errno));
+ return EXIT_FAILURE;
+ }
+
+ printf("static const unsigned char ");
+ print_escaped_path(path);
+ printf("[] = {");
+ for (; (ch = fgetc(in)) != EOF; ++len) {
+ if (len != 0) {
+ printf(",");
+ }
+ if (len % 12 == 0) {
+ printf("\n ");
+ }
+ printf(" 0x%.2x", ch);
+ }
+ printf("\n};\nstatic const unsigned int ");
+ print_escaped_path(path);
+ printf("_len = %i;\n", len);
+
+ fclose(in);
+
+ return EXIT_SUCCESS;
+}
diff --git a/tools/gator/daemon/events-ARM11.xml b/tools/gator/daemon/events-ARM11.xml
new file mode 100644
index 000000000000..0a5ee66a2c0a
--- /dev/null
+++ b/tools/gator/daemon/events-ARM11.xml
@@ -0,0 +1,39 @@
+ <counter_set name="ARM_ARM11_cnt" count="3"/>
+ <category name="ARM11" counter_set="ARM_ARM11_cnt" per_cpu="yes">
+ <event counter="ARM_ARM11_ccnt" title="Clock" name="Cycles" display="hertz" units="Hz" average_selection="yes" description="The number of core clock cycles"/>
+ <event event="0x00" title="Cache" name="Inst miss" description="Instruction cache miss to a cacheable location, which requires a fetch from external memory"/>
+ <event event="0x01" title="Pipeline" name="Instruction stall" description="Stall because instruction buffer cannot deliver an instruction"/>
+ <event event="0x02" title="Pipeline" name="Data stall" description="Stall because of a data dependency"/>
+ <event event="0x03" title="Cache" name="Inst micro TLB miss" description="Instruction MicroTLB miss (unused on ARM1156)"/>
+ <event event="0x04" title="Cache" name="Data micro TLB miss" description="Data MicroTLB miss (unused on ARM1156)"/>
+ <event event="0x05" title="Branch" name="Instruction executed" description="Branch instruction executed, branch might or might not have changed program flow"/>
+ <event event="0x06" title="Branch" name="Mispredicted" description="Branch mis-predicted"/>
+ <event event="0x07" title="Instruction" name="Executed" description="Instructions executed"/>
+ <event event="0x09" title="Cache" name="Data access" description="Data cache access, not including Cache operations"/>
+ <event event="0x0a" title="Cache" name="Data all access" description="Data cache access, not including Cache Operations regardless of whether or not the location is cacheable"/>
+ <event event="0x0b" title="Cache" name="Data miss" description="Data cache miss, not including Cache Operations"/>
+ <event event="0x0c" title="Cache" name="Write-back" description="Data cache write-back"/>
+ <event event="0x0d" title="Program Counter" name="SW change" description="Software changed the PC"/>
+ <event event="0x0f" title="Cache " name="TLB miss" description="Main TLB miss (unused on ARM1156)"/>
+ <event event="0x10" title="External" name="Access" description="Explicit external data or peripheral access"/>
+ <event event="0x11" title="Cache" name="Data miss" description="Stall because of Load Store Unit request queue being full"/>
+ <event event="0x12" title="Write Buffer" name="Drains" description="The number of times the Write Buffer was drained because of a Data Synchronization Barrier command or Strongly Ordered operation"/>
+ <event event="0x13" title="Disable Interrupts" name="FIQ" description="The number of cycles which FIQ interrupts are disabled (ARM1156 only)"/>
+ <event event="0x14" title="Disable Interrupts" name="IRQ" description="The number of cycles which IRQ interrupts are disabled (ARM1156 only)"/>
+ <event event="0x20" title="ETM" name="ETMEXTOUT[0]" description="ETMEXTOUT[0] signal was asserted for a cycle"/>
+ <event event="0x21" title="ETM" name="ETMEXTOUT[1]" description="ETMEXTOUT[1] signal was asserted for a cycle"/>
+ <event event="0x22" title="ETM" name="ETMEXTOUT[0,1]" description="ETMEXTOUT[0] or ETMEXTOUT[1] was asserted"/>
+ <event event="0x23" title="Procedure" name="Calls" description="Procedure call instruction executed"/>
+ <event event="0x24" title="Procedure" name="Returns" description="Procedure return instruction executed"/>
+ <event event="0x25" title="Procedure" name="Return and predicted" description="Procedure return instruction executed and return address predicted"/>
+ <event event="0x26" title="Procedure" name="Return and mispredicted" description="Procedure return instruction executed and return address predicted incorrectly"/>
+ <event event="0x30" title="Cache" name="Inst tag or parity error" description="Instruction cache Tag or Valid RAM parity error (ARM1156 only)"/>
+ <event event="0x31" title="Cache" name="Inst parity error" description="Instruction cache RAM parity error (ARM1156 only)"/>
+ <event event="0x32" title="Cache" name="Data tag or parity error" description="Data cache Tag or Valid RAM parity error (ARM1156 only)"/>
+ <event event="0x33" title="Cache" name="Data parity error" description="Data cache RAM parity error (ARM1156 only)"/>
+ <event event="0x34" title="ITCM" name="Error" description="ITCM error (ARM1156 only)"/>
+ <event event="0x35" title="DTCM" name="Error" description="DTCM error (ARM1156 only)"/>
+ <event event="0x36" title="Procedure" name="Return address pop" description="Procedure return address popped off the return stack (ARM1156 only)"/>
+ <event event="0x37" title="Procedure" name="Return address misprediction" description="Procedure return address popped off the return stack has been incorrectly predicted by the PFU (ARM1156 only)"/>
+ <event event="0x38" title="Cache" name="Data dirty parity error" description="Data cache Dirty RAM parity error (ARM1156 only)"/>
+ </category>
diff --git a/tools/gator/daemon/events-ARM11MPCore.xml b/tools/gator/daemon/events-ARM11MPCore.xml
new file mode 100644
index 000000000000..1a9ca3fee7db
--- /dev/null
+++ b/tools/gator/daemon/events-ARM11MPCore.xml
@@ -0,0 +1,26 @@
+ <counter_set name="ARM_ARM11MPCore_cnt" count="3"/>
+ <category name="ARM11MPCore" counter_set="ARM_ARM11MPCore_cnt" per_cpu="yes">
+ <event counter="ARM_ARM11MPCore_ccnt" title="Clock" name="Cycles" display="hertz" units="Hz" average_selection="yes" description="The number of core clock cycles"/>
+ <event event="0x00" title="Cache" name="Inst miss" description="Instruction cache miss to a cacheable location, which requires a fetch from external memory"/>
+ <event event="0x01" title="Pipeline" name="Instruction stall" description="Stall because instruction buffer cannot deliver an instruction"/>
+ <event event="0x02" title="Pipeline" name="Data stall" description="Stall because of a data dependency"/>
+ <event event="0x03" title="Cache" name="Inst micro TLB miss" description="Instruction MicroTLB miss (unused on ARM1156)"/>
+ <event event="0x04" title="Cache" name="Data micro TLB miss" description="Data MicroTLB miss (unused on ARM1156)"/>
+ <event event="0x05" title="Branch" name="Instruction executed" description="Branch instructions executed, branch might or might not have changed program flow"/>
+ <event event="0x06" title="Branch" name="Not predicted" description="Branch not predicted"/>
+ <event event="0x07" title="Branch" name="Mispredicted" description="Branch mispredicted"/>
+ <event event="0x08" title="Core" name="Instructions" description="Instructions executed"/>
+ <event event="0x09" title="Core" name="Folded Instructions" description="Folded instructions executed"/>
+ <event event="0x0a" title="Cache" name="Data read access" description="Data cache read access, not including cache operations"/>
+ <event event="0x0b" title="Cache" name="Data read miss" description="Data cache miss, not including Cache Operations"/>
+ <event event="0x0c" title="Cache" name="Data write access" description="Data cache write access"/>
+ <event event="0x0d" title="Cache" name="Data write miss" description="Data cache write miss"/>
+ <event event="0x0e" title="Cache" name="Data line eviction" description="Data cache line eviction, not including cache operations"/>
+ <event event="0x0f" title="Branch" name="PC change w/o mode change" description="Software changed the PC and there is not a mode change"/>
+ <event event="0x10" title="Cache " name="TLB miss" description="Main TLB miss"/>
+ <event event="0x11" title="External" name="External Memory request" description="External memory request (cache refill, noncachable, write-back)"/>
+ <event event="0x12" title="Cache" name="Stall" description="Stall because of Load Store Unit request queue being full"/>
+ <event event="0x13" title="Write Buffer" name="Drains" description="The number of times the Write Buffer was drained because of LSU ordering constraints or CP15 operations (Data Synchronization Barrier command) or Strongly Ordered operation"/>
+ <event event="0x14" title="Write Buffer" name="Write Merges" description="Buffered write merged in a store buffer slot"/>
+ <event event="0xFF" title="Core" name="Cycle counter" description="An increment each cycle"/>
+ </category>
diff --git a/tools/gator/daemon/events-Cortex-A15.xml b/tools/gator/daemon/events-Cortex-A15.xml
new file mode 100644
index 000000000000..e0bd20164387
--- /dev/null
+++ b/tools/gator/daemon/events-Cortex-A15.xml
@@ -0,0 +1,68 @@
+ <counter_set name="ARM_Cortex-A15_cnt" count="6"/>
+ <category name="Cortex-A15" counter_set="ARM_Cortex-A15_cnt" per_cpu="yes" supports_event_based_sampling="yes">
+ <event counter="ARM_Cortex-A15_ccnt" title="Clock" name="Cycles" display="hertz" units="Hz" average_selection="yes" description="The number of core clock cycles"/>
+ <event event="0x00" title="Software" name="Increment" description="Software increment architecturally executed"/>
+ <event event="0x01" title="Cache" name="Instruction refill" description="Instruction fetch that causes a refill of at least the level of instruction or unified cache closest to the processor"/>
+ <event event="0x02" title="Cache" name="Inst TLB refill" description="Instruction fetch that causes a TLB refill of at least the level of TLB closest to the processor"/>
+ <event event="0x03" title="Cache" name="Data refill" description="Memory Read or Write operation that causes a refill of at least the level of data or unified cache closest to the processor"/>
+ <event event="0x04" title="Cache" name="Data access" description="Memory Read or Write operation that causes a cache access to at least the level of data or unified cache closest to the processor"/>
+ <event event="0x05" title="Cache" name="Data TLB refill" description="Memory Read or Write operation that causes a TLB refill of at least the level of TLB closest to the processor"/>
+ <event event="0x08" title="Instruction" name="Executed" description="Instruction architecturally executed"/>
+ <event event="0x09" title="Exception" name="Taken" description="Exceptions taken"/>
+ <event event="0x0a" title="Exception" name="Return" description="Exception return architecturally executed"/>
+ <event event="0x0b" title="Instruction" name="CONTEXTIDR" description="Instruction that writes to the CONTEXTIDR architecturally executed"/>
+ <event event="0x10" title="Branch" name="Mispredicted" description="Branch mispredicted or not predicted"/>
+ <event event="0x12" title="Branch" name="Potential prediction" description="Branch or other change in program flow that could have been predicted by the branch prediction resources of the processor"/>
+ <event event="0x13" title="Memory" name="Memory access" description="Data memory access"/>
+ <event event="0x14" title="Cache" name="L1 inst access" description="Instruction cache access"/>
+ <event event="0x15" title="Cache" name="L1 data write" description="Level 1 data cache Write-Back"/>
+ <event event="0x16" title="Cache" name="L2 data access" description="Level 2 data cache access"/>
+ <event event="0x17" title="Cache" name="L2 data refill" description="Level 2 data cache refill"/>
+ <event event="0x18" title="Cache" name="L2 data write" description="Level 2 data cache Write-Back"/>
+ <event event="0x19" title="Bus" name="Access" description=""/>
+ <event event="0x1a" title="Memory" name="Error" description="Local memory error"/>
+ <event event="0x1b" title="Instruction" name="Speculative" description="Instruction speculatively executed"/>
+ <event event="0x1c" title="Memory" name="Translation table" description="Write to translation table base architecturally executed"/>
+ <event event="0x1d" title="Bus" name="Cycle" description=""/>
+ <event event="0x40" title="Cache" name="L1 data read" description="Level 1 data cache access - Read"/>
+ <event event="0x41" title="Cache" name="L1 data write" description="Level 1 data cache access - Write"/>
+ <event event="0x42" title="Cache" name="L1 data refill read" description="Level 1 data cache refill - Read"/>
+ <event event="0x43" title="Cache" name="L1 data refill write" description="Level 1 data cache refill - Write"/>
+ <event event="0x46" title="Cache" name="L1 data victim" description="Level 1 data cache Write-Back - Victim"/>
+ <event event="0x47" title="Cache" name="L1 data clean" description="Level 1 data cache Write-Back - Cleaning and coherency"/>
+ <event event="0x48" title="Cache" name="L1 data invalidate" description="Level 1 data cache invalidate"/>
+ <event event="0x4c" title="TLB" name="L1 data refill read" description="Level 1 data TLB refill - Read"/>
+ <event event="0x4d" title="TLB" name="L1 data refill write" description="Level 1 data TLB refill - Write"/>
+ <event event="0x50" title="Cache" name="L2 data read" description="Level 2 data cache access - Read"/>
+ <event event="0x51" title="Cache" name="L2 data write" description="Level 2 data cache access - Write"/>
+ <event event="0x52" title="Cache" name="L2 data refill read" description="Level 2 data cache refill - Read"/>
+ <event event="0x53" title="Cache" name="L2 data refill write" description="Level 2 data cache refill - Write"/>
+ <event event="0x56" title="Cache" name="L2 data victim" description="Level 2 data cache Write-Back - Victim"/>
+ <event event="0x57" title="Cache" name="L2 data clean" description="Level 2 data cache Write-Back - Cleaning and coherency"/>
+ <event event="0x58" title="Cache" name="L2 data invalidate" description="Level 2 data cache invalidate"/>
+ <event event="0x60" title="Bus" name="Read" description="Bus access - Read"/>
+ <event event="0x61" title="Bus" name="Write" description="Bus access - Write"/>
+ <event event="0x64" title="Bus" name="Access" description="Bus access - Normal"/>
+ <event event="0x65" title="Bus" name="Peripheral" description="Bus access - Peripheral"/>
+ <event event="0x66" title="Memory" name="Read" description="Data memory access - Read"/>
+ <event event="0x67" title="Memory" name="Write" description="Data memory access - Write"/>
+ <event event="0x68" title="Memory" name="Unaligned Read" description="Unaligned access - Read"/>
+ <event event="0x69" title="Memory" name="Unaligned Write" description="Unaligned access - Write"/>
+ <event event="0x6a" title="Memory" name="Unaligned" description="Unaligned access"/>
+ <event event="0x6c" title="Intrinsic" name="LDREX" description="Exclusive instruction speculatively executed - LDREX"/>
+ <event event="0x6d" title="Intrinsic" name="STREX pass" description="Exclusive instruction speculatively executed - STREX pass"/>
+ <event event="0x6e" title="Intrinsic" name="STREX fail" description="Exclusive instruction speculatively executed - STREX fail"/>
+ <event event="0x70" title="Instruction" name="Load" description="Instruction speculatively executed - Load"/>
+ <event event="0x71" title="Instruction" name="Store" description="Instruction speculatively executed - Store"/>
+ <event event="0x72" title="Instruction" name="Load/Store" description="Instruction speculatively executed - Load or store"/>
+ <event event="0x73" title="Instruction" name="Integer" description="Instruction speculatively executed - Integer data processing"/>
+ <event event="0x74" title="Instruction" name="Advanced SIMD" description="Instruction speculatively executed - Advanced SIMD"/>
+ <event event="0x75" title="Instruction" name="VFP" description="Instruction speculatively executed - VFP"/>
+ <event event="0x76" title="Instruction" name="Software change" description="Instruction speculatively executed - Software change of the PC"/>
+ <event event="0x78" title="Instruction" name="Immediate branch" description="Branch speculatively executed - Immediate branch"/>
+ <event event="0x79" title="Instruction" name="Procedure return" description="Branch speculatively executed - Procedure return"/>
+ <event event="0x7a" title="Instruction" name="Indirect branch" description="Branch speculatively executed - Indirect branch"/>
+ <event event="0x7c" title="Instruction" name="ISB" description="Barrier speculatively executed - ISB"/>
+ <event event="0x7d" title="Instruction" name="DSB" description="Barrier speculatively executed - DSB"/>
+ <event event="0x7e" title="Instruction" name="DMB" description="Barrier speculatively executed - DMB"/>
+ </category>
diff --git a/tools/gator/daemon/events-Cortex-A5.xml b/tools/gator/daemon/events-Cortex-A5.xml
new file mode 100644
index 000000000000..4a894d37fbfb
--- /dev/null
+++ b/tools/gator/daemon/events-Cortex-A5.xml
@@ -0,0 +1,36 @@
+ <counter_set name="ARM_Cortex-A5_cnt" count="2"/>
+ <category name="Cortex-A5" counter_set="ARM_Cortex-A5_cnt" per_cpu="yes" supports_event_based_sampling="yes">
+ <event counter="ARM_Cortex-A5_ccnt" title="Clock" name="Cycles" display="hertz" units="Hz" average_selection="yes" description="The number of core clock cycles"/>
+ <event event="0x00" title="Software" name="Increment" description="Incremented only on writes to the Software Increment Register"/>
+ <event event="0x01" title="Cache" name="Instruction refill" description="Instruction fetch that causes a refill of at least the level of instruction or unified cache closest to the processor"/>
+ <event event="0x02" title="Cache" name="Inst TLB refill" description="Instruction fetch that causes a TLB refill of at least the level of TLB closest to the processor"/>
+ <event event="0x03" title="Cache" name="Data refill" description="Memory Read or Write operation that causes a refill of at least the level of data or unified cache closest to the processor"/>
+ <event event="0x04" title="Cache" name="Data access" description="Memory Read or Write operation that causes a cache access to at least the level of data or unified cache closest to the processor"/>
+ <event event="0x05" title="Cache" name="Data TLB refill" description="Memory Read or Write operation that causes a TLB refill of at least the level of TLB closest to the processor"/>
+ <event event="0x06" title="Instruction" name="Memory read" description="Memory-reading instruction architecturally executed"/>
+ <event event="0x07" title="Instruction" name="Memory write" description="Memory-writing instruction architecturally executed"/>
+ <event event="0x08" title="Instruction" name="Executed" description="Instruction architecturally executed"/>
+ <event event="0x09" title="Exception" name="Taken" description="Exceptions taken"/>
+ <event event="0x0a" title="Exception" name="Return" description="Exception return architecturally executed"/>
+ <event event="0x0b" title="Instruction" name="CONTEXTIDR" description="Instruction that writes to the CONTEXTIDR architecturally executed"/>
+ <event event="0x0c" title="Branch" name="PC change" description="Software change of the Program Counter, except by an exception, architecturally executed"/>
+ <event event="0x0d" title="Branch" name="Immediate" description="Immediate branch architecturally executed"/>
+ <event event="0x0e" title="Procedure" name="Return" description="Procedure return, other than exception return, architecturally executed"/>
+ <event event="0x0f" title="Memory" name="Unaligned access" description="Unaligned access architecturally executed"/>
+ <event event="0x10" title="Branch" name="Mispredicted" description="Branch mispredicted or not predicted"/>
+ <event event="0x12" title="Branch" name="Potential prediction" description="Branch or other change in program flow that could have been predicted by the branch prediction resources of the processor"/>
+ <event event="0x13" title="Memory" name="Memory access" description="Data memory access"/>
+ <event event="0x14" title="Cache" name="Instruction access" description="Instruction cache access"/>
+ <event event="0x15" title="Cache" name="Data eviction" description="Data cache eviction"/>
+ <event event="0x86" title="Interrupts" name="IRQ" description="IRQ exception taken"/>
+ <event event="0x87" title="Interrupts" name="FIQ" description="FIQ exception taken"/>
+ <event event="0xC0" title="Memory" name="External request" description="External memory request"/>
+ <event event="0xC1" title="Memory" name="Non-cacheable ext req" description="Non-cacheable external memory request"/>
+ <event event="0xC2" title="Cache" name="Linefill" description="Linefill because of prefetch"/>
+ <event event="0xC3" title="Cache" name="Linefill dropped" description="Prefetch linefill dropped"/>
+ <event event="0xC4" title="Cache" name="Allocate mode enter" description="Entering read allocate mode"/>
+ <event event="0xC5" title="Cache" name="Allocate mode" description="Read allocate mode"/>
+ <event event="0xC7" title="ETM" name="ETM Ext Out[0]" description=""/>
+ <event event="0xC8" title="ETM" name="ETM Ext Out[1]" description=""/>
+ <event event="0xC9" title="Instruction" name="Pipeline stall" description="Data Write operation that stalls the pipeline because the store buffer is full"/>
+ </category>
diff --git a/tools/gator/daemon/events-Cortex-A53.xml b/tools/gator/daemon/events-Cortex-A53.xml
new file mode 100644
index 000000000000..3fa9c6625a24
--- /dev/null
+++ b/tools/gator/daemon/events-Cortex-A53.xml
@@ -0,0 +1,171 @@
+ <counter_set name="ARM_Cortex-A53_cnt" count="6"/>
+ <category name="Cortex-A53" counter_set="ARM_Cortex-A53_cnt" per_cpu="yes" supports_event_based_sampling="yes">
+ <!-- 0x11 CPU_CYCLES - Cycle -->
+ <event counter="ARM_Cortex-A53_ccnt" title="Clock" name="Cycles" display="hertz" units="Hz" average_selection="yes" description="The number of core clock cycles"/>
+ <!-- 0x00 SW_INCR - Instruction architecturally executed (condition check pass) - Software increment -->
+ <event event="0x00" title="Software" name="Increment" description="Incremented only on writes to the Software Increment Register"/>
+ <!-- 0x01 L1I_CACHE_REFILL - Level 1 instruction cache refill -->
+ <event event="0x01" title="Cache" name="Instruction refill" description="Instruction fetch that causes a refill of at least the level of instruction or unified cache closest to the processor"/>
+ <!-- 0x02 L1I_TLB_REFILL - Level 1 instruction TLB refill -->
+ <event event="0x02" title="Cache" name="Inst TLB refill" description="Instruction fetch that causes a TLB refill of at least the level of TLB closest to the processor"/>
+ <!-- 0x03 L1D_CACHE_REFILL - Level 1 data cache refill -->
+ <event event="0x03" title="Cache" name="Data refill" description="Memory Read or Write operation that causes a refill of at least the level of data or unified cache closest to the processor"/>
+ <!-- 0x04 L1D_CACHE - Level 1 data cache access -->
+ <event event="0x04" title="Cache" name="Data access" description="Memory Read or Write operation that causes a cache access to at least the level of data or unified cache closest to the processor"/>
+ <!-- 0x05 L1D_TLB_REFILL - Level 1 data TLB refill -->
+ <event event="0x05" title="Cache" name="Data TLB refill" description="Memory Read or Write operation that causes a TLB refill of at least the level of TLB closest to the processor"/>
+ <!-- 0x08 INST_RETIRED - Instruction architecturally executed -->
+ <event event="0x08" title="-" name="INST_RETIRED" description="Instruction architecturally executed"/>
+ <!-- 0x09 EXC_TAKEN - Exception taken -->
+ <event event="0x09" title="Exception" name="Taken" description="Exceptions taken"/>
+ <!-- 0x0A EXC_RETURN - Instruction architecturally executed (condition check pass) - Exception return -->
+ <event event="0x0a" title="Exception" name="Return" description="Exception return architecturally executed"/>
+ <!-- 0x0B CID_WRITE_RETIRED - Instruction architecturally executed (condition check pass) - Write to CONTEXTIDR -->
+ <event event="0x0b" title="Instruction" name="CONTEXTIDR" description="Instruction that writes to the CONTEXTIDR architecturally executed"/>
+ <!-- 0x10 BR_MIS_PRED - Mispredicted or not predicted branch speculatively executed -->
+ <event event="0x10" title="Branch" name="Mispredicted" description="Branch mispredicted or not predicted"/>
+ <!-- 0x12 BR_PRED - Predictable branch speculatively executed -->
+ <event event="0x12" title="Branch" name="Potential prediction" description="Branch or other change in program flow that could have been predicted by the branch prediction resources of the processor"/>
+ <!-- 0x13 MEM_ACCESS - Data memory access -->
+ <event event="0x13" title="-" name="MEM_ACCESS" description="Data memory access"/>
+ <!-- 0x14 L1I_CACHE - Level 1 instruction cache access -->
+ <event event="0x14" title="-" name="L1I_CACHE" description="Level 1 instruction cache access"/>
+ <!-- 0x15 L1D_CACHE_WB - Level 1 data cache Write-Back -->
+ <event event="0x15" title="-" name="L1D_CACHE_WB" description="Level 1 data cache Write-Back"/>
+ <!-- 0x16 L2D_CACHE - Level 2 data cache access -->
+ <event event="0x16" title="-" name="L2D_CACHE" description="Level 2 data cache access"/>
+ <!-- 0x17 L2D_CACHE_REFILL - Level 2 data cache refill -->
+ <event event="0x17" title="-" name="L2D_CACHE_REFILL" description="Level 2 data cache refill"/>
+ <!-- 0x18 L2D_CACHE_WB - Level 2 data cache Write-Back -->
+ <event event="0x18" title="-" name="L2D_CACHE_WB" description="Level 2 data cache Write-Back"/>
+ <!-- 0x19 BUS_ACCESS - Bus access -->
+ <event event="0x19" title="-" name="BUS_ACCESS" description="Bus access"/>
+ <!-- 0x1A MEMORY_ERROR - Local memory error -->
+ <event event="0x1A" title="-" name="MEMORY_ERROR" description="Local memory error"/>
+ <!-- 0x1B INST_SPEC - Operation speculatively executed -->
+ <event event="0x1B" title="-" name="INST_SPEC" description="Operation speculatively executed"/>
+ <!-- 0x1C TTBR_WRITE_RETIRED - Instruction architecturally executed (condition check pass) - Write to translation table base -->
+ <event event="0x1C" title="-" name="TTBR_WRITE_RETIRED" description="Instruction architecturally executed (condition check pass) - Write to translation table base"/>
+ <!-- 0x1D BUS_CYCLES - Bus cycle -->
+ <event event="0x1D" title="-" name="BUS_CYCLES" description="Bus cycle"/>
+ <!-- 0x1E CHAIN - Odd performance counter chain mode -->
+ <event event="0x1E" title="-" name="CHAIN" description="Odd performance counter chain mode"/>
+ <!-- 0x40 L1D_CACHE_LD - Level 1 data cache access - Read -->
+ <event event="0x40" title="-" name="L1D_CACHE_LD" description="Level 1 data cache access - Read"/>
+ <!-- 0x41 L1D_CACHE_ST - Level 1 data cache access - Write -->
+ <event event="0x41" title="-" name="L1D_CACHE_ST" description="Level 1 data cache access - Write"/>
+ <!-- 0x42 L1D_CACHE_REFILL_LD - Level 1 data cache refill - Read -->
+ <event event="0x42" title="-" name="L1D_CACHE_REFILL_LD" description="Level 1 data cache refill - Read"/>
+ <!-- 0x43 L1D_CACHE_REFILL_ST - Level 1 data cache refill - Write -->
+ <event event="0x43" title="-" name="L1D_CACHE_REFILL_ST" description="Level 1 data cache refill - Write"/>
+ <!-- 0x46 L1D_CACHE_WB_VICTIM - Level 1 data cache Write-back - Victim -->
+ <event event="0x46" title="-" name="L1D_CACHE_WB_VICTIM" description="Level 1 data cache Write-back - Victim"/>
+ <!-- 0x47 L1D_CACHE_WB_CLEAN - Level 1 data cache Write-back - Cleaning and coherency -->
+ <event event="0x47" title="-" name="L1D_CACHE_WB_CLEAN" description="Level 1 data cache Write-back - Cleaning and coherency"/>
+ <!-- 0x48 L1D_CACHE_INVAL - Level 1 data cache invalidate -->
+ <event event="0x48" title="-" name="L1D_CACHE_INVAL" description="Level 1 data cache invalidate"/>
+ <!-- 0x4C L1D_TLB_REFILL_LD - Level 1 data TLB refill - Read -->
+ <event event="0x4C" title="-" name="L1D_TLB_REFILL_LD" description="Level 1 data TLB refill - Read"/>
+ <!-- 0x4D L1D_TLB_REFILL_ST - Level 1 data TLB refill - Write -->
+ <event event="0x4D" title="-" name="L1D_TLB_REFILL_ST" description="Level 1 data TLB refill - Write"/>
+ <!-- 0x50 L2D_CACHE_LD - Level 2 data cache access - Read -->
+ <event event="0x50" title="-" name="L2D_CACHE_LD" description="Level 2 data cache access - Read"/>
+ <!-- 0x51 L2D_CACHE_ST - Level 2 data cache access - Write -->
+ <event event="0x51" title="-" name="L2D_CACHE_ST" description="Level 2 data cache access - Write"/>
+ <!-- 0x52 L2D_CACHE_REFILL_LD - Level 2 data cache refill - Read -->
+ <event event="0x52" title="-" name="L2D_CACHE_REFILL_LD" description="Level 2 data cache refill - Read"/>
+ <!-- 0x53 L2D_CACHE_REFILL_ST - Level 2 data cache refill - Write -->
+ <event event="0x53" title="-" name="L2D_CACHE_REFILL_ST" description="Level 2 data cache refill - Write"/>
+ <!-- 0x56 L2D_CACHE_WB_VICTIM - Level 2 data cache Write-back - Victim -->
+ <event event="0x56" title="-" name="L2D_CACHE_WB_VICTIM" description="Level 2 data cache Write-back - Victim"/>
+ <!-- 0x57 L2D_CACHE_WB_CLEAN - Level 2 data cache Write-back - Cleaning and coherency -->
+ <event event="0x57" title="-" name="L2D_CACHE_WB_CLEAN" description="Level 2 data cache Write-back - Cleaning and coherency"/>
+ <!-- 0x58 L2D_CACHE_INVAL - Level 2 data cache invalidate -->
+ <event event="0x58" title="-" name="L2D_CACHE_INVAL" description="Level 2 data cache invalidate"/>
+ <!-- 0x60 BUS_ACCESS_LD - Bus access - Read -->
+ <event event="0x60" title="-" name="BUS_ACCESS_LD" description="Bus access - Read"/>
+ <!-- 0x61 BUS_ACCESS_ST - Bus access - Write -->
+ <event event="0x61" title="-" name="BUS_ACCESS_ST" description="Bus access - Write"/>
+ <!-- 0x62 BUS_ACCESS_SHARED - Bus access - Normal -->
+ <event event="0x62" title="-" name="BUS_ACCESS_SHARED" description="Bus access - Normal"/>
+ <!-- 0x63 BUS_ACCESS_NOT_SHARED - Bus access - Not normal -->
+ <event event="0x63" title="-" name="BUS_ACCESS_NOT_SHARED" description="Bus access - Not normal"/>
+ <!-- 0x64 BUS_ACCESS_NORMAL - Bus access - Normal -->
+ <event event="0x64" title="-" name="BUS_ACCESS_NORMAL" description="Bus access - Normal"/>
+ <!-- 0x65 BUS_ACCESS_PERIPH - Bus access - Peripheral -->
+ <event event="0x65" title="-" name="BUS_ACCESS_PERIPH" description="Bus access - Peripheral"/>
+ <!-- 0x66 MEM_ACCESS_LD - Data memory access - Read -->
+ <event event="0x66" title="-" name="MEM_ACCESS_LD" description="Data memory access - Read"/>
+ <!-- 0x67 MEM_ACCESS_ST - Data memory access - Write -->
+ <event event="0x67" title="-" name="MEM_ACCESS_ST" description="Data memory access - Write"/>
+ <!-- 0x68 UNALIGNED_LD_SPEC - Unaligned access - Read -->
+ <event event="0x68" title="-" name="UNALIGNED_LD_SPEC" description="Unaligned access - Read"/>
+ <!-- 0x69 UNALIGNED_ST_SPEC - Unaligned access - Write -->
+ <event event="0x69" title="-" name="UNALIGNED_ST_SPEC" description="Unaligned access - Write"/>
+ <!-- 0x6A UNALIGNED_LDST_SPEC - Unaligned access -->
+ <event event="0x6A" title="-" name="UNALIGNED_LDST_SPEC" description="Unaligned access"/>
+ <!-- 0x6C LDREX_SPEC - Exclusive operation speculatively executed - LDREX -->
+ <event event="0x6C" title="-" name="LDREX_SPEC" description="Exclusive operation speculatively executed - LDREX"/>
+ <!-- 0x6D STREX_PASS_SPEC - Exclusive instruction speculatively executed - STREX pass -->
+ <event event="0x6D" title="-" name="STREX_PASS_SPEC" description="Exclusive instruction speculatively executed - STREX pass"/>
+ <!-- 0x6E STREX_FAIL_SPEC - Exclusive operation speculatively executed - STREX fail -->
+ <event event="0x6E" title="-" name="STREX_FAIL_SPEC" description="Exclusive operation speculatively executed - STREX fail"/>
+ <!-- 0x70 LD_SPEC - Operation speculatively executed - Load -->
+ <event event="0x70" title="-" name="LD_SPEC" description="Operation speculatively executed - Load"/>
+ <!-- 0x71 ST_SPEC - Operation speculatively executed - Store -->
+ <event event="0x71" title="-" name="ST_SPEC" description="Operation speculatively executed - Store"/>
+ <!-- 0x72 LDST_SPEC - Operation speculatively executed - Load or store -->
+ <event event="0x72" title="-" name="LDST_SPEC" description="Operation speculatively executed - Load or store"/>
+ <!-- 0x73 DP_SPEC - Operation speculatively executed - Integer data processing -->
+ <event event="0x73" title="-" name="DP_SPEC" description="Operation speculatively executed - Integer data processing"/>
+ <!-- 0x74 ASE_SPEC - Operation speculatively executed - Advanced SIMD -->
+ <event event="0x74" title="-" name="ASE_SPEC" description="Operation speculatively executed - Advanced SIMD"/>
+ <!-- 0x75 VFP_SPEC - Operation speculatively executed - VFP -->
+ <event event="0x75" title="-" name="VFP_SPEC" description="Operation speculatively executed - VFP"/>
+ <!-- 0x76 PC_WRITE_SPEC - Operation speculatively executed - Software change of the PC -->
+ <event event="0x76" title="-" name="PC_WRITE_SPEC" description="Operation speculatively executed - Software change of the PC"/>
+ <!-- 0x77 CRYPTO_SPEC - Operation speculatively executed, crypto data processing -->
+ <event event="0x77" title="-" name="CRYPTO_SPEC" description="Operation speculatively executed, crypto data processing"/>
+ <!-- 0x78 BR_IMMED_SPEC - Branch speculatively executed - Immediate branch -->
+ <event event="0x78" title="-" name="BR_IMMED_SPEC" description="Branch speculatively executed - Immediate branch"/>
+ <!-- 0x79 BR_RETURN_SPEC - Branch speculatively executed - Procedure return -->
+ <event event="0x79" title="-" name="BR_RETURN_SPEC" description="Branch speculatively executed - Procedure return"/>
+ <!-- 0x7A BR_INDIRECT_SPEC - Branch speculatively executed - Indirect branch -->
+ <event event="0x7A" title="-" name="BR_INDIRECT_SPEC" description="Branch speculatively executed - Indirect branch"/>
+ <!-- 0x7C ISB_SPEC - Barrier speculatively executed - ISB -->
+ <event event="0x7C" title="-" name="ISB_SPEC" description="Barrier speculatively executed - ISB"/>
+ <!-- 0x7D DSB_SPEC - Barrier speculatively executed - DSB -->
+ <event event="0x7D" title="-" name="DSB_SPEC" description="Barrier speculatively executed - DSB"/>
+ <!-- 0x7E DMB_SPEC - Barrier speculatively executed - DMB -->
+ <event event="0x7E" title="-" name="DMB_SPEC" description="Barrier speculatively executed - DMB"/>
+ <!-- 0x81 EXC_UNDEF - Exception taken, other synchronous -->
+ <event event="0x81" title="-" name="EXC_UNDEF" description="Exception taken, other synchronous"/>
+ <!-- 0x82 EXC_SVC - Exception taken, Supervisor Call -->
+ <event event="0x82" title="-" name="EXC_SVC" description="Exception taken, Supervisor Call"/>
+ <!-- 0x83 EXC_PABORT - Exception taken, Instruction Abort -->
+ <event event="0x83" title="-" name="EXC_PABORT" description="Exception taken, Instruction Abort"/>
+ <!-- 0x84 EXC_DABORT - Exception taken, Data Abort or SError -->
+ <event event="0x84" title="-" name="EXC_DABORT" description="Exception taken, Data Abort or SError"/>
+ <!-- 0x86 EXC_IRQ - Exception taken, IRQ -->
+ <event event="0x86" title="-" name="EXC_IRQ" description="Exception taken, IRQ"/>
+ <!-- 0x87 EXC_FIQ - Exception taken, FIQ -->
+ <event event="0x87" title="-" name="EXC_FIQ" description="Exception taken, FIQ"/>
+ <!-- 0x88 EXC_SMC - Exception taken, Secure Monitor Call -->
+ <event event="0x88" title="-" name="EXC_SMC" description="Exception taken, Secure Monitor Call"/>
+ <!-- 0x8A EXC_HVC - Exception taken, Hypervisor Call -->
+ <event event="0x8A" title="-" name="EXC_HVC" description="Exception taken, Hypervisor Call"/>
+ <!-- 0x8B EXC_TRAP_PABORT - Exception taken, Instruction Abort not taken locally -->
+ <event event="0x8B" title="-" name="EXC_TRAP_PABORT" description="Exception taken, Instruction Abort not taken locally"/>
+ <!-- 0x8C EXC_TRAP_DABORT - Exception taken, Data Abort or SError not taken locally -->
+ <event event="0x8C" title="-" name="EXC_TRAP_DABORT" description="Exception taken, Data Abort or SError not taken locally"/>
+ <!-- 0x8D EXC_TRAP_OTHER - Exception taken, Other traps not taken locally -->
+ <event event="0x8D" title="-" name="EXC_TRAP_OTHER" description="Exception taken, Other traps not taken locally"/>
+ <!-- 0x8E EXC_TRAP_IRQ - Exception taken, IRQ not taken locally -->
+ <event event="0x8E" title="-" name="EXC_TRAP_IRQ" description="Exception taken, IRQ not taken locally"/>
+ <!-- 0x8F EXC_TRAP_FIQ - Exception taken, FIQ not taken locally -->
+ <event event="0x8F" title="-" name="EXC_TRAP_FIQ" description="Exception taken, FIQ not taken locally"/>
+ <!-- 0x90 RC_LD_SPEC - Release consistency instruction speculatively executed, Load Acquire -->
+ <event event="0x90" title="-" name="RC_LD_SPEC" description="Release consistency instruction speculatively executed, Load Acquire"/>
+ <!-- 0x91 RC_ST_SPEC - Release consistency instruction speculatively executed, Store Release -->
+ <event event="0x91" title="-" name="RC_ST_SPEC" description="Release consistency instruction speculatively executed, Store Release"/>
+ </category>
diff --git a/tools/gator/daemon/events-Cortex-A57.xml b/tools/gator/daemon/events-Cortex-A57.xml
new file mode 100644
index 000000000000..5db6dc28cd74
--- /dev/null
+++ b/tools/gator/daemon/events-Cortex-A57.xml
@@ -0,0 +1,171 @@
+ <counter_set name="ARM_Cortex-A57_cnt" count="6"/>
+ <category name="Cortex-A57" counter_set="ARM_Cortex-A57_cnt" per_cpu="yes" supports_event_based_sampling="yes">
+ <!-- 0x11 CPU_CYCLES - Cycle -->
+ <event counter="ARM_Cortex-A57_ccnt" title="Clock" name="Cycles" display="hertz" units="Hz" average_selection="yes" description="The number of core clock cycles"/>
+ <!-- 0x00 SW_INCR - Instruction architecturally executed number (condition check pass) - Software increment -->
+ <event event="0x00" title="Software" name="Increment" description="Incremented only on writes to the Software Increment Register"/>
+ <!-- 0x01 L1I_CACHE_REFILL - Level 1 instruction cache refill -->
+ <event event="0x01" title="Cache" name="Instruction refill" description="Instruction fetch that causes a refill of at least the level of instruction or unified cache closest to the processor"/>
+ <!-- 0x02 L1I_TLB_REFILL - Level 1 instruction TLB refill -->
+ <event event="0x02" title="Cache" name="Inst TLB refill" description="Instruction fetch that causes a TLB refill of at least the level of TLB closest to the processor"/>
+ <!-- 0x03 L1D_CACHE_REFILL - Level 1 data cache refill -->
+ <event event="0x03" title="Cache" name="Data refill" description="Memory Read or Write operation that causes a refill of at least the level of data or unified cache closest to the processor"/>
+ <!-- 0x04 L1D_CACHE - Level 1 data cache access -->
+ <event event="0x04" title="Cache" name="Data access" description="Memory Read or Write operation that causes a cache access to at least the level of data or unified cache closest to the processor"/>
+ <!-- 0x05 L1D_TLB_REFILL - Level 1 data TLB refill -->
+ <event event="0x05" title="Cache" name="Data TLB refill" description="Memory Read or Write operation that causes a TLB refill of at least the level of TLB closest to the processor"/>
+ <!-- 0x08 INST_RETIRED - Instruction architecturally executed -->
+ <event event="0x08" title="-" name="INST_RETIRED" description="Instruction architecturally executed"/>
+ <!-- 0x09 EXC_TAKEN - Exception taken -->
+ <event event="0x09" title="Exception" name="Taken" description="Exceptions taken"/>
+ <!-- 0x0A EXC_RETURN - Instruction architecturally executed (condition check pass) - Exception return -->
+ <event event="0x0a" title="Exception" name="Return" description="Exception return architecturally executed"/>
+ <!-- 0x0B CID_WRITE_RETIRED - Instruction architecturally executed (condition check pass) - Write to CONTEXTIDR -->
+ <event event="0x0b" title="Instruction" name="CONTEXTIDR" description="Instruction that writes to the CONTEXTIDR architecturally executed"/>
+ <!-- 0x10 BR_MIS_PRED - Mispredicted or not predicted branch speculatively executed -->
+ <event event="0x10" title="Branch" name="Mispredicted" description="Branch mispredicted or not predicted"/>
+ <!-- 0x12 BR_PRED - Predictable branch speculatively executed -->
+ <event event="0x12" title="Branch" name="Potential prediction" description="Branch or other change in program flow that could have been predicted by the branch prediction resources of the processor"/>
+ <!-- 0x13 MEM_ACCESS - Data memory access -->
+ <event event="0x13" title="-" name="MEM_ACCESS" description="Data memory access"/>
+ <!-- 0x14 L1I_CACHE - Level 1 instruction cache access -->
+ <event event="0x14" title="-" name="L1I_CACHE" description="Level 1 instruction cache access"/>
+ <!-- 0x15 L1D_CACHE_WB - Level 1 data cache Write-Back -->
+ <event event="0x15" title="-" name="L1D_CACHE_WB" description="Level 1 data cache Write-Back"/>
+ <!-- 0x16 L2D_CACHE - Level 2 data cache access -->
+ <event event="0x16" title="-" name="L2D_CACHE" description="Level 2 data cache access"/>
+ <!-- 0x17 L2D_CACHE_REFILL - Level 2 data cache refill -->
+ <event event="0x17" title="-" name="L2D_CACHE_REFILL" description="Level 2 data cache refill"/>
+ <!-- 0x18 L2D_CACHE_WB - Level 2 data cache Write-Back -->
+ <event event="0x18" title="-" name="L2D_CACHE_WB" description="Level 2 data cache Write-Back"/>
+ <!-- 0x19 BUS_ACCESS - Bus access -->
+ <event event="0x19" title="-" name="BUS_ACCESS" description="Bus access"/>
+ <!-- 0x1A MEMORY_ERROR - Local memory error -->
+ <event event="0x1A" title="-" name="MEMORY_ERROR" description="Local memory error"/>
+ <!-- 0x1B INST_SPEC - Operation speculatively executed -->
+ <event event="0x1B" title="-" name="INST_SPEC" description="Operation speculatively executed"/>
+ <!-- 0x1C TTBR_WRITE_RETIRED - Instruction architecturally executed (condition check pass) - Write to translation table base -->
+ <event event="0x1C" title="-" name="TTBR_WRITE_RETIRED" description="Instruction architecturally executed (condition check pass) - Write to translation table base"/>
+ <!-- 0x1D BUS_CYCLES - Bus cycle -->
+ <event event="0x1D" title="-" name="BUS_CYCLES" description="Bus cycle"/>
+ <!-- 0x1E CHAIN - Odd performance counter chain mode -->
+ <event event="0x1E" title="-" name="CHAIN" description="Odd performance counter chain mode"/>
+ <!-- 0x40 L1D_CACHE_LD - Level 1 data cache access - Read -->
+ <event event="0x40" title="-" name="L1D_CACHE_LD" description="Level 1 data cache access - Read"/>
+ <!-- 0x41 L1D_CACHE_ST - Level 1 data cache access - Write -->
+ <event event="0x41" title="-" name="L1D_CACHE_ST" description="Level 1 data cache access - Write"/>
+ <!-- 0x42 L1D_CACHE_REFILL_LD - Level 1 data cache refill - Read -->
+ <event event="0x42" title="-" name="L1D_CACHE_REFILL_LD" description="Level 1 data cache refill - Read"/>
+ <!-- 0x43 L1D_CACHE_REFILL_ST - Level 1 data cache refill - Write -->
+ <event event="0x43" title="-" name="L1D_CACHE_REFILL_ST" description="Level 1 data cache refill - Write"/>
+ <!-- 0x46 L1D_CACHE_WB_VICTIM - Level 1 data cache Write-back - Victim -->
+ <event event="0x46" title="-" name="L1D_CACHE_WB_VICTIM" description="Level 1 data cache Write-back - Victim"/>
+ <!-- 0x47 L1D_CACHE_WB_CLEAN - Level 1 data cache Write-back - Cleaning and coherency -->
+ <event event="0x47" title="-" name="L1D_CACHE_WB_CLEAN" description="Level 1 data cache Write-back - Cleaning and coherency"/>
+ <!-- 0x48 L1D_CACHE_INVAL - Level 1 data cache invalidate -->
+ <event event="0x48" title="-" name="L1D_CACHE_INVAL" description="Level 1 data cache invalidate"/>
+ <!-- 0x4C L1D_TLB_REFILL_LD - Level 1 data TLB refill - Read -->
+ <event event="0x4C" title="-" name="L1D_TLB_REFILL_LD" description="Level 1 data TLB refill - Read"/>
+ <!-- 0x4D L1D_TLB_REFILL_ST - Level 1 data TLB refill - Write -->
+ <event event="0x4D" title="-" name="L1D_TLB_REFILL_ST" description="Level 1 data TLB refill - Write"/>
+ <!-- 0x50 L2D_CACHE_LD - Level 2 data cache access - Read -->
+ <event event="0x50" title="-" name="L2D_CACHE_LD" description="Level 2 data cache access - Read"/>
+ <!-- 0x51 L2D_CACHE_ST - Level 2 data cache access - Write -->
+ <event event="0x51" title="-" name="L2D_CACHE_ST" description="Level 2 data cache access - Write"/>
+ <!-- 0x52 L2D_CACHE_REFILL_LD - Level 2 data cache refill - Read -->
+ <event event="0x52" title="-" name="L2D_CACHE_REFILL_LD" description="Level 2 data cache refill - Read"/>
+ <!-- 0x53 L2D_CACHE_REFILL_ST - Level 2 data cache refill - Write -->
+ <event event="0x53" title="-" name="L2D_CACHE_REFILL_ST" description="Level 2 data cache refill - Write"/>
+ <!-- 0x56 L2D_CACHE_WB_VICTIM - Level 2 data cache Write-back - Victim -->
+ <event event="0x56" title="-" name="L2D_CACHE_WB_VICTIM" description="Level 2 data cache Write-back - Victim"/>
+ <!-- 0x57 L2D_CACHE_WB_CLEAN - Level 2 data cache Write-back - Cleaning and coherency -->
+ <event event="0x57" title="-" name="L2D_CACHE_WB_CLEAN" description="Level 2 data cache Write-back - Cleaning and coherency"/>
+ <!-- 0x58 L2D_CACHE_INVAL - Level 2 data cache invalidate -->
+ <event event="0x58" title="-" name="L2D_CACHE_INVAL" description="Level 2 data cache invalidate"/>
+ <!-- 0x60 BUS_ACCESS_LD - Bus access - Read -->
+ <event event="0x60" title="-" name="BUS_ACCESS_LD" description="Bus access - Read"/>
+ <!-- 0x61 BUS_ACCESS_ST - Bus access - Write -->
+ <event event="0x61" title="-" name="BUS_ACCESS_ST" description="Bus access - Write"/>
+ <!-- 0x62 BUS_ACCESS_SHARED - Bus access - Normal -->
+ <event event="0x62" title="-" name="BUS_ACCESS_SHARED" description="Bus access - Normal"/>
+ <!-- 0x63 BUS_ACCESS_NOT_SHARED - Bus access - Not normal -->
+ <event event="0x63" title="-" name="BUS_ACCESS_NOT_SHARED" description="Bus access - Not normal"/>
+ <!-- 0x64 BUS_ACCESS_NORMAL - Bus access - Normal -->
+ <event event="0x64" title="-" name="BUS_ACCESS_NORMAL" description="Bus access - Normal"/>
+ <!-- 0x65 BUS_ACCESS_PERIPH - Bus access - Peripheral -->
+ <event event="0x65" title="-" name="BUS_ACCESS_PERIPH" description="Bus access - Peripheral"/>
+ <!-- 0x66 MEM_ACCESS_LD - Data memory access - Read -->
+ <event event="0x66" title="-" name="MEM_ACCESS_LD" description="Data memory access - Read"/>
+ <!-- 0x67 MEM_ACCESS_ST - Data memory access - Write -->
+ <event event="0x67" title="-" name="MEM_ACCESS_ST" description="Data memory access - Write"/>
+ <!-- 0x68 UNALIGNED_LD_SPEC - Unaligned access - Read -->
+ <event event="0x68" title="-" name="UNALIGNED_LD_SPEC" description="Unaligned access - Read"/>
+ <!-- 0x69 UNALIGNED_ST_SPEC - Unaligned access - Write -->
+ <event event="0x69" title="-" name="UNALIGNED_ST_SPEC" description="Unaligned access - Write"/>
+ <!-- 0x6A UNALIGNED_LDST_SPEC - Unaligned access -->
+ <event event="0x6A" title="-" name="UNALIGNED_LDST_SPEC" description="Unaligned access"/>
+ <!-- 0x6C LDREX_SPEC - Exclusive operation speculatively executed - LDREX -->
+ <event event="0x6C" title="-" name="LDREX_SPEC" description="Exclusive operation speculatively executed - LDREX"/>
+ <!-- 0x6D STREX_PASS_SPEC - Exclusive instruction speculatively executed - STREX pass -->
+ <event event="0x6D" title="-" name="STREX_PASS_SPEC" description="Exclusive instruction speculatively executed - STREX pass"/>
+ <!-- 0x6E STREX_FAIL_SPEC - Exclusive operation speculatively executed - STREX fail -->
+ <event event="0x6E" title="-" name="STREX_FAIL_SPEC" description="Exclusive operation speculatively executed - STREX fail"/>
+ <!-- 0x70 LD_SPEC - Operation speculatively executed - Load -->
+ <event event="0x70" title="-" name="LD_SPEC" description="Operation speculatively executed - Load"/>
+ <!-- 0x71 ST_SPEC - Operation speculatively executed - Store -->
+ <event event="0x71" title="-" name="ST_SPEC" description="Operation speculatively executed - Store"/>
+ <!-- 0x72 LDST_SPEC - Operation speculatively executed - Load or store -->
+ <event event="0x72" title="-" name="LDST_SPEC" description="Operation speculatively executed - Load or store"/>
+ <!-- 0x73 DP_SPEC - Operation speculatively executed - Integer data processing -->
+ <event event="0x73" title="-" name="DP_SPEC" description="Operation speculatively executed - Integer data processing"/>
+ <!-- 0x74 ASE_SPEC - Operation speculatively executed - Advanced SIMD -->
+ <event event="0x74" title="-" name="ASE_SPEC" description="Operation speculatively executed - Advanced SIMD"/>
+ <!-- 0x75 VFP_SPEC - Operation speculatively executed - VFP -->
+ <event event="0x75" title="-" name="VFP_SPEC" description="Operation speculatively executed - VFP"/>
+ <!-- 0x76 PC_WRITE_SPEC - Operation speculatively executed - Software change of the PC -->
+ <event event="0x76" title="-" name="PC_WRITE_SPEC" description="Operation speculatively executed - Software change of the PC"/>
+ <!-- 0x77 CRYPTO_SPEC - Operation speculatively executed, crypto data processing -->
+ <event event="0x77" title="-" name="CRYPTO_SPEC" description="Operation speculatively executed, crypto data processing"/>
+ <!-- 0x78 BR_IMMED_SPEC - Branch speculatively executed - Immediate branch -->
+ <event event="0x78" title="-" name="BR_IMMED_SPEC" description="Branch speculatively executed - Immediate branch"/>
+ <!-- 0x79 BR_RETURN_SPEC - Branch speculatively executed - Procedure return -->
+ <event event="0x79" title="-" name="BR_RETURN_SPEC" description="Branch speculatively executed - Procedure return"/>
+ <!-- 0x7A BR_INDIRECT_SPEC - Branch speculatively executed - Indirect branch -->
+ <event event="0x7A" title="-" name="BR_INDIRECT_SPEC" description="Branch speculatively executed - Indirect branch"/>
+ <!-- 0x7C ISB_SPEC - Barrier speculatively executed - ISB -->
+ <event event="0x7C" title="-" name="ISB_SPEC" description="Barrier speculatively executed - ISB"/>
+ <!-- 0x7D DSB_SPEC - Barrier speculatively executed - DSB -->
+ <event event="0x7D" title="-" name="DSB_SPEC" description="Barrier speculatively executed - DSB"/>
+ <!-- 0x7E DMB_SPEC - Barrier speculatively executed - DMB -->
+ <event event="0x7E" title="-" name="DMB_SPEC" description="Barrier speculatively executed - DMB"/>
+ <!-- 0x81 EXC_UNDEF - Exception taken, other synchronous -->
+ <event event="0x81" title="-" name="EXC_UNDEF" description="Exception taken, other synchronous"/>
+ <!-- 0x82 EXC_SVC - Exception taken, Supervisor Call -->
+ <event event="0x82" title="-" name="EXC_SVC" description="Exception taken, Supervisor Call"/>
+ <!-- 0x83 EXC_PABORT - Exception taken, Instruction Abort -->
+ <event event="0x83" title="-" name="EXC_PABORT" description="Exception taken, Instruction Abort"/>
+ <!-- 0x84 EXC_DABORT - Exception taken, Data Abort or SError -->
+ <event event="0x84" title="-" name="EXC_DABORT" description="Exception taken, Data Abort or SError"/>
+ <!-- 0x86 EXC_IRQ - Exception taken, IRQ -->
+ <event event="0x86" title="-" name="EXC_IRQ" description="Exception taken, IRQ"/>
+ <!-- 0x87 EXC_FIQ - Exception taken, FIQ -->
+ <event event="0x87" title="-" name="EXC_FIQ" description="Exception taken, FIQ"/>
+ <!-- 0x88 EXC_SMC - Exception taken, Secure Monitor Call -->
+ <event event="0x88" title="-" name="EXC_SMC" description="Exception taken, Secure Monitor Call"/>
+ <!-- 0x8A EXC_HVC - Exception taken, Hypervisor Call -->
+ <event event="0x8A" title="-" name="EXC_HVC" description="Exception taken, Hypervisor Call"/>
+ <!-- 0x8B EXC_TRAP_PABORT - Exception taken, Instruction Abort not taken locally -->
+ <event event="0x8B" title="-" name="EXC_TRAP_PABORT" description="Exception taken, Instruction Abort not taken locally"/>
+ <!-- 0x8C EXC_TRAP_DABORT - Exception taken, Data Abort or SError not taken locally -->
+ <event event="0x8C" title="-" name="EXC_TRAP_DABORT" description="Exception taken, Data Abort or SError not taken locally"/>
+ <!-- 0x8D EXC_TRAP_OTHER - Exception taken, Other traps not taken locally -->
+ <event event="0x8D" title="-" name="EXC_TRAP_OTHER" description="Exception taken, Other traps not taken locally"/>
+ <!-- 0x8E EXC_TRAP_IRQ - Exception taken, IRQ not taken locally -->
+ <event event="0x8E" title="-" name="EXC_TRAP_IRQ" description="Exception taken, IRQ not taken locally"/>
+ <!-- 0x8F EXC_TRAP_FIQ - Exception taken, FIQ not taken locally -->
+ <event event="0x8F" title="-" name="EXC_TRAP_FIQ" description="Exception taken, FIQ not taken locally"/>
+ <!-- 0x90 RC_LD_SPEC - Release consistency instruction speculatively executed, Load Acquire -->
+ <event event="0x90" title="-" name="RC_LD_SPEC" description="Release consistency instruction speculatively executed, Load Acquire"/>
+ <!-- 0x91 RC_ST_SPEC - Release consistency instruction speculatively executed, Store Release -->
+ <event event="0x91" title="-" name="RC_ST_SPEC" description="Release consistency instruction speculatively executed, Store Release"/>
+ </category>
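
The event codes in these XML tables are raw PMU event numbers, so outside of gatord they can also be counted through the kernel's perf interface. The following is a minimal sketch, assuming the 0x86 EXC_IRQ code from the table above and a local name EVENT_EXC_IRQ for it; it is not how gatord itself reads the counters, only an illustration of programming the same raw event via perf_event_open().

    /*
     * Illustrative only (not part of this patch): count one raw PMU event,
     * here 0x86 EXC_IRQ from the table above, for the current thread.
     * EVENT_EXC_IRQ is a local name chosen for this sketch.
     */
    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EVENT_EXC_IRQ 0x86

    int main(void)
    {
            struct perf_event_attr attr;
            uint64_t count;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_RAW;      /* raw, CPU-specific event number */
            attr.config = EVENT_EXC_IRQ;
            attr.disabled = 1;

            /* Count for this thread on any CPU; needs suitable privileges. */
            fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd < 0) {
                    perror("perf_event_open");
                    return 1;
            }

            ioctl(fd, PERF_EVENT_IOC_RESET, 0);
            ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
            /* ... workload under measurement would run here ... */
            ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

            if (read(fd, &count, sizeof(count)) == sizeof(count))
                    printf("raw event 0x%llx: %llu\n",
                           (unsigned long long)attr.config,
                           (unsigned long long)count);
            close(fd);
            return 0;
    }
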
diff --git a/tools/gator/daemon/events-Cortex-A7.xml b/tools/gator/daemon/events-Cortex-A7.xml
new file mode 100644
index 000000000000..50bba7f3b351
--- /dev/null
+++ b/tools/gator/daemon/events-Cortex-A7.xml
@@ -0,0 +1,44 @@
+ <counter_set name="ARM_Cortex-A7_cnt" count="4"/>
+ <category name="Cortex-A7" counter_set="ARM_Cortex-A7_cnt" per_cpu="yes" supports_event_based_sampling="yes">
+ <event counter="ARM_Cortex-A7_ccnt" title="Clock" name="Cycles" display="hertz" units="Hz" average_selection="yes" description="The number of core clock cycles"/>
+ <event event="0x00" title="Software" name="Increment" description="Software increment architecturally executed"/>
+ <event event="0x01" title="Cache" name="Instruction refill" description="Instruction fetch that causes a refill of at least the level of instruction or unified cache closest to the processor"/>
+ <event event="0x02" title="Cache" name="Inst TLB refill" description="Instruction fetch that causes a TLB refill of at least the level of TLB closest to the processor"/>
+ <event event="0x03" title="Cache" name="Data refill" description="Memory Read or Write operation that causes a refill of at least the level of data or unified cache closest to the processor"/>
+ <event event="0x04" title="Cache" name="Data access" description="Memory Read or Write operation that causes a cache access to at least the level of data or unified cache closest to the processor"/>
+ <event event="0x05" title="Cache" name="Data TLB refill" description="Memory Read or Write operation that causes a TLB refill of at least the level of TLB closest to the processor"/>
+ <event event="0x06" title="Memory" name="Data Read" description="Data read architecturally executed"/>
+ <event event="0x07" title="Memory" name="Data Write" description="Data write architecturally executed"/>
+ <event event="0x08" title="Instruction" name="Executed" description="Instruction architecturally executed"/>
+ <event event="0x09" title="Exception" name="Taken" description="Exceptions taken"/>
+ <event event="0x0a" title="Exception" name="Return" description="Exception return architecturally executed"/>
+ <event event="0x0b" title="Instruction" name="CONTEXTIDR" description="Instruction that writes to the CONTEXTIDR architecturally executed"/>
+ <event event="0x0c" title="Branch" name="PC change" description="Software change of the Program Counter, except by an exception, architecturally executed"/>
+ <event event="0x0d" title="Branch" name="Immediate" description="Immediate branch architecturally executed"/>
+ <event event="0x0f" title="Memory" name="Unaligned access" description="Unaligned access architecturally executed"/>
+ <event event="0x10" title="Branch" name="Mispredicted" description="Branch mispredicted or not predicted"/>
+ <event event="0x11" title="Cycle" name="Counter" description=""/>
+ <event event="0x12" title="Branch" name="Potential prediction" description="Branch or other change in program flow that could have been predicted by the branch prediction resources of the processor"/>
+ <event event="0x13" title="Memory" name="Memory access" description="Data memory access"/>
+ <event event="0x14" title="Cache" name="L1 inst access" description="Instruction cache access"/>
+ <event event="0x15" title="Cache" name="L1 data eviction" description="Level 1 data cache eviction"/>
+ <event event="0x16" title="Cache" name="L2 data access" description="Level 2 data cache access"/>
+ <event event="0x17" title="Cache" name="L2 data refill" description="Level 2 data cache refill"/>
+ <event event="0x18" title="Cache" name="L2 data write" description="Level 2 data cache Write-Back"/>
+ <event event="0x19" title="Bus" name="Access" description=""/>
+ <event event="0x1d" title="Bus" name="Cycle" description=""/>
+ <event event="0x60" title="Bus" name="Read" description="Bus access - Read"/>
+ <event event="0x61" title="Bus" name="Write" description="Bus access - Write"/>
+ <event event="0x86" title="Exception" name="IRQ" description="IRQ exception taken"/>
+ <event event="0x87" title="Exception" name="FIQ" description="FIQ exception taken"/>
+ <event event="0xC0" title="Memory" name="External request" description="External memory request"/>
+ <event event="0xC1" title="Memory" name="Non-cacheable ext req" description="Non-cacheable external memory request"/>
+ <event event="0xC2" title="Cache" name="Linefill" description="Linefill because of prefetch"/>
+ <event event="0xC3" title="Cache" name="Linefill dropped" description="Prefetch linefill dropped"/>
+ <event event="0xC4" title="Cache" name="Allocate mode enter" description="Entering read allocate mode"/>
+ <event event="0xC5" title="Cache" name="Allocate mode" description="Read allocate mode"/>
+ <event event="0xC7" title="ETM" name="ETM Ext Out[0]" description=""/>
+ <event event="0xC8" title="ETM" name="ETM Ext Out[1]" description=""/>
+ <event event="0xC9" title="Instruction" name="Pipeline stall" description="Data Write operation that stalls the pipeline because the store buffer is full"/>
+ <event event="0xCA" title="Memory" name="Snoop" description="Data snooped from other processor. This event counts memory-read operations that read data from another processor within the local cluster, rather than accessing the L2 cache or issuing an external read."/>
+ </category>
diff --git a/tools/gator/daemon/events-Cortex-A8.xml b/tools/gator/daemon/events-Cortex-A8.xml
new file mode 100644
index 000000000000..fe4c69d09cdc
--- /dev/null
+++ b/tools/gator/daemon/events-Cortex-A8.xml
@@ -0,0 +1,52 @@
+ <counter_set name="ARM_Cortex-A8_cnt" count="4"/>
+ <category name="Cortex-A8" counter_set="ARM_Cortex-A8_cnt" per_cpu="yes" supports_event_based_sampling="yes">
+ <event counter="ARM_Cortex-A8_ccnt" title="Clock" name="Cycles" display="hertz" units="Hz" average_selection="yes" description="The number of core clock cycles"/>
+ <event event="0x00" title="Software" name="Increment" description="Incremented only on writes to the Software Increment Register"/>
+ <event event="0x01" title="Cache" name="Instruction refill" description="Instruction fetch that causes a refill of at least the level of instruction or unified cache closest to the processor"/>
+ <event event="0x02" title="Cache" name="Inst TLB refill" description="Instruction fetch that causes a TLB refill of at least the level of TLB closest to the processor"/>
+ <event event="0x03" title="Cache" name="Data refill" description="Memory Read or Write operation that causes a refill of at least the level of data or unified cache closest to the processor"/>
+ <event event="0x04" title="Cache" name="Data access" description="Memory Read or Write operation that causes a cache access to at least the level of data or unified cache closest to the processor"/>
+ <event event="0x05" title="Cache" name="Data TLB refill" description="Memory Read or Write operation that causes a TLB refill of at least the level of TLB closest to the processor"/>
+ <event event="0x06" title="Instruction" name="Memory read" description="Memory-reading instruction architecturally executed"/>
+ <event event="0x07" title="Instruction" name="Memory write" description="Memory-writing instruction architecturally executed"/>
+ <event event="0x08" title="Instruction" name="Executed" description="Instruction architecturally executed"/>
+ <event event="0x09" title="Exception" name="Taken" description="Exceptions taken"/>
+ <event event="0x0a" title="Exception" name="Return" description="Exception return architecturally executed"/>
+ <event event="0x0b" title="Instruction" name="CONTEXTIDR" description="Instruction that writes to the CONTEXTIDR architecturally executed"/>
+ <event event="0x0c" title="Branch" name="PC change" description="Software change of the Program Counter, except by an exception, architecturally executed"/>
+ <event event="0x0d" title="Branch" name="Immediate" description="Immediate branch architecturally executed"/>
+ <event event="0x0e" title="Procedure" name="Return" description="Procedure return, other than exception return, architecturally executed"/>
+ <event event="0x0f" title="Memory" name="Unaligned access" description="Unaligned access architecturally executed"/>
+ <event event="0x10" title="Branch" name="Mispredicted" description="Branch mispredicted or not predicted"/>
+ <event event="0x12" title="Branch" name="Potential prediction" description="Branch or other change in program flow that could have been predicted by the branch prediction resources of the processor"/>
+ <event event="0x40" title="Cache" name="Write buffer full" description="Any write buffer full cycle"/>
+ <event event="0x41" title="Cache" name="L2 store" description="Any store that is merged in the L2 memory system"/>
+ <event event="0x42" title="Cache" name="Bufferable transaction" description="Any bufferable store transaction from load/store to L2 cache, excluding eviction or cast out data"/>
+ <event event="0x43" title="Cache" name="L1 miss" description="Any accesses to the L2 cache"/>
+ <event event="0x44" title="Cache" name="L2 miss" description="Any cacheable miss in the L2 cache"/>
+ <event event="0x45" title="AXI" name="Read" description="The number of AXI read data transfers"/>
+ <event event="0x46" title="AXI" name="Write" description="The number of AXI write data transfers"/>
+ <event event="0x47" title="Memory" name="Replay event" description="Any replay event in the memory system"/>
+ <event event="0x48" title="Memory" name="Unaligned access" description="Any unaligned memory access that results in a replay"/>
+ <event event="0x49" title="Cache" name="L1 data hash miss" description="Any L1 data memory access that misses in the cache as a result of the hashing algorithm"/>
+ <event event="0x4a" title="Cache" name="L1 inst hash miss" description="Any L1 instruction memory access that misses in the cache as a result of the hashing algorithm"/>
+ <event event="0x4b" title="Cache" name="L1 page coloring" description="Any L1 data memory access in which a page coloring alias occurs"/>
+ <event event="0x4c" title="NEON" name="L1 cache hit" description="Any NEON access that hits in the L1 data cache"/>
+ <event event="0x4d" title="NEON" name="L1 cache access" description="Any NEON cacheable data accesses for L1 data cache"/>
+ <event event="0x4e" title="NEON" name="L2 cache access" description="Any L2 cache accesses as a result of a NEON memory access"/>
+ <event event="0x4f" title="NEON" name="L2 cache hit" description="Any NEON hit in the L2 cache"/>
+ <event event="0x50" title="Cache" name="L1 inst access" description="Any L1 instruction cache access, excluding CP15 cache accesses"/>
+ <event event="0x51" title="Branch" name="Return stack misprediction" description="Any return stack misprediction because of incorrect target address for a taken return stack pop"/>
+ <event event="0x52" title="Branch" name="Direction misprediction" description="Branch direction misprediction"/>
+ <event event="0x53" title="Branch" name="Taken prediction" description="Any predictable branch that is predicted to be taken"/>
+ <event event="0x54" title="Branch" name="Executed and taken prediction" description="Any predictable branch that is executed and taken"/>
+ <event event="0x55" title="Core" name="Operations issued" description="Number of operations issued, where an operation is either: an instruction or one operation in a sequence of operations that make up a multi-cycle instruction"/>
+ <event event="0x56" title="Core" name="No issue cycles" description="Increment for every cycle that no instructions are available for issue"/>
+ <event event="0x57" title="Core" name="Issue cycles" description="For every cycle, this event counts the number of instructions issued in that cycle. Multi-cycle instructions are only counted once"/>
+ <event event="0x58" title="NEON" name="MRC data wait" description="Number of cycles the processor stalls waiting on MRC data from NEON"/>
+ <event event="0x59" title="NEON" name="Full queue" description="Number of cycles that the processor stalls as a result of a full NEON instruction queue or NEON load queue"/>
+ <event event="0x5a" title="NEON" name="Idle" description="Number of cycles that NEON and integer processors are both not idle"/>
+ <event event="0x70" title="External" name="PMUEXTIN[0]" description="Counts any event from external input source PMUEXTIN[0]"/>
+ <event event="0x71" title="External" name="PMUEXTIN[1]" description="Counts any event from external input source PMUEXTIN[1]"/>
+ <event event="0x72" title="External" name="PMUEXTIN[0,1]" description="Counts any event from both external input sources PMUEXTIN[0] and PMUEXTIN[1]"/>
+ </category>
diff --git a/tools/gator/daemon/events-Cortex-A9.xml b/tools/gator/daemon/events-Cortex-A9.xml
new file mode 100644
index 000000000000..89d6a199a057
--- /dev/null
+++ b/tools/gator/daemon/events-Cortex-A9.xml
@@ -0,0 +1,65 @@
+ <counter_set name="ARM_Cortex-A9_cnt" count="6"/>
+ <category name="Cortex-A9" counter_set="ARM_Cortex-A9_cnt" per_cpu="yes" supports_event_based_sampling="yes">
+ <event counter="ARM_Cortex-A9_ccnt" title="Clock" name="Cycles" display="hertz" units="Hz" average_selection="yes" description="The number of core clock cycles"/>
+ <event event="0x00" title="Software" name="Increment" description="Incremented only on writes to the Software Increment Register"/>
+ <event event="0x01" title="Cache" name="Instruction refill" description="Instruction fetch that causes a refill of at least the level of instruction or unified cache closest to the processor"/>
+ <event event="0x02" title="Cache" name="Inst TLB refill" description="Instruction fetch that causes a TLB refill of at least the level of TLB closest to the processor"/>
+ <event event="0x03" title="Cache" name="Data refill" description="Memory Read or Write operation that causes a refill of at least the level of data or unified cache closest to the processor"/>
+ <event event="0x04" title="Cache" name="Data access" description="Memory Read or Write operation that causes a cache access to at least the level of data or unified cache closest to the processor"/>
+ <event event="0x05" title="Cache" name="Data TLB refill" description="Memory Read or Write operation that causes a TLB refill of at least the level of TLB closest to the processor"/>
+ <event event="0x06" title="Instruction" name="Memory read" description="Memory-reading instruction architecturally executed"/>
+ <event event="0x07" title="Instruction" name="Memory write" description="Memory-writing instruction architecturally executed"/>
+ <event event="0x09" title="Exception" name="Taken" description="Exceptions taken"/>
+ <event event="0x0a" title="Exception" name="Return" description="Exception return architecturally executed"/>
+ <event event="0x0b" title="Instruction" name="CONTEXTIDR" description="Instruction that writes to the CONTEXTIDR architecturally executed"/>
+ <event event="0x0c" title="Branch" name="PC change" description="Software change of the Program Counter, except by an exception, architecturally executed"/>
+ <event event="0x0d" title="Branch" name="Immediate" description="Immediate branch architecturally executed"/>
+ <event event="0x0f" title="Memory" name="Unaligned access" description="Unaligned access architecturally executed"/>
+ <event event="0x10" title="Branch" name="Mispredicted" description="Branch mispredicted or not predicted"/>
+ <event event="0x12" title="Branch" name="Potential prediction" description="Branch or other change in program flow that could have been predicted by the branch prediction resources of the processor"/>
+ <event event="0x40" title="Java" name="Bytecode execute" description="Counts the number of Java bytecodes being decoded, including speculative ones"/>
+ <event event="0x41" title="Java" name="SW bytecode execute" description="Counts the number of software java bytecodes being decoded, including speculative ones"/>
+ <event event="0x42" title="Jazelle" name="Backward branch execute" description="Counts the number of Jazelle taken branches being executed"/>
+ <event event="0x50" title="Cache" name="Coherency miss" description="Counts the number of coherent linefill requests performed by the Cortex-A9 processor which also miss in all the other Cortex-A9 processors, meaning that the request is sent to the external memory"/>
+ <event event="0x51" title="Cache" name="Coherency hit" description="Counts the number of coherent linefill requests performed by the Cortex-A9 processor which hit in another Cortex-A9 processor, meaning that the linefill data is fetched directly from the relevant Cortex-A9 cache"/>
+ <event event="0x60" title="Cache" name="Inst dependent stall" description="Counts the number of cycles where the processor is ready to accept new instructions, but does not receive any because of the instruction side not being able to provide any and the instruction cache is currently performing at least one linefill"/>
+ <event event="0x61" title="Cache" name="Data dependent stall" description="Counts the number of cycles where the core has some instructions that it cannot issue to any pipeline, and the Load Store unit has at least one pending linefill request, and no pending TLB requests"/>
+ <event event="0x62" title="Cache" name="TLB stall" description="Counts the number of cycles where the processor is stalled waiting for the completion of translation table walks from the main TLB"/>
+ <event event="0x63" title="Intrinsic" name="STREX pass" description="Counts the number of STREX instructions architecturally executed and passed"/>
+ <event event="0x64" title="Intrinsic" name="STREX fail" description="Counts the number of STREX instructions architecturally executed and failed"/>
+ <event event="0x65" title="Cache" name="Data eviction" description="Counts the number of eviction requests because of a linefill in the data cache"/>
+ <event event="0x66" title="Pipeline" name="Issue stage no dispatch" description="Counts the number of cycles where the issue stage does not dispatch any instruction because it is empty or cannot dispatch any instructions"/>
+ <event event="0x67" title="Pipeline" name="Issue stage empty" description="Counts the number of cycles where the issue stage is empty"/>
+ <event event="0x68" title="Instruction" name="Executed" description="Counts the number of instructions going through the Register Renaming stage. This number is an approximate number of the total number of instructions speculatively executed, and even more approximate of the total number of instructions architecturally executed"/>
+ <event event="0x69" title="Cache" name="Data linefills" description="Counts the number of linefills performed on the external AXI bus"/>
+ <event event="0x6A" title="Cache" name="Prefetch linefills" description="Counts the number of data linefills caused by prefetcher requests"/>
+ <event event="0x6B" title="Cache" name="Prefetch hits" description="Counts the number of cache hits in a line that belongs to a stream followed by the prefetcher"/>
+ <event event="0x6E" title="Core" name="Functions" description="Counts the number of procedure returns whose condition codes do not fail, excluding all returns from exception"/>
+ <event event="0x70" title="Instruction" name="Main execution unit" description="Counts the number of instructions being executed in the main execution pipeline of the processor, the multiply pipeline and arithmetic logic unit pipeline"/>
+ <event event="0x71" title="Instruction" name="Second execution unit" description="Counts the number of instructions being executed in the processor second execution pipeline (ALU)"/>
+ <event event="0x72" title="Instruction" name="Load/Store" description="Counts the number of instructions being executed in the Load/Store unit"/>
+ <event event="0x73" title="Instruction" name="Floating point" description="Counts the number of Floating-point instructions going through the Register Rename stage"/>
+ <event event="0x74" title="Instruction" name="NEON" description="Counts the number of NEON instructions going through the Register Rename stage"/>
+ <event event="0x80" title="Stalls" name="PLD" description="Counts the number of cycles where the processor is stalled because PLD slots are all full"/>
+ <event event="0x81" title="Stalls" name="Memory write" description="Counts the number of cycles when the processor is stalled and the data side is stalled too because it is full and executing writes to the external memory"/>
+ <event event="0x82" title="Stalls" name="Inst main TLB miss" description="Counts the number of stall cycles because of main TLB misses on requests issued by the instruction side"/>
+ <event event="0x83" title="Stalls" name="Data main TLB miss" description="Counts the number of stall cycles because of main TLB misses on requests issued by the data side"/>
+ <event event="0x84" title="Stalls" name="Inst micro TLB miss" description="Counts the number of stall cycles because of micro TLB misses on the instruction side"/>
+ <event event="0x85" title="Stalls" name="Data micro TLB miss" description="Counts the number of stall cycles because of micro TLB misses on the data side"/>
+ <event event="0x86" title="Stalls" name="DMB" description="Counts the number of stall cycles because of the execution of a DMB memory barrier"/>
+ <event event="0x8A" title="Clock" name="Integer core" description="Counts the number of cycles during which the integer core clock is enabled"/>
+ <event event="0x8B" title="Clock" name="Data engine" description="Counts the number of cycles during which the Data Engine clock is enabled"/>
+ <event event="0x8C" title="Clock" name="NEON" description="Counts the number of cycles when the NEON SIMD clock is enabled"/>
+ <event event="0x8D" title="Memory" name="TLB inst allocations" description="Counts the number of TLB allocations because of Instruction requests"/>
+ <event event="0x8E" title="Memory" name="TLB data allocations" description="Counts the number of TLB allocations because of Data requests"/>
+ <event event="0x90" title="Instruction" name="ISB" description="Counts the number of ISB instructions architecturally executed"/>
+ <event event="0x91" title="Instruction" name="DSB" description="Counts the number of DSB instructions architecturally executed"/>
+ <event event="0x92" title="Instruction" name="DMB" description="Counts the number of DMB instructions speculatively executed"/>
+ <event event="0x93" title="External" name="Interrupts" description="Counts the number of external interrupts executed by the processor"/>
+ <event event="0xA0" title="PLE" name="Cache line rq completed" description="Counts the number of PLE cache line requests completed"/>
+ <event event="0xA1" title="PLE" name="Cache line rq skipped" description="Counts the number of PLE cache line requests skipped"/>
+ <event event="0xA2" title="PLE" name="FIFO flush" description="Counts the number of PLE FIFO flush requests"/>
+ <event event="0xA3" title="PLE" name="Request completed" description="Counts the number of PLE FIFO flush completed"/>
+ <event event="0xA4" title="PLE" name="FIFO overflow" description="Counts the number of PLE FIFO flush overflowed"/>
+ <event event="0xA5" title="PLE" name="Request programmed" description="Counts the number of PLE FIFO flush program requests"/>
+ </category>
diff --git a/tools/gator/daemon/events-Krait-architected.xml b/tools/gator/daemon/events-Krait-architected.xml
new file mode 100644
index 000000000000..06c1901e5b54
--- /dev/null
+++ b/tools/gator/daemon/events-Krait-architected.xml
@@ -0,0 +1,22 @@
+ <counter_set name="Krait_cnt" count="4"/>
+ <category name="Krait" counter_set="Krait_cnt" per_cpu="yes" supports_event_based_sampling="yes">
+ <event counter="Krait_ccnt" title="Clock" name="Cycles" display="hertz" units="Hz" average_selection="yes" description="The number of core clock cycles"/>
+ <event event="0x00" title="Software" name="Increment" description="Incremented only on writes to the Software Increment Register"/>
+ <event event="0x01" title="Cache" name="Instruction refill" description="Instruction fetch that causes a refill of at least the level of instruction or unified cache closest to the processor"/>
+ <event event="0x02" title="Cache" name="Inst TLB refill" description="Instruction fetch that causes a TLB refill of at least the level of TLB closest to the processor"/>
+ <event event="0x03" title="Cache" name="Data refill" description="Memory Read or Write operation that causes a refill of at least the level of data or unified cache closest to the processor"/>
+ <event event="0x04" title="Cache" name="Data access" description="Memory Read or Write operation that causes a cache access to at least the level of data or unified cache closest to the processor"/>
+ <event event="0x05" title="Cache" name="Data TLB refill" description="Memory Read or Write operation that causes a TLB refill of at least the level of TLB closest to the processor"/>
+ <event event="0x06" title="Instruction" name="Memory read" description="Memory-reading instruction architecturally executed"/>
+ <event event="0x07" title="Instruction" name="Memory write" description="Memory-writing instruction architecturally executed"/>
+ <event event="0x08" title="Instruction" name="Executed" description="Instruction architecturally executed"/>
+ <event event="0x09" title="Exception" name="Taken" description="Exceptions taken"/>
+ <event event="0x0a" title="Exception" name="Return" description="Exception return architecturally executed"/>
+ <event event="0x0b" title="Instruction" name="CONTEXTIDR" description="Instruction that writes to the CONTEXTIDR architecturally executed"/>
+ <event event="0x0c" title="Program Counter" name="SW change" description="Software change of PC, except by an exception, architecturally executed"/>
+ <event event="0x0d" title="Branch" name="Immediate" description="Immediate branch architecturally executed"/>
+ <event event="0x0e" title="Branch" name="Procedure Return" description="Procedure return architecturally executed (not by exceptions)"/>
+ <event event="0x0f" title="Memory" name="Unaligned access" description="Unaligned access architecturally executed"/>
+ <event event="0x10" title="Branch" name="Mispredicted" description="Branch mispredicted or not predicted"/>
+ <event event="0x12" title="Branch" name="Potential prediction" description="Branch or other change in program flow that could have been predicted by the branch prediction resources of the processor"/>
+ </category>
diff --git a/tools/gator/daemon/events-L2C-310.xml b/tools/gator/daemon/events-L2C-310.xml
new file mode 100644
index 000000000000..4da4d1d63431
--- /dev/null
+++ b/tools/gator/daemon/events-L2C-310.xml
@@ -0,0 +1,18 @@
+ <counter_set name="L2C-310_cnt" count="2"/>
+ <category name="L2C-310" counter_set="L2C-310_cnt" per_cpu="no">
+ <event event="0x1" title="L2 Cache" name="CO" description="Eviction, CastOUT, of a line from the L2 cache"/>
+ <event event="0x2" title="L2 Cache" name="DRH" description="Data read hit"/>
+ <event event="0x3" title="L2 Cache" name="DRREQ" description="Data read request"/>
+ <event event="0x4" title="L2 Cache" name="DWHIT" description="Data write hit"/>
+ <event event="0x5" title="L2 Cache" name="DWREQ" description="Data write request"/>
+ <event event="0x6" title="L2 Cache" name="DWTREQ" description="Data write request with write-through attribute"/>
+ <event event="0x7" title="L2 Cache" name="IRHIT" description="Instruction read hit"/>
+ <event event="0x8" title="L2 Cache" name="IRREQ" description="Instruction read request"/>
+ <event event="0x9" title="L2 Cache" name="WA" description="Write allocate"/>
+ <event event="0xa" title="L2 Cache" name="IPFALLOC" description="Allocation of a prefetch generated by L2C-310 into the L2 cache"/>
+ <event event="0xb" title="L2 Cache" name="EPFHIT" description="Prefetch hint hits in the L2 cache"/>
+ <event event="0xc" title="L2 Cache" name="EPFALLOC" description="Prefetch hint allocated into the L2 cache"/>
+ <event event="0xd" title="L2 Cache" name="SRRCVD" description="Speculative read received"/>
+ <event event="0xe" title="L2 Cache" name="SRCONF" description="Speculative read confirmed"/>
+ <event event="0xf" title="L2 Cache" name="EPFRCVD" description="Prefetch hint received"/>
+ </category>
diff --git a/tools/gator/daemon/events-Linux.xml b/tools/gator/daemon/events-Linux.xml
new file mode 100644
index 000000000000..42baf1438866
--- /dev/null
+++ b/tools/gator/daemon/events-Linux.xml
@@ -0,0 +1,15 @@
+ <category name="Linux">
+ <event counter="Linux_irq_softirq" title="Interrupts" name="SoftIRQ" per_cpu="yes" description="Linux SoftIRQ taken"/>
+ <event counter="Linux_irq_irq" title="Interrupts" name="IRQ" per_cpu="yes" description="Linux IRQ taken"/>
+ <event counter="Linux_block_rq_wr" title="Disk IO" name="Write" units="B" description="Disk IO Bytes Written"/>
+ <event counter="Linux_block_rq_rd" title="Disk IO" name="Read" units="B" description="Disk IO Bytes Read"/>
+ <event counter="Linux_net_rx" title="Network" name="Receive" units="B" description="Receive network traffic, including effect from Streamline"/>
+ <event counter="Linux_net_tx" title="Network" name="Transmit" units="B" description="Transmit network traffic, including effect from Streamline"/>
+ <event counter="Linux_sched_switch" title="Scheduler" name="Switch" per_cpu="yes" description="Context switch events"/>
+ <event counter="Linux_meminfo_memused" title="Memory" name="Used" display="maximum" units="B" average_selection="yes" description="Total used memory size"/>
+ <event counter="Linux_meminfo_memfree" title="Memory" name="Free" display="minimum" units="B" average_selection="yes" description="Available memory size"/>
+ <event counter="Linux_meminfo_bufferram" title="Memory" name="Buffer" display="maximum" units="B" average_selection="yes" description="Memory used by OS disk buffers"/>
+ <event counter="Linux_power_cpu_freq" title="Clock" name="Frequency" per_cpu="yes" display="maximum" units="Hz" average_selection="yes" description="Frequency setting of the CPU"/>
+ <event counter="Linux_power_cpu_idle" title="Power" name="Idle" per_cpu="yes" display="maximum" average_selection="yes" description="CPU Idle State + 1, set the Sample Rate to None to prevent the hrtimer from interrupting the system"/>
+ </category>
+
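
The Linux counters above map onto standard kernel data sources such as tracepoints, /proc/meminfo and the cpufreq sysfs files. As a rough sketch of the kind of interface a counter like Linux_power_cpu_freq corresponds to (an assumption about the data source, not gatord's actual collection code), the snippet below reads the current frequency of CPU 0 from sysfs and converts it from kHz to Hz, matching the Hz units declared in the XML.

    /*
     * Illustrative sketch only: read the cpufreq value that a counter such
     * as Linux_power_cpu_freq reflects. scaling_cur_freq reports kHz.
     */
    #include <stdio.h>

    int main(void)
    {
            unsigned long khz = 0;
            FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq", "r");

            if (!f) {
                    perror("scaling_cur_freq");
                    return 1;
            }
            if (fscanf(f, "%lu", &khz) != 1) {
                    fclose(f);
                    return 1;
            }
            fclose(f);
            printf("cpu0 frequency: %lu kHz (%lu Hz)\n", khz, khz * 1000UL);
            return 0;
    }
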
diff --git a/tools/gator/daemon/events-Mali-400.xml b/tools/gator/daemon/events-Mali-400.xml
new file mode 100644
index 000000000000..cb0b9d51c11c
--- /dev/null
+++ b/tools/gator/daemon/events-Mali-400.xml
@@ -0,0 +1,382 @@
+ <counter_set name="ARM_Mali-400_VP_cnt" count="2"/>
+ <counter_set name="ARM_Mali-400_FP0_cnt" count="2"/>
+ <counter_set name="ARM_Mali-400_FP1_cnt" count="2"/>
+ <counter_set name="ARM_Mali-400_FP2_cnt" count="2"/>
+ <counter_set name="ARM_Mali-400_FP3_cnt" count="2"/>
+ <counter_set name="ARM_Mali-400_L2_cnt" count="2"/>
+ <counter_set name="ARM_Mali-400_SW_cnt" count="0"/>
+ <counter_set name="ARM_Mali-400_Filmstrip_cnt" count="1"/>
+ <category name="Mali-400-VP" counter_set="ARM_Mali-400_VP_cnt" per_cpu="no">
+ <event event="0x01" title="Mali GPU Vertex Processor" name="Active cycles" description="Number of cycles per frame the MaliGP2 was active."/>
+ <event event="0x02" title="Mali GPU Vertex Processor" name="Active cycles, vertex shader" description="Number of cycles per frame the vertex shader unit was active."/>
+ <event event="0x03" title="Mali GPU Vertex Processor" name="Active cycles, vertex storer" description="Number of cycles per frame the vertex storer unit was active."/>
+ <event event="0x04" title="Mali GPU Vertex Processor" name="Active cycles, vertex loader" description="Number of cycles per frame the vertex loader unit was active."/>
+ <event event="0x05" title="Mali GPU Vertex Processor" name="Cycles vertex loader waiting for vertex shader" description="Number of cycles per frame the vertex loader was idle while waiting on the vertex shader."/>
+ <event event="0x06" title="Mali GPU Vertex Processor" name="Words read, system bus" description="Total number of 64 bit words read by the GP2 from the system bus per frame."/>
+ <event event="0x07" title="Mali GPU Vertex Processor" name="Words written, system bus" description="Total number of 64 bit words written by the GP2 to the system bus per frame."/>
+ <event event="0x08" title="Mali GPU Vertex Processor" name="Read bursts, system bus" description="Number of read bursts by the GP2 from the system bus per frame."/>
+ <event event="0x09" title="Mali GPU Vertex Processor" name="Write bursts, system bus" description="Number of write bursts from the MaliGP2 to the system bus per frame."/>
+ <event event="0x0a" title="Mali GPU Vertex Processor" name="Vertices processed" description="Number of vertices processed by the MaliGP2 per frame."/>
+ <event event="0x0b" title="Mali GPU Vertex Processor" name="Vertices fetched" description="Number of vertices fetched by the MaliGP2 per frame."/>
+ <event event="0x0c" title="Mali GPU Vertex Processor" name="Primitives fetched" description="Number of graphics primitives fetched by the MaliGP2 per frame."/>
+ <event event="0x0e" title="Mali GPU Vertex Processor" name="Primitives culled" description="Number of graphics primitives discarded per frame, because they were seen from the back or were offscreen."/>
+ <event event="0x0f" title="Mali GPU Vertex Processor" name="Commands written to tiles" description="Number of commands (8 Bytes, mainly primitives) written by GP2 to the PP input data structure per frame."/>
+ <event event="0x10" title="Mali GPU Vertex Processor" name="Memory blocks allocated" description="Number of overflow data blocks needed for outputting the PP input data structure per frame ."/>
+ <event event="0x13" title="Mali GPU Vertex Processor" name="Vertex loader cache misses" description="Number of cache misses for the vertex shader's vertex input unit per frame."/>
+ <event event="0x16" title="Mali GPU Vertex Processor" name="Active cycles, vertex shader command processor" description="Number of cycles per frame the GP2 vertex shader command processor was active. This includes time waiting for semaphores."/>
+ <event event="0x17" title="Mali GPU Vertex Processor" name="Active cycles, PLBU command processor" description="Number of cycles per frame the MaliGP2 PLBU command processor was active. This includes time waiting for semaphores."/>
+ <event event="0x18" title="Mali GPU Vertex Processor" name="MaliGP2 PLBU cycles per frame" description="Number of cycles per frame the MaliGP2 PLBU output unit was active. This includes time spent waiting on the bus."/>
+ <event event="0x19" title="Mali GPU Vertex Processor" name="Active cycles, PLBU geometry processing" description="Number of cycles per frame the MaliGP2 PLBU was active, excepting final data output. In other words: active cycles through the prepare list commands. This includes time spent waiting on the bus."/>
+ <event event="0x1b" title="Mali GPU Vertex Processor" name="Active cycles, PLBU primitive assembly" description="Number of active cycles per frame spent by the MaliGP2 PLBU doing primitive assembly. This does not include scissoring or final output. This includes time spent waiting on the bus."/>
+ <event event="0x1c" title="Mali GPU Vertex Processor" name="Active cycles, PLBU vertex fetcher" description="Number of active cycles per frame spent by the MaliGP2 PLBU fetching vertex data. This includes time spent waiting on the bus."/>
+ <event event="0x1e" title="Mali GPU Vertex Processor" name="Active cycles, Bounding-box and command generator" description="Number of active cycles per frame spent by the MaliGP2 PLBU setting up bounding boxes and commands (mainly graphics primitives). This includes time spent waiting on the bus."/>
+ <event event="0x20" title="Mali GPU Vertex Processor" name="Active cycles, Scissor tile iterator" description="Number of active cycles per frame spent by the MaliGP2 PLBU iterating over tiles to perform scissoring. This includes time spent waiting on the bus."/>
+ <event event="0x21" title="Mali GPU Vertex Processor" name="Active cycles, PLBU tile iterator" description="Number of active cycles per frame spent by the MaliGP2 PLBU iterating over the tiles in the bounding box generating commands (mainly graphics primitives). This includes time spent waiting on the bus."/>
+ </category>
+ <category name="Mali-400-FP0" counter_set="ARM_Mali-400_FP0_cnt" per_cpu="no">
+ <event event="0x00" title="Mali GPU Fragment Processor 0" name="Active clock cycles" description="Active clock cycles, between polygon start and IRQ."/>
+ <event event="0x02" title="Mali GPU Fragment Processor 0" name="Total bus reads" description="Total number of 64-bit words read from the bus."/>
+ <event event="0x03" title="Mali GPU Fragment Processor 0" name="Total bus writes" description="Total number of 64-bit words written to the bus."/>
+ <event event="0x04" title="Mali GPU Fragment Processor 0" name="Bus read request cycles" description="Number of cycles during which the bus read request signal was HIGH."/>
+ <event event="0x05" title="Mali GPU Fragment Processor 0" name="Bus write request cycles" description="Number of cycles during which the bus write request signal was HIGH."/>
+ <event event="0x06" title="Mali GPU Fragment Processor 0" name="Bus read transactions count" description="Number of read requests accepted by the bus."/>
+ <event event="0x07" title="Mali GPU Fragment Processor 0" name="Bus write transactions" description="Number of write requests accepted by the bus."/>
+ <event event="0x09" title="Mali GPU Fragment Processor 0" name="Tile writeback writes" description="64-bit words written to the bus by the writeback unit."/>
+ <event event="0x0a" title="Mali GPU Fragment Processor 0" name="Store unit writes" description="64-bit words written to the bus by the store unit."/>
+ <event event="0x0d" title="Mali GPU Fragment Processor 0" name="Texture cache uncompressed reads" description="Number of 64-bit words read from the bus into the uncompressed textures cache."/>
+ <event event="0x0e" title="Mali GPU Fragment Processor 0" name="Polygon list reads" description="Number of 64-bit words read from the bus by the polygon list reader."/>
+ <event event="0x0f" title="Mali GPU Fragment Processor 0" name="RSW reads" description="Number of 64-bit words read from the bus into the Render State Word register."/>
+ <event event="0x10" title="Mali GPU Fragment Processor 0" name="Vertex cache reads" description="Number of 64-bit words read from the bus into the vertex cache."/>
+ <event event="0x11" title="Mali GPU Fragment Processor 0" name="Uniform remapping reads" description="Number of 64-bit words read from the bus when reading from the uniform remapping table."/>
+ <event event="0x12" title="Mali GPU Fragment Processor 0" name="Program cache reads" description="Number of 64-bit words read from the bus into the fragment shader program cache."/>
+ <event event="0x13" title="Mali GPU Fragment Processor 0" name="Varying reads" description="Number of 64-bit words containing varyings generated by the vertex processing read from the bus."/>
+ <event event="0x14" title="Mali GPU Fragment Processor 0" name="Texture descriptors reads" description="Number of 64-bit words containing texture descriptors read from the bus."/>
+ <event event="0x15" title="Mali GPU Fragment Processor 0" name="Texture descriptor remapping reads" description="Number of 64-bit words read from the bus when reading from the texture descriptor remapping table."/>
+ <event event="0x17" title="Mali GPU Fragment Processor 0" name="Load unit reads" description="Number of 64-bit words read from the bus by the LOAD sub-instruction."/>
+ <event event="0x18" title="Mali GPU Fragment Processor 0" name="Polygon count" description="Number of triangles read from the polygon list."/>
+ <event event="0x19" title="Mali GPU Fragment Processor 0" name="Pixel rectangle count" description="Number of pixel rectangles read from the polygon list."/>
+ <event event="0x1a" title="Mali GPU Fragment Processor 0" name="Lines count" description="Number of lines read from the polygon list."/>
+ <event event="0x1b" title="Mali GPU Fragment Processor 0" name="Points count" description="Number of points read from the polygon list."/>
+ <event event="0x1c" title="Mali GPU Fragment Processor 0" name="Stall cycles PolygonListReader" description="Number of clock cycles the Polygon List Reader waited for output being collected."/>
+ <event event="0x1d" title="Mali GPU Fragment Processor 0" name="Stall cycles triangle setup" description="Number of clock cycles the TSC waited for input."/>
+ <event event="0x1e" title="Mali GPU Fragment Processor 0" name="Quad rasterized count" description="Number of 2x?2 quads output from rasterizer."/>
+ <event event="0x1f" title="Mali GPU Fragment Processor 0" name="Fragment rasterized count" description="Number of fragment rasterized. Fragments/(Quads*4) gives average actual fragments per quad."/>
+ <event event="0x20" title="Mali GPU Fragment Processor 0" name="Fragment rejected fragment-kill count" description="Number of fragments exiting the fragment shader as killed."/>
+ <event event="0x21" title="Mali GPU Fragment Processor 0" name="Fragment rejected fwd-fragment-kill count" description="Number of fragments killed by forward fragment kill."/>
+ <event event="0x22" title="Mali GPU Fragment Processor 0" name="Fragment passed z/stencil count" description="Number of fragments passing Z and stencil test."/>
+ <event event="0x23" title="Mali GPU Fragment Processor 0" name="Patches rejected early z/stencil count" description="Number of patches rejected by EarlyZ. A patch can be 8x8, 4x4 or 2x2 pixels."/>
+ <event event="0x24" title="Mali GPU Fragment Processor 0" name="Patches evaluated" description="Number of patches evaluated for EarlyZ rejection."/>
+ <event event="0x25" title="Mali GPU Fragment Processor 0" name="Instruction completed count" description="Number of fragment shader instruction words completed. It is a function of pixels processed and the length of the shader programs."/>
+ <event event="0x26" title="Mali GPU Fragment Processor 0" name="Instruction failed rendezvous count" description="Number of fragment shader instructions not completed because of failed Rendezvous."/>
+ <event event="0x27" title="Mali GPU Fragment Processor 0" name="Instruction failed varying-miss count" description="Number of fragment shader instructions not completed because of failed varying operation."/>
+ <event event="0x28" title="Mali GPU Fragment Processor 0" name="Instruction failed texture-miss count" description="Number of fragment shader instructions not completed because of failed texture operation."/>
+ <event event="0x29" title="Mali GPU Fragment Processor 0" name="Instruction failed load-miss count" description="Number of fragment shader instructions not completed because of failed load operation."/>
+ <event event="0x2a" title="Mali GPU Fragment Processor 0" name="Instruction failed tile read-miss count" description="Number of fragment shader instructions not completed because of failed read from the tilebuffer."/>
+ <event event="0x2b" title="Mali GPU Fragment Processor 0" name="Instruction failed store-miss count" description="Number of fragment shader instructions not completed because of failed store operation."/>
+ <event event="0x2c" title="Mali GPU Fragment Processor 0" name="Rendezvous breakage count" description="Number of Rendezvous breakages reported."/>
+ <event event="0x2d" title="Mali GPU Fragment Processor 0" name="Pipeline bubbles cycle count" description="Number of unused cycles in the fragment shader while rendering is active."/>
+ <event event="0x2e" title="Mali GPU Fragment Processor 0" name="Texture mapper multipass count" description="Number of texture operations looped because of more texture passes needed."/>
+ <event event="0x2f" title="Mali GPU Fragment Processor 0" name="Texture mapper cycle count" description="Number of texture operation cycles."/>
+ <event event="0x30" title="Mali GPU Fragment Processor 0" name="Vertex cache hit count" description="Number of times a requested vertex was found in the cache (Number of vertex cache hits)."/>
+ <event event="0x31" title="Mali GPU Fragment Processor 0" name="Vertex cache miss count" description="Number of times a requested vertex was not found in the cache (Number of vertex cache misses)."/>
+ <event event="0x32" title="Mali GPU Fragment Processor 0" name="Varying cache hit count" description="Number of times a requested varying was found in the cache (Number of varying cache hits)."/>
+ <event event="0x33" title="Mali GPU Fragment Processor 0" name="Varying cache miss count" description="Number of times a requested varying was not found in the cache (Number of varying cache misses)."/>
+ <event event="0x34" title="Mali GPU Fragment Processor 0" name="Varying cache conflict miss count" description="Number of times a requested varying was not in the cache and its value, retrieved from memory, must overwrite an older cache entry. This happens when an access pattern cannot be serviced by the cache."/>
+ <event event="0x35" title="Mali GPU Fragment Processor 0" name="Texture cache hit count" description="Number of times a requested texel was found in the texture cache (Number of texture cache hits)."/>
+ <event event="0x36" title="Mali GPU Fragment Processor 0" name="Texture cache miss count" description="Number of times a requested texel was not found in the texture cache (Number of texture cache misses)."/>
+ <event event="0x37" title="Mali GPU Fragment Processor 0" name="Texture cache conflict miss count" description="Number of times a requested texel was not in the cache and its value, retrieved from memory, must overwrite an older cache entry. This happens when an access pattern cannot be serviced by the cache."/>
+ <event event="0x38" title="Mali GPU Fragment Processor 0" name="Compressed texture cache hit count" description="Number of times a requested item was found in the cache."/>
+ <event event="0x39" title="Mali GPU Fragment Processor 0" name="Compressed texture cache miss count" description="Number of times a requested item was not found in the cache."/>
+ <event event="0x3a" title="Mali GPU Fragment Processor 0" name="Load/Store cache hit count" description="Number of hits in the load/store cache."/>
+ <event event="0x3b" title="Mali GPU Fragment Processor 0" name="Load/Store cache miss count" description="Number of misses in the load/store cache."/>
+ <event event="0x3c" title="Mali GPU Fragment Processor 0" name="Program cache hit count" description="Number of hits in the program cache."/>
+ <event event="0x3d" title="Mali GPU Fragment Processor 0" name="Program cache miss count" description="Number of misses in the program cache."/>
+ </category>
+ <category name="Mali-400-FP1" counter_set="ARM_Mali-400_FP1_cnt" per_cpu="no">
+ <event event="0x00" title="Mali GPU Fragment Processor 1" name="Active clock cycles" description="Active clock cycles, between polygon start and IRQ."/>
+ <event event="0x02" title="Mali GPU Fragment Processor 1" name="Total bus reads" description="Total number of 64-bit words read from the bus."/>
+ <event event="0x03" title="Mali GPU Fragment Processor 1" name="Total bus writes" description="Total number of 64-bit words written to the bus."/>
+ <event event="0x04" title="Mali GPU Fragment Processor 1" name="Bus read request cycles" description="Number of cycles during which the bus read request signal was HIGH."/>
+ <event event="0x05" title="Mali GPU Fragment Processor 1" name="Bus write request cycles" description="Number of cycles during which the bus write request signal was HIGH."/>
+ <event event="0x06" title="Mali GPU Fragment Processor 1" name="Bus read transactions count" description="Number of read requests accepted by the bus."/>
+ <event event="0x07" title="Mali GPU Fragment Processor 1" name="Bus write transactions" description="Number of write requests accepted by the bus."/>
+ <event event="0x09" title="Mali GPU Fragment Processor 1" name="Tile writeback writes" description="64-bit words written to the bus by the writeback unit."/>
+ <event event="0x0a" title="Mali GPU Fragment Processor 1" name="Store unit writes" description="64-bit words written to the bus by the store unit."/>
+ <event event="0x0d" title="Mali GPU Fragment Processor 1" name="Texture cache uncompressed reads" description="Number of 64-bit words read from the bus into the uncompressed textures cache."/>
+ <event event="0x0e" title="Mali GPU Fragment Processor 1" name="Polygon list reads" description="Number of 64-bit words read from the bus by the polygon list reader."/>
+ <event event="0x0f" title="Mali GPU Fragment Processor 1" name="RSW reads" description="Number of 64-bit words read from the bus into the Render State Word register."/>
+ <event event="0x10" title="Mali GPU Fragment Processor 1" name="Vertex cache reads" description="Number of 64-bit words read from the bus into the vertex cache."/>
+ <event event="0x11" title="Mali GPU Fragment Processor 1" name="Uniform remapping reads" description="Number of 64-bit words read from the bus when reading from the uniform remapping table."/>
+ <event event="0x12" title="Mali GPU Fragment Processor 1" name="Program cache reads" description="Number of 64-bit words read from the bus into the fragment shader program cache."/>
+ <event event="0x13" title="Mali GPU Fragment Processor 1" name="Varying reads" description="Number of 64-bit words containing varyings generated by the vertex processing read from the bus."/>
+ <event event="0x14" title="Mali GPU Fragment Processor 1" name="Texture descriptors reads" description="Number of 64-bit words containing texture descriptors read from the bus."/>
+ <event event="0x15" title="Mali GPU Fragment Processor 1" name="Texture descriptor remapping reads" description="Number of 64-bit words read from the bus when reading from the texture descriptor remapping table."/>
+ <event event="0x17" title="Mali GPU Fragment Processor 1" name="Load unit reads" description="Number of 64-bit words read from the bus by the LOAD sub-instruction."/>
+ <event event="0x18" title="Mali GPU Fragment Processor 1" name="Polygon count" description="Number of triangles read from the polygon list."/>
+ <event event="0x19" title="Mali GPU Fragment Processor 1" name="Pixel rectangle count" description="Number of pixel rectangles read from the polygon list."/>
+ <event event="0x1a" title="Mali GPU Fragment Processor 1" name="Lines count" description="Number of lines read from the polygon list."/>
+ <event event="0x1b" title="Mali GPU Fragment Processor 1" name="Points count" description="Number of points read from the polygon list."/>
+ <event event="0x1c" title="Mali GPU Fragment Processor 1" name="Stall cycles PolygonListReader" description="Number of clock cycles the Polygon List Reader waited for output being collected."/>
+ <event event="0x1d" title="Mali GPU Fragment Processor 1" name="Stall cycles triangle setup" description="Number of clock cycles the TSC waited for input."/>
+ <event event="0x1e" title="Mali GPU Fragment Processor 1" name="Quad rasterized count" description="Number of 2x?2 quads output from rasterizer."/>
+ <event event="0x1f" title="Mali GPU Fragment Processor 1" name="Fragment rasterized count" description="Number of fragment rasterized. Fragments/(Quads*4) gives average actual fragments per quad."/>
+ <event event="0x20" title="Mali GPU Fragment Processor 1" name="Fragment rejected fragment-kill count" description="Number of fragments exiting the fragment shader as killed."/>
+ <event event="0x21" title="Mali GPU Fragment Processor 1" name="Fragment rejected fwd-fragment-kill count" description="Number of fragments killed by forward fragment kill."/>
+ <event event="0x22" title="Mali GPU Fragment Processor 1" name="Fragment passed z/stencil count" description="Number of fragments passing Z and stencil test."/>
+ <event event="0x23" title="Mali GPU Fragment Processor 1" name="Patches rejected early z/stencil count" description="Number of patches rejected by EarlyZ. A patch can be 8x8, 4x4 or 2x2 pixels."/>
+ <event event="0x24" title="Mali GPU Fragment Processor 1" name="Patches evaluated" description="Number of patches evaluated for EarlyZ rejection."/>
+ <event event="0x25" title="Mali GPU Fragment Processor 1" name="Instruction completed count" description="Number of fragment shader instruction words completed. It is a function of pixels processed and the length of the shader programs."/>
+ <event event="0x26" title="Mali GPU Fragment Processor 1" name="Instruction failed rendezvous count" description="Number of fragment shader instructions not completed because of failed Rendezvous."/>
+ <event event="0x27" title="Mali GPU Fragment Processor 1" name="Instruction failed varying-miss count" description="Number of fragment shader instructions not completed because of failed varying operation."/>
+ <event event="0x28" title="Mali GPU Fragment Processor 1" name="Instruction failed texture-miss count" description="Number of fragment shader instructions not completed because of failed texture operation."/>
+ <event event="0x29" title="Mali GPU Fragment Processor 1" name="Instruction failed load-miss count" description="Number of fragment shader instructions not completed because of failed load operation."/>
+ <event event="0x2a" title="Mali GPU Fragment Processor 1" name="Instruction failed tile read-miss count" description="Number of fragment shader instructions not completed because of failed read from the tilebuffer."/>
+ <event event="0x2b" title="Mali GPU Fragment Processor 1" name="Instruction failed store-miss count" description="Number of fragment shader instructions not completed because of failed store operation."/>
+ <event event="0x2c" title="Mali GPU Fragment Processor 1" name="Rendezvous breakage count" description="Number of Rendezvous breakages reported."/>
+ <event event="0x2d" title="Mali GPU Fragment Processor 1" name="Pipeline bubbles cycle count" description="Number of unused cycles in the fragment shader while rendering is active."/>
+ <event event="0x2e" title="Mali GPU Fragment Processor 1" name="Texture mapper multipass count" description="Number of texture operations looped because of more texture passes needed."/>
+ <event event="0x2f" title="Mali GPU Fragment Processor 1" name="Texture mapper cycle count" description="Number of texture operation cycles."/>
+ <event event="0x30" title="Mali GPU Fragment Processor 1" name="Vertex cache hit count" description="Number of times a requested vertex was found in the cache (Number of vertex cache hits)."/>
+ <event event="0x31" title="Mali GPU Fragment Processor 1" name="Vertex cache miss count" description="Number of times a requested vertex was not found in the cache (Number of vertex cache misses)."/>
+ <event event="0x32" title="Mali GPU Fragment Processor 1" name="Varying cache hit count" description="Number of times a requested varying was found in the cache (Number of varying cache hits)."/>
+ <event event="0x33" title="Mali GPU Fragment Processor 1" name="Varying cache miss count" description="Number of times a requested varying was not found in the cache (Number of varying cache misses)."/>
+ <event event="0x34" title="Mali GPU Fragment Processor 1" name="Varying cache conflict miss count" description="Number of times a requested varying was not in the cache and its value, retrieved from memory, must overwrite an older cache entry. This happens when an access pattern cannot be serviced by the cache."/>
+ <event event="0x35" title="Mali GPU Fragment Processor 1" name="Texture cache hit count" description="Number of times a requested texel was found in the texture cache (Number of texture cache hits)."/>
+ <event event="0x36" title="Mali GPU Fragment Processor 1" name="Texture cache miss count" description="Number of times a requested texel was not found in the texture cache (Number of texture cache misses)."/>
+ <event event="0x37" title="Mali GPU Fragment Processor 1" name="Texture cache conflict miss count" description="Number of times a requested texel was not in the cache and its value, retrieved from memory, must overwrite an older cache entry. This happens when an access pattern cannot be serviced by the cache."/>
+ <event event="0x38" title="Mali GPU Fragment Processor 1" name="Compressed texture cache hit count" description="Number of times a requested item was found in the cache."/>
+ <event event="0x39" title="Mali GPU Fragment Processor 1" name="Compressed texture cache miss count" description="Number of times a requested item was not found in the cache."/>
+ <event event="0x3a" title="Mali GPU Fragment Processor 1" name="Load/Store cache hit count" description="Number of hits in the load/store cache."/>
+ <event event="0x3b" title="Mali GPU Fragment Processor 1" name="Load/Store cache miss count" description="Number of misses in the load/store cache."/>
+ <event event="0x3c" title="Mali GPU Fragment Processor 1" name="Program cache hit count" description="Number of hits in the program cache."/>
+ <event event="0x3d" title="Mali GPU Fragment Processor 1" name="Program cache miss count" description="Number of misses in the program cache."/>
+ </category>
+ <category name="Mali-400-FP2" counter_set="ARM_Mali-400_FP2_cnt" per_cpu="no">
+ <event event="0x00" title="Mali GPU Fragment Processor 2" name="Active clock cycles" description="Active clock cycles, between polygon start and IRQ."/>
+ <event event="0x02" title="Mali GPU Fragment Processor 2" name="Total bus reads" description="Total number of 64-bit words read from the bus."/>
+ <event event="0x03" title="Mali GPU Fragment Processor 2" name="Total bus writes" description="Total number of 64-bit words written to the bus."/>
+ <event event="0x04" title="Mali GPU Fragment Processor 2" name="Bus read request cycles" description="Number of cycles during which the bus read request signal was HIGH."/>
+ <event event="0x05" title="Mali GPU Fragment Processor 2" name="Bus write request cycles" description="Number of cycles during which the bus write request signal was HIGH."/>
+ <event event="0x06" title="Mali GPU Fragment Processor 2" name="Bus read transactions count" description="Number of read requests accepted by the bus."/>
+ <event event="0x07" title="Mali GPU Fragment Processor 2" name="Bus write transactions" description="Number of write requests accepted by the bus."/>
+ <event event="0x09" title="Mali GPU Fragment Processor 2" name="Tile writeback writes" description="64-bit words written to the bus by the writeback unit."/>
+ <event event="0x0a" title="Mali GPU Fragment Processor 2" name="Store unit writes" description="64-bit words written to the bus by the store unit."/>
+ <event event="0x0d" title="Mali GPU Fragment Processor 2" name="Texture cache uncompressed reads" description="Number of 64-bit words read from the bus into the uncompressed textures cache."/>
+ <event event="0x0e" title="Mali GPU Fragment Processor 2" name="Polygon list reads" description="Number of 64-bit words read from the bus by the polygon list reader."/>
+ <event event="0x0f" title="Mali GPU Fragment Processor 2" name="RSW reads" description="Number of 64-bit words read from the bus into the Render State Word register."/>
+ <event event="0x10" title="Mali GPU Fragment Processor 2" name="Vertex cache reads" description="Number of 64-bit words read from the bus into the vertex cache."/>
+ <event event="0x11" title="Mali GPU Fragment Processor 2" name="Uniform remapping reads" description="Number of 64-bit words read from the bus when reading from the uniform remapping table."/>
+ <event event="0x12" title="Mali GPU Fragment Processor 2" name="Program cache reads" description="Number of 64-bit words read from the bus into the fragment shader program cache."/>
+ <event event="0x13" title="Mali GPU Fragment Processor 2" name="Varying reads" description="Number of 64-bit words containing varyings generated by the vertex processing read from the bus."/>
+ <event event="0x14" title="Mali GPU Fragment Processor 2" name="Texture descriptors reads" description="Number of 64-bit words containing texture descriptors read from the bus."/>
+ <event event="0x15" title="Mali GPU Fragment Processor 2" name="Texture descriptor remapping reads" description="Number of 64-bit words read from the bus when reading from the texture descriptor remapping table."/>
+ <event event="0x17" title="Mali GPU Fragment Processor 2" name="Load unit reads" description="Number of 64-bit words read from the bus by the LOAD sub-instruction."/>
+ <event event="0x18" title="Mali GPU Fragment Processor 2" name="Polygon count" description="Number of triangles read from the polygon list."/>
+ <event event="0x19" title="Mali GPU Fragment Processor 2" name="Pixel rectangle count" description="Number of pixel rectangles read from the polygon list."/>
+ <event event="0x1a" title="Mali GPU Fragment Processor 2" name="Lines count" description="Number of lines read from the polygon list."/>
+ <event event="0x1b" title="Mali GPU Fragment Processor 2" name="Points count" description="Number of points read from the polygon list."/>
+ <event event="0x1c" title="Mali GPU Fragment Processor 2" name="Stall cycles PolygonListReader" description="Number of clock cycles the Polygon List Reader waited for output being collected."/>
+ <event event="0x1d" title="Mali GPU Fragment Processor 2" name="Stall cycles triangle setup" description="Number of clock cycles the TSC waited for input."/>
+    <event event="0x1e" title="Mali GPU Fragment Processor 2" name="Quad rasterized count" description="Number of 2x2 quads output from rasterizer."/>
+    <event event="0x1f" title="Mali GPU Fragment Processor 2" name="Fragment rasterized count" description="Number of fragments rasterized. Fragments/(Quads*4) gives average actual fragments per quad."/>
+ <event event="0x20" title="Mali GPU Fragment Processor 2" name="Fragment rejected fragment-kill count" description="Number of fragments exiting the fragment shader as killed."/>
+ <event event="0x21" title="Mali GPU Fragment Processor 2" name="Fragment rejected fwd-fragment-kill count" description="Number of fragments killed by forward fragment kill."/>
+ <event event="0x22" title="Mali GPU Fragment Processor 2" name="Fragment passed z/stencil count" description="Number of fragments passing Z and stencil test."/>
+ <event event="0x23" title="Mali GPU Fragment Processor 2" name="Patches rejected early z/stencil count" description="Number of patches rejected by EarlyZ. A patch can be 8x8, 4x4 or 2x2 pixels."/>
+ <event event="0x24" title="Mali GPU Fragment Processor 2" name="Patches evaluated" description="Number of patches evaluated for EarlyZ rejection."/>
+ <event event="0x25" title="Mali GPU Fragment Processor 2" name="Instruction completed count" description="Number of fragment shader instruction words completed. It is a function of pixels processed and the length of the shader programs."/>
+ <event event="0x26" title="Mali GPU Fragment Processor 2" name="Instruction failed rendezvous count" description="Number of fragment shader instructions not completed because of failed Rendezvous."/>
+ <event event="0x27" title="Mali GPU Fragment Processor 2" name="Instruction failed varying-miss count" description="Number of fragment shader instructions not completed because of failed varying operation."/>
+ <event event="0x28" title="Mali GPU Fragment Processor 2" name="Instruction failed texture-miss count" description="Number of fragment shader instructions not completed because of failed texture operation."/>
+ <event event="0x29" title="Mali GPU Fragment Processor 2" name="Instruction failed load-miss count" description="Number of fragment shader instructions not completed because of failed load operation."/>
+ <event event="0x2a" title="Mali GPU Fragment Processor 2" name="Instruction failed tile read-miss count" description="Number of fragment shader instructions not completed because of failed read from the tilebuffer."/>
+ <event event="0x2b" title="Mali GPU Fragment Processor 2" name="Instruction failed store-miss count" description="Number of fragment shader instructions not completed because of failed store operation."/>
+ <event event="0x2c" title="Mali GPU Fragment Processor 2" name="Rendezvous breakage count" description="Number of Rendezvous breakages reported."/>
+ <event event="0x2d" title="Mali GPU Fragment Processor 2" name="Pipeline bubbles cycle count" description="Number of unused cycles in the fragment shader while rendering is active."/>
+ <event event="0x2e" title="Mali GPU Fragment Processor 2" name="Texture mapper multipass count" description="Number of texture operations looped because of more texture passes needed."/>
+ <event event="0x2f" title="Mali GPU Fragment Processor 2" name="Texture mapper cycle count" description="Number of texture operation cycles."/>
+ <event event="0x30" title="Mali GPU Fragment Processor 2" name="Vertex cache hit count" description="Number of times a requested vertex was found in the cache (Number of vertex cache hits)."/>
+ <event event="0x31" title="Mali GPU Fragment Processor 2" name="Vertex cache miss count" description="Number of times a requested vertex was not found in the cache (Number of vertex cache misses)."/>
+ <event event="0x32" title="Mali GPU Fragment Processor 2" name="Varying cache hit count" description="Number of times a requested varying was found in the cache (Number of varying cache hits)."/>
+ <event event="0x33" title="Mali GPU Fragment Processor 2" name="Varying cache miss count" description="Number of times a requested varying was not found in the cache (Number of varying cache misses)."/>
+ <event event="0x34" title="Mali GPU Fragment Processor 2" name="Varying cache conflict miss count" description="Number of times a requested varying was not in the cache and its value, retrieved from memory, must overwrite an older cache entry. This happens when an access pattern cannot be serviced by the cache."/>
+ <event event="0x35" title="Mali GPU Fragment Processor 2" name="Texture cache hit count" description="Number of times a requested texel was found in the texture cache (Number of texture cache hits)."/>
+ <event event="0x36" title="Mali GPU Fragment Processor 2" name="Texture cache miss count" description="Number of times a requested texel was not found in the texture cache (Number of texture cache misses)."/>
+ <event event="0x37" title="Mali GPU Fragment Processor 2" name="Texture cache conflict miss count" description="Number of times a requested texel was not in the cache and its value, retrieved from memory, must overwrite an older cache entry. This happens when an access pattern cannot be serviced by the cache."/>
+ <event event="0x38" title="Mali GPU Fragment Processor 2" name="Compressed texture cache hit count" description="Number of times a requested item was found in the cache."/>
+ <event event="0x39" title="Mali GPU Fragment Processor 2" name="Compressed texture cache miss count" description="Number of times a requested item was not found in the cache."/>
+ <event event="0x3a" title="Mali GPU Fragment Processor 2" name="Load/Store cache hit count" description="Number of hits in the load/store cache."/>
+ <event event="0x3b" title="Mali GPU Fragment Processor 2" name="Load/Store cache miss count" description="Number of misses in the load/store cache."/>
+ <event event="0x3c" title="Mali GPU Fragment Processor 2" name="Program cache hit count" description="Number of hits in the program cache."/>
+ <event event="0x3d" title="Mali GPU Fragment Processor 2" name="Program cache miss count" description="Number of misses in the program cache."/>
+ </category>
+ <category name="Mali-400-FP3" counter_set="ARM_Mali-400_FP3_cnt" per_cpu="no">
+ <event event="0x00" title="Mali GPU Fragment Processor 3" name="Active clock cycles" description="Active clock cycles, between polygon start and IRQ."/>
+ <event event="0x02" title="Mali GPU Fragment Processor 3" name="Total bus reads" description="Total number of 64-bit words read from the bus."/>
+ <event event="0x03" title="Mali GPU Fragment Processor 3" name="Total bus writes" description="Total number of 64-bit words written to the bus."/>
+ <event event="0x04" title="Mali GPU Fragment Processor 3" name="Bus read request cycles" description="Number of cycles during which the bus read request signal was HIGH."/>
+ <event event="0x05" title="Mali GPU Fragment Processor 3" name="Bus write request cycles" description="Number of cycles during which the bus write request signal was HIGH."/>
+ <event event="0x06" title="Mali GPU Fragment Processor 3" name="Bus read transactions count" description="Number of read requests accepted by the bus."/>
+ <event event="0x07" title="Mali GPU Fragment Processor 3" name="Bus write transactions" description="Number of write requests accepted by the bus."/>
+ <event event="0x09" title="Mali GPU Fragment Processor 3" name="Tile writeback writes" description="64-bit words written to the bus by the writeback unit."/>
+ <event event="0x0a" title="Mali GPU Fragment Processor 3" name="Store unit writes" description="64-bit words written to the bus by the store unit."/>
+ <event event="0x0d" title="Mali GPU Fragment Processor 3" name="Texture cache uncompressed reads" description="Number of 64-bit words read from the bus into the uncompressed textures cache."/>
+ <event event="0x0e" title="Mali GPU Fragment Processor 3" name="Polygon list reads" description="Number of 64-bit words read from the bus by the polygon list reader."/>
+ <event event="0x0f" title="Mali GPU Fragment Processor 3" name="RSW reads" description="Number of 64-bit words read from the bus into the Render State Word register."/>
+ <event event="0x10" title="Mali GPU Fragment Processor 3" name="Vertex cache reads" description="Number of 64-bit words read from the bus into the vertex cache."/>
+ <event event="0x11" title="Mali GPU Fragment Processor 3" name="Uniform remapping reads" description="Number of 64-bit words read from the bus when reading from the uniform remapping table."/>
+ <event event="0x12" title="Mali GPU Fragment Processor 3" name="Program cache reads" description="Number of 64-bit words read from the bus into the fragment shader program cache."/>
+ <event event="0x13" title="Mali GPU Fragment Processor 3" name="Varying reads" description="Number of 64-bit words containing varyings generated by the vertex processing read from the bus."/>
+ <event event="0x14" title="Mali GPU Fragment Processor 3" name="Texture descriptors reads" description="Number of 64-bit words containing texture descriptors read from the bus."/>
+ <event event="0x15" title="Mali GPU Fragment Processor 3" name="Texture descriptor remapping reads" description="Number of 64-bit words read from the bus when reading from the texture descriptor remapping table."/>
+ <event event="0x17" title="Mali GPU Fragment Processor 3" name="Load unit reads" description="Number of 64-bit words read from the bus by the LOAD sub-instruction."/>
+ <event event="0x18" title="Mali GPU Fragment Processor 3" name="Polygon count" description="Number of triangles read from the polygon list."/>
+ <event event="0x19" title="Mali GPU Fragment Processor 3" name="Pixel rectangle count" description="Number of pixel rectangles read from the polygon list."/>
+ <event event="0x1a" title="Mali GPU Fragment Processor 3" name="Lines count" description="Number of lines read from the polygon list."/>
+ <event event="0x1b" title="Mali GPU Fragment Processor 3" name="Points count" description="Number of points read from the polygon list."/>
+ <event event="0x1c" title="Mali GPU Fragment Processor 3" name="Stall cycles PolygonListReader" description="Number of clock cycles the Polygon List Reader waited for output being collected."/>
+ <event event="0x1d" title="Mali GPU Fragment Processor 3" name="Stall cycles triangle setup" description="Number of clock cycles the TSC waited for input."/>
+    <event event="0x1e" title="Mali GPU Fragment Processor 3" name="Quad rasterized count" description="Number of 2x2 quads output from rasterizer."/>
+    <event event="0x1f" title="Mali GPU Fragment Processor 3" name="Fragment rasterized count" description="Number of fragments rasterized. Fragments/(Quads*4) gives average actual fragments per quad."/>
+ <event event="0x20" title="Mali GPU Fragment Processor 3" name="Fragment rejected fragment-kill count" description="Number of fragments exiting the fragment shader as killed."/>
+ <event event="0x21" title="Mali GPU Fragment Processor 3" name="Fragment rejected fwd-fragment-kill count" description="Number of fragments killed by forward fragment kill."/>
+ <event event="0x22" title="Mali GPU Fragment Processor 3" name="Fragment passed z/stencil count" description="Number of fragments passing Z and stencil test."/>
+ <event event="0x23" title="Mali GPU Fragment Processor 3" name="Patches rejected early z/stencil count" description="Number of patches rejected by EarlyZ. A patch can be 8x8, 4x4 or 2x2 pixels."/>
+ <event event="0x24" title="Mali GPU Fragment Processor 3" name="Patches evaluated" description="Number of patches evaluated for EarlyZ rejection."/>
+ <event event="0x25" title="Mali GPU Fragment Processor 3" name="Instruction completed count" description="Number of fragment shader instruction words completed. It is a function of pixels processed and the length of the shader programs."/>
+ <event event="0x26" title="Mali GPU Fragment Processor 3" name="Instruction failed rendezvous count" description="Number of fragment shader instructions not completed because of failed Rendezvous."/>
+ <event event="0x27" title="Mali GPU Fragment Processor 3" name="Instruction failed varying-miss count" description="Number of fragment shader instructions not completed because of failed varying operation."/>
+ <event event="0x28" title="Mali GPU Fragment Processor 3" name="Instruction failed texture-miss count" description="Number of fragment shader instructions not completed because of failed texture operation."/>
+ <event event="0x29" title="Mali GPU Fragment Processor 3" name="Instruction failed load-miss count" description="Number of fragment shader instructions not completed because of failed load operation."/>
+ <event event="0x2a" title="Mali GPU Fragment Processor 3" name="Instruction failed tile read-miss count" description="Number of fragment shader instructions not completed because of failed read from the tilebuffer."/>
+ <event event="0x2b" title="Mali GPU Fragment Processor 3" name="Instruction failed store-miss count" description="Number of fragment shader instructions not completed because of failed store operation."/>
+ <event event="0x2c" title="Mali GPU Fragment Processor 3" name="Rendezvous breakage count" description="Number of Rendezvous breakages reported."/>
+ <event event="0x2d" title="Mali GPU Fragment Processor 3" name="Pipeline bubbles cycle count" description="Number of unused cycles in the fragment shader while rendering is active."/>
+ <event event="0x2e" title="Mali GPU Fragment Processor 3" name="Texture mapper multipass count" description="Number of texture operations looped because of more texture passes needed."/>
+ <event event="0x2f" title="Mali GPU Fragment Processor 3" name="Texture mapper cycle count" description="Number of texture operation cycles."/>
+ <event event="0x30" title="Mali GPU Fragment Processor 3" name="Vertex cache hit count" description="Number of times a requested vertex was found in the cache (Number of vertex cache hits)."/>
+ <event event="0x31" title="Mali GPU Fragment Processor 3" name="Vertex cache miss count" description="Number of times a requested vertex was not found in the cache (Number of vertex cache misses)."/>
+ <event event="0x32" title="Mali GPU Fragment Processor 3" name="Varying cache hit count" description="Number of times a requested varying was found in the cache (Number of varying cache hits)."/>
+ <event event="0x33" title="Mali GPU Fragment Processor 3" name="Varying cache miss count" description="Number of times a requested varying was not found in the cache (Number of varying cache misses)."/>
+ <event event="0x34" title="Mali GPU Fragment Processor 3" name="Varying cache conflict miss count" description="Number of times a requested varying was not in the cache and its value, retrieved from memory, must overwrite an older cache entry. This happens when an access pattern cannot be serviced by the cache."/>
+ <event event="0x35" title="Mali GPU Fragment Processor 3" name="Texture cache hit count" description="Number of times a requested texel was found in the texture cache (Number of texture cache hits)."/>
+ <event event="0x36" title="Mali GPU Fragment Processor 3" name="Texture cache miss count" description="Number of times a requested texel was not found in the texture cache (Number of texture cache misses)."/>
+ <event event="0x37" title="Mali GPU Fragment Processor 3" name="Texture cache conflict miss count" description="Number of times a requested texel was not in the cache and its value, retrieved from memory, must overwrite an older cache entry. This happens when an access pattern cannot be serviced by the cache."/>
+ <event event="0x38" title="Mali GPU Fragment Processor 3" name="Compressed texture cache hit count" description="Number of times a requested item was found in the cache."/>
+ <event event="0x39" title="Mali GPU Fragment Processor 3" name="Compressed texture cache miss count" description="Number of times a requested item was not found in the cache."/>
+ <event event="0x3a" title="Mali GPU Fragment Processor 3" name="Load/Store cache hit count" description="Number of hits in the load/store cache."/>
+ <event event="0x3b" title="Mali GPU Fragment Processor 3" name="Load/Store cache miss count" description="Number of misses in the load/store cache."/>
+ <event event="0x3c" title="Mali GPU Fragment Processor 3" name="Program cache hit count" description="Number of hits in the program cache."/>
+ <event event="0x3d" title="Mali GPU Fragment Processor 3" name="Program cache miss count" description="Number of misses in the program cache."/>
+ </category>
+ <category name="Mali-400-L2" counter_set="ARM_Mali-400_L2_cnt" per_cpu="no">
+ <event event="0x01" title="Mali L2 Cache" name="Total clock cycles" description="Total clock cycles."/>
+ <event event="0x02" title="Mali L2 Cache" name="Active clock cycles" description="Active clock cycles." />
+ <event event="0x08" title="Mali L2 Cache" name="Read transactions, master" description="Read transactions, master." />
+ <event event="0x09" title="Mali L2 Cache" name="Write transactions, master" description="Write transactions, master." />
+ <event event="0x0a" title="Mali L2 Cache" name="Words read, master" description="Words read, master." />
+ <event event="0x0b" title="Mali L2 Cache" name="Words written, master" description="Words written, master." />
+ <event event="0x10" title="Mali L2 Cache" name="Read transactions, all slaves" description="Read transactions, all slaves." />
+ <event event="0x11" title="Mali L2 Cache" name="Write transactions, all slaves" description="Write transactions, all slaves." />
+ <event event="0x12" title="Mali L2 Cache" name="Words read, all slaves" description="Words read, all slaves." />
+ <event event="0x13" title="Mali L2 Cache" name="Words written, all slaves" description="Words written, all slaves." />
+ <event event="0x14" title="Mali L2 Cache" name="Read hits, all slaves" description="Read hits, all slaves." />
+ <event event="0x15" title="Mali L2 Cache" name="Read misses, all slaves" description="Read misses, all slaves." />
+ <event event="0x16" title="Mali L2 Cache" name="Write invalidates, all slaves" description="Write invalidates, all slaves." />
+ <event event="0x17" title="Mali L2 Cache" name="Read invalidates, all slaves" description="Read invalidates, all slaves." />
+ <event event="0x18" title="Mali L2 Cache" name="Cacheable read transactions, all slaves" description="Cacheable read transactions, all slaves." />
+ <event event="0x20" title="Mali L2 Cache" name="Read transactions, slave 0" description="Read transactions, slave 0." />
+ <event event="0x21" title="Mali L2 Cache" name="Write transactions, slave 0" description="Write transactions, slave 0." />
+ <event event="0x22" title="Mali L2 Cache" name="Words read, slave 0" description="Words read, slave 0." />
+ <event event="0x23" title="Mali L2 Cache" name="Words written, slave 0" description="Words written, slave 0." />
+ <event event="0x24" title="Mali L2 Cache" name="Read hits, slave 0" description="Read hits, slave 0." />
+ <event event="0x25" title="Mali L2 Cache" name="Read misses, slave 0" description="Read misses, slave 0." />
+ <event event="0x26" title="Mali L2 Cache" name="Write invalidates, slave 0" description="Write invalidates, slave 0." />
+ <event event="0x27" title="Mali L2 Cache" name="Read invalidates, slave 0" description="Read invalidates, slave 0." />
+ <event event="0x28" title="Mali L2 Cache" name="Cacheable read transactions, slave 0" description="Cacheable read transactions, slave 0." />
+ <event event="0x30" title="Mali L2 Cache" name="Read transactions, slave 1" description="Read transactions, slave 1." />
+ <event event="0x31" title="Mali L2 Cache" name="Write transactions, slave 1" description="Write transactions, slave 1." />
+ <event event="0x32" title="Mali L2 Cache" name="Words read, slave 1" description="Words read, slave 1." />
+ <event event="0x33" title="Mali L2 Cache" name="Words written, slave 1" description="Words written, slave 1." />
+ <event event="0x34" title="Mali L2 Cache" name="Read hits, slave 1" description="Read hits, slave 1." />
+ <event event="0x35" title="Mali L2 Cache" name="Read misses, slave 1" description="Read misses, slave 1." />
+ <event event="0x36" title="Mali L2 Cache" name="Write invalidates, slave 1" description="Write invalidates, slave 1." />
+ <event event="0x37" title="Mali L2 Cache" name="Read invalidates, slave 1" description="Read invalidates, slave 1." />
+ <event event="0x38" title="Mali L2 Cache" name="Cacheable read transactions, slave 1" description="Cacheable read transactions, slave 1." />
+ <event event="0x40" title="Mali L2 Cache" name="Read transactions, slave 2" description="Read transactions, slave 2." />
+ <event event="0x41" title="Mali L2 Cache" name="Write transactions, slave 2" description="Write transactions, slave 2." />
+ <event event="0x42" title="Mali L2 Cache" name="Words read, slave 2" description="Words read, slave 2." />
+ <event event="0x43" title="Mali L2 Cache" name="Words written, slave 2" description="Words written, slave 2." />
+ <event event="0x44" title="Mali L2 Cache" name="Read hits, slave 2" description="Read hits, slave 2." />
+ <event event="0x45" title="Mali L2 Cache" name="Read misses, slave 2" description="Read misses, slave 2." />
+ <event event="0x46" title="Mali L2 Cache" name="Write invalidates, slave 2" description="Write invalidates, slave 2." />
+ <event event="0x47" title="Mali L2 Cache" name="Read invalidates, slave 2" description="Read invalidates, slave 2." />
+ <event event="0x48" title="Mali L2 Cache" name="Cacheable read transactions, slave 2" description="Cacheable read transactions, slave 2." />
+ <event event="0x50" title="Mali L2 Cache" name="Read transactions, slave 3" description="Read transactions, slave 3." />
+ <event event="0x51" title="Mali L2 Cache" name="Write transactions, slave 3" description="Write transactions, slave 3." />
+ <event event="0x52" title="Mali L2 Cache" name="Words read, slave 3" description="Words read, slave 3." />
+ <event event="0x53" title="Mali L2 Cache" name="Words written, slave 3" description="Words written, slave 3." />
+ <event event="0x54" title="Mali L2 Cache" name="Read hits, slave 3" description="Read hits, slave 3." />
+ <event event="0x55" title="Mali L2 Cache" name="Read misses, slave 3" description="Read misses, slave 3." />
+ <event event="0x56" title="Mali L2 Cache" name="Write invalidates, slave 3" description="Write invalidates, slave 3." />
+ <event event="0x57" title="Mali L2 Cache" name="Read invalidates, slave 3" description="Read invalidates, slave 3." />
+ <event event="0x58" title="Mali L2 Cache" name="Cacheable read transactions, slave 3" description="Cacheable read transactions, slave 3." />
+ <event event="0x60" title="Mali L2 Cache" name="Read transactions, slave 4" description="Read transactions, slave 4." />
+ <event event="0x61" title="Mali L2 Cache" name="Write transactions, slave 4" description="Write transactions, slave 4." />
+ <event event="0x62" title="Mali L2 Cache" name="Words read, slave 4" description="Words read, slave 4." />
+ <event event="0x63" title="Mali L2 Cache" name="Words written, slave 4" description="Words written, slave 4." />
+ <event event="0x64" title="Mali L2 Cache" name="Read hits, slave 4" description="Read hits, slave 4." />
+ <event event="0x65" title="Mali L2 Cache" name="Read misses, slave 4" description="Read misses, slave 4." />
+ <event event="0x66" title="Mali L2 Cache" name="Write invalidates, slave 4" description="Write invalidates, slave 4." />
+ <event event="0x67" title="Mali L2 Cache" name="Read invalidates, slave 4" description="Read invalidates, slave 4." />
+ <event event="0x68" title="Mali L2 Cache" name="Cacheable read transactions, slave 4" description="Cacheable read transactions, slave 4." />
+ </category>
+ <category name="ARM_Mali-400_Filmstrip" counter_set="ARM_Mali-400_Filmstrip_cnt" per_cpu="no">
+ <event event="0x040a" title="ARM_Mali-400_Filmstrip" name="Freq 1:10" description="Scaled framebuffer captures every 10th frame." />
+ <event event="0x041e" title="ARM_Mali-400_Filmstrip" name="Freq 1:30" description="Scaled framebuffer captures every 30th frame." />
+ <event event="0x043c" title="ARM_Mali-400_Filmstrip" name="Freq 1:60" description="Scaled framebuffer captures every 60th frame." />
+ </category>
+ <category name="ARM_Mali-400_Voltage" per_cpu="no">
+ <event counter="ARM_Mali-400_Voltage" title="Mali GPU Voltage" name="Voltage" display="average" average_selection="yes" units="mV" description="GPU core voltage."/>
+ </category>
+ <category name="ARM_Mali-400_Frequency" per_cpu="no">
+ <event counter="ARM_Mali-400_Frequency" title="Mali GPU Frequency" name="Frequency" display="average" average_selection="yes" units="MHz" description="GPU core frequency."/>
+ </category>
+ <category name="Mali-400-SW" counter_set="ARM_Mali-400_SW_cnt" per_cpu="no">
+ <!-- EGL Counters -->
+ <event counter="ARM_Mali-400_SW_17" title="Mali EGL Software Counters" name="Blit Time" description="Time spent blitting the framebuffer from video memory to framebuffer."/>
+ <!-- glDrawElements Counters -->
+ <event counter="ARM_Mali-400_SW_18" title="glDrawElements Statistics" name="Calls to glDrawElements" description="Number of calls to glDrawElements."/>
+ <event counter="ARM_Mali-400_SW_19" title="glDrawElements Statistics" name="Indices to glDrawElements" description="Number of indices to glDrawElements."/>
+ <event counter="ARM_Mali-400_SW_20" title="glDrawElements Statistics" name="Transformed by glDrawElements" description="Number of vertices transformed by glDrawElements."/>
+ <!-- glDrawArrays Counters -->
+ <event counter="ARM_Mali-400_SW_21" title="glDrawArrays Statistics" name="Calls to glDrawArrays" description="Number of calls to glDrawArrays."/>
+ <event counter="ARM_Mali-400_SW_22" title="glDrawArrays Statistics" name="Transformed by glDrawArrays" description="Number of vertices transformed by glDrawArrays."/>
+ <!-- Draw Call Counters -->
+ <event counter="ARM_Mali-400_SW_23" title="Drawcall Statistics" name="Points" description="Number of calls to glDraw* with parameter GL_POINTS."/>
+ <event counter="ARM_Mali-400_SW_24" title="Drawcall Statistics" name="Lines" description="Number of calls to glDraw* with parameter GL_LINES."/>
+ <event counter="ARM_Mali-400_SW_25" title="Drawcall Statistics" name="Lineloop" description="Number of calls to glDraw* with parameter GL_LINE_LOOP."/>
+ <event counter="ARM_Mali-400_SW_26" title="Drawcall Statistics" name="Linestrip" description="Number of calls to glDraw* with parameter GL_LINE_STRIP."/>
+ <event counter="ARM_Mali-400_SW_27" title="Drawcall Statistics" name="Triangles" description="Number of calls to glDraw* with parameter GL_TRIANGLES."/>
+ <event counter="ARM_Mali-400_SW_28" title="Drawcall Statistics" name="Trianglestrip" description="Number of calls to glDraw* with parameter GL_TRIANGLE_STRIP."/>
+ <event counter="ARM_Mali-400_SW_29" title="Drawcall Statistics" name="Trianglefan" description="Number of calls to glDraw* with parameter GL_TRIANGLE_FAN."/>
+ <event counter="ARM_Mali-400_SW_30" title="Drawcall Statistics" name="Vertex Upload Time (us)" description="Time spent uploading vertex attributes and faceindex data not present in a VBO."/>
+ <event counter="ARM_Mali-400_SW_31" title="Drawcall Statistics" name="Uniform Bytes Copied (bytes)" description="Number of bytes copied to Mali memory as a result of uniforms update."/>
+ <!-- Buffer Profiling Counters -->
+ <event counter="ARM_Mali-400_SW_32" title="Buffer Profiling" name="Texture Upload Time (ms)" description="Time spent uploading textures."/>
+ <event counter="ARM_Mali-400_SW_33" title="Buffer Profiling" name="VBO Upload Time (ms)" description="Time spent uploading vertex buffer objects."/>
+    <event counter="ARM_Mali-400_SW_34" title="Buffer Profiling" name="FBO Flushes" description="Number of flushes on framebuffer attachment."/>
+ <!-- OpenGL ES 1.1 Emulation -->
+ <event counter="ARM_Mali-400_SW_35" title="Fixed-function Emulation" name="# Vertex Shaders Generated" description="Number of vertex shaders generated."/>
+ <event counter="ARM_Mali-400_SW_36" title="Fixed-function Emulation" name="# Fragment Shaders Generated" description="Number of fragment shaders generated."/>
+ <!-- Geometry Statistics -->
+ <event counter="ARM_Mali-400_SW_50" title="Geometry Statistics" name="Triangles" description="The total number of triangles passed to GLES per-frame."/>
+ <event counter="ARM_Mali-400_SW_51" title="Geometry Statistics" name="Independent Triangles" description="Number of triangles passed to GLES using the mode GL_TRIANGLES."/>
+ <event counter="ARM_Mali-400_SW_52" title="Geometry Statistics" name="Strip Triangles" description="Number of triangles passed to GLES using the mode GL_TRIANGLE_STRIP."/>
+ <event counter="ARM_Mali-400_SW_53" title="Geometry Statistics" name="Fan Triangles" description="Number of triangles passed to GLES using the mode GL_TRIANGLE_FAN."/>
+ <event counter="ARM_Mali-400_SW_54" title="Geometry Statistics" name="Lines" description="Number of lines passed to GLES per-frame."/>
+ <event counter="ARM_Mali-400_SW_55" title="Geometry Statistics" name="Independent Lines" description="Number of lines passed to GLES using the mode GL_LINES."/>
+ <event counter="ARM_Mali-400_SW_56" title="Geometry Statistics" name="Strip Lines" description="Number of lines passed to GLES using the mode GL_LINE_STRIP."/>
+ <event counter="ARM_Mali-400_SW_57" title="Geometry Statistics" name="Loop Lines" description="Number of lines passed to GLES using the mode GL_LINE_LOOP."/>
+ </category>
+
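
As a rough, illustrative aside (not part of the patch): each events-*.xml fragment added here is a flat list of <category> elements whose <event> records are keyed either by a hardware event number (event="0x..") or by a software counter name (counter="ARM_Mali-..."). Assuming the fragment is first wrapped in a single <events> root element (the shipped files carry no root element of their own), the definitions can be enumerated with any XML parser, for example in Python:

    # Sketch only: list the counters declared in a gator events XML fragment.
    import xml.etree.ElementTree as ET

    def load_events(path):
        with open(path) as f:
            # The shipped files are rootless fragments; wrap them so the parser accepts them.
            text = "<events>" + f.read() + "</events>"
        root = ET.fromstring(text)
        counters = {}
        for category in root.findall("category"):
            events = []
            for event in category.findall("event"):
                # Hardware events carry an "event" number, software counters a "counter" name.
                key = event.get("event") or event.get("counter")
                events.append((key, event.get("name")))
            counters[category.get("name")] = events
        return counters

    if __name__ == "__main__":
        for name, events in load_events("events-Mali-400.xml").items():
            print(name, len(events), "events")
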
diff --git a/tools/gator/daemon/events-Mali-T6xx.xml b/tools/gator/daemon/events-Mali-T6xx.xml
new file mode 100644
index 000000000000..3d795decaa9d
--- /dev/null
+++ b/tools/gator/daemon/events-Mali-T6xx.xml
@@ -0,0 +1,38 @@
+
+ <category name="Mali-T6xx-SW-counters" per_cpu="no">
+ <event counter="ARM_Mali-T6xx_TOTAL_ALLOC_PAGES" title="Mali Total Alloc Pages" name="Total number of allocated pages" description="Mali total number of allocated pages."/>
+ </category>
+
+ <category name="Mali-T6xx-PMShader" per_cpu="no">
+ <event counter="ARM_Mali-T6xx_PM_SHADER_0" display="average" average_selection="yes" units="%" title="Mali PM Shader" name="PM Shader Core 0" description="Mali PM Shader: PM Shader Core 0."/>
+ <event counter="ARM_Mali-T6xx_PM_SHADER_1" display="average" average_selection="yes" units="%" title="Mali PM Shader" name="PM Shader Core 1" description="Mali PM Shader: PM Shader Core 1."/>
+ <event counter="ARM_Mali-T6xx_PM_SHADER_2" display="average" average_selection="yes" units="%" title="Mali PM Shader" name="PM Shader Core 2" description="Mali PM Shader: PM Shader Core 2."/>
+ <event counter="ARM_Mali-T6xx_PM_SHADER_3" display="average" average_selection="yes" units="%" title="Mali PM Shader" name="PM Shader Core 3" description="Mali PM Shader: PM Shader Core 3."/>
+ <event counter="ARM_Mali-T6xx_PM_SHADER_4" display="average" average_selection="yes" units="%" title="Mali PM Shader" name="PM Shader Core 4" description="Mali PM Shader: PM Shader Core 4."/>
+ <event counter="ARM_Mali-T6xx_PM_SHADER_5" display="average" average_selection="yes" units="%" title="Mali PM Shader" name="PM Shader Core 5" description="Mali PM Shader: PM Shader Core 5."/>
+ <event counter="ARM_Mali-T6xx_PM_SHADER_6" display="average" average_selection="yes" units="%" title="Mali PM Shader" name="PM Shader Core 6" description="Mali PM Shader: PM Shader Core 6."/>
+ <event counter="ARM_Mali-T6xx_PM_SHADER_7" display="average" average_selection="yes" units="%" title="Mali PM Shader" name="PM Shader Core 7" description="Mali PM Shader: PM Shader Core 7."/>
+ </category>
+
+ <category name="Mali-T6xx-PMTiler" per_cpu="no">
+ <event counter="ARM_Mali-T6xx_PM_TILER_0" display="average" average_selection="yes" units="%" title="Mali PM Tiler" name="PM Tiler Core 0" description="Mali PM Tiler: PM Tiler Core 0."/>
+ </category>
+
+ <category name="Mali-T6xx-PML2" per_cpu="no">
+ <event counter="ARM_Mali-T6xx_PM_L2_0" display="average" average_selection="yes" units="%" title="Mali PM L2" name="PM L2 Core 0" description="Mali PM L2: PM L2 Core 0."/>
+ <event counter="ARM_Mali-T6xx_PM_L2_1" display="average" average_selection="yes" units="%" title="Mali PM L2" name="PM L2 Core 1" description="Mali PM L2: PM L2 Core 1."/>
+ </category>
+
+ <category name="Mali-T6xx-MMU_AS" per_cpu="no">
+ <event counter="ARM_Mali-T6xx_MMU_AS_0" display="average" average_selection="yes" units="%" title="Mali MMU Address Space" name="MMU Address Space 0" description="Mali MMU Address Space 0 usage."/>
+ <event counter="ARM_Mali-T6xx_MMU_AS_1" display="average" average_selection="yes" units="%" title="Mali MMU Address Space" name="MMU Address Space 1" description="Mali MMU Address Space 1 usage."/>
+ <event counter="ARM_Mali-T6xx_MMU_AS_2" display="average" average_selection="yes" units="%" title="Mali MMU Address Space" name="MMU Address Space 2" description="Mali MMU Address Space 2 usage."/>
+ <event counter="ARM_Mali-T6xx_MMU_AS_3" display="average" average_selection="yes" units="%" title="Mali MMU Address Space" name="MMU Address Space 3" description="Mali MMU Address Space 3 usage."/>
+ </category>
+
+ <category name="Mali-T6xx-MMU_page_fault" per_cpu="no">
+ <event counter="ARM_Mali-T6xx_MMU_PAGE_FAULT_0" title="Mali MMU Page Fault Add. Space" name="Mali MMU Page Fault Add. Space 0" description="Reports the number of newly allocated pages after a MMU page fault in address space 0."/>
+ <event counter="ARM_Mali-T6xx_MMU_PAGE_FAULT_1" title="Mali MMU Page Fault Add. Space" name="Mali MMU Page Fault Add. Space 1" description="Reports the number of newly allocated pages after a MMU page fault in address space 1."/>
+ <event counter="ARM_Mali-T6xx_MMU_PAGE_FAULT_2" title="Mali MMU Page Fault Add. Space" name="Mali MMU Page Fault Add. Space 2" description="Reports the number of newly allocated pages after a MMU page fault in address space 2."/>
+ <event counter="ARM_Mali-T6xx_MMU_PAGE_FAULT_3" title="Mali MMU Page Fault Add. Space" name="Mali MMU Page Fault Add. Space 3" description="Reports the number of newly allocated pages after a MMU page fault in address space 3."/>
+ </category>
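
Another illustrative aside: the per-core PM counters above are declared with display="average" and units="%", suggesting each sample is a utilisation percentage over the sampled interval. One plausible way to fold them into a single shader-core figure (the values below are made-up examples, not real gatord output):

    # Made-up per-core samples for one interval; the keys match the
    # ARM_Mali-T6xx_PM_SHADER_* counters declared above.
    samples = {
        "ARM_Mali-T6xx_PM_SHADER_0": 82.0,
        "ARM_Mali-T6xx_PM_SHADER_1": 75.5,
        "ARM_Mali-T6xx_PM_SHADER_2": 0.0,   # e.g. core powered down for the whole interval
        "ARM_Mali-T6xx_PM_SHADER_3": 64.0,
    }

    # Each value is already a percentage of the interval, so the mean across the
    # cores present gives an overall utilisation figure for that interval.
    overall = sum(samples.values()) / len(samples)
    print(f"overall shader utilisation: {overall:.1f}%")
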
diff --git a/tools/gator/daemon/events-Mali-T6xx_hw.xml b/tools/gator/daemon/events-Mali-T6xx_hw.xml
new file mode 100644
index 000000000000..825b6687d14d
--- /dev/null
+++ b/tools/gator/daemon/events-Mali-T6xx_hw.xml
@@ -0,0 +1,278 @@
+
+ <category name="Mali-T6xx-JobManager" per_cpu="no">
+ <event counter="ARM_Mali-T6xx_MESSAGES_SENT" title="Mali GPU Job Manager" name="Job Manager messages sent" description="Number of JCB messages sent by the Job Manager."/>
+    <event counter="ARM_Mali-T6xx_MESSAGES_RECEIVED" title="Mali GPU Job Manager" name="Job Manager messages received" description="Number of JCB messages received by the Job Manager."/>
+ <event counter="ARM_Mali-T6xx_GPU_ACTIVE" title="Mali GPU Job Manager" name="GPU active cycles" description="Number of cycles the GPU was active."/>
+ <event counter="ARM_Mali-T6xx_IRQ_ACTIVE" title="Mali GPU Job Manager" name="IRQ active cycles" description="Number of cycles with active interrupts."/>
+
+ <event counter="ARM_Mali-T6xx_JS0_JOBS" title="Mali GPU Job Manager" name="Job Slot 0 jobs complete" description="Number of jobs completed in Job Slot 0."/>
+ <event counter="ARM_Mali-T6xx_JS0_TASKS" title="Mali GPU Job Manager" name="Job Slot 0 tasks complete" description="Number of tasks completed in Job Slot 0."/>
+ <event counter="ARM_Mali-T6xx_JS0_ACTIVE" title="Mali GPU Job Manager" name="Job Slot 0 active" description="Number of cycles Job Slot 0 was active."/>
+ <event counter="ARM_Mali-T6xx_JS0_WAIT_READ" title="Mali GPU Job Manager" name="Job Slot 0 wait read" description="Number of cycles Job Slot 0 stalled waiting for descriptors to be read."/>
+ <event counter="ARM_Mali-T6xx_JS0_WAIT_ISSUE" title="Mali GPU Job Manager" name="Job Slot 0 wait issue" description="Number of cycles Job Slot 0 stalled unable to issue tasks because all available cores were full."/>
+ <event counter="ARM_Mali-T6xx_JS0_WAIT_DEPEND" title="Mali GPU Job Manager" name="Job Slot 0 wait depend" description="Number of cycles Job Slot 0 stalled waiting for task dependencies, and the task could have been issued if not for the dependency."/>
+ <event counter="ARM_Mali-T6xx_JS0_WAIT_FINISH" title="Mali GPU Job Manager" name="Job Slot 0 wait finish" description="Number of cycles Job Slot 0 stalled after finishing the last job in the job chain, waiting for all active tasks to finish."/>
+
+ <event counter="ARM_Mali-T6xx_JS1_JOBS" title="Mali GPU Job Manager" name="Job Slot 1 jobs complete" description="Number of jobs completed in Job Slot 1."/>
+ <event counter="ARM_Mali-T6xx_JS1_TASKS" title="Mali GPU Job Manager" name="Job Slot 1 tasks complete" description="Number of tasks completed in Job Slot 1."/>
+ <event counter="ARM_Mali-T6xx_JS1_ACTIVE" title="Mali GPU Job Manager" name="Job Slot 1 active" description="Number of cycles Job Slot 1 was active."/>
+ <event counter="ARM_Mali-T6xx_JS1_WAIT_READ" title="Mali GPU Job Manager" name="Job Slot 1 wait read" description="Number of cycles Job Slot 1 stalled waiting for descriptors to be read."/>
+ <event counter="ARM_Mali-T6xx_JS1_WAIT_ISSUE" title="Mali GPU Job Manager" name="Job Slot 1 wait issue" description="Number of cycles Job Slot 1 stalled unable to issue tasks because all available cores were full."/>
+ <event counter="ARM_Mali-T6xx_JS1_WAIT_DEPEND" title="Mali GPU Job Manager" name="Job Slot 1 wait depend" description="Number of cycles Job Slot 1 stalled waiting for task dependencies, and the task could have been issued if not for the dependency."/>
+ <event counter="ARM_Mali-T6xx_JS1_WAIT_FINISH" title="Mali GPU Job Manager" name="Job Slot 1 wait finish" description="Number of cycles Job Slot 1 stalled after finishing the last job in the job chain, waiting for all active tasks to finish."/>
+
+ <event counter="ARM_Mali-T6xx_JS2_JOBS" title="Mali GPU Job Manager" name="Job Slot 2 jobs complete" description="Number of jobs completed in Job Slot 2."/>
+ <event counter="ARM_Mali-T6xx_JS2_TASKS" title="Mali GPU Job Manager" name="Job Slot 2 tasks complete" description="Number of tasks completed in Job Slot 2."/>
+ <event counter="ARM_Mali-T6xx_JS2_ACTIVE" title="Mali GPU Job Manager" name="Job Slot 2 active" description="Number of cycles Job Slot 2 was active."/>
+ <event counter="ARM_Mali-T6xx_JS2_WAIT_READ" title="Mali GPU Job Manager" name="Job Slot 2 wait read" description="Number of cycles Job Slot 2 stalled waiting for descriptors to be read."/>
+ <event counter="ARM_Mali-T6xx_JS2_WAIT_ISSUE" title="Mali GPU Job Manager" name="Job Slot 2 wait issue" description="Number of cycles Job Slot 2 stalled unable to issue tasks because all available cores were full."/>
+ <event counter="ARM_Mali-T6xx_JS2_WAIT_DEPEND" title="Mali GPU Job Manager" name="Job Slot 2 wait depend" description="Number of cycles Job Slot 2 stalled waiting for task dependencies, and the task could have been issued if not for the dependency."/>
+ <event counter="ARM_Mali-T6xx_JS2_WAIT_FINISH" title="Mali GPU Job Manager" name="Job Slot 2 wait finish" description="Number of cycles Job Slot 2 stalled after finishing the last job in the job chain, waiting for all active tasks to finish."/>
+<!--
+ <event counter="ARM_Mali-T6xx_JS3_JOBS" title="Mali GPU Job Manager" name="Job Slot 3 jobs complete" description="Number of jobs completed in Job Slot 3."/>
+ <event counter="ARM_Mali-T6xx_JS3_TASKS" title="Mali GPU Job Manager" name="Job Slot 3 tasks complete" description="Number of tasks completed in Job Slot 3."/>
+ <event counter="ARM_Mali-T6xx_JS3_ACTIVE" title="Mali GPU Job Manager" name="Job Slot 3 active" description="Number of cycles Job Slot 3 was active."/>
+ <event counter="ARM_Mali-T6xx_JS3_WAIT_READ" title="Mali GPU Job Manager" name="Job Slot 3 wait read" description="Number of cycles Job Slot 3 stalled waiting for descriptors to be read."/>
+ <event counter="ARM_Mali-T6xx_JS3_WAIT_ISSUE" title="Mali GPU Job Manager" name="Job Slot 3 wait issue" description="Number of cycles Job Slot 3 stalled unable to issue tasks because all available cores were full."/>
+ <event counter="ARM_Mali-T6xx_JS3_WAIT_DEPEND" title="Mali GPU Job Manager" name="Job Slot 3 wait depend" description="Number of cycles Job Slot 3 stalled waiting for task dependencies, and the task could have been issued if not for the dependency."/>
+ <event counter="ARM_Mali-T6xx_JS3_WAIT_FINISH" title="Mali GPU Job Manager" name="Job Slot 3 wait finish" description="Number of cycles Job Slot 3 stalled after finishing the last job in the job chain, waiting for all active tasks to finish."/>
+
+ <event counter="ARM_Mali-T6xx_JS4_JOBS" title="Mali GPU Job Manager" name="Job Slot 4 jobs complete" description="Number of jobs completed in Job Slot 4."/>
+ <event counter="ARM_Mali-T6xx_JS4_TASKS" title="Mali GPU Job Manager" name="Job Slot 4 tasks complete" description="Number of tasks completed in Job Slot 4."/>
+ <event counter="ARM_Mali-T6xx_JS4_ACTIVE" title="Mali GPU Job Manager" name="Job Slot 4 active" description="Number of cycles Job Slot 4 was active."/>
+ <event counter="ARM_Mali-T6xx_JS4_WAIT_READ" title="Mali GPU Job Manager" name="Job Slot 4 wait read" description="Number of cycles Job Slot 4 stalled waiting for descriptors to be read."/>
+ <event counter="ARM_Mali-T6xx_JS4_WAIT_ISSUE" title="Mali GPU Job Manager" name="Job Slot 4 wait issue" description="Number of cycles Job Slot 4 stalled unable to issue tasks because all available cores were full."/>
+ <event counter="ARM_Mali-T6xx_JS4_WAIT_DEPEND" title="Mali GPU Job Manager" name="Job Slot 4 wait depend" description="Number of cycles Job Slot 4 stalled waiting for task dependencies, and the task could have been issued if not for the dependency."/>
+ <event counter="ARM_Mali-T6xx_JS4_WAIT_FINISH" title="Mali GPU Job Manager" name="Job Slot 4 wait finish" description="Number of cycles Job Slot 4 stalled after finishing the last job in the job chain, waiting for all active tasks to finish."/>
+
+ <event counter="ARM_Mali-T6xx_JS5_JOBS" title="Mali GPU Job Manager" name="Job Slot 5 jobs complete" description="Number of jobs completed in Job Slot 5."/>
+ <event counter="ARM_Mali-T6xx_JS5_TASKS" title="Mali GPU Job Manager" name="Job Slot 5 tasks complete" description="Number of tasks completed in Job Slot 5."/>
+ <event counter="ARM_Mali-T6xx_JS5_ACTIVE" title="Mali GPU Job Manager" name="Job Slot 5 active" description="Number of cycles Job Slot 5 was active."/>
+ <event counter="ARM_Mali-T6xx_JS5_WAIT_READ" title="Mali GPU Job Manager" name="Job Slot 5 wait read" description="Number of cycles Job Slot 5 stalled waiting for descriptors to be read."/>
+ <event counter="ARM_Mali-T6xx_JS5_WAIT_ISSUE" title="Mali GPU Job Manager" name="Job Slot 5 wait issue" description="Number of cycles Job Slot 5 stalled unable to issue tasks because all available cores were full."/>
+ <event counter="ARM_Mali-T6xx_JS5_WAIT_DEPEND" title="Mali GPU Job Manager" name="Job Slot 5 wait depend" description="Number of cycles Job Slot 5 stalled waiting for task dependencies, and the task could have been issued if not for the dependency."/>
+ <event counter="ARM_Mali-T6xx_JS5_WAIT_FINISH" title="Mali GPU Job Manager" name="Job Slot 5 wait finish" description="Number of cycles Job Slot 5 stalled after finishing the last job in the job chain, waiting for all active tasks to finish."/>
+
+ <event counter="ARM_Mali-T6xx_JS6_JOBS" title="Mali GPU Job Manager" name="Job Slot 6 jobs complete" description="Number of jobs completed in Job Slot 6."/>
+ <event counter="ARM_Mali-T6xx_JS6_TASKS" title="Mali GPU Job Manager" name="Job Slot 6 tasks complete" description="Number of tasks completed in Job Slot 6."/>
+ <event counter="ARM_Mali-T6xx_JS6_ACTIVE" title="Mali GPU Job Manager" name="Job Slot 6 active" description="Number of cycles Job Slot 6 was active."/>
+ <event counter="ARM_Mali-T6xx_JS6_WAIT_READ" title="Mali GPU Job Manager" name="Job Slot 6 wait read" description="Number of cycles Job Slot 6 stalled waiting for descriptors to be read."/>
+ <event counter="ARM_Mali-T6xx_JS6_WAIT_ISSUE" title="Mali GPU Job Manager" name="Job Slot 6 wait issue" description="Number of cycles Job Slot 6 stalled unable to issue tasks because all available cores were full."/>
+ <event counter="ARM_Mali-T6xx_JS6_WAIT_DEPEND" title="Mali GPU Job Manager" name="Job Slot 6 wait depend" description="Number of cycles Job Slot 6 stalled waiting for task dependencies, and the task could have been issued if not for the dependency."/>
+ <event counter="ARM_Mali-T6xx_JS6_WAIT_FINISH" title="Mali GPU Job Manager" name="Job Slot 6 wait finish" description="Number of cycles Job Slot 6 stalled after finishing the last job in the job chain, waiting for all active tasks to finish."/>
+-->
+ </category>
+
+ <category name="Mali-T6xx-Tiler" per_cpu="no">
+<!--
+ <event counter="ARM_Mali-T6xx_JOBS_PROCESSED" title="Mali GPU Tiler" name="Jobs processed" description="Number of jobs processed."/>
+-->
+ <event counter="ARM_Mali-T6xx_TRIANGLES" title="Mali GPU Tiler" name="Triangles processed" description="Number of triangles processed."/>
+ <event counter="ARM_Mali-T6xx_QUADS" title="Mali GPU Tiler" name="Quads processed" description="Number of quads processed."/>
+ <event counter="ARM_Mali-T6xx_POLYGONS" title="Mali GPU Tiler" name="Polygons processed" description="Number of polygons processed."/>
+ <event counter="ARM_Mali-T6xx_POINTS" title="Mali GPU Tiler" name="Points processed" description="Number of points processed."/>
+
+ <event counter="ARM_Mali-T6xx_LINES" title="Mali GPU Tiler" name="Lines processed" description="Number of lines processed."/>
+ <event counter="ARM_Mali-T6xx_VCACHE_HIT" title="Mali GPU Tiler" name="Vertex cache hits" description="Number of vertex cache hits."/>
+ <event counter="ARM_Mali-T6xx_VCACHE_MISS" title="Mali GPU Tiler" name="Vertex cache misses" description="Number of vertex cache misses."/>
+ <event counter="ARM_Mali-T6xx_FRONT_FACING" title="Mali GPU Tiler" name="Front facing primitives" description="Number of front facing primitives."/>
+
+ <event counter="ARM_Mali-T6xx_BACK_FACING" title="Mali GPU Tiler" name="Back facing primitives" description="Number of back facing primitives."/>
+ <event counter="ARM_Mali-T6xx_PRIM_VISIBLE" title="Mali GPU Tiler" name="Visible primitives" description="Number of visible primitives."/>
+ <event counter="ARM_Mali-T6xx_PRIM_CULLED" title="Mali GPU Tiler" name="Culled primitives" description="Number of culled primitives."/>
+ <event counter="ARM_Mali-T6xx_PRIM_CLIPPED" title="Mali GPU Tiler" name="Clipped primitives" description="Number of clipped primitives."/>
+
+ <event counter="ARM_Mali-T6xx_LEVEL0" title="Mali GPU Tiler" name="Level 0 primitives" description="Number of primitives tiled to hierarchy level 0."/>
+ <event counter="ARM_Mali-T6xx_LEVEL1" title="Mali GPU Tiler" name="Level 1 primitives" description="Number of primitives tiled to hierarchy level 1."/>
+ <event counter="ARM_Mali-T6xx_LEVEL2" title="Mali GPU Tiler" name="Level 2 primitives" description="Number of primitives tiled to hierarchy level 2."/>
+ <event counter="ARM_Mali-T6xx_LEVEL3" title="Mali GPU Tiler" name="Level 3 primitives" description="Number of primitives tiled to hierarchy level 3."/>
+
+ <event counter="ARM_Mali-T6xx_LEVEL4" title="Mali GPU Tiler" name="Level 4 primitives" description="Number of primitives tiled to hierarchy level 4."/>
+ <event counter="ARM_Mali-T6xx_LEVEL5" title="Mali GPU Tiler" name="Level 5 primitives" description="Number of primitives tiled to hierarchy level 5."/>
+ <event counter="ARM_Mali-T6xx_LEVEL6" title="Mali GPU Tiler" name="Level 6 primitives" description="Number of primitives tiled to hierarchy level 6."/>
+ <event counter="ARM_Mali-T6xx_LEVEL7" title="Mali GPU Tiler" name="Level 7 primitives" description="Number of primitives tiled to hierarchy level 7."/>
+
+ <event counter="ARM_Mali-T6xx_COMMAND_1" title="Mali GPU Tiler" name="Primitives tiled to 1 command" description="Number of primitives producing 1 command in tile list."/>
+ <event counter="ARM_Mali-T6xx_COMMAND_2" title="Mali GPU Tiler" name="Primitives tiled to 2 commands" description="Number of primitives producing 2 commands in tile list."/>
+ <event counter="ARM_Mali-T6xx_COMMAND_3" title="Mali GPU Tiler" name="Primitives tiled to 3 commands" description="Number of primitives producing 3 commands in tile list."/>
+ <event counter="ARM_Mali-T6xx_COMMAND_4" title="Mali GPU Tiler" name="Primitives tiled to 4 commands" description="Number of primitives producing 4 commands in tile list."/>
+
+ <event counter="ARM_Mali-T6xx_COMMAND_4_7" title="Mali GPU Tiler" name="Primitives tiled to 4-7 commands" description="Number of primitives producing 4-7 commands in tile list."/>
+ <event counter="ARM_Mali-T6xx_COMMAND_8_15" title="Mali GPU Tiler" name="Primitives tiled to 8-15 commands" description="Number of primitives producing 8-15 commands in tile list."/>
+ <event counter="ARM_Mali-T6xx_COMMAND_16_63" title="Mali GPU Tiler" name="Primitives tiled to 16-63 commands" description="Number of primitives producing 16-63 commands in tile list."/>
+ <event counter="ARM_Mali-T6xx_COMMAND_64" title="Mali GPU Tiler" name="Primitives tiled to >= 64 commands" description="Number of primitives producing >= 64 commands in tile list."/>
+
+ <event counter="ARM_Mali-T6xx_COMPRESS_IN" title="Mali GPU Tiler" name="Commands entering compressor" description="Number of commands entering compressor."/>
+ <event counter="ARM_Mali-T6xx_COMPRESS_OUT" title="Mali GPU Tiler" name="Compressed commands from compressor" description="Number of compressed commands produced by compressor."/>
+ <event counter="ARM_Mali-T6xx_COMPRESS_FLUSH" title="Mali GPU Tiler" name="Compressor state flushes" description="Number of compressor state flushes."/>
+ <event counter="ARM_Mali-T6xx_TIMESTAMPS" title="Mali GPU Tiler" name="Timestamps emitted" description="Number of timestamps emitted."/>
+
+ <event counter="ARM_Mali-T6xx_PCACHE_HIT" title="Mali GPU Tiler" name="Pointer-cache hits" description="Number of pointer-cache hits."/>
+ <event counter="ARM_Mali-T6xx_PCACHE_MISS" title="Mali GPU Tiler" name="Pointer-cache misses" description="Number of pointer-cache misses."/>
+    <event counter="ARM_Mali-T6xx_PCACHE_LINE" title="Mali GPU Tiler" name="Pointer-cache line-fills" description="Number of pointer-cache line-fills."/>
+ <event counter="ARM_Mali-T6xx_PCACHE_STALL" title="Mali GPU Tiler" name="Pointer-cache stalls" description="Number of pointer-cache stalls."/>
+
+ <event counter="ARM_Mali-T6xx_WRBUF_HIT" title="Mali GPU Tiler" name="Write-buffer hits" description="Number of write-buffer hits."/>
+ <event counter="ARM_Mali-T6xx_WRBUF_MISS" title="Mali GPU Tiler" name="Write-buffer misses" description="Number of write-buffer misses."/>
+ <event counter="ARM_Mali-T6xx_WRBUF_LINE" title="Mali GPU Tiler" name="Write-buffer full-lines written out" description="Number of complete cache lines written out from the write buffer."/>
+ <event counter="ARM_Mali-T6xx_WRBUF_PARTIAL" title="Mali GPU Tiler" name="Write-buffer partial-lines written out" description="Number of incomplete cache lines written out from the write buffer."/>
+
+ <event counter="ARM_Mali-T6xx_WRBUF_STALL" title="Mali GPU Tiler" name="Write-buffer stalls" description="Number of write-buffer stalls."/>
+ <event counter="ARM_Mali-T6xx_ACTIVE" title="Mali GPU Tiler" name="Tiler active cycles" description="Number of cycles the tiler is active."/>
+    <event counter="ARM_Mali-T6xx_LOADING_DESC" title="Mali GPU Tiler" name="Cycles loading descriptors" description="Number of cycles spent loading descriptors while the tiler frontend is otherwise idle."/>
+ <event counter="ARM_Mali-T6xx_INDEX_WAIT" title="Mali GPU Tiler" name="Cycles index fetch miss" description="Number of cycles the vertex cache could accept an index, but due to a miss, the index fetcher could not provide one."/>
+
+ <event counter="ARM_Mali-T6xx_INDEX_RANGE_WAIT" title="Mali GPU Tiler" name="Cycles index out of range" description="Number of cycles the index fetcher provides an index, but the index is outside the range of currently shaded vertices. Only relevant for fused jobs."/>
+ <event counter="ARM_Mali-T6xx_VERTEX_WAIT" title="Mali GPU Tiler" name="Cycles vertex cache miss" description="Number of cycles the primitive assembly could accept a vertex, but due to a vertex cache miss, the vertex fetcher is unable to provide a vertex."/>
+ <event counter="ARM_Mali-T6xx_PCACHE_WAIT" title="Mali GPU Tiler" name="Cycles pointer cache miss" description="Number of cycles the command compressor could accept a command, but due to a cache miss, the pointer cache is unable to provide the polygon list pointer."/>
+ <event counter="ARM_Mali-T6xx_WRBUF_WAIT" title="Mali GPU Tiler" name="Cycles no write buffer entry" description="Number of cycles a command could be written to the write buffer, but no write buffer entry is available."/>
+
+    <event counter="ARM_Mali-T6xx_BUS_READ" title="Mali GPU Tiler" name="Data beats from L2 cache read" description="Number of data beats (64-bit) read from the L2 cache."/>
+    <event counter="ARM_Mali-T6xx_BUS_WRITE" title="Mali GPU Tiler" name="Data beats from L2 cache write" description="Number of data beats (64-bit) written to the L2 cache."/>
+
+ <event counter="ARM_Mali-T6xx_UTLB_STALL" title="Mali GPU Tiler" name="uTLB cycles stall" description="uTLB: Cycles with stall on input AXI address channel."/>
+
+ <event counter="ARM_Mali-T6xx_UTLB_REPLAY_MISS" title="Mali GPU Tiler" name="uTLB replay buffer cache misses" description="uTLB: Number of cache misses on accesses from replay buffer."/>
+    <event counter="ARM_Mali-T6xx_UTLB_REPLAY_FULL" title="Mali GPU Tiler" name="uTLB cycles replay buffer full" description="uTLB: Number of cycles replay buffer is full."/>
+ <event counter="ARM_Mali-T6xx_UTLB_NEW_MISS" title="Mali GPU Tiler" name="uTLB cache misses on new request" description="uTLB: Number of cache misses on new requests."/>
+ <event counter="ARM_Mali-T6xx_UTLB_HIT" title="Mali GPU Tiler" name="uTLB cache hits" description="uTLB: Number of cache hits."/>
+ </category>
+
+ <category name="Mali-T6xx-ShaderCore" per_cpu="no">
+<!--
+    <event counter="ARM_Mali-T6xx_SHADER_CORE_ACTIVE" title="Mali GPU Shader Core" name="Shader core active cycles" description="Number of cycles the shader core was active."/>
+-->
+ <event counter="ARM_Mali-T6xx_FRAG_ACTIVE" title="Fragment" name="Fragment active cycles" description="Number of cycles fragment processing was active."/>
+ <event counter="ARM_Mali-T6xx_FRAG_PRIMATIVES" title="Fragment" name="Primitives" description="Number of primitives."/>
+ <event counter="ARM_Mali-T6xx_FRAG_PRIMATIVES_DROPPED" title="Fragment" name="Primitives dropped" description="Primitives dropped due to Polygon List Reader coverage."/>
+ <event counter="ARM_Mali-T6xx_FRAG_CYCLE_DESC" title="Fragment" name="Stall: Waiting for descriptors" description="Number of cycles spent waiting for descriptors / 2."/>
+
+ <event counter="ARM_Mali-T6xx_FRAG_CYCLES_PLR" title="Fragment" name="Stall: Waiting for PLR" description="Cycles spent waiting for Polygon List Reader."/>
+ <event counter="ARM_Mali-T6xx_FRAG_CYCLES_VERT" title="Fragment" name="Stall: Waiting for vertices" description="Cycles spent waiting for vertices."/>
+ <event counter="ARM_Mali-T6xx_FRAG_CYCLES_TRISETUP" title="Fragment" name="Stall: Waiting for trisetup" description="Cycles spent waiting for trisetup."/>
+ <event counter="ARM_Mali-T6xx_FRAG_CYCLES_RAST" title="Fragment" name="Stall: Waiting for rasterizer" description="Cycles spent waiting for rasterizer."/>
+
+ <event counter="ARM_Mali-T6xx_FRAG_THREADS" title="Fragment" name="Fragment threads started" description="Number of fragment threads started."/>
+ <event counter="ARM_Mali-T6xx_FRAG_DUMMY_THREADS" title="Fragment" name="Dummy threads started" description="Number of dummy threads started."/>
+ <event counter="ARM_Mali-T6xx_FRAG_QUADS_RAST" title="Fragment" name="Quads rasterized" description="Number of quads rasterized."/>
+ <event counter="ARM_Mali-T6xx_FRAG_QUADS_EZS_TEST" title="Fragment" name="Quads doing early ZS test" description="Number of quads doing early ZS test."/>
+
+ <event counter="ARM_Mali-T6xx_FRAG_QUADS_EZS_KILLED" title="Fragment" name="Quads killed early ZS test" description="Number of quads killed in early ZS test."/>
+    <event counter="ARM_Mali-T6xx_FRAG_QUADS_LZS_TEST" title="Fragment" name="Threads doing late ZS test" description="Number of threads doing late ZS test."/>
+ <event counter="ARM_Mali-T6xx_FRAG_QUADS_LZS_KILLED" title="Fragment" name="Threads killed late ZS test" description="Number of threads killed in late ZS test."/>
+    <event counter="ARM_Mali-T6xx_FRAG_CYCLE_NO_TILE" title="Fragment" name="Cycles waiting for physical tile buffers" description="Number of cycles spent waiting while no physical tile buffers were available for rendering."/>
+
+ <event counter="ARM_Mali-T6xx_FRAG_NUM_TILES" title="Fragment" name="Tiles rendered" description="Number of tiles rendered."/>
+ <event counter="ARM_Mali-T6xx_FRAG_TRANS_ELIM" title="Fragment" name="Transaction elimination signature matches" description="Transaction elimination signature matches."/>
+ <event counter="ARM_Mali-T6xx_COMPUTE_ACTIVE" title="Compute" name="Compute active cycles" description="Number of cycles compute processing was active."/>
+ <event counter="ARM_Mali-T6xx_COMPUTE_TASKS" title="Compute" name="Tasks" description="Number of tasks."/>
+
+ <event counter="ARM_Mali-T6xx_COMPUTE_THREADS" title="Compute" name="Compute threads started" description="Number of compute threads started."/>
+ <event counter="ARM_Mali-T6xx_COMPUTE_CYCLES_DESC" title="Compute" name="Stall: Waiting for descriptors" description="Number of cycles spent waiting for descriptors."/>
+ <event counter="ARM_Mali-T6xx_TRIPIPE_ACTIVE" title="Tripipe" name="Tripipe active cycles" description="Number of cycles the Tripipe was active."/>
+    <event counter="ARM_Mali-T6xx_ARITH_WORDS" title="Arithmetic Pipeline" name="Instructions per pipe" description="Number of instruction words in the arithmetic pipelines, divided by the number of arithmetic pipelines."/>
+
+    <event counter="ARM_Mali-T6xx_ARITH_CYCLES_REG" title="Arithmetic Pipeline" name="Stall: Register scheduling" description="Number of cycles lost in the arithmetic pipelines due to register scheduling, divided by the number of arithmetic pipelines."/>
+    <event counter="ARM_Mali-T6xx_ARITH_CYCLES_L0" title="Arithmetic Pipeline" name="Stall: Icache miss per pipe" description="Number of cycles lost in the arithmetic pipelines due to L0 instruction cache misses, divided by the number of arithmetic pipelines."/>
+    <event counter="ARM_Mali-T6xx_ARITH_FRAG_DEPEND" title="Arithmetic Pipeline" name="Stall: Fragment dependency fails per pipe" description="Number of fragment dependency check failures in the arithmetic pipelines, divided by the number of arithmetic pipelines."/>
+ <event counter="ARM_Mali-T6xx_LS_WORDS" title="Load/Store Pipeline" name="Load/Store instruction words completed" description="Number of instruction words completed in the Load/Store pipeline."/>
+
+ <event counter="ARM_Mali-T6xx_LS_ISSUES" title="Load/Store Pipeline" name="Full pipeline issues" description="Number of full pipeline issues in the Load/Store pipeline."/>
+ <event counter="ARM_Mali-T6xx_LS_RESTARTS" title="Load/Store Pipeline" name="Stall: Unpairable instruction" description="Number of restarts due to unpairable instructions in the Load/Store pipeline."/>
+ <event counter="ARM_Mali-T6xx_LS_REISSUES_MISS" title="Load/Store Pipeline" name="Stall: Cache misses" description="Number of full pipe re-issues due to cache misses or TLB misses in the Load/Store pipeline."/>
+ <event counter="ARM_Mali-T6xx_LS_REISSUES_VD" title="Load/Store Pipeline" name="Stall: Varying misses" description="Number of full pipe re-issues due to varying data being unavailable in the Load/Store pipeline."/>
+
+ <event counter="ARM_Mali-T6xx_LS_REISSUE_ATTRIB_MISS" title="Load/Store Pipeline" name="Stall: Attribute cache misses" description="Number of full pipe re-issues due to attribute cache misses in the Load/Store pipeline."/>
+    <event counter="ARM_Mali-T6xx_LS_NO_WB" title="Load/Store Pipeline" name="Writeback not used, pause buffer used" description="Writeback not used and data placed into the pause buffer in the Load/Store pipeline."/>
+ <event counter="ARM_Mali-T6xx_TEX_WORDS" title="Texturing Pipeline" name="Texturing pipeline instruction words completed" description="Number of instruction words completed in the texturing pipeline."/>
+ <event counter="ARM_Mali-T6xx_TEX_BUBBLES" title="Texturing Pipeline" name="Stall: Waiting for barrier" description="Number of bubbles with threads waiting for barrier in the texturing pipeline."/>
+
+ <event counter="ARM_Mali-T6xx_TEX_WORDS_L0" title="Texturing Pipeline" name="Stall: Icache misses" description="Number of instruction words in the texturing pipeline restart loop 1 due to L0 instruction cache miss."/>
+ <event counter="ARM_Mali-T6xx_TEX_WORDS_DESC" title="Texturing Pipeline" name="Stall: Descriptor misses" description="Number of instruction words in the texturing pipeline restart loop 1 due to descriptor misses."/>
+ <event counter="ARM_Mali-T6xx_TEX_THREADS" title="Texturing Pipeline" name="Threads in loop 2" description="Number of threads in the texturing pipeline through loop 2 address calculation."/>
+ <event counter="ARM_Mali-T6xx_TEX_RECIRC_FMISS" title="Texturing Pipeline" name="Stall: Texture cache misses" description="Number of instructions in the texturing pipeline recirculated due to complete texture cache miss."/>
+
+ <event counter="ARM_Mali-T6xx_TEX_RECIRC_DESC" title="Texturing Pipeline" name="Stall: Surface Descriptor misses" description="Number of instructions in the texturing pipeline recirculated due to surface descriptor miss."/>
+ <event counter="ARM_Mali-T6xx_TEX_RECIRC_MULTI" title="Texturing Pipeline" name="Reissue: multipass" description="Number of instructions in the texturing pipeline recirculated due to multipass."/>
+ <event counter="ARM_Mali-T6xx_TEX_RECIRC_PMISS" title="Texturing Pipeline" name="Stall: Partial misses" description="Number of instructions in the texturing pipeline recirculated due to partial cache miss."/>
+ <event counter="ARM_Mali-T6xx_TEX_RECIRC_CONF" title="Texturing Pipeline" name="Stall: Bank conflict" description="Number of instructions in the texturing pipeline recirculated due to texture cache bank conflict."/>
+
+ <event counter="ARM_Mali-T6xx_LSC_READ_HITS" title="Load/Store Cache" name="Read hits" description="Number of read hits in the Load/Store Cache."/>
+ <event counter="ARM_Mali-T6xx_LSC_READ_MISSES" title="Load/Store Cache" name="Read misses" description="Number of read misses in the Load/Store Cache."/>
+ <event counter="ARM_Mali-T6xx_LSC_WRITE_HITS" title="Load/Store Cache" name="Write hits" description="Number of write hits in the Load/Store Cache."/>
+ <event counter="ARM_Mali-T6xx_LSC_WRITE_MISSES" title="Load/Store Cache" name="Write misses" description="Number of write misses in the Load/Store Cache."/>
+
+ <event counter="ARM_Mali-T6xx_LSC_ATOMIC_HITS" title="Load/Store Cache" name="Atomic hits" description="Number of atomic hits in the Load/Store Cache."/>
+ <event counter="ARM_Mali-T6xx_LSC_ATOMIC_MISSES" title="Load/Store Cache" name="Atomic misses" description="Number of atomic misses in the Load/Store Cache."/>
+ <event counter="ARM_Mali-T6xx_LSC_LINE_FETCHES" title="Load/Store Cache" name="Line fetches" description="Number of line fetches in the Load/Store Cache."/>
+ <event counter="ARM_Mali-T6xx_LSC_DIRTY_LINE" title="Load/Store Cache" name="Dirty line evictions" description="Number of dirty line evictions in the Load/Store Cache."/>
+
+ <event counter="ARM_Mali-T6xx_LSC_SNOOPS" title="Load/Store Cache" name="Snoops in LSC" description="Snoops in the Load/Store Cache."/>
+ <event counter="ARM_Mali-T6xx_AXI_TLB_STALL" title="AXI uTLB" name="Stall: TLB AXI address channel" description="AXI address channel stall generated."/>
+ <event counter="ARM_Mali-T6xx_AXI_TLB_MIESS" title="AXI uTLB" name="Stall: TLB Cache misses" description="New request cache miss."/>
+ <event counter="ARM_Mali-T6xx_AXI_TLB_TRANSACTION" title="AXI uTLB" name="AXI transactions" description="Number of AXI transactions."/>
+
+ <event counter="ARM_Mali-T6xx_LS_TLB_MISS" title="LS uTLB" name="Cache misses per interface" description="Number of cache misses / number of interfaces."/>
+ <event counter="ARM_Mali-T6xx_LS_TLB_HIT" title="LS uTLB" name="Cache hits per interface" description="Number of cache hits / number of interfaces."/>
+ <event counter="ARM_Mali-T6xx_AXI_BEATS_READ" title="AXI" name="Beats read" description="Number of beats read."/>
+ <event counter="ARM_Mali-T6xx_AXI_BEATS_WRITTEN" title="AXI" name="Beats written" description="Number of beats written."/>
+ </category>
+
+ <category name="Mali-T6xx-L2AndMMU" per_cpu="no">
+ <event counter="ARM_Mali-T6xx_MMU_TABLE_WALK" title="MMU" name="Page table walks" description="Number of page table walks started."/>
+ <event counter="ARM_Mali-T6xx_MMU_REPLAY_MISS" title="MMU" name="Cache misses from replay buffer" description="Number of cache misses on accesses from replay buffer."/>
+ <event counter="ARM_Mali-T6xx_MMU_REPLAY_FULL" title="MMU" name="MMU full replay buffer (cycles)" description="Number of cycles replay buffer is full."/>
+ <event counter="ARM_Mali-T6xx_MMU_NEW_MISS" title="MMU" name="Cache misses on new requests" description="Number of cache misses on new requests."/>
+
+ <event counter="ARM_Mali-T6xx_MMU_HIT" title="MMU" name="MMU cache hits" description="Number of cache hits."/>
+
+ <event counter="ARM_Mali-T6xx_UTLB_STALL" title="uTLB" name="Stall on input AXI (cycles)" description="Cycles with stall on input AXI address channel."/>
+ <event counter="ARM_Mali-T6xx_UTLB_REPLAY_MISS" title="uTLB" name="Cache misses from replay buffer" description="Number of cache misses on accesses from replay buffer."/>
+ <event counter="ARM_Mali-T6xx_UTLB_REPLAY_FULL" title="uTLB" name="uTLB full replay buffer (cycles)" description="Number of cycles replay buffer is full."/>
+ <event counter="ARM_Mali-T6xx_UTLB_NEW_MISS" title="uTLB" name="Cache misses on new requests" description="Number of cache misses on new requests."/>
+
+ <event counter="ARM_Mali-T6xx_UTLB_HIT" title="uTLB" name="uTLB cache hits" description="Number of cache hits."/>
+
+ <event counter="ARM_Mali-T6xx_L2_WRITE_BEATS" title="L2 Cache" name="External bus write beats" description="External bus write beats."/>
+ <event counter="ARM_Mali-T6xx_L2_READ_BEATS" title="L2 Cache" name="External bus read beats" description="External bus read beats."/>
+
+ <event counter="ARM_Mali-T6xx_L2_ANY_LOOKUP" title="L2 Cache" name="Transaction/snoop tag lookups" description="A transaction or a snoop response completes the tag lookup."/>
+ <event counter="ARM_Mali-T6xx_L2_READ_LOOKUP" title="L2 Cache" name="Read transaction tag lookups" description="Any read transaction completes the tag lookup."/>
+ <event counter="ARM_Mali-T6xx_L2_SREAD_LOOKUP" title="L2 Cache" name="Shareable read transaction tag lookups" description="Shareable read transaction tag lookup."/>
+ <event counter="ARM_Mali-T6xx_L2_READ_REPLAY" title="L2 Cache" name="Replayed read transactions" description="Any read transaction is replayed (not serialized)."/>
+
+ <event counter="ARM_Mali-T6xx_L2_READ_SNOOP" title="L2 Cache" name="Read transaction snoops" description="Read transaction issues a snoop."/>
+ <event counter="ARM_Mali-T6xx_L2_READ_HIT" title="L2 Cache" name="L2 Cache read hits" description="A read hits in the L2 Cache."/>
+ <event counter="ARM_Mali-T6xx_L2_CLEAN_MISS" title="L2 Cache" name="CleanUnique misses" description="A CleanUnique misses in the L2 Cache, line allocated without a linefill."/>
+ <event counter="ARM_Mali-T6xx_L2_WRITE_LOOKUP" title="L2 Cache" name="Write transaction tag lookups" description="Any write transaction completes the tag lookup."/>
+
+ <event counter="ARM_Mali-T6xx_L2_SWRITE_LOOKUP" title="L2 write" name="Shareable write transaction tag lookup" description="Shareable write transaction tag lookup."/>
+ <event counter="ARM_Mali-T6xx_L2_WRITE_REPLAY" title="L2 write" name="Replayed write transaction" description="Any write transaction is replayed (not serialized)."/>
+ <event counter="ARM_Mali-T6xx_L2_WRITE_SNOOP" title="L2 write" name="Write transaction snoops" description="Write transaction issues a snoop."/>
+ <event counter="ARM_Mali-T6xx_L2_WRITE_HIT" title="L2 write" name="L2 write hits" description="A write hits in the L2 Cache."/>
+
+ <event counter="ARM_Mali-T6xx_L2_EXT_READ_FULL" title="L2 Cache" name="External read attempted with full BIU" description="External read attempted, but the BIU read buffer is full."/>
+    <event counter="ARM_Mali-T6xx_L2_EXT_READ_HALF" title="L2 Cache" name="External read requested with more than half full BIU" description="External read requested when the BIU read buffer is more than half full."/>
+ <event counter="ARM_Mali-T6xx_L2_EXT_WRITE_FULL" title="L2 Cache" name="External write attempted with full BIU" description="External write attempted, but the BIU write buffer is full."/>
+    <event counter="ARM_Mali-T6xx_L2_EXT_WRITE_HALF" title="L2 Cache" name="External write requested with more than half full BIU" description="External write requested when the BIU write buffer is more than half full."/>
+
+ <event counter="ARM_Mali-T6xx_L2_EXT_READ" title="L2 Cache" name="External read" description="External read."/>
+ <event counter="ARM_Mali-T6xx_L2_EXT_READ_LINE" title="L2 Cache" name="External read - linefill" description="External read - linefill."/>
+    <event counter="ARM_Mali-T6xx_L2_EXT_WRITE" title="L2 Cache" name="External write" description="External write."/>
+    <event counter="ARM_Mali-T6xx_L2_EXT_WRITE_LINE" title="L2 Cache" name="External write - writeback" description="External write - writeback."/>
+
+ <event counter="ARM_Mali-T6xx_L2_EXT_WRITE_SMALL" title="L2 Cache" name="External write - burst size less than 64B" description="External write - burst size less than 64B."/>
+ <event counter="ARM_Mali-T6xx_L2_EXT_BARRIER" title="L2 Cache" name="External barrier" description="External barrier (even if disabled)."/>
+ <event counter="ARM_Mali-T6xx_L2_EXT_AR_STALL" title="L2 Cache" name="External interconnect stalls (AR)" description="A valid read address (AR) is stalled by the external interconnect."/>
+ <event counter="ARM_Mali-T6xx_L2_EXT_R_BUF_FULL" title="L2 Cache" name="External read stalls (full buffer)" description="External read stalled due to the response buffer being full."/>
+
+ <event counter="ARM_Mali-T6xx_L2_EXT_RD_BUF_FULL" title="L2 Cache" name="External read not allocated" description="External read could not allocate an entry in the read data buffer."/>
+ <event counter="ARM_Mali-T6xx_L2_EXT_R_RAW" title="L2 Cache" name="External read stalled (RAW)" description="External read stalled due to the RAW hazard."/>
+ <event counter="ARM_Mali-T6xx_L2_EXT_W_STALL" title="L2 Cache" name="External interconnect stalls (W channel)" description="Valid write data (W channel) is stalled by the external interconnect."/>
+ <event counter="ARM_Mali-T6xx_L2_EXT_W_BUF_FULL" title="L2 Cache" name="External write stalls (full buffer)" description="External write stalled due to the response buffer being full."/>
+
+ <event counter="ARM_Mali-T6xx_L2_EXT_R_W_HAZARD" title="L2 Cache" name="External read stalled (WAW or WAR)" description="External read stalled due to the WAW or WAR hazard."/>
+ <event counter="ARM_Mali-T6xx_L2_TAG_HAZARD" title="L2 Cache" name="Replayed transactions due to tag hazard" description="A transaction will be replayed due to a tag hazard."/>
+ <event counter="ARM_Mali-T6xx_L2_SNOOP_FULL" title="L2 Cache" name="Cycles with full snoop buffer" description="Counts every cycle when the snoop buffer is full."/>
+ <event counter="ARM_Mali-T6xx_L2_REPLAY_FULL" title="L2 Cache" name="Cycles with full replay buffer" description="Counts every cycle when the replay buffer is full."/>
+ </category>
diff --git a/tools/gator/daemon/events-Scorpion.xml b/tools/gator/daemon/events-Scorpion.xml
new file mode 100644
index 000000000000..1642e85c609f
--- /dev/null
+++ b/tools/gator/daemon/events-Scorpion.xml
@@ -0,0 +1,107 @@
+ <counter_set name="Scorpion_cnt" count="4"/>
+ <category name="Scorpion" counter_set="Scorpion_cnt" per_cpu="yes" supports_event_based_sampling="yes">
+ <event counter="Scorpion_ccnt" title="Clock" name="Cycles" display="hertz" units="Hz" average_selection="yes" description="The number of core clock cycles"/>
+ <event event="0x00" title="Software" name="Increment" description="Incremented only on writes to the Software Increment Register"/>
+ <event event="0x01" title="Cache" name="Instruction refill" description="Instruction fetch that causes a refill of at least the level of instruction or unified cache closest to the processor"/>
+ <event event="0x02" title="Cache" name="Inst TLB refill" description="Instruction fetch that causes a TLB refill of at least the level of TLB closest to the processor"/>
+ <event event="0x03" title="Cache" name="Data refill" description="Memory Read or Write operation that causes a refill of at least the level of data or unified cache closest to the processor"/>
+ <event event="0x04" title="Cache" name="Data access" description="Memory Read or Write operation that causes a cache access to at least the level of data or unified cache closest to the processor"/>
+ <event event="0x05" title="Cache" name="Data TLB refill" description="Memory Read or Write operation that causes a TLB refill of at least the level of TLB closest to the processor"/>
+ <event event="0x06" title="Instruction" name="Memory read" description="Memory-reading instruction architecturally executed"/>
+ <event event="0x07" title="Instruction" name="Memory write" description="Memory-writing instruction architecturally executed"/>
+ <event event="0x08" title="Instruction" name="Executed" description="Instruction architecturally executed"/>
+ <event event="0x09" title="Exception" name="Taken" description="Exceptions taken"/>
+ <event event="0x0a" title="Exception" name="Return" description="Exception return architecturally executed"/>
+ <event event="0x0b" title="Instruction" name="CONTEXTIDR" description="Instruction that writes to the CONTEXTIDR architecturally executed"/>
+ <event event="0x0c" title="Program Counter" name="SW change" description="Software change of PC, except by an exception, architecturally executed"/>
+ <event event="0x0d" title="Branch" name="Immediate" description="Immediate branch architecturally executed"/>
+ <event event="0x0e" title="Branch" name="Procedure Return" description="Procedure return architecturally executed (not by exceptions)"/>
+ <event event="0x0f" title="Memory" name="Unaligned access" description="Unaligned access architecturally executed"/>
+ <event event="0x10" title="Branch" name="Mispredicted" description="Branch mispredicted or not predicted"/>
+ <event event="0x12" title="Branch" name="Potential prediction" description="Branch or other change in program flow that could have been predicted by the branch prediction resources of the processor"/>
+ <event event="0x4c" title="Scorpion" name="ICACHE_EXPL_INV" description="I-cache explicit invalidates"/>
+ <event event="0x4d" title="Scorpion" name="ICACHE_MISS" description="I-cache misses"/>
+ <event event="0x4e" title="Scorpion" name="ICACHE_ACCESS" description="I-cache accesses"/>
+ <event event="0x4f" title="Scorpion" name="ICACHE_CACHEREQ_L2" description="I-cache cacheable requests to L2"/>
+ <event event="0x50" title="Scorpion" name="ICACHE_NOCACHE_L2" description="I-cache non-cacheable requests to L2"/>
+ <event event="0x51" title="Scorpion" name="HIQUP_NOPED" description="Conditional instructions HIQUPs NOPed"/>
+ <event event="0x52" title="Scorpion" name="DATA_ABORT" description="Interrupts and Exceptions Data Abort"/>
+ <event event="0x53" title="Scorpion" name="IRQ" description="Interrupts and Exceptions IRQ"/>
+ <event event="0x54" title="Scorpion" name="FIQ" description="Interrupts and Exceptions FIQ"/>
+ <event event="0x55" title="Scorpion" name="ALL_EXCPT" description="Interrupts and Exceptions All interrupts"/>
+ <event event="0x56" title="Scorpion" name="UNDEF" description="Interrupts and Exceptions Undefined"/>
+ <event event="0x57" title="Scorpion" name="SVC" description="Interrupts and Exceptions SVC"/>
+ <event event="0x58" title="Scorpion" name="SMC" description="Interrupts and Exceptions SMC"/>
+ <event event="0x59" title="Scorpion" name="PREFETCH_ABORT" description="Interrupts and Exceptions Prefetch Abort"/>
+ <event event="0x5a" title="Scorpion" name="INDEX_CHECK" description="Interrupts and Exceptions Index Check"/>
+ <event event="0x5b" title="Scorpion" name="NULL_CHECK" description="Interrupts and Exceptions Null Check"/>
+ <event event="0x5c" title="Scorpion" name="EXPL_ICIALLU" description="I-cache and BTAC Invalidates Explicit ICIALLU"/>
+ <event event="0x5d" title="Scorpion" name="IMPL_ICIALLU" description="I-cache and BTAC Invalidates Implicit ICIALLU"/>
+ <event event="0x5e" title="Scorpion" name="NONICIALLU_BTAC_INV" description="I-cache and BTAC Invalidates Non-ICIALLU BTAC Invalidate"/>
+ <event event="0x5f" title="Scorpion" name="ICIMVAU_IMPL_ICIALLU" description="I-cache and BTAC Invalidates ICIMVAU-implied ICIALLU"/>
+ <event event="0x60" title="Scorpion" name="SPIPE_ONLY_CYCLES" description="Issue S-pipe only issue cycles"/>
+ <event event="0x61" title="Scorpion" name="XPIPE_ONLY_CYCLES" description="Issue X-pipe only issue cycles"/>
+ <event event="0x62" title="Scorpion" name="DUAL_CYCLES" description="Issue dual issue cycles"/>
+ <event event="0x63" title="Scorpion" name="DISPATCH_ANY_CYCLES" description="Dispatch any dispatch cycles"/>
+ <event event="0x64" title="Scorpion" name="FIFO_FULLBLK_CMT" description="Commits Trace FIFO full Blk CMT"/>
+ <event event="0x65" title="Scorpion" name="FAIL_COND_INST" description="Conditional instructions failing conditional instrs (excluding branches)"/>
+ <event event="0x66" title="Scorpion" name="PASS_COND_INST" description="Conditional instructions passing conditional instrs (excluding branches)"/>
+ <event event="0x67" title="Scorpion" name="ALLOW_VU_CLK" description="Unit Clock Gating Allow VU Clks"/>
+ <event event="0x68" title="Scorpion" name="VU_IDLE" description="Unit Clock Gating VU Idle"/>
+ <event event="0x69" title="Scorpion" name="ALLOW_L2_CLK" description="Unit Clock Gating Allow L2 Clks"/>
+ <event event="0x6a" title="Scorpion" name="L2_IDLE" description="Unit Clock Gating L2 Idle"/>
+ <event event="0x6b" title="Scorpion" name="DTLB_IMPL_INV_SCTLR_DACR" description="DTLB implicit invalidates writes to SCTLR and DACR"/>
+ <event event="0x6c" title="Scorpion" name="DTLB_EXPL_INV" description="DTLB explicit invalidates"/>
+ <event event="0x6d" title="Scorpion" name="DTLB_MISS" description="DTLB misses"/>
+ <event event="0x6e" title="Scorpion" name="DTLB_ACCESS" description="DTLB accesses"/>
+ <event event="0x6f" title="Scorpion" name="ITLB_MISS" description="ITLB misses"/>
+ <event event="0x70" title="Scorpion" name="ITLB_IMPL_INV" description="ITLB implicit ITLB invalidates"/>
+ <event event="0x71" title="Scorpion" name="ITLB_EXPL_INV" description="ITLB explicit ITLB invalidates"/>
+ <event event="0x72" title="Scorpion" name="UTLB_D_MISS" description="UTLB d-side misses"/>
+ <event event="0x73" title="Scorpion" name="UTLB_D_ACCESS" description="UTLB d-side accesses"/>
+ <event event="0x74" title="Scorpion" name="UTLB_I_MISS" description="UTLB i-side misses"/>
+ <event event="0x75" title="Scorpion" name="UTLB_I_ACCESS" description="UTLB i-side accesses"/>
+ <event event="0x76" title="Scorpion" name="UTLB_INV_ASID" description="UTLB invalidate by ASID"/>
+ <event event="0x77" title="Scorpion" name="UTLB_INV_MVA" description="UTLB invalidate by MVA"/>
+ <event event="0x78" title="Scorpion" name="UTLB_INV_ALL" description="UTLB invalidate all"/>
+ <event event="0x79" title="Scorpion" name="S2_HOLD_RDQ_UNAVAIL" description="S2 hold RDQ unavail"/>
+ <event event="0x7a" title="Scorpion" name="S2_HOLD" description="S2 hold"/>
+ <event event="0x7b" title="Scorpion" name="S2_HOLD_DEV_OP" description="S2 hold device op"/>
+ <event event="0x7c" title="Scorpion" name="S2_HOLD_ORDER" description="S2 hold strongly ordered op"/>
+ <event event="0x7d" title="Scorpion" name="S2_HOLD_BARRIER" description="S2 hold barrier"/>
+ <event event="0x7e" title="Scorpion" name="VIU_DUAL_CYCLE" description="Scorpion VIU dual cycle"/>
+ <event event="0x7f" title="Scorpion" name="VIU_SINGLE_CYCLE" description="Scorpion VIU single cycle"/>
+ <event event="0x80" title="Scorpion" name="VX_PIPE_WAR_STALL_CYCLES" description="Scorpion VX pipe WAR cycles"/>
+ <event event="0x81" title="Scorpion" name="VX_PIPE_WAW_STALL_CYCLES" description="Scorpion VX pipe WAW cycles"/>
+ <event event="0x82" title="Scorpion" name="VX_PIPE_RAW_STALL_CYCLES" description="Scorpion VX pipe RAW cycles"/>
+ <event event="0x83" title="Scorpion" name="VX_PIPE_LOAD_USE_STALL" description="Scorpion VX pipe load use stall"/>
+ <event event="0x84" title="Scorpion" name="VS_PIPE_WAR_STALL_CYCLES" description="Scorpion VS pipe WAR stall cycles"/>
+ <event event="0x85" title="Scorpion" name="VS_PIPE_WAW_STALL_CYCLES" description="Scorpion VS pipe WAW stall cycles"/>
+ <event event="0x86" title="Scorpion" name="VS_PIPE_RAW_STALL_CYCLES" description="Scorpion VS pipe RAW stall cycles"/>
+ <event event="0x87" title="Scorpion" name="EXCEPTIONS_INV_OPERATION" description="Scorpion invalid operation exceptions"/>
+ <event event="0x88" title="Scorpion" name="EXCEPTIONS_DIV_BY_ZERO" description="Scorpion divide by zero exceptions"/>
+ <event event="0x89" title="Scorpion" name="COND_INST_FAIL_VX_PIPE" description="Scorpion conditional instruction fail VX pipe"/>
+ <event event="0x8a" title="Scorpion" name="COND_INST_FAIL_VS_PIPE" description="Scorpion conditional instruction fail VS pipe"/>
+ <event event="0x8b" title="Scorpion" name="EXCEPTIONS_OVERFLOW" description="Scorpion overflow exceptions"/>
+ <event event="0x8c" title="Scorpion" name="EXCEPTIONS_UNDERFLOW" description="Scorpion underflow exceptions"/>
+ <event event="0x8d" title="Scorpion" name="EXCEPTIONS_DENORM" description="Scorpion denorm exceptions"/>
+ <event event="0x8e" title="Scorpion" name="BANK_AB_HIT" description="L2 hit rates bank A/B hits"/>
+ <event event="0x8f" title="Scorpion" name="BANK_AB_ACCESS" description="L2 hit rates bank A/B accesses"/>
+ <event event="0x90" title="Scorpion" name="BANK_CD_HIT" description="L2 hit rates bank C/D hits"/>
+ <event event="0x91" title="Scorpion" name="BANK_CD_ACCESS" description="L2 hit rates bank C/D accesses"/>
+ <event event="0x92" title="Scorpion" name="BANK_AB_DSIDE_HIT" description="L2 hit rates bank A/B d-side hits"/>
+ <event event="0x93" title="Scorpion" name="BANK_AB_DSIDE_ACCESS" description="L2 hit rates bank A/B d-side accesses"/>
+ <event event="0x94" title="Scorpion" name="BANK_CD_DSIDE_HIT" description="L2 hit rates bank C/D d-side hits"/>
+ <event event="0x95" title="Scorpion" name="BANK_CD_DSIDE_ACCESS" description="L2 hit rates bank C/D d-side accesses"/>
+ <event event="0x96" title="Scorpion" name="BANK_AB_ISIDE_HIT" description="L2 hit rates bank A/B i-side hits"/>
+ <event event="0x97" title="Scorpion" name="BANK_AB_ISIDE_ACCESS" description="L2 hit rates bank A/B i-side accesses"/>
+ <event event="0x98" title="Scorpion" name="BANK_CD_ISIDE_HIT" description="L2 hit rates bank C/D i-side hits"/>
+ <event event="0x99" title="Scorpion" name="BANK_CD_ISIDE_ACCESS" description="L2 hit rates bank C/D i-side accesses"/>
+ <event event="0x9a" title="Scorpion" name="ISIDE_RD_WAIT" description="fills and castouts cycles that i-side RD requests wait on data from bus"/>
+ <event event="0x9b" title="Scorpion" name="DSIDE_RD_WAIT" description="fills and castouts cycles that d-side RD requests wait on data from bus"/>
+ <event event="0x9c" title="Scorpion" name="BANK_BYPASS_WRITE" description="fills and castouts bank bypass writes"/>
+ <event event="0x9d" title="Scorpion" name="BANK_AB_NON_CASTOUT" description="fills and castouts bank A/B non-castout writes to bus"/>
+ <event event="0x9e" title="Scorpion" name="BANK_AB_L2_CASTOUT" description="fills and castouts bank A/B L2 castouts (granules)"/>
+ <event event="0x9f" title="Scorpion" name="BANK_CD_NON_CASTOUT" description="fills and castouts bank C/D non-castout writes to bus"/>
+ <event event="0xa0" title="Scorpion" name="BANK_CD_L2_CASTOUT" description="fills and castouts bank C/D L2 castouts (granules)"/>
+ </category>
diff --git a/tools/gator/daemon/events-ScorpionMP.xml b/tools/gator/daemon/events-ScorpionMP.xml
new file mode 100644
index 000000000000..309f1030c1fc
--- /dev/null
+++ b/tools/gator/daemon/events-ScorpionMP.xml
@@ -0,0 +1,90 @@
+ <counter_set name="ScorpionMP_cnt" count="4"/>
+ <category name="ScorpionMP" counter_set="ScorpionMP_cnt" per_cpu="yes" supports_event_based_sampling="yes">
+ <event counter="ScorpionMP_ccnt" title="Clock" name="Cycles" display="hertz" units="Hz" average_selection="yes" description="The number of core clock cycles"/>
+ <event event="0x00" title="Software" name="Increment" description="Incremented only on writes to the Software Increment Register"/>
+ <event event="0x01" title="Cache" name="Instruction refill" description="Instruction fetch that causes a refill of at least the level of instruction or unified cache closest to the processor"/>
+ <event event="0x02" title="Cache" name="Inst TLB refill" description="Instruction fetch that causes a TLB refill of at least the level of TLB closest to the processor"/>
+ <event event="0x03" title="Cache" name="Data refill" description="Memory Read or Write operation that causes a refill of at least the level of data or unified cache closest to the processor"/>
+ <event event="0x04" title="Cache" name="Data access" description="Memory Read or Write operation that causes a cache access to at least the level of data or unified cache closest to the processor"/>
+ <event event="0x05" title="Cache" name="Data TLB refill" description="Memory Read or Write operation that causes a TLB refill of at least the level of TLB closest to the processor"/>
+ <event event="0x06" title="Instruction" name="Memory read" description="Memory-reading instruction architecturally executed"/>
+ <event event="0x07" title="Instruction" name="Memory write" description="Memory-writing instruction architecturally executed"/>
+ <event event="0x08" title="Instruction" name="Executed" description="Instruction architecturally executed"/>
+ <event event="0x09" title="Exception" name="Taken" description="Exceptions taken"/>
+ <event event="0x0a" title="Exception" name="Return" description="Exception return architecturally executed"/>
+ <event event="0x0b" title="Instruction" name="CONTEXTIDR" description="Instruction that writes to the CONTEXTIDR architecturally executed"/>
+ <event event="0x0c" title="Program Counter" name="SW change" description="Software change of PC, except by an exception, architecturally executed"/>
+ <event event="0x0d" title="Branch" name="Immediate" description="Immediate branch architecturally executed"/>
+ <event event="0x0e" title="Branch" name="Procedure Return" description="Procedure return architecturally executed (not by exceptions)"/>
+ <event event="0x0f" title="Memory" name="Unaligned access" description="Unaligned access architecturally executed"/>
+ <event event="0x10" title="Branch" name="Mispredicted" description="Branch mispredicted or not predicted"/>
+ <event event="0x12" title="Branch" name="Potential prediction" description="Branch or other change in program flow that could have been predicted by the branch prediction resources of the processor"/>
+ <event event="0x4c" title="Scorpion" name="ICACHE_EXPL_INV" description="I-cache explicit invalidates"/>
+ <event event="0x4d" title="Scorpion" name="ICACHE_MISS" description="I-cache misses"/>
+ <event event="0x4e" title="Scorpion" name="ICACHE_ACCESS" description="I-cache accesses"/>
+ <event event="0x4f" title="Scorpion" name="ICACHE_CACHEREQ_L2" description="I-cache cacheable requests to L2"/>
+ <event event="0x50" title="Scorpion" name="ICACHE_NOCACHE_L2" description="I-cache non-cacheable requests to L2"/>
+ <event event="0x51" title="Scorpion" name="HIQUP_NOPED" description="Conditional instructions HIQUPs NOPed"/>
+ <event event="0x52" title="Scorpion" name="DATA_ABORT" description="Interrupts and Exceptions Data Abort"/>
+ <event event="0x53" title="Scorpion" name="IRQ" description="Interrupts and Exceptions IRQ"/>
+ <event event="0x54" title="Scorpion" name="FIQ" description="Interrupts and Exceptions FIQ"/>
+ <event event="0x55" title="Scorpion" name="ALL_EXCPT" description="Interrupts and Exceptions All interrupts"/>
+ <event event="0x56" title="Scorpion" name="UNDEF" description="Interrupts and Exceptions Undefined"/>
+ <event event="0x57" title="Scorpion" name="SVC" description="Interrupts and Exceptions SVC"/>
+ <event event="0x58" title="Scorpion" name="SMC" description="Interrupts and Exceptions SMC"/>
+ <event event="0x59" title="Scorpion" name="PREFETCH_ABORT" description="Interrupts and Exceptions Prefetch Abort"/>
+ <event event="0x5a" title="Scorpion" name="INDEX_CHECK" description="Interrupts and Exceptions Index Check"/>
+ <event event="0x5b" title="Scorpion" name="NULL_CHECK" description="Interrupts and Exceptions Null Check"/>
+ <event event="0x5c" title="Scorpion" name="EXPL_ICIALLU" description="I-cache and BTAC Invalidates Explicit ICIALLU"/>
+ <event event="0x5d" title="Scorpion" name="IMPL_ICIALLU" description="I-cache and BTAC Invalidates Implicit ICIALLU"/>
+ <event event="0x5e" title="Scorpion" name="NONICIALLU_BTAC_INV" description="I-cache and BTAC Invalidates Non-ICIALLU BTAC Invalidate"/>
+ <event event="0x5f" title="Scorpion" name="ICIMVAU_IMPL_ICIALLU" description="I-cache and BTAC Invalidates ICIMVAU-implied ICIALLU"/>
+ <event event="0x60" title="Scorpion" name="SPIPE_ONLY_CYCLES" description="Issue S-pipe only issue cycles"/>
+ <event event="0x61" title="Scorpion" name="XPIPE_ONLY_CYCLES" description="Issue X-pipe only issue cycles"/>
+ <event event="0x62" title="Scorpion" name="DUAL_CYCLES" description="Issue dual issue cycles"/>
+ <event event="0x63" title="Scorpion" name="DISPATCH_ANY_CYCLES" description="Dispatch any dispatch cycles"/>
+ <event event="0x64" title="Scorpion" name="FIFO_FULLBLK_CMT" description="Commits Trace FIFO full Blk CMT"/>
+ <event event="0x65" title="Scorpion" name="FAIL_COND_INST" description="Conditional instructions failing conditional instrs (excluding branches)"/>
+ <event event="0x66" title="Scorpion" name="PASS_COND_INST" description="Conditional instructions passing conditional instrs (excluding branches)"/>
+ <event event="0x67" title="Scorpion" name="ALLOW_VU_CLK" description="Unit Clock Gating Allow VU Clks"/>
+ <event event="0x68" title="Scorpion" name="VU_IDLE" description="Unit Clock Gating VU Idle"/>
+ <event event="0x69" title="Scorpion" name="ALLOW_L2_CLK" description="Unit Clock Gating Allow L2 Clks"/>
+ <event event="0x6a" title="Scorpion" name="L2_IDLE" description="Unit Clock Gating L2 Idle"/>
+ <event event="0x6b" title="Scorpion" name="DTLB_IMPL_INV_SCTLR_DACR" description="DTLB implicit invalidates writes to SCTLR and DACR"/>
+ <event event="0x6c" title="Scorpion" name="DTLB_EXPL_INV" description="DTLB explicit invalidates"/>
+ <event event="0x6d" title="Scorpion" name="DTLB_MISS" description="DTLB misses"/>
+ <event event="0x6e" title="Scorpion" name="DTLB_ACCESS" description="DTLB accesses"/>
+ <event event="0x6f" title="Scorpion" name="ITLB_MISS" description="ITLB misses"/>
+ <event event="0x70" title="Scorpion" name="ITLB_IMPL_INV" description="ITLB implicit ITLB invalidates"/>
+ <event event="0x71" title="Scorpion" name="ITLB_EXPL_INV" description="ITLB explicit ITLB invalidates"/>
+ <event event="0x72" title="Scorpion" name="UTLB_D_MISS" description="UTLB d-side misses"/>
+ <event event="0x73" title="Scorpion" name="UTLB_D_ACCESS" description="UTLB d-side accesses"/>
+ <event event="0x74" title="Scorpion" name="UTLB_I_MISS" description="UTLB i-side misses"/>
+ <event event="0x75" title="Scorpion" name="UTLB_I_ACCESS" description="UTLB i-side accesses"/>
+ <event event="0x76" title="Scorpion" name="UTLB_INV_ASID" description="UTLB invalidate by ASID"/>
+ <event event="0x77" title="Scorpion" name="UTLB_INV_MVA" description="UTLB invalidate by MVA"/>
+ <event event="0x78" title="Scorpion" name="UTLB_INV_ALL" description="UTLB invalidate all"/>
+ <event event="0x79" title="Scorpion" name="S2_HOLD_RDQ_UNAVAIL" description="S2 hold RDQ unavail"/>
+ <event event="0x7a" title="Scorpion" name="S2_HOLD" description="S2 hold"/>
+ <event event="0x7b" title="Scorpion" name="S2_HOLD_DEV_OP" description="S2 hold device op"/>
+ <event event="0x7c" title="Scorpion" name="S2_HOLD_ORDER" description="S2 hold strongly ordered op"/>
+ <event event="0x7d" title="Scorpion" name="S2_HOLD_BARRIER" description="S2 hold barrier"/>
+ <event event="0x7e" title="Scorpion" name="VIU_DUAL_CYCLE" description="Scorpion VIU dual cycle"/>
+ <event event="0x7f" title="Scorpion" name="VIU_SINGLE_CYCLE" description="Scorpion VIU single cycle"/>
+ <event event="0x80" title="Scorpion" name="VX_PIPE_WAR_STALL_CYCLES" description="Scorpion VX pipe WAR cycles"/>
+ <event event="0x81" title="Scorpion" name="VX_PIPE_WAW_STALL_CYCLES" description="Scorpion VX pipe WAW cycles"/>
+ <event event="0x82" title="Scorpion" name="VX_PIPE_RAW_STALL_CYCLES" description="Scorpion VX pipe RAW cycles"/>
+ <event event="0x83" title="Scorpion" name="VX_PIPE_LOAD_USE_STALL" description="Scorpion VX pipe load use stall"/>
+ <event event="0x84" title="Scorpion" name="VS_PIPE_WAR_STALL_CYCLES" description="Scorpion VS pipe WAR stall cycles"/>
+ <event event="0x85" title="Scorpion" name="VS_PIPE_WAW_STALL_CYCLES" description="Scorpion VS pipe WAW stall cycles"/>
+ <event event="0x86" title="Scorpion" name="VS_PIPE_RAW_STALL_CYCLES" description="Scorpion VS pipe RAW stall cycles"/>
+ <event event="0x87" title="Scorpion" name="EXCEPTIONS_INV_OPERATION" description="Scorpion invalid operation exceptions"/>
+ <event event="0x88" title="Scorpion" name="EXCEPTIONS_DIV_BY_ZERO" description="Scorpion divide by zero exceptions"/>
+ <event event="0x89" title="Scorpion" name="COND_INST_FAIL_VX_PIPE" description="Scorpion conditional instruction fail VX pipe"/>
+ <event event="0x8a" title="Scorpion" name="COND_INST_FAIL_VS_PIPE" description="Scorpion conditional instruction fail VS pipe"/>
+ <event event="0x8b" title="Scorpion" name="EXCEPTIONS_OVERFLOW" description="Scorpion overflow exceptions"/>
+ <event event="0x8c" title="Scorpion" name="EXCEPTIONS_UNDERFLOW" description="Scorpion underflow exceptions"/>
+ <event event="0x8d" title="Scorpion" name="EXCEPTIONS_DENORM" description="Scorpion denorm exceptions"/>
+ <event event="0x8e" title="ScorpionMP" name="NUM_BARRIERS" description="Barriers"/>
+ <event event="0x8f" title="ScorpionMP" name="BARRIER_CYCLES" description="Barrier cycles"/>
+ </category>
diff --git a/tools/gator/daemon/events_footer.xml b/tools/gator/daemon/events_footer.xml
new file mode 100644
index 000000000000..cd2b44665bab
--- /dev/null
+++ b/tools/gator/daemon/events_footer.xml
@@ -0,0 +1 @@
+</events>
diff --git a/tools/gator/daemon/events_header.xml b/tools/gator/daemon/events_header.xml
new file mode 100644
index 000000000000..38ec4c03246e
--- /dev/null
+++ b/tools/gator/daemon/events_header.xml
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<events>
diff --git a/tools/gator/daemon/main.cpp b/tools/gator/daemon/main.cpp
new file mode 100644
index 000000000000..894bad7bc840
--- /dev/null
+++ b/tools/gator/daemon/main.cpp
@@ -0,0 +1,383 @@
+/**
+ * Copyright (C) ARM Limited 2010-2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <ctype.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <sys/prctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mount.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include "Child.h"
+#include "SessionData.h"
+#include "OlySocket.h"
+#include "Logging.h"
+#include "OlyUtility.h"
+
+#define DEBUG false
+
+extern Child* child;
+int shutdownFilesystem();
+static pthread_mutex_t numSessions_mutex;
+static int numSessions = 0;
+static OlySocket* socket = NULL;
+static bool driverRunningAtStart = false;
+static bool driverMountedAtStart = false;
+
+struct cmdline_t {
+ int port;
+ char* module;
+};
+
+void cleanUp() {
+ if (shutdownFilesystem() == -1) {
+ logg->logMessage("Error shutting down gator filesystem");
+ }
+ delete socket;
+ delete util;
+ delete logg;
+}
+
+// CTRL C Signal Handler
+void handler(int signum) {
+ logg->logMessage("Received signal %d, gator daemon exiting", signum);
+
+ // Case 1: both child and parent receive the signal
+ if (numSessions > 0) {
+ // Arbitrary sleep of 1 second to give time for the child to exit;
+ // if something bad happens, continue the shutdown process regardless
+ sleep(1);
+ }
+
+ // Case 2: only the parent received the signal
+ if (numSessions > 0) {
+ // Kill child threads - the first signal exits gracefully
+ logg->logMessage("Killing process group as %d child was running when signal was received", numSessions);
+ kill(0, SIGINT);
+
+ // Give time for the child to exit
+ sleep(1);
+
+ if (numSessions > 0) {
+ // The second signal force kills the child
+ logg->logMessage("Force kill the child");
+ kill(0, SIGINT);
+ // Again, sleep for 1 second
+ sleep(1);
+
+ if (numSessions > 0) {
+ // Something bad has really happened; the child is not exiting and therefore may hold the /dev/gator resource open
+ printf("Unable to kill the gatord child process, thus gator.ko may still be loaded.\n");
+ }
+ }
+ }
+
+ cleanUp();
+ exit(0);
+}
+
+// Child exit Signal Handler
+void child_exit(int signum) {
+ int status;
+ int pid = wait(&status);
+ if (pid != -1) {
+ pthread_mutex_lock(&numSessions_mutex);
+ numSessions--;
+ pthread_mutex_unlock(&numSessions_mutex);
+ logg->logMessage("Child process %d exited with status %d", pid, status);
+ }
+}
+
+// retval: -1 = failure; 0 = was already mounted; 1 = successfully mounted
+int mountGatorFS() {
+ // If already mounted,
+ if (access("/dev/gator/buffer", F_OK) == 0) {
+ return 0;
+ }
+
+ // else, mount the filesystem
+ mkdir("/dev/gator", S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
+ if (mount("nodev", "/dev/gator", "gatorfs", 0, NULL) != 0) {
+ return -1;
+ } else {
+ return 1;
+ }
+}
+
+bool init_module (const char * const location) {
+ bool ret(false);
+ const int fd = open(location, O_RDONLY);
+ if (fd >= 0) {
+ struct stat st;
+ if (fstat(fd, &st) == 0) {
+ void * const p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
+ if (p != MAP_FAILED) {
+ if (syscall(__NR_init_module, p, st.st_size, "") == 0) {
+ ret = true;
+ }
+ munmap(p, st.st_size);
+ }
+ }
+ close(fd);
+ }
+
+ return ret;
+}
+
+int setupFilesystem(char* module) {
+ int retval;
+
+ // Verify root permissions
+ uid_t euid = geteuid();
+ if (euid) {
+ logg->logError(__FILE__, __LINE__, "gatord must be launched with root privileges");
+ handleException();
+ }
+
+ if (module) {
+ // unmount and rmmod if the module was specified on the commandline, i.e. ensure that the specified module is indeed running
+ shutdownFilesystem();
+
+ // if still mounted
+ if (access("/dev/gator/buffer", F_OK) == 0) {
+ logg->logError(__FILE__, __LINE__, "Unable to remove the running gator.ko. Manually remove the module or use the running module by not specifying one on the commandline");
+ handleException();
+ }
+ }
+
+ retval = mountGatorFS();
+ if (retval == 1) {
+ logg->logMessage("Driver already running at startup");
+ driverRunningAtStart = true;
+ } else if (retval == 0) {
+ logg->logMessage("Driver already mounted at startup");
+ driverRunningAtStart = driverMountedAtStart = true;
+ } else {
+ char command[256]; // arbitrarily large amount
+ char location[256]; // arbitrarily large amount
+
+ if (module) {
+ strncpy(location, module, sizeof(location));
+ } else {
+ // Is the driver co-located in the same directory?
+ if (util->getApplicationFullPath(location, sizeof(location)) != 0) { // allow some buffer space
+ logg->logMessage("Unable to determine the full path of gatord, the cwd will be used");
+ }
+ strncat(location, "gator.ko", sizeof(location) - strlen(location) - 1);
+ }
+
+ if (access(location, F_OK) == -1) {
+ logg->logError(__FILE__, __LINE__, "Unable to locate gator.ko driver:\n >>> gator.ko should be co-located with gatord in the same directory\n >>> OR insmod gator.ko prior to launching gatord\n >>> OR specify the location of gator.ko on the command line");
+ handleException();
+ }
+
+ // Load driver
+ bool success = init_module(location);
+ if (!success) {
+ logg->logMessage("init_module failed, trying insmod");
+ snprintf(command, sizeof(command), "insmod %s >/dev/null 2>&1", location);
+ if (system(command) != 0) {
+ logg->logMessage("Unable to load gator.ko driver with command: %s", command);
+ logg->logError(__FILE__, __LINE__, "Unable to load (insmod) gator.ko driver:\n >>> gator.ko must be built against the current kernel version & configuration\n >>> See dmesg for more details");
+ handleException();
+ }
+ }
+
+ if (mountGatorFS() == -1) {
+ logg->logError(__FILE__, __LINE__, "Unable to mount the gator filesystem needed for profiling.");
+ handleException();
+ }
+ }
+
+ return 0;
+}
+
+int shutdownFilesystem() {
+ if (driverMountedAtStart == false) {
+ umount("/dev/gator");
+ }
+ if (driverRunningAtStart == false) {
+ if (syscall(__NR_delete_module, "gator", O_NONBLOCK) != 0) {
+ logg->logMessage("delete_module failed, trying rmmod");
+ if (system("rmmod gator >/dev/null 2>&1") != 0) {
+ return -1;
+ }
+ }
+ }
+
+ return 0; // success
+}
+
+struct cmdline_t parseCommandLine(int argc, char** argv) {
+ struct cmdline_t cmdline;
+ cmdline.port = 8080;
+ cmdline.module = NULL;
+ char version_string[256]; // arbitrary length to hold the version information
+ int c;
+
+ // build the version string
+ if (PROTOCOL_VERSION < PROTOCOL_DEV) {
+ snprintf(version_string, sizeof(version_string), "Streamline gatord version %d (DS-5 v5.%d)", PROTOCOL_VERSION, PROTOCOL_VERSION + 1);
+ } else {
+ snprintf(version_string, sizeof(version_string), "Streamline gatord development version %d", PROTOCOL_VERSION);
+ }
+
+ while ((c = getopt(argc, argv, "hvp:s:c:e:m:o:")) != -1) {
+ switch(c) {
+ case 'c':
+ gSessionData->mConfigurationXMLPath = optarg;
+ break;
+ case 'e':
+ gSessionData->mEventsXMLPath = optarg;
+ break;
+ case 'm':
+ cmdline.module = optarg;
+ break;
+ case 'p':
+ cmdline.port = strtol(optarg, NULL, 10);
+ break;
+ case 's':
+ gSessionData->mSessionXMLPath = optarg;
+ break;
+ case 'o':
+ gSessionData->mTargetPath = optarg;
+ break;
+ case 'h':
+ case '?':
+ logg->logError(__FILE__, __LINE__,
+ "%s. All parameters are optional:\n"
+ "-c config_xml path and filename of the configuration.xml to use\n"
+ "-e events_xml path and filename of the events.xml to use\n"
+ "-h this help page\n"
+ "-m module path and filename of gator.ko\n"
+ "-p port_number port upon which the server listens; default is 8080\n"
+ "-s session_xml path and filename of a session xml used for local capture\n"
+ "-o apc_dir path and name of the output for a local capture\n"
+ "-v version information\n"
+ , version_string);
+ handleException();
+ break;
+ case 'v':
+ logg->logError(__FILE__, __LINE__, version_string);
+ handleException();
+ break;
+ }
+ }
+
+ // Error checking
+ if (cmdline.port != 8080 && gSessionData->mSessionXMLPath != NULL) {
+ logg->logError(__FILE__, __LINE__, "Only a port or a session xml can be specified, not both");
+ handleException();
+ }
+
+ if (gSessionData->mTargetPath != NULL && gSessionData->mSessionXMLPath == NULL) {
+ logg->logError(__FILE__, __LINE__, "Missing -s command line option required for a local capture.");
+ handleException();
+ }
+
+ if (optind < argc) {
+ logg->logError(__FILE__, __LINE__, "Unknown argument: %s. Use '-h' for help.", argv[optind]);
+ handleException();
+ }
+
+ return cmdline;
+}
+
+// Gator data flow: collector -> collector fifo -> sender
+int main(int argc, char** argv, char* envp[]) {
+ // Ensure proper signal handling by making gatord the process group leader
+ // e.g. it may not be the group leader when launched as 'sudo gatord'
+ setsid();
+
+ gSessionData = new SessionData(); // Global data class
+ logg = new Logging(DEBUG); // Set up global thread-safe logging
+ util = new OlyUtility(); // Set up global utility class
+
+ prctl(PR_SET_NAME, (unsigned long)&"gatord-main", 0, 0, 0);
+ pthread_mutex_init(&numSessions_mutex, NULL);
+
+ signal(SIGINT, handler);
+ signal(SIGTERM, handler);
+ signal(SIGABRT, handler);
+
+ // Set to high priority
+ if (setpriority(PRIO_PROCESS, syscall(__NR_gettid), -19) == -1) {
+ logg->logMessage("setpriority() failed");
+ }
+
+ // Parse the command line parameters
+ struct cmdline_t cmdline = parseCommandLine(argc, argv);
+
+ // Call before setting up the SIGCHLD handler, as system() spawns child processes
+ setupFilesystem(cmdline.module);
+
+ // Handle child exit codes
+ signal(SIGCHLD, child_exit);
+
+ // Ignore the SIGPIPE signal so that any send to a broken socket will return an error code instead of asserting a signal
+ // Handling the error at the send function call is much easier than trying to do anything intelligent in the sig handler
+ signal(SIGPIPE, SIG_IGN);
+
+ // If the command line argument is a session xml file, no need to open a socket
+ if (gSessionData->mSessionXMLPath) {
+ child = new Child();
+ child->run();
+ delete child;
+ } else {
+ socket = new OlySocket(cmdline.port, true);
+ // Forever loop, can be exited via a signal or exception
+ while (1) {
+ logg->logMessage("Waiting on connection...");
+ socket->acceptConnection();
+
+ int pid = fork();
+ if (pid < 0) {
+ // Error
+ logg->logError(__FILE__, __LINE__, "Fork process failed. Please power cycle the target device if this error persists.");
+ } else if (pid == 0) {
+ // Child
+ socket->closeServerSocket();
+ child = new Child(socket, numSessions + 1);
+ child->run();
+ delete child;
+ exit(0);
+ } else {
+ // Parent
+ socket->closeSocket();
+
+ pthread_mutex_lock(&numSessions_mutex);
+ numSessions++;
+ pthread_mutex_unlock(&numSessions_mutex);
+
+ // Maximum number of connections is 2
+ int wait = 0;
+ while (numSessions > 1) {
+ // Throttle until one of the children exits before continuing to accept another socket connection
+ logg->logMessage("%d sessions active!", numSessions);
+ if (wait++ >= 10) { // Wait no more than 10 seconds
+ // Kill last created child
+ kill(pid, SIGALRM);
+ break;
+ }
+ sleep(1);
+ }
+ }
+ }
+ }
+
+ cleanUp();
+ return 0;
+}
diff --git a/tools/gator/daemon/mxml/COPYING b/tools/gator/daemon/mxml/COPYING
new file mode 100644
index 000000000000..4d0aa78af224
--- /dev/null
+++ b/tools/gator/daemon/mxml/COPYING
@@ -0,0 +1,507 @@
+ Mini-XML License
+ September 18, 2010
+
+
+The Mini-XML library and included programs are provided under the
+terms of the GNU Library General Public License version 2 (LGPL2)
+with the following exceptions:
+
+ 1. Static linking of applications to the Mini-XML library
+does not constitute a derivative work and does not require
+the author to provide source code for the application, use
+the shared Mini-XML libraries, or link their applications
+against a user-supplied version of Mini-XML.
+
+If you link the application to a modified version of
+Mini-XML, then the changes to Mini-XML must be provided
+under the terms of the LGPL2 in sections 1, 2, and 4.
+
+ 2. You do not have to provide a copy of the Mini-XML license
+with programs that are linked to the Mini-XML library, nor
+do you have to identify the Mini-XML license in your
+program or documentation as required by section 6 of the
+LGPL2.
+
+
+ GNU LIBRARY GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1991 Free Software Foundation, Inc.
+ 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ [This is the first released version of the library GPL. It is
+ numbered 2 because it goes with version 2 of the ordinary GPL.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Library General Public License, applies to some
+specially designated Free Software Foundation software, and to any
+other libraries whose authors decide to use it. You can use it for
+your libraries, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if
+you distribute copies of the library, or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link a program with the library, you must provide
+complete object files to the recipients so that they can relink them
+with the library, after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ Our method of protecting your rights has two steps: (1) copyright
+the library, and (2) offer you this license which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ Also, for each distributor's protection, we want to make certain
+that everyone understands that there is no warranty for this free
+library. If the library is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original
+version, so that any problems introduced by others will not reflect on
+the original authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that companies distributing free
+software will individually obtain patent licenses, thus in effect
+transforming the program into proprietary software. To prevent this,
+we have made it clear that any patent must be licensed for everyone's
+free use or not licensed at all.
+
+ Most GNU software, including some libraries, is covered by the ordinary
+GNU General Public License, which was designed for utility programs. This
+license, the GNU Library General Public License, applies to certain
+designated libraries. This license is quite different from the ordinary
+one; be sure to read it in full, and don't assume that anything in it is
+the same as in the ordinary license.
+
+ The reason we have a separate public license for some libraries is that
+they blur the distinction we usually make between modifying or adding to a
+program and simply using it. Linking a program with a library, without
+changing the library, is in some sense simply using the library, and is
+analogous to running a utility program or application program. However, in
+a textual and legal sense, the linked executable is a combined work, a
+derivative of the original library, and the ordinary General Public License
+treats it as such.
+
+ Because of this blurred distinction, using the ordinary General
+Public License for libraries did not effectively promote software
+sharing, because most developers did not use the libraries. We
+concluded that weaker conditions might promote sharing better.
+
+ However, unrestricted linking of non-free programs would deprive the
+users of those programs of all benefit from the free status of the
+libraries themselves. This Library General Public License is intended to
+permit developers of non-free programs to use free libraries, while
+preserving your freedom as a user of such programs to change the free
+libraries that are incorporated in them. (We have not seen how to achieve
+this as regards changes in header files, but we have achieved it as regards
+changes in the actual functions of the Library.) The hope is that this
+will lead to faster development of free libraries.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, while the latter only
+works together with the library.
+
+ Note that it is possible for a library to be covered by the ordinary
+General Public License rather than by this special one.
+
+ GNU LIBRARY GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library which
+contains a notice placed by the copyright holder or other authorized
+party saying it may be distributed under the terms of this Library
+General Public License (also called "this License"). Each licensee is
+addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also compile or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ c) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ d) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the source code distributed need not include anything that is normally
+distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Library General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ Appendix: How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the library's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with this library; if not, write to the Free
+ Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+ <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+That's all there is to it!
diff --git a/tools/gator/daemon/mxml/config.h b/tools/gator/daemon/mxml/config.h
new file mode 100644
index 000000000000..1f59ba34a474
--- /dev/null
+++ b/tools/gator/daemon/mxml/config.h
@@ -0,0 +1,96 @@
+/* config.h. Generated from config.h.in by configure. */
+/*
+ * "$Id: config.h.in 408 2010-09-19 05:26:46Z mike $"
+ *
+ * Configuration file for Mini-XML, a small XML-like file parsing library.
+ *
+ * Copyright 2003-2010 by Michael R Sweet.
+ *
+ * These coded instructions, statements, and computer programs are the
+ * property of Michael R Sweet and are protected by Federal copyright
+ * law. Distribution and use rights are outlined in the file "COPYING"
+ * which should have been included with this file. If this file is
+ * missing or damaged, see the license at:
+ *
+ * http://www.minixml.org/
+ */
+
+/*
+ * Include necessary headers...
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <ctype.h>
+
+
+/*
+ * Version number...
+ */
+
+#define MXML_VERSION "Mini-XML v2.7"
+
+
+/*
+ * Inline function support...
+ */
+
+#define inline
+
+
+/*
+ * Long long support...
+ */
+
+#define HAVE_LONG_LONG 1
+
+
+/*
+ * Do we have the snprintf() and vsnprintf() functions?
+ */
+
+#define HAVE_SNPRINTF 1
+#define HAVE_VSNPRINTF 1
+
+
+/*
+ * Do we have the strXXX() functions?
+ */
+
+#define HAVE_STRDUP 1
+
+
+/*
+ * Do we have threading support?
+ */
+
+#define HAVE_PTHREAD_H 1
+
+
+/*
+ * Define prototypes for string functions as needed...
+ */
+
+# ifndef HAVE_STRDUP
+extern char *_mxml_strdup(const char *);
+# define strdup _mxml_strdup
+# endif /* !HAVE_STRDUP */
+
+extern char *_mxml_strdupf(const char *, ...);
+extern char *_mxml_vstrdupf(const char *, va_list);
+
+# ifndef HAVE_SNPRINTF
+extern int _mxml_snprintf(char *, size_t, const char *, ...);
+# define snprintf _mxml_snprintf
+# endif /* !HAVE_SNPRINTF */
+
+# ifndef HAVE_VSNPRINTF
+extern int _mxml_vsnprintf(char *, size_t, const char *, va_list);
+# define vsnprintf _mxml_vsnprintf
+# endif /* !HAVE_VSNPRINTF */
+
+/*
+ * End of "$Id: config.h.in 408 2010-09-19 05:26:46Z mike $".
+ */
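
The fallback prototypes at the end of config.h above exist so that, on a platform where configure leaves HAVE_STRDUP, HAVE_SNPRINTF, or HAVE_VSNPRINTF undefined, the corresponding libc calls are rewritten by the preprocessor to the library's own _mxml_* replacements. A minimal sketch of how a translation unit that includes this header behaves; the program itself is illustrative only.

/* Sketch of the compile-time fallback mechanism above. With HAVE_STRDUP
 * defined (as in this generated config.h) strdup() is the libc function;
 * without it, the macro in config.h silently rewrites the call to
 * _mxml_strdup(), implemented elsewhere in the library. */
#include "config.h"   /* the header above; pulls in stdio/stdlib/string */

int main(void)
{
    char *copy = strdup(MXML_VERSION);
    if (copy) {
        puts(copy);   /* prints "Mini-XML v2.7" */
        free(copy);
    }
    return 0;
}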
diff --git a/tools/gator/daemon/mxml/mxml-attr.c b/tools/gator/daemon/mxml/mxml-attr.c
new file mode 100644
index 000000000000..c9950f5fb732
--- /dev/null
+++ b/tools/gator/daemon/mxml/mxml-attr.c
@@ -0,0 +1,319 @@
+/*
+ * "$Id: mxml-attr.c 408 2010-09-19 05:26:46Z mike $"
+ *
+ * Attribute support code for Mini-XML, a small XML-like file parsing library.
+ *
+ * Copyright 2003-2010 by Michael R Sweet.
+ *
+ * These coded instructions, statements, and computer programs are the
+ * property of Michael R Sweet and are protected by Federal copyright
+ * law. Distribution and use rights are outlined in the file "COPYING"
+ * which should have been included with this file. If this file is
+ * missing or damaged, see the license at:
+ *
+ * http://www.minixml.org/
+ *
+ * Contents:
+ *
+ * mxmlElementDeleteAttr() - Delete an attribute.
+ * mxmlElementGetAttr() - Get an attribute.
+ * mxmlElementSetAttr() - Set an attribute.
+ * mxmlElementSetAttrf() - Set an attribute with a formatted value.
+ * mxml_set_attr() - Set or add an attribute name/value pair.
+ */
+
+/*
+ * Include necessary headers...
+ */
+
+#include "config.h"
+#include "mxml.h"
+
+
+/*
+ * Local functions...
+ */
+
+static int mxml_set_attr(mxml_node_t *node, const char *name,
+ char *value);
+
+
+/*
+ * 'mxmlElementDeleteAttr()' - Delete an attribute.
+ *
+ * @since Mini-XML 2.4@
+ */
+
+void
+mxmlElementDeleteAttr(mxml_node_t *node,/* I - Element */
+ const char *name)/* I - Attribute name */
+{
+ int i; /* Looping var */
+ mxml_attr_t *attr; /* Current attribute */
+
+
+#ifdef DEBUG
+ fprintf(stderr, "mxmlElementDeleteAttr(node=%p, name=\"%s\")\n",
+ node, name ? name : "(null)");
+#endif /* DEBUG */
+
+ /*
+ * Range check input...
+ */
+
+ if (!node || node->type != MXML_ELEMENT || !name)
+ return;
+
+ /*
+ * Look for the attribute...
+ */
+
+ for (i = node->value.element.num_attrs, attr = node->value.element.attrs;
+ i > 0;
+ i --, attr ++)
+ {
+#ifdef DEBUG
+ printf(" %s=\"%s\"\n", attr->name, attr->value);
+#endif /* DEBUG */
+
+ if (!strcmp(attr->name, name))
+ {
+ /*
+ * Delete this attribute...
+ */
+
+ free(attr->name);
+ free(attr->value);
+
+ i --;
+ if (i > 0)
+ memmove(attr, attr + 1, i * sizeof(mxml_attr_t));
+
+ node->value.element.num_attrs --;
+ return;
+ }
+ }
+}
+
+
+/*
+ * 'mxmlElementGetAttr()' - Get an attribute.
+ *
+ * This function returns NULL if the node is not an element or the
+ * named attribute does not exist.
+ */
+
+const char * /* O - Attribute value or NULL */
+mxmlElementGetAttr(mxml_node_t *node, /* I - Element node */
+ const char *name) /* I - Name of attribute */
+{
+ int i; /* Looping var */
+ mxml_attr_t *attr; /* Current attribute */
+
+
+#ifdef DEBUG
+ fprintf(stderr, "mxmlElementGetAttr(node=%p, name=\"%s\")\n",
+ node, name ? name : "(null)");
+#endif /* DEBUG */
+
+ /*
+ * Range check input...
+ */
+
+ if (!node || node->type != MXML_ELEMENT || !name)
+ return (NULL);
+
+ /*
+ * Look for the attribute...
+ */
+
+ for (i = node->value.element.num_attrs, attr = node->value.element.attrs;
+ i > 0;
+ i --, attr ++)
+ {
+#ifdef DEBUG
+ printf(" %s=\"%s\"\n", attr->name, attr->value);
+#endif /* DEBUG */
+
+ if (!strcmp(attr->name, name))
+ {
+#ifdef DEBUG
+ printf(" Returning \"%s\"!\n", attr->value);
+#endif /* DEBUG */
+ return (attr->value);
+ }
+ }
+
+ /*
+ * Didn't find attribute, so return NULL...
+ */
+
+#ifdef DEBUG
+ puts(" Returning NULL!\n");
+#endif /* DEBUG */
+
+ return (NULL);
+}
+
+
+/*
+ * 'mxmlElementSetAttr()' - Set an attribute.
+ *
+ * If the named attribute already exists, the value of the attribute
+ * is replaced by the new string value. The string value is copied
+ * into the element node. This function does nothing if the node is
+ * not an element.
+ */
+
+void
+mxmlElementSetAttr(mxml_node_t *node, /* I - Element node */
+ const char *name, /* I - Name of attribute */
+ const char *value) /* I - Attribute value */
+{
+ char *valuec; /* Copy of value */
+
+
+#ifdef DEBUG
+ fprintf(stderr, "mxmlElementSetAttr(node=%p, name=\"%s\", value=\"%s\")\n",
+ node, name ? name : "(null)", value ? value : "(null)");
+#endif /* DEBUG */
+
+ /*
+ * Range check input...
+ */
+
+ if (!node || node->type != MXML_ELEMENT || !name)
+ return;
+
+ if (value)
+ valuec = strdup(value);
+ else
+ valuec = NULL;
+
+ if (mxml_set_attr(node, name, valuec))
+ free(valuec);
+}
+
+
+/*
+ * 'mxmlElementSetAttrf()' - Set an attribute with a formatted value.
+ *
+ * If the named attribute already exists, the value of the attribute
+ * is replaced by the new formatted string. The formatted string value is
+ * copied into the element node. This function does nothing if the node
+ * is not an element.
+ *
+ * @since Mini-XML 2.3@
+ */
+
+void
+mxmlElementSetAttrf(mxml_node_t *node, /* I - Element node */
+ const char *name, /* I - Name of attribute */
+ const char *format,/* I - Printf-style attribute value */
+ ...) /* I - Additional arguments as needed */
+{
+ va_list ap; /* Argument pointer */
+ char *value; /* Value */
+
+
+#ifdef DEBUG
+ fprintf(stderr,
+ "mxmlElementSetAttrf(node=%p, name=\"%s\", format=\"%s\", ...)\n",
+ node, name ? name : "(null)", format ? format : "(null)");
+#endif /* DEBUG */
+
+ /*
+ * Range check input...
+ */
+
+ if (!node || node->type != MXML_ELEMENT || !name || !format)
+ return;
+
+ /*
+ * Format the value...
+ */
+
+ va_start(ap, format);
+ value = _mxml_vstrdupf(format, ap);
+ va_end(ap);
+
+ if (!value)
+ mxml_error("Unable to allocate memory for attribute '%s' in element %s!",
+ name, node->value.element.name);
+ else if (mxml_set_attr(node, name, value))
+ free(value);
+}
+
+
+/*
+ * 'mxml_set_attr()' - Set or add an attribute name/value pair.
+ */
+
+static int /* O - 0 on success, -1 on failure */
+mxml_set_attr(mxml_node_t *node, /* I - Element node */
+ const char *name, /* I - Attribute name */
+ char *value) /* I - Attribute value */
+{
+ int i; /* Looping var */
+ mxml_attr_t *attr; /* New attribute */
+
+
+ /*
+ * Look for the attribute...
+ */
+
+ for (i = node->value.element.num_attrs, attr = node->value.element.attrs;
+ i > 0;
+ i --, attr ++)
+ if (!strcmp(attr->name, name))
+ {
+ /*
+ * Free the old value as needed...
+ */
+
+ if (attr->value)
+ free(attr->value);
+
+ attr->value = value;
+
+ return (0);
+ }
+
+ /*
+ * Add a new attribute...
+ */
+
+ if (node->value.element.num_attrs == 0)
+ attr = malloc(sizeof(mxml_attr_t));
+ else
+ attr = realloc(node->value.element.attrs,
+ (node->value.element.num_attrs + 1) * sizeof(mxml_attr_t));
+
+ if (!attr)
+ {
+ mxml_error("Unable to allocate memory for attribute '%s' in element %s!",
+ name, node->value.element.name);
+ return (-1);
+ }
+
+ node->value.element.attrs = attr;
+ attr += node->value.element.num_attrs;
+
+ if ((attr->name = strdup(name)) == NULL)
+ {
+ mxml_error("Unable to allocate memory for attribute '%s' in element %s!",
+ name, node->value.element.name);
+ return (-1);
+ }
+
+ attr->value = value;
+
+ node->value.element.num_attrs ++;
+
+ return (0);
+}
+
+
+/*
+ * End of "$Id: mxml-attr.c 408 2010-09-19 05:26:46Z mike $".
+ */
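
Taken together, the functions in mxml-attr.c above form a small get/set/delete attribute API on element nodes. A short usage sketch follows; mxmlNewXML(), mxmlNewElement(), and mxmlDelete() are standard Mini-XML calls that live in other files of the library and are not shown in this hunk.

/* Usage sketch for the attribute API above. */
#include <stdio.h>
#include "mxml.h"

int main(void)
{
    mxml_node_t *xml  = mxmlNewXML("1.0");
    mxml_node_t *node = mxmlNewElement(xml, "counter");

    mxmlElementSetAttr(node, "name", "cycles");       /* add an attribute */
    mxmlElementSetAttrf(node, "count", "%d", 42);     /* add, printf-style */
    mxmlElementSetAttr(node, "name", "instructions"); /* replace existing value */

    printf("name=%s count=%s\n",
           mxmlElementGetAttr(node, "name"),   /* "instructions" */
           mxmlElementGetAttr(node, "count")); /* "42" */

    mxmlElementDeleteAttr(node, "count");
    printf("count deleted: %s\n",
           mxmlElementGetAttr(node, "count") ? "no" : "yes");

    mxmlDelete(xml);
    return 0;
}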
diff --git a/tools/gator/daemon/mxml/mxml-entity.c b/tools/gator/daemon/mxml/mxml-entity.c
new file mode 100644
index 000000000000..c5c9f61f73c2
--- /dev/null
+++ b/tools/gator/daemon/mxml/mxml-entity.c
@@ -0,0 +1,460 @@
+/*
+ * "$Id: mxml-entity.c 408 2010-09-19 05:26:46Z mike $"
+ *
+ * Character entity support code for Mini-XML, a small XML-like
+ * file parsing library.
+ *
+ * Copyright 2003-2010 by Michael R Sweet.
+ *
+ * These coded instructions, statements, and computer programs are the
+ * property of Michael R Sweet and are protected by Federal copyright
+ * law. Distribution and use rights are outlined in the file "COPYING"
+ * which should have been included with this file. If this file is
+ * missing or damaged, see the license at:
+ *
+ * http://www.minixml.org/
+ *
+ * Contents:
+ *
+ * mxmlEntityAddCallback() - Add a callback to convert entities to
+ * Unicode.
+ * mxmlEntityGetName() - Get the name that corresponds to the
+ * character value.
+ * mxmlEntityGetValue() - Get the character corresponding to a named
+ * entity.
+ * mxmlEntityRemoveCallback() - Remove a callback.
+ * _mxml_entity_cb() - Lookup standard (X)HTML entities.
+ */
+
+/*
+ * Include necessary headers...
+ */
+
+#include "mxml-private.h"
+
+
+/*
+ * 'mxmlEntityAddCallback()' - Add a callback to convert entities to Unicode.
+ */
+
+int /* O - 0 on success, -1 on failure */
+mxmlEntityAddCallback(
+ mxml_entity_cb_t cb) /* I - Callback function to add */
+{
+ _mxml_global_t *global = _mxml_global();
+ /* Global data */
+
+
+ if (global->num_entity_cbs < (int)(sizeof(global->entity_cbs) / sizeof(global->entity_cbs[0])))
+ {
+ global->entity_cbs[global->num_entity_cbs] = cb;
+ global->num_entity_cbs ++;
+
+ return (0);
+ }
+ else
+ {
+ mxml_error("Unable to add entity callback!");
+
+ return (-1);
+ }
+}
+
+
+/*
+ * 'mxmlEntityGetName()' - Get the name that corresponds to the character value.
+ *
+ * If val does not need to be represented by a named entity, NULL is returned.
+ */
+
+const char * /* O - Entity name or NULL */
+mxmlEntityGetName(int val) /* I - Character value */
+{
+ switch (val)
+ {
+ case '&' :
+ return ("amp");
+
+ case '<' :
+ return ("lt");
+
+ case '>' :
+ return ("gt");
+
+ case '\"' :
+ return ("quot");
+
+ default :
+ return (NULL);
+ }
+}
+
+
+/*
+ * 'mxmlEntityGetValue()' - Get the character corresponding to a named entity.
+ *
+ * The entity name can also be a numeric constant. -1 is returned if the
+ * name is not known.
+ */
+
+int /* O - Character value or -1 on error */
+mxmlEntityGetValue(const char *name) /* I - Entity name */
+{
+ int i; /* Looping var */
+ int ch; /* Character value */
+ _mxml_global_t *global = _mxml_global();
+ /* Global data */
+
+
+ for (i = 0; i < global->num_entity_cbs; i ++)
+ if ((ch = (global->entity_cbs[i])(name)) >= 0)
+ return (ch);
+
+ return (-1);
+}
+
+
+/*
+ * 'mxmlEntityRemoveCallback()' - Remove a callback.
+ */
+
+void
+mxmlEntityRemoveCallback(
+ mxml_entity_cb_t cb) /* I - Callback function to remove */
+{
+ int i; /* Looping var */
+ _mxml_global_t *global = _mxml_global();
+ /* Global data */
+
+
+ for (i = 0; i < global->num_entity_cbs; i ++)
+ if (cb == global->entity_cbs[i])
+ {
+ /*
+ * Remove the callback...
+ */
+
+ global->num_entity_cbs --;
+
+ if (i < global->num_entity_cbs)
+ memmove(global->entity_cbs + i, global->entity_cbs + i + 1,
+ (global->num_entity_cbs - i) * sizeof(global->entity_cbs[0]));
+
+ return;
+ }
+}
+
+
+/*
+ * '_mxml_entity_cb()' - Lookup standard (X)HTML entities.
+ */
+
+int /* O - Unicode value or -1 */
+_mxml_entity_cb(const char *name) /* I - Entity name */
+{
+ int diff, /* Difference between names */
+ current, /* Current entity in search */
+ first, /* First entity in search */
+ last; /* Last entity in search */
+ static const struct
+ {
+ const char *name; /* Entity name */
+ int val; /* Character value */
+ } entities[] =
+ {
+ { "AElig", 198 },
+ { "Aacute", 193 },
+ { "Acirc", 194 },
+ { "Agrave", 192 },
+ { "Alpha", 913 },
+ { "Aring", 197 },
+ { "Atilde", 195 },
+ { "Auml", 196 },
+ { "Beta", 914 },
+ { "Ccedil", 199 },
+ { "Chi", 935 },
+ { "Dagger", 8225 },
+ { "Delta", 916 },
+ { "Dstrok", 208 },
+ { "ETH", 208 },
+ { "Eacute", 201 },
+ { "Ecirc", 202 },
+ { "Egrave", 200 },
+ { "Epsilon", 917 },
+ { "Eta", 919 },
+ { "Euml", 203 },
+ { "Gamma", 915 },
+ { "Iacute", 205 },
+ { "Icirc", 206 },
+ { "Igrave", 204 },
+ { "Iota", 921 },
+ { "Iuml", 207 },
+ { "Kappa", 922 },
+ { "Lambda", 923 },
+ { "Mu", 924 },
+ { "Ntilde", 209 },
+ { "Nu", 925 },
+ { "OElig", 338 },
+ { "Oacute", 211 },
+ { "Ocirc", 212 },
+ { "Ograve", 210 },
+ { "Omega", 937 },
+ { "Omicron", 927 },
+ { "Oslash", 216 },
+ { "Otilde", 213 },
+ { "Ouml", 214 },
+ { "Phi", 934 },
+ { "Pi", 928 },
+ { "Prime", 8243 },
+ { "Psi", 936 },
+ { "Rho", 929 },
+ { "Scaron", 352 },
+ { "Sigma", 931 },
+ { "THORN", 222 },
+ { "Tau", 932 },
+ { "Theta", 920 },
+ { "Uacute", 218 },
+ { "Ucirc", 219 },
+ { "Ugrave", 217 },
+ { "Upsilon", 933 },
+ { "Uuml", 220 },
+ { "Xi", 926 },
+ { "Yacute", 221 },
+ { "Yuml", 376 },
+ { "Zeta", 918 },
+ { "aacute", 225 },
+ { "acirc", 226 },
+ { "acute", 180 },
+ { "aelig", 230 },
+ { "agrave", 224 },
+ { "alefsym", 8501 },
+ { "alpha", 945 },
+ { "amp", '&' },
+ { "and", 8743 },
+ { "ang", 8736 },
+ { "apos", '\'' },
+ { "aring", 229 },
+ { "asymp", 8776 },
+ { "atilde", 227 },
+ { "auml", 228 },
+ { "bdquo", 8222 },
+ { "beta", 946 },
+ { "brkbar", 166 },
+ { "brvbar", 166 },
+ { "bull", 8226 },
+ { "cap", 8745 },
+ { "ccedil", 231 },
+ { "cedil", 184 },
+ { "cent", 162 },
+ { "chi", 967 },
+ { "circ", 710 },
+ { "clubs", 9827 },
+ { "cong", 8773 },
+ { "copy", 169 },
+ { "crarr", 8629 },
+ { "cup", 8746 },
+ { "curren", 164 },
+ { "dArr", 8659 },
+ { "dagger", 8224 },
+ { "darr", 8595 },
+ { "deg", 176 },
+ { "delta", 948 },
+ { "diams", 9830 },
+ { "die", 168 },
+ { "divide", 247 },
+ { "eacute", 233 },
+ { "ecirc", 234 },
+ { "egrave", 232 },
+ { "empty", 8709 },
+ { "emsp", 8195 },
+ { "ensp", 8194 },
+ { "epsilon", 949 },
+ { "equiv", 8801 },
+ { "eta", 951 },
+ { "eth", 240 },
+ { "euml", 235 },
+ { "euro", 8364 },
+ { "exist", 8707 },
+ { "fnof", 402 },
+ { "forall", 8704 },
+ { "frac12", 189 },
+ { "frac14", 188 },
+ { "frac34", 190 },
+ { "frasl", 8260 },
+ { "gamma", 947 },
+ { "ge", 8805 },
+ { "gt", '>' },
+ { "hArr", 8660 },
+ { "harr", 8596 },
+ { "hearts", 9829 },
+ { "hellip", 8230 },
+ { "hibar", 175 },
+ { "iacute", 237 },
+ { "icirc", 238 },
+ { "iexcl", 161 },
+ { "igrave", 236 },
+ { "image", 8465 },
+ { "infin", 8734 },
+ { "int", 8747 },
+ { "iota", 953 },
+ { "iquest", 191 },
+ { "isin", 8712 },
+ { "iuml", 239 },
+ { "kappa", 954 },
+ { "lArr", 8656 },
+ { "lambda", 955 },
+ { "lang", 9001 },
+ { "laquo", 171 },
+ { "larr", 8592 },
+ { "lceil", 8968 },
+ { "ldquo", 8220 },
+ { "le", 8804 },
+ { "lfloor", 8970 },
+ { "lowast", 8727 },
+ { "loz", 9674 },
+ { "lrm", 8206 },
+ { "lsaquo", 8249 },
+ { "lsquo", 8216 },
+ { "lt", '<' },
+ { "macr", 175 },
+ { "mdash", 8212 },
+ { "micro", 181 },
+ { "middot", 183 },
+ { "minus", 8722 },
+ { "mu", 956 },
+ { "nabla", 8711 },
+ { "nbsp", 160 },
+ { "ndash", 8211 },
+ { "ne", 8800 },
+ { "ni", 8715 },
+ { "not", 172 },
+ { "notin", 8713 },
+ { "nsub", 8836 },
+ { "ntilde", 241 },
+ { "nu", 957 },
+ { "oacute", 243 },
+ { "ocirc", 244 },
+ { "oelig", 339 },
+ { "ograve", 242 },
+ { "oline", 8254 },
+ { "omega", 969 },
+ { "omicron", 959 },
+ { "oplus", 8853 },
+ { "or", 8744 },
+ { "ordf", 170 },
+ { "ordm", 186 },
+ { "oslash", 248 },
+ { "otilde", 245 },
+ { "otimes", 8855 },
+ { "ouml", 246 },
+ { "para", 182 },
+ { "part", 8706 },
+ { "permil", 8240 },
+ { "perp", 8869 },
+ { "phi", 966 },
+ { "pi", 960 },
+ { "piv", 982 },
+ { "plusmn", 177 },
+ { "pound", 163 },
+ { "prime", 8242 },
+ { "prod", 8719 },
+ { "prop", 8733 },
+ { "psi", 968 },
+ { "quot", '\"' },
+ { "rArr", 8658 },
+ { "radic", 8730 },
+ { "rang", 9002 },
+ { "raquo", 187 },
+ { "rarr", 8594 },
+ { "rceil", 8969 },
+ { "rdquo", 8221 },
+ { "real", 8476 },
+ { "reg", 174 },
+ { "rfloor", 8971 },
+ { "rho", 961 },
+ { "rlm", 8207 },
+ { "rsaquo", 8250 },
+ { "rsquo", 8217 },
+ { "sbquo", 8218 },
+ { "scaron", 353 },
+ { "sdot", 8901 },
+ { "sect", 167 },
+ { "shy", 173 },
+ { "sigma", 963 },
+ { "sigmaf", 962 },
+ { "sim", 8764 },
+ { "spades", 9824 },
+ { "sub", 8834 },
+ { "sube", 8838 },
+ { "sum", 8721 },
+ { "sup", 8835 },
+ { "sup1", 185 },
+ { "sup2", 178 },
+ { "sup3", 179 },
+ { "supe", 8839 },
+ { "szlig", 223 },
+ { "tau", 964 },
+ { "there4", 8756 },
+ { "theta", 952 },
+ { "thetasym", 977 },
+ { "thinsp", 8201 },
+ { "thorn", 254 },
+ { "tilde", 732 },
+ { "times", 215 },
+ { "trade", 8482 },
+ { "uArr", 8657 },
+ { "uacute", 250 },
+ { "uarr", 8593 },
+ { "ucirc", 251 },
+ { "ugrave", 249 },
+ { "uml", 168 },
+ { "upsih", 978 },
+ { "upsilon", 965 },
+ { "uuml", 252 },
+ { "weierp", 8472 },
+ { "xi", 958 },
+ { "yacute", 253 },
+ { "yen", 165 },
+ { "yuml", 255 },
+ { "zeta", 950 },
+ { "zwj", 8205 },
+ { "zwnj", 8204 }
+ };
+
+
+ /*
+ * Do a binary search for the named entity...
+ */
+
+ first = 0;
+ last = (int)(sizeof(entities) / sizeof(entities[0]) - 1);
+
+ while ((last - first) > 1)
+ {
+ current = (first + last) / 2;
+
+ if ((diff = strcmp(name, entities[current].name)) == 0)
+ return (entities[current].val);
+ else if (diff < 0)
+ last = current;
+ else
+ first = current;
+ }
+
+ /*
+ * If we get here, there is a small chance that there is still
+ * a match; check first and last...
+ */
+
+ if (!strcmp(name, entities[first].name))
+ return (entities[first].val);
+ else if (!strcmp(name, entities[last].name))
+ return (entities[last].val);
+ else
+ return (-1);
+}
+
+
+/*
+ * End of "$Id: mxml-entity.c 408 2010-09-19 05:26:46Z mike $".
+ */
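
The entity helpers above map between character values and the standard (X)HTML entity names, with _mxml_entity_cb() doing a binary search over the sorted table and mxmlEntityGetValue() trying each registered callback in turn (the built-in callback is registered by default in the library's global state). A brief usage sketch, assuming only the functions defined in this file plus the mxml.h declarations:

/* Usage sketch for the entity lookup helpers above. */
#include <stdio.h>
#include "mxml.h"

int main(void)
{
    printf("amp   -> %d\n", mxmlEntityGetValue("amp"));   /* 38, i.e. '&' */
    printf("nbsp  -> %d\n", mxmlEntityGetValue("nbsp"));  /* 160 */
    printf("bogus -> %d\n", mxmlEntityGetValue("bogus")); /* -1: unknown name */

    /* Only &, <, > and " map back to names; everything else returns NULL. */
    printf("'>'   -> %s\n", mxmlEntityGetName('>'));      /* "gt" */
    printf("'A'   -> %s\n", mxmlEntityGetName('A') ?
                            mxmlEntityGetName('A') : "(no entity needed)");
    return 0;
}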
diff --git a/tools/gator/daemon/mxml/mxml-file.c b/tools/gator/daemon/mxml/mxml-file.c
new file mode 100644
index 000000000000..7860ee5f8370
--- /dev/null
+++ b/tools/gator/daemon/mxml/mxml-file.c
@@ -0,0 +1,3082 @@
+/*
+ * "$Id: mxml-file.c 438 2011-03-24 05:47:51Z mike $"
+ *
+ * File loading code for Mini-XML, a small XML-like file parsing library.
+ *
+ * Copyright 2003-2011 by Michael R Sweet.
+ *
+ * These coded instructions, statements, and computer programs are the
+ * property of Michael R Sweet and are protected by Federal copyright
+ * law. Distribution and use rights are outlined in the file "COPYING"
+ * which should have been included with this file. If this file is
+ * missing or damaged, see the license at:
+ *
+ * http://www.minixml.org/
+ *
+ * Contents:
+ *
+ * mxmlLoadFd() - Load a file descriptor into an XML node tree.
+ * mxmlLoadFile() - Load a file into an XML node tree.
+ * mxmlLoadString() - Load a string into an XML node tree.
+ * mxmlSaveAllocString() - Save an XML tree to an allocated string.
+ * mxmlSaveFd() - Save an XML tree to a file descriptor.
+ * mxmlSaveFile() - Save an XML tree to a file.
+ * mxmlSaveString() - Save an XML node tree to a string.
+ * mxmlSAXLoadFd() - Load a file descriptor into an XML node tree
+ * using a SAX callback.
+ * mxmlSAXLoadFile() - Load a file into an XML node tree
+ * using a SAX callback.
+ * mxmlSAXLoadString() - Load a string into an XML node tree
+ * using a SAX callback.
+ * mxmlSetCustomHandlers() - Set the handling functions for custom data.
+ * mxmlSetErrorCallback() - Set the error message callback.
+ * mxmlSetWrapMargin() - Set the wrap margin when saving XML data.
+ * mxml_add_char() - Add a character to a buffer, expanding as needed.
+ * mxml_fd_getc() - Read a character from a file descriptor.
+ * mxml_fd_putc() - Write a character to a file descriptor.
+ * mxml_fd_read() - Read a buffer of data from a file descriptor.
+ * mxml_fd_write() - Write a buffer of data to a file descriptor.
+ * mxml_file_getc() - Get a character from a file.
+ * mxml_file_putc() - Write a character to a file.
+ * mxml_get_entity() - Get the character corresponding to an entity...
+ * mxml_load_data() - Load data into an XML node tree.
+ * mxml_parse_element() - Parse an element for any attributes...
+ * mxml_string_getc() - Get a character from a string.
+ * mxml_string_putc() - Write a character to a string.
+ * mxml_write_name() - Write a name string.
+ * mxml_write_node() - Save an XML node to a file.
+ * mxml_write_string() - Write a string, escaping & and < as needed.
+ * mxml_write_ws() - Do whitespace callback...
+ */
+
+/*
+ * Include necessary headers...
+ */
+
+#ifndef WIN32
+# include <unistd.h>
+#endif /* !WIN32 */
+#include "mxml-private.h"
+
+
+/*
+ * Character encoding...
+ */
+
+#define ENCODE_UTF8 0 /* UTF-8 */
+#define ENCODE_UTF16BE 1 /* UTF-16 Big-Endian */
+#define ENCODE_UTF16LE 2 /* UTF-16 Little-Endian */
+
+
+/*
+ * Macro to test for a bad XML character...
+ */
+
+#define mxml_bad_char(ch) ((ch) < ' ' && (ch) != '\n' && (ch) != '\r' && (ch) != '\t')
+
+
+/*
+ * Types and structures...
+ */
+
+typedef int (*_mxml_getc_cb_t)(void *, int *);
+typedef int (*_mxml_putc_cb_t)(int, void *);
+
+typedef struct _mxml_fdbuf_s /**** File descriptor buffer ****/
+{
+ int fd; /* File descriptor */
+ unsigned char *current, /* Current position in buffer */
+ *end, /* End of buffer */
+ buffer[8192]; /* Character buffer */
+} _mxml_fdbuf_t;
+
+
+/*
+ * Local functions...
+ */
+
+static int mxml_add_char(int ch, char **ptr, char **buffer,
+ int *bufsize);
+static int mxml_fd_getc(void *p, int *encoding);
+static int mxml_fd_putc(int ch, void *p);
+static int mxml_fd_read(_mxml_fdbuf_t *buf);
+static int mxml_fd_write(_mxml_fdbuf_t *buf);
+static int mxml_file_getc(void *p, int *encoding);
+static int mxml_file_putc(int ch, void *p);
+static int mxml_get_entity(mxml_node_t *parent, void *p,
+ int *encoding,
+ _mxml_getc_cb_t getc_cb);
+static inline int mxml_isspace(int ch)
+ {
+ return (ch == ' ' || ch == '\t' || ch == '\r' ||
+ ch == '\n');
+ }
+static mxml_node_t *mxml_load_data(mxml_node_t *top, void *p,
+ mxml_load_cb_t cb,
+ _mxml_getc_cb_t getc_cb,
+ mxml_sax_cb_t sax_cb, void *sax_data);
+static int mxml_parse_element(mxml_node_t *node, void *p,
+ int *encoding,
+ _mxml_getc_cb_t getc_cb);
+static int mxml_string_getc(void *p, int *encoding);
+static int mxml_string_putc(int ch, void *p);
+static int mxml_write_name(const char *s, void *p,
+ _mxml_putc_cb_t putc_cb);
+static int mxml_write_node(mxml_node_t *node, void *p,
+ mxml_save_cb_t cb, int col,
+ _mxml_putc_cb_t putc_cb,
+ _mxml_global_t *global);
+static int mxml_write_string(const char *s, void *p,
+ _mxml_putc_cb_t putc_cb);
+static int mxml_write_ws(mxml_node_t *node, void *p,
+ mxml_save_cb_t cb, int ws,
+ int col, _mxml_putc_cb_t putc_cb);
+
+
+/*
+ * 'mxmlLoadFd()' - Load a file descriptor into an XML node tree.
+ *
+ * The nodes in the specified file are added to the specified top node.
+ * If no top node is provided, the XML file MUST be well-formed with a
+ * single parent node like <?xml> for the entire file. The callback
+ * function returns the value type that should be used for child nodes.
+ * If MXML_NO_CALLBACK is specified then all child nodes will be either
+ * MXML_ELEMENT or MXML_TEXT nodes.
+ *
+ * The constants MXML_INTEGER_CALLBACK, MXML_OPAQUE_CALLBACK,
+ * MXML_REAL_CALLBACK, and MXML_TEXT_CALLBACK are defined for loading
+ * child nodes of the specified type.
+ */
+
+mxml_node_t * /* O - First node or NULL if the file could not be read. */
+mxmlLoadFd(mxml_node_t *top, /* I - Top node */
+ int fd, /* I - File descriptor to read from */
+ mxml_load_cb_t cb) /* I - Callback function or MXML_NO_CALLBACK */
+{
+ _mxml_fdbuf_t buf; /* File descriptor buffer */
+
+
+ /*
+ * Initialize the file descriptor buffer...
+ */
+
+ buf.fd = fd;
+ buf.current = buf.buffer;
+ buf.end = buf.buffer;
+
+ /*
+ * Read the XML data...
+ */
+
+ return (mxml_load_data(top, &buf, cb, mxml_fd_getc, MXML_NO_CALLBACK, NULL));
+}
+
+
+/*
+ * 'mxmlLoadFile()' - Load a file into an XML node tree.
+ *
+ * The nodes in the specified file are added to the specified top node.
+ * If no top node is provided, the XML file MUST be well-formed with a
+ * single parent node like <?xml> for the entire file. The callback
+ * function returns the value type that should be used for child nodes.
+ * If MXML_NO_CALLBACK is specified then all child nodes will be either
+ * MXML_ELEMENT or MXML_TEXT nodes.
+ *
+ * The constants MXML_INTEGER_CALLBACK, MXML_OPAQUE_CALLBACK,
+ * MXML_REAL_CALLBACK, and MXML_TEXT_CALLBACK are defined for loading
+ * child nodes of the specified type.
+ */
+
+mxml_node_t * /* O - First node or NULL if the file could not be read. */
+mxmlLoadFile(mxml_node_t *top, /* I - Top node */
+ FILE *fp, /* I - File to read from */
+ mxml_load_cb_t cb) /* I - Callback function or MXML_NO_CALLBACK */
+{
+ /*
+ * Read the XML data...
+ */
+
+ return (mxml_load_data(top, fp, cb, mxml_file_getc, MXML_NO_CALLBACK, NULL));
+}
+
+
+/*
+ * 'mxmlLoadString()' - Load a string into an XML node tree.
+ *
+ * The nodes in the specified string are added to the specified top node.
+ * If no top node is provided, the XML string MUST be well-formed with a
+ * single parent node like <?xml> for the entire string. The callback
+ * function returns the value type that should be used for child nodes.
+ * If MXML_NO_CALLBACK is specified then all child nodes will be either
+ * MXML_ELEMENT or MXML_TEXT nodes.
+ *
+ * The constants MXML_INTEGER_CALLBACK, MXML_OPAQUE_CALLBACK,
+ * MXML_REAL_CALLBACK, and MXML_TEXT_CALLBACK are defined for loading
+ * child nodes of the specified type.
+ */
+
+mxml_node_t * /* O - First node or NULL if the string has errors. */
+mxmlLoadString(mxml_node_t *top, /* I - Top node */
+ const char *s, /* I - String to load */
+ mxml_load_cb_t cb) /* I - Callback function or MXML_NO_CALLBACK */
+{
+ /*
+ * Read the XML data...
+ */
+
+ return (mxml_load_data(top, (void *)&s, cb, mxml_string_getc, MXML_NO_CALLBACK,
+ NULL));
+}
+
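
The three loaders above differ only in the getc callback they hand to mxml_load_data(). A small usage sketch for the string variant, assuming the MXML_NO_CALLBACK behaviour described in the comments; mxmlFindElement(), MXML_DESCEND, and mxmlDelete() are Mini-XML calls not shown in this hunk.

/* Loading sketch for the functions above. */
#include <stdio.h>
#include "mxml.h"

int main(void)
{
    const char *doc =
        "<?xml version=\"1.0\"?>"
        "<captured><counter name=\"cycles\"/></captured>";

    /* With MXML_NO_CALLBACK every child node is MXML_ELEMENT or MXML_TEXT. */
    mxml_node_t *tree = mxmlLoadString(NULL, doc, MXML_NO_CALLBACK);
    if (!tree) {
        fputs("parse failed\n", stderr);
        return 1;
    }

    mxml_node_t *counter = mxmlFindElement(tree, tree, "counter",
                                           NULL, NULL, MXML_DESCEND);
    printf("counter name=%s\n",
           counter ? mxmlElementGetAttr(counter, "name") : "(not found)");

    mxmlDelete(tree);
    return 0;
}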
+
+/*
+ * 'mxmlSaveAllocString()' - Save an XML tree to an allocated string.
+ *
+ * This function returns a pointer to a string containing the textual
+ * representation of the XML node tree. The string should be freed
+ * using the free() function when you are done with it. NULL is returned
+ * if the node would produce an empty string or if the string cannot be
+ * allocated.
+ *
+ * The callback argument specifies a function that returns a whitespace
+ * string or NULL before and after each element. If MXML_NO_CALLBACK
+ * is specified, whitespace will only be added before MXML_TEXT nodes
+ * with leading whitespace and before attribute names inside opening
+ * element tags.
+ */
+
+char * /* O - Allocated string or NULL */
+mxmlSaveAllocString(
+ mxml_node_t *node, /* I - Node to write */
+ mxml_save_cb_t cb) /* I - Whitespace callback or MXML_NO_CALLBACK */
+{
+ int bytes; /* Required bytes */
+ char buffer[8192]; /* Temporary buffer */
+ char *s; /* Allocated string */
+
+
+ /*
+ * Write the node to the temporary buffer...
+ */
+
+ bytes = mxmlSaveString(node, buffer, sizeof(buffer), cb);
+
+ if (bytes <= 0)
+ return (NULL);
+
+ if (bytes < (int)(sizeof(buffer) - 1))
+ {
+ /*
+ * Node fit inside the buffer, so just duplicate that string and
+ * return...
+ */
+
+ return (strdup(buffer));
+ }
+
+ /*
+ * Allocate a buffer of the required size and save the node to the
+ * new buffer...
+ */
+
+ if ((s = malloc(bytes + 1)) == NULL)
+ return (NULL);
+
+ mxmlSaveString(node, s, bytes + 1, cb);
+
+ /*
+ * Return the allocated string...
+ */
+
+ return (s);
+}
+
+
+/*
+ * 'mxmlSaveFd()' - Save an XML tree to a file descriptor.
+ *
+ * The callback argument specifies a function that returns a whitespace
+ * string or NULL before and after each element. If MXML_NO_CALLBACK
+ * is specified, whitespace will only be added before MXML_TEXT nodes
+ * with leading whitespace and before attribute names inside opening
+ * element tags.
+ */
+
+int /* O - 0 on success, -1 on error. */
+mxmlSaveFd(mxml_node_t *node, /* I - Node to write */
+ int fd, /* I - File descriptor to write to */
+ mxml_save_cb_t cb) /* I - Whitespace callback or MXML_NO_CALLBACK */
+{
+ int col; /* Final column */
+ _mxml_fdbuf_t buf; /* File descriptor buffer */
+ _mxml_global_t *global = _mxml_global();
+ /* Global data */
+
+
+ /*
+ * Initialize the file descriptor buffer...
+ */
+
+ buf.fd = fd;
+ buf.current = buf.buffer;
+ buf.end = buf.buffer + sizeof(buf.buffer);
+
+ /*
+ * Write the node...
+ */
+
+ if ((col = mxml_write_node(node, &buf, cb, 0, mxml_fd_putc, global)) < 0)
+ return (-1);
+
+ if (col > 0)
+ if (mxml_fd_putc('\n', &buf) < 0)
+ return (-1);
+
+ /*
+ * Flush and return...
+ */
+
+ return (mxml_fd_write(&buf));
+}
+
+
+/*
+ * 'mxmlSaveFile()' - Save an XML tree to a file.
+ *
+ * The callback argument specifies a function that returns a whitespace
+ * string or NULL before and after each element. If MXML_NO_CALLBACK
+ * is specified, whitespace will only be added before MXML_TEXT nodes
+ * with leading whitespace and before attribute names inside opening
+ * element tags.
+ */
+
+int /* O - 0 on success, -1 on error. */
+mxmlSaveFile(mxml_node_t *node, /* I - Node to write */
+ FILE *fp, /* I - File to write to */
+ mxml_save_cb_t cb) /* I - Whitespace callback or MXML_NO_CALLBACK */
+{
+ int col; /* Final column */
+ _mxml_global_t *global = _mxml_global();
+ /* Global data */
+
+
+ /*
+ * Write the node...
+ */
+
+ if ((col = mxml_write_node(node, fp, cb, 0, mxml_file_putc, global)) < 0)
+ return (-1);
+
+ if (col > 0)
+ if (putc('\n', fp) < 0)
+ return (-1);
+
+ /*
+ * Return 0 (success)...
+ */
+
+ return (0);
+}
+
+
+/*
+ * 'mxmlSaveString()' - Save an XML node tree to a string.
+ *
+ * This function returns the total number of bytes that would be
+ * required for the string but only copies (bufsize - 1) characters
+ * into the specified buffer.
+ *
+ * The callback argument specifies a function that returns a whitespace
+ * string or NULL before and after each element. If MXML_NO_CALLBACK
+ * is specified, whitespace will only be added before MXML_TEXT nodes
+ * with leading whitespace and before attribute names inside opening
+ * element tags.
+ */
+
+int /* O - Size of string */
+mxmlSaveString(mxml_node_t *node, /* I - Node to write */
+ char *buffer, /* I - String buffer */
+ int bufsize, /* I - Size of string buffer */
+ mxml_save_cb_t cb) /* I - Whitespace callback or MXML_NO_CALLBACK */
+{
+ int col; /* Final column */
+ char *ptr[2]; /* Pointers for putc_cb */
+ _mxml_global_t *global = _mxml_global();
+ /* Global data */
+
+
+ /*
+ * Write the node...
+ */
+
+ ptr[0] = buffer;
+ ptr[1] = buffer + bufsize;
+
+ if ((col = mxml_write_node(node, ptr, cb, 0, mxml_string_putc, global)) < 0)
+ return (-1);
+
+ if (col > 0)
+ mxml_string_putc('\n', ptr);
+
+ /*
+ * Nul-terminate the buffer...
+ */
+
+ if (ptr[0] >= ptr[1])
+ buffer[bufsize - 1] = '\0';
+ else
+ ptr[0][0] = '\0';
+
+ /*
+ * Return the number of characters...
+ */
+
+ return (ptr[0] - buffer);
+}
+
+
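+/*
+ * Editor's note: illustrative sketch, not part of the original Mini-XML
+ * sources.  Because mxmlSaveString() returns the number of bytes the
+ * output would need, the return value can be compared against the buffer
+ * size to detect truncation ("xml" is assumed to be a previously built
+ * tree):
+ *
+ *   char buffer[8192];
+ *   int  needed = mxmlSaveString(xml, buffer, sizeof(buffer),
+ *                                MXML_NO_CALLBACK);
+ *
+ *   if (needed >= (int)sizeof(buffer))
+ *     fprintf(stderr, "XML output truncated (%d bytes needed)\n", needed);
+ */
+
+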
+/*
+ * 'mxmlSAXLoadFd()' - Load a file descriptor into an XML node tree
+ * using a SAX callback.
+ *
+ * The nodes in the specified file are added to the specified top node.
+ * If no top node is provided, the XML file MUST be well-formed with a
+ * single parent node like <?xml> for the entire file. The callback
+ * function returns the value type that should be used for child nodes.
+ * If MXML_NO_CALLBACK is specified then all child nodes will be either
+ * MXML_ELEMENT or MXML_TEXT nodes.
+ *
+ * The constants MXML_INTEGER_CALLBACK, MXML_OPAQUE_CALLBACK,
+ * MXML_REAL_CALLBACK, and MXML_TEXT_CALLBACK are defined for loading
+ * child nodes of the specified type.
+ *
+ * The SAX callback must call mxmlRetain() for any nodes that need to
+ * be kept for later use. Otherwise, nodes are deleted when the parent
+ * node is closed or after each data, comment, CDATA, or directive node.
+ *
+ * @since Mini-XML 2.3@
+ */
+
+mxml_node_t * /* O - First node or NULL if the file could not be read. */
+mxmlSAXLoadFd(mxml_node_t *top, /* I - Top node */
+ int fd, /* I - File descriptor to read from */
+ mxml_load_cb_t cb, /* I - Callback function or MXML_NO_CALLBACK */
+ mxml_sax_cb_t sax_cb, /* I - SAX callback or MXML_NO_CALLBACK */
+ void *sax_data) /* I - SAX user data */
+{
+ _mxml_fdbuf_t buf; /* File descriptor buffer */
+
+
+ /*
+ * Initialize the file descriptor buffer...
+ */
+
+ buf.fd = fd;
+ buf.current = buf.buffer;
+ buf.end = buf.buffer;
+
+ /*
+ * Read the XML data...
+ */
+
+ return (mxml_load_data(top, &buf, cb, mxml_fd_getc, sax_cb, sax_data));
+}
+
+
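+/*
+ * Editor's note: illustrative SAX sketch, not part of the original
+ * Mini-XML sources.  The hypothetical callback below retains <record>
+ * element nodes (and the ?xml directive) so they survive parsing, while
+ * everything else is released as soon as the parser is done with it;
+ * child data nodes would need to be retained the same way if the full
+ * subtree is wanted.  <string.h> is assumed for strcmp(), "fd" for an
+ * open file descriptor.
+ *
+ *   static void
+ *   sax_cb(mxml_node_t *node, mxml_sax_event_t event, void *data)
+ *   {
+ *     if (event == MXML_SAX_DIRECTIVE)
+ *       mxmlRetain(node);
+ *     else if (event == MXML_SAX_ELEMENT_OPEN &&
+ *              !strcmp(mxmlGetElement(node), "record"))
+ *       mxmlRetain(node);
+ *   }
+ *
+ *   mxml_node_t *tree = mxmlSAXLoadFd(NULL, fd, MXML_NO_CALLBACK,
+ *                                     sax_cb, NULL);
+ */
+
+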
+/*
+ * 'mxmlSAXLoadFile()' - Load a file into an XML node tree
+ * using a SAX callback.
+ *
+ * The nodes in the specified file are added to the specified top node.
+ * If no top node is provided, the XML file MUST be well-formed with a
+ * single parent node like <?xml> for the entire file. The callback
+ * function returns the value type that should be used for child nodes.
+ * If MXML_NO_CALLBACK is specified then all child nodes will be either
+ * MXML_ELEMENT or MXML_TEXT nodes.
+ *
+ * The constants MXML_INTEGER_CALLBACK, MXML_OPAQUE_CALLBACK,
+ * MXML_REAL_CALLBACK, and MXML_TEXT_CALLBACK are defined for loading
+ * child nodes of the specified type.
+ *
+ * The SAX callback must call mxmlRetain() for any nodes that need to
+ * be kept for later use. Otherwise, nodes are deleted when the parent
+ * node is closed or after each data, comment, CDATA, or directive node.
+ *
+ * @since Mini-XML 2.3@
+ */
+
+mxml_node_t * /* O - First node or NULL if the file could not be read. */
+mxmlSAXLoadFile(
+ mxml_node_t *top, /* I - Top node */
+ FILE *fp, /* I - File to read from */
+ mxml_load_cb_t cb, /* I - Callback function or MXML_NO_CALLBACK */
+ mxml_sax_cb_t sax_cb, /* I - SAX callback or MXML_NO_CALLBACK */
+ void *sax_data) /* I - SAX user data */
+{
+ /*
+ * Read the XML data...
+ */
+
+ return (mxml_load_data(top, fp, cb, mxml_file_getc, sax_cb, sax_data));
+}
+
+
+/*
+ * 'mxmlSAXLoadString()' - Load a string into an XML node tree
+ * using a SAX callback.
+ *
+ * The nodes in the specified string are added to the specified top node.
+ * If no top node is provided, the XML string MUST be well-formed with a
+ * single parent node like <?xml> for the entire string. The callback
+ * function returns the value type that should be used for child nodes.
+ * If MXML_NO_CALLBACK is specified then all child nodes will be either
+ * MXML_ELEMENT or MXML_TEXT nodes.
+ *
+ * The constants MXML_INTEGER_CALLBACK, MXML_OPAQUE_CALLBACK,
+ * MXML_REAL_CALLBACK, and MXML_TEXT_CALLBACK are defined for loading
+ * child nodes of the specified type.
+ *
+ * The SAX callback must call mxmlRetain() for any nodes that need to
+ * be kept for later use. Otherwise, nodes are deleted when the parent
+ * node is closed or after each data, comment, CDATA, or directive node.
+ *
+ * @since Mini-XML 2.3@
+ */
+
+mxml_node_t * /* O - First node or NULL if the string has errors. */
+mxmlSAXLoadString(
+ mxml_node_t *top, /* I - Top node */
+ const char *s, /* I - String to load */
+ mxml_load_cb_t cb, /* I - Callback function or MXML_NO_CALLBACK */
+ mxml_sax_cb_t sax_cb, /* I - SAX callback or MXML_NO_CALLBACK */
+ void *sax_data) /* I - SAX user data */
+{
+ /*
+ * Read the XML data...
+ */
+
+ return (mxml_load_data(top, (void *)&s, cb, mxml_string_getc, sax_cb, sax_data));
+}
+
+
+/*
+ * 'mxmlSetCustomHandlers()' - Set the handling functions for custom data.
+ *
+ * The load function accepts a node pointer and a data string and must
+ * return 0 on success and non-zero on error.
+ *
+ * The save function accepts a node pointer and must return a malloc'd
+ * string on success and NULL on error.
+ *
+ */
+
+void
+mxmlSetCustomHandlers(
+ mxml_custom_load_cb_t load, /* I - Load function */
+ mxml_custom_save_cb_t save) /* I - Save function */
+{
+ _mxml_global_t *global = _mxml_global();
+ /* Global data */
+
+
+ global->custom_load_cb = load;
+ global->custom_save_cb = save;
+}
+
+
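+/*
+ * Editor's note: illustrative sketch, not part of the original Mini-XML
+ * sources.  A pair of hypothetical callbacks that store custom node data
+ * as a malloc'd long (<stdlib.h>, <stdio.h>, and <string.h> assumed):
+ *
+ *   static int
+ *   load_custom(mxml_node_t *node, const char *data)
+ *   {
+ *     long *value = malloc(sizeof(long));
+ *
+ *     if (!value)
+ *       return (-1);
+ *
+ *     *value = strtol(data, NULL, 10);
+ *     mxmlSetCustom(node, value, free);
+ *     return (0);
+ *   }
+ *
+ *   static char *
+ *   save_custom(mxml_node_t *node)
+ *   {
+ *     char s[32];
+ *
+ *     snprintf(s, sizeof(s), "%ld", *(const long *)mxmlGetCustom(node));
+ *     return (strdup(s));
+ *   }
+ *
+ *   mxmlSetCustomHandlers(load_custom, save_custom);
+ */
+
+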
+/*
+ * 'mxmlSetErrorCallback()' - Set the error message callback.
+ */
+
+void
+mxmlSetErrorCallback(mxml_error_cb_t cb)/* I - Error callback function */
+{
+ _mxml_global_t *global = _mxml_global();
+ /* Global data */
+
+
+ global->error_cb = cb;
+}
+
+
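+/*
+ * Editor's note: illustrative sketch, not part of the original Mini-XML
+ * sources.  A minimal error callback that prefixes and counts messages
+ * (<stdio.h> assumed):
+ *
+ *   static int error_count = 0;
+ *
+ *   static void
+ *   error_cb(const char *message)
+ *   {
+ *     error_count ++;
+ *     fprintf(stderr, "mxml: %s\n", message);
+ *   }
+ *
+ *   mxmlSetErrorCallback(error_cb);
+ */
+
+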
+/*
+ * 'mxmlSetWrapMargin()' - Set the wrap margin when saving XML data.
+ *
+ * Wrapping is disabled when "column" is 0.
+ *
+ * @since Mini-XML 2.3@
+ */
+
+void
+mxmlSetWrapMargin(int column) /* I - Column for wrapping, 0 to disable wrapping */
+{
+ _mxml_global_t *global = _mxml_global();
+ /* Global data */
+
+
+ global->wrap = column;
+}
+
+
+/*
+ * 'mxml_add_char()' - Add a character to a buffer, expanding as needed.
+ */
+
+static int /* O - 0 on success, -1 on error */
+mxml_add_char(int ch, /* I - Character to add */
+ char **bufptr, /* IO - Current position in buffer */
+ char **buffer, /* IO - Current buffer */
+ int *bufsize) /* IO - Current buffer size */
+{
+ char *newbuffer; /* New buffer value */
+
+
+ if (*bufptr >= (*buffer + *bufsize - 4))
+ {
+ /*
+ * Increase the size of the buffer...
+ */
+
+ if (*bufsize < 1024)
+ (*bufsize) *= 2;
+ else
+ (*bufsize) += 1024;
+
+ if ((newbuffer = realloc(*buffer, *bufsize)) == NULL)
+ {
+ free(*buffer);
+
+ mxml_error("Unable to expand string buffer to %d bytes!", *bufsize);
+
+ return (-1);
+ }
+
+ *bufptr = newbuffer + (*bufptr - *buffer);
+ *buffer = newbuffer;
+ }
+
+ if (ch < 0x80)
+ {
+ /*
+ * Single byte ASCII...
+ */
+
+ *(*bufptr)++ = ch;
+ }
+ else if (ch < 0x800)
+ {
+ /*
+ * Two-byte UTF-8...
+ */
+
+ *(*bufptr)++ = 0xc0 | (ch >> 6);
+ *(*bufptr)++ = 0x80 | (ch & 0x3f);
+ }
+ else if (ch < 0x10000)
+ {
+ /*
+ * Three-byte UTF-8...
+ */
+
+ *(*bufptr)++ = 0xe0 | (ch >> 12);
+ *(*bufptr)++ = 0x80 | ((ch >> 6) & 0x3f);
+ *(*bufptr)++ = 0x80 | (ch & 0x3f);
+ }
+ else
+ {
+ /*
+ * Four-byte UTF-8...
+ */
+
+ *(*bufptr)++ = 0xf0 | (ch >> 18);
+ *(*bufptr)++ = 0x80 | ((ch >> 12) & 0x3f);
+ *(*bufptr)++ = 0x80 | ((ch >> 6) & 0x3f);
+ *(*bufptr)++ = 0x80 | (ch & 0x3f);
+ }
+
+ return (0);
+}
+
+
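+/*
+ * Editor's note (not in the original Mini-XML sources): a worked example
+ * of the UTF-8 encoding in mxml_add_char() above.  For ch = 0x20AC (the
+ * Euro sign), the three-byte branch produces
+ *
+ *   0xe0 | (0x20ac >> 12)         = 0xe2
+ *   0x80 | ((0x20ac >> 6) & 0x3f) = 0x82
+ *   0x80 | (0x20ac & 0x3f)        = 0xac
+ *
+ * i.e. the byte sequence E2 82 AC.
+ */
+
+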
+/*
+ * 'mxml_fd_getc()' - Read a character from a file descriptor.
+ */
+
+static int /* O - Character or EOF */
+mxml_fd_getc(void *p, /* I - File descriptor buffer */
+ int *encoding) /* IO - Encoding */
+{
+ _mxml_fdbuf_t *buf; /* File descriptor buffer */
+ int ch, /* Current character */
+ temp; /* Temporary character */
+
+
+ /*
+ * Grab the next character in the buffer...
+ */
+
+ buf = (_mxml_fdbuf_t *)p;
+
+ if (buf->current >= buf->end)
+ if (mxml_fd_read(buf) < 0)
+ return (EOF);
+
+ ch = *(buf->current)++;
+
+ switch (*encoding)
+ {
+ case ENCODE_UTF8 :
+ /*
+ * Got a UTF-8 character; convert UTF-8 to Unicode and return...
+ */
+
+ if (!(ch & 0x80))
+ {
+#if DEBUG > 1
+ printf("mxml_fd_getc: %c (0x%04x)\n", ch < ' ' ? '.' : ch, ch);
+#endif /* DEBUG > 1 */
+
+ if (mxml_bad_char(ch))
+ {
+ mxml_error("Bad control character 0x%02x not allowed by XML standard!",
+ ch);
+ return (EOF);
+ }
+
+ return (ch);
+ }
+ else if (ch == 0xfe)
+ {
+ /*
+ * UTF-16 big-endian BOM?
+ */
+
+ if (buf->current >= buf->end)
+ if (mxml_fd_read(buf) < 0)
+ return (EOF);
+
+ ch = *(buf->current)++;
+
+ if (ch != 0xff)
+ return (EOF);
+
+ *encoding = ENCODE_UTF16BE;
+
+ return (mxml_fd_getc(p, encoding));
+ }
+ else if (ch == 0xff)
+ {
+ /*
+ * UTF-16 little-endian BOM?
+ */
+
+ if (buf->current >= buf->end)
+ if (mxml_fd_read(buf) < 0)
+ return (EOF);
+
+ ch = *(buf->current)++;
+
+ if (ch != 0xfe)
+ return (EOF);
+
+ *encoding = ENCODE_UTF16LE;
+
+ return (mxml_fd_getc(p, encoding));
+ }
+ else if ((ch & 0xe0) == 0xc0)
+ {
+ /*
+ * Two-byte value...
+ */
+
+ if (buf->current >= buf->end)
+ if (mxml_fd_read(buf) < 0)
+ return (EOF);
+
+ temp = *(buf->current)++;
+
+ if ((temp & 0xc0) != 0x80)
+ return (EOF);
+
+ ch = ((ch & 0x1f) << 6) | (temp & 0x3f);
+
+ if (ch < 0x80)
+ {
+ mxml_error("Invalid UTF-8 sequence for character 0x%04x!", ch);
+ return (EOF);
+ }
+ }
+ else if ((ch & 0xf0) == 0xe0)
+ {
+ /*
+ * Three-byte value...
+ */
+
+ if (buf->current >= buf->end)
+ if (mxml_fd_read(buf) < 0)
+ return (EOF);
+
+ temp = *(buf->current)++;
+
+ if ((temp & 0xc0) != 0x80)
+ return (EOF);
+
+ ch = ((ch & 0x0f) << 6) | (temp & 0x3f);
+
+ if (buf->current >= buf->end)
+ if (mxml_fd_read(buf) < 0)
+ return (EOF);
+
+ temp = *(buf->current)++;
+
+ if ((temp & 0xc0) != 0x80)
+ return (EOF);
+
+ ch = (ch << 6) | (temp & 0x3f);
+
+ if (ch < 0x800)
+ {
+ mxml_error("Invalid UTF-8 sequence for character 0x%04x!", ch);
+ return (EOF);
+ }
+
+ /*
+ * Ignore (strip) Byte Order Mark (BOM)...
+ */
+
+ if (ch == 0xfeff)
+ return (mxml_fd_getc(p, encoding));
+ }
+ else if ((ch & 0xf8) == 0xf0)
+ {
+ /*
+ * Four-byte value...
+ */
+
+ if (buf->current >= buf->end)
+ if (mxml_fd_read(buf) < 0)
+ return (EOF);
+
+ temp = *(buf->current)++;
+
+ if ((temp & 0xc0) != 0x80)
+ return (EOF);
+
+ ch = ((ch & 0x07) << 6) | (temp & 0x3f);
+
+ if (buf->current >= buf->end)
+ if (mxml_fd_read(buf) < 0)
+ return (EOF);
+
+ temp = *(buf->current)++;
+
+ if ((temp & 0xc0) != 0x80)
+ return (EOF);
+
+ ch = (ch << 6) | (temp & 0x3f);
+
+ if (buf->current >= buf->end)
+ if (mxml_fd_read(buf) < 0)
+ return (EOF);
+
+ temp = *(buf->current)++;
+
+ if ((temp & 0xc0) != 0x80)
+ return (EOF);
+
+ ch = (ch << 6) | (temp & 0x3f);
+
+ if (ch < 0x10000)
+ {
+ mxml_error("Invalid UTF-8 sequence for character 0x%04x!", ch);
+ return (EOF);
+ }
+ }
+ else
+ return (EOF);
+ break;
+
+ case ENCODE_UTF16BE :
+ /*
+ * Read UTF-16 big-endian char...
+ */
+
+ if (buf->current >= buf->end)
+ if (mxml_fd_read(buf) < 0)
+ return (EOF);
+
+ temp = *(buf->current)++;
+
+ ch = (ch << 8) | temp;
+
+ if (mxml_bad_char(ch))
+ {
+ mxml_error("Bad control character 0x%02x not allowed by XML standard!",
+ ch);
+ return (EOF);
+ }
+ else if (ch >= 0xd800 && ch <= 0xdbff)
+ {
+ /*
+ * Multi-word UTF-16 char...
+ */
+
+ int lch;
+
+ if (buf->current >= buf->end)
+ if (mxml_fd_read(buf) < 0)
+ return (EOF);
+
+ lch = *(buf->current)++;
+
+ if (buf->current >= buf->end)
+ if (mxml_fd_read(buf) < 0)
+ return (EOF);
+
+ temp = *(buf->current)++;
+
+ lch = (lch << 8) | temp;
+
+ if (lch < 0xdc00 || lch >= 0xdfff)
+ return (EOF);
+
+ ch = (((ch & 0x3ff) << 10) | (lch & 0x3ff)) + 0x10000;
+ }
+ break;
+
+ case ENCODE_UTF16LE :
+ /*
+ * Read UTF-16 little-endian char...
+ */
+
+ if (buf->current >= buf->end)
+ if (mxml_fd_read(buf) < 0)
+ return (EOF);
+
+ temp = *(buf->current)++;
+
+ ch |= (temp << 8);
+
+ if (mxml_bad_char(ch))
+ {
+ mxml_error("Bad control character 0x%02x not allowed by XML standard!",
+ ch);
+ return (EOF);
+ }
+ else if (ch >= 0xd800 && ch <= 0xdbff)
+ {
+ /*
+ * Multi-word UTF-16 char...
+ */
+
+ int lch;
+
+ if (buf->current >= buf->end)
+ if (mxml_fd_read(buf) < 0)
+ return (EOF);
+
+ lch = *(buf->current)++;
+
+ if (buf->current >= buf->end)
+ if (mxml_fd_read(buf) < 0)
+ return (EOF);
+
+ temp = *(buf->current)++;
+
+ lch |= (temp << 8);
+
+ if (lch < 0xdc00 || lch >= 0xdfff)
+ return (EOF);
+
+ ch = (((ch & 0x3ff) << 10) | (lch & 0x3ff)) + 0x10000;
+ }
+ break;
+ }
+
+#if DEBUG > 1
+ printf("mxml_fd_getc: %c (0x%04x)\n", ch < ' ' ? '.' : ch, ch);
+#endif /* DEBUG > 1 */
+
+ return (ch);
+}
+
+
+/*
+ * 'mxml_fd_putc()' - Write a character to a file descriptor.
+ */
+
+static int /* O - 0 on success, -1 on error */
+mxml_fd_putc(int ch, /* I - Character */
+ void *p) /* I - File descriptor buffer */
+{
+ _mxml_fdbuf_t *buf; /* File descriptor buffer */
+
+
+ /*
+ * Flush the write buffer as needed...
+ */
+
+ buf = (_mxml_fdbuf_t *)p;
+
+ if (buf->current >= buf->end)
+ if (mxml_fd_write(buf) < 0)
+ return (-1);
+
+ *(buf->current)++ = ch;
+
+ /*
+ * Return successfully...
+ */
+
+ return (0);
+}
+
+
+/*
+ * 'mxml_fd_read()' - Read a buffer of data from a file descriptor.
+ */
+
+static int /* O - 0 on success, -1 on error */
+mxml_fd_read(_mxml_fdbuf_t *buf) /* I - File descriptor buffer */
+{
+ int bytes; /* Bytes read... */
+
+
+ /*
+ * Range check input...
+ */
+
+ if (!buf)
+ return (-1);
+
+ /*
+ * Read from the file descriptor...
+ */
+
+ while ((bytes = read(buf->fd, buf->buffer, sizeof(buf->buffer))) < 0)
+#ifdef EINTR
+ if (errno != EAGAIN && errno != EINTR)
+#else
+ if (errno != EAGAIN)
+#endif /* EINTR */
+ return (-1);
+
+ if (bytes == 0)
+ return (-1);
+
+ /*
+ * Update the pointers and return success...
+ */
+
+ buf->current = buf->buffer;
+ buf->end = buf->buffer + bytes;
+
+ return (0);
+}
+
+
+/*
+ * 'mxml_fd_write()' - Write a buffer of data to a file descriptor.
+ */
+
+static int /* O - 0 on success, -1 on error */
+mxml_fd_write(_mxml_fdbuf_t *buf) /* I - File descriptor buffer */
+{
+ int bytes; /* Bytes written */
+ unsigned char *ptr; /* Pointer into buffer */
+
+
+ /*
+ * Range check...
+ */
+
+ if (!buf)
+ return (-1);
+
+ /*
+ * Return 0 if there is nothing to write...
+ */
+
+ if (buf->current == buf->buffer)
+ return (0);
+
+ /*
+ * Loop until we have written everything...
+ */
+
+ for (ptr = buf->buffer; ptr < buf->current; ptr += bytes)
+ if ((bytes = write(buf->fd, ptr, buf->current - ptr)) < 0)
+ return (-1);
+
+ /*
+ * All done, reset pointers and return success...
+ */
+
+ buf->current = buf->buffer;
+
+ return (0);
+}
+
+
+/*
+ * 'mxml_file_getc()' - Get a character from a file.
+ */
+
+static int /* O - Character or EOF */
+mxml_file_getc(void *p, /* I - Pointer to file */
+ int *encoding) /* IO - Encoding */
+{
+ int ch, /* Character from file */
+ temp; /* Temporary character */
+ FILE *fp; /* Pointer to file */
+
+
+ /*
+ * Read a character from the file and see if it is EOF or ASCII...
+ */
+
+ fp = (FILE *)p;
+ ch = getc(fp);
+
+ if (ch == EOF)
+ return (EOF);
+
+ switch (*encoding)
+ {
+ case ENCODE_UTF8 :
+ /*
+ * Got a UTF-8 character; convert UTF-8 to Unicode and return...
+ */
+
+ if (!(ch & 0x80))
+ {
+ if (mxml_bad_char(ch))
+ {
+ mxml_error("Bad control character 0x%02x not allowed by XML standard!",
+ ch);
+ return (EOF);
+ }
+
+#if DEBUG > 1
+ printf("mxml_file_getc: %c (0x%04x)\n", ch < ' ' ? '.' : ch, ch);
+#endif /* DEBUG > 1 */
+
+ return (ch);
+ }
+ else if (ch == 0xfe)
+ {
+ /*
+ * UTF-16 big-endian BOM?
+ */
+
+ ch = getc(fp);
+ if (ch != 0xff)
+ return (EOF);
+
+ *encoding = ENCODE_UTF16BE;
+
+ return (mxml_file_getc(p, encoding));
+ }
+ else if (ch == 0xff)
+ {
+ /*
+ * UTF-16 little-endian BOM?
+ */
+
+ ch = getc(fp);
+ if (ch != 0xfe)
+ return (EOF);
+
+ *encoding = ENCODE_UTF16LE;
+
+ return (mxml_file_getc(p, encoding));
+ }
+ else if ((ch & 0xe0) == 0xc0)
+ {
+ /*
+ * Two-byte value...
+ */
+
+ if ((temp = getc(fp)) == EOF || (temp & 0xc0) != 0x80)
+ return (EOF);
+
+ ch = ((ch & 0x1f) << 6) | (temp & 0x3f);
+
+ if (ch < 0x80)
+ {
+ mxml_error("Invalid UTF-8 sequence for character 0x%04x!", ch);
+ return (EOF);
+ }
+ }
+ else if ((ch & 0xf0) == 0xe0)
+ {
+ /*
+ * Three-byte value...
+ */
+
+ if ((temp = getc(fp)) == EOF || (temp & 0xc0) != 0x80)
+ return (EOF);
+
+ ch = ((ch & 0x0f) << 6) | (temp & 0x3f);
+
+ if ((temp = getc(fp)) == EOF || (temp & 0xc0) != 0x80)
+ return (EOF);
+
+ ch = (ch << 6) | (temp & 0x3f);
+
+ if (ch < 0x800)
+ {
+ mxml_error("Invalid UTF-8 sequence for character 0x%04x!", ch);
+ return (EOF);
+ }
+
+ /*
+ * Ignore (strip) Byte Order Mark (BOM)...
+ */
+
+ if (ch == 0xfeff)
+ return (mxml_file_getc(p, encoding));
+ }
+ else if ((ch & 0xf8) == 0xf0)
+ {
+ /*
+ * Four-byte value...
+ */
+
+ if ((temp = getc(fp)) == EOF || (temp & 0xc0) != 0x80)
+ return (EOF);
+
+ ch = ((ch & 0x07) << 6) | (temp & 0x3f);
+
+ if ((temp = getc(fp)) == EOF || (temp & 0xc0) != 0x80)
+ return (EOF);
+
+ ch = (ch << 6) | (temp & 0x3f);
+
+ if ((temp = getc(fp)) == EOF || (temp & 0xc0) != 0x80)
+ return (EOF);
+
+ ch = (ch << 6) | (temp & 0x3f);
+
+ if (ch < 0x10000)
+ {
+ mxml_error("Invalid UTF-8 sequence for character 0x%04x!", ch);
+ return (EOF);
+ }
+ }
+ else
+ return (EOF);
+ break;
+
+ case ENCODE_UTF16BE :
+ /*
+ * Read UTF-16 big-endian char...
+ */
+
+ ch = (ch << 8) | getc(fp);
+
+ if (mxml_bad_char(ch))
+ {
+ mxml_error("Bad control character 0x%02x not allowed by XML standard!",
+ ch);
+ return (EOF);
+ }
+ else if (ch >= 0xd800 && ch <= 0xdbff)
+ {
+ /*
+ * Multi-word UTF-16 char...
+ */
+
+ int lch = (getc(fp) << 8);
+ lch |= getc(fp);
+
+ if (lch < 0xdc00 || lch >= 0xdfff)
+ return (EOF);
+
+ ch = (((ch & 0x3ff) << 10) | (lch & 0x3ff)) + 0x10000;
+ }
+ break;
+
+ case ENCODE_UTF16LE :
+ /*
+ * Read UTF-16 little-endian char...
+ */
+
+ ch |= (getc(fp) << 8);
+
+ if (mxml_bad_char(ch))
+ {
+ mxml_error("Bad control character 0x%02x not allowed by XML standard!",
+ ch);
+ return (EOF);
+ }
+ else if (ch >= 0xd800 && ch <= 0xdbff)
+ {
+ /*
+ * Multi-word UTF-16 char...
+ */
+
+ int lch = getc(fp);
+ lch |= (getc(fp) << 8);
+
+ if (lch < 0xdc00 || lch >= 0xdfff)
+ return (EOF);
+
+ ch = (((ch & 0x3ff) << 10) | (lch & 0x3ff)) + 0x10000;
+ }
+ break;
+ }
+
+#if DEBUG > 1
+ printf("mxml_file_getc: %c (0x%04x)\n", ch < ' ' ? '.' : ch, ch);
+#endif /* DEBUG > 1 */
+
+ return (ch);
+}
+
+
+/*
+ * 'mxml_file_putc()' - Write a character to a file.
+ */
+
+static int /* O - 0 on success, -1 on failure */
+mxml_file_putc(int ch, /* I - Character to write */
+ void *p) /* I - Pointer to file */
+{
+ return (putc(ch, (FILE *)p) == EOF ? -1 : 0);
+}
+
+
+/*
+ * 'mxml_get_entity()' - Get the character corresponding to an entity...
+ */
+
+static int /* O - Character value or EOF on error */
+mxml_get_entity(mxml_node_t *parent, /* I - Parent node */
+ void *p, /* I - Pointer to source */
+ int *encoding, /* IO - Character encoding */
+ int (*getc_cb)(void *, int *))
+ /* I - Get character function */
+{
+ int ch; /* Current character */
+ char entity[64], /* Entity string */
+ *entptr; /* Pointer into entity */
+
+
+ entptr = entity;
+
+ while ((ch = (*getc_cb)(p, encoding)) != EOF)
+ if (ch > 126 || (!isalnum(ch) && ch != '#'))
+ break;
+ else if (entptr < (entity + sizeof(entity) - 1))
+ *entptr++ = ch;
+ else
+ {
+ mxml_error("Entity name too long under parent <%s>!",
+ parent ? parent->value.element.name : "null");
+ break;
+ }
+
+ *entptr = '\0';
+
+ if (ch != ';')
+ {
+ mxml_error("Character entity \"%s\" not terminated under parent <%s>!",
+ entity, parent ? parent->value.element.name : "null");
+ return (EOF);
+ }
+
+ if (entity[0] == '#')
+ {
+ if (entity[1] == 'x')
+ ch = strtol(entity + 2, NULL, 16);
+ else
+ ch = strtol(entity + 1, NULL, 10);
+ }
+ else if ((ch = mxmlEntityGetValue(entity)) < 0)
+ mxml_error("Entity name \"%s;\" not supported under parent <%s>!",
+ entity, parent ? parent->value.element.name : "null");
+
+ if (mxml_bad_char(ch))
+ {
+ mxml_error("Bad control character 0x%02x under parent <%s> not allowed by XML standard!",
+ ch, parent ? parent->value.element.name : "null");
+ return (EOF);
+ }
+
+ return (ch);
+}
+
+
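+/*
+ * Editor's note (not in the original Mini-XML sources): with the logic
+ * in mxml_get_entity() above, "&amp;" resolves through
+ * mxmlEntityGetValue() to '&' (0x26), "&#169;" resolves numerically to
+ * 0xA9, and "&#x20AC;" resolves to 0x20AC; the terminating ';' is
+ * required in every case.
+ */
+
+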
+/*
+ * 'mxml_load_data()' - Load data into an XML node tree.
+ */
+
+static mxml_node_t * /* O - First node or NULL if the file could not be read. */
+mxml_load_data(
+ mxml_node_t *top, /* I - Top node */
+ void *p, /* I - Pointer to data */
+ mxml_load_cb_t cb, /* I - Callback function or MXML_NO_CALLBACK */
+ _mxml_getc_cb_t getc_cb, /* I - Read function */
+ mxml_sax_cb_t sax_cb, /* I - SAX callback or MXML_NO_CALLBACK */
+ void *sax_data) /* I - SAX user data */
+{
+ mxml_node_t *node, /* Current node */
+ *first, /* First node added */
+ *parent; /* Current parent node */
+ int ch, /* Character from file */
+ whitespace; /* Non-zero if whitespace seen */
+ char *buffer, /* String buffer */
+ *bufptr; /* Pointer into buffer */
+ int bufsize; /* Size of buffer */
+ mxml_type_t type; /* Current node type */
+ int encoding; /* Character encoding */
+ _mxml_global_t *global = _mxml_global();
+ /* Global data */
+ static const char * const types[] = /* Type strings... */
+ {
+ "MXML_ELEMENT", /* XML element with attributes */
+ "MXML_INTEGER", /* Integer value */
+ "MXML_OPAQUE", /* Opaque string */
+ "MXML_REAL", /* Real value */
+ "MXML_TEXT", /* Text fragment */
+ "MXML_CUSTOM" /* Custom data */
+ };
+
+
+ /*
+ * Read elements and other nodes from the file...
+ */
+
+ if ((buffer = malloc(64)) == NULL)
+ {
+ mxml_error("Unable to allocate string buffer!");
+ return (NULL);
+ }
+
+ bufsize = 64;
+ bufptr = buffer;
+ parent = top;
+ first = NULL;
+ whitespace = 0;
+ encoding = ENCODE_UTF8;
+
+ if (cb && parent)
+ type = (*cb)(parent);
+ else
+ type = MXML_TEXT;
+
+ while ((ch = (*getc_cb)(p, &encoding)) != EOF)
+ {
+ if ((ch == '<' ||
+ (mxml_isspace(ch) && type != MXML_OPAQUE && type != MXML_CUSTOM)) &&
+ bufptr > buffer)
+ {
+ /*
+ * Add a new value node...
+ */
+
+ *bufptr = '\0';
+
+ switch (type)
+ {
+ case MXML_INTEGER :
+ node = mxmlNewInteger(parent, strtol(buffer, &bufptr, 0));
+ break;
+
+ case MXML_OPAQUE :
+ node = mxmlNewOpaque(parent, buffer);
+ break;
+
+ case MXML_REAL :
+ node = mxmlNewReal(parent, strtod(buffer, &bufptr));
+ break;
+
+ case MXML_TEXT :
+ node = mxmlNewText(parent, whitespace, buffer);
+ break;
+
+ case MXML_CUSTOM :
+ if (global->custom_load_cb)
+ {
+ /*
+ * Use the callback to fill in the custom data...
+ */
+
+ node = mxmlNewCustom(parent, NULL, NULL);
+
+ if ((*global->custom_load_cb)(node, buffer))
+ {
+ mxml_error("Bad custom value '%s' in parent <%s>!",
+ buffer, parent ? parent->value.element.name : "null");
+ mxmlDelete(node);
+ node = NULL;
+ }
+ break;
+ }
+
+ default : /* Ignore... */
+ node = NULL;
+ break;
+ }
+
+ if (*bufptr)
+ {
+ /*
+ * Bad integer/real number value...
+ */
+
+ mxml_error("Bad %s value '%s' in parent <%s>!",
+ type == MXML_INTEGER ? "integer" : "real", buffer,
+ parent ? parent->value.element.name : "null");
+ break;
+ }
+
+ bufptr = buffer;
+ whitespace = mxml_isspace(ch) && type == MXML_TEXT;
+
+ if (!node && type != MXML_IGNORE)
+ {
+ /*
+ * Print error and return...
+ */
+
+ mxml_error("Unable to add value node of type %s to parent <%s>!",
+ types[type], parent ? parent->value.element.name : "null");
+ goto error;
+ }
+
+ if (sax_cb)
+ {
+ (*sax_cb)(node, MXML_SAX_DATA, sax_data);
+
+ if (!mxmlRelease(node))
+ node = NULL;
+ }
+
+ if (!first && node)
+ first = node;
+ }
+ else if (mxml_isspace(ch) && type == MXML_TEXT)
+ whitespace = 1;
+
+ /*
+ * Add lone whitespace node if we have an element and existing
+ * whitespace...
+ */
+
+ if (ch == '<' && whitespace && type == MXML_TEXT)
+ {
+ if (parent)
+ {
+ node = mxmlNewText(parent, whitespace, "");
+
+ if (sax_cb)
+ {
+ (*sax_cb)(node, MXML_SAX_DATA, sax_data);
+
+ if (!mxmlRelease(node))
+ node = NULL;
+ }
+
+ if (!first && node)
+ first = node;
+ }
+
+ whitespace = 0;
+ }
+
+ if (ch == '<')
+ {
+ /*
+ * Start of open/close tag...
+ */
+
+ bufptr = buffer;
+
+ while ((ch = (*getc_cb)(p, &encoding)) != EOF)
+ if (mxml_isspace(ch) || ch == '>' || (ch == '/' && bufptr > buffer))
+ break;
+ else if (ch == '<')
+ {
+ mxml_error("Bare < in element!");
+ goto error;
+ }
+ else if (ch == '&')
+ {
+ if ((ch = mxml_get_entity(parent, p, &encoding, getc_cb)) == EOF)
+ goto error;
+
+ if (mxml_add_char(ch, &bufptr, &buffer, &bufsize))
+ goto error;
+ }
+ else if (mxml_add_char(ch, &bufptr, &buffer, &bufsize))
+ goto error;
+ else if (((bufptr - buffer) == 1 && buffer[0] == '?') ||
+ ((bufptr - buffer) == 3 && !strncmp(buffer, "!--", 3)) ||
+ ((bufptr - buffer) == 8 && !strncmp(buffer, "![CDATA[", 8)))
+ break;
+
+ *bufptr = '\0';
+
+ if (!strcmp(buffer, "!--"))
+ {
+ /*
+ * Gather rest of comment...
+ */
+
+ while ((ch = (*getc_cb)(p, &encoding)) != EOF)
+ {
+ if (ch == '>' && bufptr > (buffer + 4) &&
+ bufptr[-3] != '-' && bufptr[-2] == '-' && bufptr[-1] == '-')
+ break;
+ else if (mxml_add_char(ch, &bufptr, &buffer, &bufsize))
+ goto error;
+ }
+
+ /*
+ * Error out if we didn't get the whole comment...
+ */
+
+ if (ch != '>')
+ {
+ /*
+ * Print error and return...
+ */
+
+ mxml_error("Early EOF in comment node!");
+ goto error;
+ }
+
+
+ /*
+ * Otherwise add this as an element under the current parent...
+ */
+
+ *bufptr = '\0';
+
+ if (!parent && first)
+ {
+ /*
+ * There can only be one root element!
+ */
+
+ mxml_error("<%s> cannot be a second root node after <%s>",
+ buffer, first->value.element.name);
+ goto error;
+ }
+
+ if ((node = mxmlNewElement(parent, buffer)) == NULL)
+ {
+ /*
+ * Just print error for now...
+ */
+
+ mxml_error("Unable to add comment node to parent <%s>!",
+ parent ? parent->value.element.name : "null");
+ break;
+ }
+
+ if (sax_cb)
+ {
+ (*sax_cb)(node, MXML_SAX_COMMENT, sax_data);
+
+ if (!mxmlRelease(node))
+ node = NULL;
+ }
+
+ if (node && !first)
+ first = node;
+ }
+ else if (!strcmp(buffer, "![CDATA["))
+ {
+ /*
+ * Gather CDATA section...
+ */
+
+ while ((ch = (*getc_cb)(p, &encoding)) != EOF)
+ {
+ if (ch == '>' && !strncmp(bufptr - 2, "]]", 2))
+ break;
+ else if (mxml_add_char(ch, &bufptr, &buffer, &bufsize))
+ goto error;
+ }
+
+ /*
+ * Error out if we didn't get the whole comment...
+ */
+
+ if (ch != '>')
+ {
+ /*
+ * Print error and return...
+ */
+
+ mxml_error("Early EOF in CDATA node!");
+ goto error;
+ }
+
+
+ /*
+ * Otherwise add this as an element under the current parent...
+ */
+
+ *bufptr = '\0';
+
+ if (!parent && first)
+ {
+ /*
+ * There can only be one root element!
+ */
+
+ mxml_error("<%s> cannot be a second root node after <%s>",
+ buffer, first->value.element.name);
+ goto error;
+ }
+
+ if ((node = mxmlNewElement(parent, buffer)) == NULL)
+ {
+ /*
+ * Print error and return...
+ */
+
+ mxml_error("Unable to add CDATA node to parent <%s>!",
+ parent ? parent->value.element.name : "null");
+ goto error;
+ }
+
+ if (sax_cb)
+ {
+ (*sax_cb)(node, MXML_SAX_CDATA, sax_data);
+
+ if (!mxmlRelease(node))
+ node = NULL;
+ }
+
+ if (node && !first)
+ first = node;
+ }
+ else if (buffer[0] == '?')
+ {
+ /*
+ * Gather rest of processing instruction...
+ */
+
+ while ((ch = (*getc_cb)(p, &encoding)) != EOF)
+ {
+ if (ch == '>' && bufptr > buffer && bufptr[-1] == '?')
+ break;
+ else if (mxml_add_char(ch, &bufptr, &buffer, &bufsize))
+ goto error;
+ }
+
+ /*
+ * Error out if we didn't get the whole processing instruction...
+ */
+
+ if (ch != '>')
+ {
+ /*
+ * Print error and return...
+ */
+
+ mxml_error("Early EOF in processing instruction node!");
+ goto error;
+ }
+
+ /*
+ * Otherwise add this as an element under the current parent...
+ */
+
+ *bufptr = '\0';
+
+ if (!parent && first)
+ {
+ /*
+ * There can only be one root element!
+ */
+
+ mxml_error("<%s> cannot be a second root node after <%s>",
+ buffer, first->value.element.name);
+ goto error;
+ }
+
+ if ((node = mxmlNewElement(parent, buffer)) == NULL)
+ {
+ /*
+ * Print error and return...
+ */
+
+ mxml_error("Unable to add processing instruction node to parent <%s>!",
+ parent ? parent->value.element.name : "null");
+ goto error;
+ }
+
+ if (sax_cb)
+ {
+ (*sax_cb)(node, MXML_SAX_DIRECTIVE, sax_data);
+
+ if (!mxmlRelease(node))
+ node = NULL;
+ }
+
+ if (node)
+ {
+ if (!first)
+ first = node;
+
+ if (!parent)
+ {
+ parent = node;
+
+ if (cb)
+ type = (*cb)(parent);
+ }
+ }
+ }
+ else if (buffer[0] == '!')
+ {
+ /*
+ * Gather rest of declaration...
+ */
+
+ do
+ {
+ if (ch == '>')
+ break;
+ else
+ {
+ if (ch == '&')
+ if ((ch = mxml_get_entity(parent, p, &encoding, getc_cb)) == EOF)
+ goto error;
+
+ if (mxml_add_char(ch, &bufptr, &buffer, &bufsize))
+ goto error;
+ }
+ }
+ while ((ch = (*getc_cb)(p, &encoding)) != EOF);
+
+ /*
+ * Error out if we didn't get the whole declaration...
+ */
+
+ if (ch != '>')
+ {
+ /*
+ * Print error and return...
+ */
+
+ mxml_error("Early EOF in declaration node!");
+ goto error;
+ }
+
+ /*
+ * Otherwise add this as an element under the current parent...
+ */
+
+ *bufptr = '\0';
+
+ if (!parent && first)
+ {
+ /*
+ * There can only be one root element!
+ */
+
+ mxml_error("<%s> cannot be a second root node after <%s>",
+ buffer, first->value.element.name);
+ goto error;
+ }
+
+ if ((node = mxmlNewElement(parent, buffer)) == NULL)
+ {
+ /*
+ * Print error and return...
+ */
+
+ mxml_error("Unable to add declaration node to parent <%s>!",
+ parent ? parent->value.element.name : "null");
+ goto error;
+ }
+
+ if (sax_cb)
+ {
+ (*sax_cb)(node, MXML_SAX_DIRECTIVE, sax_data);
+
+ if (!mxmlRelease(node))
+ node = NULL;
+ }
+
+ if (node)
+ {
+ if (!first)
+ first = node;
+
+ if (!parent)
+ {
+ parent = node;
+
+ if (cb)
+ type = (*cb)(parent);
+ }
+ }
+ }
+ else if (buffer[0] == '/')
+ {
+ /*
+ * Handle close tag...
+ */
+
+ if (!parent || strcmp(buffer + 1, parent->value.element.name))
+ {
+ /*
+ * Close tag doesn't match tree; print an error for now...
+ */
+
+ mxml_error("Mismatched close tag <%s> under parent <%s>!",
+ buffer, parent ? parent->value.element.name : "(null)");
+ goto error;
+ }
+
+ /*
+ * Keep reading until we see >...
+ */
+
+ while (ch != '>' && ch != EOF)
+ ch = (*getc_cb)(p, &encoding);
+
+ node = parent;
+ parent = parent->parent;
+
+ if (sax_cb)
+ {
+ (*sax_cb)(node, MXML_SAX_ELEMENT_CLOSE, sax_data);
+
+ if (!mxmlRelease(node) && first == node)
+ first = NULL;
+ }
+
+ /*
+ * Ascend into the parent and set the value type as needed...
+ */
+
+ if (cb && parent)
+ type = (*cb)(parent);
+ }
+ else
+ {
+ /*
+ * Handle open tag...
+ */
+
+ if (!parent && first)
+ {
+ /*
+ * There can only be one root element!
+ */
+
+ mxml_error("<%s> cannot be a second root node after <%s>",
+ buffer, first->value.element.name);
+ goto error;
+ }
+
+ if ((node = mxmlNewElement(parent, buffer)) == NULL)
+ {
+ /*
+ * Just print error for now...
+ */
+
+ mxml_error("Unable to add element node to parent <%s>!",
+ parent ? parent->value.element.name : "null");
+ goto error;
+ }
+
+ if (mxml_isspace(ch))
+ {
+ if ((ch = mxml_parse_element(node, p, &encoding, getc_cb)) == EOF)
+ goto error;
+ }
+ else if (ch == '/')
+ {
+ if ((ch = (*getc_cb)(p, &encoding)) != '>')
+ {
+ mxml_error("Expected > but got '%c' instead for element <%s/>!",
+ ch, buffer);
+ mxmlDelete(node);
+ goto error;
+ }
+
+ ch = '/';
+ }
+
+ if (sax_cb)
+ (*sax_cb)(node, MXML_SAX_ELEMENT_OPEN, sax_data);
+
+ if (!first)
+ first = node;
+
+ if (ch == EOF)
+ break;
+
+ if (ch != '/')
+ {
+ /*
+ * Descend into this node, setting the value type as needed...
+ */
+
+ parent = node;
+
+ if (cb && parent)
+ type = (*cb)(parent);
+ }
+ else if (sax_cb)
+ {
+ (*sax_cb)(node, MXML_SAX_ELEMENT_CLOSE, sax_data);
+
+ if (!mxmlRelease(node) && first == node)
+ first = NULL;
+ }
+ }
+
+ bufptr = buffer;
+ }
+ else if (ch == '&')
+ {
+ /*
+ * Add character entity to current buffer...
+ */
+
+ if ((ch = mxml_get_entity(parent, p, &encoding, getc_cb)) == EOF)
+ goto error;
+
+ if (mxml_add_char(ch, &bufptr, &buffer, &bufsize))
+ goto error;
+ }
+ else if (type == MXML_OPAQUE || type == MXML_CUSTOM || !mxml_isspace(ch))
+ {
+ /*
+ * Add character to current buffer...
+ */
+
+ if (mxml_add_char(ch, &bufptr, &buffer, &bufsize))
+ goto error;
+ }
+ }
+
+ /*
+ * Free the string buffer - we don't need it anymore...
+ */
+
+ free(buffer);
+
+ /*
+ * Find the top element and return it...
+ */
+
+ if (parent)
+ {
+ node = parent;
+
+ while (parent->parent != top && parent->parent)
+ parent = parent->parent;
+
+ if (node != parent)
+ {
+ mxml_error("Missing close tag </%s> under parent <%s>!",
+ node->value.element.name,
+ node->parent ? node->parent->value.element.name : "(null)");
+
+ mxmlDelete(first);
+
+ return (NULL);
+ }
+ }
+
+ if (parent)
+ return (parent);
+ else
+ return (first);
+
+ /*
+ * Common error return...
+ */
+
+error:
+
+ mxmlDelete(first);
+
+ free(buffer);
+
+ return (NULL);
+}
+
+
+/*
+ * 'mxml_parse_element()' - Parse an element for any attributes...
+ */
+
+static int /* O - Terminating character */
+mxml_parse_element(
+ mxml_node_t *node, /* I - Element node */
+ void *p, /* I - Data to read from */
+ int *encoding, /* IO - Encoding */
+ _mxml_getc_cb_t getc_cb) /* I - Data callback */
+{
+ int ch, /* Current character in file */
+ quote; /* Quoting character */
+ char *name, /* Attribute name */
+ *value, /* Attribute value */
+ *ptr; /* Pointer into name/value */
+ int namesize, /* Size of name string */
+ valsize; /* Size of value string */
+
+
+ /*
+ * Initialize the name and value buffers...
+ */
+
+ if ((name = malloc(64)) == NULL)
+ {
+ mxml_error("Unable to allocate memory for name!");
+ return (EOF);
+ }
+
+ namesize = 64;
+
+ if ((value = malloc(64)) == NULL)
+ {
+ free(name);
+ mxml_error("Unable to allocate memory for value!");
+ return (EOF);
+ }
+
+ valsize = 64;
+
+ /*
+ * Loop until we hit a >, /, ?, or EOF...
+ */
+
+ while ((ch = (*getc_cb)(p, encoding)) != EOF)
+ {
+#if DEBUG > 1
+ fprintf(stderr, "parse_element: ch='%c'\n", ch);
+#endif /* DEBUG > 1 */
+
+ /*
+ * Skip leading whitespace...
+ */
+
+ if (mxml_isspace(ch))
+ continue;
+
+ /*
+ * Stop at /, ?, or >...
+ */
+
+ if (ch == '/' || ch == '?')
+ {
+ /*
+ * Grab the > character and print an error if it isn't there...
+ */
+
+ quote = (*getc_cb)(p, encoding);
+
+ if (quote != '>')
+ {
+ mxml_error("Expected '>' after '%c' for element %s, but got '%c'!",
+ ch, node->value.element.name, quote);
+ goto error;
+ }
+
+ break;
+ }
+ else if (ch == '<')
+ {
+ mxml_error("Bare < in element %s!", node->value.element.name);
+ goto error;
+ }
+ else if (ch == '>')
+ break;
+
+ /*
+ * Read the attribute name...
+ */
+
+ name[0] = ch;
+ ptr = name + 1;
+
+ if (ch == '\"' || ch == '\'')
+ {
+ /*
+ * Name is in quotes, so get a quoted string...
+ */
+
+ quote = ch;
+
+ while ((ch = (*getc_cb)(p, encoding)) != EOF)
+ {
+ if (ch == '&')
+ if ((ch = mxml_get_entity(node, p, encoding, getc_cb)) == EOF)
+ goto error;
+
+ if (mxml_add_char(ch, &ptr, &name, &namesize))
+ goto error;
+
+ if (ch == quote)
+ break;
+ }
+ }
+ else
+ {
+ /*
+ * Grab a normal, non-quoted name...
+ */
+
+ while ((ch = (*getc_cb)(p, encoding)) != EOF)
+ if (mxml_isspace(ch) || ch == '=' || ch == '/' || ch == '>' ||
+ ch == '?')
+ break;
+ else
+ {
+ if (ch == '&')
+ if ((ch = mxml_get_entity(node, p, encoding, getc_cb)) == EOF)
+ goto error;
+
+ if (mxml_add_char(ch, &ptr, &name, &namesize))
+ goto error;
+ }
+ }
+
+ *ptr = '\0';
+
+ if (mxmlElementGetAttr(node, name))
+ goto error;
+
+ while (ch != EOF && mxml_isspace(ch))
+ ch = (*getc_cb)(p, encoding);
+
+ if (ch == '=')
+ {
+ /*
+ * Read the attribute value...
+ */
+
+ while ((ch = (*getc_cb)(p, encoding)) != EOF && mxml_isspace(ch));
+
+ if (ch == EOF)
+ {
+ mxml_error("Missing value for attribute '%s' in element %s!",
+ name, node->value.element.name);
+ goto error;
+ }
+
+ if (ch == '\'' || ch == '\"')
+ {
+ /*
+ * Read quoted value...
+ */
+
+ quote = ch;
+ ptr = value;
+
+ while ((ch = (*getc_cb)(p, encoding)) != EOF)
+ if (ch == quote)
+ break;
+ else
+ {
+ if (ch == '&')
+ if ((ch = mxml_get_entity(node, p, encoding, getc_cb)) == EOF)
+ goto error;
+
+ if (mxml_add_char(ch, &ptr, &value, &valsize))
+ goto error;
+ }
+
+ *ptr = '\0';
+ }
+ else
+ {
+ /*
+ * Read unquoted value...
+ */
+
+ value[0] = ch;
+ ptr = value + 1;
+
+ while ((ch = (*getc_cb)(p, encoding)) != EOF)
+ if (mxml_isspace(ch) || ch == '=' || ch == '/' || ch == '>')
+ break;
+ else
+ {
+ if (ch == '&')
+ if ((ch = mxml_get_entity(node, p, encoding, getc_cb)) == EOF)
+ goto error;
+
+ if (mxml_add_char(ch, &ptr, &value, &valsize))
+ goto error;
+ }
+
+ *ptr = '\0';
+ }
+
+ /*
+ * Set the attribute with the given string value...
+ */
+
+ mxmlElementSetAttr(node, name, value);
+ }
+ else
+ {
+ mxml_error("Missing value for attribute '%s' in element %s!",
+ name, node->value.element.name);
+ goto error;
+ }
+
+ /*
+ * Check the end character...
+ */
+
+ if (ch == '/' || ch == '?')
+ {
+ /*
+ * Grab the > character and print an error if it isn't there...
+ */
+
+ quote = (*getc_cb)(p, encoding);
+
+ if (quote != '>')
+ {
+ mxml_error("Expected '>' after '%c' for element %s, but got '%c'!",
+ ch, node->value.element.name, quote);
+ ch = EOF;
+ }
+
+ break;
+ }
+ else if (ch == '>')
+ break;
+ }
+
+ /*
+ * Free the name and value buffers and return...
+ */
+
+ free(name);
+ free(value);
+
+ return (ch);
+
+ /*
+ * Common error return point...
+ */
+
+error:
+
+ free(name);
+ free(value);
+
+ return (EOF);
+}
+
+
+/*
+ * 'mxml_string_getc()' - Get a character from a string.
+ */
+
+static int /* O - Character or EOF */
+mxml_string_getc(void *p, /* I - Pointer to file */
+ int *encoding) /* IO - Encoding */
+{
+ int ch; /* Character */
+ const char **s; /* Pointer to string pointer */
+
+
+ s = (const char **)p;
+
+ if ((ch = (*s)[0] & 255) != 0 || *encoding == ENCODE_UTF16LE)
+ {
+ /*
+ * Got character; convert UTF-8 to integer and return...
+ */
+
+ (*s)++;
+
+ switch (*encoding)
+ {
+ case ENCODE_UTF8 :
+ if (!(ch & 0x80))
+ {
+#if DEBUG > 1
+ printf("mxml_string_getc: %c (0x%04x)\n", ch < ' ' ? '.' : ch, ch);
+#endif /* DEBUG > 1 */
+
+ if (mxml_bad_char(ch))
+ {
+ mxml_error("Bad control character 0x%02x not allowed by XML standard!",
+ ch);
+ return (EOF);
+ }
+
+ return (ch);
+ }
+ else if (ch == 0xfe)
+ {
+ /*
+ * UTF-16 big-endian BOM?
+ */
+
+ if (((*s)[0] & 255) != 0xff)
+ return (EOF);
+
+ *encoding = ENCODE_UTF16BE;
+ (*s)++;
+
+ return (mxml_string_getc(p, encoding));
+ }
+ else if (ch == 0xff)
+ {
+ /*
+ * UTF-16 little-endian BOM?
+ */
+
+ if (((*s)[0] & 255) != 0xfe)
+ return (EOF);
+
+ *encoding = ENCODE_UTF16LE;
+ (*s)++;
+
+ return (mxml_string_getc(p, encoding));
+ }
+ else if ((ch & 0xe0) == 0xc0)
+ {
+ /*
+ * Two-byte value...
+ */
+
+ if (((*s)[0] & 0xc0) != 0x80)
+ return (EOF);
+
+ ch = ((ch & 0x1f) << 6) | ((*s)[0] & 0x3f);
+
+ (*s)++;
+
+ if (ch < 0x80)
+ {
+ mxml_error("Invalid UTF-8 sequence for character 0x%04x!", ch);
+ return (EOF);
+ }
+
+#if DEBUG > 1
+ printf("mxml_string_getc: %c (0x%04x)\n", ch < ' ' ? '.' : ch, ch);
+#endif /* DEBUG > 1 */
+
+ return (ch);
+ }
+ else if ((ch & 0xf0) == 0xe0)
+ {
+ /*
+ * Three-byte value...
+ */
+
+ if (((*s)[0] & 0xc0) != 0x80 ||
+ ((*s)[1] & 0xc0) != 0x80)
+ return (EOF);
+
+ ch = ((((ch & 0x0f) << 6) | ((*s)[0] & 0x3f)) << 6) | ((*s)[1] & 0x3f);
+
+ (*s) += 2;
+
+ if (ch < 0x800)
+ {
+ mxml_error("Invalid UTF-8 sequence for character 0x%04x!", ch);
+ return (EOF);
+ }
+
+ /*
+ * Ignore (strip) Byte Order Mark (BOM)...
+ */
+
+ if (ch == 0xfeff)
+ return (mxml_string_getc(p, encoding));
+
+#if DEBUG > 1
+ printf("mxml_string_getc: %c (0x%04x)\n", ch < ' ' ? '.' : ch, ch);
+#endif /* DEBUG > 1 */
+
+ return (ch);
+ }
+ else if ((ch & 0xf8) == 0xf0)
+ {
+ /*
+ * Four-byte value...
+ */
+
+ if (((*s)[0] & 0xc0) != 0x80 ||
+ ((*s)[1] & 0xc0) != 0x80 ||
+ ((*s)[2] & 0xc0) != 0x80)
+ return (EOF);
+
+ ch = ((((((ch & 0x07) << 6) | ((*s)[0] & 0x3f)) << 6) |
+ ((*s)[1] & 0x3f)) << 6) | ((*s)[2] & 0x3f);
+
+ (*s) += 3;
+
+ if (ch < 0x10000)
+ {
+ mxml_error("Invalid UTF-8 sequence for character 0x%04x!", ch);
+ return (EOF);
+ }
+
+#if DEBUG > 1
+ printf("mxml_string_getc: %c (0x%04x)\n", ch < ' ' ? '.' : ch, ch);
+#endif /* DEBUG > 1 */
+
+ return (ch);
+ }
+ else
+ return (EOF);
+
+ case ENCODE_UTF16BE :
+ /*
+ * Read UTF-16 big-endian char...
+ */
+
+ ch = (ch << 8) | ((*s)[0] & 255);
+ (*s) ++;
+
+ if (mxml_bad_char(ch))
+ {
+ mxml_error("Bad control character 0x%02x not allowed by XML standard!",
+ ch);
+ return (EOF);
+ }
+ else if (ch >= 0xd800 && ch <= 0xdbff)
+ {
+ /*
+ * Multi-word UTF-16 char...
+ */
+
+ int lch; /* Lower word */
+
+
+ if (!(*s)[0])
+ return (EOF);
+
+ lch = (((*s)[0] & 255) << 8) | ((*s)[1] & 255);
+ (*s) += 2;
+
+ if (lch < 0xdc00 || lch >= 0xdfff)
+ return (EOF);
+
+ ch = (((ch & 0x3ff) << 10) | (lch & 0x3ff)) + 0x10000;
+ }
+
+#if DEBUG > 1
+ printf("mxml_string_getc: %c (0x%04x)\n", ch < ' ' ? '.' : ch, ch);
+#endif /* DEBUG > 1 */
+
+ return (ch);
+
+ case ENCODE_UTF16LE :
+ /*
+ * Read UTF-16 little-endian char...
+ */
+
+ ch = ch | (((*s)[0] & 255) << 8);
+
+ if (!ch)
+ {
+ (*s) --;
+ return (EOF);
+ }
+
+ (*s) ++;
+
+ if (mxml_bad_char(ch))
+ {
+ mxml_error("Bad control character 0x%02x not allowed by XML standard!",
+ ch);
+ return (EOF);
+ }
+ else if (ch >= 0xd800 && ch <= 0xdbff)
+ {
+ /*
+ * Multi-word UTF-16 char...
+ */
+
+ int lch; /* Lower word */
+
+
+ if (!(*s)[1])
+ return (EOF);
+
+ lch = (((*s)[1] & 255) << 8) | ((*s)[0] & 255);
+ (*s) += 2;
+
+ if (lch < 0xdc00 || lch >= 0xdfff)
+ return (EOF);
+
+ ch = (((ch & 0x3ff) << 10) | (lch & 0x3ff)) + 0x10000;
+ }
+
+#if DEBUG > 1
+ printf("mxml_string_getc: %c (0x%04x)\n", ch < ' ' ? '.' : ch, ch);
+#endif /* DEBUG > 1 */
+
+ return (ch);
+ }
+ }
+
+ return (EOF);
+}
+
+
+/*
+ * 'mxml_string_putc()' - Write a character to a string.
+ */
+
+static int /* O - 0 on success, -1 on failure */
+mxml_string_putc(int ch, /* I - Character to write */
+ void *p) /* I - Pointer to string pointers */
+{
+ char **pp; /* Pointer to string pointers */
+
+
+ pp = (char **)p;
+
+ if (pp[0] < pp[1])
+ pp[0][0] = ch;
+
+ pp[0] ++;
+
+ return (0);
+}
+
+
+/*
+ * 'mxml_write_name()' - Write a name string.
+ */
+
+static int /* O - 0 on success, -1 on failure */
+mxml_write_name(const char *s, /* I - Name to write */
+ void *p, /* I - Write pointer */
+ int (*putc_cb)(int, void *))
+ /* I - Write callback */
+{
+ char quote; /* Quote character */
+ const char *name; /* Entity name */
+
+
+ if (*s == '\"' || *s == '\'')
+ {
+ /*
+ * Write a quoted name string...
+ */
+
+ if ((*putc_cb)(*s, p) < 0)
+ return (-1);
+
+ quote = *s++;
+
+ while (*s && *s != quote)
+ {
+ if ((name = mxmlEntityGetName(*s)) != NULL)
+ {
+ if ((*putc_cb)('&', p) < 0)
+ return (-1);
+
+ while (*name)
+ {
+ if ((*putc_cb)(*name, p) < 0)
+ return (-1);
+
+ name ++;
+ }
+
+ if ((*putc_cb)(';', p) < 0)
+ return (-1);
+ }
+ else if ((*putc_cb)(*s, p) < 0)
+ return (-1);
+
+ s ++;
+ }
+
+ /*
+ * Write the end quote...
+ */
+
+ if ((*putc_cb)(quote, p) < 0)
+ return (-1);
+ }
+ else
+ {
+ /*
+ * Write a non-quoted name string...
+ */
+
+ while (*s)
+ {
+ if ((*putc_cb)(*s, p) < 0)
+ return (-1);
+
+ s ++;
+ }
+ }
+
+ return (0);
+}
+
+
+/*
+ * 'mxml_write_node()' - Save an XML node to a file.
+ */
+
+static int /* O - Column or -1 on error */
+mxml_write_node(mxml_node_t *node, /* I - Node to write */
+ void *p, /* I - File to write to */
+ mxml_save_cb_t cb, /* I - Whitespace callback */
+ int col, /* I - Current column */
+ _mxml_putc_cb_t putc_cb,/* I - Output callback */
+ _mxml_global_t *global)/* I - Global data */
+{
+ int i, /* Looping var */
+ width; /* Width of attr + value */
+ mxml_attr_t *attr; /* Current attribute */
+ char s[255]; /* Temporary string */
+
+
+ /*
+ * Print the node value...
+ */
+
+ switch (node->type)
+ {
+ case MXML_ELEMENT :
+ col = mxml_write_ws(node, p, cb, MXML_WS_BEFORE_OPEN, col, putc_cb);
+
+ if ((*putc_cb)('<', p) < 0)
+ return (-1);
+ if (node->value.element.name[0] == '?' ||
+ !strncmp(node->value.element.name, "!--", 3) ||
+ !strncmp(node->value.element.name, "![CDATA[", 8))
+ {
+ /*
+ * Comments, CDATA, and processing instructions do not
+ * use character entities.
+ */
+
+ const char *ptr; /* Pointer into name */
+
+
+ for (ptr = node->value.element.name; *ptr; ptr ++)
+ if ((*putc_cb)(*ptr, p) < 0)
+ return (-1);
+ }
+ else if (mxml_write_name(node->value.element.name, p, putc_cb) < 0)
+ return (-1);
+
+ col += strlen(node->value.element.name) + 1;
+
+ for (i = node->value.element.num_attrs, attr = node->value.element.attrs;
+ i > 0;
+ i --, attr ++)
+ {
+ width = strlen(attr->name);
+
+ if (attr->value)
+ width += strlen(attr->value) + 3;
+
+ if (global->wrap > 0 && (col + width) > global->wrap)
+ {
+ if ((*putc_cb)('\n', p) < 0)
+ return (-1);
+
+ col = 0;
+ }
+ else
+ {
+ if ((*putc_cb)(' ', p) < 0)
+ return (-1);
+
+ col ++;
+ }
+
+ if (mxml_write_name(attr->name, p, putc_cb) < 0)
+ return (-1);
+
+ if (attr->value)
+ {
+ if ((*putc_cb)('=', p) < 0)
+ return (-1);
+ if ((*putc_cb)('\"', p) < 0)
+ return (-1);
+ if (mxml_write_string(attr->value, p, putc_cb) < 0)
+ return (-1);
+ if ((*putc_cb)('\"', p) < 0)
+ return (-1);
+ }
+
+ col += width;
+ }
+
+ if (node->child)
+ {
+ /*
+ * Write children...
+ */
+
+ mxml_node_t *child; /* Current child */
+
+
+ if ((*putc_cb)('>', p) < 0)
+ return (-1);
+ else
+ col ++;
+
+ col = mxml_write_ws(node, p, cb, MXML_WS_AFTER_OPEN, col, putc_cb);
+
+ for (child = node->child; child; child = child->next)
+ {
+ if ((col = mxml_write_node(child, p, cb, col, putc_cb, global)) < 0)
+ return (-1);
+ }
+
+ /*
+ * The ? and ! elements are special-cases and have no end tags...
+ */
+
+ if (node->value.element.name[0] != '!' &&
+ node->value.element.name[0] != '?')
+ {
+ col = mxml_write_ws(node, p, cb, MXML_WS_BEFORE_CLOSE, col, putc_cb);
+
+ if ((*putc_cb)('<', p) < 0)
+ return (-1);
+ if ((*putc_cb)('/', p) < 0)
+ return (-1);
+ if (mxml_write_string(node->value.element.name, p, putc_cb) < 0)
+ return (-1);
+ if ((*putc_cb)('>', p) < 0)
+ return (-1);
+
+ col += strlen(node->value.element.name) + 3;
+
+ col = mxml_write_ws(node, p, cb, MXML_WS_AFTER_CLOSE, col, putc_cb);
+ }
+ }
+ else if (node->value.element.name[0] == '!' ||
+ node->value.element.name[0] == '?')
+ {
+ /*
+ * The ? and ! elements are special-cases...
+ */
+
+ if ((*putc_cb)('>', p) < 0)
+ return (-1);
+ else
+ col ++;
+
+ col = mxml_write_ws(node, p, cb, MXML_WS_AFTER_OPEN, col, putc_cb);
+ }
+ else
+ {
+ if ((*putc_cb)(' ', p) < 0)
+ return (-1);
+ if ((*putc_cb)('/', p) < 0)
+ return (-1);
+ if ((*putc_cb)('>', p) < 0)
+ return (-1);
+
+ col += 3;
+
+ col = mxml_write_ws(node, p, cb, MXML_WS_AFTER_OPEN, col, putc_cb);
+ }
+ break;
+
+ case MXML_INTEGER :
+ if (node->prev)
+ {
+ if (global->wrap > 0 && col > global->wrap)
+ {
+ if ((*putc_cb)('\n', p) < 0)
+ return (-1);
+
+ col = 0;
+ }
+ else if ((*putc_cb)(' ', p) < 0)
+ return (-1);
+ else
+ col ++;
+ }
+
+ sprintf(s, "%d", node->value.integer);
+ if (mxml_write_string(s, p, putc_cb) < 0)
+ return (-1);
+
+ col += strlen(s);
+ break;
+
+ case MXML_OPAQUE :
+ if (mxml_write_string(node->value.opaque, p, putc_cb) < 0)
+ return (-1);
+
+ col += strlen(node->value.opaque);
+ break;
+
+ case MXML_REAL :
+ if (node->prev)
+ {
+ if (global->wrap > 0 && col > global->wrap)
+ {
+ if ((*putc_cb)('\n', p) < 0)
+ return (-1);
+
+ col = 0;
+ }
+ else if ((*putc_cb)(' ', p) < 0)
+ return (-1);
+ else
+ col ++;
+ }
+
+ sprintf(s, "%f", node->value.real);
+ if (mxml_write_string(s, p, putc_cb) < 0)
+ return (-1);
+
+ col += strlen(s);
+ break;
+
+ case MXML_TEXT :
+ if (node->value.text.whitespace && col > 0)
+ {
+ if (global->wrap > 0 && col > global->wrap)
+ {
+ if ((*putc_cb)('\n', p) < 0)
+ return (-1);
+
+ col = 0;
+ }
+ else if ((*putc_cb)(' ', p) < 0)
+ return (-1);
+ else
+ col ++;
+ }
+
+ if (mxml_write_string(node->value.text.string, p, putc_cb) < 0)
+ return (-1);
+
+ col += strlen(node->value.text.string);
+ break;
+
+ case MXML_CUSTOM :
+ if (global->custom_save_cb)
+ {
+ char *data; /* Custom data string */
+ const char *newline; /* Last newline in string */
+
+
+ if ((data = (*global->custom_save_cb)(node)) == NULL)
+ return (-1);
+
+ if (mxml_write_string(data, p, putc_cb) < 0)
+ return (-1);
+
+ if ((newline = strrchr(data, '\n')) == NULL)
+ col += strlen(data);
+ else
+ col = strlen(newline);
+
+ free(data);
+ break;
+ }
+
+ default : /* Should never happen */
+ return (-1);
+ }
+
+ return (col);
+}
+
+
+/*
+ * 'mxml_write_string()' - Write a string, escaping & and < as needed.
+ */
+
+static int /* O - 0 on success, -1 on failure */
+mxml_write_string(
+ const char *s, /* I - String to write */
+ void *p, /* I - Write pointer */
+ _mxml_putc_cb_t putc_cb) /* I - Write callback */
+{
+ const char *name; /* Entity name, if any */
+
+
+ while (*s)
+ {
+ if ((name = mxmlEntityGetName(*s)) != NULL)
+ {
+ if ((*putc_cb)('&', p) < 0)
+ return (-1);
+
+ while (*name)
+ {
+ if ((*putc_cb)(*name, p) < 0)
+ return (-1);
+ name ++;
+ }
+
+ if ((*putc_cb)(';', p) < 0)
+ return (-1);
+ }
+ else if ((*putc_cb)(*s, p) < 0)
+ return (-1);
+
+ s ++;
+ }
+
+ return (0);
+}
+
+
+/*
+ * 'mxml_write_ws()' - Do whitespace callback...
+ */
+
+static int /* O - New column */
+mxml_write_ws(mxml_node_t *node, /* I - Current node */
+ void *p, /* I - Write pointer */
+ mxml_save_cb_t cb, /* I - Callback function */
+ int ws, /* I - Where value */
+ int col, /* I - Current column */
+ _mxml_putc_cb_t putc_cb) /* I - Write callback */
+{
+ const char *s; /* Whitespace string */
+
+
+ if (cb && (s = (*cb)(node, ws)) != NULL)
+ {
+ while (*s)
+ {
+ if ((*putc_cb)(*s, p) < 0)
+ return (-1);
+ else if (*s == '\n')
+ col = 0;
+ else if (*s == '\t')
+ {
+ col += MXML_TAB;
+ col = col - (col % MXML_TAB);
+ }
+ else
+ col ++;
+
+ s ++;
+ }
+ }
+
+ return (col);
+}
+
+
+/*
+ * End of "$Id: mxml-file.c 438 2011-03-24 05:47:51Z mike $".
+ */
diff --git a/tools/gator/daemon/mxml/mxml-get.c b/tools/gator/daemon/mxml/mxml-get.c
new file mode 100644
index 000000000000..a5356d57e186
--- /dev/null
+++ b/tools/gator/daemon/mxml/mxml-get.c
@@ -0,0 +1,471 @@
+/*
+ * "$Id: mxml-get.c 427 2011-01-03 02:03:29Z mike $"
+ *
+ * Node get functions for Mini-XML, a small XML-like file parsing library.
+ *
+ * Copyright 2011 by Michael R Sweet.
+ *
+ * These coded instructions, statements, and computer programs are the
+ * property of Michael R Sweet and are protected by Federal copyright
+ * law. Distribution and use rights are outlined in the file "COPYING"
+ * which should have been included with this file. If this file is
+ * missing or damaged, see the license at:
+ *
+ * http://www.minixml.org/
+ *
+ * Contents:
+ *
+ * mxmlGetCDATA() - Get the value for a CDATA node.
+ * mxmlGetCustom() - Get the value for a custom node.
+ * mxmlGetElement() - Get the name for an element node.
+ * mxmlGetFirstChild() - Get the first child of an element node.
+ * mxmlGetInteger() - Get the integer value from the specified node or its
+ * first child.
+ * mxmlGetLastChild() - Get the last child of an element node.
+ * mxmlGetNextSibling() - Get the next node for the current parent.
+ * mxmlGetOpaque() - Get an opaque string value for a node or its first
+ * child.
+ * mxmlGetParent() - Get the parent node.
+ * mxmlGetPrevSibling() - Get the previous node for the current parent.
+ * mxmlGetReal() - Get the real value for a node or its first child.
+ * mxmlGetText() - Get the text value for a node or its first child.
+ * mxmlGetType() - Get the node type.
+ * mxmlGetUserData() - Get the user data pointer for a node.
+ */
+
+/*
+ * Include necessary headers...
+ */
+
+#include "config.h"
+#include "mxml.h"
+
+
+/*
+ * 'mxmlGetCDATA()' - Get the value for a CDATA node.
+ *
+ * @code NULL@ is returned if the node is not a CDATA element.
+ *
+ * @since Mini-XML 2.7@
+ */
+
+const char * /* O - CDATA value or NULL */
+mxmlGetCDATA(mxml_node_t *node) /* I - Node to get */
+{
+ /*
+ * Range check input...
+ */
+
+ if (!node || node->type != MXML_ELEMENT ||
+ strncmp(node->value.element.name, "![CDATA[", 8))
+ return (NULL);
+
+ /*
+ * Return the text following the CDATA declaration...
+ */
+
+ return (node->value.element.name + 8);
+}
+
+
+/*
+ * 'mxmlGetCustom()' - Get the value for a custom node.
+ *
+ * @code NULL@ is returned if the node (or its first child) is not a custom
+ * value node.
+ *
+ * @since Mini-XML 2.7@
+ */
+
+const void * /* O - Custom value or NULL */
+mxmlGetCustom(mxml_node_t *node) /* I - Node to get */
+{
+ /*
+ * Range check input...
+ */
+
+ if (!node)
+ return (NULL);
+
+ /*
+ * Return the custom value...
+ */
+
+ if (node->type == MXML_CUSTOM)
+ return (node->value.custom.data);
+ else if (node->type == MXML_ELEMENT &&
+ node->child &&
+ node->child->type == MXML_CUSTOM)
+ return (node->child->value.custom.data);
+ else
+ return (NULL);
+}
+
+
+/*
+ * 'mxmlGetElement()' - Get the name for an element node.
+ *
+ * @code NULL@ is returned if the node is not an element node.
+ *
+ * @since Mini-XML 2.7@
+ */
+
+const char * /* O - Element name or NULL */
+mxmlGetElement(mxml_node_t *node) /* I - Node to get */
+{
+ /*
+ * Range check input...
+ */
+
+ if (!node || node->type != MXML_ELEMENT)
+ return (NULL);
+
+ /*
+ * Return the element name...
+ */
+
+ return (node->value.element.name);
+}
+
+
+/*
+ * 'mxmlGetFirstChild()' - Get the first child of an element node.
+ *
+ * @code NULL@ is returned if the node is not an element node or if the node
+ * has no children.
+ *
+ * @since Mini-XML 2.7@
+ */
+
+mxml_node_t * /* O - First child or NULL */
+mxmlGetFirstChild(mxml_node_t *node) /* I - Node to get */
+{
+ /*
+ * Range check input...
+ */
+
+ if (!node || node->type != MXML_ELEMENT)
+ return (NULL);
+
+ /*
+ * Return the first child node...
+ */
+
+ return (node->child);
+}
+
+
+/*
+ * 'mxmlGetInteger()' - Get the integer value from the specified node or its
+ * first child.
+ *
+ * 0 is returned if the node (or its first child) is not an integer value node.
+ *
+ * @since Mini-XML 2.7@
+ */
+
+int /* O - Integer value or 0 */
+mxmlGetInteger(mxml_node_t *node) /* I - Node to get */
+{
+ /*
+ * Range check input...
+ */
+
+ if (!node)
+ return (0);
+
+ /*
+ * Return the integer value...
+ */
+
+ if (node->type == MXML_INTEGER)
+ return (node->value.integer);
+ else if (node->type == MXML_ELEMENT &&
+ node->child &&
+ node->child->type == MXML_INTEGER)
+ return (node->child->value.integer);
+ else
+ return (0);
+}
+
+
+/*
+ * 'mxmlGetLastChild()' - Get the last child of an element node.
+ *
+ * @code NULL@ is returned if the node is not an element node or if the node
+ * has no children.
+ *
+ * @since Mini-XML 2.7@
+ */
+
+mxml_node_t * /* O - Last child or NULL */
+mxmlGetLastChild(mxml_node_t *node) /* I - Node to get */
+{
+ /*
+ * Range check input...
+ */
+
+ if (!node || node->type != MXML_ELEMENT)
+ return (NULL);
+
+ /*
+ * Return the last child node...
+ */
+
+ return (node->last_child);
+}
+
+
+/*
+ * 'mxmlGetNextSibling()' - Get the next node for the current parent.
+ *
+ * @code NULL@ is returned if this is the last child for the current parent.
+ *
+ * @since Mini-XML 2.7@
+ */
+
+mxml_node_t * /* O - Next node or NULL */
+mxmlGetNextSibling(mxml_node_t *node) /* I - Node to get */
+{
+ /*
+ * Range check input...
+ */
+
+ if (!node)
+ return (NULL);
+
+ /*
+ * Return the next sibling node...
+ */
+
+ return (node->next);
+}
+
+
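+/*
+ * Editor's note: illustrative sketch, not part of the original Mini-XML
+ * sources.  mxmlGetFirstChild() and mxmlGetNextSibling() can be combined
+ * to walk the direct children of an element ("parent" is assumed to be
+ * an MXML_ELEMENT node, <stdio.h> for printf()):
+ *
+ *   mxml_node_t *child;
+ *
+ *   for (child = mxmlGetFirstChild(parent);
+ *        child;
+ *        child = mxmlGetNextSibling(child))
+ *   {
+ *     if (mxmlGetType(child) == MXML_ELEMENT)
+ *       printf("child element: %s\n", mxmlGetElement(child));
+ *   }
+ */
+
+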
+/*
+ * 'mxmlGetOpaque()' - Get an opaque string value for a node or its first child.
+ *
+ * @code NULL@ is returned if the node (or its first child) is not an opaque
+ * value node.
+ *
+ * @since Mini-XML 2.7@
+ */
+
+const char * /* O - Opaque string or NULL */
+mxmlGetOpaque(mxml_node_t *node) /* I - Node to get */
+{
+ /*
+ * Range check input...
+ */
+
+ if (!node)
+ return (NULL);
+
+ /*
+ * Return the opaque string value...
+ */
+
+ if (node->type == MXML_OPAQUE)
+ return (node->value.opaque);
+ else if (node->type == MXML_ELEMENT &&
+ node->child &&
+ node->child->type == MXML_OPAQUE)
+ return (node->child->value.opaque);
+ else
+ return (NULL);
+}
+
+
+/*
+ * 'mxmlGetParent()' - Get the parent node.
+ *
+ * @code NULL@ is returned for a root node.
+ *
+ * @since Mini-XML 2.7@
+ */
+
+mxml_node_t * /* O - Parent node or NULL */
+mxmlGetParent(mxml_node_t *node) /* I - Node to get */
+{
+ /*
+ * Range check input...
+ */
+
+ if (!node)
+ return (NULL);
+
+ /*
+ * Return the parent node...
+ */
+
+ return (node->parent);
+}
+
+
+/*
+ * 'mxmlGetPrevSibling()' - Get the previous node for the current parent.
+ *
+ * @code NULL@ is returned if this is the first child for the current parent.
+ *
+ * @since Mini-XML 2.7@
+ */
+
+mxml_node_t * /* O - Previous node or NULL */
+mxmlGetPrevSibling(mxml_node_t *node) /* I - Node to get */
+{
+ /*
+ * Range check input...
+ */
+
+ if (!node)
+ return (NULL);
+
+ /*
+ * Return the previous sibling node...
+ */
+
+ return (node->prev);
+}
+
+
+/*
+ * 'mxmlGetReal()' - Get the real value for a node or its first child.
+ *
+ * 0.0 is returned if the node (or its first child) is not a real value node.
+ *
+ * @since Mini-XML 2.7@
+ */
+
+double /* O - Real value or 0.0 */
+mxmlGetReal(mxml_node_t *node) /* I - Node to get */
+{
+ /*
+ * Range check input...
+ */
+
+ if (!node)
+ return (0.0);
+
+ /*
+ * Return the real value...
+ */
+
+ if (node->type == MXML_REAL)
+ return (node->value.real);
+ else if (node->type == MXML_ELEMENT &&
+ node->child &&
+ node->child->type == MXML_REAL)
+ return (node->child->value.real);
+ else
+ return (0.0);
+}
+
+
+/*
+ * 'mxmlGetText()' - Get the text value for a node or its first child.
+ *
+ * @code NULL@ is returned if the node (or its first child) is not a text node.
+ * The "whitespace" argument can be NULL.
+ *
+ * @since Mini-XML 2.7@
+ */
+
+const char * /* O - Text string or NULL */
+mxmlGetText(mxml_node_t *node, /* I - Node to get */
+ int *whitespace) /* O - 1 if string is preceded by whitespace, 0 otherwise */
+{
+ /*
+ * Range check input...
+ */
+
+ if (!node)
+ {
+ if (whitespace)
+ *whitespace = 0;
+
+ return (NULL);
+ }
+
+ /*
+ * Return the text value...
+ */
+
+ if (node->type == MXML_TEXT)
+ {
+ if (whitespace)
+ *whitespace = node->value.text.whitespace;
+
+ return (node->value.text.string);
+ }
+ else if (node->type == MXML_ELEMENT &&
+ node->child &&
+ node->child->type == MXML_TEXT)
+ {
+ if (whitespace)
+ *whitespace = node->child->value.text.whitespace;
+
+ return (node->child->value.text.string);
+ }
+ else
+ {
+ if (whitespace)
+ *whitespace = 0;
+
+ return (NULL);
+ }
+}
+
+
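+/*
+ * Usage sketch (illustrative; "node" is assumed to be a text node or an
+ * element whose first child is a text node): reading a text value and its
+ * leading-whitespace flag.
+ *
+ *   int        whitespace;
+ *   const char *string;
+ *
+ *   string = mxmlGetText(node, &whitespace);
+ *
+ *   if (string)
+ *     printf("%s%s\n", whitespace ? " " : "", string);
+ */
+
+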
+/*
+ * 'mxmlGetType()' - Get the node type.
+ *
+ * @code MXML_IGNORE@ is returned if "node" is @code NULL@.
+ *
+ * @since Mini-XML 2.7@
+ */
+
+mxml_type_t /* O - Type of node */
+mxmlGetType(mxml_node_t *node) /* I - Node to get */
+{
+ /*
+ * Range check input...
+ */
+
+ if (!node)
+ return (MXML_IGNORE);
+
+ /*
+ * Return the node type...
+ */
+
+ return (node->type);
+}
+
+
+/*
+ * 'mxmlGetUserData()' - Get the user data pointer for a node.
+ *
+ * @since Mini-XML 2.7@
+ */
+
+void * /* O - User data pointer */
+mxmlGetUserData(mxml_node_t *node) /* I - Node to get */
+{
+ /*
+ * Range check input...
+ */
+
+ if (!node)
+ return (NULL);
+
+ /*
+ * Return the user data pointer...
+ */
+
+ return (node->user_data);
+}
+
+
+/*
+ * End of "$Id: mxml-get.c 427 2011-01-03 02:03:29Z mike $".
+ */
diff --git a/tools/gator/daemon/mxml/mxml-index.c b/tools/gator/daemon/mxml/mxml-index.c
new file mode 100644
index 000000000000..b6efc66f055c
--- /dev/null
+++ b/tools/gator/daemon/mxml/mxml-index.c
@@ -0,0 +1,662 @@
+/*
+ * "$Id: mxml-index.c 426 2011-01-01 23:42:17Z mike $"
+ *
+ * Index support code for Mini-XML, a small XML-like file parsing library.
+ *
+ * Copyright 2003-2011 by Michael R Sweet.
+ *
+ * These coded instructions, statements, and computer programs are the
+ * property of Michael R Sweet and are protected by Federal copyright
+ * law. Distribution and use rights are outlined in the file "COPYING"
+ * which should have been included with this file. If this file is
+ * missing or damaged, see the license at:
+ *
+ * http://www.minixml.org/
+ *
+ * Contents:
+ *
+ */
+
+/*
+ * Include necessary headers...
+ */
+
+#include "config.h"
+#include "mxml.h"
+
+
+/*
+ * Sort functions...
+ */
+
+static int index_compare(mxml_index_t *ind, mxml_node_t *first,
+ mxml_node_t *second);
+static int index_find(mxml_index_t *ind, const char *element,
+ const char *value, mxml_node_t *node);
+static void index_sort(mxml_index_t *ind, int left, int right);
+
+
+/*
+ * 'mxmlIndexDelete()' - Delete an index.
+ */
+
+void
+mxmlIndexDelete(mxml_index_t *ind) /* I - Index to delete */
+{
+ /*
+ * Range check input...
+ */
+
+ if (!ind)
+ return;
+
+ /*
+ * Free memory...
+ */
+
+ if (ind->attr)
+ free(ind->attr);
+
+ if (ind->alloc_nodes)
+ free(ind->nodes);
+
+ free(ind);
+}
+
+
+/*
+ * 'mxmlIndexEnum()' - Return the next node in the index.
+ *
+ * Nodes are returned in the sorted order of the index.
+ */
+
+mxml_node_t * /* O - Next node or NULL if there is none */
+mxmlIndexEnum(mxml_index_t *ind) /* I - Index to enumerate */
+{
+ /*
+ * Range check input...
+ */
+
+ if (!ind)
+ return (NULL);
+
+ /*
+ * Return the next node...
+ */
+
+ if (ind->cur_node < ind->num_nodes)
+ return (ind->nodes[ind->cur_node ++]);
+ else
+ return (NULL);
+}
+
+
+/*
+ * 'mxmlIndexFind()' - Find the next matching node.
+ *
+ * You should call mxmlIndexReset() prior to using this function for
+ * the first time with a particular set of "element" and "value"
+ * strings. Passing NULL for both "element" and "value" is equivalent
+ * to calling mxmlIndexEnum().
+ */
+
+mxml_node_t * /* O - Node or NULL if none found */
+mxmlIndexFind(mxml_index_t *ind, /* I - Index to search */
+ const char *element, /* I - Element name to find, if any */
+ const char *value) /* I - Attribute value, if any */
+{
+ int diff, /* Difference between names */
+ current, /* Current entity in search */
+ first, /* First entity in search */
+ last; /* Last entity in search */
+
+
+#ifdef DEBUG
+ printf("mxmlIndexFind(ind=%p, element=\"%s\", value=\"%s\")\n",
+ ind, element ? element : "(null)", value ? value : "(null)");
+#endif /* DEBUG */
+
+ /*
+ * Range check input...
+ */
+
+ if (!ind || (!ind->attr && value))
+ {
+#ifdef DEBUG
+ puts(" returning NULL...");
+ printf(" ind->attr=\"%s\"\n", ind && ind->attr ? ind->attr : "(null)");
+#endif /* DEBUG */
+
+ return (NULL);
+ }
+
+ /*
+ * If both element and value are NULL, just enumerate the nodes in the
+ * index...
+ */
+
+ if (!element && !value)
+ return (mxmlIndexEnum(ind));
+
+ /*
+ * If there are no nodes in the index, return NULL...
+ */
+
+ if (!ind->num_nodes)
+ {
+#ifdef DEBUG
+ puts(" returning NULL...");
+ puts(" no nodes!");
+#endif /* DEBUG */
+
+ return (NULL);
+ }
+
+ /*
+ * If cur_node == 0, then find the first matching node...
+ */
+
+ if (ind->cur_node == 0)
+ {
+ /*
+ * Find the first node using a modified binary search algorithm...
+ */
+
+ first = 0;
+ last = ind->num_nodes - 1;
+
+#ifdef DEBUG
+ printf(" find first time, num_nodes=%d...\n", ind->num_nodes);
+#endif /* DEBUG */
+
+ while ((last - first) > 1)
+ {
+ current = (first + last) / 2;
+
+#ifdef DEBUG
+ printf(" first=%d, last=%d, current=%d\n", first, last, current);
+#endif /* DEBUG */
+
+ if ((diff = index_find(ind, element, value, ind->nodes[current])) == 0)
+ {
+ /*
+ * Found a match, move back to find the first...
+ */
+
+#ifdef DEBUG
+ puts(" match!");
+#endif /* DEBUG */
+
+ while (current > 0 &&
+ !index_find(ind, element, value, ind->nodes[current - 1]))
+ current --;
+
+#ifdef DEBUG
+ printf(" returning first match=%d\n", current);
+#endif /* DEBUG */
+
+ /*
+ * Return the first match and save the index to the next...
+ */
+
+ ind->cur_node = current + 1;
+
+ return (ind->nodes[current]);
+ }
+ else if (diff < 0)
+ last = current;
+ else
+ first = current;
+
+#ifdef DEBUG
+ printf(" diff=%d\n", diff);
+#endif /* DEBUG */
+ }
+
+ /*
+ * If we get this far, then we found exactly 0 or 1 matches...
+ */
+
+ for (current = first; current <= last; current ++)
+ if (!index_find(ind, element, value, ind->nodes[current]))
+ {
+ /*
+ * Found exactly one (or possibly two) match...
+ */
+
+#ifdef DEBUG
+ printf(" returning only match %d...\n", current);
+#endif /* DEBUG */
+
+ ind->cur_node = current + 1;
+
+ return (ind->nodes[current]);
+ }
+
+ /*
+ * No matches...
+ */
+
+ ind->cur_node = ind->num_nodes;
+
+#ifdef DEBUG
+ puts(" returning NULL...");
+#endif /* DEBUG */
+
+ return (NULL);
+ }
+ else if (ind->cur_node < ind->num_nodes &&
+ !index_find(ind, element, value, ind->nodes[ind->cur_node]))
+ {
+ /*
+ * Return the next matching node...
+ */
+
+#ifdef DEBUG
+ printf(" returning next match %d...\n", ind->cur_node);
+#endif /* DEBUG */
+
+ return (ind->nodes[ind->cur_node ++]);
+ }
+
+ /*
+ * If we get this far, then we have no matches...
+ */
+
+ ind->cur_node = ind->num_nodes;
+
+#ifdef DEBUG
+ puts(" returning NULL...");
+#endif /* DEBUG */
+
+ return (NULL);
+}
+
+
+/*
+ * 'mxmlIndexGetCount()' - Get the number of nodes in an index.
+ *
+ * @since Mini-XML 2.7@
+ */
+
+int /* O - Number of nodes in index */
+mxmlIndexGetCount(mxml_index_t *ind) /* I - Index of nodes */
+{
+ /*
+ * Range check input...
+ */
+
+ if (!ind)
+ return (0);
+
+ /*
+ * Return the number of nodes in the index...
+ */
+
+ return (ind->num_nodes);
+}
+
+
+/*
+ * 'mxmlIndexNew()' - Create a new index.
+ *
+ * The index will contain all nodes that contain the named element and/or
+ * attribute. If both "element" and "attr" are NULL, then the index will
+ * contain a sorted list of the elements in the node tree. Nodes are
+ * sorted by element name and optionally by attribute value if the "attr"
+ * argument is not NULL.
+ */
+
+mxml_index_t * /* O - New index */
+mxmlIndexNew(mxml_node_t *node, /* I - XML node tree */
+ const char *element, /* I - Element to index or NULL for all */
+ const char *attr) /* I - Attribute to index or NULL for none */
+{
+ mxml_index_t *ind; /* New index */
+ mxml_node_t *current, /* Current node in index */
+ **temp; /* Temporary node pointer array */
+
+
+ /*
+ * Range check input...
+ */
+
+#ifdef DEBUG
+ printf("mxmlIndexNew(node=%p, element=\"%s\", attr=\"%s\")\n",
+ node, element ? element : "(null)", attr ? attr : "(null)");
+#endif /* DEBUG */
+
+ if (!node)
+ return (NULL);
+
+ /*
+ * Create a new index...
+ */
+
+ if ((ind = calloc(1, sizeof(mxml_index_t))) == NULL)
+ {
+ mxml_error("Unable to allocate %d bytes for index - %s",
+ sizeof(mxml_index_t), strerror(errno));
+ return (NULL);
+ }
+
+ if (attr)
+ ind->attr = strdup(attr);
+
+ if (!element && !attr)
+ current = node;
+ else
+ current = mxmlFindElement(node, node, element, attr, NULL, MXML_DESCEND);
+
+ while (current)
+ {
+ if (ind->num_nodes >= ind->alloc_nodes)
+ {
+ if (!ind->alloc_nodes)
+ temp = malloc(64 * sizeof(mxml_node_t *));
+ else
+ temp = realloc(ind->nodes, (ind->alloc_nodes + 64) * sizeof(mxml_node_t *));
+
+ if (!temp)
+ {
+ /*
+ * Unable to allocate memory for the index, so abort...
+ */
+
+ mxml_error("Unable to allocate %d bytes for index: %s",
+ (ind->alloc_nodes + 64) * sizeof(mxml_node_t *),
+ strerror(errno));
+
+ mxmlIndexDelete(ind);
+ return (NULL);
+ }
+
+ ind->nodes = temp;
+ ind->alloc_nodes += 64;
+ }
+
+ ind->nodes[ind->num_nodes ++] = current;
+
+ current = mxmlFindElement(current, node, element, attr, NULL, MXML_DESCEND);
+ }
+
+ /*
+ * Sort nodes based upon the search criteria...
+ */
+
+#ifdef DEBUG
+ {
+ int i; /* Looping var */
+
+
+ printf("%d node(s) in index.\n\n", ind->num_nodes);
+
+ if (attr)
+ {
+ printf("Node Address Element %s\n", attr);
+ puts("-------- -------- -------------- ------------------------------");
+
+ for (i = 0; i < ind->num_nodes; i ++)
+ printf("%8d %-8p %-14.14s %s\n", i, ind->nodes[i],
+ ind->nodes[i]->value.element.name,
+ mxmlElementGetAttr(ind->nodes[i], attr));
+ }
+ else
+ {
+ puts("Node Address Element");
+ puts("-------- -------- --------------");
+
+ for (i = 0; i < ind->num_nodes; i ++)
+ printf("%8d %-8p %s\n", i, ind->nodes[i],
+ ind->nodes[i]->value.element.name);
+ }
+
+ putchar('\n');
+ }
+#endif /* DEBUG */
+
+ if (ind->num_nodes > 1)
+ index_sort(ind, 0, ind->num_nodes - 1);
+
+#ifdef DEBUG
+ {
+ int i; /* Looping var */
+
+
+ puts("After sorting:\n");
+
+ if (attr)
+ {
+ printf("Node Address Element %s\n", attr);
+ puts("-------- -------- -------------- ------------------------------");
+
+ for (i = 0; i < ind->num_nodes; i ++)
+ printf("%8d %-8p %-14.14s %s\n", i, ind->nodes[i],
+ ind->nodes[i]->value.element.name,
+ mxmlElementGetAttr(ind->nodes[i], attr));
+ }
+ else
+ {
+ puts("Node Address Element");
+ puts("-------- -------- --------------");
+
+ for (i = 0; i < ind->num_nodes; i ++)
+ printf("%8d %-8p %s\n", i, ind->nodes[i],
+ ind->nodes[i]->value.element.name);
+ }
+
+ putchar('\n');
+ }
+#endif /* DEBUG */
+
+ /*
+ * Return the new index...
+ */
+
+ return (ind);
+}
+
+
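+/*
+ * Usage sketch (illustrative; the "event" element and "name" attribute are
+ * invented for the example, and "tree" is assumed to be a loaded document
+ * tree): building an index, finding matches, and cleaning up.
+ *
+ *   mxml_index_t *ind = mxmlIndexNew(tree, "event", "name");
+ *   mxml_node_t  *node;
+ *
+ *   // Reset before the first find with a given element/value pair...
+ *   mxmlIndexReset(ind);
+ *
+ *   while ((node = mxmlIndexFind(ind, "event", "counter")) != NULL)
+ *   {
+ *     // Matching nodes are returned in the sorted order of the index...
+ *   }
+ *
+ *   mxmlIndexDelete(ind);
+ */
+
+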
+/*
+ * 'mxmlIndexReset()' - Reset the enumeration/find pointer in the index and
+ * return the first node in the index.
+ *
+ * This function should be called prior to using mxmlIndexEnum() or
+ * mxmlIndexFind() for the first time.
+ */
+
+mxml_node_t * /* O - First node or NULL if there is none */
+mxmlIndexReset(mxml_index_t *ind) /* I - Index to reset */
+{
+#ifdef DEBUG
+ printf("mxmlIndexReset(ind=%p)\n", ind);
+#endif /* DEBUG */
+
+ /*
+ * Range check input...
+ */
+
+ if (!ind)
+ return (NULL);
+
+ /*
+ * Set the index to the first element...
+ */
+
+ ind->cur_node = 0;
+
+ /*
+ * Return the first node...
+ */
+
+ if (ind->num_nodes)
+ return (ind->nodes[0]);
+ else
+ return (NULL);
+}
+
+
+/*
+ * 'index_compare()' - Compare two nodes.
+ */
+
+static int /* O - Result of comparison */
+index_compare(mxml_index_t *ind, /* I - Index */
+ mxml_node_t *first, /* I - First node */
+ mxml_node_t *second) /* I - Second node */
+{
+ int diff; /* Difference */
+
+
+ /*
+ * Check the element name...
+ */
+
+ if ((diff = strcmp(first->value.element.name,
+ second->value.element.name)) != 0)
+ return (diff);
+
+ /*
+ * Check the attribute value...
+ */
+
+ if (ind->attr)
+ {
+ if ((diff = strcmp(mxmlElementGetAttr(first, ind->attr),
+ mxmlElementGetAttr(second, ind->attr))) != 0)
+ return (diff);
+ }
+
+ /*
+ * No difference, return 0...
+ */
+
+ return (0);
+}
+
+
+/*
+ * 'index_find()' - Compare a node with index values.
+ */
+
+static int /* O - Result of comparison */
+index_find(mxml_index_t *ind, /* I - Index */
+ const char *element, /* I - Element name or NULL */
+ const char *value, /* I - Attribute value or NULL */
+ mxml_node_t *node) /* I - Node */
+{
+ int diff; /* Difference */
+
+
+ /*
+ * Check the element name...
+ */
+
+ if (element)
+ {
+ if ((diff = strcmp(element, node->value.element.name)) != 0)
+ return (diff);
+ }
+
+ /*
+ * Check the attribute value...
+ */
+
+ if (value)
+ {
+ if ((diff = strcmp(value, mxmlElementGetAttr(node, ind->attr))) != 0)
+ return (diff);
+ }
+
+ /*
+ * No difference, return 0...
+ */
+
+ return (0);
+}
+
+
+/*
+ * 'index_sort()' - Sort the nodes in the index...
+ *
+ * This function implements the classic quicksort algorithm...
+ */
+
+static void
+index_sort(mxml_index_t *ind, /* I - Index to sort */
+ int left, /* I - Left node in partition */
+ int right) /* I - Right node in partition */
+{
+ mxml_node_t *pivot, /* Pivot node */
+ *temp; /* Swap node */
+ int templ, /* Temporary left node */
+ tempr; /* Temporary right node */
+
+
+ /*
+ * Loop until we have sorted all the way to the right...
+ */
+
+ do
+ {
+ /*
+ * Sort the pivot in the current partition...
+ */
+
+ pivot = ind->nodes[left];
+
+ for (templ = left, tempr = right; templ < tempr;)
+ {
+ /*
+ * Move left while left node <= pivot node...
+ */
+
+ while ((templ < right) &&
+ index_compare(ind, ind->nodes[templ], pivot) <= 0)
+ templ ++;
+
+ /*
+ * Move right while right node > pivot node...
+ */
+
+ while ((tempr > left) &&
+ index_compare(ind, ind->nodes[tempr], pivot) > 0)
+ tempr --;
+
+ /*
+ * Swap nodes if needed...
+ */
+
+ if (templ < tempr)
+ {
+ temp = ind->nodes[templ];
+ ind->nodes[templ] = ind->nodes[tempr];
+ ind->nodes[tempr] = temp;
+ }
+ }
+
+ /*
+ * When we get here, the right (tempr) node is the new position for the
+ * pivot node...
+ */
+
+ if (index_compare(ind, pivot, ind->nodes[tempr]) > 0)
+ {
+ ind->nodes[left] = ind->nodes[tempr];
+ ind->nodes[tempr] = pivot;
+ }
+
+ /*
+ * Recursively sort the left partition as needed...
+ */
+
+ if (left < (tempr - 1))
+ index_sort(ind, left, tempr - 1);
+ }
+ while (right > (left = tempr + 1));
+}
+
+
+/*
+ * End of "$Id: mxml-index.c 426 2011-01-01 23:42:17Z mike $".
+ */
diff --git a/tools/gator/daemon/mxml/mxml-node.c b/tools/gator/daemon/mxml/mxml-node.c
new file mode 100644
index 000000000000..44af759f9de3
--- /dev/null
+++ b/tools/gator/daemon/mxml/mxml-node.c
@@ -0,0 +1,807 @@
+/*
+ * "$Id: mxml-node.c 436 2011-01-22 01:02:05Z mike $"
+ *
+ * Node support code for Mini-XML, a small XML-like file parsing library.
+ *
+ * Copyright 2003-2011 by Michael R Sweet.
+ *
+ * These coded instructions, statements, and computer programs are the
+ * property of Michael R Sweet and are protected by Federal copyright
+ * law. Distribution and use rights are outlined in the file "COPYING"
+ * which should have been included with this file. If this file is
+ * missing or damaged, see the license at:
+ *
+ * http://www.minixml.org/
+ *
+ * Contents:
+ *
+ * mxmlAdd() - Add a node to a tree.
+ * mxmlDelete() - Delete a node and all of its children.
+ * mxmlGetRefCount() - Get the current reference (use) count for a node.
+ * mxmlNewCDATA() - Create a new CDATA node.
+ * mxmlNewCustom() - Create a new custom data node.
+ * mxmlNewElement() - Create a new element node.
+ * mxmlNewInteger() - Create a new integer node.
+ * mxmlNewOpaque() - Create a new opaque string.
+ * mxmlNewReal() - Create a new real number node.
+ * mxmlNewText() - Create a new text fragment node.
+ * mxmlNewTextf() - Create a new formatted text fragment node.
+ * mxmlRemove() - Remove a node from its parent.
+ * mxmlNewXML() - Create a new XML document tree.
+ * mxmlRelease() - Release a node.
+ * mxmlRetain() - Retain a node.
+ * mxml_new() - Create a new node.
+ */
+
+/*
+ * Include necessary headers...
+ */
+
+#include "config.h"
+#include "mxml.h"
+
+
+/*
+ * Local functions...
+ */
+
+static mxml_node_t *mxml_new(mxml_node_t *parent, mxml_type_t type);
+
+
+/*
+ * 'mxmlAdd()' - Add a node to a tree.
+ *
+ * Adds the specified node to the parent. If the child argument is not
+ * NULL, puts the new node before or after the specified child depending
+ * on the value of the where argument. If the child argument is NULL,
+ * puts the new node at the beginning of the child list (MXML_ADD_BEFORE)
+ * or at the end of the child list (MXML_ADD_AFTER). The constant
+ * MXML_ADD_TO_PARENT can be used to specify a NULL child pointer.
+ */
+
+void
+mxmlAdd(mxml_node_t *parent, /* I - Parent node */
+ int where, /* I - Where to add, MXML_ADD_BEFORE or MXML_ADD_AFTER */
+ mxml_node_t *child, /* I - Child node for where or MXML_ADD_TO_PARENT */
+ mxml_node_t *node) /* I - Node to add */
+{
+#ifdef DEBUG
+ fprintf(stderr, "mxmlAdd(parent=%p, where=%d, child=%p, node=%p)\n", parent,
+ where, child, node);
+#endif /* DEBUG */
+
+ /*
+ * Range check input...
+ */
+
+ if (!parent || !node)
+ return;
+
+#if DEBUG > 1
+ fprintf(stderr, " BEFORE: node->parent=%p\n", node->parent);
+ if (parent)
+ {
+ fprintf(stderr, " BEFORE: parent->child=%p\n", parent->child);
+ fprintf(stderr, " BEFORE: parent->last_child=%p\n", parent->last_child);
+ fprintf(stderr, " BEFORE: parent->prev=%p\n", parent->prev);
+ fprintf(stderr, " BEFORE: parent->next=%p\n", parent->next);
+ }
+#endif /* DEBUG > 1 */
+
+ /*
+ * Remove the node from any existing parent...
+ */
+
+ if (node->parent)
+ mxmlRemove(node);
+
+ /*
+ * Reset pointers...
+ */
+
+ node->parent = parent;
+
+ switch (where)
+ {
+ case MXML_ADD_BEFORE :
+ if (!child || child == parent->child || child->parent != parent)
+ {
+ /*
+ * Insert as first node under parent...
+ */
+
+ node->next = parent->child;
+
+ if (parent->child)
+ parent->child->prev = node;
+ else
+ parent->last_child = node;
+
+ parent->child = node;
+ }
+ else
+ {
+ /*
+ * Insert node before this child...
+ */
+
+ node->next = child;
+ node->prev = child->prev;
+
+ if (child->prev)
+ child->prev->next = node;
+ else
+ parent->child = node;
+
+ child->prev = node;
+ }
+ break;
+
+ case MXML_ADD_AFTER :
+ if (!child || child == parent->last_child || child->parent != parent)
+ {
+ /*
+ * Insert as last node under parent...
+ */
+
+ node->parent = parent;
+ node->prev = parent->last_child;
+
+ if (parent->last_child)
+ parent->last_child->next = node;
+ else
+ parent->child = node;
+
+ parent->last_child = node;
+ }
+ else
+ {
+ /*
+ * Insert node after this child...
+ */
+
+ node->prev = child;
+ node->next = child->next;
+
+ if (child->next)
+ child->next->prev = node;
+ else
+ parent->last_child = node;
+
+ child->next = node;
+ }
+ break;
+ }
+
+#if DEBUG > 1
+ fprintf(stderr, " AFTER: node->parent=%p\n", node->parent);
+ if (parent)
+ {
+ fprintf(stderr, " AFTER: parent->child=%p\n", parent->child);
+ fprintf(stderr, " AFTER: parent->last_child=%p\n", parent->last_child);
+ fprintf(stderr, " AFTER: parent->prev=%p\n", parent->prev);
+ fprintf(stderr, " AFTER: parent->next=%p\n", parent->next);
+ }
+#endif /* DEBUG > 1 */
+}
+
+
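+/*
+ * Usage sketch (illustrative; element names are invented for the example):
+ * the mxmlNew*() constructors below already call mxmlAdd() with
+ * MXML_ADD_AFTER and MXML_ADD_TO_PARENT, so explicit calls are only needed
+ * for custom placement.
+ *
+ *   mxml_node_t *first = mxmlNewElement(parent, "first");
+ *   mxml_node_t *note  = mxmlNewElement(MXML_NO_PARENT, "note");
+ *
+ *   // Insert "note" before the existing "first" child of "parent"...
+ *   mxmlAdd(parent, MXML_ADD_BEFORE, first, note);
+ */
+
+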
+/*
+ * 'mxmlDelete()' - Delete a node and all of its children.
+ *
+ * If the specified node has a parent, this function first removes the
+ * node from its parent using the mxmlRemove() function.
+ */
+
+void
+mxmlDelete(mxml_node_t *node) /* I - Node to delete */
+{
+ int i; /* Looping var */
+
+
+#ifdef DEBUG
+ fprintf(stderr, "mxmlDelete(node=%p)\n", node);
+#endif /* DEBUG */
+
+ /*
+ * Range check input...
+ */
+
+ if (!node)
+ return;
+
+ /*
+ * Remove the node from its parent, if any...
+ */
+
+ mxmlRemove(node);
+
+ /*
+ * Delete children...
+ */
+
+ while (node->child)
+ mxmlDelete(node->child);
+
+ /*
+ * Now delete any node data...
+ */
+
+ switch (node->type)
+ {
+ case MXML_ELEMENT :
+ if (node->value.element.name)
+ free(node->value.element.name);
+
+ if (node->value.element.num_attrs)
+ {
+ for (i = 0; i < node->value.element.num_attrs; i ++)
+ {
+ if (node->value.element.attrs[i].name)
+ free(node->value.element.attrs[i].name);
+ if (node->value.element.attrs[i].value)
+ free(node->value.element.attrs[i].value);
+ }
+
+ free(node->value.element.attrs);
+ }
+ break;
+ case MXML_INTEGER :
+ /* Nothing to do */
+ break;
+ case MXML_OPAQUE :
+ if (node->value.opaque)
+ free(node->value.opaque);
+ break;
+ case MXML_REAL :
+ /* Nothing to do */
+ break;
+ case MXML_TEXT :
+ if (node->value.text.string)
+ free(node->value.text.string);
+ break;
+ case MXML_CUSTOM :
+ if (node->value.custom.data &&
+ node->value.custom.destroy)
+ (*(node->value.custom.destroy))(node->value.custom.data);
+ break;
+ default :
+ break;
+ }
+
+ /*
+ * Free this node...
+ */
+
+ free(node);
+}
+
+
+/*
+ * 'mxmlGetRefCount()' - Get the current reference (use) count for a node.
+ *
+ * The initial reference count of new nodes is 1. Use the @link mxmlRetain@
+ * and @link mxmlRelease@ functions to increment and decrement a node's
+ * reference count.
+ *
+ * @since Mini-XML 2.7@
+ */
+
+int /* O - Reference count */
+mxmlGetRefCount(mxml_node_t *node) /* I - Node */
+{
+ /*
+ * Range check input...
+ */
+
+ if (!node)
+ return (0);
+
+ /*
+ * Return the reference count...
+ */
+
+ return (node->ref_count);
+}
+
+
+/*
+ * 'mxmlNewCDATA()' - Create a new CDATA node.
+ *
+ * The new CDATA node is added to the end of the specified parent's child
+ * list. The constant MXML_NO_PARENT can be used to specify that the new
+ * CDATA node has no parent. The data string must be nul-terminated and
+ * is copied into the new node. CDATA nodes use the MXML_ELEMENT type.
+ *
+ * @since Mini-XML 2.3@
+ */
+
+mxml_node_t * /* O - New node */
+mxmlNewCDATA(mxml_node_t *parent, /* I - Parent node or MXML_NO_PARENT */
+ const char *data) /* I - Data string */
+{
+ mxml_node_t *node; /* New node */
+
+
+#ifdef DEBUG
+ fprintf(stderr, "mxmlNewCDATA(parent=%p, data=\"%s\")\n",
+ parent, data ? data : "(null)");
+#endif /* DEBUG */
+
+ /*
+ * Range check input...
+ */
+
+ if (!data)
+ return (NULL);
+
+ /*
+ * Create the node and set the name value...
+ */
+
+ if ((node = mxml_new(parent, MXML_ELEMENT)) != NULL)
+ node->value.element.name = _mxml_strdupf("![CDATA[%s]]", data);
+
+ return (node);
+}
+
+
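+/*
+ * Usage sketch (illustrative; element name and data are invented for the
+ * example): adding a CDATA block under an element.
+ *
+ *   mxml_node_t *script = mxmlNewElement(parent, "script");
+ *
+ *   mxmlNewCDATA(script, "if (a < b) doSomething();");
+ */
+
+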
+/*
+ * 'mxmlNewCustom()' - Create a new custom data node.
+ *
+ * The new custom node is added to the end of the specified parent's child
+ * list. The constant MXML_NO_PARENT can be used to specify that the new
+ * custom node has no parent. A NULL "destroy" callback can be passed when
+ * the data in the node is not dynamically allocated or is separately managed.
+ *
+ * @since Mini-XML 2.1@
+ */
+
+mxml_node_t * /* O - New node */
+mxmlNewCustom(
+ mxml_node_t *parent, /* I - Parent node or MXML_NO_PARENT */
+ void *data, /* I - Pointer to data */
+ mxml_custom_destroy_cb_t destroy) /* I - Function to destroy data */
+{
+ mxml_node_t *node; /* New node */
+
+
+#ifdef DEBUG
+ fprintf(stderr, "mxmlNewCustom(parent=%p, data=%p, destroy=%p)\n", parent,
+ data, destroy);
+#endif /* DEBUG */
+
+ /*
+ * Create the node and set the value...
+ */
+
+ if ((node = mxml_new(parent, MXML_CUSTOM)) != NULL)
+ {
+ node->value.custom.data = data;
+ node->value.custom.destroy = destroy;
+ }
+
+ return (node);
+}
+
+
+/*
+ * 'mxmlNewElement()' - Create a new element node.
+ *
+ * The new element node is added to the end of the specified parent's child
+ * list. The constant MXML_NO_PARENT can be used to specify that the new
+ * element node has no parent.
+ */
+
+mxml_node_t * /* O - New node */
+mxmlNewElement(mxml_node_t *parent, /* I - Parent node or MXML_NO_PARENT */
+ const char *name) /* I - Name of element */
+{
+ mxml_node_t *node; /* New node */
+
+
+#ifdef DEBUG
+ fprintf(stderr, "mxmlNewElement(parent=%p, name=\"%s\")\n", parent,
+ name ? name : "(null)");
+#endif /* DEBUG */
+
+ /*
+ * Range check input...
+ */
+
+ if (!name)
+ return (NULL);
+
+ /*
+ * Create the node and set the element name...
+ */
+
+ if ((node = mxml_new(parent, MXML_ELEMENT)) != NULL)
+ node->value.element.name = strdup(name);
+
+ return (node);
+}
+
+
+/*
+ * 'mxmlNewInteger()' - Create a new integer node.
+ *
+ * The new integer node is added to the end of the specified parent's child
+ * list. The constant MXML_NO_PARENT can be used to specify that the new
+ * integer node has no parent.
+ */
+
+mxml_node_t * /* O - New node */
+mxmlNewInteger(mxml_node_t *parent, /* I - Parent node or MXML_NO_PARENT */
+ int integer) /* I - Integer value */
+{
+ mxml_node_t *node; /* New node */
+
+
+#ifdef DEBUG
+ fprintf(stderr, "mxmlNewInteger(parent=%p, integer=%d)\n", parent, integer);
+#endif /* DEBUG */
+
+ /*
+ * Create the node and set the element name...
+ */
+
+ if ((node = mxml_new(parent, MXML_INTEGER)) != NULL)
+ node->value.integer = integer;
+
+ return (node);
+}
+
+
+/*
+ * 'mxmlNewOpaque()' - Create a new opaque string.
+ *
+ * The new opaque node is added to the end of the specified parent's child
+ * list. The constant MXML_NO_PARENT can be used to specify that the new
+ * opaque node has no parent. The opaque string must be nul-terminated and
+ * is copied into the new node.
+ */
+
+mxml_node_t * /* O - New node */
+mxmlNewOpaque(mxml_node_t *parent, /* I - Parent node or MXML_NO_PARENT */
+ const char *opaque) /* I - Opaque string */
+{
+ mxml_node_t *node; /* New node */
+
+
+#ifdef DEBUG
+ fprintf(stderr, "mxmlNewOpaque(parent=%p, opaque=\"%s\")\n", parent,
+ opaque ? opaque : "(null)");
+#endif /* DEBUG */
+
+ /*
+ * Range check input...
+ */
+
+ if (!opaque)
+ return (NULL);
+
+ /*
+ * Create the node and set the element name...
+ */
+
+ if ((node = mxml_new(parent, MXML_OPAQUE)) != NULL)
+ node->value.opaque = strdup(opaque);
+
+ return (node);
+}
+
+
+/*
+ * 'mxmlNewReal()' - Create a new real number node.
+ *
+ * The new real number node is added to the end of the specified parent's
+ * child list. The constant MXML_NO_PARENT can be used to specify that
+ * the new real number node has no parent.
+ */
+
+mxml_node_t * /* O - New node */
+mxmlNewReal(mxml_node_t *parent, /* I - Parent node or MXML_NO_PARENT */
+ double real) /* I - Real number value */
+{
+ mxml_node_t *node; /* New node */
+
+
+#ifdef DEBUG
+ fprintf(stderr, "mxmlNewReal(parent=%p, real=%g)\n", parent, real);
+#endif /* DEBUG */
+
+ /*
+ * Create the node and set the element name...
+ */
+
+ if ((node = mxml_new(parent, MXML_REAL)) != NULL)
+ node->value.real = real;
+
+ return (node);
+}
+
+
+/*
+ * 'mxmlNewText()' - Create a new text fragment node.
+ *
+ * The new text node is added to the end of the specified parent's child
+ * list. The constant MXML_NO_PARENT can be used to specify that the new
+ * text node has no parent. The whitespace parameter is used to specify
+ * whether leading whitespace is present before the node. The text
+ * string must be nul-terminated and is copied into the new node.
+ */
+
+mxml_node_t * /* O - New node */
+mxmlNewText(mxml_node_t *parent, /* I - Parent node or MXML_NO_PARENT */
+ int whitespace, /* I - 1 = leading whitespace, 0 = no whitespace */
+ const char *string) /* I - String */
+{
+ mxml_node_t *node; /* New node */
+
+
+#ifdef DEBUG
+ fprintf(stderr, "mxmlNewText(parent=%p, whitespace=%d, string=\"%s\")\n",
+ parent, whitespace, string ? string : "(null)");
+#endif /* DEBUG */
+
+ /*
+ * Range check input...
+ */
+
+ if (!string)
+ return (NULL);
+
+ /*
+ * Create the node and set the text value...
+ */
+
+ if ((node = mxml_new(parent, MXML_TEXT)) != NULL)
+ {
+ node->value.text.whitespace = whitespace;
+ node->value.text.string = strdup(string);
+ }
+
+ return (node);
+}
+
+
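+/*
+ * Usage sketch (illustrative; element names and values are invented for the
+ * example): building a small document with the node constructors above and
+ * mxmlNewXML() below.
+ *
+ *   mxml_node_t *xml   = mxmlNewXML("1.0");
+ *   mxml_node_t *group = mxmlNewElement(xml, "group");
+ *   mxml_node_t *item  = mxmlNewElement(group, "item");
+ *
+ *   mxmlNewText(item, 0, "hello");
+ *   mxmlNewInteger(mxmlNewElement(group, "count"), 1);
+ *
+ *   // ... use the tree, then free everything from the root down ...
+ *   mxmlDelete(xml);
+ */
+
+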
+/*
+ * 'mxmlNewTextf()' - Create a new formatted text fragment node.
+ *
+ * The new text node is added to the end of the specified parent's child
+ * list. The constant MXML_NO_PARENT can be used to specify that the new
+ * text node has no parent. The whitespace parameter is used to specify
+ * whether leading whitespace is present before the node. The format
+ * string must be nul-terminated and is formatted into the new node.
+ */
+
+mxml_node_t * /* O - New node */
+mxmlNewTextf(mxml_node_t *parent, /* I - Parent node or MXML_NO_PARENT */
+ int whitespace, /* I - 1 = leading whitespace, 0 = no whitespace */
+ const char *format, /* I - Printf-style format string */
+ ...) /* I - Additional args as needed */
+{
+ mxml_node_t *node; /* New node */
+ va_list ap; /* Pointer to arguments */
+
+
+#ifdef DEBUG
+ fprintf(stderr, "mxmlNewTextf(parent=%p, whitespace=%d, format=\"%s\", ...)\n",
+ parent, whitespace, format ? format : "(null)");
+#endif /* DEBUG */
+
+ /*
+ * Range check input...
+ */
+
+ if (!format)
+ return (NULL);
+
+ /*
+ * Create the node and set the text value...
+ */
+
+ if ((node = mxml_new(parent, MXML_TEXT)) != NULL)
+ {
+ va_start(ap, format);
+
+ node->value.text.whitespace = whitespace;
+ node->value.text.string = _mxml_vstrdupf(format, ap);
+
+ va_end(ap);
+ }
+
+ return (node);
+}
+
+
+/*
+ * 'mxmlRemove()' - Remove a node from its parent.
+ *
+ * Does not free memory used by the node - use mxmlDelete() for that.
+ * This function does nothing if the node has no parent.
+ */
+
+void
+mxmlRemove(mxml_node_t *node) /* I - Node to remove */
+{
+#ifdef DEBUG
+ fprintf(stderr, "mxmlRemove(node=%p)\n", node);
+#endif /* DEBUG */
+
+ /*
+ * Range check input...
+ */
+
+ if (!node || !node->parent)
+ return;
+
+ /*
+ * Remove from parent...
+ */
+
+#if DEBUG > 1
+ fprintf(stderr, " BEFORE: node->parent=%p\n", node->parent);
+ if (node->parent)
+ {
+ fprintf(stderr, " BEFORE: node->parent->child=%p\n", node->parent->child);
+ fprintf(stderr, " BEFORE: node->parent->last_child=%p\n", node->parent->last_child);
+ }
+ fprintf(stderr, " BEFORE: node->child=%p\n", node->child);
+ fprintf(stderr, " BEFORE: node->last_child=%p\n", node->last_child);
+ fprintf(stderr, " BEFORE: node->prev=%p\n", node->prev);
+ fprintf(stderr, " BEFORE: node->next=%p\n", node->next);
+#endif /* DEBUG > 1 */
+
+ if (node->prev)
+ node->prev->next = node->next;
+ else
+ node->parent->child = node->next;
+
+ if (node->next)
+ node->next->prev = node->prev;
+ else
+ node->parent->last_child = node->prev;
+
+ node->parent = NULL;
+ node->prev = NULL;
+ node->next = NULL;
+
+#if DEBUG > 1
+ fprintf(stderr, " AFTER: node->parent=%p\n", node->parent);
+ if (node->parent)
+ {
+ fprintf(stderr, " AFTER: node->parent->child=%p\n", node->parent->child);
+ fprintf(stderr, " AFTER: node->parent->last_child=%p\n", node->parent->last_child);
+ }
+ fprintf(stderr, " AFTER: node->child=%p\n", node->child);
+ fprintf(stderr, " AFTER: node->last_child=%p\n", node->last_child);
+ fprintf(stderr, " AFTER: node->prev=%p\n", node->prev);
+ fprintf(stderr, " AFTER: node->next=%p\n", node->next);
+#endif /* DEBUG > 1 */
+}
+
+
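+/*
+ * Usage sketch (illustrative; "node" and "new_parent" are assumed to exist):
+ * re-parenting a node.  mxmlRemove() only detaches the node, so it can be
+ * added under another parent afterwards; mxmlDelete() would free it instead.
+ *
+ *   mxmlRemove(node);
+ *   mxmlAdd(new_parent, MXML_ADD_AFTER, MXML_ADD_TO_PARENT, node);
+ */
+
+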
+/*
+ * 'mxmlNewXML()' - Create a new XML document tree.
+ *
+ * The "version" argument specifies the version number to put in the
+ * ?xml element node. If NULL, version 1.0 is assumed.
+ *
+ * @since Mini-XML 2.3@
+ */
+
+mxml_node_t * /* O - New ?xml node */
+mxmlNewXML(const char *version) /* I - Version number to use */
+{
+ char element[1024]; /* Element text */
+
+
+ snprintf(element, sizeof(element), "?xml version=\"%s\" encoding=\"utf-8\"?",
+ version ? version : "1.0");
+
+ return (mxmlNewElement(NULL, element));
+}
+
+
+/*
+ * 'mxmlRelease()' - Release a node.
+ *
+ * When the reference count reaches zero, the node (and any children)
+ * is deleted via mxmlDelete().
+ *
+ * @since Mini-XML 2.3@
+ */
+
+int /* O - New reference count */
+mxmlRelease(mxml_node_t *node) /* I - Node */
+{
+ if (node)
+ {
+ if ((-- node->ref_count) <= 0)
+ {
+ mxmlDelete(node);
+ return (0);
+ }
+ else
+ return (node->ref_count);
+ }
+ else
+ return (-1);
+}
+
+
+/*
+ * 'mxmlRetain()' - Retain a node.
+ *
+ * @since Mini-XML 2.3@
+ */
+
+int /* O - New reference count */
+mxmlRetain(mxml_node_t *node) /* I - Node */
+{
+ if (node)
+ return (++ node->ref_count);
+ else
+ return (-1);
+}
+
+
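+/*
+ * Usage sketch (illustrative; "node" is assumed to be a newly created node,
+ * whose initial reference count is 1): sharing a node between two owners
+ * with the reference-counting calls above.
+ *
+ *   mxmlRetain(node);	// ref_count: 1 -> 2
+ *
+ *   // ... both owners use the node ...
+ *
+ *   mxmlRelease(node);	// ref_count: 2 -> 1
+ *   mxmlRelease(node);	// ref_count: 1 -> 0, node is deleted
+ */
+
+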
+/*
+ * 'mxml_new()' - Create a new node.
+ */
+
+static mxml_node_t * /* O - New node */
+mxml_new(mxml_node_t *parent, /* I - Parent node */
+ mxml_type_t type) /* I - Node type */
+{
+ mxml_node_t *node; /* New node */
+
+
+#if DEBUG > 1
+ fprintf(stderr, "mxml_new(parent=%p, type=%d)\n", parent, type);
+#endif /* DEBUG > 1 */
+
+ /*
+ * Allocate memory for the node...
+ */
+
+ if ((node = calloc(1, sizeof(mxml_node_t))) == NULL)
+ {
+#if DEBUG > 1
+ fputs(" returning NULL\n", stderr);
+#endif /* DEBUG > 1 */
+
+ return (NULL);
+ }
+
+#if DEBUG > 1
+ fprintf(stderr, " returning %p\n", node);
+#endif /* DEBUG > 1 */
+
+ /*
+ * Set the node type...
+ */
+
+ node->type = type;
+ node->ref_count = 1;
+
+ /*
+ * Add to the parent if present...
+ */
+
+ if (parent)
+ mxmlAdd(parent, MXML_ADD_AFTER, MXML_ADD_TO_PARENT, node);
+
+ /*
+ * Return the new node...
+ */
+
+ return (node);
+}
+
+
+/*
+ * End of "$Id: mxml-node.c 436 2011-01-22 01:02:05Z mike $".
+ */
diff --git a/tools/gator/daemon/mxml/mxml-private.c b/tools/gator/daemon/mxml/mxml-private.c
new file mode 100644
index 000000000000..72f3e2320c7c
--- /dev/null
+++ b/tools/gator/daemon/mxml/mxml-private.c
@@ -0,0 +1,331 @@
+/*
+ * "$Id: mxml-private.c 422 2010-11-07 22:55:11Z mike $"
+ *
+ * Private functions for Mini-XML, a small XML-like file parsing library.
+ *
+ * Copyright 2003-2010 by Michael R Sweet.
+ *
+ * These coded instructions, statements, and computer programs are the
+ * property of Michael R Sweet and are protected by Federal copyright
+ * law. Distribution and use rights are outlined in the file "COPYING"
+ * which should have been included with this file. If this file is
+ * missing or damaged, see the license at:
+ *
+ * http://www.minixml.org/
+ *
+ * Contents:
+ *
+ * mxml_error() - Display an error message.
+ * mxml_integer_cb() - Default callback for integer values.
+ * mxml_opaque_cb() - Default callback for opaque values.
+ * mxml_real_cb() - Default callback for real number values.
+ * _mxml_global() - Get global data.
+ */
+
+/*
+ * Include necessary headers...
+ */
+
+#include "mxml-private.h"
+
+
+/*
+ * Some crazy people think that unloading a shared object is a good or safe
+ * thing to do. Unfortunately, most objects are simply *not* safe to unload
+ * and bad things *will* happen.
+ *
+ * The following mess of conditional code allows us to provide a destructor
+ * function in Mini-XML for our thread-global storage so that it can possibly
+ * be unloaded safely, although since there is no standard way to do so I
+ * can't even provide any guarantees that you can do it safely on all platforms.
+ *
+ * This code currently supports AIX, HP-UX, Linux, Mac OS X, Solaris, and
+ * Windows. It might work on the BSDs and IRIX, but I haven't tested that.
+ */
+
+#if defined(__sun) || defined(_AIX)
+# pragma fini(_mxml_fini)
+# define _MXML_FINI _mxml_fini
+#elif defined(__hpux)
+# pragma FINI _mxml_fini
+# define _MXML_FINI _mxml_fini
+#elif defined(__GNUC__) /* Linux and Mac OS X */
+# define _MXML_FINI __attribute((destructor)) _mxml_fini
+#else
+# define _MXML_FINI _fini
+#endif /* __sun */
+
+
+/*
+ * 'mxml_error()' - Display an error message.
+ */
+
+void
+mxml_error(const char *format, /* I - Printf-style format string */
+ ...) /* I - Additional arguments as needed */
+{
+ va_list ap; /* Pointer to arguments */
+ char s[1024]; /* Message string */
+ _mxml_global_t *global = _mxml_global();
+ /* Global data */
+
+
+ /*
+ * Range check input...
+ */
+
+ if (!format)
+ return;
+
+ /*
+ * Format the error message string...
+ */
+
+ va_start(ap, format);
+
+ vsnprintf(s, sizeof(s), format, ap);
+
+ va_end(ap);
+
+ /*
+ * And then display the error message...
+ */
+
+ if (global->error_cb)
+ (*global->error_cb)(s);
+ else
+ fprintf(stderr, "mxml: %s\n", s);
+}
+
+
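+/*
+ * Usage sketch (illustrative): routing Mini-XML errors to a custom handler
+ * via the error_cb field above.  mxmlSetErrorCallback() is assumed here; it
+ * is part of the public Mini-XML API but is defined outside this file, and
+ * the syslog() call requires <syslog.h>.
+ *
+ *   static void
+ *   my_error_handler(const char *message)
+ *   {
+ *     syslog(LOG_ERR, "XML error: %s", message);
+ *   }
+ *
+ *   ...
+ *
+ *   mxmlSetErrorCallback(my_error_handler);
+ */
+
+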
+/*
+ * 'mxml_ignore_cb()' - Default callback for ignored values.
+ */
+
+mxml_type_t /* O - Node type */
+mxml_ignore_cb(mxml_node_t *node) /* I - Current node */
+{
+ (void)node;
+
+ return (MXML_IGNORE);
+}
+
+
+/*
+ * 'mxml_integer_cb()' - Default callback for integer values.
+ */
+
+mxml_type_t /* O - Node type */
+mxml_integer_cb(mxml_node_t *node) /* I - Current node */
+{
+ (void)node;
+
+ return (MXML_INTEGER);
+}
+
+
+/*
+ * 'mxml_opaque_cb()' - Default callback for opaque values.
+ */
+
+mxml_type_t /* O - Node type */
+mxml_opaque_cb(mxml_node_t *node) /* I - Current node */
+{
+ (void)node;
+
+ return (MXML_OPAQUE);
+}
+
+
+/*
+ * 'mxml_real_cb()' - Default callback for real number values.
+ */
+
+mxml_type_t /* O - Node type */
+mxml_real_cb(mxml_node_t *node) /* I - Current node */
+{
+ (void)node;
+
+ return (MXML_REAL);
+}
+
+
+#ifdef HAVE_PTHREAD_H /**** POSIX threading ****/
+# include <pthread.h>
+
+static pthread_key_t _mxml_key = -1; /* Thread local storage key */
+static pthread_once_t _mxml_key_once = PTHREAD_ONCE_INIT;
+ /* One-time initialization object */
+static void _mxml_init(void);
+static void _mxml_destructor(void *g);
+
+
+/*
+ * '_mxml_destructor()' - Free memory used for globals...
+ */
+
+static void
+_mxml_destructor(void *g) /* I - Global data */
+{
+ free(g);
+}
+
+
+/*
+ * '_mxml_fini()' - Clean up when unloaded.
+ */
+
+static void
+_MXML_FINI(void)
+{
+ _mxml_global_t *global; /* Global data */
+
+
+ if (_mxml_key != -1)
+ {
+ if ((global = (_mxml_global_t *)pthread_getspecific(_mxml_key)) != NULL)
+ _mxml_destructor(global);
+
+ pthread_key_delete(_mxml_key);
+ _mxml_key = -1;
+ }
+}
+
+
+/*
+ * '_mxml_global()' - Get global data.
+ */
+
+_mxml_global_t * /* O - Global data */
+_mxml_global(void)
+{
+ _mxml_global_t *global; /* Global data */
+
+
+ pthread_once(&_mxml_key_once, _mxml_init);
+
+ if ((global = (_mxml_global_t *)pthread_getspecific(_mxml_key)) == NULL)
+ {
+ global = (_mxml_global_t *)calloc(1, sizeof(_mxml_global_t));
+ pthread_setspecific(_mxml_key, global);
+
+ global->num_entity_cbs = 1;
+ global->entity_cbs[0] = _mxml_entity_cb;
+ global->wrap = 72;
+ }
+
+ return (global);
+}
+
+
+/*
+ * '_mxml_init()' - Initialize global data...
+ */
+
+static void
+_mxml_init(void)
+{
+ pthread_key_create(&_mxml_key, _mxml_destructor);
+}
+
+
+#elif defined(WIN32) && defined(MXML1_EXPORTS) /**** WIN32 threading ****/
+# include <windows.h>
+
+static DWORD _mxml_tls_index; /* Index for global storage */
+
+
+/*
+ * 'DllMain()' - Main entry for library.
+ */
+
+BOOL WINAPI /* O - Success/failure */
+DllMain(HINSTANCE hinst, /* I - DLL module handle */
+ DWORD reason, /* I - Reason */
+ LPVOID reserved) /* I - Unused */
+{
+ _mxml_global_t *global; /* Global data */
+
+
+ (void)hinst;
+ (void)reserved;
+
+ switch (reason)
+ {
+ case DLL_PROCESS_ATTACH : /* Called on library initialization */
+ if ((_mxml_tls_index = TlsAlloc()) == TLS_OUT_OF_INDEXES)
+ return (FALSE);
+ break;
+
+ case DLL_THREAD_DETACH : /* Called when a thread terminates */
+ if ((global = (_mxml_global_t *)TlsGetValue(_mxml_tls_index)) != NULL)
+ free(global);
+ break;
+
+ case DLL_PROCESS_DETACH : /* Called when library is unloaded */
+ if ((global = (_mxml_global_t *)TlsGetValue(_mxml_tls_index)) != NULL)
+ free(global);
+
+ TlsFree(_mxml_tls_index);
+ break;
+
+ default:
+ break;
+ }
+
+ return (TRUE);
+}
+
+
+/*
+ * '_mxml_global()' - Get global data.
+ */
+
+_mxml_global_t * /* O - Global data */
+_mxml_global(void)
+{
+ _mxml_global_t *global; /* Global data */
+
+
+ if ((global = (_mxml_global_t *)TlsGetValue(_mxml_tls_index)) == NULL)
+ {
+ global = (_mxml_global_t *)calloc(1, sizeof(_mxml_global_t));
+
+ global->num_entity_cbs = 1;
+ global->entity_cbs[0] = _mxml_entity_cb;
+ global->wrap = 72;
+
+ TlsSetValue(_mxml_tls_index, (LPVOID)global);
+ }
+
+ return (global);
+}
+
+
+#else /**** No threading ****/
+/*
+ * '_mxml_global()' - Get global data.
+ */
+
+_mxml_global_t * /* O - Global data */
+_mxml_global(void)
+{
+ static _mxml_global_t global = /* Global data */
+ {
+ NULL, /* error_cb */
+ 1, /* num_entity_cbs */
+ { _mxml_entity_cb }, /* entity_cbs */
+ 72, /* wrap */
+ NULL, /* custom_load_cb */
+ NULL /* custom_save_cb */
+ };
+
+
+ return (&global);
+}
+#endif /* HAVE_PTHREAD_H */
+
+
+/*
+ * End of "$Id: mxml-private.c 422 2010-11-07 22:55:11Z mike $".
+ */
diff --git a/tools/gator/daemon/mxml/mxml-private.h b/tools/gator/daemon/mxml/mxml-private.h
new file mode 100644
index 000000000000..8789e6c52cbd
--- /dev/null
+++ b/tools/gator/daemon/mxml/mxml-private.h
@@ -0,0 +1,50 @@
+/*
+ * "$Id: mxml-private.h 408 2010-09-19 05:26:46Z mike $"
+ *
+ * Private definitions for Mini-XML, a small XML-like file parsing library.
+ *
+ * Copyright 2003-2010 by Michael R Sweet.
+ *
+ * These coded instructions, statements, and computer programs are the
+ * property of Michael R Sweet and are protected by Federal copyright
+ * law. Distribution and use rights are outlined in the file "COPYING"
+ * which should have been included with this file. If this file is
+ * missing or damaged, see the license at:
+ *
+ * http://www.minixml.org/
+ */
+
+/*
+ * Include necessary headers...
+ */
+
+#include "config.h"
+#include "mxml.h"
+
+
+/*
+ * Global, per-thread data...
+ */
+
+typedef struct _mxml_global_s
+{
+ void (*error_cb)(const char *);
+ int num_entity_cbs;
+ int (*entity_cbs[100])(const char *name);
+ int wrap;
+ mxml_custom_load_cb_t custom_load_cb;
+ mxml_custom_save_cb_t custom_save_cb;
+} _mxml_global_t;
+
+
+/*
+ * Functions...
+ */
+
+extern _mxml_global_t *_mxml_global(void);
+extern int _mxml_entity_cb(const char *name);
+
+
+/*
+ * End of "$Id: mxml-private.h 408 2010-09-19 05:26:46Z mike $".
+ */
diff --git a/tools/gator/daemon/mxml/mxml-search.c b/tools/gator/daemon/mxml/mxml-search.c
new file mode 100644
index 000000000000..f975af1543ca
--- /dev/null
+++ b/tools/gator/daemon/mxml/mxml-search.c
@@ -0,0 +1,287 @@
+/*
+ * "$Id: mxml-search.c 427 2011-01-03 02:03:29Z mike $"
+ *
+ * Search/navigation functions for Mini-XML, a small XML-like file
+ * parsing library.
+ *
+ * Copyright 2003-2010 by Michael R Sweet.
+ *
+ * These coded instructions, statements, and computer programs are the
+ * property of Michael R Sweet and are protected by Federal copyright
+ * law. Distribution and use rights are outlined in the file "COPYING"
+ * which should have been included with this file. If this file is
+ * missing or damaged, see the license at:
+ *
+ * http://www.minixml.org/
+ *
+ * Contents:
+ *
+ * mxmlFindElement() - Find the named element.
+ * mxmlFindValue() - Find a value with the given path.
+ * mxmlWalkNext() - Walk to the next logical node in the tree.
+ * mxmlWalkPrev() - Walk to the previous logical node in the tree.
+ */
+
+/*
+ * Include necessary headers...
+ */
+
+#include "config.h"
+#include "mxml.h"
+
+
+/*
+ * 'mxmlFindElement()' - Find the named element.
+ *
+ * The search is constrained by the name, attribute name, and value; any
+ * NULL names or values are treated as wildcards, so different kinds of
+ * searches can be implemented by looking for all elements of a given name
+ * or all elements with a specific attribute. The descend argument determines
+ * whether the search descends into child nodes; normally you will use
+ * MXML_DESCEND_FIRST for the initial search and MXML_NO_DESCEND to find
+ * additional direct descendents of the node. The top node argument
+ * constrains the search to a particular node's children.
+ */
+
+mxml_node_t * /* O - Element node or NULL */
+mxmlFindElement(mxml_node_t *node, /* I - Current node */
+ mxml_node_t *top, /* I - Top node */
+ const char *name, /* I - Element name or NULL for any */
+ const char *attr, /* I - Attribute name, or NULL for none */
+ const char *value, /* I - Attribute value, or NULL for any */
+ int descend) /* I - Descend into tree - MXML_DESCEND, MXML_NO_DESCEND, or MXML_DESCEND_FIRST */
+{
+ const char *temp; /* Current attribute value */
+
+
+ /*
+ * Range check input...
+ */
+
+ if (!node || !top || (!attr && value))
+ return (NULL);
+
+ /*
+ * Start with the next node...
+ */
+
+ node = mxmlWalkNext(node, top, descend);
+
+ /*
+ * Loop until we find a matching element...
+ */
+
+ while (node != NULL)
+ {
+ /*
+ * See if this node matches...
+ */
+
+ if (node->type == MXML_ELEMENT &&
+ node->value.element.name &&
+ (!name || !strcmp(node->value.element.name, name)))
+ {
+ /*
+ * See if we need to check for an attribute...
+ */
+
+ if (!attr)
+ return (node); /* No attribute search, return it... */
+
+ /*
+ * Check for the attribute...
+ */
+
+ if ((temp = mxmlElementGetAttr(node, attr)) != NULL)
+ {
+ /*
+ * OK, we have the attribute, does it match?
+ */
+
+ if (!value || !strcmp(value, temp))
+ return (node); /* Yes, return it... */
+ }
+ }
+
+ /*
+ * No match, move on to the next node...
+ */
+
+ if (descend == MXML_DESCEND)
+ node = mxmlWalkNext(node, top, MXML_DESCEND);
+ else
+ node = node->next;
+ }
+
+ return (NULL);
+}
+
+
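+/*
+ * Usage sketch (illustrative; element and attribute names are invented for
+ * the example, and "tree" is assumed to be the document root): finding all
+ * "item" elements with type="active" anywhere under the tree.
+ *
+ *   mxml_node_t *node;
+ *
+ *   for (node = mxmlFindElement(tree, tree, "item", "type", "active",
+ *                               MXML_DESCEND);
+ *        node;
+ *        node = mxmlFindElement(node, tree, "item", "type", "active",
+ *                               MXML_DESCEND))
+ *   {
+ *     // ... process each matching element ...
+ *   }
+ */
+
+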
+/*
+ * 'mxmlFindPath()' - Find a node with the given path.
+ *
+ * The "path" is a slash-separated list of element names. The name "*" is
+ * considered a wildcard for one or more levels of elements. For example,
+ * "foo/one/two", "bar/two/one", "*\/one", and so forth.
+ *
+ * The first child node of the found node is returned if the given node has
+ * children and the first child is a value node.
+ *
+ * @since Mini-XML 2.7@
+ */
+
+mxml_node_t * /* O - Found node or NULL */
+mxmlFindPath(mxml_node_t *top, /* I - Top node */
+ const char *path) /* I - Path to element */
+{
+ mxml_node_t *node; /* Current node */
+ char element[256]; /* Current element name */
+ const char *pathsep; /* Separator in path */
+ int descend; /* mxmlFindElement option */
+
+
+ /*
+ * Range check input...
+ */
+
+ if (!top || !path || !*path)
+ return (NULL);
+
+ /*
+ * Search each element in the path...
+ */
+
+ node = top;
+ while (*path)
+ {
+ /*
+ * Handle wildcards...
+ */
+
+ if (!strncmp(path, "*/", 2))
+ {
+ path += 2;
+ descend = MXML_DESCEND;
+ }
+ else
+ descend = MXML_DESCEND_FIRST;
+
+ /*
+ * Get the next element in the path...
+ */
+
+ if ((pathsep = strchr(path, '/')) == NULL)
+ pathsep = path + strlen(path);
+
+ if (pathsep == path || (pathsep - path) >= sizeof(element))
+ return (NULL);
+
+ memcpy(element, path, pathsep - path);
+ element[pathsep - path] = '\0';
+
+ if (*pathsep)
+ path = pathsep + 1;
+ else
+ path = pathsep;
+
+ /*
+ * Search for the element...
+ */
+
+ if ((node = mxmlFindElement(node, node, element, NULL, NULL,
+ descend)) == NULL)
+ return (NULL);
+ }
+
+ /*
+ * If we get this far, return the node or its first child...
+ */
+
+ if (node->child && node->child->type != MXML_ELEMENT)
+ return (node->child);
+ else
+ return (node);
+}
+
+
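+/*
+ * Usage sketch (illustrative; element names are invented for the example,
+ * and "tree" is assumed to be the document root): because the first value
+ * node child is returned when present, the result can be passed straight to
+ * the mxmlGet*() accessors.
+ *
+ *   mxml_node_t *value;
+ *
+ *   value = mxmlFindPath(tree, "config/network/port");
+ *   int port = mxmlGetInteger(value);
+ *
+ *   // "*" matches one or more intermediate levels...
+ *   value = mxmlFindPath(tree, "config/*/hostname");
+ */
+
+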
+/*
+ * 'mxmlWalkNext()' - Walk to the next logical node in the tree.
+ *
+ * The descend argument controls whether the first child is considered
+ * to be the next node. The top node argument constrains the walk to
+ * the node's children.
+ */
+
+mxml_node_t * /* O - Next node or NULL */
+mxmlWalkNext(mxml_node_t *node, /* I - Current node */
+ mxml_node_t *top, /* I - Top node */
+ int descend) /* I - Descend into tree - MXML_DESCEND, MXML_NO_DESCEND, or MXML_DESCEND_FIRST */
+{
+ if (!node)
+ return (NULL);
+ else if (node->child && descend)
+ return (node->child);
+ else if (node == top)
+ return (NULL);
+ else if (node->next)
+ return (node->next);
+ else if (node->parent && node->parent != top)
+ {
+ node = node->parent;
+
+ while (!node->next)
+ if (node->parent == top || !node->parent)
+ return (NULL);
+ else
+ node = node->parent;
+
+ return (node->next);
+ }
+ else
+ return (NULL);
+}
+
+
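+/*
+ * Usage sketch (illustrative; "tree" is assumed to be the root node):
+ * visiting every node in a tree in document order.
+ *
+ *   mxml_node_t *node;
+ *
+ *   for (node = tree; node; node = mxmlWalkNext(node, tree, MXML_DESCEND))
+ *   {
+ *     // ... every element, text, and value node is visited once ...
+ *   }
+ */
+
+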
+/*
+ * 'mxmlWalkPrev()' - Walk to the previous logical node in the tree.
+ *
+ * The descend argument controls whether the previous node's last child
+ * is considered to be the previous node. The top node argument constrains
+ * the walk to the node's children.
+ */
+
+mxml_node_t * /* O - Previous node or NULL */
+mxmlWalkPrev(mxml_node_t *node, /* I - Current node */
+ mxml_node_t *top, /* I - Top node */
+ int descend) /* I - Descend into tree - MXML_DESCEND, MXML_NO_DESCEND, or MXML_DESCEND_FIRST */
+{
+ if (!node || node == top)
+ return (NULL);
+ else if (node->prev)
+ {
+ if (node->prev->last_child && descend)
+ {
+ /*
+ * Find the last child under the previous node...
+ */
+
+ node = node->prev->last_child;
+
+ while (node->last_child)
+ node = node->last_child;
+
+ return (node);
+ }
+ else
+ return (node->prev);
+ }
+ else if (node->parent != top)
+ return (node->parent);
+ else
+ return (NULL);
+}
+
+
+/*
+ * End of "$Id: mxml-search.c 427 2011-01-03 02:03:29Z mike $".
+ */
diff --git a/tools/gator/daemon/mxml/mxml-set.c b/tools/gator/daemon/mxml/mxml-set.c
new file mode 100644
index 000000000000..b0bd52790b2e
--- /dev/null
+++ b/tools/gator/daemon/mxml/mxml-set.c
@@ -0,0 +1,349 @@
+/*
+ * "$Id: mxml-set.c 441 2011-12-09 23:49:00Z mike $"
+ *
+ * Node set functions for Mini-XML, a small XML-like file parsing library.
+ *
+ * Copyright 2003-2011 by Michael R Sweet.
+ *
+ * These coded instructions, statements, and computer programs are the
+ * property of Michael R Sweet and are protected by Federal copyright
+ * law. Distribution and use rights are outlined in the file "COPYING"
+ * which should have been included with this file. If this file is
+ * missing or damaged, see the license at:
+ *
+ * http://www.minixml.org/
+ *
+ * Contents:
+ *
+ * mxmlSetCDATA() - Set the element name of a CDATA node.
+ * mxmlSetCustom() - Set the data and destructor of a custom data node.
+ * mxmlSetElement() - Set the name of an element node.
+ * mxmlSetInteger() - Set the value of an integer node.
+ * mxmlSetOpaque() - Set the value of an opaque node.
+ * mxmlSetReal() - Set the value of a real number node.
+ * mxmlSetText() - Set the value of a text node.
+ * mxmlSetTextf() - Set the value of a text node to a formatted string.
+ * mxmlSetUserData() - Set the user data pointer for a node.
+ */
+
+/*
+ * Include necessary headers...
+ */
+
+#include "config.h"
+#include "mxml.h"
+
+
+/*
+ * 'mxmlSetCDATA()' - Set the element name of a CDATA node.
+ *
+ * The node is not changed if it (or its first child) is not a CDATA element node.
+ *
+ * @since Mini-XML 2.3@
+ */
+
+int /* O - 0 on success, -1 on failure */
+mxmlSetCDATA(mxml_node_t *node, /* I - Node to set */
+ const char *data) /* I - New data string */
+{
+ /*
+ * Range check input...
+ */
+
+ if (node && node->type == MXML_ELEMENT &&
+ strncmp(node->value.element.name, "![CDATA[", 8) &&
+ node->child && node->child->type == MXML_ELEMENT &&
+ !strncmp(node->child->value.element.name, "![CDATA[", 8))
+ node = node->child;
+
+ if (!node || node->type != MXML_ELEMENT || !data ||
+ strncmp(node->value.element.name, "![CDATA[", 8))
+ return (-1);
+
+ /*
+ * Free any old element value and set the new value...
+ */
+
+ if (node->value.element.name)
+ free(node->value.element.name);
+
+ node->value.element.name = _mxml_strdupf("![CDATA[%s]]", data);
+
+ return (0);
+}
+
+
+/*
+ * 'mxmlSetCustom()' - Set the data and destructor of a custom data node.
+ *
+ * The node is not changed if it (or its first child) is not a custom node.
+ *
+ * @since Mini-XML 2.1@
+ */
+
+int /* O - 0 on success, -1 on failure */
+mxmlSetCustom(
+ mxml_node_t *node, /* I - Node to set */
+ void *data, /* I - New data pointer */
+ mxml_custom_destroy_cb_t destroy) /* I - New destructor function */
+{
+ /*
+ * Range check input...
+ */
+
+ if (node && node->type == MXML_ELEMENT &&
+ node->child && node->child->type == MXML_CUSTOM)
+ node = node->child;
+
+ if (!node || node->type != MXML_CUSTOM)
+ return (-1);
+
+ /*
+ * Free any old element value and set the new value...
+ */
+
+ if (node->value.custom.data && node->value.custom.destroy)
+ (*(node->value.custom.destroy))(node->value.custom.data);
+
+ node->value.custom.data = data;
+ node->value.custom.destroy = destroy;
+
+ return (0);
+}
+
+
+/*
+ * 'mxmlSetElement()' - Set the name of an element node.
+ *
+ * The node is not changed if it is not an element node.
+ */
+
+int /* O - 0 on success, -1 on failure */
+mxmlSetElement(mxml_node_t *node, /* I - Node to set */
+ const char *name) /* I - New name string */
+{
+ /*
+ * Range check input...
+ */
+
+ if (!node || node->type != MXML_ELEMENT || !name)
+ return (-1);
+
+ /*
+ * Free any old element value and set the new value...
+ */
+
+ if (node->value.element.name)
+ free(node->value.element.name);
+
+ node->value.element.name = strdup(name);
+
+ return (0);
+}
+
+
+/*
+ * 'mxmlSetInteger()' - Set the value of an integer node.
+ *
+ * The node is not changed if it (or its first child) is not an integer node.
+ */
+
+int /* O - 0 on success, -1 on failure */
+mxmlSetInteger(mxml_node_t *node, /* I - Node to set */
+ int integer) /* I - Integer value */
+{
+ /*
+ * Range check input...
+ */
+
+ if (node && node->type == MXML_ELEMENT &&
+ node->child && node->child->type == MXML_INTEGER)
+ node = node->child;
+
+ if (!node || node->type != MXML_INTEGER)
+ return (-1);
+
+ /*
+ * Set the new value and return...
+ */
+
+ node->value.integer = integer;
+
+ return (0);
+}
+
+
+/*
+ * 'mxmlSetOpaque()' - Set the value of an opaque node.
+ *
+ * The node is not changed if it (or its first child) is not an opaque node.
+ */
+
+int /* O - 0 on success, -1 on failure */
+mxmlSetOpaque(mxml_node_t *node, /* I - Node to set */
+ const char *opaque) /* I - Opaque string */
+{
+ /*
+ * Range check input...
+ */
+
+ if (node && node->type == MXML_ELEMENT &&
+ node->child && node->child->type == MXML_OPAQUE)
+ node = node->child;
+
+ if (!node || node->type != MXML_OPAQUE || !opaque)
+ return (-1);
+
+ /*
+ * Free any old opaque value and set the new value...
+ */
+
+ if (node->value.opaque)
+ free(node->value.opaque);
+
+ node->value.opaque = strdup(opaque);
+
+ return (0);
+}
+
+
+/*
+ * 'mxmlSetReal()' - Set the value of a real number node.
+ *
+ * The node is not changed if it (or its first child) is not a real number node.
+ */
+
+int /* O - 0 on success, -1 on failure */
+mxmlSetReal(mxml_node_t *node, /* I - Node to set */
+ double real) /* I - Real number value */
+{
+ /*
+ * Range check input...
+ */
+
+ if (node && node->type == MXML_ELEMENT &&
+ node->child && node->child->type == MXML_REAL)
+ node = node->child;
+
+ if (!node || node->type != MXML_REAL)
+ return (-1);
+
+ /*
+ * Set the new value and return...
+ */
+
+ node->value.real = real;
+
+ return (0);
+}
+
+
+/*
+ * 'mxmlSetText()' - Set the value of a text node.
+ *
+ * The node is not changed if it (or its first child) is not a text node.
+ */
+
+int /* O - 0 on success, -1 on failure */
+mxmlSetText(mxml_node_t *node, /* I - Node to set */
+ int whitespace, /* I - 1 = leading whitespace, 0 = no whitespace */
+ const char *string) /* I - String */
+{
+ /*
+ * Range check input...
+ */
+
+ if (node && node->type == MXML_ELEMENT &&
+ node->child && node->child->type == MXML_TEXT)
+ node = node->child;
+
+ if (!node || node->type != MXML_TEXT || !string)
+ return (-1);
+
+ /*
+ * Free any old string value and set the new value...
+ */
+
+ if (node->value.text.string)
+ free(node->value.text.string);
+
+ node->value.text.whitespace = whitespace;
+ node->value.text.string = strdup(string);
+
+ return (0);
+}
+
+
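+/*
+ * Usage sketch (illustrative; the path is invented for the example, and the
+ * node found below is expected to be a text node or an element whose first
+ * child is one): updating a value in place.
+ *
+ *   mxml_node_t *node = mxmlFindPath(tree, "config/hostname");
+ *
+ *   if (mxmlSetText(node, 0, "new-hostname") < 0)
+ *   {
+ *     // ... the node was not a text node, handle the error ...
+ *   }
+ */
+
+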
+/*
+ * 'mxmlSetTextf()' - Set the value of a text node to a formatted string.
+ *
+ * The node is not changed if it (or its first child) is not a text node.
+ */
+
+int /* O - 0 on success, -1 on failure */
+mxmlSetTextf(mxml_node_t *node, /* I - Node to set */
+ int whitespace, /* I - 1 = leading whitespace, 0 = no whitespace */
+ const char *format, /* I - Printf-style format string */
+ ...) /* I - Additional arguments as needed */
+{
+ va_list ap; /* Pointer to arguments */
+
+
+ /*
+ * Range check input...
+ */
+
+ if (node && node->type == MXML_ELEMENT &&
+ node->child && node->child->type == MXML_TEXT)
+ node = node->child;
+
+ if (!node || node->type != MXML_TEXT || !format)
+ return (-1);
+
+ /*
+ * Free any old string value and set the new value...
+ */
+
+ if (node->value.text.string)
+ free(node->value.text.string);
+
+ va_start(ap, format);
+
+ node->value.text.whitespace = whitespace;
+ node->value.text.string = _mxml_strdupf(format, ap);
+
+ va_end(ap);
+
+ return (0);
+}
+
+
+/*
+ * 'mxmlSetUserData()' - Set the user data pointer for a node.
+ *
+ * @since Mini-XML 2.7@
+ */
+
+int /* O - 0 on success, -1 on failure */
+mxmlSetUserData(mxml_node_t *node, /* I - Node to set */
+ void *data) /* I - User data pointer */
+{
+ /*
+ * Range check input...
+ */
+
+ if (!node)
+ return (-1);
+
+ /*
+ * Set the user data pointer and return...
+ */
+
+ node->user_data = data;
+ return (0);
+}
+
+
+/*
+ * End of "$Id: mxml-set.c 441 2011-12-09 23:49:00Z mike $".
+ */
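Most of the mxmlSet*() value setters above (mxmlSetInteger(), mxmlSetOpaque(), mxmlSetReal(), mxmlSetText()) accept either the value node itself or its parent element, descending to the first child of the matching type, and return -1 without touching the tree on a type mismatch. A minimal sketch of how that is used in practice (hypothetical element names, assuming the mxmlNew*() constructors declared in mxml.h further below):

#include "mxml.h"

/* Build <config><samples>0</samples></config>, then update the value later. */
static void update_sample_count(int count)
{
  mxml_node_t *xml     = mxmlNewXML("1.0");
  mxml_node_t *config  = mxmlNewElement(xml, "config");
  mxml_node_t *samples = mxmlNewElement(config, "samples");

  mxmlNewInteger(samples, 0);            /* first child is an MXML_INTEGER node */

  /* Passing the element is enough; mxmlSetInteger() descends to that child. */
  if (mxmlSetInteger(samples, count) < 0)
    fprintf(stderr, "samples is not an integer node\n");

  mxmlDelete(xml);
}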
diff --git a/tools/gator/daemon/mxml/mxml-string.c b/tools/gator/daemon/mxml/mxml-string.c
new file mode 100644
index 000000000000..6be42523f95c
--- /dev/null
+++ b/tools/gator/daemon/mxml/mxml-string.c
@@ -0,0 +1,476 @@
+/*
+ * "$Id: mxml-string.c 424 2010-12-25 16:21:50Z mike $"
+ *
+ * String functions for Mini-XML, a small XML-like file parsing library.
+ *
+ * Copyright 2003-2010 by Michael R Sweet.
+ *
+ * These coded instructions, statements, and computer programs are the
+ * property of Michael R Sweet and are protected by Federal copyright
+ * law. Distribution and use rights are outlined in the file "COPYING"
+ * which should have been included with this file. If this file is
+ * missing or damaged, see the license at:
+ *
+ * http://www.minixml.org/
+ *
+ * Contents:
+ *
+ * _mxml_snprintf() - Format a string.
+ * _mxml_strdup() - Duplicate a string.
+ * _mxml_strdupf() - Format and duplicate a string.
+ * _mxml_vsnprintf() - Format a string into a fixed size buffer.
+ * _mxml_vstrdupf() - Format and duplicate a string.
+ */
+
+/*
+ * Include necessary headers...
+ */
+
+#include "config.h"
+
+
+/*
+ * The va_copy macro is part of C99, but many compilers don't implement it.
+ * Provide a "direct assignment" implementation when va_copy isn't defined...
+ */
+
+#ifndef va_copy
+# ifdef __va_copy
+# define va_copy(dst,src) __va_copy(dst,src)
+# else
+# define va_copy(dst,src) memcpy(&dst, &src, sizeof(va_list))
+# endif /* __va_copy */
+#endif /* va_copy */
+
+
+#ifndef HAVE_SNPRINTF
+/*
+ * '_mxml_snprintf()' - Format a string.
+ */
+
+int /* O - Number of bytes formatted */
+_mxml_snprintf(char *buffer, /* I - Output buffer */
+ size_t bufsize, /* I - Size of output buffer */
+ const char *format, /* I - Printf-style format string */
+ ...) /* I - Additional arguments as needed */
+{
+ va_list ap; /* Argument list */
+ int bytes; /* Number of bytes formatted */
+
+
+ va_start(ap, format);
+ bytes = vsnprintf(buffer, bufsize, format, ap);
+ va_end(ap);
+
+ return (bytes);
+}
+#endif /* !HAVE_SNPRINTF */
+
+
+/*
+ * '_mxml_strdup()' - Duplicate a string.
+ */
+
+#ifndef HAVE_STRDUP
+char * /* O - New string pointer */
+_mxml_strdup(const char *s) /* I - String to duplicate */
+{
+ char *t; /* New string pointer */
+
+
+ if (s == NULL)
+ return (NULL);
+
+ if ((t = malloc(strlen(s) + 1)) == NULL)
+ return (NULL);
+
+ return (strcpy(t, s));
+}
+#endif /* !HAVE_STRDUP */
+
+
+/*
+ * '_mxml_strdupf()' - Format and duplicate a string.
+ */
+
+char * /* O - New string pointer */
+_mxml_strdupf(const char *format, /* I - Printf-style format string */
+ ...) /* I - Additional arguments as needed */
+{
+ va_list ap; /* Pointer to additional arguments */
+ char *s; /* Pointer to formatted string */
+
+
+ /*
+ * Get a pointer to the additional arguments, format the string,
+ * and return it...
+ */
+
+ va_start(ap, format);
+ s = _mxml_vstrdupf(format, ap);
+ va_end(ap);
+
+ return (s);
+}
+
+
+#ifndef HAVE_VSNPRINTF
+/*
+ * '_mxml_vsnprintf()' - Format a string into a fixed size buffer.
+ */
+
+int /* O - Number of bytes formatted */
+_mxml_vsnprintf(char *buffer, /* O - Output buffer */
+ size_t bufsize, /* I - Size of output buffer */
+ const char *format, /* I - Printf-style format string */
+ va_list ap) /* I - Pointer to additional arguments */
+{
+ char *bufptr, /* Pointer to position in buffer */
+ *bufend, /* Pointer to end of buffer */
+ sign, /* Sign of format width */
+ size, /* Size character (h, l, L) */
+ type; /* Format type character */
+ int width, /* Width of field */
+ prec; /* Number of characters of precision */
+ char tformat[100], /* Temporary format string for sprintf() */
+ *tptr, /* Pointer into temporary format */
+ temp[1024]; /* Buffer for formatted numbers */
+ char *s; /* Pointer to string */
+ int slen; /* Length of string */
+ int bytes; /* Total number of bytes needed */
+
+
+ /*
+ * Loop through the format string, formatting as needed...
+ */
+
+ bufptr = buffer;
+ bufend = buffer + bufsize - 1;
+ bytes = 0;
+
+ while (*format)
+ {
+ if (*format == '%')
+ {
+ tptr = tformat;
+ *tptr++ = *format++;
+
+ if (*format == '%')
+ {
+ if (bufptr && bufptr < bufend) *bufptr++ = *format;
+ bytes ++;
+ format ++;
+ continue;
+ }
+ else if (strchr(" -+#\'", *format))
+ {
+ *tptr++ = *format;
+ sign = *format++;
+ }
+ else
+ sign = 0;
+
+ if (*format == '*')
+ {
+ /*
+ * Get width from argument...
+ */
+
+ format ++;
+ width = va_arg(ap, int);
+
+ snprintf(tptr, sizeof(tformat) - (tptr - tformat), "%d", width);
+ tptr += strlen(tptr);
+ }
+ else
+ {
+ width = 0;
+
+ while (isdigit(*format & 255))
+ {
+ if (tptr < (tformat + sizeof(tformat) - 1))
+ *tptr++ = *format;
+
+ width = width * 10 + *format++ - '0';
+ }
+ }
+
+ if (*format == '.')
+ {
+ if (tptr < (tformat + sizeof(tformat) - 1))
+ *tptr++ = *format;
+
+ format ++;
+
+ if (*format == '*')
+ {
+ /*
+ * Get precision from argument...
+ */
+
+ format ++;
+ prec = va_arg(ap, int);
+
+ snprintf(tptr, sizeof(tformat) - (tptr - tformat), "%d", prec);
+ tptr += strlen(tptr);
+ }
+ else
+ {
+ prec = 0;
+
+ while (isdigit(*format & 255))
+ {
+ if (tptr < (tformat + sizeof(tformat) - 1))
+ *tptr++ = *format;
+
+ prec = prec * 10 + *format++ - '0';
+ }
+ }
+ }
+ else
+ prec = -1;
+
+ if (*format == 'l' && format[1] == 'l')
+ {
+ size = 'L';
+
+ if (tptr < (tformat + sizeof(tformat) - 2))
+ {
+ *tptr++ = 'l';
+ *tptr++ = 'l';
+ }
+
+ format += 2;
+ }
+ else if (*format == 'h' || *format == 'l' || *format == 'L')
+ {
+ if (tptr < (tformat + sizeof(tformat) - 1))
+ *tptr++ = *format;
+
+ size = *format++;
+ }
+
+ if (!*format)
+ break;
+
+ if (tptr < (tformat + sizeof(tformat) - 1))
+ *tptr++ = *format;
+
+ type = *format++;
+ *tptr = '\0';
+
+ switch (type)
+ {
+ case 'E' : /* Floating point formats */
+ case 'G' :
+ case 'e' :
+ case 'f' :
+ case 'g' :
+ if ((width + 2) > sizeof(temp))
+ break;
+
+ sprintf(temp, tformat, va_arg(ap, double));
+
+ bytes += strlen(temp);
+
+ if (bufptr)
+ {
+ if ((bufptr + strlen(temp)) > bufend)
+ {
+ strncpy(bufptr, temp, (size_t)(bufend - bufptr));
+ bufptr = bufend;
+ }
+ else
+ {
+ strcpy(bufptr, temp);
+ bufptr += strlen(temp);
+ }
+ }
+ break;
+
+ case 'B' : /* Integer formats */
+ case 'X' :
+ case 'b' :
+ case 'd' :
+ case 'i' :
+ case 'o' :
+ case 'u' :
+ case 'x' :
+ if ((width + 2) > sizeof(temp))
+ break;
+
+#ifdef HAVE_LONG_LONG
+ if (size == 'L')
+ sprintf(temp, tformat, va_arg(ap, long long));
+ else
+#endif /* HAVE_LONG_LONG */
+ sprintf(temp, tformat, va_arg(ap, int));
+
+ bytes += strlen(temp);
+
+ if (bufptr)
+ {
+ if ((bufptr + strlen(temp)) > bufend)
+ {
+ strncpy(bufptr, temp, (size_t)(bufend - bufptr));
+ bufptr = bufend;
+ }
+ else
+ {
+ strcpy(bufptr, temp);
+ bufptr += strlen(temp);
+ }
+ }
+ break;
+
+ case 'p' : /* Pointer value */
+ if ((width + 2) > sizeof(temp))
+ break;
+
+ sprintf(temp, tformat, va_arg(ap, void *));
+
+ bytes += strlen(temp);
+
+ if (bufptr)
+ {
+ if ((bufptr + strlen(temp)) > bufend)
+ {
+ strncpy(bufptr, temp, (size_t)(bufend - bufptr));
+ bufptr = bufend;
+ }
+ else
+ {
+ strcpy(bufptr, temp);
+ bufptr += strlen(temp);
+ }
+ }
+ break;
+
+ case 'c' : /* Character or character array */
+ bytes += width;
+
+ if (bufptr)
+ {
+ if (width <= 1)
+ *bufptr++ = va_arg(ap, int);
+ else
+ {
+ if ((bufptr + width) > bufend)
+ width = bufend - bufptr;
+
+ memcpy(bufptr, va_arg(ap, char *), (size_t)width);
+ bufptr += width;
+ }
+ }
+ break;
+
+ case 's' : /* String */
+ if ((s = va_arg(ap, char *)) == NULL)
+ s = "(null)";
+
+ slen = strlen(s);
+ if (slen > width && prec != width)
+ width = slen;
+
+ bytes += width;
+
+ if (bufptr)
+ {
+ if ((bufptr + width) > bufend)
+ width = bufend - bufptr;
+
+ if (slen > width)
+ slen = width;
+
+ if (sign == '-')
+ {
+ strncpy(bufptr, s, (size_t)slen);
+ memset(bufptr + slen, ' ', (size_t)(width - slen));
+ }
+ else
+ {
+ memset(bufptr, ' ', (size_t)(width - slen));
+ strncpy(bufptr + width - slen, s, (size_t)slen);
+ }
+
+ bufptr += width;
+ }
+ break;
+
+ case 'n' : /* Output number of chars so far */
+ *(va_arg(ap, int *)) = bytes;
+ break;
+ }
+ }
+ else
+ {
+ bytes ++;
+
+ if (bufptr && bufptr < bufend)
+ *bufptr++ = *format;
+
+ format ++;
+ }
+ }
+
+ /*
+ * Nul-terminate the string and return the number of characters needed.
+ */
+
+ *bufptr = '\0';
+
+ return (bytes);
+}
+#endif /* !HAVE_VSNPRINTF */
+
+
+/*
+ * '_mxml_vstrdupf()' - Format and duplicate a string.
+ */
+
+char * /* O - New string pointer */
+_mxml_vstrdupf(const char *format, /* I - Printf-style format string */
+ va_list ap) /* I - Pointer to additional arguments */
+{
+ int bytes; /* Number of bytes required */
+ char *buffer, /* String buffer */
+ temp[256]; /* Small buffer for first vsnprintf */
+ va_list apcopy; /* Copy of argument list */
+
+
+ /*
+ * First format with a tiny buffer; this will tell us how many bytes are
+ * needed...
+ */
+
+ va_copy(apcopy, ap);
+ bytes = vsnprintf(temp, sizeof(temp), format, apcopy);
+
+ if (bytes < sizeof(temp))
+ {
+ /*
+ * Hey, the formatted string fits in the tiny buffer, so just dup that...
+ */
+
+ return (strdup(temp));
+ }
+
+ /*
+ * Allocate memory for the whole thing and reformat to the new, larger
+ * buffer...
+ */
+
+ if ((buffer = calloc(1, bytes + 1)) != NULL)
+ vsnprintf(buffer, bytes + 1, format, ap);
+
+ /*
+ * Return the new string...
+ */
+
+ return (buffer);
+}
+
+
+/*
+ * End of "$Id: mxml-string.c 424 2010-12-25 16:21:50Z mike $".
+ */
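_mxml_vstrdupf() above sizes its allocation with a two-pass vsnprintf(): it first formats into a small stack buffer through a va_copy'd argument list to learn the required length, then formats again into an exactly-sized heap buffer. The same idiom in standalone form (a sketch for illustration, not part of the library):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Format into a freshly allocated string; the caller frees the result. */
static char *vstrdupf_sketch(const char *format, va_list ap)
{
  va_list apcopy;                        /* ap is consumed twice, so copy it */
  char    temp[256];
  char    *buffer;
  int     bytes;

  va_copy(apcopy, ap);
  bytes = vsnprintf(temp, sizeof(temp), format, apcopy);
  va_end(apcopy);

  if (bytes < 0)
    return (NULL);

  if ((size_t)bytes < sizeof(temp))
    return (strdup(temp));               /* already fits in the small buffer */

  if ((buffer = malloc((size_t)bytes + 1)) != NULL)
    vsnprintf(buffer, (size_t)bytes + 1, format, ap);

  return (buffer);
}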
diff --git a/tools/gator/daemon/mxml/mxml.h b/tools/gator/daemon/mxml/mxml.h
new file mode 100644
index 000000000000..79c711f4c80f
--- /dev/null
+++ b/tools/gator/daemon/mxml/mxml.h
@@ -0,0 +1,329 @@
+/*
+ * "$Id: mxml.h 427 2011-01-03 02:03:29Z mike $"
+ *
+ * Header file for Mini-XML, a small XML-like file parsing library.
+ *
+ * Copyright 2003-2011 by Michael R Sweet.
+ *
+ * These coded instructions, statements, and computer programs are the
+ * property of Michael R Sweet and are protected by Federal copyright
+ * law. Distribution and use rights are outlined in the file "COPYING"
+ * which should have been included with this file. If this file is
+ * missing or damaged, see the license at:
+ *
+ * http://www.minixml.org/
+ */
+
+/*
+ * Prevent multiple inclusion...
+ */
+
+#ifndef _mxml_h_
+# define _mxml_h_
+
+/*
+ * Include necessary headers...
+ */
+
+# include <stdio.h>
+# include <stdlib.h>
+# include <string.h>
+# include <ctype.h>
+# include <errno.h>
+
+
+/*
+ * Constants...
+ */
+
+# define MXML_TAB 8 /* Tabs every N columns */
+
+# define MXML_NO_CALLBACK 0 /* Don't use a type callback */
+# define MXML_INTEGER_CALLBACK mxml_integer_cb
+ /* Treat all data as integers */
+# define MXML_OPAQUE_CALLBACK mxml_opaque_cb
+ /* Treat all data as opaque */
+# define MXML_REAL_CALLBACK mxml_real_cb
+ /* Treat all data as real numbers */
+# define MXML_TEXT_CALLBACK 0 /* Treat all data as text */
+# define MXML_IGNORE_CALLBACK mxml_ignore_cb
+ /* Ignore all non-element content */
+
+# define MXML_NO_PARENT 0 /* No parent for the node */
+
+# define MXML_DESCEND 1 /* Descend when finding/walking */
+# define MXML_NO_DESCEND 0 /* Don't descend when finding/walking */
+# define MXML_DESCEND_FIRST -1 /* Descend for first find */
+
+# define MXML_WS_BEFORE_OPEN 0 /* Callback for before open tag */
+# define MXML_WS_AFTER_OPEN 1 /* Callback for after open tag */
+# define MXML_WS_BEFORE_CLOSE 2 /* Callback for before close tag */
+# define MXML_WS_AFTER_CLOSE 3 /* Callback for after close tag */
+
+# define MXML_ADD_BEFORE 0 /* Add node before specified node */
+# define MXML_ADD_AFTER 1 /* Add node after specified node */
+# define MXML_ADD_TO_PARENT NULL /* Add node relative to parent */
+
+
+/*
+ * Data types...
+ */
+
+typedef enum mxml_sax_event_e /**** SAX event type. ****/
+{
+ MXML_SAX_CDATA, /* CDATA node */
+ MXML_SAX_COMMENT, /* Comment node */
+ MXML_SAX_DATA, /* Data node */
+ MXML_SAX_DIRECTIVE, /* Processing directive node */
+ MXML_SAX_ELEMENT_CLOSE, /* Element closed */
+ MXML_SAX_ELEMENT_OPEN /* Element opened */
+} mxml_sax_event_t;
+
+typedef enum mxml_type_e /**** The XML node type. ****/
+{
+ MXML_IGNORE = -1, /* Ignore/throw away node @since Mini-XML 2.3@ */
+ MXML_ELEMENT, /* XML element with attributes */
+ MXML_INTEGER, /* Integer value */
+ MXML_OPAQUE, /* Opaque string */
+ MXML_REAL, /* Real value */
+ MXML_TEXT, /* Text fragment */
+ MXML_CUSTOM /* Custom data @since Mini-XML 2.1@ */
+} mxml_type_t;
+
+typedef void (*mxml_custom_destroy_cb_t)(void *);
+ /**** Custom data destructor ****/
+
+typedef void (*mxml_error_cb_t)(const char *);
+ /**** Error callback function ****/
+
+typedef struct mxml_attr_s /**** An XML element attribute value. @private@ ****/
+{
+ char *name; /* Attribute name */
+ char *value; /* Attribute value */
+} mxml_attr_t;
+
+typedef struct mxml_element_s /**** An XML element value. @private@ ****/
+{
+ char *name; /* Name of element */
+ int num_attrs; /* Number of attributes */
+ mxml_attr_t *attrs; /* Attributes */
+} mxml_element_t;
+
+typedef struct mxml_text_s /**** An XML text value. @private@ ****/
+{
+ int whitespace; /* Leading whitespace? */
+ char *string; /* Fragment string */
+} mxml_text_t;
+
+typedef struct mxml_custom_s /**** An XML custom value. @private@ ****/
+{
+ void *data; /* Pointer to (allocated) custom data */
+ mxml_custom_destroy_cb_t destroy; /* Pointer to destructor function */
+} mxml_custom_t;
+
+typedef union mxml_value_u /**** An XML node value. @private@ ****/
+{
+ mxml_element_t element; /* Element */
+ int integer; /* Integer number */
+ char *opaque; /* Opaque string */
+ double real; /* Real number */
+ mxml_text_t text; /* Text fragment */
+ mxml_custom_t custom; /* Custom data @since Mini-XML 2.1@ */
+} mxml_value_t;
+
+struct mxml_node_s /**** An XML node. @private@ ****/
+{
+ mxml_type_t type; /* Node type */
+ struct mxml_node_s *next; /* Next node under same parent */
+ struct mxml_node_s *prev; /* Previous node under same parent */
+ struct mxml_node_s *parent; /* Parent node */
+ struct mxml_node_s *child; /* First child node */
+ struct mxml_node_s *last_child; /* Last child node */
+ mxml_value_t value; /* Node value */
+ int ref_count; /* Use count */
+ void *user_data; /* User data */
+};
+
+typedef struct mxml_node_s mxml_node_t; /**** An XML node. ****/
+
+struct mxml_index_s /**** An XML node index. @private@ ****/
+{
+ char *attr; /* Attribute used for indexing or NULL */
+ int num_nodes; /* Number of nodes in index */
+ int alloc_nodes; /* Allocated nodes in index */
+ int cur_node; /* Current node */
+ mxml_node_t **nodes; /* Node array */
+};
+
+typedef struct mxml_index_s mxml_index_t;
+ /**** An XML node index. ****/
+
+typedef int (*mxml_custom_load_cb_t)(mxml_node_t *, const char *);
+ /**** Custom data load callback function ****/
+
+typedef char *(*mxml_custom_save_cb_t)(mxml_node_t *);
+ /**** Custom data save callback function ****/
+
+typedef int (*mxml_entity_cb_t)(const char *);
+ /**** Entity callback function ****/
+
+typedef mxml_type_t (*mxml_load_cb_t)(mxml_node_t *);
+ /**** Load callback function ****/
+
+typedef const char *(*mxml_save_cb_t)(mxml_node_t *, int);
+ /**** Save callback function ****/
+
+typedef void (*mxml_sax_cb_t)(mxml_node_t *, mxml_sax_event_t, void *);
+ /**** SAX callback function ****/
+
+
+/*
+ * C++ support...
+ */
+
+# ifdef __cplusplus
+extern "C" {
+# endif /* __cplusplus */
+
+/*
+ * Prototypes...
+ */
+
+extern void mxmlAdd(mxml_node_t *parent, int where,
+ mxml_node_t *child, mxml_node_t *node);
+extern void mxmlDelete(mxml_node_t *node);
+extern void mxmlElementDeleteAttr(mxml_node_t *node,
+ const char *name);
+extern const char *mxmlElementGetAttr(mxml_node_t *node, const char *name);
+extern void mxmlElementSetAttr(mxml_node_t *node, const char *name,
+ const char *value);
+extern void mxmlElementSetAttrf(mxml_node_t *node, const char *name,
+ const char *format, ...)
+# ifdef __GNUC__
+__attribute__ ((__format__ (__printf__, 3, 4)))
+# endif /* __GNUC__ */
+;
+extern int mxmlEntityAddCallback(mxml_entity_cb_t cb);
+extern const char *mxmlEntityGetName(int val);
+extern int mxmlEntityGetValue(const char *name);
+extern void mxmlEntityRemoveCallback(mxml_entity_cb_t cb);
+extern mxml_node_t *mxmlFindElement(mxml_node_t *node, mxml_node_t *top,
+ const char *name, const char *attr,
+ const char *value, int descend);
+extern mxml_node_t *mxmlFindPath(mxml_node_t *node, const char *path);
+extern const char *mxmlGetCDATA(mxml_node_t *node);
+extern const void *mxmlGetCustom(mxml_node_t *node);
+extern const char *mxmlGetElement(mxml_node_t *node);
+extern mxml_node_t *mxmlGetFirstChild(mxml_node_t *node);
+extern int mxmlGetInteger(mxml_node_t *node);
+extern mxml_node_t *mxmlGetLastChild(mxml_node_t *node);
+extern mxml_node_t *mxmlGetNextSibling(mxml_node_t *node);
+extern const char *mxmlGetOpaque(mxml_node_t *node);
+extern mxml_node_t *mxmlGetParent(mxml_node_t *node);
+extern mxml_node_t *mxmlGetPrevSibling(mxml_node_t *node);
+extern double mxmlGetReal(mxml_node_t *node);
+extern int mxmlGetRefCount(mxml_node_t *node);
+extern const char *mxmlGetText(mxml_node_t *node, int *whitespace);
+extern mxml_type_t mxmlGetType(mxml_node_t *node);
+extern void *mxmlGetUserData(mxml_node_t *node);
+extern void mxmlIndexDelete(mxml_index_t *ind);
+extern mxml_node_t *mxmlIndexEnum(mxml_index_t *ind);
+extern mxml_node_t *mxmlIndexFind(mxml_index_t *ind,
+ const char *element,
+ const char *value);
+extern int mxmlIndexGetCount(mxml_index_t *ind);
+extern mxml_index_t *mxmlIndexNew(mxml_node_t *node, const char *element,
+ const char *attr);
+extern mxml_node_t *mxmlIndexReset(mxml_index_t *ind);
+extern mxml_node_t *mxmlLoadFd(mxml_node_t *top, int fd,
+ mxml_type_t (*cb)(mxml_node_t *));
+extern mxml_node_t *mxmlLoadFile(mxml_node_t *top, FILE *fp,
+ mxml_type_t (*cb)(mxml_node_t *));
+extern mxml_node_t *mxmlLoadString(mxml_node_t *top, const char *s,
+ mxml_type_t (*cb)(mxml_node_t *));
+extern mxml_node_t *mxmlNewCDATA(mxml_node_t *parent, const char *string);
+extern mxml_node_t *mxmlNewCustom(mxml_node_t *parent, void *data,
+ mxml_custom_destroy_cb_t destroy);
+extern mxml_node_t *mxmlNewElement(mxml_node_t *parent, const char *name);
+extern mxml_node_t *mxmlNewInteger(mxml_node_t *parent, int integer);
+extern mxml_node_t *mxmlNewOpaque(mxml_node_t *parent, const char *opaque);
+extern mxml_node_t *mxmlNewReal(mxml_node_t *parent, double real);
+extern mxml_node_t *mxmlNewText(mxml_node_t *parent, int whitespace,
+ const char *string);
+extern mxml_node_t *mxmlNewTextf(mxml_node_t *parent, int whitespace,
+ const char *format, ...)
+# ifdef __GNUC__
+__attribute__ ((__format__ (__printf__, 3, 4)))
+# endif /* __GNUC__ */
+;
+extern mxml_node_t *mxmlNewXML(const char *version);
+extern int mxmlRelease(mxml_node_t *node);
+extern void mxmlRemove(mxml_node_t *node);
+extern int mxmlRetain(mxml_node_t *node);
+extern char *mxmlSaveAllocString(mxml_node_t *node,
+ mxml_save_cb_t cb);
+extern int mxmlSaveFd(mxml_node_t *node, int fd,
+ mxml_save_cb_t cb);
+extern int mxmlSaveFile(mxml_node_t *node, FILE *fp,
+ mxml_save_cb_t cb);
+extern int mxmlSaveString(mxml_node_t *node, char *buffer,
+ int bufsize, mxml_save_cb_t cb);
+extern mxml_node_t *mxmlSAXLoadFd(mxml_node_t *top, int fd,
+ mxml_type_t (*cb)(mxml_node_t *),
+ mxml_sax_cb_t sax, void *sax_data);
+extern mxml_node_t *mxmlSAXLoadFile(mxml_node_t *top, FILE *fp,
+ mxml_type_t (*cb)(mxml_node_t *),
+ mxml_sax_cb_t sax, void *sax_data);
+extern mxml_node_t *mxmlSAXLoadString(mxml_node_t *top, const char *s,
+ mxml_type_t (*cb)(mxml_node_t *),
+ mxml_sax_cb_t sax, void *sax_data);
+extern int mxmlSetCDATA(mxml_node_t *node, const char *data);
+extern int mxmlSetCustom(mxml_node_t *node, void *data,
+ mxml_custom_destroy_cb_t destroy);
+extern void mxmlSetCustomHandlers(mxml_custom_load_cb_t load,
+ mxml_custom_save_cb_t save);
+extern int mxmlSetElement(mxml_node_t *node, const char *name);
+extern void mxmlSetErrorCallback(mxml_error_cb_t cb);
+extern int mxmlSetInteger(mxml_node_t *node, int integer);
+extern int mxmlSetOpaque(mxml_node_t *node, const char *opaque);
+extern int mxmlSetReal(mxml_node_t *node, double real);
+extern int mxmlSetText(mxml_node_t *node, int whitespace,
+ const char *string);
+extern int mxmlSetTextf(mxml_node_t *node, int whitespace,
+ const char *format, ...)
+# ifdef __GNUC__
+__attribute__ ((__format__ (__printf__, 3, 4)))
+# endif /* __GNUC__ */
+;
+extern int mxmlSetUserData(mxml_node_t *node, void *data);
+extern void mxmlSetWrapMargin(int column);
+extern mxml_node_t *mxmlWalkNext(mxml_node_t *node, mxml_node_t *top,
+ int descend);
+extern mxml_node_t *mxmlWalkPrev(mxml_node_t *node, mxml_node_t *top,
+ int descend);
+
+
+/*
+ * Semi-private functions...
+ */
+
+extern void mxml_error(const char *format, ...);
+extern mxml_type_t mxml_ignore_cb(mxml_node_t *node);
+extern mxml_type_t mxml_integer_cb(mxml_node_t *node);
+extern mxml_type_t mxml_opaque_cb(mxml_node_t *node);
+extern mxml_type_t mxml_real_cb(mxml_node_t *node);
+
+
+/*
+ * C++ support...
+ */
+
+# ifdef __cplusplus
+}
+# endif /* __cplusplus */
+#endif /* !_mxml_h_ */
+
+
+/*
+ * End of "$Id: mxml.h 427 2011-01-03 02:03:29Z mike $".
+ */
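The prototypes above give the usual load/find/read flow. A small sketch (hypothetical document and element names) of parsing a string and reading one value back out:

#include "mxml.h"

static int read_port(void)
{
  const char  *doc  = "<?xml version=\"1.0\"?><server><port>8080</port></server>";
  mxml_node_t *tree = mxmlLoadString(NULL, doc, MXML_INTEGER_CALLBACK);
  mxml_node_t *node;
  int         port  = -1;

  if (tree)
  {
    /* MXML_DESCEND searches the whole subtree below the starting node. */
    node = mxmlFindElement(tree, tree, "port", NULL, NULL, MXML_DESCEND);

    if (node && mxmlGetFirstChild(node))
      port = mxmlGetInteger(mxmlGetFirstChild(node));

    mxmlDelete(tree);
  }

  return (port);
}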
diff --git a/tools/gator/debian/changelog b/tools/gator/debian/changelog
new file mode 100644
index 000000000000..a5f5f9d22f0b
--- /dev/null
+++ b/tools/gator/debian/changelog
@@ -0,0 +1,132 @@
+gator (5.13.0-20130115161200) quantal; urgency=low
+
+ * Automatically packaged from git repository:
+ URL:
+ Tag:
+ Branch:
+ Commit: 6caa6478179ce1af5ca0e70a8d4d7fa4ea6f5ea3
+
+ -- Jon Medhurst (Tixy) <jon.medhurst@linaro.org> Tue, 15 Jan 2013 16:12:00 +0000
+
+gator (5.13.0) quantal; urgency=low
+
+ * New release from ARM.
+
+ -- Jon Medhurst (Tixy) <jon.medhurst@linaro.org> Mon, 14 Jan 2013 10:04:42 +0000
+
+gator (5.12.0) precise; urgency=low
+
+ * New release from ARM.
+
+ -- Jon Medhurst (Tixy) <jon.medhurst@linaro.org> Wed, 24 Oct 2012 17:08:17 +0000
+
+gator (5.11.0) precise; urgency=low
+
+ * New release from ARM.
+
+ -- Jon Medhurst (Tixy) <jon.medhurst@linaro.org> Fri, 10 Aug 2012 14:34:23 +0000
+
+gator (5.10.1) precise; urgency=low
+
+ * Add gator-module-dkms package.
+
+ -- Jon Medhurst (Tixy) <jon.medhurst@linaro.org> Mon, 21 May 2012 15:59:10 +0000
+
+gator (5.10.0) precise; urgency=low
+
+ * New release from ARM.
+
+ -- Jon Medhurst (Tixy) <jon.medhurst@linaro.org> Thu, 10 May 2012 16:47:32 +0000
+
+gator (5.9.7) precise; urgency=low
+
+ * New source and binary package layout with automated packaging scripts.
+
+ -- Jon Medhurst (Tixy) <jon.medhurst@linaro.org> Wed, 2 May 2012 17:01:18 +0000
+
+gator-daemon (5.9-6) precise; urgency=low
+
+ * Remove Depends: gator-module* as Linaro kernels should
+ now all contain the required module.
+
+ -- Jon Medhurst (Tixy) <jon.medhurst@linaro.org> Wed, 25 Apr 2012 18:06:28 +0000
+
+gator-daemon (5.9-5) precise; urgency=low
+
+ * Move to armhf and Precise
+
+ -- Jon Medhurst (Tixy) <jon.medhurst@linaro.org> Wed, 25 Apr 2012 10:16:00 +0000
+
+gator-daemon (5.9-4) oneiric; urgency=low
+
+ * Add alternative Depends on gator-module-in-tree.
+
+ -- Jon Medhurst (Tixy) <jon.medhurst@linaro.org> Fri, 16 Mar 2012 18:24:27 +0000
+
+gator-daemon (5.9-3) oneiric; urgency=low
+
+ * Add missing build depends: 'quilt'.
+
+ -- Jon Medhurst (Tixy) <jon.medhurst@linaro.org> Thu, 23 Feb 2012 13:59:08 +0000
+
+gator-daemon (5.9-2) oneiric; urgency=low
+
+ [ Wade Cherry ]
+ * Fix counters initialisation
+
+ -- Jon Medhurst (Tixy) <jon.medhurst@linaro.org> Thu, 23 Feb 2012 12:06:33 +0000
+
+gator-daemon (5.9-1) oneiric; urgency=low
+
+ * New upstream release (DS-5.9)
+
+ -- Jon Medhurst (Tixy) <jon.medhurst@linaro.org> Mon, 20 Feb 2012 10:35:47 +0000
+
+gator-daemon (5.9~rc1-1) oneiric; urgency=low
+
+ * New upstream release (DS-5.9)
+
+ -- Jon Medhurst (Tixy) <jon.medhurst@linaro.org> Wed, 15 Feb 2012 14:38:26 +0000
+
+gator-daemon (5.8-1) oneiric; urgency=low
+
+ * New upstream release
+
+ -- Ricardo Salveti de Araujo <ricardo.salveti@linaro.org> Fri, 25 Nov 2011 12:41:57 -0200
+
+gator-daemon (5.8~rc2-1) oneiric; urgency=low
+
+ * New upstream version (DS-5.8~rc2)
+
+ -- Ricardo Salveti de Araujo <ricardo.salveti@linaro.org> Wed, 16 Nov 2011 15:30:07 -0200
+
+gator-daemon (5.7-2) natty; urgency=low
+
+ * Package now depends on the new gator-module meta package
+
+ -- Ricardo Salveti de Araujo <ricardo.salveti@linaro.org> Mon, 10 Oct 2011 23:39:34 -0300
+
+gator-daemon (5.7-1) natty; urgency=low
+
+ * Updated to DS-5.7 release
+ * Added sources (GPLv2)
+
+ -- Pawel Moll <pawel.moll@arm.com> Fri, 30 Sep 2011 16:02:05 +0100
+
+gator-daemon (5.6-2) natty; urgency=low
+
+ * Added dependency on 5.6 version of the gator-module[-dkms]
+
+ -- Pawel Moll <pawel.moll@arm.com> Tue, 13 Sep 2011 15:59:37 +0100
+
+gator-daemon (5.6-1) natty; urgency=low
+
+ * Updated to DS-5.6 release
+
+ -- Pawel Moll <pawel.moll@arm.com> Wed, 03 Aug 2011 11:54:40 +0100
+
+gator-daemon (5.5-1) natty; urgency=low
+
+ * Initial release
+
+ -- Pawel Moll <pawel.moll@arm.com> Mon, 06 Jun 2011 18:42:31 +0100
diff --git a/tools/gator/debian/compat b/tools/gator/debian/compat
new file mode 100644
index 000000000000..45a4fb75db86
--- /dev/null
+++ b/tools/gator/debian/compat
@@ -0,0 +1 @@
+8
diff --git a/tools/gator/debian/control b/tools/gator/debian/control
new file mode 100644
index 000000000000..f20a736e5e7e
--- /dev/null
+++ b/tools/gator/debian/control
@@ -0,0 +1,38 @@
+Source: gator
+Section: devel
+Priority: extra
+Maintainer: Pawel Moll <pawel.moll@arm.com>
+Build-Depends: debhelper (>= 7.0.5), vim-common
+Standards-Version: 3.9.2
+Homepage: http://www.arm.com/ds-5
+
+Package: gator
+Architecture: armhf
+Depends: ${misc:Depends}, gator-daemon (>= 5.9.0)
+Description: ARM Streamline gator
+ This meta-package depends on gator target packages required to use
+ ARM Streamline Performance Analyzer.
+
+Package: gator-daemon
+Architecture: armhf
+Depends: ${shlibs:Depends}, ${misc:Depends}
+Description: ARM Streamline gator daemon
+ The target-side daemon gathering data for ARM Streamline
+ Performance Analyzer.
+
+Package: arm-ds5-target
+Architecture: armhf
+Depends: ${misc:Depends}, gdbserver (>= 6.8), openssh-server | ssh-server, gator (>= 5.9.0)
+Description: ARM DS-5 target dependencies
+ This meta-package depends on all components required for the
+ full ARM DS-5 experience.
+
+Package: gator-module-dkms
+Architecture: armhf
+Section: kernel
+Depends: ${misc:Depends}, dkms (>= 1.95), make
+Description: ARM Streamline gator module - dkms version
+ The target-side kernel module gathering data for ARM Streamline
+ Performance Analyzer.
+ .
+ This package contains the source to be built with dkms.
diff --git a/tools/gator/debian/copyright b/tools/gator/debian/copyright
new file mode 100644
index 000000000000..3ed568c02e8a
--- /dev/null
+++ b/tools/gator/debian/copyright
@@ -0,0 +1,15 @@
+Files: *
+Copyright: 2010-2012 ARM Ltd.
+License: GPL-2
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License.
+ .
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+ .
+ You should have received a copy of the GNU General Public License
+ with your system, in /usr/share/common-licenses/GPL-2. If not, see
+ <http://www.gnu.org/licenses/>.
diff --git a/tools/gator/debian/dkms.conf.in b/tools/gator/debian/dkms.conf.in
new file mode 100644
index 000000000000..5cb51e58a5ed
--- /dev/null
+++ b/tools/gator/debian/dkms.conf.in
@@ -0,0 +1,7 @@
+PACKAGE_NAME="gator-module"
+PACKAGE_VERSION="__VERSION__"
+MAKE[0]="make -C ${kernel_source_dir} M=${dkms_tree}/${PACKAGE_NAME}/${PACKAGE_VERSION}/build"
+CLEAN="make -C ${kernel_source_dir} M=${dkms_tree}/${PACKAGE_NAME}/${PACKAGE_VERSION}/build clean"
+BUILT_MODULE_NAME[0]="gator"
+DEST_MODULE_LOCATION[0]="/extra"
+AUTOINSTALL=yes
diff --git a/tools/gator/debian/gator-daemon.install b/tools/gator/debian/gator-daemon.install
new file mode 100644
index 000000000000..7b0d218c425d
--- /dev/null
+++ b/tools/gator/debian/gator-daemon.install
@@ -0,0 +1 @@
+daemon/gatord /usr/sbin/
diff --git a/tools/gator/debian/gator-daemon.postrm b/tools/gator/debian/gator-daemon.postrm
new file mode 100644
index 000000000000..97518f4e1a16
--- /dev/null
+++ b/tools/gator/debian/gator-daemon.postrm
@@ -0,0 +1,41 @@
+#!/bin/sh
+# postrm script for gator-daemon
+#
+# see: dh_installdeb(1)
+
+set -e
+
+# summary of how this script can be called:
+# * <postrm> `remove'
+# * <postrm> `purge'
+# * <old-postrm> `upgrade' <new-version>
+# * <new-postrm> `failed-upgrade' <old-version>
+# * <new-postrm> `abort-install'
+# * <new-postrm> `abort-install' <old-version>
+# * <new-postrm> `abort-upgrade' <old-version>
+# * <disappearer's-postrm> `disappear' <overwriter>
+# <overwriter-version>
+# for details, see http://www.debian.org/doc/debian-policy/ or
+# the debian-policy package
+
+
+case "$1" in
+ purge)
+ rm -rf /var/lib/gatord/
+ ;;
+
+ remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear)
+ ;;
+
+ *)
+ echo "postrm called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
diff --git a/tools/gator/debian/gator-daemon.preinst b/tools/gator/debian/gator-daemon.preinst
new file mode 100644
index 000000000000..bb2dc7e5fe71
--- /dev/null
+++ b/tools/gator/debian/gator-daemon.preinst
@@ -0,0 +1,36 @@
+#!/bin/sh
+# preinst script for gator-daemon
+#
+# see: dh_installdeb(1)
+
+set -e
+
+# summary of how this script can be called:
+# * <new-preinst> `install'
+# * <new-preinst> `install' <old-version>
+# * <new-preinst> `upgrade' <old-version>
+# * <old-preinst> `abort-upgrade' <new-version>
+# for details, see http://www.debian.org/doc/debian-policy/ or
+# the debian-policy package
+
+
+case "$1" in
+ install|upgrade)
+ mkdir -p /var/lib/gatord/
+ ;;
+
+ abort-upgrade)
+ ;;
+
+ *)
+ echo "preinst called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
diff --git a/tools/gator/debian/gator-daemon.upstart b/tools/gator/debian/gator-daemon.upstart
new file mode 100644
index 000000000000..1564efc53393
--- /dev/null
+++ b/tools/gator/debian/gator-daemon.upstart
@@ -0,0 +1,10 @@
+description "Load gator driver module and launch gator daemon"
+author "Pawel Moll <pawel.moll@arm.com>"
+
+start on filesystem
+
+exec /usr/sbin/gatord -c /var/lib/gatord/configuration.xml
+
+pre-start script
+ modprobe -q gator || true
+end script
diff --git a/tools/gator/debian/gator-module-dkms.install.in b/tools/gator/debian/gator-module-dkms.install.in
new file mode 100755
index 000000000000..4691ff785752
--- /dev/null
+++ b/tools/gator/debian/gator-module-dkms.install.in
@@ -0,0 +1,2 @@
+module/* /usr/src/gator-module-__VERSION__/
+debian/dkms.conf /usr/src/gator-module-__VERSION__/
\ No newline at end of file
diff --git a/tools/gator/debian/gator-module-dkms.postinst b/tools/gator/debian/gator-module-dkms.postinst
new file mode 100755
index 000000000000..b3de65e5b06c
--- /dev/null
+++ b/tools/gator/debian/gator-module-dkms.postinst
@@ -0,0 +1,49 @@
+#!/bin/sh
+# Copyright (C) 2002-2005 Flavio Stanchina
+# Copyright (C) 2005-2006 Aric Cyr
+# Copyright (C) 2007 Mario Limonciello
+# Copyright (C) 2009 Alberto Milone
+# Copyright (C) 2010 Ricardo Salveti de Araujo
+
+set -e
+
+NAME=gator-module
+PACKAGE_NAME=$NAME-dkms
+CVERSION=`dpkg-query -W -f='${Version}' $PACKAGE_NAME | awk -F "-" '{print $1}' | cut -d\: -f2`
+ARCH=`dpkg --print-architecture`
+
+dkms_configure () {
+ for POSTINST in /usr/lib/dkms/common.postinst "/usr/share/$PACKAGE_NAME/postinst"; do
+ if [ -f "$POSTINST" ]; then
+ "$POSTINST" "$NAME" "$CVERSION" "/usr/share/$PACKAGE_NAME" "$ARCH" "$2"
+ return $?
+ fi
+ echo "WARNING: $POSTINST does not exist." >&2
+ done
+ echo "ERROR: DKMS version is too old and $PACKAGE_NAME was not" >&2
+ echo "built with legacy DKMS support." >&2
+ echo "You must either rebuild $PACKAGE_NAME with legacy postinst" >&2
+ echo "support or upgrade DKMS to a more current version." >&2
+ return 1
+}
+
+case "$1" in
+ configure)
+ dkms_configure
+ ;;
+
+ abort-upgrade|abort-remove|abort-deconfigure)
+ ;;
+
+ *)
+ echo "postinst called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
diff --git a/tools/gator/debian/gator-module-dkms.prerm b/tools/gator/debian/gator-module-dkms.prerm
new file mode 100755
index 000000000000..cf0b5f99da76
--- /dev/null
+++ b/tools/gator/debian/gator-module-dkms.prerm
@@ -0,0 +1,29 @@
+#!/bin/sh
+
+NAME=gator-module
+PACKAGE_NAME=$NAME-dkms
+VERSION=`dpkg-query -W -f='${Version}' $PACKAGE_NAME | awk -F "-" '{print $1}' | cut -d\: -f2`
+
+set -e
+
+case "$1" in
+ remove|upgrade|deconfigure)
+ if [ "`dkms status -m $NAME`" ]; then
+ dkms remove -m $NAME -v $VERSION --all
+ fi
+ ;;
+
+ failed-upgrade)
+ ;;
+
+ *)
+ echo "prerm called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+#DEBHELPER#
+
+exit 0
+
+
diff --git a/tools/gator/debian/rules b/tools/gator/debian/rules
new file mode 100755
index 000000000000..c747748ff83c
--- /dev/null
+++ b/tools/gator/debian/rules
@@ -0,0 +1,15 @@
+#!/usr/bin/make -f
+
+DEB_HOST_GNU_TYPE = $(shell dpkg-architecture -qDEB_HOST_GNU_TYPE)
+DEB_BUILD_GNU_TYPE = $(shell dpkg-architecture -qDEB_BUILD_GNU_TYPE)
+
+ifneq ($(DEB_BUILD_GNU_TYPE),$(DEB_HOST_GNU_TYPE))
+ CROSS_COMPILE ?= $(DEB_HOST_GNU_TYPE)-
+endif
+
+export CROSS_COMPILE
+
+export DH_OPTIONS
+
+%:
+ dh $@
diff --git a/tools/gator/debian/scripts/do-packaging b/tools/gator/debian/scripts/do-packaging
new file mode 100755
index 000000000000..1573d7e22f32
--- /dev/null
+++ b/tools/gator/debian/scripts/do-packaging
@@ -0,0 +1,104 @@
+#!/bin/bash
+
+set -e
+
+while [ $1 ]
+do
+ OPT=$1
+ shift
+ if [ "$OPT" == "--commit" ]
+ then
+ OPT_COMMIT=y
+ continue
+ fi
+ if [ "$OPT" == "--sign" ]
+ then
+ OPT_SIGN=y
+ continue
+ fi
+ if [ "$OPT" == "--no-git-clean" ]
+ then
+ OPT_NO_GIT_CLEAN=y
+ continue
+ fi
+ echo $0: Unknown option: "$OPT" 1>&2
+ exit 1
+done
+
+
+#
+# Clean working directory
+#
+
+if [ ! $OPT_NO_GIT_CLEAN ]
+then
+ git reset --hard HEAD
+ git clean -dfx ../..
+fi
+git checkout debian/changelog
+
+if [ ! -d module ]
+then
+ mkdir module
+ cp -a ../../drivers/gator/* module/
+fi
+
+#
+# Update changelog
+#
+
+if [ "$DEBEMAIL" == "" -o "$DEBFULLNAME" == "" ]
+then
+ export DEBEMAIL='jon.medhurst@linaro.org'
+ export DEBFULLNAME='Jon Medhurst (Tixy)'
+fi
+dch -i -c debian/changelog CHANGE_MSG
+dch -r -c debian/changelog ""
+
+TIMESTAMP="$(date -u +%Y%m%d%H%M%S)"
+sed -i -e "1 s/ubuntu1/-$TIMESTAMP/" debian/changelog
+
+GIT_COMMIT="$(git rev-parse HEAD)"
+GIT_REMOTE_AND_BRANCH="$(git show-ref | sed -n 's,'$GIT_COMMIT'.*refs/remotes/\(.*\),\1,p' | sed -n 1p)"
+GIT_REMOTE=$(echo $GIT_REMOTE_AND_BRANCH | awk -F/ '{print $1}')
+GIT_BRANCH=$(echo $GIT_REMOTE_AND_BRANCH | awk -F/ '{print $2}')
+GIT_URL="$(git remote -v show $GIT_REMOTE | sed -n 's/.*Fetch URL: \(.*\)/\1/p')"
+GIT_TAG="$(git describe --exact-match --tags $GIT_COMMIT 2>/dev/null || echo "")"
+
+CHANGE_MSG="Automatically packaged from git repository:\n"
+CHANGE_MSG="$CHANGE_MSG URL: $GIT_URL\n"
+CHANGE_MSG="$CHANGE_MSG Tag: $GIT_TAG\n"
+CHANGE_MSG="$CHANGE_MSG Branch: $GIT_BRANCH\n"
+CHANGE_MSG="$CHANGE_MSG Commit: $GIT_COMMIT"
+sed -i -e "s%CHANGE_MSG%$CHANGE_MSG%" debian/changelog
+
+echo $CHANGE_MSG
+
+FULL_VERSION="$(sed -n '1s/.*(\(.*\)).*/\1/p' debian/changelog)"
+VERSION="$(echo $FULL_VERSION | sed 's/\([0-9.]*\).*/\1/')"
+sed "s/__VERSION__/$VERSION/g" debian/dkms.conf.in > debian/dkms.conf
+sed "s/__VERSION__/$VERSION/g" debian/gator-module-dkms.install.in > debian/gator-module-dkms.install
+
+
+#
+# Commit changes
+#
+
+if [ $OPT_COMMIT ]
+then
+ git add debian/changelog
+ git commit -s -m "gator-$FULL_VERSION: Automated packaging"
+fi
+
+
+#
+# Build packages
+#
+
+if [ ! $OPT_SIGN ]
+then
+ DEBUILD_SIGN="-us -uc"
+fi
+
+debuild $DEBUILD_SIGN -aarmhf -b
+debuild --no-tgz-check $DEBUILD_SIGN -S