author	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-25 16:46:44 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-25 16:46:44 -0800
commit	fffddfd6c8e0c10c42c6e2cc54ba880fcc36ebbb (patch)
tree	71bc5e597124dbaf7550f1e089d675718b3ed5c0 /drivers
parent	69086a78bdc973ec0b722be790b146e84ba8a8c4 (diff)
parent	be88298b0a3f771a4802f20c5e66af74bfd1dff1 (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm merge from Dave Airlie:
 "Highlights:
   - TI LCD controller KMS driver
   - TI OMAP KMS driver merged from staging
   - drop gma500 stub driver
   - the fbcon locking fixes
   - the vgacon dirty like zebra fix.
   - open firmware videomode and hdmi common code helpers
   - major locking rework for kms object handling - pageflip/cursor won't
     block on polling anymore!
   - fbcon helper and prime helper cleanups
   - i915: all over the map, haswell power well enhancements, valleyview
     macro horrors cleaned up, killing lots of legacy GTT code,
   - radeon: CS ioctl unification, deprecated UMS support, gpu reset
     rework, VM fixes
   - nouveau: reworked thermal code, external dp/tmds encoder support
     (anx9805), fences sleep instead of polling,
   - exynos: all over the driver fixes."

Lovely conflict in radeon/evergreen_cs.c between commit de0babd60d8d
("drm/radeon: enforce use of radeon_get_ib_value when reading user cmd")
and the new changes that modified that evergreen_dma_cs_parse() function.

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (508 commits)
  drm/tilcdc: only build on arm
  drm/i915: Revert hdmi HDP pin checks
  drm/tegra: Add list of framebuffers to debugfs
  drm/tegra: Fix color expansion
  drm/tegra: Split DC_CMD_STATE_CONTROL register write
  drm/tegra: Implement page-flipping support
  drm/tegra: Implement VBLANK support
  drm/tegra: Implement .mode_set_base()
  drm/tegra: Add plane support
  drm/tegra: Remove bogus tegra_framebuffer structure
  drm: Add consistency check for page-flipping
  drm/radeon: Use generic HDMI infoframe helpers
  drm/tegra: Use generic HDMI infoframe helpers
  drm: Add EDID helper documentation
  drm: Add HDMI infoframe helpers
  video: Add generic HDMI infoframe helpers
  drm: Add some missing forward declarations
  drm: Move mode tables to drm_edid.c
  drm: Remove duplicate drm_mode_cea_vic()
  gma500: Fix n, m1 and m2 clock limits for sdvo and lvds
  ...
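As background for the conversions further down (ast_drv.c, drm_crtc.c and friends), a minimal sketch of the locking rework mentioned above: code that used to take dev->mode_config.mutex directly now brackets modeset updates with the new drm_modeset_lock_all()/drm_modeset_unlock_all() helpers, which grab the global lock plus every crtc->mutex. The example_thaw() wrapper below is illustrative only, not taken from the patch.

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>

static void example_thaw(struct drm_device *dev)
{
	/* replaces mutex_lock(&dev->mode_config.mutex) */
	drm_modeset_lock_all(dev);
	drm_helper_resume_force_mode(dev);
	/* replaces mutex_unlock(&dev->mode_config.mutex) */
	drm_modeset_unlock_all(dev);
}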
Diffstat (limited to 'drivers')
-rw-r--r--drivers/char/agp/intel-gtt.c128
-rw-r--r--drivers/gpu/Makefile2
-rw-r--r--drivers/gpu/drm/Kconfig8
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c4
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h2
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c27
-rw-r--r--drivers/gpu/drm/ast/ast_main.c12
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c27
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c12
-rw-r--r--drivers/gpu/drm/drm_crtc.c816
-rw-r--r--drivers/gpu/drm/drm_edid.c843
-rw-r--r--drivers/gpu/drm/drm_edid_modes.h774
-rw-r--r--drivers/gpu/drm/drm_encoder_slave.c63
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c95
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c310
-rw-r--r--drivers/gpu/drm/drm_fops.c1
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c21
-rw-r--r--drivers/gpu/drm/drm_irq.c12
-rw-r--r--drivers/gpu/drm/drm_mm.c96
-rw-r--r--drivers/gpu/drm/drm_modes.c70
-rw-r--r--drivers/gpu/drm/drm_pci.c81
-rw-r--r--drivers/gpu/drm/drm_prime.c186
-rw-r--r--drivers/gpu/drm/drm_usb.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c55
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c39
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c33
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.h5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.h2
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c1035
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c34
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c43
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c8
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c14
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c12
-rw-r--r--drivers/gpu/drm/i2c/Kconfig28
-rw-r--r--drivers/gpu/drm/i2c/Makefile3
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c2
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c906
-rw-r--r--drivers/gpu/drm/i915/Makefile1
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c254
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c94
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c131
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h475
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c516
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c12
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c333
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c645
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c305
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c33
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c370
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h436
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c540
-rw-r--r--drivers/gpu/drm/i915/i915_ums.c503
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c46
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c79
-rw-r--r--drivers/gpu/drm/i915/intel_display.c975
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c374
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h41
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c1
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c55
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c108
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c103
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c250
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c6
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c2
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c24
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c13
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c95
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c113
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h11
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c67
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c46
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_fb.c28
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c16
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig28
-rw-r--r--drivers/gpu/drm/nouveau/Makefile29
-rw-r--r--drivers/gpu/drm/nouveau/core/core/client.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/core/enum.c11
-rw-r--r--drivers/gpu/drm/nouveau/core/core/event.c106
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nva3.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/base.c52
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dport.c346
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dport.h78
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv04.c33
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c371
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.h37
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv84.c12
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv94.c24
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva0.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva3.c24
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c309
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nve0.c17
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c140
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c25
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c153
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c90
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/base.c21
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c187
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c22
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c109
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c64
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv04.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv10.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv20.c15
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv40.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.c53
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c33
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nve0.c44
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.c40
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nvc0.c29
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/class.h44
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/client.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/device.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/enum.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/event.h36
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/object.h12
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/printk.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/disp.h27
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/fifo.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/software.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h11
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h16
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/xpio.h19
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bus.h41
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/gpio.h39
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/i2c.h127
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/therm.h37
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/timer.h8
-rw-r--r--drivers/gpu/drm/nouveau/core/os.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c32
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c11
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c15
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c15
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/therm.c28
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/xpio.c76
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c95
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c112
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c105
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c101
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/base.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv04.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv10.c25
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv20.c13
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv30.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv40.c50
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv50.c51
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c43
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nve0.c22
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c14
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c64
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/base.c140
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c40
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c45
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c14
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c131
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h17
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c279
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c154
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/base.c481
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c18
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c143
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c135
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c149
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h32
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c285
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c124
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/base.c218
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fan.c244
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fannil.c54
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c107
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c115
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/ic.c54
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c82
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c199
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c99
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c153
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/priv.h103
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/temp.c162
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c130
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c24
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c96
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c64
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.h22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c95
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c297
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c60
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c26
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c103
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h42
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c103
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.c233
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c173
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c18
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.h1
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fence.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv04_tv.c39
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c118
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.h19
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c149
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c307
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c36
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c214
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fence.c186
-rw-r--r--drivers/gpu/drm/omapdrm/Kconfig (renamed from drivers/staging/omapdrm/Kconfig)0
-rw-r--r--drivers/gpu/drm/omapdrm/Makefile (renamed from drivers/staging/omapdrm/Makefile)0
-rw-r--r--drivers/gpu/drm/omapdrm/TODO23
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c (renamed from drivers/staging/omapdrm/omap_connector.c)2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c (renamed from drivers/staging/omapdrm/omap_crtc.c)14
-rw-r--r--drivers/gpu/drm/omapdrm/omap_debugfs.c (renamed from drivers/staging/omapdrm/omap_debugfs.c)18
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_priv.h (renamed from drivers/staging/omapdrm/omap_dmm_priv.h)0
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c (renamed from drivers/staging/omapdrm/omap_dmm_tiler.c)0
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.h (renamed from drivers/staging/omapdrm/omap_dmm_tiler.h)0
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c (renamed from drivers/staging/omapdrm/omap_drv.c)6
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h (renamed from drivers/staging/omapdrm/omap_drv.h)4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c (renamed from drivers/staging/omapdrm/omap_encoder.c)2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c (renamed from drivers/staging/omapdrm/omap_fb.c)18
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c (renamed from drivers/staging/omapdrm/omap_fbdev.c)34
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c (renamed from drivers/staging/omapdrm/omap_gem.c)2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c (renamed from drivers/staging/omapdrm/omap_gem_dmabuf.c)2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_helpers.c (renamed from drivers/staging/omapdrm/omap_gem_helpers.c)2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c (renamed from drivers/staging/omapdrm/omap_irq.c)2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c (renamed from drivers/staging/omapdrm/omap_plane.c)2
-rw-r--r--drivers/gpu/drm/omapdrm/tcm-sita.c (renamed from drivers/staging/omapdrm/tcm-sita.c)0
-rw-r--r--drivers/gpu/drm/omapdrm/tcm-sita.h (renamed from drivers/staging/omapdrm/tcm-sita.h)0
-rw-r--r--drivers/gpu/drm/omapdrm/tcm.h (renamed from drivers/staging/omapdrm/tcm.h)0
-rw-r--r--drivers/gpu/drm/radeon/Kconfig33
-rw-r--r--drivers/gpu/drm/radeon/Makefile10
-rw-r--r--drivers/gpu/drm/radeon/atom.c9
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c6
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c366
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c1149
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c85
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h1
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h54
-rw-r--r--drivers/gpu/drm/radeon/ni.c339
-rw-r--r--drivers/gpu/drm/radeon/nid.h27
-rw-r--r--drivers/gpu/drm/radeon/r100.c224
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h4
-rw-r--r--drivers/gpu/drm/radeon/r100d.h11
-rw-r--r--drivers/gpu/drm/radeon/r200.c26
-rw-r--r--drivers/gpu/drm/radeon/r300.c42
-rw-r--r--drivers/gpu/drm/radeon/r300_cmdbuf.c2
-rw-r--r--drivers/gpu/drm/radeon/r300d.h11
-rw-r--r--drivers/gpu/drm/radeon/r500_reg.h1
-rw-r--r--drivers/gpu/drm/radeon/r600.c401
-rw-r--r--drivers/gpu/drm/radeon/r600_blit.c33
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c31
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c332
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c135
-rw-r--r--drivers/gpu/drm/radeon/r600d.h17
-rw-r--r--drivers/gpu/drm/radeon/radeon.h38
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c70
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h24
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c73
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c176
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c91
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h16
-rw-r--r--drivers/gpu/drm/radeon/radeon_family.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c60
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_mem.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c170
-rw-r--r--drivers/gpu/drm/radeon/radeon_reg.h15
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_state.c2
-rw-r--r--drivers/gpu/drm/radeon/rv515d.h11
-rw-r--r--drivers/gpu/drm/radeon/rv770.c25
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h4
-rw-r--r--drivers/gpu/drm/radeon/si.c509
-rw-r--r--drivers/gpu/drm/radeon/sid.h30
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c4
-rw-r--r--drivers/gpu/drm/tegra/Kconfig1
-rw-r--r--drivers/gpu/drm/tegra/dc.c585
-rw-r--r--drivers/gpu/drm/tegra/dc.h14
-rw-r--r--drivers/gpu/drm/tegra/drm.c103
-rw-r--r--drivers/gpu/drm/tegra/drm.h43
-rw-r--r--drivers/gpu/drm/tegra/fb.c4
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c226
-rw-r--r--drivers/gpu/drm/tegra/hdmi.h189
-rw-r--r--drivers/gpu/drm/tilcdc/Kconfig13
-rw-r--r--drivers/gpu/drm/tilcdc/Makefile10
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c602
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c611
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.h150
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c436
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.h26
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_regs.h154
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_slave.c376
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_slave.h26
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.c419
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.h26
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c103
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c78
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h2
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c78
-rw-r--r--drivers/gpu/drm/udl/udl_transfer.c46
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c38
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c87
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c4
-rw-r--r--drivers/gpu/stub/Kconfig18
-rw-r--r--drivers/gpu/stub/Makefile1
-rw-r--r--drivers/gpu/stub/poulsbo.c64
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c3
-rw-r--r--drivers/iommu/intel-iommu.c8
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/omapdrm/TODO32
-rw-r--r--drivers/staging/omapdrm/omap_drm.h123
-rw-r--r--drivers/tty/vt/vt.c136
-rw-r--r--drivers/video/Kconfig26
-rw-r--r--drivers/video/Makefile5
-rw-r--r--drivers/video/console/fbcon.c58
-rw-r--r--drivers/video/console/vgacon.c22
-rw-r--r--drivers/video/display_timing.c24
-rw-r--r--drivers/video/fbmem.c11
-rw-r--r--drivers/video/fbmon.c94
-rw-r--r--drivers/video/fbsysfs.c3
-rw-r--r--drivers/video/hdmi.c308
-rw-r--r--drivers/video/of_display_timing.c239
-rw-r--r--drivers/video/of_videomode.c54
-rw-r--r--drivers/video/via/hw.c6
-rw-r--r--drivers/video/via/hw.h2
-rw-r--r--drivers/video/via/lcd.c2
-rw-r--r--drivers/video/via/share.h2
-rw-r--r--drivers/video/via/via_modesetting.c8
-rw-r--r--drivers/video/via/via_modesetting.h6
-rw-r--r--drivers/video/videomode.c39
369 files changed, 22394 insertions, 11557 deletions
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index dbd901e94ea..b8e2014cb9c 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -60,7 +60,6 @@ struct intel_gtt_driver {
};
static struct _intel_private {
- struct intel_gtt base;
const struct intel_gtt_driver *driver;
struct pci_dev *pcidev; /* device one */
struct pci_dev *bridge_dev;
@@ -75,7 +74,18 @@ static struct _intel_private {
struct resource ifp_resource;
int resource_valid;
struct page *scratch_page;
+ phys_addr_t scratch_page_dma;
int refcount;
+ /* Whether i915 needs to use the dmar apis or not. */
+ unsigned int needs_dmar : 1;
+ phys_addr_t gma_bus_addr;
+ /* Size of memory reserved for graphics by the BIOS */
+ unsigned int stolen_size;
+ /* Total number of gtt entries. */
+ unsigned int gtt_total_entries;
+ /* Part of the gtt that is mappable by the cpu, for those chips where
+ * this is not the full gtt. */
+ unsigned int gtt_mappable_entries;
} intel_private;
#define INTEL_GTT_GEN intel_private.driver->gen
@@ -291,15 +301,15 @@ static int intel_gtt_setup_scratch_page(void)
get_page(page);
set_pages_uc(page, 1);
- if (intel_private.base.needs_dmar) {
+ if (intel_private.needs_dmar) {
dma_addr = pci_map_page(intel_private.pcidev, page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
return -EINVAL;
- intel_private.base.scratch_page_dma = dma_addr;
+ intel_private.scratch_page_dma = dma_addr;
} else
- intel_private.base.scratch_page_dma = page_to_phys(page);
+ intel_private.scratch_page_dma = page_to_phys(page);
intel_private.scratch_page = page;
@@ -506,7 +516,7 @@ static unsigned int intel_gtt_total_entries(void)
/* On previous hardware, the GTT size was just what was
* required to map the aperture.
*/
- return intel_private.base.gtt_mappable_entries;
+ return intel_private.gtt_mappable_entries;
}
}
@@ -546,7 +556,7 @@ static unsigned int intel_gtt_mappable_entries(void)
static void intel_gtt_teardown_scratch_page(void)
{
set_pages_wb(intel_private.scratch_page, 1);
- pci_unmap_page(intel_private.pcidev, intel_private.base.scratch_page_dma,
+ pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
put_page(intel_private.scratch_page);
__free_page(intel_private.scratch_page);
@@ -562,6 +572,40 @@ static void intel_gtt_cleanup(void)
intel_gtt_teardown_scratch_page();
}
+/* Certain Gen5 chipsets require require idling the GPU before
+ * unmapping anything from the GTT when VT-d is enabled.
+ */
+static inline int needs_ilk_vtd_wa(void)
+{
+#ifdef CONFIG_INTEL_IOMMU
+ const unsigned short gpu_devid = intel_private.pcidev->device;
+
+ /* Query intel_iommu to see if we need the workaround. Presumably that
+ * was loaded first.
+ */
+ if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
+ gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
+ intel_iommu_gfx_mapped)
+ return 1;
+#endif
+ return 0;
+}
+
+static bool intel_gtt_can_wc(void)
+{
+ if (INTEL_GTT_GEN <= 2)
+ return false;
+
+ if (INTEL_GTT_GEN >= 6)
+ return false;
+
+ /* Reports of major corruption with ILK vt'd enabled */
+ if (needs_ilk_vtd_wa())
+ return false;
+
+ return true;
+}
+
static int intel_gtt_init(void)
{
u32 gma_addr;
@@ -572,8 +616,8 @@ static int intel_gtt_init(void)
if (ret != 0)
return ret;
- intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
- intel_private.base.gtt_total_entries = intel_gtt_total_entries();
+ intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
+ intel_private.gtt_total_entries = intel_gtt_total_entries();
/* save the PGETBL reg for resume */
intel_private.PGETBL_save =
@@ -585,13 +629,13 @@ static int intel_gtt_init(void)
dev_info(&intel_private.bridge_dev->dev,
"detected gtt size: %dK total, %dK mappable\n",
- intel_private.base.gtt_total_entries * 4,
- intel_private.base.gtt_mappable_entries * 4);
+ intel_private.gtt_total_entries * 4,
+ intel_private.gtt_mappable_entries * 4);
- gtt_map_size = intel_private.base.gtt_total_entries * 4;
+ gtt_map_size = intel_private.gtt_total_entries * 4;
intel_private.gtt = NULL;
- if (INTEL_GTT_GEN < 6 && INTEL_GTT_GEN > 2)
+ if (intel_gtt_can_wc())
intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr,
gtt_map_size);
if (intel_private.gtt == NULL)
@@ -602,13 +646,12 @@ static int intel_gtt_init(void)
iounmap(intel_private.registers);
return -ENOMEM;
}
- intel_private.base.gtt = intel_private.gtt;
global_cache_flush(); /* FIXME: ? */
- intel_private.base.stolen_size = intel_gtt_stolen_size();
+ intel_private.stolen_size = intel_gtt_stolen_size();
- intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
+ intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
ret = intel_gtt_setup_scratch_page();
if (ret != 0) {
@@ -623,7 +666,7 @@ static int intel_gtt_init(void)
pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
&gma_addr);
- intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
+ intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
return 0;
}
@@ -634,8 +677,7 @@ static int intel_fake_agp_fetch_size(void)
unsigned int aper_size;
int i;
- aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
- / MB(1);
+ aper_size = (intel_private.gtt_mappable_entries << PAGE_SHIFT) / MB(1);
for (i = 0; i < num_sizes; i++) {
if (aper_size == intel_fake_agp_sizes[i].size) {
@@ -779,7 +821,7 @@ static int intel_fake_agp_configure(void)
return -EIO;
intel_private.clear_fake_agp = true;
- agp_bridge->gart_bus_addr = intel_private.base.gma_bus_addr;
+ agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
return 0;
}
@@ -841,12 +883,9 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
{
int ret = -EINVAL;
- if (intel_private.base.do_idle_maps)
- return -ENODEV;
-
if (intel_private.clear_fake_agp) {
- int start = intel_private.base.stolen_size / PAGE_SIZE;
- int end = intel_private.base.gtt_mappable_entries;
+ int start = intel_private.stolen_size / PAGE_SIZE;
+ int end = intel_private.gtt_mappable_entries;
intel_gtt_clear_range(start, end - start);
intel_private.clear_fake_agp = false;
}
@@ -857,7 +896,7 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
if (mem->page_count == 0)
goto out;
- if (pg_start + mem->page_count > intel_private.base.gtt_total_entries)
+ if (pg_start + mem->page_count > intel_private.gtt_total_entries)
goto out_err;
if (type != mem->type)
@@ -869,7 +908,7 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
if (!mem->is_flushed)
global_cache_flush();
- if (intel_private.base.needs_dmar) {
+ if (intel_private.needs_dmar) {
struct sg_table st;
ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
@@ -895,7 +934,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
unsigned int i;
for (i = first_entry; i < (first_entry + num_entries); i++) {
- intel_private.driver->write_entry(intel_private.base.scratch_page_dma,
+ intel_private.driver->write_entry(intel_private.scratch_page_dma,
i, 0);
}
readl(intel_private.gtt+i-1);
@@ -908,12 +947,9 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem,
if (mem->page_count == 0)
return 0;
- if (intel_private.base.do_idle_maps)
- return -ENODEV;
-
intel_gtt_clear_range(pg_start, mem->page_count);
- if (intel_private.base.needs_dmar) {
+ if (intel_private.needs_dmar) {
intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
mem->sg_list = NULL;
mem->num_sg = 0;
@@ -1070,25 +1106,6 @@ static void i965_write_entry(dma_addr_t addr,
writel(addr | pte_flags, intel_private.gtt + entry);
}
-/* Certain Gen5 chipsets require require idling the GPU before
- * unmapping anything from the GTT when VT-d is enabled.
- */
-static inline int needs_idle_maps(void)
-{
-#ifdef CONFIG_INTEL_IOMMU
- const unsigned short gpu_devid = intel_private.pcidev->device;
-
- /* Query intel_iommu to see if we need the workaround. Presumably that
- * was loaded first.
- */
- if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
- gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
- intel_iommu_gfx_mapped)
- return 1;
-#endif
- return 0;
-}
-
static int i9xx_setup(void)
{
u32 reg_addr, gtt_addr;
@@ -1116,9 +1133,6 @@ static int i9xx_setup(void)
break;
}
- if (needs_idle_maps())
- intel_private.base.do_idle_maps = 1;
-
intel_i9xx_setup_flush();
return 0;
@@ -1390,9 +1404,13 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
}
EXPORT_SYMBOL(intel_gmch_probe);
-struct intel_gtt *intel_gtt_get(void)
+void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
+ phys_addr_t *mappable_base, unsigned long *mappable_end)
{
- return &intel_private.base;
+ *gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
+ *stolen_size = intel_private.stolen_size;
+ *mappable_base = intel_private.gma_bus_addr;
+ *mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT;
}
EXPORT_SYMBOL(intel_gtt_get);
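A hypothetical caller-side sketch of the reworked intel_gtt_get() above, which now reports the GTT geometry through out-parameters instead of handing out a pointer to the old struct intel_gtt. The example_read_gtt_config() helper and the log message are illustrative; only the intel_gtt_get() prototype comes from the patch.

#include <linux/kernel.h>
#include <linux/types.h>
#include <drm/intel-gtt.h>

static void example_read_gtt_config(void)
{
	size_t gtt_total, stolen_size;
	phys_addr_t mappable_base;
	unsigned long mappable_end;

	/* New interface: four out-parameters, no struct intel_gtt exposed. */
	intel_gtt_get(&gtt_total, &stolen_size, &mappable_base, &mappable_end);

	pr_info("GTT: %zu bytes total, %zu stolen, aperture at %pa, %lu mappable\n",
		gtt_total, stolen_size, &mappable_base, mappable_end);
}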
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index cc9277885dd..30879df3dae 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1 +1 @@
-obj-y += drm/ vga/ stub/
+obj-y += drm/ vga/
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 983201b450f..1e82882da9d 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -7,6 +7,7 @@
menuconfig DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU
+ select HDMI
select I2C
select I2C_ALGOBIT
select DMA_SHARED_BUFFER
@@ -69,6 +70,8 @@ config DRM_KMS_CMA_HELPER
help
Choose this if you need the KMS CMA helper functions
+source "drivers/gpu/drm/i2c/Kconfig"
+
config DRM_TDFX
tristate "3dfx Banshee/Voodoo3+"
depends on DRM && PCI
@@ -96,6 +99,7 @@ config DRM_RADEON
select DRM_TTM
select POWER_SUPPLY
select HWMON
+ select BACKLIGHT_CLASS_DEVICE
help
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to
@@ -212,3 +216,7 @@ source "drivers/gpu/drm/cirrus/Kconfig"
source "drivers/gpu/drm/shmobile/Kconfig"
source "drivers/gpu/drm/tegra/Kconfig"
+
+source "drivers/gpu/drm/omapdrm/Kconfig"
+
+source "drivers/gpu/drm/tilcdc/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 6f58c81cfcb..0d59b24f8d2 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -50,4 +50,6 @@ obj-$(CONFIG_DRM_UDL) += udl/
obj-$(CONFIG_DRM_AST) += ast/
obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
obj-$(CONFIG_DRM_TEGRA) += tegra/
+obj-$(CONFIG_DRM_OMAP) += omapdrm/
+obj-$(CONFIG_DRM_TILCDC) += tilcdc/
obj-y += i2c/
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 2d2c2f8d6dc..df0d0a08097 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -94,9 +94,9 @@ static int ast_drm_thaw(struct drm_device *dev)
ast_post_gpu(dev);
drm_mode_config_reset(dev);
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
drm_helper_resume_force_mode(dev);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
console_lock();
ast_fbdev_set_suspend(dev, 0);
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 5ccf984f063..528429252f0 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -98,6 +98,8 @@ struct ast_private {
struct drm_gem_object *cursor_cache;
uint64_t cursor_cache_gpu_addr;
+ /* Acces to this cache is protected by the crtc->mutex of the only crtc
+ * we have. */
struct ttm_bo_kmap_obj cache_kmap;
int next_cursor;
};
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index d9ec77959df..34931fe7d2c 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -40,6 +40,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
#include "ast_drv.h"
static void ast_dirty_update(struct ast_fbdev *afbdev,
@@ -145,9 +146,10 @@ static int astfb_create_object(struct ast_fbdev *afbdev,
return ret;
}
-static int astfb_create(struct ast_fbdev *afbdev,
+static int astfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct ast_fbdev *afbdev = (struct ast_fbdev *)helper;
struct drm_device *dev = afbdev->helper.dev;
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_framebuffer *fb;
@@ -248,26 +250,10 @@ static void ast_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
*blue = ast_crtc->lut_b[regno] << 8;
}
-static int ast_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct ast_fbdev *afbdev = (struct ast_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = astfb_create(afbdev, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
static struct drm_fb_helper_funcs ast_fb_helper_funcs = {
.gamma_set = ast_fb_gamma_set,
.gamma_get = ast_fb_gamma_get,
- .fb_probe = ast_find_or_create_single,
+ .fb_probe = astfb_create,
};
static void ast_fbdev_destroy(struct drm_device *dev,
@@ -290,6 +276,7 @@ static void ast_fbdev_destroy(struct drm_device *dev,
drm_fb_helper_fini(&afbdev->helper);
vfree(afbdev->sysram);
+ drm_framebuffer_unregister_private(&afb->base);
drm_framebuffer_cleanup(&afb->base);
}
@@ -313,6 +300,10 @@ int ast_fbdev_init(struct drm_device *dev)
}
drm_fb_helper_single_add_all_connectors(&afbdev->helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
drm_fb_helper_initial_config(&afbdev->helper, 32);
return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f668e6cc0f7..f60fd7bd118 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -246,16 +246,8 @@ static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
kfree(fb);
}
-static int ast_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file,
- unsigned int *handle)
-{
- return -EINVAL;
-}
-
static const struct drm_framebuffer_funcs ast_fb_funcs = {
.destroy = ast_user_framebuffer_destroy,
- .create_handle = ast_user_framebuffer_create_handle,
};
@@ -266,13 +258,13 @@ int ast_framebuffer_init(struct drm_device *dev,
{
int ret;
+ drm_helper_mode_fill_fb_struct(&ast_fb->base, mode_cmd);
+ ast_fb->obj = obj;
ret = drm_framebuffer_init(dev, &ast_fb->base, &ast_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
return ret;
}
- drm_helper_mode_fill_fb_struct(&ast_fb->base, mode_cmd);
- ast_fb->obj = obj;
return 0;
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 6c6b4c87d30..e25afccaf85 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
#include <linux/fb.h>
@@ -120,9 +121,10 @@ static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
return ret;
}
-static int cirrusfb_create(struct cirrus_fbdev *gfbdev,
+static int cirrusfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct cirrus_fbdev *gfbdev = (struct cirrus_fbdev *)helper;
struct drm_device *dev = gfbdev->helper.dev;
struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
struct fb_info *info;
@@ -219,23 +221,6 @@ out_iounmap:
return ret;
}
-static int cirrus_fb_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size
- *sizes)
-{
- struct cirrus_fbdev *gfbdev = (struct cirrus_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = cirrusfb_create(gfbdev, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
static int cirrus_fbdev_destroy(struct drm_device *dev,
struct cirrus_fbdev *gfbdev)
{
@@ -258,6 +243,7 @@ static int cirrus_fbdev_destroy(struct drm_device *dev,
vfree(gfbdev->sysram);
drm_fb_helper_fini(&gfbdev->helper);
+ drm_framebuffer_unregister_private(&gfb->base);
drm_framebuffer_cleanup(&gfb->base);
return 0;
@@ -266,7 +252,7 @@ static int cirrus_fbdev_destroy(struct drm_device *dev,
static struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
.gamma_set = cirrus_crtc_fb_gamma_set,
.gamma_get = cirrus_crtc_fb_gamma_get,
- .fb_probe = cirrus_fb_find_or_create_single,
+ .fb_probe = cirrusfb_create,
};
int cirrus_fbdev_init(struct cirrus_device *cdev)
@@ -290,6 +276,9 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
return ret;
}
drm_fb_helper_single_add_all_connectors(&gfbdev->helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(cdev->dev);
drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);
return 0;
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 6a9b12e88d4..35cbae82777 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -23,16 +23,8 @@ static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
kfree(fb);
}
-static int cirrus_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- return 0;
-}
-
static const struct drm_framebuffer_funcs cirrus_fb_funcs = {
.destroy = cirrus_user_framebuffer_destroy,
- .create_handle = cirrus_user_framebuffer_create_handle,
};
int cirrus_framebuffer_init(struct drm_device *dev,
@@ -42,13 +34,13 @@ int cirrus_framebuffer_init(struct drm_device *dev,
{
int ret;
+ drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+ gfb->obj = obj;
ret = drm_framebuffer_init(dev, &gfb->base, &cirrus_fb_funcs);
if (ret) {
DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
return ret;
}
- drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
- gfb->obj = obj;
return 0;
}
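Both the ast and cirrus hunks above move drm_helper_mode_fill_fb_struct() in front of drm_framebuffer_init(); below is a sketch of the resulting pattern (struct example_framebuffer and example_fb_funcs are illustrative stand-ins for the per-driver types). The reason, spelled out in the drm_crtc.c comments further down, is that drm_framebuffer_init() now publishes the fb for concurrent lookup, so every invariant attribute has to be filled in beforehand.

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>

struct example_framebuffer {
	struct drm_framebuffer base;
	struct drm_gem_object *obj;
};

static const struct drm_framebuffer_funcs example_fb_funcs = {
	.destroy = drm_framebuffer_cleanup,	/* placeholder; real drivers also free their objects */
};

static int example_framebuffer_init(struct drm_device *dev,
				    struct example_framebuffer *efb,
				    struct drm_mode_fb_cmd2 *mode_cmd,
				    struct drm_gem_object *obj)
{
	int ret;

	/* Fill in all invariant fb state first ... */
	drm_helper_mode_fill_fb_struct(&efb->base, mode_cmd);
	efb->obj = obj;

	/* ... then publish it; after this call other threads may look the
	 * fb up, so no further setup is allowed. */
	ret = drm_framebuffer_init(dev, &efb->base, &example_fb_funcs);
	if (ret)
		DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
	return ret;
}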
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index f2d667b8bee..3bdf2a650d9 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -37,6 +37,54 @@
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
+/**
+ * drm_modeset_lock_all - take all modeset locks
+ * @dev: drm device
+ *
+ * This function takes all modeset locks, suitable where a more fine-grained
+ * scheme isn't (yet) implemented.
+ */
+void drm_modeset_lock_all(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_modeset_lock_all);
+
+/**
+ * drm_modeset_unlock_all - drop all modeset locks
+ * @dev: device
+ */
+void drm_modeset_unlock_all(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ mutex_unlock(&crtc->mutex);
+
+ mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_modeset_unlock_all);
+
+/**
+ * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
+ * @dev: device
+ */
+void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ WARN_ON(!mutex_is_locked(&crtc->mutex));
+
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+}
+EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
+
/* Avoid boilerplate. I'm tired of typing. */
#define DRM_ENUM_NAME_FN(fnname, list) \
char *fnname(int val) \
@@ -203,12 +251,10 @@ char *drm_get_connector_status_name(enum drm_connector_status status)
}
/**
- * drm_mode_object_get - allocate a new identifier
+ * drm_mode_object_get - allocate a new modeset identifier
* @dev: DRM device
- * @ptr: object pointer, used to generate unique ID
- * @type: object type
- *
- * LOCKING:
+ * @obj: object pointer, used to generate unique ID
+ * @obj_type: object type
*
* Create a unique identifier based on @ptr in @dev's identifier space. Used
* for tracking modes, CRTCs and connectors.
@@ -231,24 +277,27 @@ again:
mutex_lock(&dev->mode_config.idr_mutex);
ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id);
+
+ if (!ret) {
+ /*
+ * Set up the object linking under the protection of the idr
+ * lock so that other users can't see inconsistent state.
+ */
+ obj->id = new_id;
+ obj->type = obj_type;
+ }
mutex_unlock(&dev->mode_config.idr_mutex);
+
if (ret == -EAGAIN)
goto again;
- else if (ret)
- return ret;
- obj->id = new_id;
- obj->type = obj_type;
- return 0;
+ return ret;
}
/**
- * drm_mode_object_put - free an identifer
+ * drm_mode_object_put - free a modeset identifer
* @dev: DRM device
- * @id: ID to free
- *
- * LOCKING:
- * Caller must hold DRM mode_config lock.
+ * @object: object to free
*
* Free @id from @dev's unique identifier pool.
*/
@@ -260,11 +309,24 @@ static void drm_mode_object_put(struct drm_device *dev,
mutex_unlock(&dev->mode_config.idr_mutex);
}
+/**
+ * drm_mode_object_find - look up a drm object with static lifetime
+ * @dev: drm device
+ * @id: id of the mode object
+ * @type: type of the mode object
+ *
+ * Note that framebuffers cannot be looked up with this functions - since those
+ * are reference counted, they need special treatment.
+ */
struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
uint32_t id, uint32_t type)
{
struct drm_mode_object *obj = NULL;
+ /* Framebuffers are reference counted and need their own lookup
+ * function.*/
+ WARN_ON(type == DRM_MODE_OBJECT_FB);
+
mutex_lock(&dev->mode_config.idr_mutex);
obj = idr_find(&dev->mode_config.crtc_idr, id);
if (!obj || (obj->type != type) || (obj->id != id))
@@ -278,13 +340,18 @@ EXPORT_SYMBOL(drm_mode_object_find);
/**
* drm_framebuffer_init - initialize a framebuffer
* @dev: DRM device
- *
- * LOCKING:
- * Caller must hold mode config lock.
+ * @fb: framebuffer to be initialized
+ * @funcs: ... with these functions
*
* Allocates an ID for the framebuffer's parent mode object, sets its mode
* functions & device file and adds it to the master fd list.
*
+ * IMPORTANT:
+ * This functions publishes the fb and makes it available for concurrent access
+ * by other users. Which means by this point the fb _must_ be fully set up -
+ * since all the fb attributes are invariant over its lifetime, no further
+ * locking but only correct reference counting is required.
+ *
* RETURNS:
* Zero on success, error code on failure.
*/
@@ -293,16 +360,23 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
{
int ret;
+ mutex_lock(&dev->mode_config.fb_lock);
kref_init(&fb->refcount);
+ INIT_LIST_HEAD(&fb->filp_head);
+ fb->dev = dev;
+ fb->funcs = funcs;
ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
if (ret)
- return ret;
+ goto out;
+
+ /* Grab the idr reference. */
+ drm_framebuffer_reference(fb);
- fb->dev = dev;
- fb->funcs = funcs;
dev->mode_config.num_fb++;
list_add(&fb->head, &dev->mode_config.fb_list);
+out:
+ mutex_unlock(&dev->mode_config.fb_lock);
return 0;
}
@@ -315,23 +389,63 @@ static void drm_framebuffer_free(struct kref *kref)
fb->funcs->destroy(fb);
}
+static struct drm_framebuffer *__drm_framebuffer_lookup(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_mode_object *obj = NULL;
+ struct drm_framebuffer *fb;
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+ obj = idr_find(&dev->mode_config.crtc_idr, id);
+ if (!obj || (obj->type != DRM_MODE_OBJECT_FB) || (obj->id != id))
+ fb = NULL;
+ else
+ fb = obj_to_fb(obj);
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ return fb;
+}
+
+/**
+ * drm_framebuffer_lookup - look up a drm framebuffer and grab a reference
+ * @dev: drm device
+ * @id: id of the fb object
+ *
+ * If successful, this grabs an additional reference to the framebuffer -
+ * callers need to make sure to eventually unreference the returned framebuffer
+ * again.
+ */
+struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_framebuffer *fb;
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ fb = __drm_framebuffer_lookup(dev, id);
+ if (fb)
+ kref_get(&fb->refcount);
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ return fb;
+}
+EXPORT_SYMBOL(drm_framebuffer_lookup);
+
/**
* drm_framebuffer_unreference - unref a framebuffer
+ * @fb: framebuffer to unref
*
- * LOCKING:
- * Caller must hold mode config lock.
+ * This functions decrements the fb's refcount and frees it if it drops to zero.
*/
void drm_framebuffer_unreference(struct drm_framebuffer *fb)
{
- struct drm_device *dev = fb->dev;
DRM_DEBUG("FB ID: %d\n", fb->base.id);
- WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
kref_put(&fb->refcount, drm_framebuffer_free);
}
EXPORT_SYMBOL(drm_framebuffer_unreference);
/**
* drm_framebuffer_reference - incr the fb refcnt
+ * @fb: framebuffer
*/
void drm_framebuffer_reference(struct drm_framebuffer *fb)
{
@@ -340,29 +454,74 @@ void drm_framebuffer_reference(struct drm_framebuffer *fb)
}
EXPORT_SYMBOL(drm_framebuffer_reference);
+static void drm_framebuffer_free_bug(struct kref *kref)
+{
+ BUG();
+}
+
+static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
+{
+ DRM_DEBUG("FB ID: %d\n", fb->base.id);
+ kref_put(&fb->refcount, drm_framebuffer_free_bug);
+}
+
+/* dev->mode_config.fb_lock must be held! */
+static void __drm_framebuffer_unregister(struct drm_device *dev,
+ struct drm_framebuffer *fb)
+{
+ mutex_lock(&dev->mode_config.idr_mutex);
+ idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ fb->base.id = 0;
+
+ __drm_framebuffer_unreference(fb);
+}
+
+/**
+ * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
+ * @fb: fb to unregister
+ *
+ * Drivers need to call this when cleaning up driver-private framebuffers, e.g.
+ * those used for fbdev. Note that the caller must hold a reference of it's own,
+ * i.e. the object may not be destroyed through this call (since it'll lead to a
+ * locking inversion).
+ */
+void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = fb->dev;
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ /* Mark fb as reaped and drop idr ref. */
+ __drm_framebuffer_unregister(dev, fb);
+ mutex_unlock(&dev->mode_config.fb_lock);
+}
+EXPORT_SYMBOL(drm_framebuffer_unregister_private);
+
/**
* drm_framebuffer_cleanup - remove a framebuffer object
* @fb: framebuffer to remove
*
- * LOCKING:
- * Caller must hold mode config lock.
+ * Cleanup references to a user-created framebuffer. This function is intended
+ * to be used from the drivers ->destroy callback.
+ *
+ * Note that this function does not remove the fb from active usuage - if it is
+ * still used anywhere, hilarity can ensue since userspace could call getfb on
+ * the id and get back -EINVAL. Obviously no concern at driver unload time.
*
- * Scans all the CRTCs in @dev's mode_config. If they're using @fb, removes
- * it, setting it to NULL.
+ * Also, the framebuffer will not be removed from the lookup idr - for
+ * user-created framebuffers this will happen in in the rmfb ioctl. For
+ * driver-private objects (e.g. for fbdev) drivers need to explicitly call
+ * drm_framebuffer_unregister_private.
*/
void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
{
struct drm_device *dev = fb->dev;
- /*
- * This could be moved to drm_framebuffer_remove(), but for
- * debugging is nice to keep around the list of fb's that are
- * no longer associated w/ a drm_file but are not unreferenced
- * yet. (i915 and omapdrm have debugfs files which will show
- * this.)
- */
- drm_mode_object_put(dev, &fb->base);
+
+ mutex_lock(&dev->mode_config.fb_lock);
list_del(&fb->head);
dev->mode_config.num_fb--;
+ mutex_unlock(&dev->mode_config.fb_lock);
}
EXPORT_SYMBOL(drm_framebuffer_cleanup);
@@ -370,11 +529,13 @@ EXPORT_SYMBOL(drm_framebuffer_cleanup);
* drm_framebuffer_remove - remove and unreference a framebuffer object
* @fb: framebuffer to remove
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
* Scans all the CRTCs and planes in @dev's mode_config. If they're
- * using @fb, removes it, setting it to NULL.
+ * using @fb, removes it, setting it to NULL. Then drops the reference to the
+ * passed-in framebuffer. Might take the modeset locks.
+ *
+ * Note that this function optimizes the cleanup away if the caller holds the
+ * last reference to the framebuffer. It is also guaranteed to not take the
+ * modeset locks in this case.
*/
void drm_framebuffer_remove(struct drm_framebuffer *fb)
{
@@ -384,33 +545,53 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
struct drm_mode_set set;
int ret;
- /* remove from any CRTC */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->fb == fb) {
- /* should turn off the crtc */
- memset(&set, 0, sizeof(struct drm_mode_set));
- set.crtc = crtc;
- set.fb = NULL;
- ret = crtc->funcs->set_config(&set);
- if (ret)
- DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
+ WARN_ON(!list_empty(&fb->filp_head));
+
+ /*
+ * drm ABI mandates that we remove any deleted framebuffers from active
+ * useage. But since most sane clients only remove framebuffers they no
+ * longer need, try to optimize this away.
+ *
+ * Since we're holding a reference ourselves, observing a refcount of 1
+ * means that we're the last holder and can skip it. Also, the refcount
+ * can never increase from 1 again, so we don't need any barriers or
+ * locks.
+ *
+ * Note that userspace could try to race with use and instate a new
+ * usage _after_ we've cleared all current ones. End result will be an
+ * in-use fb with fb-id == 0. Userspace is allowed to shoot its own foot
+ * in this manner.
+ */
+ if (atomic_read(&fb->refcount.refcount) > 1) {
+ drm_modeset_lock_all(dev);
+ /* remove from any CRTC */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->fb == fb) {
+ /* should turn off the crtc */
+ memset(&set, 0, sizeof(struct drm_mode_set));
+ set.crtc = crtc;
+ set.fb = NULL;
+ ret = drm_mode_set_config_internal(&set);
+ if (ret)
+ DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
+ }
}
- }
- list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
- if (plane->fb == fb) {
- /* should turn off the crtc */
- ret = plane->funcs->disable_plane(plane);
- if (ret)
- DRM_ERROR("failed to disable plane with busy fb\n");
- /* disconnect the plane from the fb and crtc: */
- plane->fb = NULL;
- plane->crtc = NULL;
+ list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ if (plane->fb == fb) {
+ /* should turn off the crtc */
+ ret = plane->funcs->disable_plane(plane);
+ if (ret)
+ DRM_ERROR("failed to disable plane with busy fb\n");
+ /* disconnect the plane from the fb and crtc: */
+ __drm_framebuffer_unreference(plane->fb);
+ plane->fb = NULL;
+ plane->crtc = NULL;
+ }
}
+ drm_modeset_unlock_all(dev);
}
- list_del(&fb->filp_head);
-
drm_framebuffer_unreference(fb);
}
EXPORT_SYMBOL(drm_framebuffer_remove);
@@ -421,9 +602,6 @@ EXPORT_SYMBOL(drm_framebuffer_remove);
* @crtc: CRTC object to init
* @funcs: callbacks for the new CRTC
*
- * LOCKING:
- * Takes mode_config lock.
- *
* Inits a new object created as base part of an driver crtc object.
*
* RETURNS:
@@ -438,7 +616,9 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
crtc->funcs = funcs;
crtc->invert_dimensions = false;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
+ mutex_init(&crtc->mutex);
+ mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex);
ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
if (ret)
@@ -450,7 +630,7 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
dev->mode_config.num_crtc++;
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -460,9 +640,6 @@ EXPORT_SYMBOL(drm_crtc_init);
* drm_crtc_cleanup - Cleans up the core crtc usage.
* @crtc: CRTC to cleanup
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
* Cleanup @crtc. Removes from drm modesetting space
* does NOT free object, caller does that.
*/
@@ -484,9 +661,6 @@ EXPORT_SYMBOL(drm_crtc_cleanup);
* @connector: connector the new mode
* @mode: mode data
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
* Add @mode to @connector's mode list for later use.
*/
void drm_mode_probed_add(struct drm_connector *connector,
@@ -501,9 +675,6 @@ EXPORT_SYMBOL(drm_mode_probed_add);
* @connector: connector list to modify
* @mode: mode to remove
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
* Remove @mode from @connector's mode list, then free it.
*/
void drm_mode_remove(struct drm_connector *connector,
@@ -519,10 +690,7 @@ EXPORT_SYMBOL(drm_mode_remove);
* @dev: DRM device
* @connector: the connector to init
* @funcs: callbacks for this connector
- * @name: user visible name of the connector
- *
- * LOCKING:
- * Takes mode config lock.
+ * @connector_type: user visible type of the connector
*
* Initialises a preallocated connector. Connectors should be
* subclassed as part of driver connector objects.
@@ -537,7 +705,7 @@ int drm_connector_init(struct drm_device *dev,
{
int ret;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
if (ret)
@@ -567,7 +735,7 @@ int drm_connector_init(struct drm_device *dev,
dev->mode_config.dpms_property, 0);
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -577,9 +745,6 @@ EXPORT_SYMBOL(drm_connector_init);
* drm_connector_cleanup - cleans up an initialised connector
* @connector: connector to cleanup
*
- * LOCKING:
- * Takes mode config lock.
- *
* Cleans up the connector but doesn't free the object.
*/
void drm_connector_cleanup(struct drm_connector *connector)
@@ -596,11 +761,9 @@ void drm_connector_cleanup(struct drm_connector *connector)
list_for_each_entry_safe(mode, t, &connector->user_modes, head)
drm_mode_remove(connector, mode);
- mutex_lock(&dev->mode_config.mutex);
drm_mode_object_put(dev, &connector->base);
list_del(&connector->head);
dev->mode_config.num_connector--;
- mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_connector_cleanup);
@@ -622,7 +785,7 @@ int drm_encoder_init(struct drm_device *dev,
{
int ret;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
if (ret)
@@ -636,7 +799,7 @@ int drm_encoder_init(struct drm_device *dev,
dev->mode_config.num_encoder++;
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -645,11 +808,11 @@ EXPORT_SYMBOL(drm_encoder_init);
void drm_encoder_cleanup(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
drm_mode_object_put(dev, &encoder->base);
list_del(&encoder->head);
dev->mode_config.num_encoder--;
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
}
EXPORT_SYMBOL(drm_encoder_cleanup);
@@ -661,7 +824,7 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
{
int ret;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
if (ret)
@@ -695,7 +858,7 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
}
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -705,7 +868,7 @@ void drm_plane_cleanup(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
kfree(plane->format_types);
drm_mode_object_put(dev, &plane->base);
/* if not added to a list, it must be a private plane */
@@ -713,7 +876,7 @@ void drm_plane_cleanup(struct drm_plane *plane)
list_del(&plane->head);
dev->mode_config.num_plane--;
}
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
}
EXPORT_SYMBOL(drm_plane_cleanup);
@@ -721,9 +884,6 @@ EXPORT_SYMBOL(drm_plane_cleanup);
* drm_mode_create - create a new display mode
* @dev: DRM device
*
- * LOCKING:
- * Caller must hold DRM mode_config lock.
- *
* Create a new drm_display_mode, give it an ID, and return it.
*
* RETURNS:
@@ -751,9 +911,6 @@ EXPORT_SYMBOL(drm_mode_create);
* @dev: DRM device
* @mode: mode to remove
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
* Free @mode's unique identifier, then free it.
*/
void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
@@ -978,16 +1135,19 @@ EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
* drm_mode_config_init - initialize DRM mode_configuration structure
* @dev: DRM device
*
- * LOCKING:
- * None, should happen single threaded at init time.
- *
* Initialize @dev's mode_config structure, used for tracking the graphics
* configuration of @dev.
+ *
+ * Since this initializes the modeset locks themselves, no locking is possible
+ * here. That is not a problem, because this should happen single-threaded at
+ * init time. It is the driver's job to ensure this guarantee.
+ *
*/
void drm_mode_config_init(struct drm_device *dev)
{
mutex_init(&dev->mode_config.mutex);
mutex_init(&dev->mode_config.idr_mutex);
+ mutex_init(&dev->mode_config.fb_lock);
INIT_LIST_HEAD(&dev->mode_config.fb_list);
INIT_LIST_HEAD(&dev->mode_config.crtc_list);
INIT_LIST_HEAD(&dev->mode_config.connector_list);
@@ -997,9 +1157,9 @@ void drm_mode_config_init(struct drm_device *dev)
INIT_LIST_HEAD(&dev->mode_config.plane_list);
idr_init(&dev->mode_config.crtc_idr);
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
drm_mode_create_standard_connector_properties(dev);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
/* Just to be sure */
dev->mode_config.num_fb = 0;
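Because both drm_mode_config_init() and drm_mode_config_cleanup() run lock-free, the expected call sites are the single-threaded driver load and unload paths. A rough sketch (foo_ names and the mode_config funcs are hypothetical):

    static int foo_modeset_init(struct drm_device *dev)
    {
            drm_mode_config_init(dev);   /* must run before any CRTC/connector is registered */

            dev->mode_config.min_width  = 0;
            dev->mode_config.min_height = 0;
            dev->mode_config.max_width  = 8192;
            dev->mode_config.max_height = 8192;
            dev->mode_config.funcs      = &foo_mode_config_funcs;

            /* ... create CRTCs, encoders and connectors here ... */
            return 0;
    }

    static void foo_modeset_fini(struct drm_device *dev)
    {
            /* Equally single-threaded: no userspace or worker may still be active. */
            drm_mode_config_cleanup(dev);
    }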
@@ -1057,12 +1217,13 @@ EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
* drm_mode_config_cleanup - free up DRM mode_config info
* @dev: DRM device
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
* Free up all the connectors and CRTCs associated with this DRM device, then
* free up the framebuffers and associated buffer objects.
*
+ * Note that since this /should/ happen single-threaded at driver/device
+ * teardown time, no locking is required. It's the driver's job to ensure that
+ * this guarantee actually holds true.
+ *
* FIXME: cleanup any dangling user buffer objects too
*/
void drm_mode_config_cleanup(struct drm_device *dev)
@@ -1089,6 +1250,15 @@ void drm_mode_config_cleanup(struct drm_device *dev)
drm_property_destroy(dev, property);
}
+ /*
+ * This is a single-threaded teardown context, so there is no need to grab
+ * fb_lock to protect against concurrent fb_list access. On the contrary,
+ * doing so would deadlock with the drm_framebuffer_cleanup function.
+ *
+ * Also, if there are any framebuffers left, that's a driver leak now,
+ * so politely WARN about this.
+ */
+ WARN_ON(!list_empty(&dev->mode_config.fb_list));
list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
drm_framebuffer_remove(fb);
}
@@ -1112,9 +1282,6 @@ EXPORT_SYMBOL(drm_mode_config_cleanup);
* @out: drm_mode_modeinfo struct to return to the user
* @in: drm_display_mode to use
*
- * LOCKING:
- * None.
- *
* Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
* the user.
*/
@@ -1151,9 +1318,6 @@ static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
* @out: drm_display_mode to return to the user
* @in: drm_mode_modeinfo to use
*
- * LOCKING:
- * None.
- *
* Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
* the caller.
*
@@ -1188,13 +1352,9 @@ static int drm_crtc_convert_umode(struct drm_display_mode *out,
/**
* drm_mode_getresources - get graphics configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Construct a set of configuration description structures and return
* them to the user, including CRTC, connector and framebuffer configuration.
@@ -1228,8 +1388,8 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&file_priv->fbs_lock);
/*
* For the non-control nodes we need to limit the list of resources
* by IDs in the group list for this node
@@ -1237,6 +1397,23 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
list_for_each(lh, &file_priv->fbs)
fb_count++;
+ /* handle this in 4 parts */
+ /* FBs */
+ if (card_res->count_fbs >= fb_count) {
+ copied = 0;
+ fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
+ list_for_each_entry(fb, &file_priv->fbs, filp_head) {
+ if (put_user(fb->base.id, fb_id + copied)) {
+ mutex_unlock(&file_priv->fbs_lock);
+ return -EFAULT;
+ }
+ copied++;
+ }
+ }
+ card_res->count_fbs = fb_count;
+ mutex_unlock(&file_priv->fbs_lock);
+
+ drm_modeset_lock_all(dev);
mode_group = &file_priv->master->minor->mode_group;
if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
@@ -1260,21 +1437,6 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
card_res->max_width = dev->mode_config.max_width;
card_res->min_width = dev->mode_config.min_width;
- /* handle this in 4 parts */
- /* FBs */
- if (card_res->count_fbs >= fb_count) {
- copied = 0;
- fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
- list_for_each_entry(fb, &file_priv->fbs, filp_head) {
- if (put_user(fb->base.id, fb_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- card_res->count_fbs = fb_count;
-
/* CRTCs */
if (card_res->count_crtcs >= crtc_count) {
copied = 0;
@@ -1370,19 +1532,15 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
card_res->count_connectors, card_res->count_encoders);
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
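The split locking (fbs_lock for the per-file framebuffer list, then the full modeset locks for CRTCs, encoders and connectors) is invisible to userspace. A hedged libdrm sketch of the consumer side, with the device node path assumed:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    int main(void)
    {
            int fd = open("/dev/dri/card0", O_RDWR);      /* assumed device node */
            drmModeRes *res = drmModeGetResources(fd);    /* wraps DRM_IOCTL_MODE_GETRESOURCES */

            if (!res)
                    return 1;
            printf("%d crtcs, %d encoders, %d connectors, %d fbs\n",
                   res->count_crtcs, res->count_encoders,
                   res->count_connectors, res->count_fbs);
            drmModeFreeResources(res);
            close(fd);
            return 0;
    }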
/**
* drm_mode_getcrtc - get CRTC configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Construct a CRTC configuration structure to return to the user.
*
@@ -1402,7 +1560,7 @@ int drm_mode_getcrtc(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
DRM_MODE_OBJECT_CRTC);
@@ -1430,19 +1588,15 @@ int drm_mode_getcrtc(struct drm_device *dev,
}
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
/**
* drm_mode_getconnector - get connector configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Construct a connector configuration structure to return to the user.
*
@@ -1575,6 +1729,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
out:
mutex_unlock(&dev->mode_config.mutex);
+
return ret;
}
@@ -1589,7 +1744,7 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, enc_resp->encoder_id,
DRM_MODE_OBJECT_ENCODER);
if (!obj) {
@@ -1608,7 +1763,7 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
enc_resp->possible_clones = encoder->possible_clones;
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -1618,9 +1773,6 @@ out:
* @data: ioctl data
* @file_priv: DRM file info
*
- * LOCKING:
- * Takes mode config lock.
- *
* Return a plane count and set of IDs.
*/
int drm_mode_getplane_res(struct drm_device *dev, void *data,
@@ -1635,7 +1787,7 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
config = &dev->mode_config;
/*
@@ -1657,7 +1809,7 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
plane_resp->count_planes = config->num_plane;
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -1667,9 +1819,6 @@ out:
* @data: ioctl data
* @file_priv: DRM file info
*
- * LOCKING:
- * Takes mode config lock.
- *
* Return plane info, including formats supported, gamma size, any
* current fb, etc.
*/
@@ -1685,7 +1834,7 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, plane_resp->plane_id,
DRM_MODE_OBJECT_PLANE);
if (!obj) {
@@ -1725,7 +1874,7 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
plane_resp->count_format_types = plane->format_count;
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
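The matching userspace side of the two plane ioctls, as a hedged libdrm sketch (fd as in the resource example above):

    #include <stdint.h>
    #include <stdio.h>
    #include <xf86drmMode.h>

    static void dump_planes(int fd)
    {
            drmModePlaneRes *pres = drmModeGetPlaneResources(fd);
            uint32_t i;

            if (!pres)
                    return;
            for (i = 0; i < pres->count_planes; i++) {
                    drmModePlane *p = drmModeGetPlane(fd, pres->planes[i]);
                    if (!p)
                            continue;
                    printf("plane %u: %u formats, possible_crtcs 0x%x\n",
                           p->plane_id, p->count_formats, p->possible_crtcs);
                    drmModeFreePlane(p);
            }
            drmModeFreePlaneResources(pres);
    }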
@@ -1733,10 +1882,7 @@ out:
* drm_mode_setplane - set up or tear down a plane
* @dev: DRM device
* @data: ioctl data
- * @file_prive: DRM file info
- *
- * LOCKING:
- * Takes mode config lock.
+ * @file_priv: DRM file info
*
* Set plane info, including placement, fb, scaling, and other factors.
* Or pass a NULL fb to disable.
@@ -1748,7 +1894,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
struct drm_mode_object *obj;
struct drm_plane *plane;
struct drm_crtc *crtc;
- struct drm_framebuffer *fb;
+ struct drm_framebuffer *fb = NULL, *old_fb = NULL;
int ret = 0;
unsigned int fb_width, fb_height;
int i;
@@ -1756,8 +1902,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
-
/*
* First, find the plane, crtc, and fb objects. If not available,
* we don't bother to call the driver.
@@ -1767,16 +1911,18 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
if (!obj) {
DRM_DEBUG_KMS("Unknown plane ID %d\n",
plane_req->plane_id);
- ret = -ENOENT;
- goto out;
+ return -ENOENT;
}
plane = obj_to_plane(obj);
/* No fb means shut it down */
if (!plane_req->fb_id) {
+ drm_modeset_lock_all(dev);
+ old_fb = plane->fb;
plane->funcs->disable_plane(plane);
plane->crtc = NULL;
plane->fb = NULL;
+ drm_modeset_unlock_all(dev);
goto out;
}
@@ -1790,15 +1936,13 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
}
crtc = obj_to_crtc(obj);
- obj = drm_mode_object_find(dev, plane_req->fb_id,
- DRM_MODE_OBJECT_FB);
- if (!obj) {
+ fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
+ if (!fb) {
DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
plane_req->fb_id);
ret = -ENOENT;
goto out;
}
- fb = obj_to_fb(obj);
/* Check whether this plane supports the fb pixel format. */
for (i = 0; i < plane->format_count; i++)
@@ -1844,31 +1988,62 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
goto out;
}
+ drm_modeset_lock_all(dev);
ret = plane->funcs->update_plane(plane, crtc, fb,
plane_req->crtc_x, plane_req->crtc_y,
plane_req->crtc_w, plane_req->crtc_h,
plane_req->src_x, plane_req->src_y,
plane_req->src_w, plane_req->src_h);
if (!ret) {
+ old_fb = plane->fb;
plane->crtc = crtc;
plane->fb = fb;
+ fb = NULL;
}
+ drm_modeset_unlock_all(dev);
out:
- mutex_unlock(&dev->mode_config.mutex);
+ if (fb)
+ drm_framebuffer_unreference(fb);
+ if (old_fb)
+ drm_framebuffer_unreference(old_fb);
return ret;
}
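Userspace drives this through drmModeSetPlane(); the source rectangle is in 16.16 fixed point, which is exactly what the range checks above validate. A hedged sketch, with plane_id, crtc_id and fb_id assumed to come from the sketches above:

    /* Show all of fb_id full-screen at 1280x720 (object IDs hypothetical). */
    int ret = drmModeSetPlane(fd, plane_id, crtc_id, fb_id, 0,
                              0, 0, 1280, 720,               /* crtc_x/y/w/h in pixels */
                              0, 0, 1280 << 16, 720 << 16);  /* src_x/y/w/h in 16.16 */
    if (ret)
            fprintf(stderr, "setplane failed: %d\n", ret);

    /* An fb_id of 0 disables the plane again (the "no fb means shut it down" path). */
    drmModeSetPlane(fd, plane_id, crtc_id, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);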
/**
- * drm_mode_setcrtc - set CRTC configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
+ * drm_mode_set_config_internal - helper to call ->set_config
+ * @set: modeset config to set
*
- * LOCKING:
- * Takes mode config lock.
+ * This is a little helper to wrap internal calls to the ->set_config driver
+ * interface. The only thing it adds is the correct refcounting dance.
+ */
+int drm_mode_set_config_internal(struct drm_mode_set *set)
+{
+ struct drm_crtc *crtc = set->crtc;
+ struct drm_framebuffer *fb, *old_fb;
+ int ret;
+
+ old_fb = crtc->fb;
+ fb = set->fb;
+
+ ret = crtc->funcs->set_config(set);
+ if (ret == 0) {
+ if (old_fb)
+ drm_framebuffer_unreference(old_fb);
+ if (fb)
+ drm_framebuffer_reference(fb);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_mode_set_config_internal);
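In-kernel callers that used to invoke crtc->funcs->set_config() directly (the fbdev helper, for instance) are expected to go through this wrapper so framebuffer refcounts stay balanced. A minimal sketch of a caller, with crtc, mode, connector and new_fb assumed to exist:

    struct drm_mode_set set = {
            .crtc           = crtc,
            .fb             = new_fb,        /* may be NULL to disable the CRTC */
            .x              = 0,
            .y              = 0,
            .mode           = mode,
            .connectors     = &connector,    /* array of drm_connector pointers */
            .num_connectors = 1,
    };

    /* On success this references new_fb and unreferences the previously bound fb. */
    int ret = drm_mode_set_config_internal(&set);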
+
+/**
+ * drm_mode_setcrtc - set CRTC configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Build a new CRTC configuration based on user request.
*
@@ -1899,7 +2074,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
return -ERANGE;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_req->crtc_id,
DRM_MODE_OBJECT_CRTC);
if (!obj) {
@@ -1921,16 +2096,16 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
goto out;
}
fb = crtc->fb;
+ /* Make refcounting symmetric with the lookup path. */
+ drm_framebuffer_reference(fb);
} else {
- obj = drm_mode_object_find(dev, crtc_req->fb_id,
- DRM_MODE_OBJECT_FB);
- if (!obj) {
+ fb = drm_framebuffer_lookup(dev, crtc_req->fb_id);
+ if (!fb) {
DRM_DEBUG_KMS("Unknown FB ID%d\n",
crtc_req->fb_id);
ret = -EINVAL;
goto out;
}
- fb = obj_to_fb(obj);
}
mode = drm_mode_create(dev);
@@ -2027,12 +2202,15 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
set.connectors = connector_set;
set.num_connectors = crtc_req->count_connectors;
set.fb = fb;
- ret = crtc->funcs->set_config(&set);
+ ret = drm_mode_set_config_internal(&set);
out:
+ if (fb)
+ drm_framebuffer_unreference(fb);
+
kfree(connector_set);
drm_mode_destroy(dev, mode);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -2050,15 +2228,14 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
crtc = obj_to_crtc(obj);
+ mutex_lock(&crtc->mutex);
if (req->flags & DRM_MODE_CURSOR_BO) {
if (!crtc->funcs->cursor_set) {
ret = -ENXIO;
@@ -2078,7 +2255,8 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
}
}
out:
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&crtc->mutex);
+
return ret;
}
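With cursor updates now serialised only by the per-CRTC mutex, they no longer contend with full modesets; the userspace interface is unchanged. A hedged libdrm sketch (handle and sizes assumed):

    /* bo_handle names a 64x64 ARGB8888 buffer object (hypothetical). */
    drmModeSetCursor(fd, crtc_id, bo_handle, 64, 64);
    drmModeMoveCursor(fd, crtc_id, 100, 100);

    /* A zero handle hides the cursor again. */
    drmModeSetCursor(fd, crtc_id, 0, 0, 0);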
@@ -2089,7 +2267,7 @@ uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
switch (bpp) {
case 8:
- fmt = DRM_FORMAT_RGB332;
+ fmt = DRM_FORMAT_C8;
break;
case 16:
if (depth == 15)
@@ -2120,13 +2298,9 @@ EXPORT_SYMBOL(drm_mode_legacy_fb_format);
/**
* drm_mode_addfb - add an FB to the graphics configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Add a new FB to the specified CRTC, given a user request.
*
@@ -2161,24 +2335,19 @@ int drm_mode_addfb(struct drm_device *dev,
if ((config->min_height > r.height) || (r.height > config->max_height))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
-
- /* TODO check buffer is sufficiently large */
- /* TODO setup destructor callback */
-
fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("could not create framebuffer\n");
- ret = PTR_ERR(fb);
- goto out;
+ drm_modeset_unlock_all(dev);
+ return PTR_ERR(fb);
}
+ mutex_lock(&file_priv->fbs_lock);
or->fb_id = fb->base.id;
list_add(&fb->filp_head, &file_priv->fbs);
DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+ mutex_unlock(&file_priv->fbs_lock);
-out:
- mutex_unlock(&dev->mode_config.mutex);
return ret;
}
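The legacy ioctl still takes depth/bpp and converts them via drm_mode_legacy_fb_format() above, so an 8 bpp request now produces a DRM_FORMAT_C8 framebuffer. A hedged userspace sketch, assuming pitch and handle come from a dumb-buffer allocation not shown here:

    uint32_t fb_id;
    int ret = drmModeAddFB(fd, 1280, 720, 24, 32, pitch, handle, &fb_id);

    if (ret == 0) {
            /* ... use fb_id with setcrtc/setplane/page_flip ... */
            drmModeRmFB(fd, fb_id);
    }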
@@ -2304,13 +2473,9 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
/**
* drm_mode_addfb2 - add an FB to the graphics configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Add a new FB to the specified CRTC, given a user request with format.
*
@@ -2350,33 +2515,28 @@ int drm_mode_addfb2(struct drm_device *dev,
if (ret)
return ret;
- mutex_lock(&dev->mode_config.mutex);
-
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("could not create framebuffer\n");
- ret = PTR_ERR(fb);
- goto out;
+ drm_modeset_unlock_all(dev);
+ return PTR_ERR(fb);
}
+ mutex_lock(&file_priv->fbs_lock);
r->fb_id = fb->base.id;
list_add(&fb->filp_head, &file_priv->fbs);
DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+ mutex_unlock(&file_priv->fbs_lock);
+
-out:
- mutex_unlock(&dev->mode_config.mutex);
return ret;
}
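The fourcc-based variant skips the depth/bpp guessing entirely; a hedged sketch using the same assumed dumb buffer:

    /* DRM_FORMAT_XRGB8888 comes from <drm_fourcc.h>. */
    uint32_t handles[4] = { handle }, pitches[4] = { pitch }, offsets[4] = { 0 };
    uint32_t fb_id;

    int ret = drmModeAddFB2(fd, 1280, 720, DRM_FORMAT_XRGB8888,
                            handles, pitches, offsets, &fb_id, 0);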
/**
* drm_mode_rmfb - remove an FB from the configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Remove the FB specified by the user.
*
@@ -2388,50 +2548,49 @@ out:
int drm_mode_rmfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
- struct drm_mode_object *obj;
struct drm_framebuffer *fb = NULL;
struct drm_framebuffer *fbl = NULL;
uint32_t *id = data;
- int ret = 0;
int found = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
- /* TODO check that we really get a framebuffer back. */
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- fb = obj_to_fb(obj);
+ mutex_lock(&file_priv->fbs_lock);
+ mutex_lock(&dev->mode_config.fb_lock);
+ fb = __drm_framebuffer_lookup(dev, *id);
+ if (!fb)
+ goto fail_lookup;
list_for_each_entry(fbl, &file_priv->fbs, filp_head)
if (fb == fbl)
found = 1;
+ if (!found)
+ goto fail_lookup;
- if (!found) {
- ret = -EINVAL;
- goto out;
- }
+ /* Mark fb as reaped, we still have a ref from fpriv->fbs. */
+ __drm_framebuffer_unregister(dev, fb);
+
+ list_del_init(&fb->filp_head);
+ mutex_unlock(&dev->mode_config.fb_lock);
+ mutex_unlock(&file_priv->fbs_lock);
drm_framebuffer_remove(fb);
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
+ return 0;
+
+fail_lookup:
+ mutex_unlock(&dev->mode_config.fb_lock);
+ mutex_unlock(&file_priv->fbs_lock);
+
+ return -EINVAL;
}
/**
* drm_mode_getfb - get FB info
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Lookup the FB given its ID and return info about it.
*
@@ -2444,30 +2603,28 @@ int drm_mode_getfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_fb_cmd *r = data;
- struct drm_mode_object *obj;
struct drm_framebuffer *fb;
- int ret = 0;
+ int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- fb = obj_to_fb(obj);
+ fb = drm_framebuffer_lookup(dev, r->fb_id);
+ if (!fb)
+ return -EINVAL;
r->height = fb->height;
r->width = fb->width;
r->depth = fb->depth;
r->bpp = fb->bits_per_pixel;
r->pitch = fb->pitches[0];
- fb->funcs->create_handle(fb, file_priv, &r->handle);
+ if (fb->funcs->create_handle)
+ ret = fb->funcs->create_handle(fb, file_priv, &r->handle);
+ else
+ ret = -ENODEV;
+
+ drm_framebuffer_unreference(fb);
-out:
- mutex_unlock(&dev->mode_config.mutex);
return ret;
}
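This is the pattern every fb-consuming ioctl is being converted to: drm_framebuffer_lookup() returns the framebuffer with its own reference held (no modeset lock needed), and the caller must drop that reference when done. A minimal sketch of the contract:

    struct drm_framebuffer *fb;

    fb = drm_framebuffer_lookup(dev, fb_id);   /* grabs a reference, or returns NULL */
    if (!fb)
            return -EINVAL;

    /* ... use fb without holding mode_config.mutex ... */

    drm_framebuffer_unreference(fb);           /* balance the lookup reference */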
@@ -2477,7 +2634,6 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
struct drm_clip_rect __user *clips_ptr;
struct drm_clip_rect *clips = NULL;
struct drm_mode_fb_dirty_cmd *r = data;
- struct drm_mode_object *obj;
struct drm_framebuffer *fb;
unsigned flags;
int num_clips;
@@ -2486,13 +2642,9 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj) {
- ret = -EINVAL;
- goto out_err1;
- }
- fb = obj_to_fb(obj);
+ fb = drm_framebuffer_lookup(dev, r->fb_id);
+ if (!fb)
+ return -EINVAL;
num_clips = r->num_clips;
clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
@@ -2530,27 +2682,26 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
}
if (fb->funcs->dirty) {
+ drm_modeset_lock_all(dev);
ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
clips, num_clips);
+ drm_modeset_unlock_all(dev);
} else {
ret = -ENOSYS;
- goto out_err2;
}
out_err2:
kfree(clips);
out_err1:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_framebuffer_unreference(fb);
+
return ret;
}
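Userspace flushes damage with drmModeDirtyFB(); a hedged sketch:

    drmModeClip clip = { .x1 = 0, .y1 = 0, .x2 = 1280, .y2 = 720 };

    /* Only drivers that implement fb->funcs->dirty honour this; others return -ENOSYS. */
    drmModeDirtyFB(fd, fb_id, &clip, 1);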
/**
* drm_fb_release - remove and free the FBs on this file
- * @filp: file * from the ioctl
- *
- * LOCKING:
- * Takes mode config lock.
+ * @priv: drm file for the ioctl
*
* Destroy all the FBs associated with @priv.
*
@@ -2564,11 +2715,20 @@ void drm_fb_release(struct drm_file *priv)
struct drm_device *dev = priv->minor->dev;
struct drm_framebuffer *fb, *tfb;
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&priv->fbs_lock);
list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ /* Mark fb as reaped, we still have a ref from fpriv->fbs. */
+ __drm_framebuffer_unregister(dev, fb);
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ list_del_init(&fb->filp_head);
+
+ /* This will also drop the fpriv->fbs reference. */
drm_framebuffer_remove(fb);
}
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&priv->fbs_lock);
}
/**
@@ -2660,10 +2820,9 @@ EXPORT_SYMBOL(drm_mode_detachmode_crtc);
/**
* drm_mode_attachmode_ioctl - Attach a user mode to a connector
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* This attaches a user specified mode to a connector.
* Called by the user via ioctl.
@@ -2684,7 +2843,7 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
@@ -2708,17 +2867,16 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev,
drm_mode_attachmode(dev, connector, mode);
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
/**
* drm_mode_detachmode_ioctl - Detach a user specified mode from a connector
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Called by the user via ioctl.
*
@@ -2738,7 +2896,7 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
@@ -2755,7 +2913,7 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev,
ret = drm_mode_detachmode(dev, connector, &mode);
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -3001,7 +3159,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
if (!obj) {
ret = -EINVAL;
@@ -3079,7 +3237,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
out_resp->count_enum_blobs = blob_count;
}
done:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -3130,7 +3288,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
if (!obj) {
ret = -EINVAL;
@@ -3148,7 +3306,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
out_resp->length = blob->length;
done:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -3290,7 +3448,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
if (!obj) {
@@ -3327,7 +3485,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
}
arg->count_props = props_count;
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -3344,7 +3502,7 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
if (!arg_obj)
@@ -3382,7 +3540,7 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
}
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -3444,7 +3602,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
ret = -EINVAL;
@@ -3485,7 +3643,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -3503,7 +3661,7 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
ret = -EINVAL;
@@ -3536,7 +3694,7 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
goto out;
}
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -3546,7 +3704,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
struct drm_mode_crtc_page_flip *page_flip = data;
struct drm_mode_object *obj;
struct drm_crtc *crtc;
- struct drm_framebuffer *fb;
+ struct drm_framebuffer *fb = NULL, *old_fb = NULL;
struct drm_pending_vblank_event *e = NULL;
unsigned long flags;
int hdisplay, vdisplay;
@@ -3556,12 +3714,12 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
page_flip->reserved != 0)
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj)
- goto out;
+ return -EINVAL;
crtc = obj_to_crtc(obj);
+ mutex_lock(&crtc->mutex);
if (crtc->fb == NULL) {
/* The framebuffer is currently unbound, presumably
* due to a hotplug event, that userspace has not
@@ -3574,10 +3732,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
if (crtc->funcs->page_flip == NULL)
goto out;
- obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj)
+ fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
+ if (!fb)
goto out;
- fb = obj_to_fb(obj);
hdisplay = crtc->mode.hdisplay;
vdisplay = crtc->mode.vdisplay;
@@ -3623,6 +3780,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
(void (*) (struct drm_pending_event *)) kfree;
}
+ old_fb = crtc->fb;
ret = crtc->funcs->page_flip(crtc, fb, e);
if (ret) {
if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
@@ -3631,10 +3789,27 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(e);
}
+ /* Keep the old fb, don't unref it. */
+ old_fb = NULL;
+ } else {
+ /*
+ * Warn if the driver hasn't properly updated the crtc->fb
+ * field to reflect that the new framebuffer is now used.
+ * Failing to do so will screw with the reference counting
+ * on framebuffers.
+ */
+ WARN_ON(crtc->fb != fb);
+ /* Unref only the old framebuffer. */
+ fb = NULL;
}
out:
- mutex_unlock(&dev->mode_config.mutex);
+ if (fb)
+ drm_framebuffer_unreference(fb);
+ if (old_fb)
+ drm_framebuffer_unreference(old_fb);
+ mutex_unlock(&crtc->mutex);
+
return ret;
}
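On the userspace side, drmModePageFlip() pairs with reading the completion event back through the DRM fd; a hedged sketch of the usual loop (crtc_id and fb_id assumed):

    static void flip_done(int fd, unsigned int sequence, unsigned int tv_sec,
                          unsigned int tv_usec, void *user_data)
    {
            *(int *)user_data = 1;
    }

    static void flip_and_wait(int fd, uint32_t crtc_id, uint32_t fb_id)
    {
            int done = 0;
            drmEventContext evctx = {
                    .version           = DRM_EVENT_CONTEXT_VERSION,
                    .page_flip_handler = flip_done,
            };

            drmModePageFlip(fd, crtc_id, fb_id, DRM_MODE_PAGE_FLIP_EVENT, &done);
            while (!done)
                    drmHandleEvent(fd, &evctx);   /* reads and dispatches pending DRM events */
    }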
@@ -3702,6 +3877,7 @@ void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
int *bpp)
{
switch (format) {
+ case DRM_FORMAT_C8:
case DRM_FORMAT_RGB332:
case DRM_FORMAT_BGR233:
*depth = 8;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 5a3770fbd77..c194f4e680a 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -29,11 +29,11 @@
*/
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>
-#include "drm_edid_modes.h"
#define version_greater(edid, maj, min) \
(((edid)->version > (maj)) || \
@@ -87,9 +87,6 @@ static struct edid_quirk {
int product_id;
u32 quirks;
} edid_quirk_list[] = {
- /* ASUS VW222S */
- { "ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING },
-
/* Acer AL1706 */
{ "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
/* Acer F51 */
@@ -130,6 +127,746 @@ static struct edid_quirk {
{ "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
};
+/*
+ * Autogenerated from the DMT spec.
+ * This table is copied from xfree86/modes/xf86EdidModes.c.
+ */
+static const struct drm_display_mode drm_dmt_modes[] = {
+ /* 640x350@85Hz */
+ { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
+ 736, 832, 0, 350, 382, 385, 445, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x400@85Hz */
+ { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
+ 736, 832, 0, 400, 401, 404, 445, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 720x400@85Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
+ 828, 936, 0, 400, 401, 404, 446, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 640x480@60Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 489, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x480@72Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+ 704, 832, 0, 480, 489, 492, 520, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x480@75Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+ 720, 840, 0, 480, 481, 484, 500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x480@85Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
+ 752, 832, 0, 480, 481, 484, 509, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 800x600@56Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+ 896, 1024, 0, 600, 601, 603, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@60Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@72Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+ 976, 1040, 0, 600, 637, 643, 666, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+ 896, 1056, 0, 600, 601, 604, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@85Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
+ 896, 1048, 0, 600, 601, 604, 631, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@120Hz RB */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848,
+ 880, 960, 0, 600, 603, 607, 636, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 848x480@60Hz */
+ { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
+ 976, 1088, 0, 480, 486, 494, 517, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@43Hz, interlace */
+ { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
+ 1208, 1264, 0, 768, 768, 772, 817, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1024x768@60Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1024x768@70Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+ 1184, 1328, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1024x768@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
+ 1136, 1312, 0, 768, 769, 772, 800, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@85Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
+ 1168, 1376, 0, 768, 769, 772, 808, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@120Hz RB */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072,
+ 1104, 1184, 0, 768, 771, 775, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1152x864@75Hz */
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+ 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@60Hz RB */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328,
+ 1360, 1440, 0, 768, 771, 778, 790, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x768@60Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
+ 1472, 1664, 0, 768, 771, 778, 798, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@75Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
+ 1488, 1696, 0, 768, 771, 778, 805, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x768@85Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
+ 1496, 1712, 0, 768, 771, 778, 809, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@120Hz RB */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328,
+ 1360, 1440, 0, 768, 771, 778, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x800@60Hz RB */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328,
+ 1360, 1440, 0, 800, 803, 809, 823, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x800@60Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
+ 1480, 1680, 0, 800, 803, 809, 831, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x800@75Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
+ 1488, 1696, 0, 800, 803, 809, 838, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@85Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
+ 1496, 1712, 0, 800, 803, 809, 843, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@120Hz RB */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328,
+ 1360, 1440, 0, 800, 803, 809, 847, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x960@60Hz */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
+ 1488, 1800, 0, 960, 961, 964, 1000, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x960@85Hz */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
+ 1504, 1728, 0, 960, 961, 964, 1011, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x960@120Hz RB */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328,
+ 1360, 1440, 0, 960, 963, 967, 1017, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x1024@60Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@75Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@85Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
+ 1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@120Hz RB */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328,
+ 1360, 1440, 0, 1024, 1027, 1034, 1084, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1360x768@60Hz */
+ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
+ 1536, 1792, 0, 768, 771, 777, 795, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1360x768@120Hz RB */
+ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408,
+ 1440, 1520, 0, 768, 771, 776, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1400x1050@60Hz RB */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448,
+ 1480, 1560, 0, 1050, 1053, 1057, 1080, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1400x1050@60Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
+ 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@75Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
+ 1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@85Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
+ 1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@120Hz RB */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448,
+ 1480, 1560, 0, 1050, 1053, 1057, 1112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x900@60Hz RB */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488,
+ 1520, 1600, 0, 900, 903, 909, 926, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x900@60Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
+ 1672, 1904, 0, 900, 903, 909, 934, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@75Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
+ 1688, 1936, 0, 900, 903, 909, 942, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@85Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
+ 1696, 1952, 0, 900, 903, 909, 948, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@120Hz RB */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488,
+ 1520, 1600, 0, 900, 903, 909, 953, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1600x1200@60Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@65Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@70Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@75Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@85Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@120Hz RB */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648,
+ 1680, 1760, 0, 1200, 1203, 1207, 1271, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1680x1050@60Hz RB */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728,
+ 1760, 1840, 0, 1050, 1053, 1059, 1080, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1680x1050@60Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
+ 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@75Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
+ 1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@85Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
+ 1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@120Hz RB */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728,
+ 1760, 1840, 0, 1050, 1053, 1059, 1112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1792x1344@60Hz */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
+ 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1792x1344@75Hz */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
+ 2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1792x1344@120Hz RB */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840,
+ 1872, 1952, 0, 1344, 1347, 1351, 1423, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1856x1392@60Hz */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
+ 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1856x1392@75Hz */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
+ 2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1856x1392@120Hz RB */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904,
+ 1936, 2016, 0, 1392, 1395, 1399, 1474, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1200@60Hz RB */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968,
+ 2000, 2080, 0, 1200, 1203, 1209, 1235, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1200@60Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
+ 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@75Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
+ 2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@85Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
+ 2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@120Hz RB */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968,
+ 2000, 2080, 0, 1200, 1203, 1209, 1271, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1440@60Hz */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
+ 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@75Hz */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
+ 2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@120Hz RB */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968,
+ 2000, 2080, 0, 1440, 1443, 1447, 1525, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2560x1600@60Hz RB */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608,
+ 2640, 2720, 0, 1600, 1603, 1609, 1646, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2560x1600@60Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
+ 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@75Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
+ 3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@85Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
+ 3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@120Hz RB */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608,
+ 2640, 2720, 0, 1600, 1603, 1609, 1694, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+};
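For reference, DRM_MODE() packs, in order: name, type, pixel clock in kHz, hdisplay/hsync_start/hsync_end/htotal, hskew, vdisplay/vsync_start/vsync_end/vtotal, vscan and flags. The nominal refresh rate follows from clock, htotal and vtotal, e.g. for the 1024x768@60Hz entry above:

    /* clock is in kHz: refresh = clock * 1000 / (htotal * vtotal) */
    int refresh = 65000 * 1000 / (1344 * 806);   /* ~60 Hz; interlace/doublescan scale this */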
+
+static const struct drm_display_mode edid_est_modes[] = {
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+ 896, 1024, 0, 600, 601, 603, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+ 720, 840, 0, 480, 481, 484, 500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+ 704, 832, 0, 480, 489, 491, 520, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
+ 768, 864, 0, 480, 483, 486, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
+ 752, 800, 0, 480, 490, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
+ 846, 900, 0, 400, 421, 423, 449, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
+ 846, 900, 0, 400, 412, 414, 449, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
+ 1136, 1312, 0, 768, 769, 772, 800, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+ 1184, 1328, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
+ { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
+ 1208, 1264, 0, 768, 768, 776, 817, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
+ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
+ 928, 1152, 0, 624, 625, 628, 667, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+ 896, 1056, 0, 600, 601, 604, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+ 976, 1040, 0, 600, 637, 643, 666, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+ 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
+};
+
+struct minimode {
+ short w;  /* hdisplay */
+ short h;  /* vdisplay */
+ short r;  /* vertical refresh rate in Hz */
+ short rb; /* non-zero for a reduced-blanking timing */
+};
+
+static const struct minimode est3_modes[] = {
+ /* byte 6 */
+ { 640, 350, 85, 0 },
+ { 640, 400, 85, 0 },
+ { 720, 400, 85, 0 },
+ { 640, 480, 85, 0 },
+ { 848, 480, 60, 0 },
+ { 800, 600, 85, 0 },
+ { 1024, 768, 85, 0 },
+ { 1152, 864, 75, 0 },
+ /* byte 7 */
+ { 1280, 768, 60, 1 },
+ { 1280, 768, 60, 0 },
+ { 1280, 768, 75, 0 },
+ { 1280, 768, 85, 0 },
+ { 1280, 960, 60, 0 },
+ { 1280, 960, 85, 0 },
+ { 1280, 1024, 60, 0 },
+ { 1280, 1024, 85, 0 },
+ /* byte 8 */
+ { 1360, 768, 60, 0 },
+ { 1440, 900, 60, 1 },
+ { 1440, 900, 60, 0 },
+ { 1440, 900, 75, 0 },
+ { 1440, 900, 85, 0 },
+ { 1400, 1050, 60, 1 },
+ { 1400, 1050, 60, 0 },
+ { 1400, 1050, 75, 0 },
+ /* byte 9 */
+ { 1400, 1050, 85, 0 },
+ { 1680, 1050, 60, 1 },
+ { 1680, 1050, 60, 0 },
+ { 1680, 1050, 75, 0 },
+ { 1680, 1050, 85, 0 },
+ { 1600, 1200, 60, 0 },
+ { 1600, 1200, 65, 0 },
+ { 1600, 1200, 70, 0 },
+ /* byte 10 */
+ { 1600, 1200, 75, 0 },
+ { 1600, 1200, 85, 0 },
+ { 1792, 1344, 60, 0 },
+ { 1792, 1344, 85, 0 },
+ { 1856, 1392, 60, 0 },
+ { 1856, 1392, 75, 0 },
+ { 1920, 1200, 60, 1 },
+ { 1920, 1200, 60, 0 },
+ /* byte 11 */
+ { 1920, 1200, 75, 0 },
+ { 1920, 1200, 85, 0 },
+ { 1920, 1440, 60, 0 },
+ { 1920, 1440, 75, 0 },
+};
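Each entry maps to one bit of the six EST III timing bytes (bytes 6..11 of the descriptor), MSB first, which is how the table is meant to be indexed. A hedged sketch of that decode; the surrounding helper is hypothetical:

    int i, bit, idx;

    /* est points at the six timing bytes of an EST III descriptor. */
    for (i = 0; i < 6; i++) {
            for (bit = 7; bit >= 0; bit--) {
                    idx = i * 8 + (7 - bit);
                    if (idx >= ARRAY_SIZE(est3_modes))
                            break;
                    if (!(est[i] & (1 << bit)))
                            continue;
                    /* est3_modes[idx] gives w x h @ r, reduced blanking if rb is set;
                     * match it against drm_dmt_modes to build the real mode. */
            }
    }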
+
+static const struct minimode extra_modes[] = {
+ { 1024, 576, 60, 0 },
+ { 1366, 768, 60, 0 },
+ { 1600, 900, 60, 0 },
+ { 1680, 945, 60, 0 },
+ { 1920, 1080, 60, 0 },
+ { 2048, 1152, 60, 0 },
+ { 2048, 1536, 60, 0 },
+};
+
+/*
+ * CEA-861 mode table, indexed by VIC (Video Identification Code).
+ * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
+ */
+static const struct drm_display_mode edid_cea_modes[] = {
+ /* 1 - 640x480@60Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 490, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2 - 720x480@60Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 3 - 720x480@60Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 4 - 1280x720@60Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 5 - 1920x1080i@60Hz */
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 6 - 1440x480i@60Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 7 - 1440x480i@60Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 8 - 1440x240@60Hz */
+ { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 9 - 1440x240@60Hz */
+ { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 10 - 2880x480i@60Hz */
+ { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 11 - 2880x480i@60Hz */
+ { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 12 - 2880x240@60Hz */
+ { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 13 - 2880x240@60Hz */
+ { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 14 - 1440x480@60Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+ 1596, 1716, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 15 - 1440x480@60Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+ 1596, 1716, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 16 - 1920x1080@60Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 17 - 720x576@50Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 18 - 720x576@50Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 19 - 1280x720@50Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 20 - 1920x1080i@50Hz */
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 21 - 1440x576i@50Hz */
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 22 - 1440x576i@50Hz */
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 23 - 1440x288@50Hz */
+ { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 24 - 1440x288@50Hz */
+ { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 25 - 2880x576i@50Hz */
+ { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 26 - 2880x576i@50Hz */
+ { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 27 - 2880x288@50Hz */
+ { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 28 - 2880x288@50Hz */
+ { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 29 - 1440x576@50Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1592, 1728, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 30 - 1440x576@50Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1592, 1728, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 31 - 1920x1080@50Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 32 - 1920x1080@24Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 33 - 1920x1080@25Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 34 - 1920x1080@30Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 35 - 2880x480@60Hz */
+ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+ 3192, 3432, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 36 - 2880x480@60Hz */
+ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+ 3192, 3432, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 37 - 2880x576@50Hz */
+ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+ 3184, 3456, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 38 - 2880x576@50Hz */
+ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+ 3184, 3456, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 39 - 1920x1080i@50Hz */
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
+ 2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 40 - 1920x1080i@100Hz */
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 41 - 1280x720@100Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 42 - 720x576@100Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 43 - 720x576@100Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 44 - 1440x576i@100Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 45 - 1440x576i@100Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 46 - 1920x1080i@120Hz */
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 47 - 1280x720@120Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 48 - 720x480@120Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 49 - 720x480@120Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 50 - 1440x480i@120Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 51 - 1440x480i@120Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 52 - 720x576@200Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 53 - 720x576@200Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 54 - 1440x576i@200Hz */
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 55 - 1440x576i@200Hz */
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 56 - 720x480@240Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 57 - 720x480@240Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 58 - 1440x480i@240Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+	/* 59 - 1440x480i@240Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 60 - 1280x720@24Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 61 - 1280x720@25Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
+ 3740, 3960, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 62 - 1280x720@30Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 63 - 1920x1080@120Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 64 - 1920x1080@100Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+
/*** DDC fetch and block validation ***/
static const u8 edid_header[] = {
@@ -357,10 +1094,14 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
break;
}
}
- if (i == 4)
+
+ if (i == 4 && print_bad_edid) {
dev_warn(connector->dev->dev,
"%s: Ignoring invalid EDID block %d.\n",
drm_get_connector_name(connector), j);
+
+ connector->bad_edid_counter++;
+ }
}
if (valid_extensions != block[0x7e]) {
@@ -541,7 +1282,7 @@ struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
{
int i;
- for (i = 0; i < drm_num_dmt_modes; i++) {
+ for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) {
const struct drm_display_mode *ptr = &drm_dmt_modes[i];
if (hsize != ptr->hdisplay)
continue;
@@ -1082,7 +1823,7 @@ drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
- for (i = 0; i < drm_num_dmt_modes; i++) {
+ for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) {
if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
valid_inferred_mode(connector, drm_dmt_modes + i)) {
newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
@@ -1117,7 +1858,7 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
- for (i = 0; i < num_extra_modes; i++) {
+ for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
const struct minimode *m = &extra_modes[i];
newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0);
if (!newmode)
@@ -1146,7 +1887,7 @@ drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct drm_device *dev = connector->dev;
bool rb = drm_monitor_supports_rb(edid);
- for (i = 0; i < num_extra_modes; i++) {
+ for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
const struct minimode *m = &extra_modes[i];
newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0);
if (!newmode)
@@ -1483,9 +2224,11 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
#define VIDEO_BLOCK 0x02
#define VENDOR_BLOCK 0x03
#define SPEAKER_BLOCK 0x04
+#define VIDEO_CAPABILITY_BLOCK 0x07
#define EDID_BASIC_AUDIO (1 << 6)
#define EDID_CEA_YCRCB444 (1 << 5)
#define EDID_CEA_YCRCB422 (1 << 4)
+#define EDID_CEA_VCDB_QS (1 << 6)
/**
* Search EDID for CEA extension block.
@@ -1513,16 +2256,19 @@ u8 *drm_find_cea_extension(struct edid *edid)
}
EXPORT_SYMBOL(drm_find_cea_extension);
-/*
- * Looks for a CEA mode matching given drm_display_mode.
- * Returns its CEA Video ID code, or 0 if not found.
+/**
+ * drm_match_cea_mode - look for a CEA mode matching given mode
+ * @to_match: display mode
+ *
+ * Returns the CEA Video ID (VIC) of the mode or 0 if it isn't a CEA-861
+ * mode.
*/
-u8 drm_match_cea_mode(struct drm_display_mode *to_match)
+u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
{
struct drm_display_mode *cea_mode;
u8 mode;
- for (mode = 0; mode < drm_num_cea_modes; mode++) {
+ for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) {
cea_mode = (struct drm_display_mode *)&edid_cea_modes[mode];
if (drm_mode_equal(to_match, cea_mode))
@@ -1542,7 +2288,7 @@ do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
for (mode = db; mode < db + len; mode++) {
cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */
- if (cea_mode < drm_num_cea_modes) {
+ if (cea_mode < ARRAY_SIZE(edid_cea_modes)) {
struct drm_display_mode *newmode;
newmode = drm_mode_duplicate(dev,
&edid_cea_modes[cea_mode]);
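For illustration only (not part of the patch): a minimal sketch of how a driver might use the reworked drm_match_cea_mode() to obtain the VIC of the mode it is about to set. The example_ function name is hypothetical.

#include <drm/drmP.h>
#include <drm/drm_edid.h>

/* Sketch: look up the CEA-861 VIC for a mode; 0 means "not a CEA mode". */
static u8 example_vic_for_mode(const struct drm_display_mode *mode)
{
	u8 vic = drm_match_cea_mode(mode);

	if (!vic)
		DRM_DEBUG_KMS("%s is not a CEA-861 mode\n", mode->name);

	return vic;
}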
@@ -1902,6 +2648,37 @@ end:
EXPORT_SYMBOL(drm_detect_monitor_audio);
/**
+ * drm_rgb_quant_range_selectable - is RGB quantization range selectable?
+ *
+ * Check whether the monitor reports the RGB quantization range selection
+ * as supported. The AVI infoframe can then be used to inform the monitor
+ * which quantization range (full or limited) is used.
+ */
+bool drm_rgb_quant_range_selectable(struct edid *edid)
+{
+ u8 *edid_ext;
+ int i, start, end;
+
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
+ return false;
+
+ if (cea_db_offsets(edid_ext, &start, &end))
+ return false;
+
+ for_each_cea_db(edid_ext, i, start, end) {
+ if (cea_db_tag(&edid_ext[i]) == VIDEO_CAPABILITY_BLOCK &&
+ cea_db_payload_len(&edid_ext[i]) == 2) {
+ DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", edid_ext[i + 2]);
+ return edid_ext[i + 2] & EDID_CEA_VCDB_QS;
+ }
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
+
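A hedged sketch of how an HDMI encoder might act on drm_rgb_quant_range_selectable(); the quantization_range field and the HDMI_QUANTIZATION_RANGE_* values are assumed to come from the generic infoframe helpers in <linux/hdmi.h> added elsewhere in this merge.

#include <linux/hdmi.h>
#include <drm/drm_edid.h>

/* Sketch: advertise full-range RGB only when the sink's CEA Video
 * Capability Data Block says the quantization range is selectable. */
static void example_pick_quant_range(struct hdmi_avi_infoframe *frame,
				     struct edid *edid)
{
	if (drm_rgb_quant_range_selectable(edid))
		frame->quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
	else
		frame->quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
}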
+/**
* drm_add_display_info - pull display info out if present
* @edid: EDID data
* @info: display info (attached to connector)
@@ -2020,7 +2797,8 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
num_modes += add_cvt_modes(connector, edid);
num_modes += add_standard_modes(connector, edid);
num_modes += add_established_modes(connector, edid);
- num_modes += add_inferred_modes(connector, edid);
+ if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
+ num_modes += add_inferred_modes(connector, edid);
num_modes += add_cea_modes(connector, edid);
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
@@ -2081,20 +2859,33 @@ int drm_add_modes_noedid(struct drm_connector *connector,
EXPORT_SYMBOL(drm_add_modes_noedid);
/**
- * drm_mode_cea_vic - return the CEA-861 VIC of a given mode
- * @mode: mode
+ * drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
+ * data from a DRM display mode
+ * @frame: HDMI AVI infoframe
+ * @mode: DRM display mode
*
- * RETURNS:
- * The VIC number, 0 in case it's not a CEA-861 mode.
+ * Returns 0 on success or a negative error code on failure.
*/
-uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode)
+int
+drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
+ const struct drm_display_mode *mode)
{
- uint8_t i;
+ int err;
+
+ if (!frame || !mode)
+ return -EINVAL;
+
+ err = hdmi_avi_infoframe_init(frame);
+ if (err < 0)
+ return err;
+
+ frame->video_code = drm_match_cea_mode(mode);
+ if (!frame->video_code)
+ return 0;
- for (i = 0; i < drm_num_cea_modes; i++)
- if (drm_mode_equal(mode, &edid_cea_modes[i]))
- return i + 1;
+ frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
+ frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
return 0;
}
-EXPORT_SYMBOL(drm_mode_cea_vic);
+EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
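A sketch of the call sequence the new helper is meant for: fill the infoframe from the mode, pack it with the generic hdmi_avi_infoframe_pack() from <linux/hdmi.h>, then hand the bytes to hardware. write_infoframe() is a hypothetical stand-in for the driver's own upload routine.

#include <linux/hdmi.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>

static int example_send_avi_infoframe(struct drm_encoder *encoder,
				      const struct drm_display_mode *mode)
{
	struct hdmi_avi_infoframe frame;
	u8 buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	ssize_t len;
	int ret;

	ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
	if (ret < 0)
		return ret;

	len = hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf));
	if (len < 0)
		return len;

	/* write_infoframe() is hypothetical, hardware-specific code. */
	return write_infoframe(encoder, buf, len);
}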
diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h
deleted file mode 100644
index 5dbf7d2557b..00000000000
--- a/drivers/gpu/drm/drm_edid_modes.h
+++ /dev/null
@@ -1,774 +0,0 @@
-/*
- * Copyright (c) 2007-2008 Intel Corporation
- * Jesse Barnes <jesse.barnes@intel.com>
- * Copyright 2010 Red Hat, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <drm/drmP.h>
-#include <drm/drm_edid.h>
-
-/*
- * Autogenerated from the DMT spec.
- * This table is copied from xfree86/modes/xf86EdidModes.c.
- */
-static const struct drm_display_mode drm_dmt_modes[] = {
- /* 640x350@85Hz */
- { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
- 736, 832, 0, 350, 382, 385, 445, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x400@85Hz */
- { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
- 736, 832, 0, 400, 401, 404, 445, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 720x400@85Hz */
- { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
- 828, 936, 0, 400, 401, 404, 446, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 640x480@60Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
- 752, 800, 0, 480, 489, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x480@72Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
- 704, 832, 0, 480, 489, 492, 520, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x480@75Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
- 720, 840, 0, 480, 481, 484, 500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x480@85Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
- 752, 832, 0, 480, 481, 484, 509, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 800x600@56Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
- 896, 1024, 0, 600, 601, 603, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@60Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
- 968, 1056, 0, 600, 601, 605, 628, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@72Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
- 976, 1040, 0, 600, 637, 643, 666, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@75Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
- 896, 1056, 0, 600, 601, 604, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@85Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
- 896, 1048, 0, 600, 601, 604, 631, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@120Hz RB */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848,
- 880, 960, 0, 600, 603, 607, 636, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 848x480@60Hz */
- { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
- 976, 1088, 0, 480, 486, 494, 517, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1024x768@43Hz, interlace */
- { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
- 1208, 1264, 0, 768, 768, 772, 817, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1024x768@60Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
- 1184, 1344, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1024x768@70Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
- 1184, 1328, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1024x768@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
- 1136, 1312, 0, 768, 769, 772, 800, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1024x768@85Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
- 1168, 1376, 0, 768, 769, 772, 808, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1024x768@120Hz RB */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072,
- 1104, 1184, 0, 768, 771, 775, 813, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1152x864@75Hz */
- { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
- 1344, 1600, 0, 864, 865, 868, 900, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x768@60Hz RB */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328,
- 1360, 1440, 0, 768, 771, 778, 790, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x768@60Hz */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
- 1472, 1664, 0, 768, 771, 778, 798, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x768@75Hz */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
- 1488, 1696, 0, 768, 771, 778, 805, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x768@85Hz */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
- 1496, 1712, 0, 768, 771, 778, 809, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x768@120Hz RB */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328,
- 1360, 1440, 0, 768, 771, 778, 813, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x800@60Hz RB */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328,
- 1360, 1440, 0, 800, 803, 809, 823, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x800@60Hz */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
- 1480, 1680, 0, 800, 803, 809, 831, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x800@75Hz */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
- 1488, 1696, 0, 800, 803, 809, 838, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x800@85Hz */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
- 1496, 1712, 0, 800, 803, 809, 843, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x800@120Hz RB */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328,
- 1360, 1440, 0, 800, 803, 809, 847, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x960@60Hz */
- { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
- 1488, 1800, 0, 960, 961, 964, 1000, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x960@85Hz */
- { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
- 1504, 1728, 0, 960, 961, 964, 1011, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x960@120Hz RB */
- { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328,
- 1360, 1440, 0, 960, 963, 967, 1017, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x1024@60Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x1024@75Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x1024@85Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
- 1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x1024@120Hz RB */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328,
- 1360, 1440, 0, 1024, 1027, 1034, 1084, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1360x768@60Hz */
- { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
- 1536, 1792, 0, 768, 771, 777, 795, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1360x768@120Hz RB */
- { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408,
- 1440, 1520, 0, 768, 771, 776, 813, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1400x1050@60Hz RB */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448,
- 1480, 1560, 0, 1050, 1053, 1057, 1080, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1400x1050@60Hz */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
- 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1400x1050@75Hz */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
- 1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1400x1050@85Hz */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
- 1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1400x1050@120Hz RB */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448,
- 1480, 1560, 0, 1050, 1053, 1057, 1112, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x900@60Hz RB */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488,
- 1520, 1600, 0, 900, 903, 909, 926, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x900@60Hz */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
- 1672, 1904, 0, 900, 903, 909, 934, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x900@75Hz */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
- 1688, 1936, 0, 900, 903, 909, 942, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x900@85Hz */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
- 1696, 1952, 0, 900, 903, 909, 948, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x900@120Hz RB */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488,
- 1520, 1600, 0, 900, 903, 909, 953, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1600x1200@60Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@65Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@70Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@75Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@85Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@120Hz RB */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648,
- 1680, 1760, 0, 1200, 1203, 1207, 1271, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1680x1050@60Hz RB */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728,
- 1760, 1840, 0, 1050, 1053, 1059, 1080, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1680x1050@60Hz */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
- 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1680x1050@75Hz */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
- 1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1680x1050@85Hz */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
- 1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1680x1050@120Hz RB */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728,
- 1760, 1840, 0, 1050, 1053, 1059, 1112, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1792x1344@60Hz */
- { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
- 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1792x1344@75Hz */
- { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
- 2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1792x1344@120Hz RB */
- { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840,
- 1872, 1952, 0, 1344, 1347, 1351, 1423, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1856x1392@60Hz */
- { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
- 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1856x1392@75Hz */
- { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
- 2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1856x1392@120Hz RB */
- { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904,
- 1936, 2016, 0, 1392, 1395, 1399, 1474, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1920x1200@60Hz RB */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968,
- 2000, 2080, 0, 1200, 1203, 1209, 1235, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1920x1200@60Hz */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
- 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1200@75Hz */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
- 2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1200@85Hz */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
- 2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1200@120Hz RB */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968,
- 2000, 2080, 0, 1200, 1203, 1209, 1271, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1920x1440@60Hz */
- { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
- 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1440@75Hz */
- { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
- 2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1440@120Hz RB */
- { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968,
- 2000, 2080, 0, 1440, 1443, 1447, 1525, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2560x1600@60Hz RB */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608,
- 2640, 2720, 0, 1600, 1603, 1609, 1646, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2560x1600@60Hz */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
- 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2560x1600@75HZ */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
- 3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2560x1600@85HZ */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
- 3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2560x1600@120Hz RB */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608,
- 2640, 2720, 0, 1600, 1603, 1609, 1694, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
-
-};
-static const int drm_num_dmt_modes =
- sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
-
-static const struct drm_display_mode edid_est_modes[] = {
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
- 968, 1056, 0, 600, 601, 605, 628, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
- 896, 1024, 0, 600, 601, 603, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
- 720, 840, 0, 480, 481, 484, 500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
- 704, 832, 0, 480, 489, 491, 520, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
- 768, 864, 0, 480, 483, 486, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
- 752, 800, 0, 480, 490, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
- { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
- 846, 900, 0, 400, 421, 423, 449, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
- { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
- 846, 900, 0, 400, 412, 414, 449, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
- 1136, 1312, 0, 768, 769, 772, 800, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
- 1184, 1328, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
- 1184, 1344, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
- { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
- 1208, 1264, 0, 768, 768, 776, 817, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
- { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
- 928, 1152, 0, 624, 625, 628, 667, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
- 896, 1056, 0, 600, 601, 604, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
- 976, 1040, 0, 600, 637, 643, 666, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
- { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
- 1344, 1600, 0, 864, 865, 868, 900, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
-};
-
-struct minimode {
- short w;
- short h;
- short r;
- short rb;
-};
-
-static const struct minimode est3_modes[] = {
- /* byte 6 */
- { 640, 350, 85, 0 },
- { 640, 400, 85, 0 },
- { 720, 400, 85, 0 },
- { 640, 480, 85, 0 },
- { 848, 480, 60, 0 },
- { 800, 600, 85, 0 },
- { 1024, 768, 85, 0 },
- { 1152, 864, 75, 0 },
- /* byte 7 */
- { 1280, 768, 60, 1 },
- { 1280, 768, 60, 0 },
- { 1280, 768, 75, 0 },
- { 1280, 768, 85, 0 },
- { 1280, 960, 60, 0 },
- { 1280, 960, 85, 0 },
- { 1280, 1024, 60, 0 },
- { 1280, 1024, 85, 0 },
- /* byte 8 */
- { 1360, 768, 60, 0 },
- { 1440, 900, 60, 1 },
- { 1440, 900, 60, 0 },
- { 1440, 900, 75, 0 },
- { 1440, 900, 85, 0 },
- { 1400, 1050, 60, 1 },
- { 1400, 1050, 60, 0 },
- { 1400, 1050, 75, 0 },
- /* byte 9 */
- { 1400, 1050, 85, 0 },
- { 1680, 1050, 60, 1 },
- { 1680, 1050, 60, 0 },
- { 1680, 1050, 75, 0 },
- { 1680, 1050, 85, 0 },
- { 1600, 1200, 60, 0 },
- { 1600, 1200, 65, 0 },
- { 1600, 1200, 70, 0 },
- /* byte 10 */
- { 1600, 1200, 75, 0 },
- { 1600, 1200, 85, 0 },
- { 1792, 1344, 60, 0 },
- { 1792, 1344, 85, 0 },
- { 1856, 1392, 60, 0 },
- { 1856, 1392, 75, 0 },
- { 1920, 1200, 60, 1 },
- { 1920, 1200, 60, 0 },
- /* byte 11 */
- { 1920, 1200, 75, 0 },
- { 1920, 1200, 85, 0 },
- { 1920, 1440, 60, 0 },
- { 1920, 1440, 75, 0 },
-};
-static const int num_est3_modes = ARRAY_SIZE(est3_modes);
-
-static const struct minimode extra_modes[] = {
- { 1024, 576, 60, 0 },
- { 1366, 768, 60, 0 },
- { 1600, 900, 60, 0 },
- { 1680, 945, 60, 0 },
- { 1920, 1080, 60, 0 },
- { 2048, 1152, 60, 0 },
- { 2048, 1536, 60, 0 },
-};
-static const int num_extra_modes = ARRAY_SIZE(extra_modes);
-
-/*
- * Probably taken from CEA-861 spec.
- * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
- */
-static const struct drm_display_mode edid_cea_modes[] = {
- /* 1 - 640x480@60Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
- 752, 800, 0, 480, 490, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2 - 720x480@60Hz */
- { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
- 798, 858, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 3 - 720x480@60Hz */
- { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
- 798, 858, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 4 - 1280x720@60Hz */
- { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
- 1430, 1650, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 5 - 1920x1080i@60Hz */
- { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
- 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 6 - 1440x480i@60Hz */
- { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
- 1602, 1716, 0, 480, 488, 494, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 7 - 1440x480i@60Hz */
- { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
- 1602, 1716, 0, 480, 488, 494, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 8 - 1440x240@60Hz */
- { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
- 1602, 1716, 0, 240, 244, 247, 262, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK) },
- /* 9 - 1440x240@60Hz */
- { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
- 1602, 1716, 0, 240, 244, 247, 262, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK) },
- /* 10 - 2880x480i@60Hz */
- { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
- 3204, 3432, 0, 480, 488, 494, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 11 - 2880x480i@60Hz */
- { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
- 3204, 3432, 0, 480, 488, 494, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 12 - 2880x240@60Hz */
- { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
- 3204, 3432, 0, 240, 244, 247, 262, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 13 - 2880x240@60Hz */
- { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
- 3204, 3432, 0, 240, 244, 247, 262, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 14 - 1440x480@60Hz */
- { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
- 1596, 1716, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 15 - 1440x480@60Hz */
- { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
- 1596, 1716, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 16 - 1920x1080@60Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
- 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 17 - 720x576@50Hz */
- { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
- 796, 864, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 18 - 720x576@50Hz */
- { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
- 796, 864, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 19 - 1280x720@50Hz */
- { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
- 1760, 1980, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 20 - 1920x1080i@50Hz */
- { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
- 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 21 - 1440x576i@50Hz */
- { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
- 1590, 1728, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 22 - 1440x576i@50Hz */
- { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
- 1590, 1728, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 23 - 1440x288@50Hz */
- { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
- 1590, 1728, 0, 288, 290, 293, 312, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK) },
- /* 24 - 1440x288@50Hz */
- { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
- 1590, 1728, 0, 288, 290, 293, 312, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK) },
- /* 25 - 2880x576i@50Hz */
- { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
- 3180, 3456, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 26 - 2880x576i@50Hz */
- { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
- 3180, 3456, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 27 - 2880x288@50Hz */
- { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
- 3180, 3456, 0, 288, 290, 293, 312, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 28 - 2880x288@50Hz */
- { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
- 3180, 3456, 0, 288, 290, 293, 312, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 29 - 1440x576@50Hz */
- { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
- 1592, 1728, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 30 - 1440x576@50Hz */
- { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
- 1592, 1728, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 31 - 1920x1080@50Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
- 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 32 - 1920x1080@24Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
- 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 33 - 1920x1080@25Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
- 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 34 - 1920x1080@30Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
- 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 35 - 2880x480@60Hz */
- { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
- 3192, 3432, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 36 - 2880x480@60Hz */
- { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
- 3192, 3432, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 37 - 2880x576@50Hz */
- { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
- 3184, 3456, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 38 - 2880x576@50Hz */
- { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
- 3184, 3456, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 39 - 1920x1080i@50Hz */
- { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
- 2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 40 - 1920x1080i@100Hz */
- { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
- 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 41 - 1280x720@100Hz */
- { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
- 1760, 1980, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 42 - 720x576@100Hz */
- { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
- 796, 864, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 43 - 720x576@100Hz */
- { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
- 796, 864, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 44 - 1440x576i@100Hz */
- { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
- 1590, 1728, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK) },
- /* 45 - 1440x576i@100Hz */
- { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
- 1590, 1728, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK) },
- /* 46 - 1920x1080i@120Hz */
- { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
- 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 47 - 1280x720@120Hz */
- { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
- 1430, 1650, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 48 - 720x480@120Hz */
- { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
- 798, 858, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 49 - 720x480@120Hz */
- { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
- 798, 858, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 50 - 1440x480i@120Hz */
- { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
- 1602, 1716, 0, 480, 488, 494, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 51 - 1440x480i@120Hz */
- { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
- 1602, 1716, 0, 480, 488, 494, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 52 - 720x576@200Hz */
- { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
- 796, 864, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 53 - 720x576@200Hz */
- { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
- 796, 864, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 54 - 1440x576i@200Hz */
- { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
- 1590, 1728, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 55 - 1440x576i@200Hz */
- { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
- 1590, 1728, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 56 - 720x480@240Hz */
- { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
- 798, 858, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 57 - 720x480@240Hz */
- { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
- 798, 858, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 58 - 1440x480i@240 */
- { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
- 1602, 1716, 0, 480, 488, 494, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 59 - 1440x480i@240 */
- { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
- 1602, 1716, 0, 480, 488, 494, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 60 - 1280x720@24Hz */
- { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
- 3080, 3300, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 61 - 1280x720@25Hz */
- { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
- 3740, 3960, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 62 - 1280x720@30Hz */
- { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
- 3080, 3300, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 63 - 1920x1080@120Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
- 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 64 - 1920x1080@100Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
- 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-};
-static const int drm_num_cea_modes = ARRAY_SIZE(edid_cea_modes);
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
index 63e733408b6..48c52f7df4e 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/drm_encoder_slave.c
@@ -123,3 +123,66 @@ void drm_i2c_encoder_destroy(struct drm_encoder *drm_encoder)
module_put(module);
}
EXPORT_SYMBOL(drm_i2c_encoder_destroy);
+
+/*
+ * Wrapper functions which can be plugged into drm_encoder_helper_funcs:
+ */
+
+static inline struct drm_encoder_slave_funcs *
+get_slave_funcs(struct drm_encoder *enc)
+{
+ return to_encoder_slave(enc)->slave_funcs;
+}
+
+void drm_i2c_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ get_slave_funcs(encoder)->dpms(encoder, mode);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_dpms);
+
+bool drm_i2c_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return get_slave_funcs(encoder)->mode_fixup(encoder, mode, adjusted_mode);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_mode_fixup);
+
+void drm_i2c_encoder_prepare(struct drm_encoder *encoder)
+{
+ drm_i2c_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_prepare);
+
+void drm_i2c_encoder_commit(struct drm_encoder *encoder)
+{
+ drm_i2c_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_commit);
+
+void drm_i2c_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ get_slave_funcs(encoder)->mode_set(encoder, mode, adjusted_mode);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_mode_set);
+
+enum drm_connector_status drm_i2c_encoder_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ return get_slave_funcs(encoder)->detect(encoder, connector);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_detect);
+
+void drm_i2c_encoder_save(struct drm_encoder *encoder)
+{
+ get_slave_funcs(encoder)->save(encoder);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_save);
+
+void drm_i2c_encoder_restore(struct drm_encoder *encoder)
+{
+ get_slave_funcs(encoder)->restore(encoder);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_restore);
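As an illustration of what these wrappers are for, a driver embedding a drm_encoder_slave can plug them straight into its encoder helper vtable, much as the new tilcdc driver in this merge does; which hooks get wired up is the driver's choice, and the struct name below is hypothetical.

#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder_slave.h>

/* Sketch: forward every encoder helper hook to the slave encoder. */
static const struct drm_encoder_helper_funcs example_slave_helper_funcs = {
	.dpms		= drm_i2c_encoder_dpms,
	.save		= drm_i2c_encoder_save,
	.restore	= drm_i2c_encoder_restore,
	.mode_fixup	= drm_i2c_encoder_mode_fixup,
	.prepare	= drm_i2c_encoder_prepare,
	.commit		= drm_i2c_encoder_commit,
	.mode_set	= drm_i2c_encoder_mode_set,
	.detect		= drm_i2c_encoder_detect,
};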
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index fd9d0af4d53..0b5af7d0edb 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -85,6 +85,11 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
if (!fb_cma)
return ERR_PTR(-ENOMEM);
+ drm_helper_mode_fill_fb_struct(&fb_cma->fb, mode_cmd);
+
+ for (i = 0; i < num_planes; i++)
+ fb_cma->obj[i] = obj[i];
+
ret = drm_framebuffer_init(dev, &fb_cma->fb, &drm_fb_cma_funcs);
if (ret) {
 		dev_err(dev->dev, "Failed to initialize framebuffer: %d\n", ret);
@@ -92,11 +97,6 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
return ERR_PTR(ret);
}
- drm_helper_mode_fill_fb_struct(&fb_cma->fb, mode_cmd);
-
- for (i = 0; i < num_planes; i++)
- fb_cma->obj[i] = obj[i];
-
return fb_cma;
}
@@ -180,6 +180,59 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
}
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
+#ifdef CONFIG_DEBUG_FS
+/**
+ * drm_fb_cma_describe() - Helper to dump information about a single
+ * CMA framebuffer object
+ */
+void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
+{
+ struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+ int i, n = drm_format_num_planes(fb->pixel_format);
+
+ seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
+ (char *)&fb->pixel_format);
+
+ for (i = 0; i < n; i++) {
+ seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
+ i, fb->offsets[i], fb->pitches[i]);
+ drm_gem_cma_describe(fb_cma->obj[i], m);
+ }
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_describe);
+
+/**
+ * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
+ * in debugfs.
+ */
+int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_framebuffer *fb;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ if (ret)
+ return ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret) {
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+ }
+
+ list_for_each_entry(fb, &dev->mode_config.fb_list, head)
+ drm_fb_cma_describe(fb, m);
+
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show);
+#endif
+
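A sketch of how a CMA-based driver might hook the new debugfs helper up through its drm_driver ->debugfs_init callback; the list and function names are hypothetical.

#ifdef CONFIG_DEBUG_FS
static struct drm_info_list example_debugfs_list[] = {
	{ "fb", drm_fb_cma_debugfs_show, 0 },
};

static int example_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(example_debugfs_list,
					ARRAY_SIZE(example_debugfs_list),
					minor->debugfs_root, minor);
}
#endif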
static struct fb_ops drm_fbdev_cma_ops = {
.owner = THIS_MODULE,
.fb_fillrect = sys_fillrect,
@@ -266,6 +319,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
return 0;
err_drm_fb_cma_destroy:
+ drm_framebuffer_unregister_private(fb);
drm_fb_cma_destroy(fb);
err_framebuffer_release:
framebuffer_release(fbi);
@@ -274,23 +328,8 @@ err_drm_gem_cma_free_object:
return ret;
}
-static int drm_fbdev_cma_probe(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- int ret = 0;
-
- if (!helper->fb) {
- ret = drm_fbdev_cma_create(helper, sizes);
- if (ret < 0)
- return ret;
- ret = 1;
- }
-
- return ret;
-}
-
static struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
- .fb_probe = drm_fbdev_cma_probe,
+ .fb_probe = drm_fbdev_cma_create,
};
/**
@@ -332,6 +371,9 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
}
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
ret = drm_fb_helper_initial_config(helper, preferred_bpp);
if (ret < 0) {
 		dev_err(dev->dev, "Failed to set initial hw configuration.\n");
@@ -370,8 +412,10 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
framebuffer_release(info);
}
- if (fbdev_cma->fb)
+ if (fbdev_cma->fb) {
+ drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
drm_fb_cma_destroy(&fbdev_cma->fb->fb);
+ }
drm_fb_helper_fini(&fbdev_cma->fb_helper);
kfree(fbdev_cma);
@@ -386,8 +430,13 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
*/
void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
{
- if (fbdev_cma)
+ if (fbdev_cma) {
+ struct drm_device *dev = fbdev_cma->fb_helper.dev;
+
+ drm_modeset_lock_all(dev);
drm_fb_helper_restore_fbdev_mode(&fbdev_cma->fb_helper);
+ drm_modeset_unlock_all(dev);
+ }
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);
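For context, a minimal sketch of the ->lastclose hook that drm_fbdev_cma_restore_mode() is meant to be called from; the helper now takes the modeset locks itself, as the hunk above shows, and the private-data layout below is an assumption.

/* Sketch: restore the fbdev console when the last DRM client exits. */
static void example_lastclose(struct drm_device *dev)
{
	struct example_private *priv = dev->dev_private;	/* hypothetical */

	drm_fbdev_cma_restore_mode(priv->fbdev);
}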
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 954d175bd7f..59d6b9bf204 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -52,9 +52,36 @@ static LIST_HEAD(kernel_fb_helper_list);
 * mode setting driver. They can be used mostly independently from the crtc
* helper functions used by many drivers to implement the kernel mode setting
* interfaces.
+ *
+ * Initialization is done as a three-step process with drm_fb_helper_init(),
+ * drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config().
+ * Drivers with fancier requirements than the default behaviour can override the
+ * second step with their own code. Teardown is done with drm_fb_helper_fini().
+ *
+ * At runtime drivers should restore the fbdev console by calling
+ * drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They
+ * should also notify the fb helper code of updates to the output
+ * configuration by calling drm_fb_helper_hotplug_event(). For easier
+ * integration with the output polling code in drm_crtc_helper.c, the modeset
+ * code provides a ->output_poll_changed callback.
+ *
+ * All other functions exported by the fb helper library can be used to
+ * implement the fbdev driver interface by the driver.
*/
-/* simple single crtc case helper function */
+/**
+ * drm_fb_helper_single_add_all_connectors() - add all connectors to fbdev
+ * emulation helper
+ * @fb_helper: fbdev initialized with drm_fb_helper_init
+ *
+ * This function adds all the available connectors for use with the given
+ * fb_helper. This is a separate step to allow drivers to freely assign
+ * connectors to the fbdev, e.g. if some are reserved for special purposes or
+ * not adequate to be used for the fbcon.
+ *
+ * Since this is part of the initial setup before the fbdev is published, no
+ * locking is required.
+ */
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
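A hedged sketch of the three-step setup described in the comment above, mirroring the call pattern of drm_fbdev_cma_init() earlier in this diff; the crtc/connector counts are placeholders and helper->funcs is assumed to be set by the caller.

static int example_fbdev_setup(struct drm_device *dev,
			       struct drm_fb_helper *helper,
			       unsigned int preferred_bpp)
{
	int ret;

	/* helper->funcs must already point at the driver's fb helper funcs. */
	ret = drm_fb_helper_init(dev, helper, 1 /* crtcs */, 1 /* connectors */);
	if (ret < 0)
		return ret;

	ret = drm_fb_helper_single_add_all_connectors(helper);
	if (ret < 0)
		goto err_fini;

	ret = drm_fb_helper_initial_config(helper, preferred_bpp);
	if (ret < 0)
		goto err_fini;

	return 0;

err_fini:
	drm_fb_helper_fini(helper);
	return ret;
}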
@@ -163,6 +190,10 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
}
+/**
+ * drm_fb_helper_debug_enter - implementation for ->fb_debug_enter
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_debug_enter(struct fb_info *info)
{
struct drm_fb_helper *helper = info->par;
@@ -208,6 +239,10 @@ static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
return NULL;
}
+/**
+ * drm_fb_helper_debug_leave - implementation for ->fb_debug_leave
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_debug_leave(struct fb_info *info)
{
struct drm_fb_helper *helper = info->par;
@@ -239,13 +274,24 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_debug_leave);
+/**
+ * drm_fb_helper_restore_fbdev_mode - restore fbdev configuration
+ * @fb_helper: fbcon to restore
+ *
+ * This should be called from the driver's ->lastclose callback
+ * when implementing an fbcon on top of kms using this helper. This ensures that
+ * the user isn't greeted with a black screen when e.g. X dies.
+ */
bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
{
bool error = false;
int i, ret;
+
+ drm_warn_on_modeset_not_all_locked(fb_helper->dev);
+
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
- ret = mode_set->crtc->funcs->set_config(mode_set);
+ ret = drm_mode_set_config_internal(mode_set);
if (ret)
error = true;
}
@@ -253,6 +299,10 @@ bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode);
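A hedged sketch of the ->lastclose hook the kernel-doc above refers to, following the same locking pattern this patch adds to drm_fbdev_cma_restore_mode(); the foo_* names and private-data layout are assumptions.

/* Hypothetical ->lastclose implementation restoring the fbdev console. */
static void foo_lastclose(struct drm_device *dev)
{
	struct foo_drm_private *priv = dev->dev_private;

	drm_modeset_lock_all(dev);
	drm_fb_helper_restore_fbdev_mode(&priv->fbdev->helper);
	drm_modeset_unlock_all(dev);
}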
+/*
+ * restore fbcon display for all kms drivers using this helper, used for sysrq
+ * and panic handling.
+ */
static bool drm_fb_helper_force_kernel_mode(void)
{
bool ret, error = false;
@@ -272,7 +322,7 @@ static bool drm_fb_helper_force_kernel_mode(void)
return error;
}
-int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
+static int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
void *panic_str)
{
/*
@@ -285,30 +335,36 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
pr_err("panic occurred, switching back to text console\n");
return drm_fb_helper_force_kernel_mode();
}
-EXPORT_SYMBOL(drm_fb_helper_panic);
static struct notifier_block paniced = {
.notifier_call = drm_fb_helper_panic,
};
-/**
- * drm_fb_helper_restore - restore the framebuffer console (kernel) config
- *
- * Restore's the kernel's fbcon mode, used for lastclose & panic paths.
- */
-void drm_fb_helper_restore(void)
+static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
{
- bool ret;
- ret = drm_fb_helper_force_kernel_mode();
- if (ret == true)
- DRM_ERROR("Failed to restore crtc configuration\n");
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_crtc *crtc;
+ int bound = 0, crtcs_bound = 0;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->fb)
+ crtcs_bound++;
+ if (crtc->fb == fb_helper->fb)
+ bound++;
+ }
+
+ if (bound < crtcs_bound)
+ return false;
+ return true;
}
-EXPORT_SYMBOL(drm_fb_helper_restore);
#ifdef CONFIG_MAGIC_SYSRQ
static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
{
- drm_fb_helper_restore();
+ bool ret;
+ ret = drm_fb_helper_force_kernel_mode();
+ if (ret == true)
+ DRM_ERROR("Failed to restore crtc configuration\n");
}
static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
@@ -335,9 +391,22 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
int i, j;
/*
+ * fbdev->blank can be called from irq context in case of a panic.
+ * Since we already have our own special panic handler which will
+ * restore the fbdev console mode completely, just bail out early.
+ */
+ if (oops_in_progress)
+ return;
+
+ /*
* For each CRTC in this fb, turn the connectors on/off.
*/
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
+ if (!drm_fb_helper_is_bound(fb_helper)) {
+ drm_modeset_unlock_all(dev);
+ return;
+ }
+
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
@@ -352,9 +421,14 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
dev->mode_config.dpms_property, dpms_mode);
}
}
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
}
+/**
+ * drm_fb_helper_blank - implementation for ->fb_blank
+ * @blank: desired blanking state
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_blank(int blank, struct fb_info *info)
{
switch (blank) {
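The blank/setcmap/check_var/set_par/pan_display helpers documented in the surrounding hunks are meant to be plugged straight into a driver's struct fb_ops. A sketch assuming a system-memory backed fbdev; the foo_ prefix is hypothetical and sys_fillrect()/sys_copyarea()/sys_imageblit() are the generic fbdev drawing routines.

static struct fb_ops foo_fb_ops = {
	.owner		= THIS_MODULE,
	.fb_check_var	= drm_fb_helper_check_var,
	.fb_set_par	= drm_fb_helper_set_par,
	.fb_blank	= drm_fb_helper_blank,
	.fb_pan_display	= drm_fb_helper_pan_display,
	.fb_setcmap	= drm_fb_helper_setcmap,
	.fb_debug_enter	= drm_fb_helper_debug_enter,
	.fb_debug_leave	= drm_fb_helper_debug_leave,
	.fb_fillrect	= sys_fillrect,
	.fb_copyarea	= sys_copyarea,
	.fb_imageblit	= sys_imageblit,
};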
@@ -398,6 +472,24 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
kfree(helper->crtc_info);
}
+/**
+ * drm_fb_helper_init - initialize a drm_fb_helper structure
+ * @dev: drm device
+ * @fb_helper: driver-allocated fbdev helper structure to initialize
+ * @crtc_count: maximum number of crtcs to support in this fbdev emulation
+ * @max_conn_count: max connector count
+ *
+ * This allocates the structures for the fbdev helper with the given limits.
+ * Note that this won't yet touch the hardware (through the driver interfaces)
+ * nor register the fbdev. This is only done in drm_fb_helper_initial_config()
+ * to allow driver writers more control over the exact init sequence.
+ *
+ * Drivers must set fb_helper->funcs before calling
+ * drm_fb_helper_initial_config().
+ *
+ * RETURNS:
+ * Zero if everything went ok, nonzero otherwise.
+ */
int drm_fb_helper_init(struct drm_device *dev,
struct drm_fb_helper *fb_helper,
int crtc_count, int max_conn_count)
@@ -526,6 +618,11 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
return 0;
}
+/**
+ * drm_fb_helper_setcmap - implementation for ->fb_setcmap
+ * @cmap: cmap to set
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
@@ -565,6 +662,11 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_setcmap);
+/**
+ * drm_fb_helper_check_var - implementation for ->fb_check_var
+ * @var: screeninfo to check
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
@@ -657,13 +759,19 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
}
EXPORT_SYMBOL(drm_fb_helper_check_var);
-/* this will let fbcon do the mode init */
+/**
+ * drm_fb_helper_set_par - implementation for ->fb_set_par
+ * @info: fbdev registered by the helper
+ *
+ * This will let fbcon do the mode init and is called at initialization time by
+ * the fbdev core when registering the driver, and later on through the hotplug
+ * callback.
+ */
int drm_fb_helper_set_par(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct fb_var_screeninfo *var = &info->var;
- struct drm_crtc *crtc;
int ret;
int i;
@@ -672,16 +780,15 @@ int drm_fb_helper_set_par(struct fb_info *info)
return -EINVAL;
}
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
for (i = 0; i < fb_helper->crtc_count; i++) {
- crtc = fb_helper->crtc_info[i].mode_set.crtc;
- ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
+ ret = drm_mode_set_config_internal(&fb_helper->crtc_info[i].mode_set);
if (ret) {
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
}
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
if (fb_helper->delayed_hotplug) {
fb_helper->delayed_hotplug = false;
@@ -691,6 +798,11 @@ int drm_fb_helper_set_par(struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_set_par);
+/**
+ * drm_fb_helper_pan_display - implementation for ->fb_pan_display
+ * @var: updated screen information
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
@@ -701,7 +813,12 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
int ret = 0;
int i;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
+ if (!drm_fb_helper_is_bound(fb_helper)) {
+ drm_modeset_unlock_all(dev);
+ return -EBUSY;
+ }
+
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
@@ -711,22 +828,27 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
modeset->y = var->yoffset;
if (modeset->num_connectors) {
- ret = crtc->funcs->set_config(modeset);
+ ret = drm_mode_set_config_internal(modeset);
if (!ret) {
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
}
}
}
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_pan_display);
-int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
- int preferred_bpp)
+/*
+ * Allocates the backing storage and sets up the fbdev info structure through
+ * the ->fb_probe callback and then registers the fbdev and sets up the panic
+ * notifier.
+ */
+static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
+ int preferred_bpp)
{
- int new_fb = 0;
+ int ret = 0;
int crtc_count = 0;
int i;
struct fb_info *info;
@@ -804,27 +926,30 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
}
/* push down into drivers */
- new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
- if (new_fb < 0)
- return new_fb;
+ ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
+ if (ret < 0)
+ return ret;
info = fb_helper->fbdev;
- /* set the fb pointer */
+ /*
+ * Set the fb pointer - usually drm_setup_crtcs does this for hotplug
+ * events, but at init time drm_setup_crtcs needs to be called before
+ * the fb is allocated (since we need to figure out the desired size of
+ * the fb before we can allocate it ...). Hence we need to fix things up
+ * here again.
+ */
for (i = 0; i < fb_helper->crtc_count; i++)
- fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
+ if (fb_helper->crtc_info[i].mode_set.num_connectors)
+ fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
- if (new_fb) {
- info->var.pixclock = 0;
- if (register_framebuffer(info) < 0)
- return -EINVAL;
- dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
+ info->var.pixclock = 0;
+ if (register_framebuffer(info) < 0)
+ return -EINVAL;
- } else {
- drm_fb_helper_set_par(info);
- }
+ dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
+ info->node, info->fix.id);
/* Switch back to kernel console on panic */
/* multi card linked list maybe */
@@ -834,13 +959,25 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
&paniced);
register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
}
- if (new_fb)
- list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
+
+ list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
return 0;
}
-EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
+/**
+ * drm_fb_helper_fill_fix - initializes fixed fbdev information
+ * @info: fbdev registered by the helper
+ * @pitch: desired pitch
+ * @depth: desired depth
+ *
+ * Helper to fill in the fixed fbdev information useful for a non-accelerated
+ * fbdev emulation. Drivers which support acceleration methods which impose
+ * additional constraints need to set up their own limits.
+ *
+ * Drivers should call this (or their equivalent setup code) from their
+ * ->fb_probe callback.
+ */
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth)
{
@@ -861,6 +998,20 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
}
EXPORT_SYMBOL(drm_fb_helper_fill_fix);
+/**
+ * drm_fb_helper_fill_var - initializes variable fbdev information
+ * @info: fbdev instance to set up
+ * @fb_helper: fb helper instance to use as template
+ * @fb_width: desired fb width
+ * @fb_height: desired fb height
+ *
+ * Sets up the variable fbdev metainformation from the given fb helper instance
+ * and the drm framebuffer allocated in fb_helper->fb.
+ *
+ * Drivers should call this (or their equivalent setup code) from their
+ * ->fb_probe callback after having allocated the fbdev backing
+ * storage framebuffer.
+ */
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height)
{
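As a sketch of where drm_fb_helper_fill_fix() and drm_fb_helper_fill_var() sit in a ->fb_probe implementation, assuming the driver has already allocated the drm_framebuffer and the fbdev info and mapped the backing storage; the foo_ name is hypothetical.

static void foo_fbdev_fill(struct drm_fb_helper *helper, struct fb_info *info,
			   struct drm_framebuffer *fb,
			   struct drm_fb_helper_surface_size *sizes)
{
	/* publish the backing objects to the helper ... */
	helper->fb = fb;
	helper->fbdev = info;

	/* ... then fill in the fixed and variable fbdev metadata */
	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, helper, sizes->fb_width, sizes->fb_height);
}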
@@ -1284,6 +1435,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
for (i = 0; i < fb_helper->crtc_count; i++) {
modeset = &fb_helper->crtc_info[i].mode_set;
modeset->num_connectors = 0;
+ modeset->fb = NULL;
}
for (i = 0; i < fb_helper->connector_count; i++) {
@@ -1300,9 +1452,21 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
modeset->mode = drm_mode_duplicate(dev,
fb_crtc->desired_mode);
modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
+ modeset->fb = fb_helper->fb;
}
}
+ /* Clear out any old modes if there are no more connected outputs. */
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ modeset = &fb_helper->crtc_info[i].mode_set;
+ if (modeset->num_connectors == 0) {
+ BUG_ON(modeset->fb);
+ BUG_ON(modeset->num_connectors);
+ if (modeset->mode)
+ drm_mode_destroy(dev, modeset->mode);
+ modeset->mode = NULL;
+ }
+ }
out:
kfree(crtcs);
kfree(modes);
@@ -1310,18 +1474,23 @@ out:
}
/**
- * drm_helper_initial_config - setup a sane initial connector configuration
+ * drm_fb_helper_initial_config - setup a sane initial connector configuration
* @fb_helper: fb_helper device struct
* @bpp_sel: bpp value to use for the framebuffer configuration
*
- * LOCKING:
- * Called at init time by the driver to set up the @fb_helper initial
- * configuration, must take the mode config lock.
- *
* Scans the CRTCs and connectors and tries to put together an initial setup.
* At the moment, this is a cloned configuration across all heads with
* a new framebuffer object as the backing store.
*
+ * Note that this also registers the fbdev and so allows userspace to call into
+ * the driver through the fbdev interfaces.
+ *
+ * This function will call down into the ->fb_probe callback to let
+ * the driver allocate and initialize the fbdev info structure and the drm
+ * framebuffer used to back the fbdev. drm_fb_helper_fill_var() and
+ * drm_fb_helper_fill_fix() are provided as helpers to set up simple default
+ * values for the fbdev info structure.
+ *
* RETURNS:
* Zero if everything went ok, nonzero otherwise.
*/
@@ -1330,9 +1499,6 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
struct drm_device *dev = fb_helper->dev;
int count = 0;
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(fb_helper->dev);
-
drm_fb_helper_parse_command_line(fb_helper);
count = drm_fb_helper_probe_connector_modes(fb_helper,
@@ -1355,12 +1521,17 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
* probing all the outputs attached to the fb
* @fb_helper: the drm_fb_helper
*
- * LOCKING:
- * Called at runtime, must take mode config lock.
- *
* Scan the connectors attached to the fb_helper and try to put together a
* setup after notification of a change in output configuration.
*
+ * Called at runtime, takes the mode config locks to be able to check/change the
+ * modeset configuration. Must be run from process context (which usually means
+ * either the output polling work or a work item launched from the driver's
+ * hotplug interrupt).
+ *
+ * Note that the driver must ensure that this is only called _after_ the fb has
+ * been fully set up, i.e. after the call to drm_fb_helper_initial_config.
+ *
* RETURNS:
* 0 on success and a non-zero error code otherwise.
*/
@@ -1369,23 +1540,14 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
struct drm_device *dev = fb_helper->dev;
int count = 0;
u32 max_width, max_height, bpp_sel;
- int bound = 0, crtcs_bound = 0;
- struct drm_crtc *crtc;
if (!fb_helper->fb)
return 0;
- mutex_lock(&dev->mode_config.mutex);
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->fb)
- crtcs_bound++;
- if (crtc->fb == fb_helper->fb)
- bound++;
- }
-
- if (bound < crtcs_bound) {
+ drm_modeset_lock_all(dev);
+ if (!drm_fb_helper_is_bound(fb_helper)) {
fb_helper->delayed_hotplug = true;
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return 0;
}
DRM_DEBUG_KMS("\n");
@@ -1397,9 +1559,11 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
max_height);
drm_setup_crtcs(fb_helper);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
- return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+ drm_fb_helper_set_par(fb_helper->fbdev);
+
+ return 0;
}
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
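A sketch of the usual way drivers forward output changes to the helper via the ->output_poll_changed hook mentioned in the overview comment; the foo_* names and private-data layout are assumptions.

static void foo_output_poll_changed(struct drm_device *dev)
{
	struct foo_drm_private *priv = dev->dev_private;

	drm_fb_helper_hotplug_event(&priv->fbdev->helper);
}

static const struct drm_mode_config_funcs foo_mode_config_funcs = {
	.fb_create		= foo_user_fb_create,	/* driver's fb creation hook */
	.output_poll_changed	= foo_output_poll_changed,
};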
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 133b4132983..13fdcd10a60 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -276,6 +276,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
INIT_LIST_HEAD(&priv->lhead);
INIT_LIST_HEAD(&priv->fbs);
+ mutex_init(&priv->fbs_lock);
INIT_LIST_HEAD(&priv->event_list);
init_waitqueue_head(&priv->event_wait);
priv->event_space = 4096; /* set aside 4k for event buffer */
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 1aa8fee1e86..0a7e011509b 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -249,3 +249,24 @@ int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
return drm_gem_handle_delete(file_priv, handle);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_destroy);
+
+#ifdef CONFIG_DEBUG_FS
+void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m)
+{
+ struct drm_gem_object *obj = &cma_obj->base;
+ struct drm_device *dev = obj->dev;
+ uint64_t off = 0;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (obj->map_list.map)
+ off = (uint64_t)obj->map_list.hash.key;
+
+ seq_printf(m, "%2d (%2d) %08llx %08Zx %p %d",
+ obj->name, obj->refcount.refcount.counter,
+ off, cma_obj->paddr, cma_obj->vaddr, obj->size);
+
+ seq_printf(m, "\n");
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
+#endif
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 19c01ca3cc7..a6a8643a6a7 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -505,6 +505,7 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
/* Valid dotclock? */
if (dotclock > 0) {
+ int frame_size;
/* Convert scanline length in pixels and video dot clock to
* line duration, frame duration and pixel duration in
* nanoseconds:
@@ -512,7 +513,10 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
pixeldur_ns = (s64) div64_u64(1000000000, dotclock);
linedur_ns = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal *
1000000000), dotclock);
- framedur_ns = (s64) crtc->hwmode.crtc_vtotal * linedur_ns;
+ frame_size = crtc->hwmode.crtc_htotal *
+ crtc->hwmode.crtc_vtotal;
+ framedur_ns = (s64) div64_u64((u64) frame_size * 1000000000,
+ dotclock);
} else
DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
crtc->base.id);
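For concreteness (numbers chosen for illustration, not taken from this patch): a 1080p60 mode with crtc_htotal = 2200, crtc_vtotal = 1125 and a 148.5 MHz dot clock gives

	pixeldur_ns = 10^9 / 148500000                     ≈ 6 ns
	linedur_ns  = 2200 * 10^9 / 148500000              ≈ 14814 ns
	framedur_ns = (2200 * 1125) * 10^9 / 148500000     ≈ 16666666 ns (60 Hz)

whereas the previous vtotal * linedur_ns form would have yielded 1125 * 14814 = 16665750 ns, i.e. the per-line truncation error accumulated over the whole frame.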
@@ -863,6 +867,7 @@ void drm_send_vblank_event(struct drm_device *dev, int crtc,
now = get_drm_timestamp();
}
+ e->pipe = crtc;
send_vblank_event(dev, e, seq, &now);
}
EXPORT_SYMBOL(drm_send_vblank_event);
@@ -1218,8 +1223,9 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
int ret;
unsigned int flags, seq, crtc, high_crtc;
- if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled))
- return -EINVAL;
+ if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+ if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled))
+ return -EINVAL;
if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 2aa331499f8..db1e2d6f90d 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -102,20 +102,6 @@ int drm_mm_pre_get(struct drm_mm *mm)
}
EXPORT_SYMBOL(drm_mm_pre_get);
-static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
-{
- return hole_node->start + hole_node->size;
-}
-
-static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
-{
- struct drm_mm_node *next_node =
- list_entry(hole_node->node_list.next, struct drm_mm_node,
- node_list);
-
- return next_node->start;
-}
-
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment,
@@ -127,7 +113,7 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
unsigned long adj_start = hole_start;
unsigned long adj_end = hole_end;
- BUG_ON(!hole_node->hole_follows || node->allocated);
+ BUG_ON(node->allocated);
if (mm->color_adjust)
mm->color_adjust(hole_node, color, &adj_start, &adj_end);
@@ -155,12 +141,57 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
BUG_ON(node->start + node->size > adj_end);
node->hole_follows = 0;
- if (node->start + node->size < hole_end) {
+ if (__drm_mm_hole_node_start(node) < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
}
}
+struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
+ unsigned long start,
+ unsigned long size,
+ bool atomic)
+{
+ struct drm_mm_node *hole, *node;
+ unsigned long end = start + size;
+ unsigned long hole_start;
+ unsigned long hole_end;
+
+ drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
+ if (hole_start > start || hole_end < end)
+ continue;
+
+ node = drm_mm_kmalloc(mm, atomic);
+ if (unlikely(node == NULL))
+ return NULL;
+
+ node->start = start;
+ node->size = size;
+ node->mm = mm;
+ node->allocated = 1;
+
+ INIT_LIST_HEAD(&node->hole_stack);
+ list_add(&node->node_list, &hole->node_list);
+
+ if (start == hole_start) {
+ hole->hole_follows = 0;
+ list_del_init(&hole->hole_stack);
+ }
+
+ node->hole_follows = 0;
+ if (end != hole_end) {
+ list_add(&node->hole_stack, &mm->hole_stack);
+ node->hole_follows = 1;
+ }
+
+ return node;
+ }
+
+ WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
+ return NULL;
+}
+EXPORT_SYMBOL(drm_mm_create_block);
+
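drm_mm_create_block() places a node at a caller-chosen offset instead of searching for free space. A usage sketch under the assumption of a pre-reserved range (for example a firmware scanout buffer); the foo_ name is hypothetical.

static int foo_reserve_range(struct drm_mm *mm, unsigned long start,
			     unsigned long size)
{
	struct drm_mm_node *node;

	/* fails (returns NULL) unless one hole fully covers [start, start + size) */
	node = drm_mm_create_block(mm, start, size, false);
	if (!node)
		return -ENOSPC;

	return 0;
}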
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
@@ -253,7 +284,7 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
BUG_ON(node->start + node->size > end);
node->hole_follows = 0;
- if (node->start + node->size < hole_end) {
+ if (__drm_mm_hole_node_start(node) < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
}
@@ -327,12 +358,13 @@ void drm_mm_remove_node(struct drm_mm_node *node)
list_entry(node->node_list.prev, struct drm_mm_node, node_list);
if (node->hole_follows) {
- BUG_ON(drm_mm_hole_node_start(node)
- == drm_mm_hole_node_end(node));
+ BUG_ON(__drm_mm_hole_node_start(node) ==
+ __drm_mm_hole_node_end(node));
list_del(&node->hole_stack);
} else
- BUG_ON(drm_mm_hole_node_start(node)
- != drm_mm_hole_node_end(node));
+ BUG_ON(__drm_mm_hole_node_start(node) !=
+ __drm_mm_hole_node_end(node));
+
if (!prev_node->hole_follows) {
prev_node->hole_follows = 1;
@@ -390,6 +422,8 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
+ unsigned long adj_start;
+ unsigned long adj_end;
unsigned long best_size;
BUG_ON(mm->scanned_blocks);
@@ -397,17 +431,13 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
best = NULL;
best_size = ~0UL;
- list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
- unsigned long adj_start = drm_mm_hole_node_start(entry);
- unsigned long adj_end = drm_mm_hole_node_end(entry);
-
+ drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
if (mm->color_adjust) {
mm->color_adjust(entry, color, &adj_start, &adj_end);
if (adj_end <= adj_start)
continue;
}
- BUG_ON(!entry->hole_follows);
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
@@ -434,6 +464,8 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
+ unsigned long adj_start;
+ unsigned long adj_end;
unsigned long best_size;
BUG_ON(mm->scanned_blocks);
@@ -441,13 +473,11 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
best = NULL;
best_size = ~0UL;
- list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
- unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
- start : drm_mm_hole_node_start(entry);
- unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
- end : drm_mm_hole_node_end(entry);
-
- BUG_ON(!entry->hole_follows);
+ drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
+ if (adj_start < start)
+ adj_start = start;
+ if (adj_end > end)
+ adj_end = end;
if (mm->color_adjust) {
mm->color_adjust(entry, color, &adj_start, &adj_end);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index d8da30e90db..04fa6f1808d 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -35,6 +35,8 @@
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
/**
* drm_mode_debug_printmodeline - debug print a mode
@@ -504,6 +506,74 @@ drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
}
EXPORT_SYMBOL(drm_gtf_mode);
+#if IS_ENABLED(CONFIG_VIDEOMODE)
+int drm_display_mode_from_videomode(const struct videomode *vm,
+ struct drm_display_mode *dmode)
+{
+ dmode->hdisplay = vm->hactive;
+ dmode->hsync_start = dmode->hdisplay + vm->hfront_porch;
+ dmode->hsync_end = dmode->hsync_start + vm->hsync_len;
+ dmode->htotal = dmode->hsync_end + vm->hback_porch;
+
+ dmode->vdisplay = vm->vactive;
+ dmode->vsync_start = dmode->vdisplay + vm->vfront_porch;
+ dmode->vsync_end = dmode->vsync_start + vm->vsync_len;
+ dmode->vtotal = dmode->vsync_end + vm->vback_porch;
+
+ dmode->clock = vm->pixelclock / 1000;
+
+ dmode->flags = 0;
+ if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH)
+ dmode->flags |= DRM_MODE_FLAG_PHSYNC;
+ else if (vm->dmt_flags & VESA_DMT_HSYNC_LOW)
+ dmode->flags |= DRM_MODE_FLAG_NHSYNC;
+ if (vm->dmt_flags & VESA_DMT_VSYNC_HIGH)
+ dmode->flags |= DRM_MODE_FLAG_PVSYNC;
+ else if (vm->dmt_flags & VESA_DMT_VSYNC_LOW)
+ dmode->flags |= DRM_MODE_FLAG_NVSYNC;
+ if (vm->data_flags & DISPLAY_FLAGS_INTERLACED)
+ dmode->flags |= DRM_MODE_FLAG_INTERLACE;
+ if (vm->data_flags & DISPLAY_FLAGS_DOUBLESCAN)
+ dmode->flags |= DRM_MODE_FLAG_DBLSCAN;
+ drm_mode_set_name(dmode);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode);
+#endif
+
+#if IS_ENABLED(CONFIG_OF_VIDEOMODE)
+/**
+ * of_get_drm_display_mode - get a drm_display_mode from devicetree
+ * @np: device_node with the timing specification
+ * @dmode: will be set to the return value
+ * @index: index into the list of display timings in devicetree
+ *
+ * This function is expensive and should only be used if only one mode is to be
+ * read from DT. To get multiple modes, start with of_get_display_timings and
+ * work with that instead.
+ */
+int of_get_drm_display_mode(struct device_node *np,
+ struct drm_display_mode *dmode, int index)
+{
+ struct videomode vm;
+ int ret;
+
+ ret = of_get_videomode(np, &vm, index);
+ if (ret)
+ return ret;
+
+ drm_display_mode_from_videomode(&vm, dmode);
+
+ pr_debug("%s: got %dx%d display mode from %s\n",
+ of_node_full_name(np), vm.hactive, vm.vactive, np->name);
+ drm_mode_debug_printmodeline(dmode);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_get_drm_display_mode);
+#endif
+
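A sketch of how a connector's ->get_modes implementation might use of_get_drm_display_mode() to pick up a single timing from the device tree; the foo_ name and the source of the device_node are assumptions.

static int foo_add_dt_mode(struct drm_connector *connector,
			   struct device_node *np)
{
	struct drm_display_mode *mode;

	mode = drm_mode_create(connector->dev);
	if (!mode)
		return 0;

	/* read the first (index 0) display timing from the DT node */
	if (of_get_drm_display_mode(np, mode, 0)) {
		drm_mode_destroy(connector->dev, mode);
		return 0;
	}

	mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	drm_mode_probed_add(connector, mode);

	return 1;	/* number of modes added */
}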
/**
* drm_mode_set_name - set the name on a mode
* @mode: name will be set in this mode
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 754bc96e10c..bd719e936e1 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -439,78 +439,67 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
return 0;
}
-#else
-
-int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
-{
- return -1;
-}
-
-#endif
-
-EXPORT_SYMBOL(drm_pci_init);
-
-/*@}*/
-void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
-{
- struct drm_device *dev, *tmp;
- DRM_DEBUG("\n");
-
- if (driver->driver_features & DRIVER_MODESET) {
- pci_unregister_driver(pdriver);
- } else {
- list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
- drm_put_dev(dev);
- }
- DRM_INFO("Module unloaded\n");
-}
-EXPORT_SYMBOL(drm_pci_exit);
-
int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
struct pci_dev *root;
- int pos;
- u32 lnkcap = 0, lnkcap2 = 0;
+ u32 lnkcap, lnkcap2;
*mask = 0;
if (!dev->pdev)
return -EINVAL;
- if (!pci_is_pcie(dev->pdev))
- return -EINVAL;
-
root = dev->pdev->bus->self;
- pos = pci_pcie_cap(root);
- if (!pos)
- return -EINVAL;
-
/* we've been informed via and serverworks don't make the cut */
if (root->vendor == PCI_VENDOR_ID_VIA ||
root->vendor == PCI_VENDOR_ID_SERVERWORKS)
return -EINVAL;
- pci_read_config_dword(root, pos + PCI_EXP_LNKCAP, &lnkcap);
- pci_read_config_dword(root, pos + PCI_EXP_LNKCAP2, &lnkcap2);
+ pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
+ pcie_capability_read_dword(root, PCI_EXP_LNKCAP2, &lnkcap2);
- lnkcap &= PCI_EXP_LNKCAP_SLS;
- lnkcap2 &= 0xfe;
-
- if (lnkcap2) { /* PCIE GEN 3.0 */
+ if (lnkcap2) { /* PCIe r3.0-compliant */
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
*mask |= DRM_PCIE_SPEED_25;
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
*mask |= DRM_PCIE_SPEED_50;
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
*mask |= DRM_PCIE_SPEED_80;
- } else {
- if (lnkcap & 1)
+ } else { /* pre-r3.0 */
+ if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
*mask |= DRM_PCIE_SPEED_25;
- if (lnkcap & 2)
- *mask |= DRM_PCIE_SPEED_50;
+ if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
+ *mask |= (DRM_PCIE_SPEED_25 | DRM_PCIE_SPEED_50);
}
DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2);
return 0;
}
EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);
+
+#else
+
+int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
+{
+ return -1;
+}
+
+#endif
+
+EXPORT_SYMBOL(drm_pci_init);
+
+/*@}*/
+void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
+{
+ struct drm_device *dev, *tmp;
+ DRM_DEBUG("\n");
+
+ if (driver->driver_features & DRIVER_MODESET) {
+ pci_unregister_driver(pdriver);
+ } else {
+ list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
+ drm_put_dev(dev);
+ }
+ DRM_INFO("Module unloaded\n");
+}
+EXPORT_SYMBOL(drm_pci_exit);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 7f125738f44..366910ddcfc 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -53,7 +53,8 @@
* Self-importing: if userspace is using PRIME as a replacement for flink
* then it will get a fd->handle request for a GEM object that it created.
* Drivers should detect this situation and return back the gem object
- * from the dma-buf private.
+ * from the dma-buf private. Prime will do this automatically for drivers that
+ * use the drm_gem_prime_{import,export} helpers.
*/
struct drm_prime_member {
@@ -62,6 +63,137 @@ struct drm_prime_member {
uint32_t handle;
};
+static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct sg_table *sgt;
+
+ mutex_lock(&obj->dev->struct_mutex);
+
+ sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
+
+ if (!IS_ERR_OR_NULL(sgt))
+ dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+
+ mutex_unlock(&obj->dev->struct_mutex);
+ return sgt;
+}
+
+static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+
+static void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+
+ if (obj->export_dma_buf == dma_buf) {
+ /* drop the reference on the export fd holds */
+ obj->export_dma_buf = NULL;
+ drm_gem_object_unreference_unlocked(obj);
+ }
+}
+
+static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->dev;
+
+ return dev->driver->gem_prime_vmap(obj);
+}
+
+static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->dev;
+
+ dev->driver->gem_prime_vunmap(obj, vaddr);
+}
+
+static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ return NULL;
+}
+
+static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
+{
+
+}
+static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ return NULL;
+}
+
+static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
+{
+
+}
+
+static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
+ struct vm_area_struct *vma)
+{
+ return -EINVAL;
+}
+
+static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
+ .map_dma_buf = drm_gem_map_dma_buf,
+ .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .release = drm_gem_dmabuf_release,
+ .kmap = drm_gem_dmabuf_kmap,
+ .kmap_atomic = drm_gem_dmabuf_kmap_atomic,
+ .kunmap = drm_gem_dmabuf_kunmap,
+ .kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
+ .mmap = drm_gem_dmabuf_mmap,
+ .vmap = drm_gem_dmabuf_vmap,
+ .vunmap = drm_gem_dmabuf_vunmap,
+};
+
+/**
+ * DOC: PRIME Helpers
+ *
+ * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
+ * simpler APIs by using the helper functions @drm_gem_prime_export and
+ * @drm_gem_prime_import. These functions implement dma-buf support in terms of
+ * five lower-level driver callbacks:
+ *
+ * Export callbacks:
+ *
+ * - @gem_prime_pin (optional): prepare a GEM object for exporting
+ *
+ * - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
+ *
+ * - @gem_prime_vmap: vmap a buffer exported by your driver
+ *
+ * - @gem_prime_vunmap: vunmap a buffer exported by your driver
+ *
+ * Import callback:
+ *
+ * - @gem_prime_import_sg_table (import): produce a GEM object from another
+ * driver's scatter/gather table
+ */
+
+struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags)
+{
+ if (dev->driver->gem_prime_pin) {
+ int ret = dev->driver->gem_prime_pin(obj);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+ return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size,
+ 0600);
+}
+EXPORT_SYMBOL(drm_gem_prime_export);
+
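A sketch of how the callbacks listed above plug into struct drm_driver alongside the generic export/import helpers; the foo_* callbacks stand in for driver code and are assumptions.

static struct drm_driver foo_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_PRIME,
	/* generic PRIME plumbing */
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
	.gem_prime_export	= drm_gem_prime_export,
	.gem_prime_import	= drm_gem_prime_import,
	/* driver-specific lower-level callbacks */
	.gem_prime_pin		= foo_gem_prime_pin,		/* optional */
	.gem_prime_get_sg_table	= foo_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
	.gem_prime_vmap		= foo_gem_prime_vmap,
	.gem_prime_vunmap	= foo_gem_prime_vunmap,
	/* ... remaining drm_driver fields elided ... */
};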
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle, uint32_t flags,
int *prime_fd)
@@ -117,6 +249,58 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
+struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (!dev->driver->gem_prime_import_sg_table)
+ return ERR_PTR(-EINVAL);
+
+ if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
+ obj = dma_buf->priv;
+ if (obj->dev == dev) {
+ /*
+ * Importing dmabuf exported from our own gem increases
+ * refcount on gem itself instead of f_count of dmabuf.
+ */
+ drm_gem_object_reference(obj);
+ dma_buf_put(dma_buf);
+ return obj;
+ }
+ }
+
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_PTR(PTR_ERR(attach));
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR_OR_NULL(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto fail_detach;
+ }
+
+ obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto fail_unmap;
+ }
+
+ obj->import_attach = attach;
+
+ return obj;
+
+fail_unmap:
+ dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(drm_gem_prime_import);
+
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
struct drm_file *file_priv, int prime_fd, uint32_t *handle)
{
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
index 3cec3061141..34a156f0c33 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -18,7 +18,7 @@ int drm_get_usb_dev(struct usb_interface *interface,
usbdev = interface_to_usbdev(interface);
dev->usbdev = usbdev;
- dev->dev = &usbdev->dev;
+ dev->dev = &interface->dev;
mutex_lock(&drm_global_mutex);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 294c0513f58..0e04f4ea441 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -99,6 +99,10 @@ static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
DRM_DEBUG_KMS("%s\n", __FILE__);
+ /* This fb should have only one gem object. */
+ if (WARN_ON(exynos_fb->buf_cnt != 1))
+ return -EINVAL;
+
return drm_gem_handle_create(file_priv,
&exynos_fb->exynos_gem_obj[0]->base, handle);
}
@@ -217,23 +221,25 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_gem_object *obj;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
struct exynos_drm_fb *exynos_fb;
int i, ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
- obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object\n");
- return ERR_PTR(-ENOENT);
- }
-
exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
if (!exynos_fb) {
DRM_ERROR("failed to allocate exynos drm framebuffer\n");
return ERR_PTR(-ENOMEM);
}
+ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object\n");
+ ret = -ENOENT;
+ goto err_free;
+ }
+
drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd);
@@ -241,43 +247,44 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
for (i = 1; i < exynos_fb->buf_cnt; i++) {
- struct exynos_drm_gem_obj *exynos_gem_obj;
- int ret;
-
obj = drm_gem_object_lookup(dev, file_priv,
mode_cmd->handles[i]);
if (!obj) {
DRM_ERROR("failed to lookup gem object\n");
- kfree(exynos_fb);
- return ERR_PTR(-ENOENT);
+ ret = -ENOENT;
+ exynos_fb->buf_cnt = i;
+ goto err_unreference;
}
exynos_gem_obj = to_exynos_gem_obj(obj);
+ exynos_fb->exynos_gem_obj[i] = exynos_gem_obj;
ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
if (ret < 0) {
DRM_ERROR("cannot use this gem memory type for fb.\n");
- kfree(exynos_fb);
- return ERR_PTR(ret);
+ goto err_unreference;
}
-
- exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
}
ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
if (ret) {
- for (i = 0; i < exynos_fb->buf_cnt; i++) {
- struct exynos_drm_gem_obj *gem_obj;
-
- gem_obj = exynos_fb->exynos_gem_obj[i];
- drm_gem_object_unreference_unlocked(&gem_obj->base);
- }
-
- kfree(exynos_fb);
- return ERR_PTR(ret);
+ DRM_ERROR("failed to init framebuffer.\n");
+ goto err_unreference;
}
return &exynos_fb->fb;
+
+err_unreference:
+ for (i = 0; i < exynos_fb->buf_cnt; i++) {
+ struct drm_gem_object *obj;
+
+ obj = &exynos_fb->exynos_gem_obj[i]->base;
+ if (obj)
+ drm_gem_object_unreference_unlocked(obj);
+ }
+err_free:
+ kfree(exynos_fb);
+ return ERR_PTR(ret);
}
struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 71f867340a8..68f0045f86b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -226,36 +226,8 @@ out:
return ret;
}
-static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- int ret = 0;
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- /*
- * with !helper->fb, it means that this funcion is called first time
- * and after that, the helper->fb would be used as clone mode.
- */
- if (!helper->fb) {
- ret = exynos_drm_fbdev_create(helper, sizes);
- if (ret < 0) {
- DRM_ERROR("failed to create fbdev.\n");
- return ret;
- }
-
- /*
- * fb_helper expects a value more than 1 if succeed
- * because register_framebuffer() should be called.
- */
- ret = 1;
- }
-
- return ret;
-}
-
static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
- .fb_probe = exynos_drm_fbdev_probe,
+ .fb_probe = exynos_drm_fbdev_create,
};
int exynos_drm_fbdev_init(struct drm_device *dev)
@@ -295,6 +267,9 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
}
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
if (ret < 0) {
DRM_ERROR("failed to set up hw configuration.\n");
@@ -326,8 +301,10 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
/* release drm framebuffer and real buffer */
if (fb_helper->fb && fb_helper->fb->funcs) {
fb = fb_helper->fb;
- if (fb)
+ if (fb) {
+ drm_framebuffer_unregister_private(fb);
drm_framebuffer_remove(fb);
+ }
}
/* release linux framebuffer */
@@ -374,5 +351,7 @@ void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
if (!private || !private->fb_helper)
return;
+ drm_modeset_lock_all(dev);
drm_fb_helper_restore_fbdev_mode(private->fb_helper);
+ drm_modeset_unlock_all(dev);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index fb2f81b8063..3b0da0378ac 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -19,6 +19,7 @@
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>
+#include <linux/of.h>
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
@@ -429,7 +430,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
g2d_userptr->pages = pages;
- sgt = kzalloc(sizeof *sgt, GFP_KERNEL);
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) {
DRM_ERROR("failed to allocate sg table.\n");
ret = -ENOMEM;
@@ -1239,6 +1240,14 @@ static int g2d_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
+#ifdef CONFIG_OF
+static const struct of_device_id exynos_g2d_match[] = {
+ { .compatible = "samsung,exynos5250-g2d" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_g2d_match);
+#endif
+
struct platform_driver g2d_driver = {
.probe = g2d_probe,
.remove = g2d_remove,
@@ -1246,5 +1255,6 @@ struct platform_driver g2d_driver = {
.name = "s5p-g2d",
.owner = THIS_MODULE,
.pm = &g2d_pm_ops,
+ .of_match_table = of_match_ptr(exynos_g2d_match),
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 47318077652..67e17ce112b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -329,17 +329,11 @@ static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
{
struct drm_file *file_priv;
- mutex_lock(&drm_dev->struct_mutex);
-
/* find current process's drm_file from filelist. */
- list_for_each_entry(file_priv, &drm_dev->filelist, lhead) {
- if (file_priv->filp == filp) {
- mutex_unlock(&drm_dev->struct_mutex);
+ list_for_each_entry(file_priv, &drm_dev->filelist, lhead)
+ if (file_priv->filp == filp)
return file_priv;
- }
- }
- mutex_unlock(&drm_dev->struct_mutex);
WARN_ON(1);
return ERR_PTR(-EFAULT);
@@ -400,9 +394,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
*/
drm_gem_object_reference(obj);
- mutex_lock(&drm_dev->struct_mutex);
drm_vm_open_locked(drm_dev, vma);
- mutex_unlock(&drm_dev->struct_mutex);
return 0;
}
@@ -432,6 +424,16 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
}
/*
+ * We have to use the gem object and its fops for the specific mapper,
+ * but vm_mmap() can only deliver a filp. So we have to change
+ * filp->f_op and filp->private_data temporarily, then restore
+ * them again. It is therefore important to hold the lock until the
+ * settings are restored, to prevent others from misusing filp->f_op
+ * or filp->private_data.
+ */
+ mutex_lock(&dev->struct_mutex);
+
+ /*
* Set specific mmper's fops. And it will be restored by
* exynos_drm_gem_mmap_buffer to dev->driver->fops.
* This is used to call specific mapper temporarily.
@@ -448,13 +450,20 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
addr = vm_mmap(file_priv->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED, 0);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_unreference(obj);
if (IS_ERR((void *)addr)) {
- file_priv->filp->private_data = file_priv;
+ /* check filp->f_op, filp->private_data are restored */
+ if (file_priv->filp->f_op == &exynos_drm_gem_fops) {
+ file_priv->filp->f_op = fops_get(dev->driver->fops);
+ file_priv->filp->private_data = file_priv;
+ }
+ mutex_unlock(&dev->struct_mutex);
return PTR_ERR((void *)addr);
}
+ mutex_unlock(&dev->struct_mutex);
+
args->mapped = addr;
DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 28644539b30..7c27df03c9f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -124,9 +124,21 @@ static struct edid *drm_hdmi_get_edid(struct device *dev,
static int drm_hdmi_check_timing(struct device *dev, void *timing)
{
struct drm_hdmi_context *ctx = to_context(dev);
+ int ret = 0;
DRM_DEBUG_KMS("%s\n", __FILE__);
+ /*
+ * Both the mixer and hdmi should be able to handle the requested mode.
+ * If either of the two fails, return the mode as bad.
+ */
+
+ if (mixer_ops && mixer_ops->check_timing)
+ ret = mixer_ops->check_timing(ctx->mixer_ctx->ctx, timing);
+
+ if (ret)
+ return ret;
+
if (hdmi_ops && hdmi_ops->check_timing)
return hdmi_ops->check_timing(ctx->hdmi_ctx->ctx, timing);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index d80516fc9ed..b7faa366230 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -32,7 +32,7 @@ struct exynos_hdmi_ops {
bool (*is_connected)(void *ctx);
struct edid *(*get_edid)(void *ctx,
struct drm_connector *connector);
- int (*check_timing)(void *ctx, void *timing);
+ int (*check_timing)(void *ctx, struct fb_videomode *timing);
int (*power_on)(void *ctx, int mode);
/* manager */
@@ -58,6 +58,9 @@ struct exynos_mixer_ops {
void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
void (*win_commit)(void *ctx, int zpos);
void (*win_disable)(void *ctx, int zpos);
+
+ /* display */
+ int (*check_timing)(void *ctx, struct fb_videomode *timing);
};
void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index 53b7deea8ab..598e60f57d4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -14,7 +14,7 @@
#define EXYNOS_DEV_ADDR_START 0x20000000
#define EXYNOS_DEV_ADDR_SIZE 0x40000000
-#define EXYNOS_DEV_ADDR_ORDER 0x4
+#define EXYNOS_DEV_ADDR_ORDER 0x0
#ifdef CONFIG_DRM_EXYNOS_IOMMU
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 233247505ff..2c5f266154a 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -87,6 +87,73 @@ struct hdmi_resources {
int regul_count;
};
+struct hdmi_tg_regs {
+ u8 cmd[1];
+ u8 h_fsz[2];
+ u8 hact_st[2];
+ u8 hact_sz[2];
+ u8 v_fsz[2];
+ u8 vsync[2];
+ u8 vsync2[2];
+ u8 vact_st[2];
+ u8 vact_sz[2];
+ u8 field_chg[2];
+ u8 vact_st2[2];
+ u8 vact_st3[2];
+ u8 vact_st4[2];
+ u8 vsync_top_hdmi[2];
+ u8 vsync_bot_hdmi[2];
+ u8 field_top_hdmi[2];
+ u8 field_bot_hdmi[2];
+ u8 tg_3d[1];
+};
+
+struct hdmi_core_regs {
+ u8 h_blank[2];
+ u8 v2_blank[2];
+ u8 v1_blank[2];
+ u8 v_line[2];
+ u8 h_line[2];
+ u8 hsync_pol[1];
+ u8 vsync_pol[1];
+ u8 int_pro_mode[1];
+ u8 v_blank_f0[2];
+ u8 v_blank_f1[2];
+ u8 h_sync_start[2];
+ u8 h_sync_end[2];
+ u8 v_sync_line_bef_2[2];
+ u8 v_sync_line_bef_1[2];
+ u8 v_sync_line_aft_2[2];
+ u8 v_sync_line_aft_1[2];
+ u8 v_sync_line_aft_pxl_2[2];
+ u8 v_sync_line_aft_pxl_1[2];
+ u8 v_blank_f2[2]; /* for 3D mode */
+ u8 v_blank_f3[2]; /* for 3D mode */
+ u8 v_blank_f4[2]; /* for 3D mode */
+ u8 v_blank_f5[2]; /* for 3D mode */
+ u8 v_sync_line_aft_3[2];
+ u8 v_sync_line_aft_4[2];
+ u8 v_sync_line_aft_5[2];
+ u8 v_sync_line_aft_6[2];
+ u8 v_sync_line_aft_pxl_3[2];
+ u8 v_sync_line_aft_pxl_4[2];
+ u8 v_sync_line_aft_pxl_5[2];
+ u8 v_sync_line_aft_pxl_6[2];
+ u8 vact_space_1[2];
+ u8 vact_space_2[2];
+ u8 vact_space_3[2];
+ u8 vact_space_4[2];
+ u8 vact_space_5[2];
+ u8 vact_space_6[2];
+};
+
+struct hdmi_v14_conf {
+ int pixel_clock;
+ struct hdmi_core_regs core;
+ struct hdmi_tg_regs tg;
+ int cea_video_id;
+};
+
struct hdmi_context {
struct device *dev;
struct drm_device *drm_dev;
@@ -104,6 +171,7 @@ struct hdmi_context {
/* current hdmiphy conf index */
int cur_conf;
+ struct hdmi_v14_conf mode_conf;
struct hdmi_resources res;
@@ -392,586 +460,132 @@ static const struct hdmi_v13_conf hdmi_v13_confs[] = {
};
/* HDMI Version 1.4 */
-static const u8 hdmiphy_conf27_027[32] = {
- 0x01, 0xd1, 0x2d, 0x72, 0x40, 0x64, 0x12, 0x08,
- 0x43, 0xa0, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
- 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
- 0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
-};
-
-static const u8 hdmiphy_conf74_176[32] = {
- 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x5b, 0xef, 0x08,
- 0x81, 0xa0, 0xb9, 0xd8, 0x45, 0xa0, 0xac, 0x80,
- 0x5a, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
- 0x54, 0xa6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
-};
-
-static const u8 hdmiphy_conf74_25[32] = {
- 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x40, 0xf8, 0x08,
- 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
- 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
- 0x54, 0xa5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
-};
-
-static const u8 hdmiphy_conf148_5[32] = {
- 0x01, 0xd1, 0x1f, 0x00, 0x40, 0x40, 0xf8, 0x08,
- 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
- 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
- 0x54, 0x4b, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00,
-};
-
-struct hdmi_tg_regs {
- u8 cmd;
- u8 h_fsz_l;
- u8 h_fsz_h;
- u8 hact_st_l;
- u8 hact_st_h;
- u8 hact_sz_l;
- u8 hact_sz_h;
- u8 v_fsz_l;
- u8 v_fsz_h;
- u8 vsync_l;
- u8 vsync_h;
- u8 vsync2_l;
- u8 vsync2_h;
- u8 vact_st_l;
- u8 vact_st_h;
- u8 vact_sz_l;
- u8 vact_sz_h;
- u8 field_chg_l;
- u8 field_chg_h;
- u8 vact_st2_l;
- u8 vact_st2_h;
- u8 vact_st3_l;
- u8 vact_st3_h;
- u8 vact_st4_l;
- u8 vact_st4_h;
- u8 vsync_top_hdmi_l;
- u8 vsync_top_hdmi_h;
- u8 vsync_bot_hdmi_l;
- u8 vsync_bot_hdmi_h;
- u8 field_top_hdmi_l;
- u8 field_top_hdmi_h;
- u8 field_bot_hdmi_l;
- u8 field_bot_hdmi_h;
- u8 tg_3d;
-};
-
-struct hdmi_core_regs {
- u8 h_blank[2];
- u8 v2_blank[2];
- u8 v1_blank[2];
- u8 v_line[2];
- u8 h_line[2];
- u8 hsync_pol[1];
- u8 vsync_pol[1];
- u8 int_pro_mode[1];
- u8 v_blank_f0[2];
- u8 v_blank_f1[2];
- u8 h_sync_start[2];
- u8 h_sync_end[2];
- u8 v_sync_line_bef_2[2];
- u8 v_sync_line_bef_1[2];
- u8 v_sync_line_aft_2[2];
- u8 v_sync_line_aft_1[2];
- u8 v_sync_line_aft_pxl_2[2];
- u8 v_sync_line_aft_pxl_1[2];
- u8 v_blank_f2[2]; /* for 3D mode */
- u8 v_blank_f3[2]; /* for 3D mode */
- u8 v_blank_f4[2]; /* for 3D mode */
- u8 v_blank_f5[2]; /* for 3D mode */
- u8 v_sync_line_aft_3[2];
- u8 v_sync_line_aft_4[2];
- u8 v_sync_line_aft_5[2];
- u8 v_sync_line_aft_6[2];
- u8 v_sync_line_aft_pxl_3[2];
- u8 v_sync_line_aft_pxl_4[2];
- u8 v_sync_line_aft_pxl_5[2];
- u8 v_sync_line_aft_pxl_6[2];
- u8 vact_space_1[2];
- u8 vact_space_2[2];
- u8 vact_space_3[2];
- u8 vact_space_4[2];
- u8 vact_space_5[2];
- u8 vact_space_6[2];
-};
-
-struct hdmi_preset_conf {
- struct hdmi_core_regs core;
- struct hdmi_tg_regs tg;
-};
-
-struct hdmi_conf {
- int width;
- int height;
- int vrefresh;
- bool interlace;
- int cea_video_id;
- const u8 *hdmiphy_data;
- const struct hdmi_preset_conf *conf;
-};
-
-static const struct hdmi_preset_conf hdmi_conf_480p60 = {
- .core = {
- .h_blank = {0x8a, 0x00},
- .v2_blank = {0x0d, 0x02},
- .v1_blank = {0x2d, 0x00},
- .v_line = {0x0d, 0x02},
- .h_line = {0x5a, 0x03},
- .hsync_pol = {0x01},
- .vsync_pol = {0x01},
- .int_pro_mode = {0x00},
- .v_blank_f0 = {0xff, 0xff},
- .v_blank_f1 = {0xff, 0xff},
- .h_sync_start = {0x0e, 0x00},
- .h_sync_end = {0x4c, 0x00},
- .v_sync_line_bef_2 = {0x0f, 0x00},
- .v_sync_line_bef_1 = {0x09, 0x00},
- .v_sync_line_aft_2 = {0xff, 0xff},
- .v_sync_line_aft_1 = {0xff, 0xff},
- .v_sync_line_aft_pxl_2 = {0xff, 0xff},
- .v_sync_line_aft_pxl_1 = {0xff, 0xff},
- .v_blank_f2 = {0xff, 0xff},
- .v_blank_f3 = {0xff, 0xff},
- .v_blank_f4 = {0xff, 0xff},
- .v_blank_f5 = {0xff, 0xff},
- .v_sync_line_aft_3 = {0xff, 0xff},
- .v_sync_line_aft_4 = {0xff, 0xff},
- .v_sync_line_aft_5 = {0xff, 0xff},
- .v_sync_line_aft_6 = {0xff, 0xff},
- .v_sync_line_aft_pxl_3 = {0xff, 0xff},
- .v_sync_line_aft_pxl_4 = {0xff, 0xff},
- .v_sync_line_aft_pxl_5 = {0xff, 0xff},
- .v_sync_line_aft_pxl_6 = {0xff, 0xff},
- .vact_space_1 = {0xff, 0xff},
- .vact_space_2 = {0xff, 0xff},
- .vact_space_3 = {0xff, 0xff},
- .vact_space_4 = {0xff, 0xff},
- .vact_space_5 = {0xff, 0xff},
- .vact_space_6 = {0xff, 0xff},
- /* other don't care */
- },
- .tg = {
- 0x00, /* cmd */
- 0x5a, 0x03, /* h_fsz */
- 0x8a, 0x00, 0xd0, 0x02, /* hact */
- 0x0d, 0x02, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x2d, 0x00, 0xe0, 0x01, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x48, 0x02, /* vact_st2 */
- 0x00, 0x00, /* vact_st3 */
- 0x00, 0x00, /* vact_st4 */
- 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- 0x00, /* 3d FP */
- },
+struct hdmiphy_config {
+ int pixel_clock;
+ u8 conf[32];
};
-static const struct hdmi_preset_conf hdmi_conf_720p50 = {
- .core = {
- .h_blank = {0xbc, 0x02},
- .v2_blank = {0xee, 0x02},
- .v1_blank = {0x1e, 0x00},
- .v_line = {0xee, 0x02},
- .h_line = {0xbc, 0x07},
- .hsync_pol = {0x00},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x00},
- .v_blank_f0 = {0xff, 0xff},
- .v_blank_f1 = {0xff, 0xff},
- .h_sync_start = {0xb6, 0x01},
- .h_sync_end = {0xde, 0x01},
- .v_sync_line_bef_2 = {0x0a, 0x00},
- .v_sync_line_bef_1 = {0x05, 0x00},
- .v_sync_line_aft_2 = {0xff, 0xff},
- .v_sync_line_aft_1 = {0xff, 0xff},
- .v_sync_line_aft_pxl_2 = {0xff, 0xff},
- .v_sync_line_aft_pxl_1 = {0xff, 0xff},
- .v_blank_f2 = {0xff, 0xff},
- .v_blank_f3 = {0xff, 0xff},
- .v_blank_f4 = {0xff, 0xff},
- .v_blank_f5 = {0xff, 0xff},
- .v_sync_line_aft_3 = {0xff, 0xff},
- .v_sync_line_aft_4 = {0xff, 0xff},
- .v_sync_line_aft_5 = {0xff, 0xff},
- .v_sync_line_aft_6 = {0xff, 0xff},
- .v_sync_line_aft_pxl_3 = {0xff, 0xff},
- .v_sync_line_aft_pxl_4 = {0xff, 0xff},
- .v_sync_line_aft_pxl_5 = {0xff, 0xff},
- .v_sync_line_aft_pxl_6 = {0xff, 0xff},
- .vact_space_1 = {0xff, 0xff},
- .vact_space_2 = {0xff, 0xff},
- .vact_space_3 = {0xff, 0xff},
- .vact_space_4 = {0xff, 0xff},
- .vact_space_5 = {0xff, 0xff},
- .vact_space_6 = {0xff, 0xff},
- /* other don't care */
- },
- .tg = {
- 0x00, /* cmd */
- 0xbc, 0x07, /* h_fsz */
- 0xbc, 0x02, 0x00, 0x05, /* hact */
- 0xee, 0x02, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x1e, 0x00, 0xd0, 0x02, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x48, 0x02, /* vact_st2 */
- 0x00, 0x00, /* vact_st3 */
- 0x00, 0x00, /* vact_st4 */
- 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- 0x00, /* 3d FP */
+/* list of all required phy config settings */
+static const struct hdmiphy_config hdmiphy_v14_configs[] = {
+ {
+ .pixel_clock = 25200000,
+ .conf = {
+ 0x01, 0x51, 0x2A, 0x75, 0x40, 0x01, 0x00, 0x08,
+ 0x82, 0x80, 0xfc, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xf4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
},
-};
-
-static const struct hdmi_preset_conf hdmi_conf_720p60 = {
- .core = {
- .h_blank = {0x72, 0x01},
- .v2_blank = {0xee, 0x02},
- .v1_blank = {0x1e, 0x00},
- .v_line = {0xee, 0x02},
- .h_line = {0x72, 0x06},
- .hsync_pol = {0x00},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x00},
- .v_blank_f0 = {0xff, 0xff},
- .v_blank_f1 = {0xff, 0xff},
- .h_sync_start = {0x6c, 0x00},
- .h_sync_end = {0x94, 0x00},
- .v_sync_line_bef_2 = {0x0a, 0x00},
- .v_sync_line_bef_1 = {0x05, 0x00},
- .v_sync_line_aft_2 = {0xff, 0xff},
- .v_sync_line_aft_1 = {0xff, 0xff},
- .v_sync_line_aft_pxl_2 = {0xff, 0xff},
- .v_sync_line_aft_pxl_1 = {0xff, 0xff},
- .v_blank_f2 = {0xff, 0xff},
- .v_blank_f3 = {0xff, 0xff},
- .v_blank_f4 = {0xff, 0xff},
- .v_blank_f5 = {0xff, 0xff},
- .v_sync_line_aft_3 = {0xff, 0xff},
- .v_sync_line_aft_4 = {0xff, 0xff},
- .v_sync_line_aft_5 = {0xff, 0xff},
- .v_sync_line_aft_6 = {0xff, 0xff},
- .v_sync_line_aft_pxl_3 = {0xff, 0xff},
- .v_sync_line_aft_pxl_4 = {0xff, 0xff},
- .v_sync_line_aft_pxl_5 = {0xff, 0xff},
- .v_sync_line_aft_pxl_6 = {0xff, 0xff},
- .vact_space_1 = {0xff, 0xff},
- .vact_space_2 = {0xff, 0xff},
- .vact_space_3 = {0xff, 0xff},
- .vact_space_4 = {0xff, 0xff},
- .vact_space_5 = {0xff, 0xff},
- .vact_space_6 = {0xff, 0xff},
- /* other don't care */
+ {
+ .pixel_clock = 27000000,
+ .conf = {
+ 0x01, 0xd1, 0x22, 0x51, 0x40, 0x08, 0xfc, 0x20,
+ 0x98, 0xa0, 0xcb, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xe4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
},
- .tg = {
- 0x00, /* cmd */
- 0x72, 0x06, /* h_fsz */
- 0x72, 0x01, 0x00, 0x05, /* hact */
- 0xee, 0x02, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x1e, 0x00, 0xd0, 0x02, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x48, 0x02, /* vact_st2 */
- 0x00, 0x00, /* vact_st3 */
- 0x00, 0x00, /* vact_st4 */
- 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- 0x00, /* 3d FP */
+ {
+ .pixel_clock = 27027000,
+ .conf = {
+ 0x01, 0xd1, 0x2d, 0x72, 0x40, 0x64, 0x12, 0x08,
+ 0x43, 0xa0, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
+ },
},
-};
-
-static const struct hdmi_preset_conf hdmi_conf_1080i50 = {
- .core = {
- .h_blank = {0xd0, 0x02},
- .v2_blank = {0x32, 0x02},
- .v1_blank = {0x16, 0x00},
- .v_line = {0x65, 0x04},
- .h_line = {0x50, 0x0a},
- .hsync_pol = {0x00},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x01},
- .v_blank_f0 = {0x49, 0x02},
- .v_blank_f1 = {0x65, 0x04},
- .h_sync_start = {0x0e, 0x02},
- .h_sync_end = {0x3a, 0x02},
- .v_sync_line_bef_2 = {0x07, 0x00},
- .v_sync_line_bef_1 = {0x02, 0x00},
- .v_sync_line_aft_2 = {0x39, 0x02},
- .v_sync_line_aft_1 = {0x34, 0x02},
- .v_sync_line_aft_pxl_2 = {0x38, 0x07},
- .v_sync_line_aft_pxl_1 = {0x38, 0x07},
- .v_blank_f2 = {0xff, 0xff},
- .v_blank_f3 = {0xff, 0xff},
- .v_blank_f4 = {0xff, 0xff},
- .v_blank_f5 = {0xff, 0xff},
- .v_sync_line_aft_3 = {0xff, 0xff},
- .v_sync_line_aft_4 = {0xff, 0xff},
- .v_sync_line_aft_5 = {0xff, 0xff},
- .v_sync_line_aft_6 = {0xff, 0xff},
- .v_sync_line_aft_pxl_3 = {0xff, 0xff},
- .v_sync_line_aft_pxl_4 = {0xff, 0xff},
- .v_sync_line_aft_pxl_5 = {0xff, 0xff},
- .v_sync_line_aft_pxl_6 = {0xff, 0xff},
- .vact_space_1 = {0xff, 0xff},
- .vact_space_2 = {0xff, 0xff},
- .vact_space_3 = {0xff, 0xff},
- .vact_space_4 = {0xff, 0xff},
- .vact_space_5 = {0xff, 0xff},
- .vact_space_6 = {0xff, 0xff},
- /* other don't care */
+ {
+ .pixel_clock = 36000000,
+ .conf = {
+ 0x01, 0x51, 0x2d, 0x55, 0x40, 0x01, 0x00, 0x08,
+ 0x82, 0x80, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xab, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
},
- .tg = {
- 0x00, /* cmd */
- 0x50, 0x0a, /* h_fsz */
- 0xd0, 0x02, 0x80, 0x07, /* hact */
- 0x65, 0x04, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x16, 0x00, 0x1c, 0x02, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x49, 0x02, /* vact_st2 */
- 0x00, 0x00, /* vact_st3 */
- 0x00, 0x00, /* vact_st4 */
- 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- 0x00, /* 3d FP */
+ {
+ .pixel_clock = 40000000,
+ .conf = {
+ 0x01, 0x51, 0x32, 0x55, 0x40, 0x01, 0x00, 0x08,
+ 0x82, 0x80, 0x2c, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x9a, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
},
-};
-
-static const struct hdmi_preset_conf hdmi_conf_1080i60 = {
- .core = {
- .h_blank = {0x18, 0x01},
- .v2_blank = {0x32, 0x02},
- .v1_blank = {0x16, 0x00},
- .v_line = {0x65, 0x04},
- .h_line = {0x98, 0x08},
- .hsync_pol = {0x00},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x01},
- .v_blank_f0 = {0x49, 0x02},
- .v_blank_f1 = {0x65, 0x04},
- .h_sync_start = {0x56, 0x00},
- .h_sync_end = {0x82, 0x00},
- .v_sync_line_bef_2 = {0x07, 0x00},
- .v_sync_line_bef_1 = {0x02, 0x00},
- .v_sync_line_aft_2 = {0x39, 0x02},
- .v_sync_line_aft_1 = {0x34, 0x02},
- .v_sync_line_aft_pxl_2 = {0xa4, 0x04},
- .v_sync_line_aft_pxl_1 = {0xa4, 0x04},
- .v_blank_f2 = {0xff, 0xff},
- .v_blank_f3 = {0xff, 0xff},
- .v_blank_f4 = {0xff, 0xff},
- .v_blank_f5 = {0xff, 0xff},
- .v_sync_line_aft_3 = {0xff, 0xff},
- .v_sync_line_aft_4 = {0xff, 0xff},
- .v_sync_line_aft_5 = {0xff, 0xff},
- .v_sync_line_aft_6 = {0xff, 0xff},
- .v_sync_line_aft_pxl_3 = {0xff, 0xff},
- .v_sync_line_aft_pxl_4 = {0xff, 0xff},
- .v_sync_line_aft_pxl_5 = {0xff, 0xff},
- .v_sync_line_aft_pxl_6 = {0xff, 0xff},
- .vact_space_1 = {0xff, 0xff},
- .vact_space_2 = {0xff, 0xff},
- .vact_space_3 = {0xff, 0xff},
- .vact_space_4 = {0xff, 0xff},
- .vact_space_5 = {0xff, 0xff},
- .vact_space_6 = {0xff, 0xff},
- /* other don't care */
+ {
+ .pixel_clock = 65000000,
+ .conf = {
+ 0x01, 0xd1, 0x36, 0x34, 0x40, 0x1e, 0x0a, 0x08,
+ 0x82, 0xa0, 0x45, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xbd, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
},
- .tg = {
- 0x00, /* cmd */
- 0x98, 0x08, /* h_fsz */
- 0x18, 0x01, 0x80, 0x07, /* hact */
- 0x65, 0x04, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x16, 0x00, 0x1c, 0x02, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x49, 0x02, /* vact_st2 */
- 0x00, 0x00, /* vact_st3 */
- 0x00, 0x00, /* vact_st4 */
- 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- 0x00, /* 3d FP */
+ {
+ .pixel_clock = 74176000,
+ .conf = {
+ 0x01, 0xd1, 0x3e, 0x35, 0x40, 0x5b, 0xde, 0x08,
+ 0x82, 0xa0, 0x73, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x56, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xa6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
},
-};
-
-static const struct hdmi_preset_conf hdmi_conf_1080p30 = {
- .core = {
- .h_blank = {0x18, 0x01},
- .v2_blank = {0x65, 0x04},
- .v1_blank = {0x2d, 0x00},
- .v_line = {0x65, 0x04},
- .h_line = {0x98, 0x08},
- .hsync_pol = {0x00},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x00},
- .v_blank_f0 = {0xff, 0xff},
- .v_blank_f1 = {0xff, 0xff},
- .h_sync_start = {0x56, 0x00},
- .h_sync_end = {0x82, 0x00},
- .v_sync_line_bef_2 = {0x09, 0x00},
- .v_sync_line_bef_1 = {0x04, 0x00},
- .v_sync_line_aft_2 = {0xff, 0xff},
- .v_sync_line_aft_1 = {0xff, 0xff},
- .v_sync_line_aft_pxl_2 = {0xff, 0xff},
- .v_sync_line_aft_pxl_1 = {0xff, 0xff},
- .v_blank_f2 = {0xff, 0xff},
- .v_blank_f3 = {0xff, 0xff},
- .v_blank_f4 = {0xff, 0xff},
- .v_blank_f5 = {0xff, 0xff},
- .v_sync_line_aft_3 = {0xff, 0xff},
- .v_sync_line_aft_4 = {0xff, 0xff},
- .v_sync_line_aft_5 = {0xff, 0xff},
- .v_sync_line_aft_6 = {0xff, 0xff},
- .v_sync_line_aft_pxl_3 = {0xff, 0xff},
- .v_sync_line_aft_pxl_4 = {0xff, 0xff},
- .v_sync_line_aft_pxl_5 = {0xff, 0xff},
- .v_sync_line_aft_pxl_6 = {0xff, 0xff},
- .vact_space_1 = {0xff, 0xff},
- .vact_space_2 = {0xff, 0xff},
- .vact_space_3 = {0xff, 0xff},
- .vact_space_4 = {0xff, 0xff},
- .vact_space_5 = {0xff, 0xff},
- .vact_space_6 = {0xff, 0xff},
- /* other don't care */
+ {
+ .pixel_clock = 74250000,
+ .conf = {
+ 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x40, 0xf8, 0x08,
+ 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xa5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
+ },
},
- .tg = {
- 0x00, /* cmd */
- 0x98, 0x08, /* h_fsz */
- 0x18, 0x01, 0x80, 0x07, /* hact */
- 0x65, 0x04, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x2d, 0x00, 0x38, 0x04, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x48, 0x02, /* vact_st2 */
- 0x00, 0x00, /* vact_st3 */
- 0x00, 0x00, /* vact_st4 */
- 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- 0x00, /* 3d FP */
+ {
+ .pixel_clock = 83500000,
+ .conf = {
+ 0x01, 0xd1, 0x23, 0x11, 0x40, 0x0c, 0xfb, 0x08,
+ 0x85, 0xa0, 0xd1, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x93, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
},
-};
-
-static const struct hdmi_preset_conf hdmi_conf_1080p50 = {
- .core = {
- .h_blank = {0xd0, 0x02},
- .v2_blank = {0x65, 0x04},
- .v1_blank = {0x2d, 0x00},
- .v_line = {0x65, 0x04},
- .h_line = {0x50, 0x0a},
- .hsync_pol = {0x00},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x00},
- .v_blank_f0 = {0xff, 0xff},
- .v_blank_f1 = {0xff, 0xff},
- .h_sync_start = {0x0e, 0x02},
- .h_sync_end = {0x3a, 0x02},
- .v_sync_line_bef_2 = {0x09, 0x00},
- .v_sync_line_bef_1 = {0x04, 0x00},
- .v_sync_line_aft_2 = {0xff, 0xff},
- .v_sync_line_aft_1 = {0xff, 0xff},
- .v_sync_line_aft_pxl_2 = {0xff, 0xff},
- .v_sync_line_aft_pxl_1 = {0xff, 0xff},
- .v_blank_f2 = {0xff, 0xff},
- .v_blank_f3 = {0xff, 0xff},
- .v_blank_f4 = {0xff, 0xff},
- .v_blank_f5 = {0xff, 0xff},
- .v_sync_line_aft_3 = {0xff, 0xff},
- .v_sync_line_aft_4 = {0xff, 0xff},
- .v_sync_line_aft_5 = {0xff, 0xff},
- .v_sync_line_aft_6 = {0xff, 0xff},
- .v_sync_line_aft_pxl_3 = {0xff, 0xff},
- .v_sync_line_aft_pxl_4 = {0xff, 0xff},
- .v_sync_line_aft_pxl_5 = {0xff, 0xff},
- .v_sync_line_aft_pxl_6 = {0xff, 0xff},
- .vact_space_1 = {0xff, 0xff},
- .vact_space_2 = {0xff, 0xff},
- .vact_space_3 = {0xff, 0xff},
- .vact_space_4 = {0xff, 0xff},
- .vact_space_5 = {0xff, 0xff},
- .vact_space_6 = {0xff, 0xff},
- /* other don't care */
+ {
+ .pixel_clock = 106500000,
+ .conf = {
+ 0x01, 0xd1, 0x2c, 0x12, 0x40, 0x0c, 0x09, 0x08,
+ 0x84, 0xa0, 0x0a, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x73, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
},
- .tg = {
- 0x00, /* cmd */
- 0x50, 0x0a, /* h_fsz */
- 0xd0, 0x02, 0x80, 0x07, /* hact */
- 0x65, 0x04, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x2d, 0x00, 0x38, 0x04, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x48, 0x02, /* vact_st2 */
- 0x00, 0x00, /* vact_st3 */
- 0x00, 0x00, /* vact_st4 */
- 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- 0x00, /* 3d FP */
+ {
+ .pixel_clock = 108000000,
+ .conf = {
+ 0x01, 0x51, 0x2d, 0x15, 0x40, 0x01, 0x00, 0x08,
+ 0x82, 0x80, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xc7, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
+ },
},
-};
-
-static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
- .core = {
- .h_blank = {0x18, 0x01},
- .v2_blank = {0x65, 0x04},
- .v1_blank = {0x2d, 0x00},
- .v_line = {0x65, 0x04},
- .h_line = {0x98, 0x08},
- .hsync_pol = {0x00},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x00},
- .v_blank_f0 = {0xff, 0xff},
- .v_blank_f1 = {0xff, 0xff},
- .h_sync_start = {0x56, 0x00},
- .h_sync_end = {0x82, 0x00},
- .v_sync_line_bef_2 = {0x09, 0x00},
- .v_sync_line_bef_1 = {0x04, 0x00},
- .v_sync_line_aft_2 = {0xff, 0xff},
- .v_sync_line_aft_1 = {0xff, 0xff},
- .v_sync_line_aft_pxl_2 = {0xff, 0xff},
- .v_sync_line_aft_pxl_1 = {0xff, 0xff},
- .v_blank_f2 = {0xff, 0xff},
- .v_blank_f3 = {0xff, 0xff},
- .v_blank_f4 = {0xff, 0xff},
- .v_blank_f5 = {0xff, 0xff},
- .v_sync_line_aft_3 = {0xff, 0xff},
- .v_sync_line_aft_4 = {0xff, 0xff},
- .v_sync_line_aft_5 = {0xff, 0xff},
- .v_sync_line_aft_6 = {0xff, 0xff},
- .v_sync_line_aft_pxl_3 = {0xff, 0xff},
- .v_sync_line_aft_pxl_4 = {0xff, 0xff},
- .v_sync_line_aft_pxl_5 = {0xff, 0xff},
- .v_sync_line_aft_pxl_6 = {0xff, 0xff},
- /* other don't care */
+ {
+ .pixel_clock = 146250000,
+ .conf = {
+ 0x01, 0xd1, 0x3d, 0x15, 0x40, 0x18, 0xfd, 0x08,
+ 0x83, 0xa0, 0x6e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x50, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
+ },
},
- .tg = {
- 0x00, /* cmd */
- 0x98, 0x08, /* h_fsz */
- 0x18, 0x01, 0x80, 0x07, /* hact */
- 0x65, 0x04, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x2d, 0x00, 0x38, 0x04, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x48, 0x02, /* vact_st2 */
- 0x00, 0x00, /* vact_st3 */
- 0x00, 0x00, /* vact_st4 */
- 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- 0x00, /* 3d FP */
+ {
+ .pixel_clock = 148500000,
+ .conf = {
+ 0x01, 0xd1, 0x1f, 0x00, 0x40, 0x40, 0xf8, 0x08,
+ 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x4b, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00,
+ },
},
};
-static const struct hdmi_conf hdmi_confs[] = {
- { 720, 480, 60, false, 3, hdmiphy_conf27_027, &hdmi_conf_480p60 },
- { 1280, 720, 50, false, 19, hdmiphy_conf74_25, &hdmi_conf_720p50 },
- { 1280, 720, 60, false, 4, hdmiphy_conf74_25, &hdmi_conf_720p60 },
- { 1920, 1080, 50, true, 20, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
- { 1920, 1080, 60, true, 5, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
- { 1920, 1080, 30, false, 34, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
- { 1920, 1080, 50, false, 31, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
- { 1920, 1080, 60, false, 16, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
-};
-
struct hdmi_infoframe {
enum HDMI_PACKET_TYPE type;
u8 ver;
@@ -1275,31 +889,6 @@ static int hdmi_v13_conf_index(struct drm_display_mode *mode)
return -EINVAL;
}
-static int hdmi_v14_conf_index(struct drm_display_mode *mode)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hdmi_confs); ++i)
- if (hdmi_confs[i].width == mode->hdisplay &&
- hdmi_confs[i].height == mode->vdisplay &&
- hdmi_confs[i].vrefresh == mode->vrefresh &&
- hdmi_confs[i].interlace ==
- ((mode->flags & DRM_MODE_FLAG_INTERLACE) ?
- true : false))
- return i;
-
- return -EINVAL;
-}
-
-static int hdmi_conf_index(struct hdmi_context *hdata,
- struct drm_display_mode *mode)
-{
- if (hdata->type == HDMI_TYPE13)
- return hdmi_v13_conf_index(mode);
-
- return hdmi_v14_conf_index(mode);
-}
-
static u8 hdmi_chksum(struct hdmi_context *hdata,
u32 start, u8 len, u32 hdr_sum)
{
@@ -1357,7 +946,7 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
if (hdata->type == HDMI_TYPE13)
vic = hdmi_v13_confs[hdata->cur_conf].cea_video_id;
else
- vic = hdmi_confs[hdata->cur_conf].cea_video_id;
+ vic = hdata->mode_conf.cea_video_id;
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
@@ -1434,44 +1023,51 @@ static int hdmi_v13_check_timing(struct fb_videomode *check_timing)
return -EINVAL;
}
+static int hdmi_v14_find_phy_conf(int pixel_clock)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdmiphy_v14_configs); i++) {
+ if (hdmiphy_v14_configs[i].pixel_clock == pixel_clock)
+ return i;
+ }
+
+ DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock);
+ return -EINVAL;
+}
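
A minimal sketch (not part of the patch) of the lookup pattern the later hunks rely on: hdmi_mode_fixup() passes mode->clock * 1000 and treats a negative return as "unsupported", while hdmiphy_conf_apply() uses the returned index to pick hdmiphy_v14_configs[i].conf for the 32-byte i2c write. The wrapper name below is hypothetical.

/* Hypothetical convenience wrapper, mirroring how the callers further
 * down in this diff consume hdmi_v14_find_phy_conf(). */
static bool hdmi_v14_mode_supported(struct drm_display_mode *mode)
{
	/* DRM mode clocks are in kHz; the phy table is keyed in Hz */
	return hdmi_v14_find_phy_conf(mode->clock * 1000) >= 0;
}
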
+
static int hdmi_v14_check_timing(struct fb_videomode *check_timing)
{
int i;
- DRM_DEBUG_KMS("valid mode : xres=%d, yres=%d, refresh=%d, intl=%d\n",
+ DRM_DEBUG_KMS("mode: xres=%d, yres=%d, refresh=%d, clock=%d, intl=%d\n",
check_timing->xres, check_timing->yres,
- check_timing->refresh, (check_timing->vmode &
- FB_VMODE_INTERLACED) ? true : false);
+ check_timing->refresh, check_timing->pixclock,
+ (check_timing->vmode & FB_VMODE_INTERLACED) ?
+ true : false);
- for (i = 0; i < ARRAY_SIZE(hdmi_confs); i++)
- if (hdmi_confs[i].width == check_timing->xres &&
- hdmi_confs[i].height == check_timing->yres &&
- hdmi_confs[i].vrefresh == check_timing->refresh &&
- hdmi_confs[i].interlace ==
- ((check_timing->vmode & FB_VMODE_INTERLACED) ?
- true : false))
- return 0;
-
- /* TODO */
+ for (i = 0; i < ARRAY_SIZE(hdmiphy_v14_configs); i++)
+ if (hdmiphy_v14_configs[i].pixel_clock ==
+ check_timing->pixclock)
+ return 0;
return -EINVAL;
}
-static int hdmi_check_timing(void *ctx, void *timing)
+static int hdmi_check_timing(void *ctx, struct fb_videomode *timing)
{
struct hdmi_context *hdata = ctx;
- struct fb_videomode *check_timing = timing;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- DRM_DEBUG_KMS("[%d]x[%d] [%d]Hz [%x]\n", check_timing->xres,
- check_timing->yres, check_timing->refresh,
- check_timing->vmode);
+ DRM_DEBUG_KMS("[%d]x[%d] [%d]Hz [%x]\n", timing->xres,
+ timing->yres, timing->refresh,
+ timing->vmode);
if (hdata->type == HDMI_TYPE13)
- return hdmi_v13_check_timing(check_timing);
+ return hdmi_v13_check_timing(timing);
else
- return hdmi_v14_check_timing(check_timing);
+ return hdmi_v14_check_timing(timing);
}
static void hdmi_set_acr(u32 freq, u8 *acr)
@@ -1795,9 +1391,8 @@ static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
static void hdmi_v14_timing_apply(struct hdmi_context *hdata)
{
- const struct hdmi_preset_conf *conf = hdmi_confs[hdata->cur_conf].conf;
- const struct hdmi_core_regs *core = &conf->core;
- const struct hdmi_tg_regs *tg = &conf->tg;
+ struct hdmi_core_regs *core = &hdata->mode_conf.core;
+ struct hdmi_tg_regs *tg = &hdata->mode_conf.tg;
int tries;
/* setting core registers */
@@ -1900,39 +1495,39 @@ static void hdmi_v14_timing_apply(struct hdmi_context *hdata)
hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_1, core->vact_space_6[1]);
/* Timing generator registers */
- hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz_l);
- hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz_h);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st_l);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st_h);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz_l);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz_h);
- hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz_l);
- hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz_h);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg_l);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_L, tg->vact_st3_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_H, tg->vact_st3_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_L, tg->vact_st4_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_H, tg->vact_st4_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi_h);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi_l);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h);
- hdmi_reg_writeb(hdata, HDMI_TG_3D, tg->tg_3d);
+ hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_L, tg->vact_st3[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_H, tg->vact_st3[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_L, tg->vact_st4[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_H, tg->vact_st4[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_3D, tg->tg_3d[0]);
/* waiting for HDMIPHY's PLL to get to steady state */
for (tries = 100; tries; --tries) {
@@ -2029,10 +1624,17 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
}
/* pixel clock */
- if (hdata->type == HDMI_TYPE13)
+ if (hdata->type == HDMI_TYPE13) {
hdmiphy_data = hdmi_v13_confs[hdata->cur_conf].hdmiphy_data;
- else
- hdmiphy_data = hdmi_confs[hdata->cur_conf].hdmiphy_data;
+ } else {
+ i = hdmi_v14_find_phy_conf(hdata->mode_conf.pixel_clock);
+ if (i < 0) {
+ DRM_ERROR("failed to find hdmiphy conf\n");
+ return;
+ }
+
+ hdmiphy_data = hdmiphy_v14_configs[i].conf;
+ }
memcpy(buffer, hdmiphy_data, 32);
ret = i2c_master_send(hdata->hdmiphy_port, buffer, 32);
@@ -2100,7 +1702,7 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
if (hdata->type == HDMI_TYPE13)
index = hdmi_v13_conf_index(adjusted_mode);
else
- index = hdmi_v14_conf_index(adjusted_mode);
+ index = hdmi_v14_find_phy_conf(adjusted_mode->clock * 1000);
/* just return if user desired mode exists. */
if (index >= 0)
@@ -2114,7 +1716,7 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
if (hdata->type == HDMI_TYPE13)
index = hdmi_v13_conf_index(m);
else
- index = hdmi_v14_conf_index(m);
+ index = hdmi_v14_find_phy_conf(m->clock * 1000);
if (index >= 0) {
struct drm_mode_object base;
@@ -2123,6 +1725,9 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
DRM_INFO("desired mode doesn't exist so\n");
DRM_INFO("use the most suitable mode among modes.\n");
+ DRM_DEBUG_KMS("Adjusted Mode: [%d]x[%d] [%d]Hz\n",
+ m->hdisplay, m->vdisplay, m->vrefresh);
+
/* preserve display mode header while copying. */
head = adjusted_mode->head;
base = adjusted_mode->base;
@@ -2134,6 +1739,122 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
}
}
+static void hdmi_set_reg(u8 *reg_pair, int num_bytes, u32 value)
+{
+ int i;
+ BUG_ON(num_bytes > 4);
+ for (i = 0; i < num_bytes; i++)
+ reg_pair[i] = (value >> (8 * i)) & 0xff;
+}
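
A quick illustrative check of hdmi_set_reg(), not code from the patch: assuming the standard CEA 720p timing of htotal = 1650 and hdisplay = 1280, the little-endian split reproduces the {low, high} byte pairs of the static tables deleted above.

/* Illustrative only: reproduce one byte pair of the removed tables. */
static void hdmi_set_reg_example(void)
{
	u8 h_blank[2];

	/* 720p60: htotal - hdisplay = 1650 - 1280 = 370 = 0x0172 */
	hdmi_set_reg(h_blank, 2, 370);

	/* h_blank[0] == 0x72 and h_blank[1] == 0x01, the same bytes as the
	 * removed .h_blank = {0x72, 0x01} initializer of hdmi_conf_720p60. */
}
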
+
+static void hdmi_v14_mode_set(struct hdmi_context *hdata,
+ struct drm_display_mode *m)
+{
+ struct hdmi_core_regs *core = &hdata->mode_conf.core;
+ struct hdmi_tg_regs *tg = &hdata->mode_conf.tg;
+
+ hdata->mode_conf.cea_video_id = drm_match_cea_mode(m);
+
+ hdata->mode_conf.pixel_clock = m->clock * 1000;
+ hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
+ hdmi_set_reg(core->v_line, 2, m->vtotal);
+ hdmi_set_reg(core->h_line, 2, m->htotal);
+ hdmi_set_reg(core->hsync_pol, 1,
+ (m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0);
+ hdmi_set_reg(core->vsync_pol, 1,
+ (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0);
+ hdmi_set_reg(core->int_pro_mode, 1,
+ (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);
+
+ /*
+ * Quirk requirement for exynos 5 HDMI IP design,
+ * 2 pixels less than the actual calculation for hsync_start
+ * and end.
+ */
+
+ /* Following values & calculations differ for different type of modes */
+ if (m->flags & DRM_MODE_FLAG_INTERLACE) {
+ /* Interlaced Mode */
+ hdmi_set_reg(core->v_sync_line_bef_2, 2,
+ (m->vsync_end - m->vdisplay) / 2);
+ hdmi_set_reg(core->v_sync_line_bef_1, 2,
+ (m->vsync_start - m->vdisplay) / 2);
+ hdmi_set_reg(core->v2_blank, 2, m->vtotal / 2);
+ hdmi_set_reg(core->v1_blank, 2, (m->vtotal - m->vdisplay) / 2);
+ hdmi_set_reg(core->v_blank_f0, 2, (m->vtotal +
+ ((m->vsync_end - m->vsync_start) * 4) + 5) / 2);
+ hdmi_set_reg(core->v_blank_f1, 2, m->vtotal);
+ hdmi_set_reg(core->v_sync_line_aft_2, 2, (m->vtotal / 2) + 7);
+ hdmi_set_reg(core->v_sync_line_aft_1, 2, (m->vtotal / 2) + 2);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2,
+ (m->htotal / 2) + (m->hsync_start - m->hdisplay));
+ hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2,
+ (m->htotal / 2) + (m->hsync_start - m->hdisplay));
+ hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2);
+ hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2);
+ hdmi_set_reg(tg->vact_st2, 2, 0x249);/* Reset value + 1*/
+ hdmi_set_reg(tg->vact_st3, 2, 0x0);
+ hdmi_set_reg(tg->vact_st4, 2, 0x0);
+ } else {
+ /* Progressive Mode */
+ hdmi_set_reg(core->v_sync_line_bef_2, 2,
+ m->vsync_end - m->vdisplay);
+ hdmi_set_reg(core->v_sync_line_bef_1, 2,
+ m->vsync_start - m->vdisplay);
+ hdmi_set_reg(core->v2_blank, 2, m->vtotal);
+ hdmi_set_reg(core->v1_blank, 2, m->vtotal - m->vdisplay);
+ hdmi_set_reg(core->v_blank_f0, 2, 0xffff);
+ hdmi_set_reg(core->v_blank_f1, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_2, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_1, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2, 0xffff);
+ hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay);
+ hdmi_set_reg(tg->vact_sz, 2, m->vdisplay);
+ hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */
+ hdmi_set_reg(tg->vact_st3, 2, 0x47b); /* Reset value */
+ hdmi_set_reg(tg->vact_st4, 2, 0x6ae); /* Reset value */
+ }
+
+ /* Following values & calculations are same irrespective of mode type */
+ hdmi_set_reg(core->h_sync_start, 2, m->hsync_start - m->hdisplay - 2);
+ hdmi_set_reg(core->h_sync_end, 2, m->hsync_end - m->hdisplay - 2);
+ hdmi_set_reg(core->vact_space_1, 2, 0xffff);
+ hdmi_set_reg(core->vact_space_2, 2, 0xffff);
+ hdmi_set_reg(core->vact_space_3, 2, 0xffff);
+ hdmi_set_reg(core->vact_space_4, 2, 0xffff);
+ hdmi_set_reg(core->vact_space_5, 2, 0xffff);
+ hdmi_set_reg(core->vact_space_6, 2, 0xffff);
+ hdmi_set_reg(core->v_blank_f2, 2, 0xffff);
+ hdmi_set_reg(core->v_blank_f3, 2, 0xffff);
+ hdmi_set_reg(core->v_blank_f4, 2, 0xffff);
+ hdmi_set_reg(core->v_blank_f5, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_3, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_4, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_5, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_6, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_3, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_4, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_5, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_6, 2, 0xffff);
+
+ /* Timing generator registers */
+ hdmi_set_reg(tg->cmd, 1, 0x0);
+ hdmi_set_reg(tg->h_fsz, 2, m->htotal);
+ hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay);
+ hdmi_set_reg(tg->hact_sz, 2, m->hdisplay);
+ hdmi_set_reg(tg->v_fsz, 2, m->vtotal);
+ hdmi_set_reg(tg->vsync, 2, 0x1);
+ hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */
+ hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */
+ hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */
+ hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */
+ hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
+ hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
+ hdmi_set_reg(tg->tg_3d, 1, 0x0);
+
+}
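
As an illustrative cross-check (not part of the patch), feeding the standard CEA-861 1080p60 timing, assumed here as 1920x1080 with htotal 2200, vtotal 1125, hsync 2008-2052 and vsync 1084-1089, through hdmi_v14_mode_set() reproduces the byte pairs of the hdmi_conf_1080p60 table removed above:

/* Worked check with assumed CEA timings; results match the deleted table.
 *
 *   h_blank           = 2200 - 1920     =  280 = 0x0118  ->  {0x18, 0x01}
 *   v1_blank          = 1125 - 1080     =   45 = 0x002d  ->  {0x2d, 0x00}
 *   v2_blank          = 1125            = 0x0465         ->  {0x65, 0x04}
 *   h_line            = 2200            = 0x0898         ->  {0x98, 0x08}
 *   v_sync_line_bef_1 = 1084 - 1080     =    4           ->  {0x04, 0x00}
 *   v_sync_line_bef_2 = 1089 - 1080     =    9           ->  {0x09, 0x00}
 *   h_sync_start      = 2008 - 1920 - 2 =   86 = 0x0056  ->  {0x56, 0x00}
 *   h_sync_end        = 2052 - 1920 - 2 =  130 = 0x0082  ->  {0x82, 0x00}
 */
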
+
static void hdmi_mode_set(void *ctx, void *mode)
{
struct hdmi_context *hdata = ctx;
@@ -2141,11 +1862,15 @@ static void hdmi_mode_set(void *ctx, void *mode)
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- conf_idx = hdmi_conf_index(hdata, mode);
- if (conf_idx >= 0)
- hdata->cur_conf = conf_idx;
- else
- DRM_DEBUG_KMS("not supported mode\n");
+ if (hdata->type == HDMI_TYPE13) {
+ conf_idx = hdmi_v13_conf_index(mode);
+ if (conf_idx >= 0)
+ hdata->cur_conf = conf_idx;
+ else
+ DRM_DEBUG_KMS("not supported mode\n");
+ } else {
+ hdmi_v14_mode_set(hdata, mode);
+ }
}
static void hdmi_get_max_resol(void *ctx, unsigned int *width,
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index c414584bfba..e919aba29b3 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -284,13 +284,13 @@ static void mixer_cfg_scan(struct mixer_context *ctx, unsigned int height)
MXR_CFG_SCAN_PROGRASSIVE);
/* choosing between proper HD and SD mode */
- if (height == 480)
+ if (height <= 480)
val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
- else if (height == 576)
+ else if (height <= 576)
val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
- else if (height == 720)
+ else if (height <= 720)
val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
- else if (height == 1080)
+ else if (height <= 1080)
val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
else
val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
@@ -818,6 +818,29 @@ static void mixer_win_disable(void *ctx, int win)
mixer_ctx->win_data[win].enabled = false;
}
+int mixer_check_timing(void *ctx, struct fb_videomode *timing)
+{
+ struct mixer_context *mixer_ctx = ctx;
+ u32 w, h;
+
+ w = timing->xres;
+ h = timing->yres;
+
+ DRM_DEBUG_KMS("%s : xres=%d, yres=%d, refresh=%d, intl=%d\n",
+ __func__, timing->xres, timing->yres,
+ timing->refresh, (timing->vmode &
+ FB_VMODE_INTERLACED) ? true : false);
+
+ if (mixer_ctx->mxr_ver == MXR_VER_0_0_0_16)
+ return 0;
+
+ if ((w >= 464 && w <= 720 && h >= 261 && h <= 576) ||
+ (w >= 1024 && w <= 1280 && h >= 576 && h <= 720) ||
+ (w >= 1664 && w <= 1920 && h >= 936 && h <= 1080))
+ return 0;
+
+ return -EINVAL;
+}
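
For illustration only, with example resolutions that are not from the patch, the resolution windows above behave as follows on any mixer other than MXR_VER_0_0_0_16 (which accepts everything):

/*
 *    720 x  576  ->  0        (SD window:       464-720  x 261-576)
 *   1280 x  720  ->  0        (HD 720 window:  1024-1280 x 576-720)
 *   1920 x 1080  ->  0        (HD 1080 window: 1664-1920 x 936-1080)
 *    800 x  600  ->  -EINVAL  (falls between the windows)
 */
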
static void mixer_wait_for_vblank(void *ctx)
{
struct mixer_context *mixer_ctx = ctx;
@@ -955,6 +978,9 @@ static struct exynos_mixer_ops mixer_ops = {
.win_mode_set = mixer_win_mode_set,
.win_commit = mixer_win_commit,
.win_disable = mixer_win_disable,
+
+ /* display */
+ .check_timing = mixer_check_timing,
};
static irqreturn_t mixer_irq_handler(int irq, void *arg)
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index afded54dbb1..2590cac8425 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -260,13 +260,13 @@ static int psb_framebuffer_init(struct drm_device *dev,
default:
return -EINVAL;
}
+ drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
+ fb->gtt = gt;
ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
if (ret) {
dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
return ret;
}
- drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
- fb->gtt = gt;
return 0;
}
@@ -545,9 +545,7 @@ static int psbfb_probe(struct drm_fb_helper *helper,
struct psb_fbdev *psb_fbdev = (struct psb_fbdev *)helper;
struct drm_device *dev = psb_fbdev->psb_fb_helper.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- int new_fb = 0;
int bytespp;
- int ret;
bytespp = sizes->surface_bpp / 8;
if (bytespp == 3) /* no 24bit packed */
@@ -562,13 +560,7 @@ static int psbfb_probe(struct drm_fb_helper *helper,
sizes->surface_depth = 16;
}
- if (!helper->fb) {
- ret = psbfb_create(psb_fbdev, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
+ return psbfb_create(psb_fbdev, sizes);
}
static struct drm_fb_helper_funcs psb_fb_helper_funcs = {
@@ -590,6 +582,7 @@ static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
framebuffer_release(info);
}
drm_fb_helper_fini(&fbdev->psb_fb_helper);
+ drm_framebuffer_unregister_private(&psbfb->base);
drm_framebuffer_cleanup(&psbfb->base);
if (psbfb->gtt)
@@ -615,6 +608,10 @@ int psb_fbdev_init(struct drm_device *dev)
INTELFB_CONN_LIMIT);
drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
return 0;
}
@@ -668,30 +665,6 @@ static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct psb_framebuffer *psbfb = to_psb_fb(fb);
struct gtt_range *r = psbfb->gtt;
- struct drm_device *dev = fb->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_fbdev *fbdev = dev_priv->fbdev;
- struct drm_crtc *crtc;
- int reset = 0;
-
- /* Should never get stolen memory for a user fb */
- WARN_ON(r->stolen);
-
- /* Check if we are erroneously live */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- if (crtc->fb == fb)
- reset = 1;
-
- if (reset)
- /*
- * Now force a sane response before we permit the DRM CRTC
- * layer to do stupid things like blank the display. Instead
- * we reset this framebuffer as if the user had forced a reset.
- * We must do this before the cleanup so that the DRM layer
- * doesn't get a chance to stick its oar in where it isn't
- * wanted.
- */
- drm_fb_helper_restore_fbdev_mode(&fbdev->psb_fb_helper);
/* Let DRM do its clean up */
drm_framebuffer_cleanup(fb);
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index b58c4701c4e..f6f534b4197 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -194,7 +194,7 @@ static int psb_save_display_registers(struct drm_device *dev)
regs->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
/* Save crtc and output state */
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (drm_helper_crtc_in_use(crtc))
crtc->funcs->save(crtc);
@@ -204,7 +204,7 @@ static int psb_save_display_registers(struct drm_device *dev)
if (connector->funcs->save)
connector->funcs->save(connector);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return 0;
}
@@ -234,7 +234,7 @@ static int psb_restore_display_registers(struct drm_device *dev)
/*make sure VGA plane is off. it initializes to on after reset!*/
PSB_WVDC32(0x80000000, VGACNTRL);
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
if (drm_helper_crtc_in_use(crtc))
crtc->funcs->restore(crtc);
@@ -243,7 +243,7 @@ static int psb_restore_display_registers(struct drm_device *dev)
if (connector->funcs->restore)
connector->funcs->restore(connector);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return 0;
}
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index dd1fbfa7e46..111e3df9c5d 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -149,6 +149,16 @@ static struct drm_ioctl_desc psb_ioctls[] = {
static void psb_lastclose(struct drm_device *dev)
{
+ int ret;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_fbdev *fbdev = dev_priv->fbdev;
+
+ drm_modeset_lock_all(dev);
+ ret = drm_fb_helper_restore_fbdev_mode(&fbdev->psb_fb_helper);
+ if (ret)
+ DRM_DEBUG("failed to restore crtc mode\n");
+ drm_modeset_unlock_all(dev);
+
return;
}
@@ -476,7 +486,7 @@ static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
case PSB_MODE_OPERATION_MODE_VALID:
umode = &arg->mode;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, obj_id,
DRM_MODE_OBJECT_CONNECTOR);
@@ -525,7 +535,7 @@ static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
if (mode)
drm_mode_destroy(dev, mode);
mode_op_out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
default:
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 8033526bb53..9edb1902a09 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -85,14 +85,14 @@ struct psb_intel_limit_t {
#define I9XX_DOT_MAX 400000
#define I9XX_VCO_MIN 1400000
#define I9XX_VCO_MAX 2800000
-#define I9XX_N_MIN 3
-#define I9XX_N_MAX 8
+#define I9XX_N_MIN 1
+#define I9XX_N_MAX 6
#define I9XX_M_MIN 70
#define I9XX_M_MAX 120
-#define I9XX_M1_MIN 10
-#define I9XX_M1_MAX 20
-#define I9XX_M2_MIN 5
-#define I9XX_M2_MAX 9
+#define I9XX_M1_MIN 8
+#define I9XX_M1_MAX 18
+#define I9XX_M2_MIN 3
+#define I9XX_M2_MAX 7
#define I9XX_P_SDVO_DAC_MIN 5
#define I9XX_P_SDVO_DAC_MAX 80
#define I9XX_P_LVDS_MIN 7
diff --git a/drivers/gpu/drm/i2c/Kconfig b/drivers/gpu/drm/i2c/Kconfig
new file mode 100644
index 00000000000..4d341db462a
--- /dev/null
+++ b/drivers/gpu/drm/i2c/Kconfig
@@ -0,0 +1,28 @@
+menu "I2C encoder or helper chips"
+ depends on DRM && DRM_KMS_HELPER && I2C
+
+config DRM_I2C_CH7006
+ tristate "Chrontel ch7006 TV encoder"
+ default m if DRM_NOUVEAU
+ help
+ Support for Chrontel ch7006 and similar TV encoders, found
+ on some nVidia video cards.
+
+ This driver is currently only useful if you're also using
+ the nouveau driver.
+
+config DRM_I2C_SIL164
+ tristate "Silicon Image sil164 TMDS transmitter"
+ default m if DRM_NOUVEAU
+ help
+ Support for sil164 and similar single-link (or dual-link
+ when used in pairs) TMDS transmitters, used in some nVidia
+ video cards.
+
+config DRM_I2C_NXP_TDA998X
+ tristate "NXP Semiconductors TDA998X HDMI encoder"
+ default m if DRM_TILCDC
+ help
+ Support for NXP Semiconductors TDA998X HDMI encoders.
+
+endmenu
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
index 92862563e7e..43aa33baebe 100644
--- a/drivers/gpu/drm/i2c/Makefile
+++ b/drivers/gpu/drm/i2c/Makefile
@@ -5,3 +5,6 @@ obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
sil164-y := sil164_drv.o
obj-$(CONFIG_DRM_I2C_SIL164) += sil164.o
+
+tda998x-y := tda998x_drv.o
+obj-$(CONFIG_DRM_I2C_NXP_TDA998X) += tda998x.o
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index b865d0728e2..51fa3239202 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -364,7 +364,7 @@ static int ch7006_encoder_set_property(struct drm_encoder *encoder,
.crtc = crtc,
};
- crtc->funcs->set_config(&modeset);
+ drm_mode_set_config_internal(&modeset);
}
}
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
new file mode 100644
index 00000000000..e68b58a1aaf
--- /dev/null
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -0,0 +1,906 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder_slave.h>
+#include <drm/drm_edid.h>
+
+
+#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+
+struct tda998x_priv {
+ struct i2c_client *cec;
+ uint16_t rev;
+ uint8_t current_page;
+ int dpms;
+};
+
+#define to_tda998x_priv(x) ((struct tda998x_priv *)to_encoder_slave(x)->slave_priv)
+
+/* The TDA9988 series of devices use a paged register scheme. To simplify
+ * things we encode the page # in the upper bits of the register #. To read/
+ * write a given register, we need to make sure the CURPAGE register is set
+ * appropriately. Which implies reads/writes are not atomic. Fun!
+ */
+
+#define REG(page, addr) (((page) << 8) | (addr))
+#define REG2ADDR(reg) ((reg) & 0xff)
+#define REG2PAGE(reg) (((reg) >> 8) & 0xff)
+
+#define REG_CURPAGE 0xff /* write */
+
+
+/* Page 00h: General Control */
+#define REG_VERSION_LSB REG(0x00, 0x00) /* read */
+#define REG_MAIN_CNTRL0 REG(0x00, 0x01) /* read/write */
+# define MAIN_CNTRL0_SR (1 << 0)
+# define MAIN_CNTRL0_DECS (1 << 1)
+# define MAIN_CNTRL0_DEHS (1 << 2)
+# define MAIN_CNTRL0_CECS (1 << 3)
+# define MAIN_CNTRL0_CEHS (1 << 4)
+# define MAIN_CNTRL0_SCALER (1 << 7)
+#define REG_VERSION_MSB REG(0x00, 0x02) /* read */
+#define REG_SOFTRESET REG(0x00, 0x0a) /* write */
+# define SOFTRESET_AUDIO (1 << 0)
+# define SOFTRESET_I2C_MASTER (1 << 1)
+#define REG_DDC_DISABLE REG(0x00, 0x0b) /* read/write */
+#define REG_CCLK_ON REG(0x00, 0x0c) /* read/write */
+#define REG_I2C_MASTER REG(0x00, 0x0d) /* read/write */
+# define I2C_MASTER_DIS_MM (1 << 0)
+# define I2C_MASTER_DIS_FILT (1 << 1)
+# define I2C_MASTER_APP_STRT_LAT (1 << 2)
+#define REG_INT_FLAGS_0 REG(0x00, 0x0f) /* read/write */
+#define REG_INT_FLAGS_1 REG(0x00, 0x10) /* read/write */
+#define REG_INT_FLAGS_2 REG(0x00, 0x11) /* read/write */
+# define INT_FLAGS_2_EDID_BLK_RD (1 << 1)
+#define REG_ENA_VP_0 REG(0x00, 0x18) /* read/write */
+#define REG_ENA_VP_1 REG(0x00, 0x19) /* read/write */
+#define REG_ENA_VP_2 REG(0x00, 0x1a) /* read/write */
+#define REG_ENA_AP REG(0x00, 0x1e) /* read/write */
+#define REG_VIP_CNTRL_0 REG(0x00, 0x20) /* write */
+# define VIP_CNTRL_0_MIRR_A (1 << 7)
+# define VIP_CNTRL_0_SWAP_A(x) (((x) & 7) << 4)
+# define VIP_CNTRL_0_MIRR_B (1 << 3)
+# define VIP_CNTRL_0_SWAP_B(x) (((x) & 7) << 0)
+#define REG_VIP_CNTRL_1 REG(0x00, 0x21) /* write */
+# define VIP_CNTRL_1_MIRR_C (1 << 7)
+# define VIP_CNTRL_1_SWAP_C(x) (((x) & 7) << 4)
+# define VIP_CNTRL_1_MIRR_D (1 << 3)
+# define VIP_CNTRL_1_SWAP_D(x) (((x) & 7) << 0)
+#define REG_VIP_CNTRL_2 REG(0x00, 0x22) /* write */
+# define VIP_CNTRL_2_MIRR_E (1 << 7)
+# define VIP_CNTRL_2_SWAP_E(x) (((x) & 7) << 4)
+# define VIP_CNTRL_2_MIRR_F (1 << 3)
+# define VIP_CNTRL_2_SWAP_F(x) (((x) & 7) << 0)
+#define REG_VIP_CNTRL_3 REG(0x00, 0x23) /* write */
+# define VIP_CNTRL_3_X_TGL (1 << 0)
+# define VIP_CNTRL_3_H_TGL (1 << 1)
+# define VIP_CNTRL_3_V_TGL (1 << 2)
+# define VIP_CNTRL_3_EMB (1 << 3)
+# define VIP_CNTRL_3_SYNC_DE (1 << 4)
+# define VIP_CNTRL_3_SYNC_HS (1 << 5)
+# define VIP_CNTRL_3_DE_INT (1 << 6)
+# define VIP_CNTRL_3_EDGE (1 << 7)
+#define REG_VIP_CNTRL_4 REG(0x00, 0x24) /* write */
+# define VIP_CNTRL_4_BLC(x) (((x) & 3) << 0)
+# define VIP_CNTRL_4_BLANKIT(x) (((x) & 3) << 2)
+# define VIP_CNTRL_4_CCIR656 (1 << 4)
+# define VIP_CNTRL_4_656_ALT (1 << 5)
+# define VIP_CNTRL_4_TST_656 (1 << 6)
+# define VIP_CNTRL_4_TST_PAT (1 << 7)
+#define REG_VIP_CNTRL_5 REG(0x00, 0x25) /* write */
+# define VIP_CNTRL_5_CKCASE (1 << 0)
+# define VIP_CNTRL_5_SP_CNT(x) (((x) & 3) << 1)
+#define REG_MAT_CONTRL REG(0x00, 0x80) /* write */
+# define MAT_CONTRL_MAT_SC(x) (((x) & 3) << 0)
+# define MAT_CONTRL_MAT_BP (1 << 2)
+#define REG_VIDFORMAT REG(0x00, 0xa0) /* write */
+#define REG_REFPIX_MSB REG(0x00, 0xa1) /* write */
+#define REG_REFPIX_LSB REG(0x00, 0xa2) /* write */
+#define REG_REFLINE_MSB REG(0x00, 0xa3) /* write */
+#define REG_REFLINE_LSB REG(0x00, 0xa4) /* write */
+#define REG_NPIX_MSB REG(0x00, 0xa5) /* write */
+#define REG_NPIX_LSB REG(0x00, 0xa6) /* write */
+#define REG_NLINE_MSB REG(0x00, 0xa7) /* write */
+#define REG_NLINE_LSB REG(0x00, 0xa8) /* write */
+#define REG_VS_LINE_STRT_1_MSB REG(0x00, 0xa9) /* write */
+#define REG_VS_LINE_STRT_1_LSB REG(0x00, 0xaa) /* write */
+#define REG_VS_PIX_STRT_1_MSB REG(0x00, 0xab) /* write */
+#define REG_VS_PIX_STRT_1_LSB REG(0x00, 0xac) /* write */
+#define REG_VS_LINE_END_1_MSB REG(0x00, 0xad) /* write */
+#define REG_VS_LINE_END_1_LSB REG(0x00, 0xae) /* write */
+#define REG_VS_PIX_END_1_MSB REG(0x00, 0xaf) /* write */
+#define REG_VS_PIX_END_1_LSB REG(0x00, 0xb0) /* write */
+#define REG_VS_PIX_STRT_2_MSB REG(0x00, 0xb3) /* write */
+#define REG_VS_PIX_STRT_2_LSB REG(0x00, 0xb4) /* write */
+#define REG_VS_PIX_END_2_MSB REG(0x00, 0xb7) /* write */
+#define REG_VS_PIX_END_2_LSB REG(0x00, 0xb8) /* write */
+#define REG_HS_PIX_START_MSB REG(0x00, 0xb9) /* write */
+#define REG_HS_PIX_START_LSB REG(0x00, 0xba) /* write */
+#define REG_HS_PIX_STOP_MSB REG(0x00, 0xbb) /* write */
+#define REG_HS_PIX_STOP_LSB REG(0x00, 0xbc) /* write */
+#define REG_VWIN_START_1_MSB REG(0x00, 0xbd) /* write */
+#define REG_VWIN_START_1_LSB REG(0x00, 0xbe) /* write */
+#define REG_VWIN_END_1_MSB REG(0x00, 0xbf) /* write */
+#define REG_VWIN_END_1_LSB REG(0x00, 0xc0) /* write */
+#define REG_DE_START_MSB REG(0x00, 0xc5) /* write */
+#define REG_DE_START_LSB REG(0x00, 0xc6) /* write */
+#define REG_DE_STOP_MSB REG(0x00, 0xc7) /* write */
+#define REG_DE_STOP_LSB REG(0x00, 0xc8) /* write */
+#define REG_TBG_CNTRL_0 REG(0x00, 0xca) /* write */
+# define TBG_CNTRL_0_FRAME_DIS (1 << 5)
+# define TBG_CNTRL_0_SYNC_MTHD (1 << 6)
+# define TBG_CNTRL_0_SYNC_ONCE (1 << 7)
+#define REG_TBG_CNTRL_1 REG(0x00, 0xcb) /* write */
+# define TBG_CNTRL_1_VH_TGL_0 (1 << 0)
+# define TBG_CNTRL_1_VH_TGL_1 (1 << 1)
+# define TBG_CNTRL_1_VH_TGL_2 (1 << 2)
+# define TBG_CNTRL_1_VHX_EXT_DE (1 << 3)
+# define TBG_CNTRL_1_VHX_EXT_HS (1 << 4)
+# define TBG_CNTRL_1_VHX_EXT_VS (1 << 5)
+# define TBG_CNTRL_1_DWIN_DIS (1 << 6)
+#define REG_ENABLE_SPACE REG(0x00, 0xd6) /* write */
+#define REG_HVF_CNTRL_0 REG(0x00, 0xe4) /* write */
+# define HVF_CNTRL_0_SM (1 << 7)
+# define HVF_CNTRL_0_RWB (1 << 6)
+# define HVF_CNTRL_0_PREFIL(x) (((x) & 3) << 2)
+# define HVF_CNTRL_0_INTPOL(x) (((x) & 3) << 0)
+#define REG_HVF_CNTRL_1 REG(0x00, 0xe5) /* write */
+# define HVF_CNTRL_1_FOR (1 << 0)
+# define HVF_CNTRL_1_YUVBLK (1 << 1)
+# define HVF_CNTRL_1_VQR(x) (((x) & 3) << 2)
+# define HVF_CNTRL_1_PAD(x) (((x) & 3) << 4)
+# define HVF_CNTRL_1_SEMI_PLANAR (1 << 6)
+#define REG_RPT_CNTRL REG(0x00, 0xf0) /* write */
+
+
+/* Page 02h: PLL settings */
+#define REG_PLL_SERIAL_1 REG(0x02, 0x00) /* read/write */
+# define PLL_SERIAL_1_SRL_FDN (1 << 0)
+# define PLL_SERIAL_1_SRL_IZ(x) (((x) & 3) << 1)
+# define PLL_SERIAL_1_SRL_MAN_IZ (1 << 6)
+#define REG_PLL_SERIAL_2 REG(0x02, 0x01) /* read/write */
+# define PLL_SERIAL_2_SRL_NOSC(x) (((x) & 3) << 0)
+# define PLL_SERIAL_2_SRL_PR(x) (((x) & 0xf) << 4)
+#define REG_PLL_SERIAL_3 REG(0x02, 0x02) /* read/write */
+# define PLL_SERIAL_3_SRL_CCIR (1 << 0)
+# define PLL_SERIAL_3_SRL_DE (1 << 2)
+# define PLL_SERIAL_3_SRL_PXIN_SEL (1 << 4)
+#define REG_SERIALIZER REG(0x02, 0x03) /* read/write */
+#define REG_BUFFER_OUT REG(0x02, 0x04) /* read/write */
+#define REG_PLL_SCG1 REG(0x02, 0x05) /* read/write */
+#define REG_PLL_SCG2 REG(0x02, 0x06) /* read/write */
+#define REG_PLL_SCGN1 REG(0x02, 0x07) /* read/write */
+#define REG_PLL_SCGN2 REG(0x02, 0x08) /* read/write */
+#define REG_PLL_SCGR1 REG(0x02, 0x09) /* read/write */
+#define REG_PLL_SCGR2 REG(0x02, 0x0a) /* read/write */
+#define REG_AUDIO_DIV REG(0x02, 0x0e) /* read/write */
+#define REG_SEL_CLK REG(0x02, 0x11) /* read/write */
+# define SEL_CLK_SEL_CLK1 (1 << 0)
+# define SEL_CLK_SEL_VRF_CLK(x) (((x) & 3) << 1)
+# define SEL_CLK_ENA_SC_CLK (1 << 3)
+#define REG_ANA_GENERAL REG(0x02, 0x12) /* read/write */
+
+
+/* Page 09h: EDID Control */
+#define REG_EDID_DATA_0 REG(0x09, 0x00) /* read */
+/* next 127 successive registers are the EDID block */
+#define REG_EDID_CTRL REG(0x09, 0xfa) /* read/write */
+#define REG_DDC_ADDR REG(0x09, 0xfb) /* read/write */
+#define REG_DDC_OFFS REG(0x09, 0xfc) /* read/write */
+#define REG_DDC_SEGM_ADDR REG(0x09, 0xfd) /* read/write */
+#define REG_DDC_SEGM REG(0x09, 0xfe) /* read/write */
+
+
+/* Page 10h: information frames and packets */
+
+
+/* Page 11h: audio settings and content info packets */
+#define REG_AIP_CNTRL_0 REG(0x11, 0x00) /* read/write */
+# define AIP_CNTRL_0_RST_FIFO (1 << 0)
+# define AIP_CNTRL_0_SWAP (1 << 1)
+# define AIP_CNTRL_0_LAYOUT (1 << 2)
+# define AIP_CNTRL_0_ACR_MAN (1 << 5)
+# define AIP_CNTRL_0_RST_CTS (1 << 6)
+#define REG_ENC_CNTRL REG(0x11, 0x0d) /* read/write */
+# define ENC_CNTRL_RST_ENC (1 << 0)
+# define ENC_CNTRL_RST_SEL (1 << 1)
+# define ENC_CNTRL_CTL_CODE(x) (((x) & 3) << 2)
+
+
+/* Page 12h: HDCP and OTP */
+#define REG_TX3 REG(0x12, 0x9a) /* read/write */
+#define REG_TX33 REG(0x12, 0xb8) /* read/write */
+# define TX33_HDMI (1 << 1)
+
+
+/* Page 13h: Gamut related metadata packets */
+
+
+
+/* CEC registers: (not paged)
+ */
+#define REG_CEC_FRO_IM_CLK_CTRL 0xfb /* read/write */
+# define CEC_FRO_IM_CLK_CTRL_GHOST_DIS (1 << 7)
+# define CEC_FRO_IM_CLK_CTRL_ENA_OTP (1 << 6)
+# define CEC_FRO_IM_CLK_CTRL_IMCLK_SEL (1 << 1)
+# define CEC_FRO_IM_CLK_CTRL_FRO_DIV (1 << 0)
+#define REG_CEC_RXSHPDLEV 0xfe /* read */
+# define CEC_RXSHPDLEV_RXSENS (1 << 0)
+# define CEC_RXSHPDLEV_HPD (1 << 1)
+
+#define REG_CEC_ENAMODS 0xff /* read/write */
+# define CEC_ENAMODS_DIS_FRO (1 << 6)
+# define CEC_ENAMODS_DIS_CCLK (1 << 5)
+# define CEC_ENAMODS_EN_RXSENS (1 << 2)
+# define CEC_ENAMODS_EN_HDMI (1 << 1)
+# define CEC_ENAMODS_EN_CEC (1 << 0)
+
+
+/* Device versions: */
+#define TDA9989N2 0x0101
+#define TDA19989 0x0201
+#define TDA19989N2 0x0202
+#define TDA19988 0x0301
+
+static void
+cec_write(struct drm_encoder *encoder, uint16_t addr, uint8_t val)
+{
+ struct i2c_client *client = to_tda998x_priv(encoder)->cec;
+ uint8_t buf[] = {addr, val};
+ int ret;
+
+ ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+ if (ret < 0)
+ dev_err(&client->dev, "Error %d writing to cec:0x%x\n", ret, addr);
+}
+
+static uint8_t
+cec_read(struct drm_encoder *encoder, uint8_t addr)
+{
+ struct i2c_client *client = to_tda998x_priv(encoder)->cec;
+ uint8_t val;
+ int ret;
+
+ ret = i2c_master_send(client, &addr, sizeof(addr));
+ if (ret < 0)
+ goto fail;
+
+ ret = i2c_master_recv(client, &val, sizeof(val));
+ if (ret < 0)
+ goto fail;
+
+ return val;
+
+fail:
+ dev_err(&client->dev, "Error %d reading from cec:0x%x\n", ret, addr);
+ return 0;
+}
+
+static void
+set_page(struct drm_encoder *encoder, uint16_t reg)
+{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+
+ if (REG2PAGE(reg) != priv->current_page) {
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ uint8_t buf[] = {
+ REG_CURPAGE, REG2PAGE(reg)
+ };
+ int ret = i2c_master_send(client, buf, sizeof(buf));
+ if (ret < 0)
+ dev_err(&client->dev, "Error %d writing to REG_CURPAGE\n", ret);
+
+ priv->current_page = REG2PAGE(reg);
+ }
+}
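
To make the paging concrete, an illustrative trace (not from the patch) of what the register encoding above means for one register, REG_PLL_SERIAL_2 = REG(0x02, 0x01):

/*
 *   REG2PAGE(REG_PLL_SERIAL_2) == 0x02 : written to REG_CURPAGE (0xff) by
 *                                        set_page(), but only when it differs
 *                                        from the cached priv->current_page
 *   REG2ADDR(REG_PLL_SERIAL_2) == 0x01 : the register address byte that the
 *                                        reg_read()/reg_write() helpers below
 *                                        actually put on the i2c bus
 */
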
+
+static int
+reg_read_range(struct drm_encoder *encoder, uint16_t reg, char *buf, int cnt)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ uint8_t addr = REG2ADDR(reg);
+ int ret;
+
+ set_page(encoder, reg);
+
+ ret = i2c_master_send(client, &addr, sizeof(addr));
+ if (ret < 0)
+ goto fail;
+
+ ret = i2c_master_recv(client, buf, cnt);
+ if (ret < 0)
+ goto fail;
+
+ return ret;
+
+fail:
+ dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg);
+ return ret;
+}
+
+static uint8_t
+reg_read(struct drm_encoder *encoder, uint16_t reg)
+{
+ uint8_t val = 0;
+ reg_read_range(encoder, reg, &val, sizeof(val));
+ return val;
+}
+
+static void
+reg_write(struct drm_encoder *encoder, uint16_t reg, uint8_t val)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ uint8_t buf[] = {REG2ADDR(reg), val};
+ int ret;
+
+ set_page(encoder, reg);
+
+ ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+ if (ret < 0)
+ dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+}
+
+static void
+reg_write16(struct drm_encoder *encoder, uint16_t reg, uint16_t val)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ uint8_t buf[] = {REG2ADDR(reg), val >> 8, val};
+ int ret;
+
+ set_page(encoder, reg);
+
+ ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+ if (ret < 0)
+ dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+}
+
+static void
+reg_set(struct drm_encoder *encoder, uint16_t reg, uint8_t val)
+{
+ reg_write(encoder, reg, reg_read(encoder, reg) | val);
+}
+
+static void
+reg_clear(struct drm_encoder *encoder, uint16_t reg, uint8_t val)
+{
+ reg_write(encoder, reg, reg_read(encoder, reg) & ~val);
+}
+
+static void
+tda998x_reset(struct drm_encoder *encoder)
+{
+ /* reset audio and i2c master: */
+ reg_set(encoder, REG_SOFTRESET, SOFTRESET_AUDIO | SOFTRESET_I2C_MASTER);
+ msleep(50);
+ reg_clear(encoder, REG_SOFTRESET, SOFTRESET_AUDIO | SOFTRESET_I2C_MASTER);
+ msleep(50);
+
+ /* reset transmitter: */
+ reg_set(encoder, REG_MAIN_CNTRL0, MAIN_CNTRL0_SR);
+ reg_clear(encoder, REG_MAIN_CNTRL0, MAIN_CNTRL0_SR);
+
+ /* PLL registers common configuration */
+ reg_write(encoder, REG_PLL_SERIAL_1, 0x00);
+ reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(1));
+ reg_write(encoder, REG_PLL_SERIAL_3, 0x00);
+ reg_write(encoder, REG_SERIALIZER, 0x00);
+ reg_write(encoder, REG_BUFFER_OUT, 0x00);
+ reg_write(encoder, REG_PLL_SCG1, 0x00);
+ reg_write(encoder, REG_AUDIO_DIV, 0x03);
+ reg_write(encoder, REG_SEL_CLK, SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK);
+ reg_write(encoder, REG_PLL_SCGN1, 0xfa);
+ reg_write(encoder, REG_PLL_SCGN2, 0x00);
+ reg_write(encoder, REG_PLL_SCGR1, 0x5b);
+ reg_write(encoder, REG_PLL_SCGR2, 0x00);
+ reg_write(encoder, REG_PLL_SCG2, 0x10);
+}
+
+/* DRM encoder functions */
+
+static void
+tda998x_encoder_set_config(struct drm_encoder *encoder, void *params)
+{
+}
+
+static void
+tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+
+ /* we only care about on or off: */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == priv->dpms)
+ return;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ /* enable audio and video ports */
+ reg_write(encoder, REG_ENA_AP, 0xff);
+ reg_write(encoder, REG_ENA_VP_0, 0xff);
+ reg_write(encoder, REG_ENA_VP_1, 0xff);
+ reg_write(encoder, REG_ENA_VP_2, 0xff);
+ /* set muxing after enabling ports: */
+ reg_write(encoder, REG_VIP_CNTRL_0,
+ VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3));
+ reg_write(encoder, REG_VIP_CNTRL_1,
+ VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1));
+ reg_write(encoder, REG_VIP_CNTRL_2,
+ VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5));
+ break;
+ case DRM_MODE_DPMS_OFF:
+ /* disable audio and video ports */
+ reg_write(encoder, REG_ENA_AP, 0x00);
+ reg_write(encoder, REG_ENA_VP_0, 0x00);
+ reg_write(encoder, REG_ENA_VP_1, 0x00);
+ reg_write(encoder, REG_ENA_VP_2, 0x00);
+ break;
+ }
+
+ priv->dpms = mode;
+}
+
+static void
+tda998x_encoder_save(struct drm_encoder *encoder)
+{
+ DBG("");
+}
+
+static void
+tda998x_encoder_restore(struct drm_encoder *encoder)
+{
+ DBG("");
+}
+
+static bool
+tda998x_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static int
+tda998x_encoder_mode_valid(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static void
+tda998x_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+ uint16_t hs_start, hs_end, line_start, line_end;
+ uint16_t vwin_start, vwin_end, de_start, de_end;
+ uint16_t ref_pix, ref_line, pix_start2;
+ uint8_t reg, div, rep;
+
+ hs_start = mode->hsync_start - mode->hdisplay;
+ hs_end = mode->hsync_end - mode->hdisplay;
+ line_start = 1;
+ line_end = 1 + mode->vsync_end - mode->vsync_start;
+ vwin_start = mode->vtotal - mode->vsync_start;
+ vwin_end = vwin_start + mode->vdisplay;
+ de_start = mode->htotal - mode->hdisplay;
+ de_end = mode->htotal;
+
+ pix_start2 = 0;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ pix_start2 = (mode->htotal / 2) + hs_start;
+
+ /* TODO how is this value calculated? It is 2 for all common
+ * formats in the tables in out of tree nxp driver (assuming
+ * I've properly deciphered their byzantine table system)
+ */
+ ref_line = 2;
+
+ /* this might change for other color formats from the CRTC: */
+ ref_pix = 3 + hs_start;
+
+ div = 148500 / mode->clock;
+
+ DBG("clock=%d, div=%u", mode->clock, div);
+ DBG("hs_start=%u, hs_end=%u, line_start=%u, line_end=%u",
+ hs_start, hs_end, line_start, line_end);
+ DBG("vwin_start=%u, vwin_end=%u, de_start=%u, de_end=%u",
+ vwin_start, vwin_end, de_start, de_end);
+ DBG("ref_line=%u, ref_pix=%u, pix_start2=%u",
+ ref_line, ref_pix, pix_start2);
+
+ /* mute the audio FIFO: */
+ reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
+
+ /* set HDMI HDCP mode off: */
+ reg_set(encoder, REG_TBG_CNTRL_1, TBG_CNTRL_1_DWIN_DIS);
+ reg_clear(encoder, REG_TX33, TX33_HDMI);
+
+ reg_write(encoder, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(0));
+ /* no pre-filter or interpolator: */
+ reg_write(encoder, REG_HVF_CNTRL_0, HVF_CNTRL_0_PREFIL(0) |
+ HVF_CNTRL_0_INTPOL(0));
+ reg_write(encoder, REG_VIP_CNTRL_5, VIP_CNTRL_5_SP_CNT(0));
+ reg_write(encoder, REG_VIP_CNTRL_4, VIP_CNTRL_4_BLANKIT(0) |
+ VIP_CNTRL_4_BLC(0));
+ reg_clear(encoder, REG_PLL_SERIAL_3, PLL_SERIAL_3_SRL_CCIR);
+
+ reg_clear(encoder, REG_PLL_SERIAL_1, PLL_SERIAL_1_SRL_MAN_IZ);
+ reg_clear(encoder, REG_PLL_SERIAL_3, PLL_SERIAL_3_SRL_DE);
+ reg_write(encoder, REG_SERIALIZER, 0);
+ reg_write(encoder, REG_HVF_CNTRL_1, HVF_CNTRL_1_VQR(0));
+
+ /* TODO enable pixel repeat for pixel rates less than 25Msamp/s */
+ rep = 0;
+ reg_write(encoder, REG_RPT_CNTRL, 0);
+ reg_write(encoder, REG_SEL_CLK, SEL_CLK_SEL_VRF_CLK(0) |
+ SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK);
+
+ reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(div) |
+ PLL_SERIAL_2_SRL_PR(rep));
+
+ reg_write16(encoder, REG_VS_PIX_STRT_2_MSB, pix_start2);
+ reg_write16(encoder, REG_VS_PIX_END_2_MSB, pix_start2);
+
+ /* set color matrix bypass flag: */
+ reg_set(encoder, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP);
+
+ /* set BIAS tmds value: */
+ reg_write(encoder, REG_ANA_GENERAL, 0x09);
+
+ reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_MTHD);
+
+ reg_write(encoder, REG_VIP_CNTRL_3, 0);
+ reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_SYNC_HS);
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_V_TGL);
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_H_TGL);
+
+ reg_write(encoder, REG_VIDFORMAT, 0x00);
+ reg_write16(encoder, REG_NPIX_MSB, mode->hdisplay - 1);
+ reg_write16(encoder, REG_NLINE_MSB, mode->vdisplay - 1);
+ reg_write16(encoder, REG_VS_LINE_STRT_1_MSB, line_start);
+ reg_write16(encoder, REG_VS_LINE_END_1_MSB, line_end);
+ reg_write16(encoder, REG_VS_PIX_STRT_1_MSB, hs_start);
+ reg_write16(encoder, REG_VS_PIX_END_1_MSB, hs_start);
+ reg_write16(encoder, REG_HS_PIX_START_MSB, hs_start);
+ reg_write16(encoder, REG_HS_PIX_STOP_MSB, hs_end);
+ reg_write16(encoder, REG_VWIN_START_1_MSB, vwin_start);
+ reg_write16(encoder, REG_VWIN_END_1_MSB, vwin_end);
+ reg_write16(encoder, REG_DE_START_MSB, de_start);
+ reg_write16(encoder, REG_DE_STOP_MSB, de_end);
+
+ if (priv->rev == TDA19988) {
+ /* let incoming pixels fill the active space (if any) */
+ reg_write(encoder, REG_ENABLE_SPACE, 0x01);
+ }
+
+ reg_write16(encoder, REG_REFPIX_MSB, ref_pix);
+ reg_write16(encoder, REG_REFLINE_MSB, ref_line);
+
+ reg = TBG_CNTRL_1_VHX_EXT_DE |
+ TBG_CNTRL_1_VHX_EXT_HS |
+ TBG_CNTRL_1_VHX_EXT_VS |
+ TBG_CNTRL_1_DWIN_DIS | /* HDCP off */
+ TBG_CNTRL_1_VH_TGL_2;
+ if (mode->flags & (DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC))
+ reg |= TBG_CNTRL_1_VH_TGL_0;
+ reg_set(encoder, REG_TBG_CNTRL_1, reg);
+
+ /* must be last register set: */
+ reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_ONCE);
+}
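/*
 * Standalone sketch, not part of the patch: how the window/divider math in
 * tda998x_encoder_mode_set() works out for a common CEA 1280x720@60 mode
 * (74.25 MHz pixel clock).  The mode numbers below are assumptions taken
 * from the CEA-861 720p60 timing, not values supplied by the driver.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical drm_display_mode values for 720p60 */
	int clock = 74250;					/* kHz */
	int hdisplay = 1280, hsync_start = 1390, hsync_end = 1430, htotal = 1650;
	int vdisplay = 720, vsync_start = 725, vsync_end = 730, vtotal = 750;

	uint16_t hs_start = hsync_start - hdisplay;		/* 110 */
	uint16_t hs_end = hsync_end - hdisplay;			/* 150 */
	uint16_t line_end = 1 + vsync_end - vsync_start;	/* 6 */
	uint16_t vwin_start = vtotal - vsync_start;		/* 25 */
	uint16_t vwin_end = vwin_start + vdisplay;		/* 745 */
	uint16_t de_start = htotal - hdisplay;			/* 370 */
	uint8_t div = 148500 / clock;				/* 2: serializer ref clock / pixel clock */

	printf("hs %d..%d, line_end %d, vwin %d..%d, de_start %d, div %d\n",
	       hs_start, hs_end, line_end, vwin_start, vwin_end, de_start, div);
	return 0;
}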
+
+static enum drm_connector_status
+tda998x_encoder_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ uint8_t val = cec_read(encoder, REG_CEC_RXSHPDLEV);
+ return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected :
+ connector_status_disconnected;
+}
+
+static int
+read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
+{
+ uint8_t offset, segptr;
+ int ret, i;
+
+ /* enable EDID read irq: */
+ reg_set(encoder, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
+
+ offset = (blk & 1) ? 128 : 0;
+ segptr = blk / 2;
+
+ reg_write(encoder, REG_DDC_ADDR, 0xa0);
+ reg_write(encoder, REG_DDC_OFFS, offset);
+ reg_write(encoder, REG_DDC_SEGM_ADDR, 0x60);
+ reg_write(encoder, REG_DDC_SEGM, segptr);
+
+ /* enable reading EDID: */
+ reg_write(encoder, REG_EDID_CTRL, 0x1);
+
+ /* flag must be cleared by sw: */
+ reg_write(encoder, REG_EDID_CTRL, 0x0);
+
+ /* wait for block read to complete: */
+ for (i = 100; i > 0; i--) {
+ uint8_t val = reg_read(encoder, REG_INT_FLAGS_2);
+ if (val & INT_FLAGS_2_EDID_BLK_RD)
+ break;
+ msleep(1);
+ }
+
+ if (i == 0)
+ return -ETIMEDOUT;
+
+ ret = reg_read_range(encoder, REG_EDID_DATA_0, buf, EDID_LENGTH);
+ if (ret != EDID_LENGTH) {
+ dev_err(encoder->dev->dev, "failed to read edid block %d: %d\n",
+ blk, ret);
+ return ret;
+ }
+
+ reg_clear(encoder, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
+
+ return 0;
+}
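/*
 * Sketch, not driver code: the blk -> (segment pointer, offset) mapping used
 * by read_edid_block() above.  E-DDC addresses 256 bytes per segment, so two
 * 128-byte EDID blocks share each segment.
 */
#include <stdio.h>

int main(void)
{
	for (int blk = 0; blk < 4; blk++) {
		int offset = (blk & 1) ? 128 : 0;
		int segptr = blk / 2;

		printf("EDID block %d -> segment %d, offset %d\n",
		       blk, segptr, offset);
	}
	return 0;
}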
+
+static uint8_t *
+do_get_edid(struct drm_encoder *encoder)
+{
+ int j = 0, valid_extensions = 0;
+ uint8_t *block, *new;
+ bool print_bad_edid = drm_debug & DRM_UT_KMS;
+
+ if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
+ return NULL;
+
+ /* base block fetch */
+ if (read_edid_block(encoder, block, 0))
+ goto fail;
+
+ if (!drm_edid_block_valid(block, 0, print_bad_edid))
+ goto fail;
+
+ /* if there's no extensions, we're done */
+ if (block[0x7e] == 0)
+ return block;
+
+ new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
+ if (!new)
+ goto fail;
+ block = new;
+
+ for (j = 1; j <= block[0x7e]; j++) {
+ uint8_t *ext_block = block + (valid_extensions + 1) * EDID_LENGTH;
+ if (read_edid_block(encoder, ext_block, j))
+ goto fail;
+
+ if (!drm_edid_block_valid(ext_block, j, print_bad_edid))
+ goto fail;
+
+ valid_extensions++;
+ }
+
+ if (valid_extensions != block[0x7e]) {
+ block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
+ block[0x7e] = valid_extensions;
+ new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+ if (!new)
+ goto fail;
+ block = new;
+ }
+
+ return block;
+
+fail:
+ dev_warn(encoder->dev->dev, "failed to read EDID\n");
+ kfree(block);
+ return NULL;
+}
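/*
 * Sketch, not driver code: the fixup in do_get_edid() above keeps the base
 * EDID block checksum valid when invalid extension blocks are dropped.
 * Every EDID block must sum to 0 mod 256, and the extension count at byte
 * 0x7e is part of that sum, so lowering the count by N must be balanced by
 * raising the checksum byte (offset 127) by N.
 */
#include <stdint.h>
#include <stdio.h>

#define EDID_LENGTH 128

static uint8_t block_sum(const uint8_t *blk)
{
	uint8_t sum = 0;

	for (int i = 0; i < EDID_LENGTH; i++)
		sum += blk[i];
	return sum;	/* 0 for a valid block */
}

int main(void)
{
	uint8_t base[EDID_LENGTH] = { 0 };
	int valid_extensions = 1;		/* pretend one extension was bad */

	base[0x7e] = 2;				/* block claims two extensions */
	base[127] = 256 - block_sum(base);	/* make the checksum valid */

	base[127] += base[0x7e] - valid_extensions;
	base[0x7e] = valid_extensions;

	printf("checksum still valid: %s\n", block_sum(base) == 0 ? "yes" : "no");
	return 0;
}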
+
+static int
+tda998x_encoder_get_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct edid *edid = (struct edid *)do_get_edid(encoder);
+ int n = 0;
+
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ n = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+
+ return n;
+}
+
+static int
+tda998x_encoder_create_resources(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ DBG("");
+ return 0;
+}
+
+static int
+tda998x_encoder_set_property(struct drm_encoder *encoder,
+ struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ DBG("");
+ return 0;
+}
+
+static void
+tda998x_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+ drm_i2c_encoder_destroy(encoder);
+ kfree(priv);
+}
+
+static struct drm_encoder_slave_funcs tda998x_encoder_funcs = {
+ .set_config = tda998x_encoder_set_config,
+ .destroy = tda998x_encoder_destroy,
+ .dpms = tda998x_encoder_dpms,
+ .save = tda998x_encoder_save,
+ .restore = tda998x_encoder_restore,
+ .mode_fixup = tda998x_encoder_mode_fixup,
+ .mode_valid = tda998x_encoder_mode_valid,
+ .mode_set = tda998x_encoder_mode_set,
+ .detect = tda998x_encoder_detect,
+ .get_modes = tda998x_encoder_get_modes,
+ .create_resources = tda998x_encoder_create_resources,
+ .set_property = tda998x_encoder_set_property,
+};
+
+/* I2C driver functions */
+
+static int
+tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ return 0;
+}
+
+static int
+tda998x_remove(struct i2c_client *client)
+{
+ return 0;
+}
+
+static int
+tda998x_encoder_init(struct i2c_client *client,
+ struct drm_device *dev,
+ struct drm_encoder_slave *encoder_slave)
+{
+ struct drm_encoder *encoder = &encoder_slave->base;
+ struct tda998x_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->current_page = 0;
+ priv->cec = i2c_new_dummy(client->adapter, 0x34);
+ priv->dpms = DRM_MODE_DPMS_OFF;
+
+ encoder_slave->slave_priv = priv;
+ encoder_slave->slave_funcs = &tda998x_encoder_funcs;
+
+ /* wake up the device: */
+ cec_write(encoder, REG_CEC_ENAMODS,
+ CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
+
+ tda998x_reset(encoder);
+
+ /* read version: */
+ priv->rev = reg_read(encoder, REG_VERSION_LSB) |
+ reg_read(encoder, REG_VERSION_MSB) << 8;
+
+ /* mask off feature bits: */
+ priv->rev &= ~0x30; /* not-hdcp and not-scalar bit */
+
+ switch (priv->rev) {
+ case TDA9989N2: dev_info(dev->dev, "found TDA9989 n2"); break;
+ case TDA19989: dev_info(dev->dev, "found TDA19989"); break;
+ case TDA19989N2: dev_info(dev->dev, "found TDA19989 n2"); break;
+ case TDA19988: dev_info(dev->dev, "found TDA19988"); break;
+ default:
+ DBG("found unsupported device: %04x", priv->rev);
+ goto fail;
+ }
+
+ /* after reset, enable DDC: */
+ reg_write(encoder, REG_DDC_DISABLE, 0x00);
+
+ /* set clock on DDC channel: */
+ reg_write(encoder, REG_TX3, 39);
+
+ /* if necessary, disable multi-master: */
+ if (priv->rev == TDA19989)
+ reg_set(encoder, REG_I2C_MASTER, I2C_MASTER_DIS_MM);
+
+ cec_write(encoder, REG_CEC_FRO_IM_CLK_CTRL,
+ CEC_FRO_IM_CLK_CTRL_GHOST_DIS | CEC_FRO_IM_CLK_CTRL_IMCLK_SEL);
+
+ return 0;
+
+fail:
+ /* if encoder_init fails, the encoder slave is never registered,
+ * so cleanup here:
+ */
+ if (priv->cec)
+ i2c_unregister_device(priv->cec);
+ kfree(priv);
+ encoder_slave->slave_priv = NULL;
+ encoder_slave->slave_funcs = NULL;
+ return -ENXIO;
+}
+
+static struct i2c_device_id tda998x_ids[] = {
+ { "tda998x", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tda998x_ids);
+
+static struct drm_i2c_encoder_driver tda998x_driver = {
+ .i2c_driver = {
+ .probe = tda998x_probe,
+ .remove = tda998x_remove,
+ .driver = {
+ .name = "tda998x",
+ },
+ .id_table = tda998x_ids,
+ },
+ .encoder_init = tda998x_encoder_init,
+};
+
+/* Module initialization */
+
+static int __init
+tda998x_init(void)
+{
+ DBG("");
+ return drm_i2c_encoder_register(THIS_MODULE, &tda998x_driver);
+}
+
+static void __exit
+tda998x_exit(void)
+{
+ DBG("");
+ drm_i2c_encoder_unregister(&tda998x_driver);
+}
+
+MODULE_AUTHOR("Rob Clark <robdclark@gmail.com");
+MODULE_DESCRIPTION("NXP Semiconductors TDA998X HDMI Encoder");
+MODULE_LICENSE("GPL");
+
+module_init(tda998x_init);
+module_exit(tda998x_exit);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0f2c5493242..91f3ac6cef3 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -16,6 +16,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
i915_gem_tiling.o \
i915_sysfs.o \
i915_trace_points.o \
+ i915_ums.o \
intel_display.o \
intel_crt.o \
intel_lvds.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 32158d21c63..aae31489c89 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -103,7 +103,7 @@ static const char *cache_level_str(int type)
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
- seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
+ seq_printf(m, "%p: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
&obj->base,
get_pin_flag(obj),
get_tiling_flag(obj),
@@ -125,6 +125,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
if (obj->gtt_space != NULL)
seq_printf(m, " (gtt offset: %08x, size: %08x)",
obj->gtt_offset, (unsigned int)obj->gtt_space->size);
+ if (obj->stolen)
+ seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
if (obj->pin_mappable || obj->fault_mappable) {
char s[3], *t = s;
if (obj->pin_mappable)
@@ -257,8 +259,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
seq_printf(m, "%u fault mappable objects, %zu bytes\n",
count, size);
- seq_printf(m, "%zu [%zu] gtt total\n",
- dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
+ seq_printf(m, "%zu [%lu] gtt total\n",
+ dev_priv->gtt.total,
+ dev_priv->gtt.mappable_end - dev_priv->gtt.start);
mutex_unlock(&dev->struct_mutex);
@@ -388,7 +391,7 @@ static void i915_ring_seqno_info(struct seq_file *m,
struct intel_ring_buffer *ring)
{
if (ring->get_seqno) {
- seq_printf(m, "Current sequence (%s): %d\n",
+ seq_printf(m, "Current sequence (%s): %u\n",
ring->name, ring->get_seqno(ring, false));
}
}
@@ -545,11 +548,11 @@ static int i915_hws_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
- const volatile u32 __iomem *hws;
+ const u32 *hws;
int i;
ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
- hws = (volatile u32 __iomem *)ring->status_page.page_addr;
+ hws = ring->status_page.page_addr;
if (hws == NULL)
return 0;
@@ -609,7 +612,7 @@ static void print_error_buffers(struct seq_file *m,
seq_printf(m, "%s [%d]:\n", name, count);
while (count--) {
- seq_printf(m, " %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s",
+ seq_printf(m, " %08x %8u %02x %02x %x %x%s%s%s%s%s%s%s",
err->gtt_offset,
err->size,
err->read_domains,
@@ -691,7 +694,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
error->time.tv_usec);
- seq_printf(m, "Kernel: " UTS_RELEASE);
+ seq_printf(m, "Kernel: " UTS_RELEASE "\n");
seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
seq_printf(m, "EIR: 0x%08x\n", error->eir);
seq_printf(m, "IER: 0x%08x\n", error->ier);
@@ -816,11 +819,11 @@ static int i915_error_state_open(struct inode *inode, struct file *file)
error_priv->dev = dev;
- spin_lock_irqsave(&dev_priv->error_lock, flags);
- error_priv->error = dev_priv->first_error;
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ error_priv->error = dev_priv->gpu_error.first_error;
if (error_priv->error)
kref_get(&error_priv->error->ref);
- spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
return single_open(file, i915_error_state, error_priv);
}
@@ -846,6 +849,77 @@ static const struct file_operations i915_error_state_fops = {
.release = i915_error_state_release,
};
+static ssize_t
+i915_next_seqno_read(struct file *filp,
+ char __user *ubuf,
+ size_t max,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ char buf[80];
+ int len;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ len = snprintf(buf, sizeof(buf),
+ "next_seqno : 0x%x\n",
+ dev_priv->next_seqno);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
+ return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_next_seqno_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ char buf[20];
+ u32 val = 1;
+ int ret;
+
+ if (cnt > 0) {
+ if (cnt > sizeof(buf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+ buf[cnt] = 0;
+
+ ret = kstrtouint(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_set_seqno(dev, val);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret ?: cnt;
+}
+
+static const struct file_operations i915_next_seqno_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = i915_next_seqno_read,
+ .write = i915_next_seqno_write,
+ .llseek = default_llseek,
+};
+
static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -888,7 +962,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- u32 rpstat;
+ u32 rpstat, cagf;
u32 rpupei, rpcurup, rpprevup;
u32 rpdownei, rpcurdown, rpprevdown;
int max_freq;
@@ -907,6 +981,11 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
+ if (IS_HASWELL(dev))
+ cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
+ else
+ cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
+ cagf *= GT_FREQUENCY_MULTIPLIER;
gen6_gt_force_wake_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
@@ -919,8 +998,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
gt_perf_status & 0xff);
seq_printf(m, "Render p-state limit: %d\n",
rp_state_limits & 0xff);
- seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
- GEN6_CAGF_SHIFT) * GT_FREQUENCY_MULTIPLIER);
+ seq_printf(m, "CAGF: %dMHz\n", cagf);
seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
GEN6_CURICONT_MASK);
seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
@@ -1372,28 +1450,31 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
ifbdev = dev_priv->fbdev;
fb = to_intel_framebuffer(ifbdev->helper.fb);
- seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
+ seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
fb->base.width,
fb->base.height,
fb->base.depth,
- fb->base.bits_per_pixel);
+ fb->base.bits_per_pixel,
+ atomic_read(&fb->base.refcount.refcount));
describe_obj(m, fb->obj);
seq_printf(m, "\n");
+ mutex_unlock(&dev->mode_config.mutex);
+ mutex_lock(&dev->mode_config.fb_lock);
list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
if (&fb->base == ifbdev->helper.fb)
continue;
- seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
+ seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
fb->base.width,
fb->base.height,
fb->base.depth,
- fb->base.bits_per_pixel);
+ fb->base.bits_per_pixel,
+ atomic_read(&fb->base.refcount.refcount));
describe_obj(m, fb->obj);
seq_printf(m, "\n");
}
-
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&dev->mode_config.fb_lock);
return 0;
}
@@ -1403,7 +1484,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ struct intel_ring_buffer *ring;
+ int ret, i;
ret = mutex_lock_interruptible(&dev->mode_config.mutex);
if (ret)
@@ -1421,6 +1503,14 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_printf(m, "\n");
}
+ for_each_ring(ring, dev_priv, i) {
+ if (ring->default_context) {
+ seq_printf(m, "HW default context %s ring ", ring->name);
+ describe_obj(m, ring->default_context->obj);
+ seq_printf(m, "\n");
+ }
+ }
+
mutex_unlock(&dev->mode_config.mutex);
return 0;
@@ -1556,7 +1646,7 @@ static int i915_dpio_info(struct seq_file *m, void *data)
return 0;
}
- ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ ret = mutex_lock_interruptible(&dev_priv->dpio_lock);
if (ret)
return ret;
@@ -1585,7 +1675,7 @@ static int i915_dpio_info(struct seq_file *m, void *data)
seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&dev_priv->dpio_lock);
return 0;
}
@@ -1603,7 +1693,7 @@ i915_wedged_read(struct file *filp,
len = snprintf(buf, sizeof(buf),
"wedged : %d\n",
- atomic_read(&dev_priv->mm.wedged));
+ atomic_read(&dev_priv->gpu_error.reset_counter));
if (len > sizeof(buf))
len = sizeof(buf);
@@ -1658,7 +1748,7 @@ i915_ring_stop_read(struct file *filp,
int len;
len = snprintf(buf, sizeof(buf),
- "0x%08x\n", dev_priv->stop_rings);
+ "0x%08x\n", dev_priv->gpu_error.stop_rings);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -1694,7 +1784,7 @@ i915_ring_stop_write(struct file *filp,
if (ret)
return ret;
- dev_priv->stop_rings = val;
+ dev_priv->gpu_error.stop_rings = val;
mutex_unlock(&dev->struct_mutex);
return cnt;
@@ -1708,6 +1798,102 @@ static const struct file_operations i915_ring_stop_fops = {
.llseek = default_llseek,
};
+#define DROP_UNBOUND 0x1
+#define DROP_BOUND 0x2
+#define DROP_RETIRE 0x4
+#define DROP_ACTIVE 0x8
+#define DROP_ALL (DROP_UNBOUND | \
+ DROP_BOUND | \
+ DROP_RETIRE | \
+ DROP_ACTIVE)
+static ssize_t
+i915_drop_caches_read(struct file *filp,
+ char __user *ubuf,
+ size_t max,
+ loff_t *ppos)
+{
+ char buf[20];
+ int len;
+
+ len = snprintf(buf, sizeof(buf), "0x%08x\n", DROP_ALL);
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
+ return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_drop_caches_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj, *next;
+ char buf[20];
+ int val = 0, ret;
+
+ if (cnt > 0) {
+ if (cnt > sizeof(buf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+ buf[cnt] = 0;
+
+ val = simple_strtoul(buf, NULL, 0);
+ }
+
+ DRM_DEBUG_DRIVER("Dropping caches: 0x%08x\n", val);
+
+ /* No need to check and wait for gpu resets; only libdrm auto-restarts
+ * ioctls that return -EAGAIN. */
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ if (val & DROP_ACTIVE) {
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ goto unlock;
+ }
+
+ if (val & (DROP_RETIRE | DROP_ACTIVE))
+ i915_gem_retire_requests(dev);
+
+ if (val & DROP_BOUND) {
+ list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list)
+ if (obj->pin_count == 0) {
+ ret = i915_gem_object_unbind(obj);
+ if (ret)
+ goto unlock;
+ }
+ }
+
+ if (val & DROP_UNBOUND) {
+ list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
+ if (obj->pages_pin_count == 0) {
+ ret = i915_gem_object_put_pages(obj);
+ if (ret)
+ goto unlock;
+ }
+ }
+
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret ?: cnt;
+}
+
+static const struct file_operations i915_drop_caches_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = i915_drop_caches_read,
+ .write = i915_drop_caches_write,
+ .llseek = default_llseek,
+};
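/*
 * Sketch (userspace, not kernel code): how a test could ask the driver to
 * drop everything through the i915_gem_drop_caches debugfs file registered
 * later in this patch.  The path is an assumption about where debugfs is
 * mounted and which DRI minor the device received.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/dri/0/i915_gem_drop_caches";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* DROP_ALL == 0xf: DROP_UNBOUND | DROP_BOUND | DROP_RETIRE | DROP_ACTIVE */
	if (write(fd, "0xf", 3) != 3)
		perror("write");
	close(fd);
	return 0;
}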
+
static ssize_t
i915_max_freq_read(struct file *filp,
char __user *ubuf,
@@ -2105,11 +2291,23 @@ int i915_debugfs_init(struct drm_minor *minor)
return ret;
ret = i915_debugfs_create(minor->debugfs_root, minor,
+ "i915_gem_drop_caches",
+ &i915_drop_caches_fops);
+ if (ret)
+ return ret;
+
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
"i915_error_state",
&i915_error_state_fops);
if (ret)
return ret;
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
+ "i915_next_seqno",
+ &i915_next_seqno_fops);
+ if (ret)
+ return ret;
+
return drm_debugfs_create_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
@@ -2129,10 +2327,14 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
1, minor);
+ drm_debugfs_remove_files((struct drm_info_list *) &i915_drop_caches_fops,
+ 1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
1, minor);
+ drm_debugfs_remove_files((struct drm_info_list *) &i915_next_seqno_fops,
+ 1, minor);
}
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 99daa896105..4fa6beb14c7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -992,6 +992,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_PINNED_BATCHES:
value = 1;
break;
+ case I915_PARAM_HAS_EXEC_NO_RELOC:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_EXEC_HANDLE_LUT:
+ value = 1;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -1070,7 +1076,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
dev_priv->dri1.gfx_hws_cpu_addr =
- ioremap_wc(dev_priv->mm.gtt_base_addr + hws->addr, 4096);
+ ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096);
if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
i915_dma_cleanup(dev);
ring->status_page.gfx_addr = 0;
@@ -1297,19 +1303,21 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_vga_switcheroo;
+ ret = drm_irq_install(dev);
+ if (ret)
+ goto cleanup_gem_stolen;
+
+ /* Important: The output setup functions called by modeset_init need
+ * working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev);
ret = i915_gem_init(dev);
if (ret)
- goto cleanup_gem_stolen;
-
- intel_modeset_gem_init(dev);
+ goto cleanup_irq;
INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
- ret = drm_irq_install(dev);
- if (ret)
- goto cleanup_gem;
+ intel_modeset_gem_init(dev);
/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */
@@ -1317,7 +1325,25 @@ static int i915_load_modeset_init(struct drm_device *dev)
ret = intel_fbdev_init(dev);
if (ret)
- goto cleanup_irq;
+ goto cleanup_gem;
+
+ /* Only enable hotplug handling once the fbdev is fully set up. */
+ intel_hpd_init(dev);
+
+ /*
+ * Some ports need correctly set-up hpd registers for detection to work
+ * properly (otherwise they report a ghost "connected" connector status),
+ * e.g. VGA on gm45. Hence we can only set up the initial fbdev config
+ * after hpd irqs are fully enabled. Ideally we would scan for the
+ * initial config only once hotplug handling is enabled, but due to the
+ * screwed-up locking around kms/fbdev init we can't protect the fbdev
+ * initial config scanning against hotplug events. Hence do this first
+ * and ignore the tiny window where we will lose hotplug notifications.
+ */
+ intel_fbdev_initial_config(dev);
+
+ /* Now that the fbdev is fully set up, hotplug processing can be enabled. */
+ dev_priv->enable_hotplug_processing = true;
drm_kms_helper_poll_init(dev);
@@ -1326,13 +1352,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
return 0;
-cleanup_irq:
- drm_irq_uninstall(dev);
cleanup_gem:
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
+cleanup_irq:
+ drm_irq_uninstall(dev);
cleanup_gem_stolen:
i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
@@ -1400,9 +1426,9 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
if (!ap)
return;
- ap->ranges[0].base = dev_priv->mm.gtt->gma_bus_addr;
- ap->ranges[0].size =
- dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+ ap->ranges[0].base = dev_priv->gtt.mappable_base;
+ ap->ranges[0].size = dev_priv->gtt.mappable_end - dev_priv->gtt.start;
+
primary =
pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
@@ -1516,18 +1542,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto put_gmch;
}
- aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
- dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr;
+ aperture_size = dev_priv->gtt.mappable_end;
- dev_priv->mm.gtt_mapping =
- io_mapping_create_wc(dev_priv->mm.gtt_base_addr,
+ dev_priv->gtt.mappable =
+ io_mapping_create_wc(dev_priv->gtt.mappable_base,
aperture_size);
- if (dev_priv->mm.gtt_mapping == NULL) {
+ if (dev_priv->gtt.mappable == NULL) {
ret = -EIO;
goto out_rmmap;
}
- i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr,
+ i915_mtrr_setup(dev_priv, dev_priv->gtt.mappable_base,
aperture_size);
/* The i915 workqueue is primarily used for batched retirement of
@@ -1580,11 +1605,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
pci_enable_msi(dev->pdev);
spin_lock_init(&dev_priv->irq_lock);
- spin_lock_init(&dev_priv->error_lock);
+ spin_lock_init(&dev_priv->gpu_error.lock);
spin_lock_init(&dev_priv->rps.lock);
- spin_lock_init(&dev_priv->dpio_lock);
+ mutex_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->rps.hw_lock);
+ mutex_init(&dev_priv->modeset_restore_lock);
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
dev_priv->num_pipe = 3;
@@ -1614,9 +1640,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_opregion_init(dev);
acpi_video_register();
- setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
- (unsigned long) dev);
-
if (IS_GEN5(dev))
intel_gpu_ips_init(dev_priv);
@@ -1635,15 +1658,15 @@ out_gem_unload:
out_mtrrfree:
if (dev_priv->mm.gtt_mtrr >= 0) {
mtrr_del(dev_priv->mm.gtt_mtrr,
- dev_priv->mm.gtt_base_addr,
+ dev_priv->gtt.mappable_base,
aperture_size);
dev_priv->mm.gtt_mtrr = -1;
}
- io_mapping_free(dev_priv->mm.gtt_mapping);
+ io_mapping_free(dev_priv->gtt.mappable);
out_rmmap:
pci_iounmap(dev->pdev, dev_priv->regs);
put_gmch:
- i915_gem_gtt_fini(dev);
+ dev_priv->gtt.gtt_remove(dev);
put_bridge:
pci_dev_put(dev_priv->bridge_dev);
free_priv:
@@ -1673,11 +1696,11 @@ int i915_driver_unload(struct drm_device *dev)
/* Cancel the retire work handler, which should be idle now. */
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
- io_mapping_free(dev_priv->mm.gtt_mapping);
+ io_mapping_free(dev_priv->gtt.mappable);
if (dev_priv->mm.gtt_mtrr >= 0) {
mtrr_del(dev_priv->mm.gtt_mtrr,
- dev_priv->mm.gtt_base_addr,
- dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE);
+ dev_priv->gtt.mappable_base,
+ dev_priv->gtt.mappable_end);
dev_priv->mm.gtt_mtrr = -1;
}
@@ -1703,8 +1726,8 @@ int i915_driver_unload(struct drm_device *dev)
}
/* Free error state after interrupts are fully disabled. */
- del_timer_sync(&dev_priv->hangcheck_timer);
- cancel_work_sync(&dev_priv->error_work);
+ del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
+ cancel_work_sync(&dev_priv->gpu_error.work);
i915_destroy_error_state(dev);
if (dev->pdev->msi_enabled)
@@ -1723,9 +1746,6 @@ int i915_driver_unload(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
i915_gem_cleanup_stolen(dev);
- drm_mm_takedown(&dev_priv->mm.stolen);
-
- intel_cleanup_overlay(dev);
if (!I915_NEED_GFX_HWS(dev))
i915_free_hws(dev);
@@ -1738,6 +1758,10 @@ int i915_driver_unload(struct drm_device *dev)
intel_teardown_mchbar(dev);
destroy_workqueue(dev_priv->wq);
+ pm_qos_remove_request(&dev_priv->pm_qos);
+
+ if (dev_priv->slab)
+ kmem_cache_destroy(dev_priv->slab);
pci_dev_put(dev_priv->bridge_dev);
kfree(dev->dev_private);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 117265840b1..c5b8c81b944 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -276,6 +276,7 @@ static const struct intel_device_info intel_valleyview_m_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.is_valleyview = 1,
+ .display_mmio_offset = VLV_DISPLAY_BASE,
};
static const struct intel_device_info intel_valleyview_d_info = {
@@ -285,6 +286,7 @@ static const struct intel_device_info intel_valleyview_d_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.is_valleyview = 1,
+ .display_mmio_offset = VLV_DISPLAY_BASE,
};
static const struct intel_device_info intel_haswell_d_info = {
@@ -468,6 +470,13 @@ static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ /* ignore lid events during suspend */
+ mutex_lock(&dev_priv->modeset_restore_lock);
+ dev_priv->modeset_restore = MODESET_SUSPENDED;
+ mutex_unlock(&dev_priv->modeset_restore_lock);
+
+ intel_set_power_well(dev, true);
+
drm_kms_helper_poll_disable(dev);
pci_save_state(dev->pdev);
@@ -492,9 +501,6 @@ static int i915_drm_freeze(struct drm_device *dev)
intel_opregion_fini(dev);
- /* Modeset on resume, not lid events */
- dev_priv->modeset_on_lid = 0;
-
console_lock();
intel_fbdev_set_suspend(dev, 1);
console_unlock();
@@ -565,12 +571,11 @@ static int __i915_drm_thaw(struct drm_device *dev)
intel_modeset_init_hw(dev);
intel_modeset_setup_hw_state(dev, false);
drm_irq_install(dev);
+ intel_hpd_init(dev);
}
intel_opregion_init(dev);
- dev_priv->modeset_on_lid = 0;
-
/*
* The console lock can be pretty contended on resume due
* to all the printk activity. Try to keep it out of the hot
@@ -583,6 +588,9 @@ static int __i915_drm_thaw(struct drm_device *dev)
schedule_work(&dev_priv->console_resume_work);
}
+ mutex_lock(&dev_priv->modeset_restore_lock);
+ dev_priv->modeset_restore = MODESET_DONE;
+ mutex_unlock(&dev_priv->modeset_restore_lock);
return error;
}
@@ -778,9 +786,9 @@ int intel_gpu_reset(struct drm_device *dev)
}
/* Also reset the gpu hangman. */
- if (dev_priv->stop_rings) {
+ if (dev_priv->gpu_error.stop_rings) {
DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
- dev_priv->stop_rings = 0;
+ dev_priv->gpu_error.stop_rings = 0;
if (ret == -ENODEV) {
DRM_ERROR("Reset not implemented, but ignoring "
"error for simulated gpu hangs\n");
@@ -819,12 +827,12 @@ int i915_reset(struct drm_device *dev)
i915_gem_reset(dev);
ret = -ENODEV;
- if (get_seconds() - dev_priv->last_gpu_reset < 5)
+ if (get_seconds() - dev_priv->gpu_error.last_reset < 5)
DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
else
ret = intel_gpu_reset(dev);
- dev_priv->last_gpu_reset = get_seconds();
+ dev_priv->gpu_error.last_reset = get_seconds();
if (ret) {
DRM_ERROR("Failed to reset chip.\n");
mutex_unlock(&dev->struct_mutex);
@@ -870,6 +878,7 @@ int i915_reset(struct drm_device *dev)
drm_irq_uninstall(dev);
drm_irq_install(dev);
+ intel_hpd_init(dev);
} else {
mutex_unlock(&dev->struct_mutex);
}
@@ -1113,102 +1122,6 @@ MODULE_LICENSE("GPL and additional rights");
((HAS_FORCE_WAKE((dev_priv)->dev)) && \
((reg) < 0x40000) && \
((reg) != FORCEWAKE))
-
-static bool IS_DISPLAYREG(u32 reg)
-{
- /*
- * This should make it easier to transition modules over to the
- * new register block scheme, since we can do it incrementally.
- */
- if (reg >= VLV_DISPLAY_BASE)
- return false;
-
- if (reg >= RENDER_RING_BASE &&
- reg < RENDER_RING_BASE + 0xff)
- return false;
- if (reg >= GEN6_BSD_RING_BASE &&
- reg < GEN6_BSD_RING_BASE + 0xff)
- return false;
- if (reg >= BLT_RING_BASE &&
- reg < BLT_RING_BASE + 0xff)
- return false;
-
- if (reg == PGTBL_ER)
- return false;
-
- if (reg >= IPEIR_I965 &&
- reg < HWSTAM)
- return false;
-
- if (reg == MI_MODE)
- return false;
-
- if (reg == GFX_MODE_GEN7)
- return false;
-
- if (reg == RENDER_HWS_PGA_GEN7 ||
- reg == BSD_HWS_PGA_GEN7 ||
- reg == BLT_HWS_PGA_GEN7)
- return false;
-
- if (reg == GEN6_BSD_SLEEP_PSMI_CONTROL ||
- reg == GEN6_BSD_RNCID)
- return false;
-
- if (reg == GEN6_BLITTER_ECOSKPD)
- return false;
-
- if (reg >= 0x4000c &&
- reg <= 0x4002c)
- return false;
-
- if (reg >= 0x4f000 &&
- reg <= 0x4f08f)
- return false;
-
- if (reg >= 0x4f100 &&
- reg <= 0x4f11f)
- return false;
-
- if (reg >= VLV_MASTER_IER &&
- reg <= GEN6_PMIER)
- return false;
-
- if (reg >= FENCE_REG_SANDYBRIDGE_0 &&
- reg < (FENCE_REG_SANDYBRIDGE_0 + (16*8)))
- return false;
-
- if (reg >= VLV_IIR_RW &&
- reg <= VLV_ISR)
- return false;
-
- if (reg == FORCEWAKE_VLV ||
- reg == FORCEWAKE_ACK_VLV)
- return false;
-
- if (reg == GEN6_GDRST)
- return false;
-
- switch (reg) {
- case _3D_CHICKEN3:
- case IVB_CHICKEN3:
- case GEN7_COMMON_SLICE_CHICKEN1:
- case GEN7_L3CNTLREG1:
- case GEN7_L3_CHICKEN_MODE_REGISTER:
- case GEN7_ROW_CHICKEN2:
- case GEN7_L3SQCREG4:
- case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
- case GEN7_HALF_SLICE_CHICKEN1:
- case GEN6_MBCTL:
- case GEN6_UCGCTL2:
- return false;
- default:
- break;
- }
-
- return true;
-}
-
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
@@ -1232,8 +1145,6 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
if (dev_priv->forcewake_count == 0) \
dev_priv->gt.force_wake_put(dev_priv); \
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
- } else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
- val = read##y(dev_priv->regs + reg + 0x180000); \
} else { \
val = read##y(dev_priv->regs + reg); \
} \
@@ -1260,11 +1171,7 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
} \
- if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
- write##y(val, dev_priv->regs + reg + 0x180000); \
- } else { \
- write##y(val, dev_priv->regs + reg); \
- } \
+ write##y(val, dev_priv->regs + reg); \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 12ab3bdea54..e95337c9745 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -30,6 +30,8 @@
#ifndef _I915_DRV_H_
#define _I915_DRV_H_
+#include <uapi/drm/i915_drm.h>
+
#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
@@ -40,6 +42,7 @@
#include <linux/backlight.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
+#include <linux/pm_qos.h>
/* General customization:
*/
@@ -83,7 +86,12 @@ enum port {
};
#define port_name(p) ((p) + 'A')
-#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+#define I915_GEM_GPU_DOMAINS \
+ (I915_GEM_DOMAIN_RENDER | \
+ I915_GEM_DOMAIN_SAMPLER | \
+ I915_GEM_DOMAIN_COMMAND | \
+ I915_GEM_DOMAIN_INSTRUCTION | \
+ I915_GEM_DOMAIN_VERTEX)
#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
@@ -101,6 +109,19 @@ struct intel_pch_pll {
};
#define I915_NUM_PLLS 2
+/* Used by dp and fdi links */
+struct intel_link_m_n {
+ uint32_t tu;
+ uint32_t gmch_m;
+ uint32_t gmch_n;
+ uint32_t link_m;
+ uint32_t link_n;
+};
+
+void intel_link_compute_m_n(int bpp, int nlanes,
+ int pixel_clock, int link_clock,
+ struct intel_link_m_n *m_n);
+
struct intel_ddi_plls {
int spll_refcount;
int wrpll1_refcount;
@@ -279,6 +300,7 @@ struct drm_i915_display_funcs {
struct drm_i915_gem_object *obj);
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y);
+ void (*hpd_irq_setup)(struct drm_device *dev);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@@ -318,6 +340,7 @@ struct drm_i915_gt_funcs {
DEV_INFO_FLAG(has_llc)
struct intel_device_info {
+ u32 display_mmio_offset;
u8 gen;
u8 is_mobile:1;
u8 is_i85x:1;
@@ -345,6 +368,50 @@ struct intel_device_info {
u8 has_llc:1;
};
+enum i915_cache_level {
+ I915_CACHE_NONE = 0,
+ I915_CACHE_LLC,
+ I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
+};
+
+/* The Graphics Translation Table is the way in which GEN hardware translates a
+ * Graphics Virtual Address into a Physical Address. In addition to the normal
+ * collateral associated with any va->pa translations GEN hardware also has a
+ * portion of the GTT which can be mapped by the CPU and remain both coherent
+ * and correct (in cases like swizzling). That region is referred to as GMADR in
+ * the spec.
+ */
+struct i915_gtt {
+ unsigned long start; /* Start offset of used GTT */
+ size_t total; /* Total size GTT can map */
+ size_t stolen_size; /* Total size of stolen memory */
+
+ unsigned long mappable_end; /* End offset that we can CPU map */
+ struct io_mapping *mappable; /* Mapping to our CPU mappable region */
+ phys_addr_t mappable_base; /* PA of our GMADR */
+
+ /** "Graphics Stolen Memory" holds the global PTEs */
+ void __iomem *gsm;
+
+ bool do_idle_maps;
+ dma_addr_t scratch_page_dma;
+ struct page *scratch_page;
+
+ /* global gtt ops */
+ int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
+ size_t *stolen, phys_addr_t *mappable_base,
+ unsigned long *mappable_end);
+ void (*gtt_remove)(struct drm_device *dev);
+ void (*gtt_clear_range)(struct drm_device *dev,
+ unsigned int first_entry,
+ unsigned int num_entries);
+ void (*gtt_insert_entries)(struct drm_device *dev,
+ struct sg_table *st,
+ unsigned int pg_start,
+ enum i915_cache_level cache_level);
+};
+#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
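/*
 * Sketch, not driver code: what gtt_total_entries() above works out to for a
 * hypothetical 2 GiB GTT with 4 KiB pages (PAGE_SHIFT assumed to be 12).
 */
#include <stddef.h>
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12

int main(void)
{
	size_t total = (size_t)2 << 30;			/* 2 GiB of GTT address space */
	size_t entries = total >> SKETCH_PAGE_SHIFT;	/* one PTE per 4 KiB page */

	printf("%zu PTEs\n", entries);			/* 524288 */
	return 0;
}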
+
#define I915_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES 1024
struct i915_hw_ppgtt {
@@ -354,6 +421,16 @@ struct i915_hw_ppgtt {
uint32_t pd_offset;
dma_addr_t *pt_dma_addr;
dma_addr_t scratch_page_dma_addr;
+
+ /* pte functions, mirroring the interface of the global gtt. */
+ void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
+ unsigned int first_entry,
+ unsigned int num_entries);
+ void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
+ struct sg_table *st,
+ unsigned int pg_start,
+ enum i915_cache_level cache_level);
+ void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
};
@@ -580,6 +657,9 @@ struct intel_gen6_power_mgmt {
struct mutex hw_lock;
};
+/* defined in intel_pm.c */
+extern spinlock_t mchdev_lock;
+
struct intel_ilk_power_mgmt {
u8 cur_delay;
u8 min_delay;
@@ -620,8 +700,162 @@ struct intel_l3_parity {
struct work_struct error_work;
};
+struct i915_gem_mm {
+ /** Memory allocator for GTT stolen memory */
+ struct drm_mm stolen;
+ /** Memory allocator for GTT */
+ struct drm_mm gtt_space;
+ /** List of all objects in gtt_space. Used to restore gtt
+ * mappings on resume */
+ struct list_head bound_list;
+ /**
+ * List of objects which are not bound to the GTT (thus
+ * are idle and not used by the GPU) but still have
+ * (presumably uncached) pages still attached.
+ */
+ struct list_head unbound_list;
+
+ /** Usable portion of the GTT for GEM */
+ unsigned long stolen_base; /* limited to low memory (32-bit) */
+
+ int gtt_mtrr;
+
+ /** PPGTT used for aliasing the PPGTT with the GTT */
+ struct i915_hw_ppgtt *aliasing_ppgtt;
+
+ struct shrinker inactive_shrinker;
+ bool shrinker_no_lock_stealing;
+
+ /**
+ * List of objects currently involved in rendering.
+ *
+ * Includes buffers having the contents of their GPU caches
+ * flushed, not necessarily primitives. last_rendering_seqno
+ * represents when the rendering involved will be completed.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * LRU list of objects which are not in the ringbuffer and
+ * are ready to unbind, but are still in the GTT.
+ *
+ * last_rendering_seqno is 0 while an object is in this list.
+ *
+ * A reference is not held on the buffer while on this list,
+ * as merely being GTT-bound shouldn't prevent its being
+ * freed, and we'll pull it off the list in the free path.
+ */
+ struct list_head inactive_list;
+
+ /** LRU list of objects with fence regs on them. */
+ struct list_head fence_list;
+
+ /**
+ * We leave the user IRQ off as much as possible,
+ * but this means that requests will finish and never
+ * be retired once the system goes idle. Set a timer to
+ * fire periodically while the ring is running. When it
+ * fires, go retire requests.
+ */
+ struct delayed_work retire_work;
+
+ /**
+ * Are we in a non-interruptible section of code like
+ * modesetting?
+ */
+ bool interruptible;
+
+ /**
+ * Flag if the X Server, and thus DRM, is not currently in
+ * control of the device.
+ *
+ * This is set between LeaveVT and EnterVT. It needs to be
+ * replaced with a semaphore. It also needs to be
+ * transitioned away from for kernel modesetting.
+ */
+ int suspended;
+
+ /** Bit 6 swizzling required for X tiling */
+ uint32_t bit_6_swizzle_x;
+ /** Bit 6 swizzling required for Y tiling */
+ uint32_t bit_6_swizzle_y;
+
+ /* storage for physical objects */
+ struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
+
+ /* accounting, useful for userland debugging */
+ size_t object_memory;
+ u32 object_count;
+};
+
+struct i915_gpu_error {
+ /* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
+#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+ struct timer_list hangcheck_timer;
+ int hangcheck_count;
+ uint32_t last_acthd[I915_NUM_RINGS];
+ uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
+
+ /* For reset and error_state handling. */
+ spinlock_t lock;
+ /* Protected by the above dev->gpu_error.lock. */
+ struct drm_i915_error_state *first_error;
+ struct work_struct work;
+
+ unsigned long last_reset;
+
+ /**
+ * State variable and reset counter controlling the reset flow
+ *
+ * Upper bits are for the reset counter. This counter is used by the
+ * wait_seqno code to notice, race-free, that a reset event happened and
+ * that it needs to restart the entire ioctl (since most likely the
+ * seqno it waited for won't ever signal anytime soon).
+ *
+ * This is important for lock-free wait paths, where no contended lock
+ * naturally enforces the correct ordering between the bail-out of the
+ * waiter and the gpu reset work code.
+ *
+ * Lowest bit controls the reset state machine: Set means a reset is in
+ * progress. This state will (presuming we don't have any bugs) decay
+ * into either unset (successful reset) or the special WEDGED value (hw
+ * terminally sour). All waiters on the reset_queue will be woken when
+ * that happens.
+ */
+ atomic_t reset_counter;
+
+ /**
+ * Special values/flags for reset_counter
+ *
+ * Note that the code relies on
+ * I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
+ * being true.
+ */
+#define I915_RESET_IN_PROGRESS_FLAG 1
+#define I915_WEDGED 0xffffffff
+
+ /**
+ * Waitqueue to signal when the reset has completed. Used by clients
+ * that wait for a pending gpu reset to settle.
+ */
+ wait_queue_head_t reset_queue;
+
+ /* For gpu hang simulation. */
+ unsigned int stop_rings;
+};
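/*
 * Sketch, not driver code: one way to read the reset_counter encoding
 * described in the comment above.  The helper names and the increment
 * scheme here are illustrative assumptions; only the bit layout (low bit
 * set while a reset is in progress, all-ones means wedged, the upper bits
 * count resets) comes from the struct's documentation.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_RESET_IN_PROGRESS_FLAG	1u
#define SKETCH_WEDGED			0xffffffffu

static int sketch_reset_in_progress(uint32_t counter)
{
	return counter & SKETCH_RESET_IN_PROGRESS_FLAG;
}

static int sketch_terminally_wedged(uint32_t counter)
{
	return counter == SKETCH_WEDGED;
}

int main(void)
{
	uint32_t counter = 0;

	counter += 1;	/* reset starts: low bit becomes 1 */
	printf("in progress: %d\n", sketch_reset_in_progress(counter));

	counter += 1;	/* reset succeeds: low bit clears, count advances */
	printf("in progress: %d, completed resets: %u\n",
	       sketch_reset_in_progress(counter), counter >> 1);

	printf("wedged: %d\n", sketch_terminally_wedged(SKETCH_WEDGED));
	return 0;
}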
+
+enum modeset_restore {
+ MODESET_ON_LID_OPEN,
+ MODESET_DONE,
+ MODESET_SUSPENDED,
+};
+
typedef struct drm_i915_private {
struct drm_device *dev;
+ struct kmem_cache *slab;
const struct intel_device_info *info;
@@ -636,10 +870,11 @@ typedef struct drm_i915_private {
/** forcewake_count is protected by gt_lock */
unsigned forcewake_count;
/** gt_lock is also taken in irq contexts. */
- struct spinlock gt_lock;
+ spinlock_t gt_lock;
struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
+
/** gmbus_mutex protects against concurrent usage of the single hw gmbus
* controller on different i2c buses. */
struct mutex gmbus_mutex;
@@ -649,9 +884,11 @@ typedef struct drm_i915_private {
*/
uint32_t gpio_mmio_base;
+ wait_queue_head_t gmbus_wait_queue;
+
struct pci_dev *bridge_dev;
struct intel_ring_buffer ring[I915_NUM_RINGS];
- uint32_t next_seqno;
+ uint32_t last_seqno, next_seqno;
drm_dma_handle_t *status_page_dmah;
struct resource mch_res;
@@ -661,31 +898,24 @@ typedef struct drm_i915_private {
/* protects the irq masks */
spinlock_t irq_lock;
+ /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
+ struct pm_qos_request pm_qos;
+
/* DPIO indirect register protection */
- spinlock_t dpio_lock;
+ struct mutex dpio_lock;
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 pipestat[2];
u32 irq_mask;
u32 gt_irq_mask;
- u32 pch_irq_mask;
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
+ bool enable_hotplug_processing;
int num_pipe;
int num_pch_pll;
- /* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
-#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
- struct timer_list hangcheck_timer;
- int hangcheck_count;
- uint32_t last_acthd[I915_NUM_RINGS];
- uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
-
- unsigned int stop_rings;
-
unsigned long cfb_size;
unsigned int cfb_fb;
enum plane cfb_plane;
@@ -696,7 +926,7 @@ typedef struct drm_i915_private {
/* overlay */
struct intel_overlay *overlay;
- bool sprite_scaling_enabled;
+ unsigned int sprite_scaling_enabled;
/* LVDS info */
int backlight_level; /* restore backlight to this value */
@@ -713,7 +943,6 @@ typedef struct drm_i915_private {
unsigned int display_clock_mode:1;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
- unsigned int lvds_val; /* used for checking LVDS channel mode */
struct {
int rate;
int lanes;
@@ -734,11 +963,6 @@ typedef struct drm_i915_private {
unsigned int fsb_freq, mem_freq, is_ddr3;
- spinlock_t error_lock;
- /* Protected by dev->error_lock. */
- struct drm_i915_error_state *first_error;
- struct work_struct error_work;
- struct completion error_completion;
struct workqueue_struct *wq;
/* Display functions */
@@ -750,115 +974,12 @@ typedef struct drm_i915_private {
unsigned long quirks;
- /* Register state */
- bool modeset_on_lid;
+ enum modeset_restore modeset_restore;
+ struct mutex modeset_restore_lock;
- struct {
- /** Bridge to intel-gtt-ko */
- struct intel_gtt *gtt;
- /** Memory allocator for GTT stolen memory */
- struct drm_mm stolen;
- /** Memory allocator for GTT */
- struct drm_mm gtt_space;
- /** List of all objects in gtt_space. Used to restore gtt
- * mappings on resume */
- struct list_head bound_list;
- /**
- * List of objects which are not bound to the GTT (thus
- * are idle and not used by the GPU) but still have
- * (presumably uncached) pages still attached.
- */
- struct list_head unbound_list;
-
- /** Usable portion of the GTT for GEM */
- unsigned long gtt_start;
- unsigned long gtt_mappable_end;
- unsigned long gtt_end;
-
- struct io_mapping *gtt_mapping;
- phys_addr_t gtt_base_addr;
- int gtt_mtrr;
-
- /** PPGTT used for aliasing the PPGTT with the GTT */
- struct i915_hw_ppgtt *aliasing_ppgtt;
-
- struct shrinker inactive_shrinker;
- bool shrinker_no_lock_stealing;
-
- /**
- * List of objects currently involved in rendering.
- *
- * Includes buffers having the contents of their GPU caches
- * flushed, not necessarily primitives. last_rendering_seqno
- * represents when the rendering involved will be completed.
- *
- * A reference is held on the buffer while on this list.
- */
- struct list_head active_list;
-
- /**
- * LRU list of objects which are not in the ringbuffer and
- * are ready to unbind, but are still in the GTT.
- *
- * last_rendering_seqno is 0 while an object is in this list.
- *
- * A reference is not held on the buffer while on this list,
- * as merely being GTT-bound shouldn't prevent its being
- * freed, and we'll pull it off the list in the free path.
- */
- struct list_head inactive_list;
-
- /** LRU list of objects with fence regs on them. */
- struct list_head fence_list;
-
- /**
- * We leave the user IRQ off as much as possible,
- * but this means that requests will finish and never
- * be retired once the system goes idle. Set a timer to
- * fire periodically while the ring is running. When it
- * fires, go retire requests.
- */
- struct delayed_work retire_work;
-
- /**
- * Are we in a non-interruptible section of code like
- * modesetting?
- */
- bool interruptible;
-
- /**
- * Flag if the X Server, and thus DRM, is not currently in
- * control of the device.
- *
- * This is set between LeaveVT and EnterVT. It needs to be
- * replaced with a semaphore. It also needs to be
- * transitioned away from for kernel modesetting.
- */
- int suspended;
-
- /**
- * Flag if the hardware appears to be wedged.
- *
- * This is set when attempts to idle the device timeout.
- * It prevents command submission from occurring and makes
- * every pending request fail
- */
- atomic_t wedged;
-
- /** Bit 6 swizzling required for X tiling */
- uint32_t bit_6_swizzle_x;
- /** Bit 6 swizzling required for Y tiling */
- uint32_t bit_6_swizzle_y;
-
- /* storage for physical objects */
- struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
-
- /* accounting, useful for userland debugging */
- size_t gtt_total;
- size_t mappable_gtt_total;
- size_t object_memory;
- u32 object_count;
- } mm;
+ struct i915_gtt gtt;
+
+ struct i915_gem_mm mm;
/* Kernel Modesetting */
@@ -900,7 +1021,7 @@ typedef struct drm_i915_private {
struct drm_mm_node *compressed_fb;
struct drm_mm_node *compressed_llb;
- unsigned long last_gpu_reset;
+ struct i915_gpu_error gpu_error;
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
@@ -919,7 +1040,7 @@ typedef struct drm_i915_private {
bool hw_contexts_disabled;
uint32_t hw_context_size;
- bool fdi_rx_polarity_reversed;
+ u32 fdi_rx_config;
struct i915_suspend_saved_registers regfile;
@@ -940,11 +1061,7 @@ enum hdmi_force_audio {
HDMI_AUDIO_ON, /* force turn on HDMI audio */
};
-enum i915_cache_level {
- I915_CACHE_NONE = 0,
- I915_CACHE_LLC,
- I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
-};
+#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
struct drm_i915_gem_object_ops {
/* Interface between the GEM object and its backing storage.
@@ -971,6 +1088,8 @@ struct drm_i915_gem_object {
/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
+ /** Stolen memory for this object, instead of being backed by shmem. */
+ struct drm_mm_node *stolen;
struct list_head gtt_list;
/** This object's place on the active/inactive lists */
@@ -1096,13 +1215,6 @@ struct drm_i915_gem_object {
/** for phy allocated objects */
struct drm_i915_gem_phys_object *phys_obj;
-
- /**
- * Number of crtcs where this object is currently the fb, but
- * will be page flipped away on the next vblank. When it
- * reaches 0, dev_priv->pending_flip_queue will be woken up.
- */
- atomic_t pending_flip;
};
#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
@@ -1141,7 +1253,7 @@ struct drm_i915_gem_request {
struct drm_i915_file_private {
struct {
- struct spinlock lock;
+ spinlock_t lock;
struct list_head request_list;
} mm;
struct idr context_idr;
@@ -1227,6 +1339,8 @@ struct drm_i915_file_private {
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
+#define HAS_DDI(dev) (IS_HASWELL(dev))
+
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
@@ -1323,6 +1437,7 @@ void i915_hangcheck_elapsed(unsigned long data);
void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
+extern void intel_hpd_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
extern void intel_gt_reset(struct drm_device *dev);
@@ -1391,18 +1506,22 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
+void *i915_gem_object_alloc(struct drm_device *dev);
+void i915_gem_object_free(struct drm_i915_gem_object *obj);
int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
+
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
bool map_and_fenceable,
bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
@@ -1454,8 +1573,8 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}
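/*
 * Sketch, not driver code: why the signed-difference comparison in
 * i915_seqno_passed() above stays correct across 32-bit seqno wraparound.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	assert(seqno_passed(2, 1));
	assert(!seqno_passed(1, 2));
	/* 1 is "after" 0xffffffff once the counter has wrapped */
	assert(seqno_passed(1, 0xffffffffu));
	printf("wrap-safe comparison holds\n");
	return 0;
}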
-extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
-
+int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
+int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
@@ -1481,8 +1600,18 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
-int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
+static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
+{
+ return unlikely(atomic_read(&error->reset_counter)
+ & I915_RESET_IN_PROGRESS_FLAG);
+}
+
+static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
+{
+ return atomic_read(&error->reset_counter) == I915_WEDGED;
+}
void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
@@ -1523,9 +1652,10 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
uint32_t
-i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
- uint32_t size,
- int tiling_mode);
+i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
+uint32_t
+i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
+ int tiling_mode, bool fenced);
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
@@ -1548,7 +1678,6 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
/* i915_gem_gtt.c */
-int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
@@ -1562,12 +1691,10 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
-void i915_gem_init_global_gtt(struct drm_device *dev,
- unsigned long start,
- unsigned long mappable_end,
- unsigned long end);
+void i915_gem_init_global_gtt(struct drm_device *dev);
+void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
+ unsigned long mappable_end, unsigned long end);
int i915_gem_gtt_init(struct drm_device *dev);
-void i915_gem_gtt_fini(struct drm_device *dev);
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen < 6)
@@ -1585,9 +1712,22 @@ int i915_gem_evict_everything(struct drm_device *dev);
/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
+void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
+void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
/* i915_gem_tiling.c */
+static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
+{
+ drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+
+ return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+ obj->tiling_mode != I915_TILING_NONE;
+}
+
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
@@ -1613,9 +1753,9 @@ void i915_debugfs_cleanup(struct drm_minor *minor);
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
-/* i915_suspend.c */
-extern int i915_save_state(struct drm_device *dev);
-extern int i915_restore_state(struct drm_device *dev);
+/* i915_ums.c */
+void i915_save_display_reg(struct drm_device *dev);
+void i915_restore_display_reg(struct drm_device *dev);
/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
@@ -1672,6 +1812,7 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
bool force_restore);
+extern void i915_redisable_vga(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
@@ -1744,5 +1885,19 @@ __i915_write(64, q)
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
+/* "Broadcast RGB" property */
+#define INTEL_BROADCAST_RGB_AUTO 0
+#define INTEL_BROADCAST_RGB_FULL 1
+#define INTEL_BROADCAST_RGB_LIMITED 2
+
+static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
+{
+ if (HAS_PCH_SPLIT(dev))
+ return CPU_VGACNTRL;
+ else if (IS_VALLEYVIEW(dev))
+ return VLV_VGACNTRL;
+ else
+ return VGACNTRL;
+}
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8febea6daa0..8413ffced81 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -87,47 +87,43 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
}
static int
-i915_gem_wait_for_error(struct drm_device *dev)
+i915_gem_wait_for_error(struct i915_gpu_error *error)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct completion *x = &dev_priv->error_completion;
- unsigned long flags;
int ret;
- if (!atomic_read(&dev_priv->mm.wedged))
+#define EXIT_COND (!i915_reset_in_progress(error))
+ if (EXIT_COND)
return 0;
+ /* GPU is already declared terminally dead, give up. */
+ if (i915_terminally_wedged(error))
+ return -EIO;
+
/*
* Only wait 10 seconds for the gpu reset to complete to avoid hanging
* userspace. If it takes that long something really bad is going on and
* we should simply try to bail out and fail as gracefully as possible.
*/
- ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
+ ret = wait_event_interruptible_timeout(error->reset_queue,
+ EXIT_COND,
+ 10*HZ);
if (ret == 0) {
DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
return -EIO;
} else if (ret < 0) {
return ret;
}
+#undef EXIT_COND
- if (atomic_read(&dev_priv->mm.wedged)) {
- /* GPU is hung, bump the completion count to account for
- * the token we just consumed so that we never hit zero and
- * end up waiting upon a subsequent completion event that
- * will never happen.
- */
- spin_lock_irqsave(&x->wait.lock, flags);
- x->done++;
- spin_unlock_irqrestore(&x->wait.lock, flags);
- }
return 0;
}
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ret = i915_gem_wait_for_error(dev);
+ ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
if (ret)
return ret;
@@ -149,6 +145,7 @@ int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_init *args = data;
if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -163,8 +160,9 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
mutex_lock(&dev->struct_mutex);
- i915_gem_init_global_gtt(dev, args->gtt_start,
- args->gtt_end, args->gtt_end);
+ i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
+ args->gtt_end);
+ dev_priv->gtt.mappable_end = args->gtt_end;
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -186,12 +184,24 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
pinned += obj->gtt_space->size;
mutex_unlock(&dev->struct_mutex);
- args->aper_size = dev_priv->mm.gtt_total;
+ args->aper_size = dev_priv->gtt.total;
args->aper_available_size = args->aper_size - pinned;
return 0;
}
+void *i915_gem_object_alloc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
+}
+
+void i915_gem_object_free(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ kmem_cache_free(dev_priv->slab, obj);
+}
+
static int
i915_gem_create(struct drm_file *file,
struct drm_device *dev,
@@ -215,7 +225,7 @@ i915_gem_create(struct drm_file *file,
if (ret) {
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
- kfree(obj);
+ i915_gem_object_free(obj);
return ret;
}
@@ -259,14 +269,6 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
args->size, &args->handle);
}
-static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
-{
- drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
-
- return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
- obj->tiling_mode != I915_TILING_NONE;
-}
-
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
const char *gpu_vaddr, int gpu_offset,
@@ -407,7 +409,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
loff_t offset;
int shmem_page_offset, page_length, ret = 0;
int obj_do_bit17_swizzling, page_do_bit17_swizzling;
- int hit_slowpath = 0;
int prefaulted = 0;
int needs_clflush = 0;
struct scatterlist *sg;
@@ -469,7 +470,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
if (ret == 0)
goto next_page;
- hit_slowpath = 1;
mutex_unlock(&dev->struct_mutex);
if (!prefaulted) {
@@ -502,12 +502,6 @@ next_page:
out:
i915_gem_object_unpin_pages(obj);
- if (hit_slowpath) {
- /* Fixup: Kill any reinstated backing storage pages */
- if (obj->madv == __I915_MADV_PURGED)
- i915_gem_object_truncate(obj);
- }
-
return ret;
}
@@ -641,7 +635,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
* source page isn't available. Return the error and we'll
* retry in the slow path.
*/
- if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
+ if (fast_user_write(dev_priv->gtt.mappable, page_base,
page_offset, user_data, page_length)) {
ret = -EFAULT;
goto out_unpin;
@@ -838,12 +832,13 @@ out:
i915_gem_object_unpin_pages(obj);
if (hit_slowpath) {
- /* Fixup: Kill any reinstated backing storage pages */
- if (obj->madv == __I915_MADV_PURGED)
- i915_gem_object_truncate(obj);
- /* and flush dirty cachelines in case the object isn't in the cpu write
- * domain anymore. */
- if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ /*
+ * Fixup: Flush cpu caches in case we didn't flush the dirty
+ * cachelines in-line while writing and the object moved
+ * out of the cpu write domain while we've dropped the lock.
+ */
+ if (!needs_clflush_after &&
+ obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
i915_gem_clflush_object(obj);
i915_gem_chipset_flush(dev);
}
@@ -940,26 +935,17 @@ unlock:
}
int
-i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible)
{
- if (atomic_read(&dev_priv->mm.wedged)) {
- struct completion *x = &dev_priv->error_completion;
- bool recovery_complete;
- unsigned long flags;
-
- /* Give the error handler a chance to run. */
- spin_lock_irqsave(&x->wait.lock, flags);
- recovery_complete = x->done > 0;
- spin_unlock_irqrestore(&x->wait.lock, flags);
-
+ if (i915_reset_in_progress(error)) {
/* Non-interruptible callers can't handle -EAGAIN, hence return
* -EIO unconditionally for these. */
if (!interruptible)
return -EIO;
- /* Recovery complete, but still wedged means reset failure. */
- if (recovery_complete)
+ /* Recovery complete, but the reset failed ... */
+ if (i915_terminally_wedged(error))
return -EIO;
return -EAGAIN;
@@ -990,13 +976,22 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
* __wait_seqno - wait until execution of seqno has finished
* @ring: the ring expected to report seqno
* @seqno: duh!
+ * @reset_counter: reset sequence associated with the given seqno
* @interruptible: do an interruptible wait (normally yes)
* @timeout: in - how long to wait (NULL forever); out - how much time remaining
*
+ * Note: It is of utmost importance that the passed in seqno and reset_counter
+ * values have been read by the caller in an smp safe manner. Where read-side
+ * locks are involved, it is sufficient to read the reset_counter before
+ * unlocking the lock that protects the seqno. For lockless tricks, the
+ * reset_counter _must_ be read before, and an appropriate smp_rmb must be
+ * inserted.
+ *
 * Returns 0 if the seqno was found within the allotted time. Else returns the
* errno with remaining time filled in timeout argument.
*/
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
+ unsigned reset_counter,
bool interruptible, struct timespec *timeout)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
@@ -1026,7 +1021,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
#define EXIT_COND \
(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
- atomic_read(&dev_priv->mm.wedged))
+ i915_reset_in_progress(&dev_priv->gpu_error) || \
+ reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
do {
if (interruptible)
end = wait_event_interruptible_timeout(ring->irq_queue,
@@ -1036,7 +1032,14 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
end = wait_event_timeout(ring->irq_queue, EXIT_COND,
timeout_jiffies);
- ret = i915_gem_check_wedge(dev_priv, interruptible);
+ /* We need to check whether any gpu reset happened in between
+ * the caller grabbing the seqno and now ... */
+ if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+ end = -EAGAIN;
+
+	/* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly
+	 * gone. */
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
if (ret)
end = ret;
} while (end == 0 && wait_forever);
@@ -1082,7 +1085,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(seqno == 0);
- ret = i915_gem_check_wedge(dev_priv, interruptible);
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
if (ret)
return ret;
@@ -1090,7 +1093,9 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
if (ret)
return ret;
- return __wait_seqno(ring, seqno, interruptible, NULL);
+ return __wait_seqno(ring, seqno,
+ atomic_read(&dev_priv->gpu_error.reset_counter),
+ interruptible, NULL);
}
/**
@@ -1137,6 +1142,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = obj->ring;
+ unsigned reset_counter;
u32 seqno;
int ret;
@@ -1147,7 +1153,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
if (seqno == 0)
return 0;
- ret = i915_gem_check_wedge(dev_priv, true);
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
if (ret)
return ret;
@@ -1155,8 +1161,9 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
if (ret)
return ret;
+ reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
- ret = __wait_seqno(ring, seqno, true, NULL);
+ ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
mutex_lock(&dev->struct_mutex);
i915_gem_retire_requests_ring(ring);
@@ -1344,6 +1351,12 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
trace_i915_gem_object_fault(obj, page_offset, true, write);
+ /* Access to snoopable pages through the GTT is incoherent. */
+ if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
/* Now bind it into the GTT if needed */
ret = i915_gem_object_pin(obj, 0, true, false);
if (ret)
@@ -1359,7 +1372,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
obj->fault_mappable = true;
- pfn = ((dev_priv->mm.gtt_base_addr + obj->gtt_offset) >> PAGE_SHIFT) +
+ pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
page_offset;
/* Finally, remap it using the new GTT offset */
@@ -1374,7 +1387,7 @@ out:
/* If this -EIO is due to a gpu hang, give the reset code a
* chance to clean up the mess. Otherwise return the proper
* SIGBUS. */
- if (!atomic_read(&dev_priv->mm.wedged))
+ if (i915_terminally_wedged(&dev_priv->gpu_error))
return VM_FAULT_SIGBUS;
case -EAGAIN:
/* Give the error handler a chance to run and move the
@@ -1432,7 +1445,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
obj->fault_mappable = false;
}
-static uint32_t
+uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
uint32_t gtt_size;
@@ -1460,16 +1473,15 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
* Return the required GTT alignment for an object, taking into account
* potential fence register mapping.
*/
-static uint32_t
-i915_gem_get_gtt_alignment(struct drm_device *dev,
- uint32_t size,
- int tiling_mode)
+uint32_t
+i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
+ int tiling_mode, bool fenced)
{
/*
* Minimum alignment is 4k (GTT page size), but might be greater
* if a fence register is needed for the object.
*/
- if (INTEL_INFO(dev)->gen >= 4 ||
+ if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
tiling_mode == I915_TILING_NONE)
return 4096;
@@ -1480,35 +1492,6 @@ i915_gem_get_gtt_alignment(struct drm_device *dev,
return i915_gem_get_gtt_size(dev, size, tiling_mode);
}
-/**
- * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
- * unfenced object
- * @dev: the device
- * @size: size of the object
- * @tiling_mode: tiling mode of the object
- *
- * Return the required GTT alignment for an object, only taking into account
- * unfenced tiled surface requirements.
- */
-uint32_t
-i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
- uint32_t size,
- int tiling_mode)
-{
- /*
- * Minimum alignment is 4k (GTT page size) for sane hw.
- */
- if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
- tiling_mode == I915_TILING_NONE)
- return 4096;
-
- /* Previous hardware however needs to be aligned to a power-of-two
- * tile height. The simplest method for determining this is to reuse
- * the power-of-tile object size.
- */
- return i915_gem_get_gtt_size(dev, size, tiling_mode);
-}
-
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
@@ -1571,7 +1554,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
goto unlock;
}
- if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
+ if (obj->base.size > dev_priv->gtt.mappable_end) {
ret = -E2BIG;
goto out;
}
@@ -1689,7 +1672,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
kfree(obj->pages);
}
-static int
+int
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
const struct drm_i915_gem_object_ops *ops = obj->ops;
@@ -1862,6 +1845,11 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
if (obj->pages)
return 0;
+ if (obj->madv != I915_MADV_WILLNEED) {
+ DRM_ERROR("Attempting to obtain a purgeable object\n");
+ return -EINVAL;
+ }
+
BUG_ON(obj->pages_pin_count);
ret = ops->get_pages(obj);
@@ -1918,9 +1906,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
BUG_ON(!obj->active);
- if (obj->pin_count) /* are we a framebuffer? */
- intel_mark_fb_idle(obj);
-
list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
list_del_init(&obj->ring_list);
@@ -1940,30 +1925,24 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
}
static int
-i915_gem_handle_seqno_wrap(struct drm_device *dev)
+i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
int ret, i, j;
- /* The hardware uses various monotonic 32-bit counters, if we
- * detect that they will wraparound we need to idle the GPU
- * and reset those counters.
- */
- ret = 0;
+ /* Carefully retire all requests without writing to the rings */
for_each_ring(ring, dev_priv, i) {
- for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
- ret |= ring->sync_seqno[j] != 0;
+ ret = intel_ring_idle(ring);
+ if (ret)
+ return ret;
}
- if (ret == 0)
- return ret;
-
- ret = i915_gpu_idle(dev);
- if (ret)
- return ret;
-
i915_gem_retire_requests(dev);
+
+ /* Finally reset hw state */
for_each_ring(ring, dev_priv, i) {
+ intel_ring_init_seqno(ring, seqno);
+
for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
ring->sync_seqno[j] = 0;
}
@@ -1971,6 +1950,32 @@ i915_gem_handle_seqno_wrap(struct drm_device *dev)
return 0;
}
+int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (seqno == 0)
+ return -EINVAL;
+
+	/* The HWS page seqno needs to be set to one less than the
+	 * value we will inject into the ring
+	 */
+ ret = i915_gem_init_seqno(dev, seqno - 1);
+ if (ret)
+ return ret;
+
+ /* Carefully set the last_seqno value so that wrap
+ * detection still works
+ */
+ dev_priv->next_seqno = seqno;
+ dev_priv->last_seqno = seqno - 1;
+ if (dev_priv->last_seqno == 0)
+ dev_priv->last_seqno--;
+
+ return 0;
+}
+
int
i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
{
@@ -1978,14 +1983,14 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
/* reserve 0 for non-seqno */
if (dev_priv->next_seqno == 0) {
- int ret = i915_gem_handle_seqno_wrap(dev);
+ int ret = i915_gem_init_seqno(dev, 0);
if (ret)
return ret;
dev_priv->next_seqno = 1;
}
- *seqno = dev_priv->next_seqno++;
+ *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
return 0;
}
@@ -2052,7 +2057,7 @@ i915_add_request(struct intel_ring_buffer *ring,
if (!dev_priv->mm.suspended) {
if (i915_enable_hangcheck) {
- mod_timer(&dev_priv->hangcheck_timer,
+ mod_timer(&dev_priv->gpu_error.hangcheck_timer,
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
if (was_empty) {
@@ -2317,10 +2322,12 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_wait *args = data;
struct drm_i915_gem_object *obj;
struct intel_ring_buffer *ring = NULL;
struct timespec timeout_stack, *timeout = NULL;
+ unsigned reset_counter;
u32 seqno = 0;
int ret = 0;
@@ -2361,9 +2368,10 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
drm_gem_object_unreference(&obj->base);
+ reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
- ret = __wait_seqno(ring, seqno, true, timeout);
+ ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
if (timeout) {
WARN_ON(!timespec_valid(timeout));
args->timeout_ns = timespec_to_ns(timeout);
@@ -2427,15 +2435,15 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
{
u32 old_write_domain, old_read_domains;
- /* Act a barrier for all accesses through the GTT */
- mb();
-
/* Force a pagefault for domain tracking on next user access */
i915_gem_release_mmap(obj);
if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
return;
+ /* Wait for any direct GTT access to complete */
+ mb();
+
old_read_domains = obj->base.read_domains;
old_write_domain = obj->base.write_domain;
@@ -2454,7 +2462,7 @@ int
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
- int ret = 0;
+ int ret;
if (obj->gtt_space == NULL)
return 0;
@@ -2521,52 +2529,38 @@ int i915_gpu_idle(struct drm_device *dev)
return 0;
}
-static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
- struct drm_i915_gem_object *obj)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint64_t val;
-
- if (obj) {
- u32 size = obj->gtt_space->size;
-
- val = (uint64_t)((obj->gtt_offset + size - 4096) &
- 0xfffff000) << 32;
- val |= obj->gtt_offset & 0xfffff000;
- val |= (uint64_t)((obj->stride / 128) - 1) <<
- SANDYBRIDGE_FENCE_PITCH_SHIFT;
-
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I965_FENCE_TILING_Y_SHIFT;
- val |= I965_FENCE_REG_VALID;
- } else
- val = 0;
-
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
- POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
-}
-
static void i965_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ int fence_reg;
+ int fence_pitch_shift;
uint64_t val;
+ if (INTEL_INFO(dev)->gen >= 6) {
+ fence_reg = FENCE_REG_SANDYBRIDGE_0;
+ fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
+ } else {
+ fence_reg = FENCE_REG_965_0;
+ fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
+ }
+
if (obj) {
u32 size = obj->gtt_space->size;
val = (uint64_t)((obj->gtt_offset + size - 4096) &
0xfffff000) << 32;
val |= obj->gtt_offset & 0xfffff000;
- val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
+ val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
val |= I965_FENCE_REG_VALID;
} else
val = 0;
- I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
- POSTING_READ(FENCE_REG_965_0 + reg * 8);
+ fence_reg += reg * 8;
+ I915_WRITE64(fence_reg, val);
+ POSTING_READ(fence_reg);
}
static void i915_write_fence_reg(struct drm_device *dev, int reg,
@@ -2645,18 +2639,37 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
POSTING_READ(FENCE_REG_830_0 + reg * 4);
}
+inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
+{
+ return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
+}
+
static void i915_gem_write_fence(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Ensure that all CPU reads are completed before installing a fence
+ * and all writes before removing the fence.
+ */
+ if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
+ mb();
+
switch (INTEL_INFO(dev)->gen) {
case 7:
- case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
+ case 6:
case 5:
case 4: i965_write_fence_reg(dev, reg, obj); break;
case 3: i915_write_fence_reg(dev, reg, obj); break;
case 2: i830_write_fence_reg(dev, reg, obj); break;
- default: break;
+ default: BUG();
}
+
+ /* And similarly be paranoid that no direct access to this region
+ * is reordered to before the fence is installed.
+ */
+ if (i915_gem_object_needs_mb(obj))
+ mb();
}
static inline int fence_number(struct drm_i915_private *dev_priv,
@@ -2686,7 +2699,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
}
static int
-i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
+i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
{
if (obj->last_fenced_seqno) {
int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
@@ -2696,12 +2709,6 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
obj->last_fenced_seqno = 0;
}
- /* Ensure that all CPU reads are completed before installing a fence
- * and all writes before removing the fence.
- */
- if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
- mb();
-
obj->fenced_gpu_access = false;
return 0;
}
@@ -2712,7 +2719,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int ret;
- ret = i915_gem_object_flush_fence(obj);
+ ret = i915_gem_object_wait_fence(obj);
if (ret)
return ret;
@@ -2786,7 +2793,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
* will need to serialise the write to the associated fence register?
*/
if (obj->fence_dirty) {
- ret = i915_gem_object_flush_fence(obj);
+ ret = i915_gem_object_wait_fence(obj);
if (ret)
return ret;
}
@@ -2807,7 +2814,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
if (reg->obj) {
struct drm_i915_gem_object *old = reg->obj;
- ret = i915_gem_object_flush_fence(old);
+ ret = i915_gem_object_wait_fence(old);
if (ret)
return ret;
@@ -2830,7 +2837,7 @@ static bool i915_gem_valid_gtt_space(struct drm_device *dev,
/* On non-LLC machines we have to be careful when putting differing
* types of snoopable memory together to avoid the prefetcher
- * crossing memory domains and dieing.
+ * crossing memory domains and dying.
*/
if (HAS_LLC(dev))
return true;
@@ -2908,21 +2915,16 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
bool mappable, fenceable;
int ret;
- if (obj->madv != I915_MADV_WILLNEED) {
- DRM_ERROR("Attempting to bind a purgeable object\n");
- return -EINVAL;
- }
-
fence_size = i915_gem_get_gtt_size(dev,
obj->base.size,
obj->tiling_mode);
fence_alignment = i915_gem_get_gtt_alignment(dev,
obj->base.size,
- obj->tiling_mode);
+ obj->tiling_mode, true);
unfenced_alignment =
- i915_gem_get_unfenced_gtt_alignment(dev,
+ i915_gem_get_gtt_alignment(dev,
obj->base.size,
- obj->tiling_mode);
+ obj->tiling_mode, false);
if (alignment == 0)
alignment = map_and_fenceable ? fence_alignment :
@@ -2938,7 +2940,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
* before evicting everything in a vain attempt to find space.
*/
if (obj->base.size >
- (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
+ (map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) {
DRM_ERROR("Attempting to bind an object larger than the aperture\n");
return -E2BIG;
}
@@ -2959,7 +2961,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
if (map_and_fenceable)
ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
size, alignment, obj->cache_level,
- 0, dev_priv->mm.gtt_mappable_end);
+ 0, dev_priv->gtt.mappable_end);
else
ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
size, alignment, obj->cache_level);
@@ -2999,7 +3001,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
(node->start & (fence_alignment - 1)) == 0;
mappable =
- obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
+ obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
obj->map_and_fenceable = mappable && fenceable;
@@ -3019,6 +3021,13 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
if (obj->pages == NULL)
return;
+ /*
+ * Stolen memory is always coherent with the GPU as it is explicitly
+ * marked as wc by the system, or the system is cache-coherent.
+ */
+ if (obj->stolen)
+ return;
+
/* If the GPU is snooping the contents of the CPU cache,
* we do not need to manually clear the CPU cache lines. However,
* the caches are only snooped when the render cache is
@@ -3107,6 +3116,13 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
i915_gem_object_flush_cpu_write_domain(obj);
+ /* Serialise direct access to this object with the barriers for
+ * coherent writes from the GPU, by effectively invalidating the
+ * GTT domain upon first access.
+ */
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
+ mb();
+
old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
@@ -3413,11 +3429,17 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
struct drm_i915_gem_request *request;
struct intel_ring_buffer *ring = NULL;
+ unsigned reset_counter;
u32 seqno = 0;
int ret;
- if (atomic_read(&dev_priv->mm.wedged))
- return -EIO;
+ ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
+ if (ret)
+ return ret;
spin_lock(&file_priv->mm.lock);
list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
@@ -3427,12 +3449,13 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
ring = request->ring;
seqno = request->seqno;
}
+ reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
spin_unlock(&file_priv->mm.lock);
if (seqno == 0)
return 0;
- ret = __wait_seqno(ring, seqno, true, NULL);
+ ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
@@ -3706,14 +3729,14 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
{
struct drm_i915_gem_object *obj;
struct address_space *mapping;
- u32 mask;
+ gfp_t mask;
- obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ obj = i915_gem_object_alloc(dev);
if (obj == NULL)
return NULL;
if (drm_gem_object_init(dev, &obj->base, size) != 0) {
- kfree(obj);
+ i915_gem_object_free(obj);
return NULL;
}
@@ -3785,6 +3808,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
obj->pages_pin_count = 0;
i915_gem_object_put_pages(obj);
i915_gem_object_free_mmap_offset(obj);
+ i915_gem_object_release_stolen(obj);
BUG_ON(obj->pages);
@@ -3795,7 +3819,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
i915_gem_info_remove_obj(dev_priv, obj->base.size);
kfree(obj->bit_17);
- kfree(obj);
+ i915_gem_object_free(obj);
}
int
@@ -3829,7 +3853,7 @@ i915_gem_idle(struct drm_device *dev)
* And not confound mm.suspended!
*/
dev_priv->mm.suspended = 1;
- del_timer_sync(&dev_priv->hangcheck_timer);
+ del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
i915_kernel_lost_context(dev);
i915_gem_cleanup_ringbuffer(dev);
@@ -3848,7 +3872,7 @@ void i915_gem_l3_remap(struct drm_device *dev)
u32 misccpctl;
int i;
- if (!IS_IVYBRIDGE(dev))
+ if (!HAS_L3_GPU_CACHE(dev))
return;
if (!dev_priv->l3_parity.remap_info)
@@ -3891,8 +3915,10 @@ void i915_gem_init_swizzling(struct drm_device *dev)
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
if (IS_GEN6(dev))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
- else
+ else if (IS_GEN7(dev))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
+ else
+ BUG();
}
static bool
@@ -3911,22 +3937,11 @@ intel_enable_blt(struct drm_device *dev)
return true;
}
-int
-i915_gem_init_hw(struct drm_device *dev)
+static int i915_gem_init_rings(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
- return -EIO;
-
- if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
- I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
-
- i915_gem_l3_remap(dev);
-
- i915_gem_init_swizzling(dev);
-
ret = intel_init_render_ring_buffer(dev);
if (ret)
return ret;
@@ -3943,76 +3958,59 @@ i915_gem_init_hw(struct drm_device *dev)
goto cleanup_bsd_ring;
}
- dev_priv->next_seqno = 1;
-
- /*
- * XXX: There was some w/a described somewhere suggesting loading
- * contexts before PPGTT.
- */
- i915_gem_context_init(dev);
- i915_gem_init_ppgtt(dev);
+ ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
+ if (ret)
+ goto cleanup_blt_ring;
return 0;
+cleanup_blt_ring:
+ intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
+
return ret;
}
-static bool
-intel_enable_ppgtt(struct drm_device *dev)
+int
+i915_gem_init_hw(struct drm_device *dev)
{
- if (i915_enable_ppgtt >= 0)
- return i915_enable_ppgtt;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
-#ifdef CONFIG_INTEL_IOMMU
- /* Disable ppgtt on SNB if VT-d is on. */
- if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
- return false;
-#endif
+ if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
+ return -EIO;
- return true;
+ if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
+ I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
+
+ i915_gem_l3_remap(dev);
+
+ i915_gem_init_swizzling(dev);
+
+ ret = i915_gem_init_rings(dev);
+ if (ret)
+ return ret;
+
+ /*
+ * XXX: There was some w/a described somewhere suggesting loading
+ * contexts before PPGTT.
+ */
+ i915_gem_context_init(dev);
+ i915_gem_init_ppgtt(dev);
+
+ return 0;
}
int i915_gem_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long gtt_size, mappable_size;
int ret;
- gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
- mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-
mutex_lock(&dev->struct_mutex);
- if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
- /* PPGTT pdes are stolen from global gtt ptes, so shrink the
- * aperture accordingly when using aliasing ppgtt. */
- gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
-
- i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
-
- ret = i915_gem_init_aliasing_ppgtt(dev);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- } else {
- /* Let GEM Manage all of the aperture.
- *
- * However, leave one page at the end still bound to the scratch
- * page. There are a number of places where the hardware
- * apparently prefetches past the end of the object, and we've
- * seen multiple hangs with the GPU head pointer stuck in a
- * batchbuffer bound at the last page of the aperture. One page
- * should be enough to keep any prefetching inside of the
- * aperture.
- */
- i915_gem_init_global_gtt(dev, 0, mappable_size,
- gtt_size);
- }
-
+ i915_gem_init_global_gtt(dev);
ret = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);
if (ret) {
@@ -4047,9 +4045,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
- if (atomic_read(&dev_priv->mm.wedged)) {
+ if (i915_reset_in_progress(&dev_priv->gpu_error)) {
DRM_ERROR("Reenabling wedged hardware, good luck\n");
- atomic_set(&dev_priv->mm.wedged, 0);
+ atomic_set(&dev_priv->gpu_error.reset_counter, 0);
}
mutex_lock(&dev->struct_mutex);
@@ -4113,8 +4111,14 @@ init_ring_lists(struct intel_ring_buffer *ring)
void
i915_gem_load(struct drm_device *dev)
{
- int i;
drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+
+ dev_priv->slab =
+ kmem_cache_create("i915_gem_object",
+ sizeof(struct drm_i915_gem_object), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
INIT_LIST_HEAD(&dev_priv->mm.active_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
@@ -4127,7 +4131,7 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
- init_completion(&dev_priv->error_completion);
+ init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
if (IS_GEN3(dev)) {
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a3f06bcad55..21177d9df42 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -126,13 +126,8 @@ static int get_context_size(struct drm_device *dev)
static void do_destroy(struct i915_hw_context *ctx)
{
- struct drm_device *dev = ctx->obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
if (ctx->file_priv)
idr_remove(&ctx->file_priv->context_idr, ctx->id);
- else
- BUG_ON(ctx != dev_priv->ring[RCS].default_context);
drm_gem_object_unreference(&ctx->obj->base);
kfree(ctx);
@@ -242,7 +237,6 @@ err_destroy:
void i915_gem_context_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t ctx_size;
if (!HAS_HW_CONTEXTS(dev)) {
dev_priv->hw_contexts_disabled = true;
@@ -254,11 +248,9 @@ void i915_gem_context_init(struct drm_device *dev)
dev_priv->ring[RCS].default_context)
return;
- ctx_size = get_context_size(dev);
- dev_priv->hw_context_size = get_context_size(dev);
- dev_priv->hw_context_size = round_up(dev_priv->hw_context_size, 4096);
+ dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
- if (ctx_size <= 0 || ctx_size > (1<<20)) {
+ if (dev_priv->hw_context_size > (1<<20)) {
dev_priv->hw_contexts_disabled = true;
return;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index abeaafef6d7..6a5af682862 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -281,8 +281,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
if (IS_ERR(attach))
return ERR_CAST(attach);
-
- obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ obj = i915_gem_object_alloc(dev);
if (obj == NULL) {
ret = -ENOMEM;
goto fail_detach;
@@ -290,7 +289,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
if (ret) {
- kfree(obj);
+ i915_gem_object_free(obj);
goto fail_detach;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 776a3225184..c86d5d9356f 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -80,7 +80,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
if (mappable)
drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
min_size, alignment, cache_level,
- 0, dev_priv->mm.gtt_mappable_end);
+ 0, dev_priv->gtt.mappable_end);
else
drm_mm_init_scan(&dev_priv->mm.gtt_space,
min_size, alignment, cache_level);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 26d08bb5821..2f2daebd0ee 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -34,61 +34,133 @@
#include <linux/dma_remapping.h>
struct eb_objects {
+ struct list_head objects;
int and;
- struct hlist_head buckets[0];
+ union {
+ struct drm_i915_gem_object *lut[0];
+ struct hlist_head buckets[0];
+ };
};
static struct eb_objects *
-eb_create(int size)
+eb_create(struct drm_i915_gem_execbuffer2 *args)
{
- struct eb_objects *eb;
- int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
- BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
- while (count > size)
- count >>= 1;
- eb = kzalloc(count*sizeof(struct hlist_head) +
- sizeof(struct eb_objects),
- GFP_KERNEL);
- if (eb == NULL)
- return eb;
-
- eb->and = count - 1;
+ struct eb_objects *eb = NULL;
+
+ if (args->flags & I915_EXEC_HANDLE_LUT) {
+ int size = args->buffer_count;
+ size *= sizeof(struct drm_i915_gem_object *);
+ size += sizeof(struct eb_objects);
+ eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+ }
+
+ if (eb == NULL) {
+ int size = args->buffer_count;
+ int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+ BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
+ while (count > 2*size)
+ count >>= 1;
+ eb = kzalloc(count*sizeof(struct hlist_head) +
+ sizeof(struct eb_objects),
+ GFP_TEMPORARY);
+ if (eb == NULL)
+ return eb;
+
+ eb->and = count - 1;
+ } else
+ eb->and = -args->buffer_count;
+
+ INIT_LIST_HEAD(&eb->objects);
return eb;
}
static void
eb_reset(struct eb_objects *eb)
{
- memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
+ if (eb->and >= 0)
+ memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}
-static void
-eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
+static int
+eb_lookup_objects(struct eb_objects *eb,
+ struct drm_i915_gem_exec_object2 *exec,
+ const struct drm_i915_gem_execbuffer2 *args,
+ struct drm_file *file)
{
- hlist_add_head(&obj->exec_node,
- &eb->buckets[obj->exec_handle & eb->and]);
+ int i;
+
+ spin_lock(&file->table_lock);
+ for (i = 0; i < args->buffer_count; i++) {
+ struct drm_i915_gem_object *obj;
+
+ obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
+ if (obj == NULL) {
+ spin_unlock(&file->table_lock);
+ DRM_DEBUG("Invalid object handle %d at index %d\n",
+ exec[i].handle, i);
+ return -ENOENT;
+ }
+
+ if (!list_empty(&obj->exec_list)) {
+ spin_unlock(&file->table_lock);
+ DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
+ obj, exec[i].handle, i);
+ return -EINVAL;
+ }
+
+ drm_gem_object_reference(&obj->base);
+ list_add_tail(&obj->exec_list, &eb->objects);
+
+ obj->exec_entry = &exec[i];
+ if (eb->and < 0) {
+ eb->lut[i] = obj;
+ } else {
+ uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
+ obj->exec_handle = handle;
+ hlist_add_head(&obj->exec_node,
+ &eb->buckets[handle & eb->and]);
+ }
+ }
+ spin_unlock(&file->table_lock);
+
+ return 0;
}
static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
- struct hlist_head *head;
- struct hlist_node *node;
- struct drm_i915_gem_object *obj;
+ if (eb->and < 0) {
+ if (handle >= -eb->and)
+ return NULL;
+ return eb->lut[handle];
+ } else {
+ struct hlist_head *head;
+ struct hlist_node *node;
- head = &eb->buckets[handle & eb->and];
- hlist_for_each(node, head) {
- obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
- if (obj->exec_handle == handle)
- return obj;
- }
+ head = &eb->buckets[handle & eb->and];
+ hlist_for_each(node, head) {
+ struct drm_i915_gem_object *obj;
- return NULL;
+ obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
+ if (obj->exec_handle == handle)
+ return obj;
+ }
+ return NULL;
+ }
}
static void
eb_destroy(struct eb_objects *eb)
{
+ while (!list_empty(&eb->objects)) {
+ struct drm_i915_gem_object *obj;
+
+ obj = list_first_entry(&eb->objects,
+ struct drm_i915_gem_object,
+ exec_list);
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
+ }
kfree(eb);
}
@@ -150,17 +222,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
reloc->write_domain);
return ret;
}
- if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
- reloc->write_domain != target_obj->pending_write_domain)) {
- DRM_DEBUG("Write domain conflict: "
- "obj %p target %d offset %d "
- "new %08x old %08x\n",
- obj, reloc->target_handle,
- (int) reloc->offset,
- reloc->write_domain,
- target_obj->pending_write_domain);
- return ret;
- }
target_obj->pending_read_domains |= reloc->read_domains;
target_obj->pending_write_domain |= reloc->write_domain;
@@ -220,7 +281,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
/* Map the page containing the relocation we're going to perform. */
reloc->offset += obj->gtt_offset;
- reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
reloc->offset & PAGE_MASK);
reloc_entry = (uint32_t __iomem *)
(reloc_page + (reloc->offset & ~PAGE_MASK));
@@ -299,8 +360,7 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
static int
i915_gem_execbuffer_relocate(struct drm_device *dev,
- struct eb_objects *eb,
- struct list_head *objects)
+ struct eb_objects *eb)
{
struct drm_i915_gem_object *obj;
int ret = 0;
@@ -313,7 +373,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
* lockdep complains vehemently.
*/
pagefault_disable();
- list_for_each_entry(obj, objects, exec_list) {
+ list_for_each_entry(obj, &eb->objects, exec_list) {
ret = i915_gem_execbuffer_relocate_object(obj, eb);
if (ret)
break;
@@ -335,7 +395,8 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring,
+ bool *need_reloc)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
@@ -376,7 +437,20 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
obj->has_aliasing_ppgtt_mapping = 1;
}
- entry->offset = obj->gtt_offset;
+ if (entry->offset != obj->gtt_offset) {
+ entry->offset = obj->gtt_offset;
+ *need_reloc = true;
+ }
+
+ if (entry->flags & EXEC_OBJECT_WRITE) {
+ obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
+ obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
+ }
+
+ if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
+ !obj->has_global_gtt_mapping)
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
+
return 0;
}
@@ -402,7 +476,8 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct drm_file *file,
- struct list_head *objects)
+ struct list_head *objects,
+ bool *need_relocs)
{
struct drm_i915_gem_object *obj;
struct list_head ordered_objects;
@@ -430,7 +505,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
else
list_move_tail(&obj->exec_list, &ordered_objects);
- obj->base.pending_read_domains = 0;
+ obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
obj->base.pending_write_domain = 0;
obj->pending_fenced_gpu_access = false;
}
@@ -470,7 +545,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
(need_mappable && !obj->map_and_fenceable))
ret = i915_gem_object_unbind(obj);
else
- ret = i915_gem_execbuffer_reserve_object(obj, ring);
+ ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
if (ret)
goto err;
}
@@ -480,7 +555,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
if (obj->gtt_space)
continue;
- ret = i915_gem_execbuffer_reserve_object(obj, ring);
+ ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
if (ret)
goto err;
}
@@ -500,21 +575,22 @@ err: /* Decrement pin count for bound objects */
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+ struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file,
struct intel_ring_buffer *ring,
- struct list_head *objects,
struct eb_objects *eb,
- struct drm_i915_gem_exec_object2 *exec,
- int count)
+ struct drm_i915_gem_exec_object2 *exec)
{
struct drm_i915_gem_relocation_entry *reloc;
struct drm_i915_gem_object *obj;
+ bool need_relocs;
int *reloc_offset;
int i, total, ret;
+ int count = args->buffer_count;
/* We may process another execbuffer during the unlock... */
- while (!list_empty(objects)) {
- obj = list_first_entry(objects,
+ while (!list_empty(&eb->objects)) {
+ obj = list_first_entry(&eb->objects,
struct drm_i915_gem_object,
exec_list);
list_del_init(&obj->exec_list);
@@ -582,27 +658,16 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
/* reacquire the objects */
eb_reset(eb);
- for (i = 0; i < count; i++) {
- obj = to_intel_bo(drm_gem_object_lookup(dev, file,
- exec[i].handle));
- if (&obj->base == NULL) {
- DRM_DEBUG("Invalid object handle %d at index %d\n",
- exec[i].handle, i);
- ret = -ENOENT;
- goto err;
- }
-
- list_add_tail(&obj->exec_list, objects);
- obj->exec_handle = exec[i].handle;
- obj->exec_entry = &exec[i];
- eb_add_object(eb, obj);
- }
+ ret = eb_lookup_objects(eb, exec, args, file);
+ if (ret)
+ goto err;
- ret = i915_gem_execbuffer_reserve(ring, file, objects);
+ need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
+ ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
if (ret)
goto err;
- list_for_each_entry(obj, objects, exec_list) {
+ list_for_each_entry(obj, &eb->objects, exec_list) {
int offset = obj->exec_entry - exec;
ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
reloc + reloc_offset[offset]);
@@ -623,44 +688,11 @@ err:
}
static int
-i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
-{
- u32 plane, flip_mask;
- int ret;
-
- /* Check for any pending flips. As we only maintain a flip queue depth
- * of 1, we can simply insert a WAIT for the next display flip prior
- * to executing the batch and avoid stalling the CPU.
- */
-
- for (plane = 0; flips >> plane; plane++) {
- if (((flips >> plane) & 1) == 0)
- continue;
-
- if (plane)
- flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
- else
- flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
- ret = intel_ring_begin(ring, 2);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
- }
-
- return 0;
-}
-
-static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
uint32_t flush_domains = 0;
- uint32_t flips = 0;
int ret;
list_for_each_entry(obj, objects, exec_list) {
@@ -671,18 +703,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj);
- if (obj->base.pending_write_domain)
- flips |= atomic_read(&obj->pending_flip);
-
flush_domains |= obj->base.write_domain;
}
- if (flips) {
- ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
- if (ret)
- return ret;
- }
-
if (flush_domains & I915_GEM_DOMAIN_CPU)
i915_gem_chipset_flush(ring->dev);
@@ -698,6 +721,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
+ if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
+ return false;
+
return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}
@@ -711,6 +737,9 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
int length; /* limited by fault_in_pages_readable() */
+ if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
+ return -EINVAL;
+
/* First check for malicious input causing overflow */
if (exec[i].relocation_count >
INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
@@ -718,9 +747,6 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
length = exec[i].relocation_count *
sizeof(struct drm_i915_gem_relocation_entry);
- if (!access_ok(VERIFY_READ, ptr, length))
- return -EFAULT;
-
/* we may also need to update the presumed offsets */
if (!access_ok(VERIFY_WRITE, ptr, length))
return -EFAULT;
@@ -742,8 +768,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;
- obj->base.read_domains = obj->base.pending_read_domains;
obj->base.write_domain = obj->base.pending_write_domain;
+ if (obj->base.write_domain == 0)
+ obj->base.pending_read_domains |= obj->base.read_domains;
+ obj->base.read_domains = obj->base.pending_read_domains;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
i915_gem_object_move_to_active(obj, ring);
@@ -802,21 +830,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_exec_object2 *exec)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct list_head objects;
struct eb_objects *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring;
u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 exec_start, exec_len;
- u32 mask;
- u32 flags;
+ u32 mask, flags;
int ret, mode, i;
+ bool need_relocs;
- if (!i915_gem_check_execbuffer(args)) {
- DRM_DEBUG("execbuf with invalid offset/length\n");
+ if (!i915_gem_check_execbuffer(args))
return -EINVAL;
- }
ret = validate_exec_list(exec, args->buffer_count);
if (ret)
@@ -937,7 +962,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto pre_mutex_err;
}
- eb = eb_create(args->buffer_count);
+ eb = eb_create(args);
if (eb == NULL) {
mutex_unlock(&dev->struct_mutex);
ret = -ENOMEM;
@@ -945,51 +970,28 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
/* Look up object handles */
- INIT_LIST_HEAD(&objects);
- for (i = 0; i < args->buffer_count; i++) {
- struct drm_i915_gem_object *obj;
-
- obj = to_intel_bo(drm_gem_object_lookup(dev, file,
- exec[i].handle));
- if (&obj->base == NULL) {
- DRM_DEBUG("Invalid object handle %d at index %d\n",
- exec[i].handle, i);
- /* prevent error path from reading uninitialized data */
- ret = -ENOENT;
- goto err;
- }
-
- if (!list_empty(&obj->exec_list)) {
- DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
- obj, exec[i].handle, i);
- ret = -EINVAL;
- goto err;
- }
-
- list_add_tail(&obj->exec_list, &objects);
- obj->exec_handle = exec[i].handle;
- obj->exec_entry = &exec[i];
- eb_add_object(eb, obj);
- }
+ ret = eb_lookup_objects(eb, exec, args, file);
+ if (ret)
+ goto err;
/* take note of the batch buffer before we might reorder the lists */
- batch_obj = list_entry(objects.prev,
+ batch_obj = list_entry(eb->objects.prev,
struct drm_i915_gem_object,
exec_list);
/* Move the objects en-masse into the GTT, evicting if necessary. */
- ret = i915_gem_execbuffer_reserve(ring, file, &objects);
+ need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
+ ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
if (ret)
goto err;
/* The objects are in their final locations, apply the relocations. */
- ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
+ if (need_relocs)
+ ret = i915_gem_execbuffer_relocate(dev, eb);
if (ret) {
if (ret == -EFAULT) {
- ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
- &objects, eb,
- exec,
- args->buffer_count);
+ ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
+ eb, exec);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)
@@ -1011,7 +1013,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
- ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
+ ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
if (ret)
goto err;
@@ -1065,20 +1067,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
- i915_gem_execbuffer_move_to_active(&objects, ring);
+ i915_gem_execbuffer_move_to_active(&eb->objects, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring);
err:
eb_destroy(eb);
- while (!list_empty(&objects)) {
- struct drm_i915_gem_object *obj;
-
- obj = list_first_entry(&objects,
- struct drm_i915_gem_object,
- exec_list);
- list_del_init(&obj->exec_list);
- drm_gem_object_unreference(&obj->base);
- }
mutex_unlock(&dev->struct_mutex);
@@ -1187,7 +1180,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
}
exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (exec2_list == NULL)
exec2_list = drm_malloc_ab(sizeof(*exec2_list),
args->buffer_count);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2c150dee78a..926a1e2dd23 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -44,9 +44,9 @@ typedef uint32_t gtt_pte_t;
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
-static inline gtt_pte_t pte_encode(struct drm_device *dev,
- dma_addr_t addr,
- enum i915_cache_level level)
+static inline gtt_pte_t gen6_pte_encode(struct drm_device *dev,
+ dma_addr_t addr,
+ enum i915_cache_level level)
{
gtt_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -77,7 +77,7 @@ static inline gtt_pte_t pte_encode(struct drm_device *dev,
}
/* PPGTT support for Sandybridge/Gen6 and later */
-static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
+static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
unsigned first_entry,
unsigned num_entries)
{
@@ -87,8 +87,9 @@ static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
- scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
- I915_CACHE_LLC);
+ scratch_pte = gen6_pte_encode(ppgtt->dev,
+ ppgtt->scratch_page_dma_addr,
+ I915_CACHE_LLC);
while (num_entries) {
last_pte = first_pte + num_entries;
@@ -108,10 +109,72 @@ static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
}
}
-int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
+static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
+ struct sg_table *pages,
+ unsigned first_entry,
+ enum i915_cache_level cache_level)
{
+ gtt_pte_t *pt_vaddr;
+ unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
+ unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+ unsigned i, j, m, segment_len;
+ dma_addr_t page_addr;
+ struct scatterlist *sg;
+
+ /* init sg walking */
+ sg = pages->sgl;
+ i = 0;
+ segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
+ m = 0;
+
+ while (i < pages->nents) {
+ pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
+
+ for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
+ page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+ pt_vaddr[j] = gen6_pte_encode(ppgtt->dev, page_addr,
+ cache_level);
+
+ /* grab the next page */
+ if (++m == segment_len) {
+ if (++i == pages->nents)
+ break;
+
+ sg = sg_next(sg);
+ segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
+ m = 0;
+ }
+ }
+
+ kunmap_atomic(pt_vaddr);
+
+ first_pte = 0;
+ act_pd++;
+ }
+}
+
+static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
+{
+ int i;
+
+ if (ppgtt->pt_dma_addr) {
+ for (i = 0; i < ppgtt->num_pd_entries; i++)
+ pci_unmap_page(ppgtt->dev->pdev,
+ ppgtt->pt_dma_addr[i],
+ 4096, PCI_DMA_BIDIRECTIONAL);
+ }
+
+ kfree(ppgtt->pt_dma_addr);
+ for (i = 0; i < ppgtt->num_pd_entries; i++)
+ __free_page(ppgtt->pt_pages[i]);
+ kfree(ppgtt->pt_pages);
+ kfree(ppgtt);
+}
+
+static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_device *dev = ppgtt->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_hw_ppgtt *ppgtt;
unsigned first_pd_entry_in_global_pt;
int i;
int ret = -ENOMEM;
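A note on the new gen6_ppgtt_insert_entries() above: it walks a scatterlist whose segments may each span several pages, emitting one PTE per page and rolling over to the next page-table page every I915_PPGTT_PT_ENTRIES entries. The following is a minimal userspace sketch of that same two-level walk, not kernel code; the segment struct, the PT_ENTRIES value and all names are stand-ins invented for illustration.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PT_ENTRIES 1024	/* stand-in for I915_PPGTT_PT_ENTRIES */

struct segment { uint64_t dma_addr; size_t npages; };

/* Emit one (page table, entry, page address) triple per page, mirroring the
 * nested loop structure of gen6_ppgtt_insert_entries(). */
static void walk_segments(const struct segment *segs, unsigned nsegs,
			  unsigned first_entry)
{
	unsigned pd = first_entry / PT_ENTRIES;	/* which page table */
	unsigned pte = first_entry % PT_ENTRIES;	/* entry within it  */
	unsigned i = 0, m = 0;

	while (i < nsegs) {
		for (; pte < PT_ENTRIES; pte++) {
			uint64_t page = segs[i].dma_addr +
					((uint64_t)m << PAGE_SHIFT);
			printf("pt %u, entry %u -> %#llx\n", pd, pte,
			       (unsigned long long)page);
			if (++m == segs[i].npages) {	/* next segment */
				m = 0;
				if (++i == nsegs)
					break;
			}
		}
		pte = 0;	/* continue at the start of the next page table */
		pd++;
	}
}

int main(void)
{
	struct segment segs[] = { { 0x100000, 3 }, { 0x400000, 2 } };
	walk_segments(segs, 2, 1022);	/* straddles a page-table boundary */
	return 0;
}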
@@ -119,18 +182,17 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
* entries. For aliasing ppgtt support we just steal them at the end for
* now. */
- first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;
-
- ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
- if (!ppgtt)
- return ret;
+ first_pd_entry_in_global_pt =
+ gtt_total_entries(dev_priv->gtt) - I915_PPGTT_PD_ENTRIES;
- ppgtt->dev = dev;
ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
+ ppgtt->clear_range = gen6_ppgtt_clear_range;
+ ppgtt->insert_entries = gen6_ppgtt_insert_entries;
+ ppgtt->cleanup = gen6_ppgtt_cleanup;
ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
GFP_KERNEL);
if (!ppgtt->pt_pages)
- goto err_ppgtt;
+ return -ENOMEM;
for (i = 0; i < ppgtt->num_pd_entries; i++) {
ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
@@ -138,39 +200,32 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
goto err_pt_alloc;
}
- if (dev_priv->mm.gtt->needs_dmar) {
- ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
- *ppgtt->num_pd_entries,
- GFP_KERNEL);
- if (!ppgtt->pt_dma_addr)
- goto err_pt_alloc;
+ ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries,
+ GFP_KERNEL);
+ if (!ppgtt->pt_dma_addr)
+ goto err_pt_alloc;
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- dma_addr_t pt_addr;
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ dma_addr_t pt_addr;
- pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
- 0, 4096,
- PCI_DMA_BIDIRECTIONAL);
+ pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
+ PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev,
- pt_addr)) {
- ret = -EIO;
- goto err_pd_pin;
+ if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
+ ret = -EIO;
+ goto err_pd_pin;
- }
- ppgtt->pt_dma_addr[i] = pt_addr;
}
+ ppgtt->pt_dma_addr[i] = pt_addr;
}
- ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
+ ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;
- i915_ppgtt_clear_range(ppgtt, 0,
- ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
+ ppgtt->clear_range(ppgtt, 0,
+ ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
- dev_priv->mm.aliasing_ppgtt = ppgtt;
-
return 0;
err_pd_pin:
@@ -186,94 +241,57 @@ err_pt_alloc:
__free_page(ppgtt->pt_pages[i]);
}
kfree(ppgtt->pt_pages);
-err_ppgtt:
- kfree(ppgtt);
return ret;
}
-void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
+static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- int i;
+ struct i915_hw_ppgtt *ppgtt;
+ int ret;
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
if (!ppgtt)
- return;
+ return -ENOMEM;
- if (ppgtt->pt_dma_addr) {
- for (i = 0; i < ppgtt->num_pd_entries; i++)
- pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
- 4096, PCI_DMA_BIDIRECTIONAL);
- }
+ ppgtt->dev = dev;
- kfree(ppgtt->pt_dma_addr);
- for (i = 0; i < ppgtt->num_pd_entries; i++)
- __free_page(ppgtt->pt_pages[i]);
- kfree(ppgtt->pt_pages);
- kfree(ppgtt);
+ ret = gen6_ppgtt_init(ppgtt);
+ if (ret)
+ kfree(ppgtt);
+ else
+ dev_priv->mm.aliasing_ppgtt = ppgtt;
+
+ return ret;
}
-static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
- const struct sg_table *pages,
- unsigned first_entry,
- enum i915_cache_level cache_level)
+void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
- gtt_pte_t *pt_vaddr;
- unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
- unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
- unsigned i, j, m, segment_len;
- dma_addr_t page_addr;
- struct scatterlist *sg;
-
- /* init sg walking */
- sg = pages->sgl;
- i = 0;
- segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
- m = 0;
-
- while (i < pages->nents) {
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
-
- for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
- page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
- pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
- cache_level);
-
- /* grab the next page */
- if (++m == segment_len) {
- if (++i == pages->nents)
- break;
-
- sg = sg_next(sg);
- segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
- m = 0;
- }
- }
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- kunmap_atomic(pt_vaddr);
+ if (!ppgtt)
+ return;
- first_pte = 0;
- act_pd++;
- }
+ ppgtt->cleanup(ppgtt);
}
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
- i915_ppgtt_insert_sg_entries(ppgtt,
- obj->pages,
- obj->gtt_space->start >> PAGE_SHIFT,
- cache_level);
+ ppgtt->insert_entries(ppgtt, obj->pages,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ cache_level);
}
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj)
{
- i915_ppgtt_clear_range(ppgtt,
- obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT);
+ ppgtt->clear_range(ppgtt,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
}
void i915_gem_init_ppgtt(struct drm_device *dev)
@@ -282,7 +300,7 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
uint32_t pd_offset;
struct intel_ring_buffer *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- uint32_t __iomem *pd_addr;
+ gtt_pte_t __iomem *pd_addr;
uint32_t pd_entry;
int i;
@@ -290,15 +308,11 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
return;
- pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
+ pd_addr = (gtt_pte_t __iomem*)dev_priv->gtt.gsm + ppgtt->pd_offset/sizeof(gtt_pte_t);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
- if (dev_priv->mm.gtt->needs_dmar)
- pt_addr = ppgtt->pt_dma_addr[i];
- else
- pt_addr = page_to_phys(ppgtt->pt_pages[i]);
-
+ pt_addr = ppgtt->pt_dma_addr[i];
pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
pd_entry |= GEN6_PDE_VALID;
@@ -338,11 +352,27 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
}
}
+extern int intel_iommu_gfx_mapped;
+/* Certain Gen5 chipsets require idling the GPU before
+ * unmapping anything from the GTT when VT-d is enabled.
+ */
+static inline bool needs_idle_maps(struct drm_device *dev)
+{
+#ifdef CONFIG_INTEL_IOMMU
+ /* Query intel_iommu to see if we need the workaround. Presumably that
+ * was loaded first.
+ */
+ if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
+ return true;
+#endif
+ return false;
+}
+
static bool do_idling(struct drm_i915_private *dev_priv)
{
bool ret = dev_priv->mm.interruptible;
- if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
+ if (unlikely(dev_priv->gtt.do_idle_maps)) {
dev_priv->mm.interruptible = false;
if (i915_gpu_idle(dev_priv->dev)) {
DRM_ERROR("Couldn't idle GPU\n");
@@ -356,45 +386,18 @@ static bool do_idling(struct drm_i915_private *dev_priv)
static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
- if (unlikely(dev_priv->mm.gtt->do_idle_maps))
+ if (unlikely(dev_priv->gtt.do_idle_maps))
dev_priv->mm.interruptible = interruptible;
}
-
-static void i915_ggtt_clear_range(struct drm_device *dev,
- unsigned first_entry,
- unsigned num_entries)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- gtt_pte_t scratch_pte;
- gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
- const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
- int i;
-
- if (INTEL_INFO(dev)->gen < 6) {
- intel_gtt_clear_range(first_entry, num_entries);
- return;
- }
-
- if (WARN(num_entries > max_entries,
- "First entry = %d; Num entries = %d (max=%d)\n",
- first_entry, num_entries, max_entries))
- num_entries = max_entries;
-
- scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
- for (i = 0; i < num_entries; i++)
- iowrite32(scratch_pte, &gtt_base[i]);
- readl(gtt_base);
-}
-
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
/* First fill our portion of the GTT with scratch pages */
- i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
- (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
+ dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
+ dev_priv->gtt.total / PAGE_SIZE);
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
i915_gem_clflush_object(obj);
@@ -423,16 +426,15 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
* within the global GTT as well as accessible by the GPU through the GMADR
* mapped BAR (dev_priv->mm.gtt->gtt).
*/
-static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
- enum i915_cache_level level)
+static void gen6_ggtt_insert_entries(struct drm_device *dev,
+ struct sg_table *st,
+ unsigned int first_entry,
+ enum i915_cache_level level)
{
- struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct sg_table *st = obj->pages;
struct scatterlist *sg = st->sgl;
- const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
- const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
- gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
+ gtt_pte_t __iomem *gtt_entries =
+ (gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int unused, i = 0;
unsigned int len, m = 0;
dma_addr_t addr;
@@ -441,14 +443,12 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
len = sg_dma_len(sg) >> PAGE_SHIFT;
for (m = 0; m < len; m++) {
addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
- iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
+ iowrite32(gen6_pte_encode(dev, addr, level),
+ &gtt_entries[i]);
i++;
}
}
- BUG_ON(i > max_entries);
- BUG_ON(i != obj->base.size / PAGE_SIZE);
-
/* XXX: This serves as a posting read to make sure that the PTE has
* actually been updated. There is some concern that even though
* registers and PTEs are within the same BAR that they are potentially
@@ -456,7 +456,8 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
* hardware should work, we must keep this posting read for paranoia.
*/
if (i != 0)
- WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));
+ WARN_ON(readl(&gtt_entries[i-1])
+ != gen6_pte_encode(dev, addr, level));
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
@@ -466,28 +467,70 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
+static void gen6_ggtt_clear_range(struct drm_device *dev,
+ unsigned int first_entry,
+ unsigned int num_entries)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ gtt_pte_t scratch_pte;
+ gtt_pte_t __iomem *gtt_base = (gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
+ const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
+ int i;
+
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
+
+ scratch_pte = gen6_pte_encode(dev, dev_priv->gtt.scratch_page_dma,
+ I915_CACHE_LLC);
+ for (i = 0; i < num_entries; i++)
+ iowrite32(scratch_pte, &gtt_base[i]);
+ readl(gtt_base);
+}
+
+
+static void i915_ggtt_insert_entries(struct drm_device *dev,
+ struct sg_table *st,
+ unsigned int pg_start,
+ enum i915_cache_level cache_level)
+{
+ unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+ intel_gtt_insert_sg_entries(st, pg_start, flags);
+
+}
+
+static void i915_ggtt_clear_range(struct drm_device *dev,
+ unsigned int first_entry,
+ unsigned int num_entries)
+{
+ intel_gtt_clear_range(first_entry, num_entries);
+}
+
+
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
- if (INTEL_INFO(dev)->gen < 6) {
- unsigned int flags = (cache_level == I915_CACHE_NONE) ?
- AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
- intel_gtt_insert_sg_entries(obj->pages,
- obj->gtt_space->start >> PAGE_SHIFT,
- flags);
- } else {
- gen6_ggtt_bind_object(obj, cache_level);
- }
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ cache_level);
obj->has_global_gtt_mapping = 1;
}
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
- i915_ggtt_clear_range(obj->base.dev,
- obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT);
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->gtt.gtt_clear_range(obj->base.dev,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
obj->has_global_gtt_mapping = 0;
}
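The structural theme of the hunks above is that callers such as i915_gem_gtt_bind_object() no longer branch on the hardware generation; they call through function pointers (gtt_insert_entries, gtt_clear_range) installed once at probe time. A small userspace sketch of that dispatch pattern follows; the gtt_ops struct and the backend names here are invented for illustration only.

#include <stdio.h>

/* Two interchangeable GTT backends selected once, mirroring the
 * dev_priv->gtt.gtt_insert_entries / gtt_clear_range pointers. */
struct gtt_ops {
	void (*insert_entries)(unsigned first, unsigned count);
	void (*clear_range)(unsigned first, unsigned count);
};

static void gen6_insert(unsigned first, unsigned count)
{ printf("gen6: write %u PTEs starting at %u\n", count, first); }
static void gen6_clear(unsigned first, unsigned count)
{ printf("gen6: point %u PTEs at the scratch page from %u\n", count, first); }

static void legacy_insert(unsigned first, unsigned count)
{ printf("legacy: intel_gtt_insert_sg_entries-style path (%u, %u)\n", first, count); }
static void legacy_clear(unsigned first, unsigned count)
{ printf("legacy: intel_gtt_clear_range-style path (%u, %u)\n", first, count); }

int main(void)
{
	int gen = 6;	/* pretend probe result */
	struct gtt_ops gtt = (gen >= 6)
		? (struct gtt_ops){ gen6_insert, gen6_clear }
		: (struct gtt_ops){ legacy_insert, legacy_clear };

	gtt.insert_entries(0, 16);	/* callers no longer check the gen */
	gtt.clear_range(0, 16);
	return 0;
}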
@@ -524,27 +567,101 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
*end -= 4096;
}
}
-
-void i915_gem_init_global_gtt(struct drm_device *dev,
- unsigned long start,
- unsigned long mappable_end,
- unsigned long end)
+void i915_gem_setup_global_gtt(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end)
{
+ /* Let GEM Manage all of the aperture.
+ *
+ * However, leave one page at the end still bound to the scratch page.
+ * There are a number of places where the hardware apparently prefetches
+ * past the end of the object, and we've seen multiple hangs with the
+ * GPU head pointer stuck in a batchbuffer bound at the last page of the
+ * aperture. One page should be enough to keep any prefetching inside
+ * of the aperture.
+ */
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_mm_node *entry;
+ struct drm_i915_gem_object *obj;
+ unsigned long hole_start, hole_end;
- /* Substract the guard page ... */
+ BUG_ON(mappable_end > end);
+
+ /* Subtract the guard page ... */
drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
if (!HAS_LLC(dev))
dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
- dev_priv->mm.gtt_start = start;
- dev_priv->mm.gtt_mappable_end = mappable_end;
- dev_priv->mm.gtt_end = end;
- dev_priv->mm.gtt_total = end - start;
- dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
+ /* Mark any preallocated objects as occupied */
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
+ DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
+ obj->gtt_offset, obj->base.size);
+
+ BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
+ obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
+ obj->gtt_offset,
+ obj->base.size,
+ false);
+ obj->has_global_gtt_mapping = 1;
+ }
+
+ dev_priv->gtt.start = start;
+ dev_priv->gtt.total = end - start;
+
+ /* Clear any non-preallocated blocks */
+ drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
+ hole_start, hole_end) {
+ DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
+ hole_start, hole_end);
+ dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
+ (hole_end-hole_start) / PAGE_SIZE);
+ }
- /* ... but ensure that we clear the entire range. */
- i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+ /* And finally clear the reserved guard page */
+ dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
+}
+
+static bool
+intel_enable_ppgtt(struct drm_device *dev)
+{
+ if (i915_enable_ppgtt >= 0)
+ return i915_enable_ppgtt;
+
+#ifdef CONFIG_INTEL_IOMMU
+ /* Disable ppgtt on SNB if VT-d is on. */
+ if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+ return false;
+#endif
+
+ return true;
+}
+
+void i915_gem_init_global_gtt(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long gtt_size, mappable_size;
+
+ gtt_size = dev_priv->gtt.total;
+ mappable_size = dev_priv->gtt.mappable_end;
+
+ if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
+ int ret;
+ /* PPGTT pdes are stolen from global gtt ptes, so shrink the
+ * aperture accordingly when using aliasing ppgtt. */
+ gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+
+ i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
+
+ ret = i915_gem_init_aliasing_ppgtt(dev);
+ if (!ret)
+ return;
+
+ DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
+ drm_mm_takedown(&dev_priv->mm.gtt_space);
+ gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+ }
+ i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}
static int setup_scratch_page(struct drm_device *dev)
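In the i915_gem_setup_global_gtt() hunk above, preallocated (stolen) objects are first reserved in the address space and the remaining gaps are then walked with drm_mm_for_each_hole() so every unused PTE points at the scratch page, with the guard page cleared last. The sketch below illustrates only the hole-enumeration idea over a sorted array of reserved ranges; it is not the drm_mm API, and the range struct and bounds are invented for the example.

#include <stdio.h>

struct range { unsigned long start, end; };	/* [start, end) */

/* Print the gaps between reserved blocks inside [space_start, space_end),
 * i.e. the regions the hole-walking loop above scrubs to the scratch page. */
static void for_each_hole(unsigned long space_start, unsigned long space_end,
			  const struct range *res, unsigned n)
{
	unsigned long cursor = space_start;
	unsigned i;

	for (i = 0; i < n; i++) {	/* reserved blocks, sorted by start */
		if (res[i].start > cursor)
			printf("hole: [%#lx, %#lx)\n", cursor, res[i].start);
		cursor = res[i].end;
	}
	if (cursor < space_end)
		printf("hole: [%#lx, %#lx)\n", cursor, space_end);
}

int main(void)
{
	/* e.g. two preallocated stolen buffers in a 256 MiB GTT */
	struct range reserved[] = {
		{ 0x000000, 0x800000 },
		{ 0x900000, 0xa00000 },
	};
	for_each_hole(0, 256ul << 20, reserved, 2);
	return 0;
}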
@@ -567,8 +684,8 @@ static int setup_scratch_page(struct drm_device *dev)
#else
dma_addr = page_to_phys(page);
#endif
- dev_priv->mm.gtt->scratch_page = page;
- dev_priv->mm.gtt->scratch_page_dma = dma_addr;
+ dev_priv->gtt.scratch_page = page;
+ dev_priv->gtt.scratch_page_dma = dma_addr;
return 0;
}
@@ -576,11 +693,11 @@ static int setup_scratch_page(struct drm_device *dev)
static void teardown_scratch_page(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- set_pages_wb(dev_priv->mm.gtt->scratch_page, 1);
- pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma,
+ set_pages_wb(dev_priv->gtt.scratch_page, 1);
+ pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- put_page(dev_priv->mm.gtt->scratch_page);
- __free_page(dev_priv->mm.gtt->scratch_page);
+ put_page(dev_priv->gtt.scratch_page);
+ __free_page(dev_priv->gtt.scratch_page);
}
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -590,14 +707,14 @@ static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
return snb_gmch_ctl << 20;
}
-static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
+static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
return snb_gmch_ctl << 25; /* 32 MB units */
}
-static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
+static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl)
{
static const int stolen_decoder[] = {
0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
@@ -606,103 +723,127 @@ static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
return stolen_decoder[snb_gmch_ctl] << 20;
}
-int i915_gem_gtt_init(struct drm_device *dev)
+static int gen6_gmch_probe(struct drm_device *dev,
+ size_t *gtt_total,
+ size_t *stolen,
+ phys_addr_t *mappable_base,
+ unsigned long *mappable_end)
{
struct drm_i915_private *dev_priv = dev->dev_private;
phys_addr_t gtt_bus_addr;
+ unsigned int gtt_size;
u16 snb_gmch_ctl;
int ret;
- /* On modern platforms we need not worry ourself with the legacy
- * hostbridge query stuff. Skip it entirely
- */
- if (INTEL_INFO(dev)->gen < 6) {
- ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
- if (!ret) {
- DRM_ERROR("failed to set up gmch\n");
- return -EIO;
- }
+ *mappable_base = pci_resource_start(dev->pdev, 2);
+ *mappable_end = pci_resource_len(dev->pdev, 2);
- dev_priv->mm.gtt = intel_gtt_get();
- if (!dev_priv->mm.gtt) {
- DRM_ERROR("Failed to initialize GTT\n");
- intel_gmch_remove();
- return -ENODEV;
- }
- return 0;
+ /* 64/512MB is the current min/max we actually know of, but this is just
+ * a coarse sanity check.
+ */
+ if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
+ DRM_ERROR("Unknown GMADR size (%lx)\n",
+ dev_priv->gtt.mappable_end);
+ return -ENXIO;
}
- dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
- if (!dev_priv->mm.gtt)
- return -ENOMEM;
-
if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
+ pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
-#ifdef CONFIG_INTEL_IOMMU
- dev_priv->mm.gtt->needs_dmar = 1;
-#endif
+ if (IS_GEN7(dev))
+ *stolen = gen7_get_stolen_size(snb_gmch_ctl);
+ else
+ *stolen = gen6_get_stolen_size(snb_gmch_ctl);
+
+ *gtt_total = (gtt_size / sizeof(gtt_pte_t)) << PAGE_SHIFT;
/* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
- dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2);
-
- /* i9xx_setup */
- pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- dev_priv->mm.gtt->gtt_total_entries =
- gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
- if (INTEL_INFO(dev)->gen < 7)
- dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
- else
- dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);
-
- dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT;
- /* 64/512MB is the current min/max we actually know of, but this is just a
- * coarse sanity check.
- */
- if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
- dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
- DRM_ERROR("Unknown GMADR entries (%d)\n",
- dev_priv->mm.gtt->gtt_mappable_entries);
- ret = -ENXIO;
- goto err_out;
+ dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
+ if (!dev_priv->gtt.gsm) {
+ DRM_ERROR("Failed to map the gtt page table\n");
+ return -ENOMEM;
}
ret = setup_scratch_page(dev);
- if (ret) {
+ if (ret)
DRM_ERROR("Scratch setup failed\n");
- goto err_out;
- }
- dev_priv->mm.gtt->gtt = ioremap_wc(gtt_bus_addr,
- dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
- if (!dev_priv->mm.gtt->gtt) {
- DRM_ERROR("Failed to map the gtt page table\n");
- teardown_scratch_page(dev);
- ret = -ENOMEM;
- goto err_out;
+ dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
+ dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;
+
+ return ret;
+}
+
+static void gen6_gmch_remove(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ iounmap(dev_priv->gtt.gsm);
+ teardown_scratch_page(dev_priv->dev);
+}
+
+static int i915_gmch_probe(struct drm_device *dev,
+ size_t *gtt_total,
+ size_t *stolen,
+ phys_addr_t *mappable_base,
+ unsigned long *mappable_end)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
+ if (!ret) {
+ DRM_ERROR("failed to set up gmch\n");
+ return -EIO;
}
- /* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
- DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8);
- DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
- DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
+ intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
+
+ dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
+ dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
+ dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;
return 0;
+}
-err_out:
- kfree(dev_priv->mm.gtt);
- if (INTEL_INFO(dev)->gen < 6)
- intel_gmch_remove();
- return ret;
+static void i915_gmch_remove(struct drm_device *dev)
+{
+ intel_gmch_remove();
}
-void i915_gem_gtt_fini(struct drm_device *dev)
+int i915_gem_gtt_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- iounmap(dev_priv->mm.gtt->gtt);
- teardown_scratch_page(dev);
- if (INTEL_INFO(dev)->gen < 6)
- intel_gmch_remove();
- kfree(dev_priv->mm.gtt);
+ struct i915_gtt *gtt = &dev_priv->gtt;
+ unsigned long gtt_size;
+ int ret;
+
+ if (INTEL_INFO(dev)->gen <= 5) {
+ dev_priv->gtt.gtt_probe = i915_gmch_probe;
+ dev_priv->gtt.gtt_remove = i915_gmch_remove;
+ } else {
+ dev_priv->gtt.gtt_probe = gen6_gmch_probe;
+ dev_priv->gtt.gtt_remove = gen6_gmch_remove;
+ }
+
+ ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
+ &dev_priv->gtt.stolen_size,
+ &gtt->mappable_base,
+ &gtt->mappable_end);
+ if (ret)
+ return ret;
+
+ gtt_size = (dev_priv->gtt.total >> PAGE_SHIFT) * sizeof(gtt_pte_t);
+
+ /* GMADR is the PCI mmio aperture into the global GTT. */
+ DRM_INFO("Memory usable by graphics device = %zdM\n",
+ dev_priv->gtt.total >> 20);
+ DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
+ dev_priv->gtt.mappable_end >> 20);
+ DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
+ dev_priv->gtt.stolen_size >> 20);
+
+ return 0;
}
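The probe helpers above decode sizes from the GMCH control word: on gen6 the stolen-memory field counts 32 MB units, while gen7 uses it as an index into a table of sizes in MB. A compilable userspace sketch of those two decoders follows; the GMS_SHIFT/GMS_MASK values are placeholder assumptions standing in for the real SNB_GMCH_* definitions, which are not shown in this diff.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Field position/width are assumptions for illustration only; the real
 * values live in i915_reg.h as SNB_GMCH_GMS_SHIFT / SNB_GMCH_GMS_MASK. */
#define GMS_SHIFT 3
#define GMS_MASK  0x1f

/* Gen6: the field counts stolen memory in 32 MB units. */
static size_t gen6_stolen_bytes(uint16_t gmch_ctl)
{
	gmch_ctl >>= GMS_SHIFT;
	gmch_ctl &= GMS_MASK;
	return (size_t)gmch_ctl << 25;		/* 32 MB units */
}

/* Gen7: the same field indexes a table of sizes in MB. */
static size_t gen7_stolen_bytes(uint16_t gmch_ctl)
{
	static const int stolen_mb[] = {
		0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352 };

	gmch_ctl >>= GMS_SHIFT;
	gmch_ctl &= GMS_MASK;
	if ((size_t)gmch_ctl >= sizeof(stolen_mb) / sizeof(stolen_mb[0]))
		return 0;	/* out-of-table encodings treated as no stolen */
	return (size_t)stolen_mb[gmch_ctl] << 20;
}

int main(void)
{
	uint16_t ctl = 5 << GMS_SHIFT;	/* a made-up register value */
	printf("gen6: %zu MB stolen\n", gen6_stolen_bytes(ctl) >> 20);
	printf("gen7: %zu MB stolen\n", gen7_stolen_bytes(ctl) >> 20);
	return 0;
}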
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 8e91083b126..69d97cbac13 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -42,85 +42,73 @@
* for is a boon.
*/
-#define PTE_ADDRESS_MASK 0xfffff000
-#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
-#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
-#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
-#define PTE_MAPPING_TYPE_CACHED (3 << 1)
-#define PTE_MAPPING_TYPE_MASK (3 << 1)
-#define PTE_VALID (1 << 0)
-
-/**
- * i915_stolen_to_phys - take an offset into stolen memory and turn it into
- * a physical one
- * @dev: drm device
- * @offset: address to translate
- *
- * Some chip functions require allocations from stolen space and need the
- * physical address of the memory in question.
- */
-static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
+static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct pci_dev *pdev = dev_priv->bridge_dev;
u32 base;
-#if 0
/* On the machines I have tested the Graphics Base of Stolen Memory
- * is unreliable, so compute the base by subtracting the stolen memory
- * from the Top of Low Usable DRAM which is where the BIOS places
- * the graphics stolen memory.
+ * is unreliable, so on those compute the base by subtracting the
+ * stolen memory from the Top of Low Usable DRAM which is where the
+ * BIOS places the graphics stolen memory.
+ *
+ * On gen2, the layout is slightly different with the Graphics Segment
+ * immediately following Top of Memory (or Top of Usable DRAM). Note
+ * it appears that TOUD is only reported by 865g, so we just use the
+ * top of memory as determined by the e820 probe.
+ *
+ * XXX gen2 requires an unavailable symbol and 945gm fails with
+ * its value of TOLUD.
*/
- if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
- /* top 32bits are reserved = 0 */
+ base = 0;
+ if (INTEL_INFO(dev)->gen >= 6) {
+ /* Read Base Data of Stolen Memory Register (BDSM) directly.
+	 * Note that there is also a MCHBAR mirror at 0x1080c0 or
+ * we could use device 2:0x5c instead.
+ */
+ pci_read_config_dword(pdev, 0xB0, &base);
+ base &= ~4095; /* lower bits used for locking register */
+ } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+ /* Read Graphics Base of Stolen Memory directly */
pci_read_config_dword(pdev, 0xA4, &base);
- } else {
- /* XXX presume 8xx is the same as i915 */
- pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
- }
-#else
- if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
- u16 val;
- pci_read_config_word(pdev, 0xb0, &val);
- base = val >> 4 << 20;
- } else {
+#if 0
+ } else if (IS_GEN3(dev)) {
u8 val;
+ /* Stolen is immediately below Top of Low Usable DRAM */
pci_read_config_byte(pdev, 0x9c, &val);
base = val >> 3 << 27;
- }
- base -= dev_priv->mm.gtt->stolen_size;
+ base -= dev_priv->mm.gtt->stolen_size;
+ } else {
+ /* Stolen is immediately above Top of Memory */
+ base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
+ }
- return base + offset;
+ return base;
}
-static void i915_warn_stolen(struct drm_device *dev)
-{
- DRM_INFO("not enough stolen space for compressed buffer, disabling\n");
- DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
-}
-
-static void i915_setup_compression(struct drm_device *dev, int size)
+static int i915_setup_compression(struct drm_device *dev, int size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
- unsigned long cfb_base;
- unsigned long ll_base = 0;
-
- /* Just in case the BIOS is doing something questionable. */
- intel_disable_fbc(dev);
- compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
+ /* Try to over-allocate to reduce reallocations and fragmentation */
+ compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
+ size <<= 1, 4096, 0);
+ if (!compressed_fb)
+ compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
+ size >>= 1, 4096, 0);
if (compressed_fb)
compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
if (!compressed_fb)
goto err;
- cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
- if (!cfb_base)
- goto err_fb;
-
- if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
+ if (HAS_PCH_SPLIT(dev))
+ I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
+ else if (IS_GM45(dev)) {
+ I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
+ } else {
compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
4096, 4096, 0);
if (compressed_llb)
@@ -129,73 +117,206 @@ static void i915_setup_compression(struct drm_device *dev, int size)
if (!compressed_llb)
goto err_fb;
- ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
- if (!ll_base)
- goto err_llb;
+ dev_priv->compressed_llb = compressed_llb;
+
+ I915_WRITE(FBC_CFB_BASE,
+ dev_priv->mm.stolen_base + compressed_fb->start);
+ I915_WRITE(FBC_LL_BASE,
+ dev_priv->mm.stolen_base + compressed_llb->start);
}
+ dev_priv->compressed_fb = compressed_fb;
dev_priv->cfb_size = size;
- dev_priv->compressed_fb = compressed_fb;
- if (HAS_PCH_SPLIT(dev))
- I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
- else if (IS_GM45(dev)) {
- I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
- } else {
- I915_WRITE(FBC_CFB_BASE, cfb_base);
- I915_WRITE(FBC_LL_BASE, ll_base);
- dev_priv->compressed_llb = compressed_llb;
- }
+ DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
+ size);
- DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
- cfb_base, ll_base, size >> 20);
- return;
+ return 0;
-err_llb:
- drm_mm_put_block(compressed_llb);
err_fb:
drm_mm_put_block(compressed_fb);
err:
- dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
- i915_warn_stolen(dev);
+ return -ENOSPC;
+}
+
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->mm.stolen_base == 0)
+ return -ENODEV;
+
+ if (size < dev_priv->cfb_size)
+ return 0;
+
+ /* Release any current block */
+ i915_gem_stolen_cleanup_compression(dev);
+
+ return i915_setup_compression(dev, size);
}
-static void i915_cleanup_compression(struct drm_device *dev)
+void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- drm_mm_put_block(dev_priv->compressed_fb);
+ if (dev_priv->cfb_size == 0)
+ return;
+
+ if (dev_priv->compressed_fb)
+ drm_mm_put_block(dev_priv->compressed_fb);
+
if (dev_priv->compressed_llb)
drm_mm_put_block(dev_priv->compressed_llb);
+
+ dev_priv->cfb_size = 0;
}
void i915_gem_cleanup_stolen(struct drm_device *dev)
{
- if (I915_HAS_FBC(dev) && i915_powersave)
- i915_cleanup_compression(dev);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ i915_gem_stolen_cleanup_compression(dev);
+ drm_mm_takedown(&dev_priv->mm.stolen);
}
int i915_gem_init_stolen(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long prealloc_size = dev_priv->mm.gtt->stolen_size;
+
+ dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
+ if (dev_priv->mm.stolen_base == 0)
+ return 0;
+
+ DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
+ dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
/* Basic memrange allocator for stolen space */
- drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
+ drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size);
+
+ return 0;
+}
- /* Try to set up FBC with a reasonable compressed buffer size */
- if (I915_HAS_FBC(dev) && i915_powersave) {
- int cfb_size;
+static struct sg_table *
+i915_pages_create_for_stolen(struct drm_device *dev,
+ u32 offset, u32 size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct sg_table *st;
+ struct scatterlist *sg;
+
+ DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
+ BUG_ON(offset > dev_priv->gtt.stolen_size - size);
- /* Leave 1M for line length buffer & misc. */
+ /* We hide that we have no struct page backing our stolen object
+ * by wrapping the contiguous physical allocation with a fake
+ * dma mapping in a single scatterlist.
+ */
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL)
+ return NULL;
- /* Try to get a 32M buffer... */
- if (prealloc_size > (36*1024*1024))
- cfb_size = 32*1024*1024;
- else /* fall back to 7/8 of the stolen space */
- cfb_size = prealloc_size * 7 / 8;
- i915_setup_compression(dev, cfb_size);
+ if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+ kfree(st);
+ return NULL;
}
- return 0;
+ sg = st->sgl;
+ sg->offset = offset;
+ sg->length = size;
+
+ sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
+ sg_dma_len(sg) = size;
+
+ return st;
+}
+
+static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
+{
+ BUG();
+ return -EINVAL;
+}
+
+static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
+{
+ /* Should only be called during free */
+ sg_free_table(obj->pages);
+ kfree(obj->pages);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
+ .get_pages = i915_gem_object_get_pages_stolen,
+ .put_pages = i915_gem_object_put_pages_stolen,
+};
+
+static struct drm_i915_gem_object *
+_i915_gem_object_create_stolen(struct drm_device *dev,
+ struct drm_mm_node *stolen)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_alloc(dev);
+ if (obj == NULL)
+ return NULL;
+
+ if (drm_gem_private_object_init(dev, &obj->base, stolen->size))
+ goto cleanup;
+
+ i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
+
+ obj->pages = i915_pages_create_for_stolen(dev,
+ stolen->start, stolen->size);
+ if (obj->pages == NULL)
+ goto cleanup;
+
+ obj->has_dma_mapping = true;
+ obj->pages_pin_count = 1;
+ obj->stolen = stolen;
+
+ obj->base.write_domain = I915_GEM_DOMAIN_GTT;
+ obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+ obj->cache_level = I915_CACHE_NONE;
+
+ return obj;
+
+cleanup:
+ i915_gem_object_free(obj);
+ return NULL;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ struct drm_mm_node *stolen;
+
+ if (dev_priv->mm.stolen_base == 0)
+ return NULL;
+
+ DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
+ if (size == 0)
+ return NULL;
+
+ stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
+ if (stolen)
+ stolen = drm_mm_get_block(stolen, size, 4096);
+ if (stolen == NULL)
+ return NULL;
+
+ obj = _i915_gem_object_create_stolen(dev, stolen);
+ if (obj)
+ return obj;
+
+ drm_mm_put_block(stolen);
+ return NULL;
+}
+
+void
+i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
+{
+ if (obj->stolen) {
+ drm_mm_put_block(obj->stolen);
+ obj->stolen = NULL;
+ }
}
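The key trick in i915_pages_create_for_stolen() above is presenting a contiguous chunk of stolen memory, which has no struct page backing, as a single-entry scatterlist so the rest of GEM can treat it like any other object. The real code builds a struct sg_table via sg_alloc_table(st, 1, ...); the fragment below is only a userspace illustration of the idea, with a hypothetical fake_sg struct standing in for the kernel types.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

typedef uint64_t dma_addr_t;	/* stand-in for the kernel type */

struct fake_sg {
	dma_addr_t dma_address;	/* bus address of the segment */
	uint32_t   length;	/* segment length in bytes */
};

/* Describe a chunk of stolen memory as one contiguous "segment",
 * mirroring the single-entry mapping built for stolen objects:
 * just stolen_base + offset, exposed through the normal object path. */
static struct fake_sg *pages_for_stolen(dma_addr_t stolen_base,
					uint32_t offset, uint32_t size)
{
	struct fake_sg *sg = malloc(sizeof(*sg));
	if (!sg)
		return NULL;
	sg->dma_address = stolen_base + offset;
	sg->length = size;
	return sg;
}

int main(void)
{
	struct fake_sg *sg = pages_for_stolen(0x7f000000, 0x10000, 4096 * 8);
	if (sg) {
		printf("segment: %#llx + %u bytes\n",
		       (unsigned long long)sg->dma_address, sg->length);
		free(sg);
	}
	return 0;
}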
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index cedbfd7b3df..abcba2f5a78 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -272,18 +272,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
return false;
}
- /*
- * Previous chips need to be aligned to the size of the smallest
- * fence register that can contain the object.
- */
- if (INTEL_INFO(obj->base.dev)->gen == 3)
- size = 1024*1024;
- else
- size = 512*1024;
-
- while (size < obj->base.size)
- size <<= 1;
-
+ size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
if (obj->gtt_space->size != size)
return false;
@@ -368,15 +357,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
obj->map_and_fenceable =
obj->gtt_space == NULL ||
- (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
+ (obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode));
/* Rebind if we need a change of alignment */
if (!obj->map_and_fenceable) {
u32 unfenced_alignment =
- i915_gem_get_unfenced_gtt_alignment(dev,
- obj->base.size,
- args->tiling_mode);
+ i915_gem_get_gtt_alignment(dev, obj->base.size,
+ args->tiling_mode,
+ false);
if (obj->gtt_offset & (unfenced_alignment - 1))
ret = i915_gem_object_unbind(obj);
}
@@ -396,6 +385,18 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
/* we have to maintain this existing ABI... */
args->stride = obj->stride;
args->tiling_mode = obj->tiling_mode;
+
+ /* Try to preallocate memory required to save swizzling on put-pages */
+ if (i915_gem_object_needs_bit17_swizzle(obj)) {
+ if (obj->bit_17 == NULL) {
+ obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) *
+ sizeof(long), GFP_KERNEL);
+ }
+ } else {
+ kfree(obj->bit_17);
+ obj->bit_17 = NULL;
+ }
+
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
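The lines removed from i915_gem_object_fence_ok() above open-coded the fence-size computation that the patch now gets from i915_gem_get_gtt_size(): start from the per-generation minimum fence size and double it until it covers the object. A compilable sketch of that rounding, as the removed code expressed it, is below.

#include <stdio.h>
#include <stddef.h>

/* Round an object size up to the smallest fence-register size that can
 * contain it, starting from the per-generation minimum -- the computation
 * the removed lines performed before it was centralized. */
static size_t fence_size(int gen, size_t obj_size)
{
	size_t size = (gen == 3) ? 1024 * 1024 : 512 * 1024;

	while (size < obj_size)
		size <<= 1;
	return size;
}

int main(void)
{
	printf("%zu\n", fence_size(3, 1300 * 1024));	/* -> 2097152 */
	printf("%zu\n", fence_size(4, 300 * 1024));	/* -> 524288  */
	return 0;
}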
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index fe843389c7b..2cd97d1cc92 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -287,6 +287,10 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
+ /* HPD irq before everything is fully set up. */
+ if (!dev_priv->enable_hotplug_processing)
+ return;
+
mutex_lock(&mode_config->mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
@@ -300,9 +304,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_helper_hpd_irq_event(dev);
}
-/* defined intel_pm.c */
-extern spinlock_t mchdev_lock;
-
static void ironlake_handle_rps_change(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -355,8 +356,8 @@ static void notify_ring(struct drm_device *dev,
wake_up_all(&ring->irq_queue);
if (i915_enable_hangcheck) {
- dev_priv->hangcheck_count = 0;
- mod_timer(&dev_priv->hangcheck_timer,
+ dev_priv->gpu_error.hangcheck_count = 0;
+ mod_timer(&dev_priv->gpu_error.hangcheck_timer,
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
}
@@ -524,6 +525,20 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
queue_work(dev_priv->wq, &dev_priv->rps.work);
}
+static void gmbus_irq_handler(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ wake_up_all(&dev_priv->gmbus_wait_queue);
+}
+
+static void dp_aux_irq_handler(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ wake_up_all(&dev_priv->gmbus_wait_queue);
+}
+
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
@@ -533,7 +548,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
unsigned long irqflags;
int pipe;
u32 pipe_stats[I915_MAX_PIPES];
- bool blc_event;
atomic_inc(&dev_priv->irq_received);
@@ -590,8 +604,8 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
I915_READ(PORT_HOTPLUG_STAT);
}
- if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
- blc_event = true;
+ if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
+ gmbus_irq_handler(dev);
if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
gen6_queue_rps_work(dev_priv, pm_iir);
@@ -618,8 +632,11 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
(pch_iir & SDE_AUDIO_POWER_MASK) >>
SDE_AUDIO_POWER_SHIFT);
+ if (pch_iir & SDE_AUX_MASK)
+ dp_aux_irq_handler(dev);
+
if (pch_iir & SDE_GMBUS)
- DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
+ gmbus_irq_handler(dev);
if (pch_iir & SDE_AUDIO_HDCP_MASK)
DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
@@ -662,10 +679,10 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
SDE_AUDIO_POWER_SHIFT_CPT);
if (pch_iir & SDE_AUX_MASK_CPT)
- DRM_DEBUG_DRIVER("AUX channel interrupt\n");
+ dp_aux_irq_handler(dev);
if (pch_iir & SDE_GMBUS_CPT)
- DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
+ gmbus_irq_handler(dev);
if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
@@ -703,6 +720,9 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
de_iir = I915_READ(DEIIR);
if (de_iir) {
+ if (de_iir & DE_AUX_CHANNEL_A_IVB)
+ dp_aux_irq_handler(dev);
+
if (de_iir & DE_GSE_IVB)
intel_opregion_gse_intr(dev);
@@ -758,7 +778,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
- u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
+ u32 de_iir, gt_iir, de_ier, pm_iir;
atomic_inc(&dev_priv->irq_received);
@@ -769,11 +789,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
de_iir = I915_READ(DEIIR);
gt_iir = I915_READ(GTIIR);
- pch_iir = I915_READ(SDEIIR);
pm_iir = I915_READ(GEN6_PMIIR);
- if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
- (!IS_GEN6(dev) || pm_iir == 0))
+ if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
goto done;
ret = IRQ_HANDLED;
@@ -783,6 +801,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
else
snb_gt_irq_handler(dev, dev_priv, gt_iir);
+ if (de_iir & DE_AUX_CHANNEL_A)
+ dp_aux_irq_handler(dev);
+
if (de_iir & DE_GSE)
intel_opregion_gse_intr(dev);
@@ -804,10 +825,15 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
/* check event from PCH */
if (de_iir & DE_PCH_EVENT) {
+ u32 pch_iir = I915_READ(SDEIIR);
+
if (HAS_PCH_CPT(dev))
cpt_irq_handler(dev, pch_iir);
else
ibx_irq_handler(dev, pch_iir);
+
+ /* should clear PCH hotplug event before clear CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
}
if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
@@ -816,8 +842,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
gen6_queue_rps_work(dev_priv, pm_iir);
- /* should clear PCH hotplug event before clear CPU irq */
- I915_WRITE(SDEIIR, pch_iir);
I915_WRITE(GTIIR, gt_iir);
I915_WRITE(DEIIR, de_iir);
I915_WRITE(GEN6_PMIIR, pm_iir);
@@ -838,23 +862,60 @@ done:
*/
static void i915_error_work_func(struct work_struct *work)
{
- drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
- error_work);
+ struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
+ work);
+ drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
+ gpu_error);
struct drm_device *dev = dev_priv->dev;
+ struct intel_ring_buffer *ring;
char *error_event[] = { "ERROR=1", NULL };
char *reset_event[] = { "RESET=1", NULL };
char *reset_done_event[] = { "ERROR=0", NULL };
+ int i, ret;
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
- if (atomic_read(&dev_priv->mm.wedged)) {
+ /*
+ * Note that there's only one work item which does gpu resets, so we
+ * need not worry about concurrent gpu resets potentially incrementing
+ * error->reset_counter twice. We only need to take care of another
+ * racing irq/hangcheck declaring the gpu dead for a second time. A
+ * quick check for that is good enough: schedule_work ensures the
+ * correct ordering between hang detection and this work item, and since
+ * the reset in-progress bit is only ever set by code outside of this
+ * work we don't need to worry about any other races.
+ */
+ if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
DRM_DEBUG_DRIVER("resetting chip\n");
- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
- if (!i915_reset(dev)) {
- atomic_set(&dev_priv->mm.wedged, 0);
- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
+ kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
+ reset_event);
+
+ ret = i915_reset(dev);
+
+ if (ret == 0) {
+ /*
+ * After all the gem state is reset, increment the reset
+ * counter and wake up everyone waiting for the reset to
+ * complete.
+ *
+ * Since unlock operations are a one-sided barrier only,
+ * we need to insert a barrier here to order any seqno
+ * updates before
+ * the counter increment.
+ */
+ smp_mb__before_atomic_inc();
+ atomic_inc(&dev_priv->gpu_error.reset_counter);
+
+ kobject_uevent_env(&dev->primary->kdev.kobj,
+ KOBJ_CHANGE, reset_done_event);
+ } else {
+ atomic_set(&error->reset_counter, I915_WEDGED);
}
- complete_all(&dev_priv->error_completion);
+
+ for_each_ring(ring, dev_priv, i)
+ wake_up_all(&ring->irq_queue);
+
+ wake_up_all(&dev_priv->gpu_error.reset_queue);
}
}
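The reworked error path above replaces the old wedged flag plus completion with a single reset_counter: hang detection sets an in-progress bit, a successful reset increments the counter (after a memory barrier ordering prior seqno updates), and a failed reset stores the terminal I915_WEDGED value so waiters can tell the three cases apart. The sketch below mimics that scheme with C11 atomics; the exact bit values and helper names are assumptions mirroring I915_RESET_IN_PROGRESS_FLAG and I915_WEDGED, not the driver's definitions.

#include <stdatomic.h>
#include <stdio.h>

#define RESET_IN_PROGRESS 0x1u	/* assumed layout: low bit = reset pending */
#define WEDGED            ~0u	/* assumed terminal value */

static atomic_uint reset_counter;

static void declare_hang(void)
{
	/* hangcheck path: mark a reset as pending, then kick the work item */
	atomic_fetch_or(&reset_counter, RESET_IN_PROGRESS);
}

static void reset_work(int reset_ok)
{
	if (reset_ok) {
		/* Order earlier state updates before the counter increment,
		 * in the spirit of the smp_mb__before_atomic_inc() above,
		 * then bump the counter so waiters see the reset completed. */
		atomic_thread_fence(memory_order_release);
		atomic_fetch_add(&reset_counter, 1);
	} else {
		atomic_store(&reset_counter, WEDGED);
	}
}

static const char *waiter_check(unsigned snapshot)
{
	unsigned now = atomic_load(&reset_counter);

	if (now == WEDGED)
		return "terminally wedged";
	if (now != snapshot)
		return "reset happened, restart the wait";
	return "still waiting";
}

int main(void)
{
	unsigned snap = atomic_load(&reset_counter);

	declare_hang();
	reset_work(1);
	printf("%s\n", waiter_check(snap));
	return 0;
}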
@@ -915,7 +976,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
goto unwind;
local_irq_save(flags);
- if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
+ if (reloc_offset < dev_priv->gtt.mappable_end &&
src->has_global_gtt_mapping) {
void __iomem *s;
@@ -924,10 +985,18 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
* captures what the GPU read.
*/
- s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
reloc_offset);
memcpy_fromio(d, s, PAGE_SIZE);
io_mapping_unmap_atomic(s);
+ } else if (src->stolen) {
+ unsigned long offset;
+
+ offset = dev_priv->mm.stolen_base;
+ offset += src->stolen->start;
+ offset += i << PAGE_SHIFT;
+
+ memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
} else {
struct page *page;
void *s;
@@ -1074,6 +1143,8 @@ static void i915_gem_record_fences(struct drm_device *dev,
error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
break;
+ default:
+ BUG();
}
}
@@ -1222,9 +1293,9 @@ static void i915_capture_error_state(struct drm_device *dev)
unsigned long flags;
int i, pipe;
- spin_lock_irqsave(&dev_priv->error_lock, flags);
- error = dev_priv->first_error;
- spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ error = dev_priv->gpu_error.first_error;
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
if (error)
return;
@@ -1235,7 +1306,8 @@ static void i915_capture_error_state(struct drm_device *dev)
return;
}
- DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
+	DRM_INFO("capturing error event; look for more information in "
+ "/sys/kernel/debug/dri/%d/i915_error_state\n",
dev->primary->index);
kref_init(&error->ref);
@@ -1318,12 +1390,12 @@ static void i915_capture_error_state(struct drm_device *dev)
error->overlay = intel_overlay_capture_error_state(dev);
error->display = intel_display_capture_error_state(dev);
- spin_lock_irqsave(&dev_priv->error_lock, flags);
- if (dev_priv->first_error == NULL) {
- dev_priv->first_error = error;
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ if (dev_priv->gpu_error.first_error == NULL) {
+ dev_priv->gpu_error.first_error = error;
error = NULL;
}
- spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
if (error)
i915_error_state_free(&error->ref);
@@ -1335,10 +1407,10 @@ void i915_destroy_error_state(struct drm_device *dev)
struct drm_i915_error_state *error;
unsigned long flags;
- spin_lock_irqsave(&dev_priv->error_lock, flags);
- error = dev_priv->first_error;
- dev_priv->first_error = NULL;
- spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ error = dev_priv->gpu_error.first_error;
+ dev_priv->gpu_error.first_error = NULL;
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
if (error)
kref_put(&error->ref, i915_error_state_free);
@@ -1459,17 +1531,18 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
i915_report_and_clear_eir(dev);
if (wedged) {
- INIT_COMPLETION(dev_priv->error_completion);
- atomic_set(&dev_priv->mm.wedged, 1);
+ atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
+ &dev_priv->gpu_error.reset_counter);
/*
- * Wakeup waiting processes so they don't hang
+ * Wakeup waiting processes so that the reset work item
+ * doesn't deadlock trying to grab various locks.
*/
for_each_ring(ring, dev_priv, i)
wake_up_all(&ring->irq_queue);
}
- queue_work(dev_priv->wq, &dev_priv->error_work);
+ queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
}
static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
@@ -1700,7 +1773,7 @@ static bool i915_hangcheck_hung(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- if (dev_priv->hangcheck_count++ > 1) {
+ if (dev_priv->gpu_error.hangcheck_count++ > 1) {
bool hung = true;
DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
@@ -1759,25 +1832,29 @@ void i915_hangcheck_elapsed(unsigned long data)
goto repeat;
}
- dev_priv->hangcheck_count = 0;
+ dev_priv->gpu_error.hangcheck_count = 0;
return;
}
i915_get_extra_instdone(dev, instdone);
- if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
- memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) {
+ if (memcmp(dev_priv->gpu_error.last_acthd, acthd,
+ sizeof(acthd)) == 0 &&
+ memcmp(dev_priv->gpu_error.prev_instdone, instdone,
+ sizeof(instdone)) == 0) {
if (i915_hangcheck_hung(dev))
return;
} else {
- dev_priv->hangcheck_count = 0;
+ dev_priv->gpu_error.hangcheck_count = 0;
- memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
- memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone));
+ memcpy(dev_priv->gpu_error.last_acthd, acthd,
+ sizeof(acthd));
+ memcpy(dev_priv->gpu_error.prev_instdone, instdone,
+ sizeof(instdone));
}
repeat:
/* Reset timer in case chip hangs without another request being added */
- mod_timer(&dev_priv->hangcheck_timer,
+ mod_timer(&dev_priv->gpu_error.hangcheck_timer,
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
@@ -1847,7 +1924,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
* This register is the same on all known PCH chips.
*/
-static void ironlake_enable_pch_hotplug(struct drm_device *dev)
+static void ibx_enable_hotplug(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 hotplug;
@@ -1860,14 +1937,36 @@ static void ironlake_enable_pch_hotplug(struct drm_device *dev)
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
+static void ibx_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 mask;
+
+ if (HAS_PCH_IBX(dev))
+ mask = SDE_HOTPLUG_MASK |
+ SDE_GMBUS |
+ SDE_AUX_MASK;
+ else
+ mask = SDE_HOTPLUG_MASK_CPT |
+ SDE_GMBUS_CPT |
+ SDE_AUX_MASK_CPT;
+
+ I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+ I915_WRITE(SDEIMR, ~mask);
+ I915_WRITE(SDEIER, mask);
+ POSTING_READ(SDEIER);
+
+ ibx_enable_hotplug(dev);
+}
+
static int ironlake_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable kind of interrupts always enabled */
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
- DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
+ DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
+ DE_AUX_CHANNEL_A;
u32 render_irqs;
- u32 hotplug_mask;
dev_priv->irq_mask = ~display_mask;
@@ -1895,27 +1994,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
I915_WRITE(GTIER, render_irqs);
POSTING_READ(GTIER);
- if (HAS_PCH_CPT(dev)) {
- hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
- SDE_PORTB_HOTPLUG_CPT |
- SDE_PORTC_HOTPLUG_CPT |
- SDE_PORTD_HOTPLUG_CPT);
- } else {
- hotplug_mask = (SDE_CRT_HOTPLUG |
- SDE_PORTB_HOTPLUG |
- SDE_PORTC_HOTPLUG |
- SDE_PORTD_HOTPLUG |
- SDE_AUX_MASK);
- }
-
- dev_priv->pch_irq_mask = ~hotplug_mask;
-
- I915_WRITE(SDEIIR, I915_READ(SDEIIR));
- I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
- I915_WRITE(SDEIER, hotplug_mask);
- POSTING_READ(SDEIER);
-
- ironlake_enable_pch_hotplug(dev);
+ ibx_irq_postinstall(dev);
if (IS_IRONLAKE_M(dev)) {
/* Clear & enable PCU event interrupts */
@@ -1935,9 +2014,9 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
DE_PLANEC_FLIP_DONE_IVB |
DE_PLANEB_FLIP_DONE_IVB |
- DE_PLANEA_FLIP_DONE_IVB;
+ DE_PLANEA_FLIP_DONE_IVB |
+ DE_AUX_CHANNEL_A_IVB;
u32 render_irqs;
- u32 hotplug_mask;
dev_priv->irq_mask = ~display_mask;
@@ -1961,18 +2040,7 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
I915_WRITE(GTIER, render_irqs);
POSTING_READ(GTIER);
- hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
- SDE_PORTB_HOTPLUG_CPT |
- SDE_PORTC_HOTPLUG_CPT |
- SDE_PORTD_HOTPLUG_CPT);
- dev_priv->pch_irq_mask = ~hotplug_mask;
-
- I915_WRITE(SDEIIR, I915_READ(SDEIIR));
- I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
- I915_WRITE(SDEIER, hotplug_mask);
- POSTING_READ(SDEIER);
-
- ironlake_enable_pch_hotplug(dev);
+ ibx_irq_postinstall(dev);
return 0;
}
@@ -1981,7 +2049,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask;
- u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
u32 render_irqs;
u16 msid;
@@ -2010,6 +2077,9 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
msid |= (1<<14);
pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ POSTING_READ(PORT_HOTPLUG_EN);
+
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IER, enable_mask);
I915_WRITE(VLV_IIR, 0xffffffff);
@@ -2018,6 +2088,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
POSTING_READ(VLV_IER);
i915_enable_pipestat(dev_priv, 0, pipestat_enable);
+ i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
i915_enable_pipestat(dev_priv, 1, pipestat_enable);
I915_WRITE(VLV_IIR, 0xffffffff);
@@ -2038,13 +2109,22 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
#endif
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+
+ return 0;
+}
+
+static void valleyview_hpd_irq_setup(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+
/* Note HDMI and DP share bits */
- if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIB_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
+ hotplug_en |= PORTB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
+ hotplug_en |= PORTC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
+ hotplug_en |= PORTD_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
@@ -2055,8 +2135,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
}
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
-
- return 0;
}
static void valleyview_irq_uninstall(struct drm_device *dev)
@@ -2286,6 +2364,9 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_USER_INTERRUPT;
if (I915_HAS_HOTPLUG(dev)) {
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ POSTING_READ(PORT_HOTPLUG_EN);
+
/* Enable in IER... */
enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
/* and unmask in IMR */
@@ -2296,15 +2377,25 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_WRITE(IER, enable_mask);
POSTING_READ(IER);
+ intel_opregion_enable_asle(dev);
+
+ return 0;
+}
+
+static void i915_hpd_irq_setup(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 hotplug_en;
+
if (I915_HAS_HOTPLUG(dev)) {
- u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
-
- if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIB_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+
+ if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
+ hotplug_en |= PORTB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
+ hotplug_en |= PORTC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
+ hotplug_en |= PORTD_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
@@ -2318,10 +2409,6 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}
-
- intel_opregion_enable_asle(dev);
-
- return 0;
}
static irqreturn_t i915_irq_handler(int irq, void *arg)
@@ -2481,7 +2568,6 @@ static void i965_irq_preinstall(struct drm_device * dev)
static int i965_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 hotplug_en;
u32 enable_mask;
u32 error_mask;
@@ -2502,6 +2588,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
+ i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
/*
* Enable some error detection, note the instruction error mask
@@ -2522,14 +2609,27 @@ static int i965_irq_postinstall(struct drm_device *dev)
I915_WRITE(IER, enable_mask);
POSTING_READ(IER);
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ POSTING_READ(PORT_HOTPLUG_EN);
+
+ intel_opregion_enable_asle(dev);
+
+ return 0;
+}
+
+static void i965_hpd_irq_setup(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 hotplug_en;
+
/* Note HDMI and DP share hotplug bits */
hotplug_en = 0;
- if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIB_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
+ hotplug_en |= PORTB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
+ hotplug_en |= PORTC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
+ hotplug_en |= PORTD_HOTPLUG_INT_EN;
if (IS_G4X(dev)) {
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
@@ -2556,10 +2656,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
/* Ignore TV since it's buggy */
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
-
- intel_opregion_enable_asle(dev);
-
- return 0;
}
static irqreturn_t i965_irq_handler(int irq, void *arg)
@@ -2655,6 +2751,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (blc_event || (iir & I915_ASLE_INTERRUPT))
intel_opregion_asle_intr(dev);
+ if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
+ gmbus_irq_handler(dev);
+
/* With MSI, interrupts are only generated when iir
* transitions from zero to nonzero. If another bit got
* set while we were handling the existing iir bits, then
@@ -2706,10 +2805,16 @@ void intel_irq_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
- INIT_WORK(&dev_priv->error_work, i915_error_work_func);
+ INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
+ setup_timer(&dev_priv->gpu_error.hangcheck_timer,
+ i915_hangcheck_elapsed,
+ (unsigned long) dev);
+
+ pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
@@ -2730,7 +2835,8 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->irq_uninstall = valleyview_irq_uninstall;
dev->driver->enable_vblank = valleyview_enable_vblank;
dev->driver->disable_vblank = valleyview_disable_vblank;
- } else if (IS_IVYBRIDGE(dev)) {
+ dev_priv->display.hpd_irq_setup = valleyview_hpd_irq_setup;
+ } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
/* Share pre & uninstall handlers with ILK/SNB */
dev->driver->irq_handler = ivybridge_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -2738,14 +2844,6 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->irq_uninstall = ironlake_irq_uninstall;
dev->driver->enable_vblank = ivybridge_enable_vblank;
dev->driver->disable_vblank = ivybridge_disable_vblank;
- } else if (IS_HASWELL(dev)) {
- /* Share interrupts handling with IVB */
- dev->driver->irq_handler = ivybridge_irq_handler;
- dev->driver->irq_preinstall = ironlake_irq_preinstall;
- dev->driver->irq_postinstall = ivybridge_irq_postinstall;
- dev->driver->irq_uninstall = ironlake_irq_uninstall;
- dev->driver->enable_vblank = ivybridge_enable_vblank;
- dev->driver->disable_vblank = ivybridge_disable_vblank;
} else if (HAS_PCH_SPLIT(dev)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -2764,13 +2862,23 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_uninstall;
dev->driver->irq_handler = i915_irq_handler;
+ dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else {
dev->driver->irq_preinstall = i965_irq_preinstall;
dev->driver->irq_postinstall = i965_irq_postinstall;
dev->driver->irq_uninstall = i965_irq_uninstall;
dev->driver->irq_handler = i965_irq_handler;
+ dev_priv->display.hpd_irq_setup = i965_hpd_irq_setup;
}
dev->driver->enable_vblank = i915_enable_vblank;
dev->driver->disable_vblank = i915_disable_vblank;
}
}
+
+void intel_hpd_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev);
+}
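
A minimal C sketch (not part of the patch) of how the relocated hotplug setup is meant to be used: intel_irq_init() fills in the per-platform display.hpd_irq_setup hook, and intel_hpd_init() arms PORT_HOTPLUG_EN through it after the IRQs are installed. The call site below is illustrative only; where the driver actually invokes intel_hpd_init() (post irq install, on resume, ...) is assumed.

	static int example_enable_display_irqs(struct drm_device *dev)
	{
		int ret;

		ret = drm_irq_install(dev);	/* runs the irq_postinstall hook */
		if (ret)
			return ret;

		intel_hpd_init(dev);		/* arms hotplug via hpd_irq_setup */
		return 0;
	}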
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 59afb7eb6db..527b664d343 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -141,8 +141,15 @@
#define VGA_MSR_MEM_EN (1<<1)
#define VGA_MSR_CGA_MODE (1<<0)
-#define VGA_SR_INDEX 0x3c4
-#define VGA_SR_DATA 0x3c5
+/*
+ * SR01 is the only VGA register touched on non-UMS setups.
+ * VLV doesn't do UMS, so the sequencer index/data registers
+ * are the only VGA registers which need to include
+ * display_mmio_offset.
+ */
+#define VGA_SR_INDEX (dev_priv->info->display_mmio_offset + 0x3c4)
+#define SR01 1
+#define VGA_SR_DATA (dev_priv->info->display_mmio_offset + 0x3c5)
#define VGA_AR_INDEX 0x3c0
#define VGA_AR_VID_EN (1<<5)
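
A short sketch of the register-macro pattern this hunk introduces, under the usual i915 conventions: macros that embed dev_priv->info->display_mmio_offset rely on a dev_priv variable being in scope at the I915_READ/I915_WRITE call site. On VLV the offset rebases the display range; on other parts it is 0, so the macros still resolve to the legacy addresses. The helper below is illustrative, not from the patch.

	static u32 example_read_pipeconf(struct drm_i915_private *dev_priv)
	{
		/* expands to 0x70008 + info->display_mmio_offset */
		return I915_READ(_PIPEACONF);
	}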
@@ -301,6 +308,7 @@
#define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20)
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
+#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
#define PIPE_CONTROL_CS_STALL (1<<20)
#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
#define PIPE_CONTROL_QW_WRITE (1<<14)
@@ -335,17 +343,19 @@
* 0x801c/3c: core clock bits
* 0x8048/68: low pass filter coefficients
* 0x8100: fast clock controls
+ *
+ * DPIO is VLV only.
*/
-#define DPIO_PKT 0x2100
+#define DPIO_PKT (VLV_DISPLAY_BASE + 0x2100)
#define DPIO_RID (0<<24)
#define DPIO_OP_WRITE (1<<16)
#define DPIO_OP_READ (0<<16)
#define DPIO_PORTID (0x12<<8)
#define DPIO_BYTE (0xf<<4)
#define DPIO_BUSY (1<<0) /* status only */
-#define DPIO_DATA 0x2104
-#define DPIO_REG 0x2108
-#define DPIO_CTL 0x2110
+#define DPIO_DATA (VLV_DISPLAY_BASE + 0x2104)
+#define DPIO_REG (VLV_DISPLAY_BASE + 0x2108)
+#define DPIO_CTL (VLV_DISPLAY_BASE + 0x2110)
#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
#define DPIO_SFR_BYPASS (1<<1)
@@ -556,13 +566,13 @@
#define IIR 0x020a4
#define IMR 0x020a8
#define ISR 0x020ac
-#define VLV_GUNIT_CLOCK_GATE 0x182060
+#define VLV_GUNIT_CLOCK_GATE (VLV_DISPLAY_BASE + 0x2060)
#define GCFG_DIS (1<<8)
-#define VLV_IIR_RW 0x182084
-#define VLV_IER 0x1820a0
-#define VLV_IIR 0x1820a4
-#define VLV_IMR 0x1820a8
-#define VLV_ISR 0x1820ac
+#define VLV_IIR_RW (VLV_DISPLAY_BASE + 0x2084)
+#define VLV_IER (VLV_DISPLAY_BASE + 0x20a0)
+#define VLV_IIR (VLV_DISPLAY_BASE + 0x20a4)
+#define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8)
+#define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac)
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
@@ -735,6 +745,7 @@
#define GEN7_FF_TS_SCHED_HS0 (0x3<<16)
#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16)
#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */
+#define GEN7_FF_VS_REF_CNT_FFME (1 << 15)
#define GEN7_FF_VS_SCHED_HS1 (0x5<<12)
#define GEN7_FF_VS_SCHED_HS0 (0x3<<12)
#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */
@@ -921,8 +932,8 @@
#define VGA1_PD_P1_DIV_2 (1 << 13)
#define VGA1_PD_P1_SHIFT 8
#define VGA1_PD_P1_MASK (0x1f << 8)
-#define _DPLL_A 0x06014
-#define _DPLL_B 0x06018
+#define _DPLL_A (dev_priv->info->display_mmio_offset + 0x6014)
+#define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018)
#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
#define DPLL_VCO_ENABLE (1 << 31)
#define DPLL_DVO_HIGH_SPEED (1 << 30)
@@ -943,23 +954,6 @@
#define DPLL_LOCK_VLV (1<<15)
#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
-#define SRX_INDEX 0x3c4
-#define SRX_DATA 0x3c5
-#define SR01 1
-#define SR01_SCREEN_OFF (1<<5)
-
-#define PPCR 0x61204
-#define PPCR_ON (1<<0)
-
-#define DVOB 0x61140
-#define DVOB_ON (1<<31)
-#define DVOC 0x61160
-#define DVOC_ON (1<<31)
-#define LVDS 0x61180
-#define LVDS_ON (1<<31)
-
-/* Scratch pad debug 0 reg:
- */
#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
/*
* The i830 generation, in LVDS mode, defines P1 as the bit number set within
@@ -998,7 +992,7 @@
#define SDVO_MULTIPLIER_MASK 0x000000ff
#define SDVO_MULTIPLIER_SHIFT_HIRES 4
#define SDVO_MULTIPLIER_SHIFT_VGA 0
-#define _DPLL_A_MD 0x0601c /* 965+ only */
+#define _DPLL_A_MD (dev_priv->info->display_mmio_offset + 0x601c) /* 965+ only */
/*
* UDI pixel divider, controlling how many pixels are stuffed into a packet.
*
@@ -1035,7 +1029,7 @@
*/
#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
-#define _DPLL_B_MD 0x06020 /* 965+ only */
+#define _DPLL_B_MD (dev_priv->info->display_mmio_offset + 0x6020) /* 965+ only */
#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
#define _FPA0 0x06040
@@ -1178,15 +1172,15 @@
#define RAMCLK_GATE_D 0x6210 /* CRL only */
#define DEUC 0x6214 /* CRL only */
-#define FW_BLC_SELF_VLV 0x6500
+#define FW_BLC_SELF_VLV (VLV_DISPLAY_BASE + 0x6500)
#define FW_CSPWRDWNEN (1<<15)
/*
* Palette regs
*/
-#define _PALETTE_A 0x0a000
-#define _PALETTE_B 0x0a800
+#define _PALETTE_A (dev_priv->info->display_mmio_offset + 0xa000)
+#define _PALETTE_B (dev_priv->info->display_mmio_offset + 0xa800)
#define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B)
/* MCH MMIO space */
@@ -1242,6 +1236,10 @@
#define MAD_DIMM_A_SIZE_SHIFT 0
#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT)
+/** snb MCH registers for priority tuning */
+#define MCH_SSKPD (MCHBAR_MIRROR_BASE_SNB + 0x5d10)
+#define MCH_SSKPD_WM0_MASK 0x3f
+#define MCH_SSKPD_WM0_VAL 0xc
/* Clocking configuration register */
#define CLKCFG 0x10c00
@@ -1551,26 +1549,26 @@
*/
/* Pipe A timing regs */
-#define _HTOTAL_A 0x60000
-#define _HBLANK_A 0x60004
-#define _HSYNC_A 0x60008
-#define _VTOTAL_A 0x6000c
-#define _VBLANK_A 0x60010
-#define _VSYNC_A 0x60014
-#define _PIPEASRC 0x6001c
-#define _BCLRPAT_A 0x60020
-#define _VSYNCSHIFT_A 0x60028
+#define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000)
+#define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004)
+#define _HSYNC_A (dev_priv->info->display_mmio_offset + 0x60008)
+#define _VTOTAL_A (dev_priv->info->display_mmio_offset + 0x6000c)
+#define _VBLANK_A (dev_priv->info->display_mmio_offset + 0x60010)
+#define _VSYNC_A (dev_priv->info->display_mmio_offset + 0x60014)
+#define _PIPEASRC (dev_priv->info->display_mmio_offset + 0x6001c)
+#define _BCLRPAT_A (dev_priv->info->display_mmio_offset + 0x60020)
+#define _VSYNCSHIFT_A (dev_priv->info->display_mmio_offset + 0x60028)
/* Pipe B timing regs */
-#define _HTOTAL_B 0x61000
-#define _HBLANK_B 0x61004
-#define _HSYNC_B 0x61008
-#define _VTOTAL_B 0x6100c
-#define _VBLANK_B 0x61010
-#define _VSYNC_B 0x61014
-#define _PIPEBSRC 0x6101c
-#define _BCLRPAT_B 0x61020
-#define _VSYNCSHIFT_B 0x61028
+#define _HTOTAL_B (dev_priv->info->display_mmio_offset + 0x61000)
+#define _HBLANK_B (dev_priv->info->display_mmio_offset + 0x61004)
+#define _HSYNC_B (dev_priv->info->display_mmio_offset + 0x61008)
+#define _VTOTAL_B (dev_priv->info->display_mmio_offset + 0x6100c)
+#define _VBLANK_B (dev_priv->info->display_mmio_offset + 0x61010)
+#define _VSYNC_B (dev_priv->info->display_mmio_offset + 0x61014)
+#define _PIPEBSRC (dev_priv->info->display_mmio_offset + 0x6101c)
+#define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020)
+#define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028)
#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
@@ -1631,13 +1629,10 @@
/* Hotplug control (945+ only) */
-#define PORT_HOTPLUG_EN 0x61110
-#define HDMIB_HOTPLUG_INT_EN (1 << 29)
-#define DPB_HOTPLUG_INT_EN (1 << 29)
-#define HDMIC_HOTPLUG_INT_EN (1 << 28)
-#define DPC_HOTPLUG_INT_EN (1 << 28)
-#define HDMID_HOTPLUG_INT_EN (1 << 27)
-#define DPD_HOTPLUG_INT_EN (1 << 27)
+#define PORT_HOTPLUG_EN (dev_priv->info->display_mmio_offset + 0x61110)
+#define PORTB_HOTPLUG_INT_EN (1 << 29)
+#define PORTC_HOTPLUG_INT_EN (1 << 28)
+#define PORTD_HOTPLUG_INT_EN (1 << 27)
#define SDVOB_HOTPLUG_INT_EN (1 << 26)
#define SDVOC_HOTPLUG_INT_EN (1 << 25)
#define TV_HOTPLUG_INT_EN (1 << 18)
@@ -1658,21 +1653,14 @@
#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
-#define PORT_HOTPLUG_STAT 0x61114
+#define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114)
/* HDMI/DP bits are gen4+ */
-#define DPB_HOTPLUG_LIVE_STATUS (1 << 29)
-#define DPC_HOTPLUG_LIVE_STATUS (1 << 28)
-#define DPD_HOTPLUG_LIVE_STATUS (1 << 27)
-#define DPD_HOTPLUG_INT_STATUS (3 << 21)
-#define DPC_HOTPLUG_INT_STATUS (3 << 19)
-#define DPB_HOTPLUG_INT_STATUS (3 << 17)
-/* HDMI bits are shared with the DP bits */
-#define HDMIB_HOTPLUG_LIVE_STATUS (1 << 29)
-#define HDMIC_HOTPLUG_LIVE_STATUS (1 << 28)
-#define HDMID_HOTPLUG_LIVE_STATUS (1 << 27)
-#define HDMID_HOTPLUG_INT_STATUS (3 << 21)
-#define HDMIC_HOTPLUG_INT_STATUS (3 << 19)
-#define HDMIB_HOTPLUG_INT_STATUS (3 << 17)
+#define PORTB_HOTPLUG_LIVE_STATUS (1 << 29)
+#define PORTC_HOTPLUG_LIVE_STATUS (1 << 28)
+#define PORTD_HOTPLUG_LIVE_STATUS (1 << 27)
+#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
+#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
+#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
/* CRT/TV common between gen3+ */
#define CRT_HOTPLUG_INT_STATUS (1 << 11)
#define TV_HOTPLUG_INT_STATUS (1 << 10)
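
With the HDMI/DP status bits unified under the PORT* names, a gen4+ "is something plugged into port B" check reads as below. This is an illustrative sketch only; the surrounding detect path is assumed.

	static bool example_port_b_connected(struct drm_i915_private *dev_priv)
	{
		return I915_READ(PORT_HOTPLUG_STAT) & PORTB_HOTPLUG_LIVE_STATUS;
	}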
@@ -1877,7 +1865,7 @@
#define PP_DIVISOR 0x61210
/* Panel fitting */
-#define PFIT_CONTROL 0x61230
+#define PFIT_CONTROL (dev_priv->info->display_mmio_offset + 0x61230)
#define PFIT_ENABLE (1 << 31)
#define PFIT_PIPE_MASK (3 << 29)
#define PFIT_PIPE_SHIFT 29
@@ -1895,9 +1883,7 @@
#define PFIT_SCALING_PROGRAMMED (1 << 26)
#define PFIT_SCALING_PILLAR (2 << 26)
#define PFIT_SCALING_LETTER (3 << 26)
-#define PFIT_PGM_RATIOS 0x61234
-#define PFIT_VERT_SCALE_MASK 0xfff00000
-#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
+#define PFIT_PGM_RATIOS (dev_priv->info->display_mmio_offset + 0x61234)
/* Pre-965 */
#define PFIT_VERT_SCALE_SHIFT 20
#define PFIT_VERT_SCALE_MASK 0xfff00000
@@ -1909,7 +1895,7 @@
#define PFIT_HORIZ_SCALE_SHIFT_965 0
#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff
-#define PFIT_AUTO_RATIOS 0x61238
+#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238)
/* Backlight control */
#define BLC_PWM_CTL2 0x61250 /* 965+ only */
@@ -2639,10 +2625,10 @@
/* Display & cursor control */
/* Pipe A */
-#define _PIPEADSL 0x70000
+#define _PIPEADSL (dev_priv->info->display_mmio_offset + 0x70000)
#define DSL_LINEMASK_GEN2 0x00000fff
#define DSL_LINEMASK_GEN3 0x00001fff
-#define _PIPEACONF 0x70008
+#define _PIPEACONF (dev_priv->info->display_mmio_offset + 0x70008)
#define PIPECONF_ENABLE (1<<31)
#define PIPECONF_DISABLE 0
#define PIPECONF_DOUBLE_WIDE (1<<30)
@@ -2671,18 +2657,19 @@
#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */
#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
-#define PIPECONF_BPP_MASK (0x000000e0)
-#define PIPECONF_BPP_8 (0<<5)
-#define PIPECONF_BPP_10 (1<<5)
-#define PIPECONF_BPP_6 (2<<5)
-#define PIPECONF_BPP_12 (3<<5)
+#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
+#define PIPECONF_BPC_MASK (0x7 << 5)
+#define PIPECONF_8BPC (0<<5)
+#define PIPECONF_10BPC (1<<5)
+#define PIPECONF_6BPC (2<<5)
+#define PIPECONF_12BPC (3<<5)
#define PIPECONF_DITHER_EN (1<<4)
#define PIPECONF_DITHER_TYPE_MASK (0x0000000c)
#define PIPECONF_DITHER_TYPE_SP (0<<2)
#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
-#define _PIPEASTAT 0x70024
+#define _PIPEASTAT (dev_priv->info->display_mmio_offset + 0x70024)
#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
#define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30)
#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
@@ -2693,7 +2680,7 @@
#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25)
#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
-#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<26)
+#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<22)
#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
@@ -2703,7 +2690,7 @@
#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
#define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15)
-#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<15)
+#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<14)
#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
@@ -2719,11 +2706,6 @@
#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
-#define PIPE_BPC_MASK (7 << 5) /* Ironlake */
-#define PIPE_8BPC (0 << 5)
-#define PIPE_10BPC (1 << 5)
-#define PIPE_6BPC (2 << 5)
-#define PIPE_12BPC (3 << 5)
#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
@@ -2732,7 +2714,7 @@
#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
-#define VLV_DPFLIPSTAT 0x70028
+#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028)
#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
#define PIPEB_HLINE_INT_EN (1<<28)
#define PIPEB_VBLANK_INT_EN (1<<27)
@@ -2746,7 +2728,7 @@
#define SPRITEA_FLIPDONE_INT_EN (1<<17)
#define PLANEA_FLIPDONE_INT_EN (1<<16)
-#define DPINVGTT 0x7002c /* VLV only */
+#define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV only */
#define CURSORB_INVALID_GTT_INT_EN (1<<23)
#define CURSORA_INVALID_GTT_INT_EN (1<<22)
#define SPRITED_INVALID_GTT_INT_EN (1<<21)
@@ -2774,7 +2756,7 @@
#define DSPARB_BEND_SHIFT 9 /* on 855 */
#define DSPARB_AEND_SHIFT 0
-#define DSPFW1 0x70034
+#define DSPFW1 (dev_priv->info->display_mmio_offset + 0x70034)
#define DSPFW_SR_SHIFT 23
#define DSPFW_SR_MASK (0x1ff<<23)
#define DSPFW_CURSORB_SHIFT 16
@@ -2782,11 +2764,11 @@
#define DSPFW_PLANEB_SHIFT 8
#define DSPFW_PLANEB_MASK (0x7f<<8)
#define DSPFW_PLANEA_MASK (0x7f)
-#define DSPFW2 0x70038
+#define DSPFW2 (dev_priv->info->display_mmio_offset + 0x70038)
#define DSPFW_CURSORA_MASK 0x00003f00
#define DSPFW_CURSORA_SHIFT 8
#define DSPFW_PLANEC_MASK (0x7f)
-#define DSPFW3 0x7003c
+#define DSPFW3 (dev_priv->info->display_mmio_offset + 0x7003c)
#define DSPFW_HPLL_SR_EN (1<<31)
#define DSPFW_CURSOR_SR_SHIFT 24
#define PINEVIEW_SELF_REFRESH_EN (1<<30)
@@ -2798,13 +2780,13 @@
/* drain latency register values*/
#define DRAIN_LATENCY_PRECISION_32 32
#define DRAIN_LATENCY_PRECISION_16 16
-#define VLV_DDL1 0x70050
+#define VLV_DDL1 (VLV_DISPLAY_BASE + 0x70050)
#define DDL_CURSORA_PRECISION_32 (1<<31)
#define DDL_CURSORA_PRECISION_16 (0<<31)
#define DDL_CURSORA_SHIFT 24
#define DDL_PLANEA_PRECISION_32 (1<<7)
#define DDL_PLANEA_PRECISION_16 (0<<7)
-#define VLV_DDL2 0x70054
+#define VLV_DDL2 (VLV_DISPLAY_BASE + 0x70054)
#define DDL_CURSORB_PRECISION_32 (1<<31)
#define DDL_CURSORB_PRECISION_16 (0<<31)
#define DDL_CURSORB_SHIFT 24
@@ -2948,10 +2930,10 @@
* } while (high1 != high2);
* frame = (high1 << 8) | low1;
*/
-#define _PIPEAFRAMEHIGH 0x70040
+#define _PIPEAFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x70040)
#define PIPE_FRAME_HIGH_MASK 0x0000ffff
#define PIPE_FRAME_HIGH_SHIFT 0
-#define _PIPEAFRAMEPIXEL 0x70044
+#define _PIPEAFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x70044)
#define PIPE_FRAME_LOW_MASK 0xff000000
#define PIPE_FRAME_LOW_SHIFT 24
#define PIPE_PIXEL_MASK 0x00ffffff
@@ -2962,11 +2944,12 @@
#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
/* Cursor A & B regs */
-#define _CURACNTR 0x70080
+#define _CURACNTR (dev_priv->info->display_mmio_offset + 0x70080)
/* Old style CUR*CNTR flags (desktop 8xx) */
#define CURSOR_ENABLE 0x80000000
#define CURSOR_GAMMA_ENABLE 0x40000000
#define CURSOR_STRIDE_MASK 0x30000000
+#define CURSOR_PIPE_CSC_ENABLE (1<<24)
#define CURSOR_FORMAT_SHIFT 24
#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT)
#define CURSOR_FORMAT_2C (0x00 << CURSOR_FORMAT_SHIFT)
@@ -2983,16 +2966,16 @@
#define MCURSOR_PIPE_A 0x00
#define MCURSOR_PIPE_B (1 << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
-#define _CURABASE 0x70084
-#define _CURAPOS 0x70088
+#define _CURABASE (dev_priv->info->display_mmio_offset + 0x70084)
+#define _CURAPOS (dev_priv->info->display_mmio_offset + 0x70088)
#define CURSOR_POS_MASK 0x007FF
#define CURSOR_POS_SIGN 0x8000
#define CURSOR_X_SHIFT 0
#define CURSOR_Y_SHIFT 16
#define CURSIZE 0x700a0
-#define _CURBCNTR 0x700c0
-#define _CURBBASE 0x700c4
-#define _CURBPOS 0x700c8
+#define _CURBCNTR (dev_priv->info->display_mmio_offset + 0x700c0)
+#define _CURBBASE (dev_priv->info->display_mmio_offset + 0x700c4)
+#define _CURBPOS (dev_priv->info->display_mmio_offset + 0x700c8)
#define _CURBCNTR_IVB 0x71080
#define _CURBBASE_IVB 0x71084
@@ -3007,7 +2990,7 @@
#define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB)
/* Display A control */
-#define _DSPACNTR 0x70180
+#define _DSPACNTR (dev_priv->info->display_mmio_offset + 0x70180)
#define DISPLAY_PLANE_ENABLE (1<<31)
#define DISPLAY_PLANE_DISABLE 0
#define DISPPLANE_GAMMA_ENABLE (1<<30)
@@ -3028,6 +3011,7 @@
#define DISPPLANE_RGBA888 (0xf<<26)
#define DISPPLANE_STEREO_ENABLE (1<<25)
#define DISPPLANE_STEREO_DISABLE 0
+#define DISPPLANE_PIPE_CSC_ENABLE (1<<24)
#define DISPPLANE_SEL_PIPE_SHIFT 24
#define DISPPLANE_SEL_PIPE_MASK (3<<DISPPLANE_SEL_PIPE_SHIFT)
#define DISPPLANE_SEL_PIPE_A 0
@@ -3040,14 +3024,14 @@
#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
#define DISPPLANE_TILED (1<<10)
-#define _DSPAADDR 0x70184
-#define _DSPASTRIDE 0x70188
-#define _DSPAPOS 0x7018C /* reserved */
-#define _DSPASIZE 0x70190
-#define _DSPASURF 0x7019C /* 965+ only */
-#define _DSPATILEOFF 0x701A4 /* 965+ only */
-#define _DSPAOFFSET 0x701A4 /* HSW */
-#define _DSPASURFLIVE 0x701AC
+#define _DSPAADDR (dev_priv->info->display_mmio_offset + 0x70184)
+#define _DSPASTRIDE (dev_priv->info->display_mmio_offset + 0x70188)
+#define _DSPAPOS (dev_priv->info->display_mmio_offset + 0x7018C) /* reserved */
+#define _DSPASIZE (dev_priv->info->display_mmio_offset + 0x70190)
+#define _DSPASURF (dev_priv->info->display_mmio_offset + 0x7019C) /* 965+ only */
+#define _DSPATILEOFF (dev_priv->info->display_mmio_offset + 0x701A4) /* 965+ only */
+#define _DSPAOFFSET (dev_priv->info->display_mmio_offset + 0x701A4) /* HSW */
+#define _DSPASURFLIVE (dev_priv->info->display_mmio_offset + 0x701AC)
#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
@@ -3068,44 +3052,44 @@
(I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg))))
/* VBIOS flags */
-#define SWF00 0x71410
-#define SWF01 0x71414
-#define SWF02 0x71418
-#define SWF03 0x7141c
-#define SWF04 0x71420
-#define SWF05 0x71424
-#define SWF06 0x71428
-#define SWF10 0x70410
-#define SWF11 0x70414
-#define SWF14 0x71420
-#define SWF30 0x72414
-#define SWF31 0x72418
-#define SWF32 0x7241c
+#define SWF00 (dev_priv->info->display_mmio_offset + 0x71410)
+#define SWF01 (dev_priv->info->display_mmio_offset + 0x71414)
+#define SWF02 (dev_priv->info->display_mmio_offset + 0x71418)
+#define SWF03 (dev_priv->info->display_mmio_offset + 0x7141c)
+#define SWF04 (dev_priv->info->display_mmio_offset + 0x71420)
+#define SWF05 (dev_priv->info->display_mmio_offset + 0x71424)
+#define SWF06 (dev_priv->info->display_mmio_offset + 0x71428)
+#define SWF10 (dev_priv->info->display_mmio_offset + 0x70410)
+#define SWF11 (dev_priv->info->display_mmio_offset + 0x70414)
+#define SWF14 (dev_priv->info->display_mmio_offset + 0x71420)
+#define SWF30 (dev_priv->info->display_mmio_offset + 0x72414)
+#define SWF31 (dev_priv->info->display_mmio_offset + 0x72418)
+#define SWF32 (dev_priv->info->display_mmio_offset + 0x7241c)
/* Pipe B */
-#define _PIPEBDSL 0x71000
-#define _PIPEBCONF 0x71008
-#define _PIPEBSTAT 0x71024
-#define _PIPEBFRAMEHIGH 0x71040
-#define _PIPEBFRAMEPIXEL 0x71044
+#define _PIPEBDSL (dev_priv->info->display_mmio_offset + 0x71000)
+#define _PIPEBCONF (dev_priv->info->display_mmio_offset + 0x71008)
+#define _PIPEBSTAT (dev_priv->info->display_mmio_offset + 0x71024)
+#define _PIPEBFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x71040)
+#define _PIPEBFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x71044)
#define _PIPEB_FRMCOUNT_GM45 0x71040
#define _PIPEB_FLIPCOUNT_GM45 0x71044
/* Display B control */
-#define _DSPBCNTR 0x71180
+#define _DSPBCNTR (dev_priv->info->display_mmio_offset + 0x71180)
#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
#define DISPPLANE_ALPHA_TRANS_DISABLE 0
#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
-#define _DSPBADDR 0x71184
-#define _DSPBSTRIDE 0x71188
-#define _DSPBPOS 0x7118C
-#define _DSPBSIZE 0x71190
-#define _DSPBSURF 0x7119C
-#define _DSPBTILEOFF 0x711A4
-#define _DSPBOFFSET 0x711A4
-#define _DSPBSURFLIVE 0x711AC
+#define _DSPBADDR (dev_priv->info->display_mmio_offset + 0x71184)
+#define _DSPBSTRIDE (dev_priv->info->display_mmio_offset + 0x71188)
+#define _DSPBPOS (dev_priv->info->display_mmio_offset + 0x7118C)
+#define _DSPBSIZE (dev_priv->info->display_mmio_offset + 0x71190)
+#define _DSPBSURF (dev_priv->info->display_mmio_offset + 0x7119C)
+#define _DSPBTILEOFF (dev_priv->info->display_mmio_offset + 0x711A4)
+#define _DSPBOFFSET (dev_priv->info->display_mmio_offset + 0x711A4)
+#define _DSPBSURFLIVE (dev_priv->info->display_mmio_offset + 0x711AC)
/* Sprite A control */
#define _DVSACNTR 0x72180
@@ -3116,6 +3100,7 @@
#define DVS_FORMAT_RGBX101010 (1<<25)
#define DVS_FORMAT_RGBX888 (2<<25)
#define DVS_FORMAT_RGBX161616 (3<<25)
+#define DVS_PIPE_CSC_ENABLE (1<<24)
#define DVS_SOURCE_KEY (1<<22)
#define DVS_RGB_ORDER_XBGR (1<<20)
#define DVS_YUV_BYTE_ORDER_MASK (3<<16)
@@ -3183,7 +3168,7 @@
#define SPRITE_FORMAT_RGBX161616 (3<<25)
#define SPRITE_FORMAT_YUV444 (4<<25)
#define SPRITE_FORMAT_XR_BGR101010 (5<<25) /* Extended range */
-#define SPRITE_CSC_ENABLE (1<<24)
+#define SPRITE_PIPE_CSC_ENABLE (1<<24)
#define SPRITE_SOURCE_KEY (1<<22)
#define SPRITE_RGB_ORDER_RGBX (1<<20) /* only for 888 and 161616 */
#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1<<19)
@@ -3254,6 +3239,8 @@
# define VGA_2X_MODE (1 << 30)
# define VGA_PIPE_B_SELECT (1 << 29)
+#define VLV_VGACNTRL (VLV_DISPLAY_BASE + 0x71400)
+
/* Ironlake */
#define CPU_VGACNTRL 0x41000
@@ -3294,41 +3281,41 @@
#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
-#define _PIPEA_DATA_M1 0x60030
+#define _PIPEA_DATA_M1 (dev_priv->info->display_mmio_offset + 0x60030)
#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
#define TU_SIZE_MASK 0x7e000000
#define PIPE_DATA_M1_OFFSET 0
-#define _PIPEA_DATA_N1 0x60034
+#define _PIPEA_DATA_N1 (dev_priv->info->display_mmio_offset + 0x60034)
#define PIPE_DATA_N1_OFFSET 0
-#define _PIPEA_DATA_M2 0x60038
+#define _PIPEA_DATA_M2 (dev_priv->info->display_mmio_offset + 0x60038)
#define PIPE_DATA_M2_OFFSET 0
-#define _PIPEA_DATA_N2 0x6003c
+#define _PIPEA_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6003c)
#define PIPE_DATA_N2_OFFSET 0
-#define _PIPEA_LINK_M1 0x60040
+#define _PIPEA_LINK_M1 (dev_priv->info->display_mmio_offset + 0x60040)
#define PIPE_LINK_M1_OFFSET 0
-#define _PIPEA_LINK_N1 0x60044
+#define _PIPEA_LINK_N1 (dev_priv->info->display_mmio_offset + 0x60044)
#define PIPE_LINK_N1_OFFSET 0
-#define _PIPEA_LINK_M2 0x60048
+#define _PIPEA_LINK_M2 (dev_priv->info->display_mmio_offset + 0x60048)
#define PIPE_LINK_M2_OFFSET 0
-#define _PIPEA_LINK_N2 0x6004c
+#define _PIPEA_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6004c)
#define PIPE_LINK_N2_OFFSET 0
/* PIPEB timing regs are same start from 0x61000 */
-#define _PIPEB_DATA_M1 0x61030
-#define _PIPEB_DATA_N1 0x61034
+#define _PIPEB_DATA_M1 (dev_priv->info->display_mmio_offset + 0x61030)
+#define _PIPEB_DATA_N1 (dev_priv->info->display_mmio_offset + 0x61034)
-#define _PIPEB_DATA_M2 0x61038
-#define _PIPEB_DATA_N2 0x6103c
+#define _PIPEB_DATA_M2 (dev_priv->info->display_mmio_offset + 0x61038)
+#define _PIPEB_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6103c)
-#define _PIPEB_LINK_M1 0x61040
-#define _PIPEB_LINK_N1 0x61044
+#define _PIPEB_LINK_M1 (dev_priv->info->display_mmio_offset + 0x61040)
+#define _PIPEB_LINK_N1 (dev_priv->info->display_mmio_offset + 0x61044)
-#define _PIPEB_LINK_M2 0x61048
-#define _PIPEB_LINK_N2 0x6104c
+#define _PIPEB_LINK_M2 (dev_priv->info->display_mmio_offset + 0x61048)
+#define _PIPEB_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6104c)
#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
@@ -3581,27 +3568,30 @@
#define PORTD_PULSE_DURATION_6ms (2 << 18)
#define PORTD_PULSE_DURATION_100ms (3 << 18)
#define PORTD_PULSE_DURATION_MASK (3 << 18)
-#define PORTD_HOTPLUG_NO_DETECT (0)
-#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
-#define PORTD_HOTPLUG_LONG_DETECT (1 << 17)
+#define PORTD_HOTPLUG_STATUS_MASK (0x3 << 16)
+#define PORTD_HOTPLUG_NO_DETECT (0 << 16)
+#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
+#define PORTD_HOTPLUG_LONG_DETECT (2 << 16)
#define PORTC_HOTPLUG_ENABLE (1 << 12)
#define PORTC_PULSE_DURATION_2ms (0)
#define PORTC_PULSE_DURATION_4_5ms (1 << 10)
#define PORTC_PULSE_DURATION_6ms (2 << 10)
#define PORTC_PULSE_DURATION_100ms (3 << 10)
#define PORTC_PULSE_DURATION_MASK (3 << 10)
-#define PORTC_HOTPLUG_NO_DETECT (0)
-#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
-#define PORTC_HOTPLUG_LONG_DETECT (1 << 9)
+#define PORTC_HOTPLUG_STATUS_MASK (0x3 << 8)
+#define PORTC_HOTPLUG_NO_DETECT (0 << 8)
+#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
+#define PORTC_HOTPLUG_LONG_DETECT (2 << 8)
#define PORTB_HOTPLUG_ENABLE (1 << 4)
#define PORTB_PULSE_DURATION_2ms (0)
#define PORTB_PULSE_DURATION_4_5ms (1 << 2)
#define PORTB_PULSE_DURATION_6ms (2 << 2)
#define PORTB_PULSE_DURATION_100ms (3 << 2)
#define PORTB_PULSE_DURATION_MASK (3 << 2)
-#define PORTB_HOTPLUG_NO_DETECT (0)
-#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
-#define PORTB_HOTPLUG_LONG_DETECT (1 << 1)
+#define PORTB_HOTPLUG_STATUS_MASK (0x3 << 0)
+#define PORTB_HOTPLUG_NO_DETECT (0 << 0)
+#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
+#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
#define PCH_GPIOA 0xc5010
#define PCH_GPIOB 0xc5014
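
Illustrative decode of the widened detect field, assuming `hotplug` was already read from the PCH digital port hotplug register: the long/short indication is now a two-bit code rather than two independent flag bits, so mask first and compare against the named values.

	static bool example_port_d_long_pulse(u32 hotplug)
	{
		return (hotplug & PORTD_HOTPLUG_STATUS_MASK) ==
			PORTD_HOTPLUG_LONG_DETECT;
	}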
@@ -3722,13 +3712,13 @@
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
-#define VLV_VIDEO_DIP_CTL_A 0x60200
-#define VLV_VIDEO_DIP_DATA_A 0x60208
-#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
+#define VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200)
+#define VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208)
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210)
-#define VLV_VIDEO_DIP_CTL_B 0x61170
-#define VLV_VIDEO_DIP_DATA_B 0x61174
-#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178
+#define VLV_VIDEO_DIP_CTL_B (VLV_DISPLAY_BASE + 0x61170)
+#define VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174)
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178)
#define VLV_TVIDEO_DIP_CTL(pipe) \
_PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
@@ -3820,8 +3810,6 @@
#define TRANS_FSYNC_DELAY_HB2 (1<<27)
#define TRANS_FSYNC_DELAY_HB3 (2<<27)
#define TRANS_FSYNC_DELAY_HB4 (3<<27)
-#define TRANS_DP_AUDIO_ONLY (1<<26)
-#define TRANS_DP_VIDEO_AUDIO (0<<26)
#define TRANS_INTERLACE_MASK (7<<21)
#define TRANS_PROGRESSIVE (0<<21)
#define TRANS_INTERLACED (3<<21)
@@ -3927,7 +3915,7 @@
#define FDI_10BPC (1<<16)
#define FDI_6BPC (2<<16)
#define FDI_12BPC (3<<16)
-#define FDI_LINK_REVERSE_OVERWRITE (1<<15)
+#define FDI_RX_LINK_REVERSAL_OVERRIDE (1<<15)
#define FDI_DMI_LINK_REVERSE_MASK (1<<14)
#define FDI_RX_PLL_ENABLE (1<<13)
#define FDI_FS_ERR_CORRECT_ENABLE (1<<11)
@@ -4020,17 +4008,17 @@
#define LVDS_DETECTED (1 << 1)
/* vlv has 2 sets of panel control regs. */
-#define PIPEA_PP_STATUS 0x61200
-#define PIPEA_PP_CONTROL 0x61204
-#define PIPEA_PP_ON_DELAYS 0x61208
-#define PIPEA_PP_OFF_DELAYS 0x6120c
-#define PIPEA_PP_DIVISOR 0x61210
-
-#define PIPEB_PP_STATUS 0x61300
-#define PIPEB_PP_CONTROL 0x61304
-#define PIPEB_PP_ON_DELAYS 0x61308
-#define PIPEB_PP_OFF_DELAYS 0x6130c
-#define PIPEB_PP_DIVISOR 0x61310
+#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200)
+#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204)
+#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208)
+#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c)
+#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210)
+
+#define PIPEB_PP_STATUS (VLV_DISPLAY_BASE + 0x61300)
+#define PIPEB_PP_CONTROL (VLV_DISPLAY_BASE + 0x61304)
+#define PIPEB_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61308)
+#define PIPEB_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6130c)
+#define PIPEB_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61310)
#define PCH_PP_STATUS 0xc7200
#define PCH_PP_CONTROL 0xc7204
@@ -4211,7 +4199,9 @@
#define GEN6_RP_INTERRUPT_LIMITS 0xA014
#define GEN6_RPSTAT1 0xA01C
#define GEN6_CAGF_SHIFT 8
+#define HSW_CAGF_SHIFT 7
#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
+#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT)
#define GEN6_RP_CONTROL 0xA024
#define GEN6_RP_MEDIA_TURBO (1<<11)
#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
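
The current-frequency (CAGF) field moves on Haswell, so readers of GEN6_RPSTAT1 pick the shift/mask per platform. A minimal sketch, not from the patch:

	static u8 example_read_cagf(struct drm_device *dev, u32 rpstat)
	{
		if (IS_HASWELL(dev))
			return (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		return (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
	}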
@@ -4280,8 +4270,8 @@
#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
#define GEN6_PCODE_WRITE_RC6VIDS 0x4
#define GEN6_PCODE_READ_RC6VIDS 0x5
-#define GEN6_ENCODE_RC6_VID(mv) (((mv) / 5) - 245) < 0 ?: 0
-#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0)
+#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
+#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
#define GEN6_PCODE_DATA 0x138128
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
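
Worked example (not in the patch): the corrected RC6 VID macros are inverses over the valid range, e.g. for a 450 mV VID.

	/* GEN6_ENCODE_RC6_VID(450) = (450 - 245) / 5 = 41
	 * GEN6_DECODE_RC6_VID(41)  = 41 * 5 + 245    = 450
	 * The old definitions mixed the conversion with a bogus clamp
	 * expression and never evaluated to the intended value. */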
@@ -4322,7 +4312,7 @@
#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
#define DOP_CLOCK_GATING_DISABLE (1<<0)
-#define G4X_AUD_VID_DID 0x62020
+#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020)
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
#define INTEL_AUDIO_DEVCTG 0x80862802
@@ -4438,10 +4428,10 @@
#define AUDIO_CP_READY_C (1<<9)
/* HSW Power Wells */
-#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */
-#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */
-#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */
-#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */
+#define HSW_PWR_WELL_BIOS 0x45400 /* CTL1 */
+#define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */
+#define HSW_PWR_WELL_KVMR 0x45408 /* CTL3 */
+#define HSW_PWR_WELL_DEBUG 0x4540C /* CTL4 */
#define HSW_PWR_WELL_ENABLE (1<<31)
#define HSW_PWR_WELL_STATE (1<<30)
#define HSW_PWR_WELL_CTL5 0x45410
@@ -4524,6 +4514,7 @@
#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
#define DDI_BUF_EMP_MASK (0xf<<24)
+#define DDI_BUF_PORT_REVERSAL (1<<16)
#define DDI_BUF_IS_IDLE (1<<7)
#define DDI_A_4_LANES (1<<4)
#define DDI_PORT_WIDTH_X1 (0<<1)
@@ -4657,4 +4648,51 @@
#define WM_DBG_DISALLOW_MAXFIFO (1<<1)
#define WM_DBG_DISALLOW_SPRITE (1<<2)
+/* pipe CSC */
+#define _PIPE_A_CSC_COEFF_RY_GY 0x49010
+#define _PIPE_A_CSC_COEFF_BY 0x49014
+#define _PIPE_A_CSC_COEFF_RU_GU 0x49018
+#define _PIPE_A_CSC_COEFF_BU 0x4901c
+#define _PIPE_A_CSC_COEFF_RV_GV 0x49020
+#define _PIPE_A_CSC_COEFF_BV 0x49024
+#define _PIPE_A_CSC_MODE 0x49028
+#define _PIPE_A_CSC_PREOFF_HI 0x49030
+#define _PIPE_A_CSC_PREOFF_ME 0x49034
+#define _PIPE_A_CSC_PREOFF_LO 0x49038
+#define _PIPE_A_CSC_POSTOFF_HI 0x49040
+#define _PIPE_A_CSC_POSTOFF_ME 0x49044
+#define _PIPE_A_CSC_POSTOFF_LO 0x49048
+
+#define _PIPE_B_CSC_COEFF_RY_GY 0x49110
+#define _PIPE_B_CSC_COEFF_BY 0x49114
+#define _PIPE_B_CSC_COEFF_RU_GU 0x49118
+#define _PIPE_B_CSC_COEFF_BU 0x4911c
+#define _PIPE_B_CSC_COEFF_RV_GV 0x49120
+#define _PIPE_B_CSC_COEFF_BV 0x49124
+#define _PIPE_B_CSC_MODE 0x49128
+#define _PIPE_B_CSC_PREOFF_HI 0x49130
+#define _PIPE_B_CSC_PREOFF_ME 0x49134
+#define _PIPE_B_CSC_PREOFF_LO 0x49138
+#define _PIPE_B_CSC_POSTOFF_HI 0x49140
+#define _PIPE_B_CSC_POSTOFF_ME 0x49144
+#define _PIPE_B_CSC_POSTOFF_LO 0x49148
+
+#define CSC_BLACK_SCREEN_OFFSET (1 << 2)
+#define CSC_POSITION_BEFORE_GAMMA (1 << 1)
+#define CSC_MODE_YUV_TO_RGB (1 << 0)
+
+#define PIPE_CSC_COEFF_RY_GY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RY_GY, _PIPE_B_CSC_COEFF_RY_GY)
+#define PIPE_CSC_COEFF_BY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BY, _PIPE_B_CSC_COEFF_BY)
+#define PIPE_CSC_COEFF_RU_GU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RU_GU, _PIPE_B_CSC_COEFF_RU_GU)
+#define PIPE_CSC_COEFF_BU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BU, _PIPE_B_CSC_COEFF_BU)
+#define PIPE_CSC_COEFF_RV_GV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RV_GV, _PIPE_B_CSC_COEFF_RV_GV)
+#define PIPE_CSC_COEFF_BV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BV, _PIPE_B_CSC_COEFF_BV)
+#define PIPE_CSC_MODE(pipe) _PIPE(pipe, _PIPE_A_CSC_MODE, _PIPE_B_CSC_MODE)
+#define PIPE_CSC_PREOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_HI, _PIPE_B_CSC_PREOFF_HI)
+#define PIPE_CSC_PREOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_ME, _PIPE_B_CSC_PREOFF_ME)
+#define PIPE_CSC_PREOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_LO, _PIPE_B_CSC_PREOFF_LO)
+#define PIPE_CSC_POSTOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_HI, _PIPE_B_CSC_POSTOFF_HI)
+#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
+#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
+
#endif /* _I915_REG_H_ */
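
A minimal sketch of using the new per-pipe CSC registers, assuming the usual I915_WRITE/dev_priv conventions: the A/B instances pair up through _PIPE(), so a pass-through setup (zero offsets, CSC placed before gamma) could look like the helper below. Coefficient programming is omitted; the fixed-point coefficient format is not described by this patch.

	static void example_pipe_csc_passthrough(struct drm_i915_private *dev_priv,
						 enum pipe pipe)
	{
		I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
		I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
		I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), 0);
		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), 0);
		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), 0);
		I915_WRITE(PIPE_CSC_MODE(pipe), CSC_POSITION_BEFORE_GAMMA);
	}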
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 63d4d30c39d..2135f21ea45 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -29,67 +29,6 @@
#include "intel_drv.h"
#include "i915_reg.h"
-static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpll_reg;
-
- /* On IVB, 3rd pipe shares PLL with another one */
- if (pipe > 1)
- return false;
-
- if (HAS_PCH_SPLIT(dev))
- dpll_reg = _PCH_DPLL(pipe);
- else
- dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
-
- return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
-}
-
-static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
- u32 *array;
- int i;
-
- if (!i915_pipe_enabled(dev, pipe))
- return;
-
- if (HAS_PCH_SPLIT(dev))
- reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
-
- if (pipe == PIPE_A)
- array = dev_priv->regfile.save_palette_a;
- else
- array = dev_priv->regfile.save_palette_b;
-
- for (i = 0; i < 256; i++)
- array[i] = I915_READ(reg + (i << 2));
-}
-
-static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
- u32 *array;
- int i;
-
- if (!i915_pipe_enabled(dev, pipe))
- return;
-
- if (HAS_PCH_SPLIT(dev))
- reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
-
- if (pipe == PIPE_A)
- array = dev_priv->regfile.save_palette_a;
- else
- array = dev_priv->regfile.save_palette_b;
-
- for (i = 0; i < 256; i++)
- I915_WRITE(reg + (i << 2), array[i]);
-}
-
static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -130,6 +69,12 @@ static void i915_save_vga(struct drm_device *dev)
int i;
u16 cr_index, cr_data, st01;
+ /* VGA state */
+ dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
+ dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
+ dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
+ dev_priv->regfile.saveVGACNTRL = I915_READ(i915_vgacntrl_reg(dev));
+
/* VGA color palette registers */
dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK);
@@ -188,6 +133,15 @@ static void i915_restore_vga(struct drm_device *dev)
int i;
u16 cr_index, cr_data, st01;
+ /* VGA state */
+ I915_WRITE(i915_vgacntrl_reg(dev), dev_priv->regfile.saveVGACNTRL);
+
+ I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
+ I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
+ I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
+ POSTING_READ(VGA_PD);
+ udelay(150);
+
/* MSR bits */
I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR);
if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
@@ -235,396 +189,18 @@ static void i915_restore_vga(struct drm_device *dev)
I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK);
}
-static void i915_save_modeset_reg(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- /* Cursor state */
- dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
- dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
- dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
- dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
- dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
- dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
- if (IS_GEN2(dev))
- dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
-
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
- dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
- }
-
- /* Pipe & plane A info */
- dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
- dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
- dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
- dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
- } else {
- dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
- dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
- dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
- }
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
- dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
- dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
- dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
- dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
- dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
- dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
- if (!HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
-
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
- dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
- dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
- dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
-
- dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
- dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
-
- dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
- dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
- dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
-
- dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF);
- dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
- dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
- dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
- dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
- dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
- dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
- }
-
- dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
- dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
- dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
- dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
- dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
- if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
- dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
- }
- i915_save_palette(dev, PIPE_A);
- dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
-
- /* Pipe & plane B info */
- dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
- dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
- dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
- dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
- } else {
- dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
- dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
- dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
- }
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
- dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
- dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
- dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
- dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
- dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
- dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
- if (!HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
-
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
- dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
- dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
- dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
-
- dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
- dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
-
- dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
- dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
- dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
-
- dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF);
- dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
- dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
- dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
- dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
- dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
- dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
- }
-
- dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
- dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
- dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
- dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
- dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
- if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
- dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
- }
- i915_save_palette(dev, PIPE_B);
- dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
-
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- for (i = 0; i < 16; i++)
- dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
- break;
- case 3:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
- case 2:
- for (i = 0; i < 8; i++)
- dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
- break;
- }
-
- /* CRT state */
- if (HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
- else
- dev_priv->regfile.saveADPA = I915_READ(ADPA);
-
- return;
-}
-
-static void i915_restore_modeset_reg(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int dpll_a_reg, fpa0_reg, fpa1_reg;
- int dpll_b_reg, fpb0_reg, fpb1_reg;
- int i;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
- break;
- case 3:
- case 2:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
- break;
- }
-
-
- if (HAS_PCH_SPLIT(dev)) {
- dpll_a_reg = _PCH_DPLL_A;
- dpll_b_reg = _PCH_DPLL_B;
- fpa0_reg = _PCH_FPA0;
- fpb0_reg = _PCH_FPB0;
- fpa1_reg = _PCH_FPA1;
- fpb1_reg = _PCH_FPB1;
- } else {
- dpll_a_reg = _DPLL_A;
- dpll_b_reg = _DPLL_B;
- fpa0_reg = _FPA0;
- fpb0_reg = _FPB0;
- fpa1_reg = _FPA1;
- fpb1_reg = _FPB1;
- }
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
- I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
- }
-
- /* Pipe & plane A info */
- /* Prime the clock */
- if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
- I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
- ~DPLL_VCO_ENABLE);
- POSTING_READ(dpll_a_reg);
- udelay(150);
- }
- I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
- I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
- /* Actually enable it */
- I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
- POSTING_READ(dpll_a_reg);
- udelay(150);
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
- POSTING_READ(_DPLL_A_MD);
- }
- udelay(150);
-
- /* Restore mode */
- I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
- I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
- I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
- I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
- I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
- I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
- if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
- I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
- I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
- I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
-
- I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
- I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
-
- I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
- I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
- I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
-
- I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
- I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
- I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
- I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
- I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
- I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
- I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
- }
-
- /* Restore plane info */
- I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
- I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
- I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
- I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
- I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
- if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
- I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
- }
-
- I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
-
- i915_restore_palette(dev, PIPE_A);
- /* Enable the plane */
- I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
- I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
-
- /* Pipe & plane B info */
- if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
- I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
- ~DPLL_VCO_ENABLE);
- POSTING_READ(dpll_b_reg);
- udelay(150);
- }
- I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
- I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
- /* Actually enable it */
- I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
- POSTING_READ(dpll_b_reg);
- udelay(150);
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
- POSTING_READ(_DPLL_B_MD);
- }
- udelay(150);
-
- /* Restore mode */
- I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
- I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
- I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
- I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
- I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
- I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
- if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
- I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
- I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
- I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
-
- I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
- I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
-
- I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
- I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
- I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
-
- I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
- I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
- I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
- I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
- I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
- I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
- I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
- }
-
- /* Restore plane info */
- I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
- I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
- I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
- I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
- I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
- if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
- I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
- }
-
- I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
-
- i915_restore_palette(dev, PIPE_B);
- /* Enable the plane */
- I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
- I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
-
- /* Cursor state */
- I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
- I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
- I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
- I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
- I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
- I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
- if (IS_GEN2(dev))
- I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
-
- /* CRT state */
- if (HAS_PCH_SPLIT(dev))
- I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
- else
- I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
-
- return;
-}
-
static void i915_save_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/* Display arbitration control */
- dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
+ if (INTEL_INFO(dev)->gen <= 4)
+ dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
/* This is only meaningful in non-KMS mode */
/* Don't regfile.save them in KMS mode */
- i915_save_modeset_reg(dev);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_save_display_reg(dev);
/* LVDS state */
if (HAS_PCH_SPLIT(dev)) {
@@ -658,24 +234,6 @@ static void i915_save_display(struct drm_device *dev)
dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
}
- if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Display Port state */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- dev_priv->regfile.saveDP_B = I915_READ(DP_B);
- dev_priv->regfile.saveDP_C = I915_READ(DP_C);
- dev_priv->regfile.saveDP_D = I915_READ(DP_D);
- dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
- dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
- dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
- dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
- dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
- dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
- dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
- dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
- }
- /* FIXME: regfile.save TV & SDVO state */
- }
-
/* Only regfile.save FBC state on the platform that supports FBC */
if (I915_HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
@@ -690,16 +248,8 @@ static void i915_save_display(struct drm_device *dev)
}
}
- /* VGA state */
- dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
- dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
- dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
- if (HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveVGACNTRL = I915_READ(CPU_VGACNTRL);
- else
- dev_priv->regfile.saveVGACNTRL = I915_READ(VGACNTRL);
-
- i915_save_vga(dev);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_save_vga(dev);
}
static void i915_restore_display(struct drm_device *dev)
@@ -707,25 +257,11 @@ static void i915_restore_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* Display arbitration */
- I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
+ if (INTEL_INFO(dev)->gen <= 4)
+ I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
- if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Display port ratios (must be done before clock is set) */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
- I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
- I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
- I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
- I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M);
- I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M);
- I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N);
- I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N);
- }
- }
-
- /* This is only meaningful in non-KMS mode */
- /* Don't restore them in KMS mode */
- i915_restore_modeset_reg(dev);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_restore_display_reg(dev);
/* LVDS state */
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
@@ -763,16 +299,6 @@ static void i915_restore_display(struct drm_device *dev)
I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
}
- if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Display Port state */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
- I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
- I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
- }
- /* FIXME: restore TV & SDVO state */
- }
-
/* only restore FBC info on the platform that supports FBC*/
intel_disable_fbc(dev);
if (I915_HAS_FBC(dev)) {
@@ -787,19 +313,11 @@ static void i915_restore_display(struct drm_device *dev)
I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
}
}
- /* VGA state */
- if (HAS_PCH_SPLIT(dev))
- I915_WRITE(CPU_VGACNTRL, dev_priv->regfile.saveVGACNTRL);
- else
- I915_WRITE(VGACNTRL, dev_priv->regfile.saveVGACNTRL);
- I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
- I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
- I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
- POSTING_READ(VGA_PD);
- udelay(150);
-
- i915_restore_vga(dev);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_restore_vga(dev);
+ else
+ i915_redisable_vga(dev);
}
int i915_save_state(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c
new file mode 100644
index 00000000000..985a0971623
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_ums.c
@@ -0,0 +1,503 @@
+/*
+ *
+ * Copyright 2008 (c) Intel Corporation
+ * Jesse Barnes <jbarnes@virtuousgeek.org>
+ * Copyright 2013 (c) Intel Corporation
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "intel_drv.h"
+#include "i915_reg.h"
+
+static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpll_reg;
+
+ /* On IVB, 3rd pipe shares PLL with another one */
+ if (pipe > 1)
+ return false;
+
+ if (HAS_PCH_SPLIT(dev))
+ dpll_reg = _PCH_DPLL(pipe);
+ else
+ dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
+
+ return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
+}
+
+static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
+ u32 *array;
+ int i;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return;
+
+ if (HAS_PCH_SPLIT(dev))
+ reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
+
+ if (pipe == PIPE_A)
+ array = dev_priv->regfile.save_palette_a;
+ else
+ array = dev_priv->regfile.save_palette_b;
+
+ for (i = 0; i < 256; i++)
+ array[i] = I915_READ(reg + (i << 2));
+}
+
+static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
+ u32 *array;
+ int i;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return;
+
+ if (HAS_PCH_SPLIT(dev))
+ reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
+
+ if (pipe == PIPE_A)
+ array = dev_priv->regfile.save_palette_a;
+ else
+ array = dev_priv->regfile.save_palette_b;
+
+ for (i = 0; i < 256; i++)
+ I915_WRITE(reg + (i << 2), array[i]);
+}
+
+void i915_save_display_reg(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ /* Cursor state */
+ dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
+ dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
+ dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
+ dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
+ dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
+ dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
+ if (IS_GEN2(dev))
+ dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
+ dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
+ }
+
+ /* Pipe & plane A info */
+ dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
+ dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
+ dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
+ dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
+ } else {
+ dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
+ dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
+ dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
+ }
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
+ dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
+ dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
+ dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
+ dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
+ dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
+ dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
+ if (!HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
+ dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
+ dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
+ dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
+
+ dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
+ dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
+
+ dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
+ dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
+ dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
+
+ dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF);
+ dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
+ dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
+ dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
+ dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
+ dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
+ dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
+ }
+
+ dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
+ dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
+ dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
+ dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
+ dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
+ dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
+ }
+ i915_save_palette(dev, PIPE_A);
+ dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
+
+ /* Pipe & plane B info */
+ dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
+ dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
+ dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
+ dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
+ } else {
+ dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
+ dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
+ dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
+ }
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
+ dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
+ dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
+ dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
+ dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
+ dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
+ dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
+ if (!HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
+ dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
+ dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
+ dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
+
+ dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
+ dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
+
+ dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
+ dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
+ dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
+
+ dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF);
+ dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
+ dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
+ dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
+ dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
+ dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
+ dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
+ }
+
+ dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
+ dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
+ dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
+ dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
+ dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
+ dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
+ }
+ i915_save_palette(dev, PIPE_B);
+ dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ for (i = 0; i < 16; i++)
+ dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ break;
+ case 3:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
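+ /* fall through: gen3 also has the 8 fence registers at FENCE_REG_830_0 */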
+ case 2:
+ for (i = 0; i < 8; i++)
+ dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ break;
+ }
+
+ /* CRT state */
+ if (HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
+ else
+ dev_priv->regfile.saveADPA = I915_READ(ADPA);
+
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ dev_priv->regfile.saveDP_B = I915_READ(DP_B);
+ dev_priv->regfile.saveDP_C = I915_READ(DP_C);
+ dev_priv->regfile.saveDP_D = I915_READ(DP_D);
+ dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
+ dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
+ dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
+ dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
+ dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
+ dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
+ dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
+ dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
+ }
+ /* FIXME: save TV & SDVO state */
+
+ return;
+}
+
+void i915_restore_display_reg(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int dpll_a_reg, fpa0_reg, fpa1_reg;
+ int dpll_b_reg, fpb0_reg, fpb1_reg;
+ int i;
+
+ /* Display port ratios (must be done before clock is set) */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
+ I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
+ I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
+ I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
+ I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M);
+ I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M);
+ I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N);
+ I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N);
+ }
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
+ break;
+ case 3:
+ case 2:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
+ break;
+ }
+
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dpll_a_reg = _PCH_DPLL_A;
+ dpll_b_reg = _PCH_DPLL_B;
+ fpa0_reg = _PCH_FPA0;
+ fpb0_reg = _PCH_FPB0;
+ fpa1_reg = _PCH_FPA1;
+ fpb1_reg = _PCH_FPB1;
+ } else {
+ dpll_a_reg = _DPLL_A;
+ dpll_b_reg = _DPLL_B;
+ fpa0_reg = _FPA0;
+ fpb0_reg = _FPB0;
+ fpa1_reg = _FPA1;
+ fpb1_reg = _FPB1;
+ }
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
+ I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
+ }
+
+ /* Pipe & plane A info */
+ /* Prime the clock */
+ if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
+ I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
+ ~DPLL_VCO_ENABLE);
+ POSTING_READ(dpll_a_reg);
+ udelay(150);
+ }
+ I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
+ I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
+ /* Actually enable it */
+ I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
+ POSTING_READ(dpll_a_reg);
+ udelay(150);
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
+ POSTING_READ(_DPLL_A_MD);
+ }
+ udelay(150);
+
+ /* Restore mode */
+ I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
+ I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
+ I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
+ I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
+ I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
+ I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
+ if (!HAS_PCH_SPLIT(dev))
+ I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
+ I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
+ I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
+ I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
+
+ I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
+ I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
+
+ I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
+ I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
+ I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
+
+ I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
+ I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
+ I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
+ I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
+ I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
+ I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
+ I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
+ }
+
+ /* Restore plane info */
+ I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
+ I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
+ I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
+ I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
+ I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
+ I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
+ }
+
+ I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
+
+ i915_restore_palette(dev, PIPE_A);
+ /* Enable the plane */
+ I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
+ I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
+
+ /* Pipe & plane B info */
+ if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
+ I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
+ ~DPLL_VCO_ENABLE);
+ POSTING_READ(dpll_b_reg);
+ udelay(150);
+ }
+ I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
+ I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
+ /* Actually enable it */
+ I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
+ POSTING_READ(dpll_b_reg);
+ udelay(150);
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
+ POSTING_READ(_DPLL_B_MD);
+ }
+ udelay(150);
+
+ /* Restore mode */
+ I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
+ I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
+ I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
+ I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
+ I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
+ I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
+ if (!HAS_PCH_SPLIT(dev))
+ I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
+ I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
+ I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
+ I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
+
+ I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
+ I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
+
+ I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
+ I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
+ I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
+
+ I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
+ I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
+ I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
+ I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
+ I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
+ I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
+ I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
+ }
+
+ /* Restore plane info */
+ I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
+ I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
+ I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
+ I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
+ I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
+ I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
+ }
+
+ I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
+
+ i915_restore_palette(dev, PIPE_B);
+ /* Enable the plane */
+ I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
+ I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
+
+ /* Cursor state */
+ I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
+ I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
+ I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
+ I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
+ I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
+ I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
+ if (IS_GEN2(dev))
+ I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
+
+ /* CRT state */
+ if (HAS_PCH_SPLIT(dev))
+ I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
+ else
+ I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
+
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
+ I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
+ I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
+ }
+ /* FIXME: restore TV & SDVO state */
+
+ return;
+}
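The palette helpers near the top of this new file save and restore 256 DWORD gamma entries that sit back to back from the base palette register, one entry every 4 bytes (reg + (i << 2)). A minimal user-space sketch of that round trip, with the MMIO accessors stubbed out as a plain array; the buffer layout, helper names and main() below are illustrative only, not the driver's API:

#include <stdint.h>
#include <stdio.h>

#define PALETTE_ENTRIES 256

/* Stand-in for the register window; the driver uses I915_READ/I915_WRITE. */
static uint32_t fake_mmio[PALETTE_ENTRIES];

static uint32_t mmio_read(uint32_t reg)            { return fake_mmio[reg >> 2]; }
static void mmio_write(uint32_t reg, uint32_t val) { fake_mmio[reg >> 2] = val; }

/* Suspend path: copy every entry out, 4 bytes apart. */
static void save_palette(uint32_t base, uint32_t *out)
{
        int i;

        for (i = 0; i < PALETTE_ENTRIES; i++)
                out[i] = mmio_read(base + (i << 2));
}

/* Resume path: write the saved entries back in the same order. */
static void restore_palette(uint32_t base, const uint32_t *in)
{
        int i;

        for (i = 0; i < PALETTE_ENTRIES; i++)
                mmio_write(base + (i << 2), in[i]);
}

int main(void)
{
        uint32_t saved[PALETTE_ENTRIES];
        int i;

        for (i = 0; i < PALETTE_ENTRIES; i++)
                fake_mmio[i] = 0x00010101u * i;     /* pretend gamma ramp */

        save_palette(0, saved);                     /* suspend */
        restore_palette(0, saved);                  /* resume  */
        printf("entry 255 = 0x%08x\n", (unsigned int)saved[255]);
        return 0;
}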
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 9293878ec7e..969d08c72d1 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -267,27 +267,27 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
crt->force_hotplug_required = 0;
- save_adpa = adpa = I915_READ(PCH_ADPA);
+ save_adpa = adpa = I915_READ(crt->adpa_reg);
DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
if (turn_off_dac)
adpa &= ~ADPA_DAC_ENABLE;
- I915_WRITE(PCH_ADPA, adpa);
+ I915_WRITE(crt->adpa_reg, adpa);
- if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+ if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
if (turn_off_dac) {
- I915_WRITE(PCH_ADPA, save_adpa);
- POSTING_READ(PCH_ADPA);
+ I915_WRITE(crt->adpa_reg, save_adpa);
+ POSTING_READ(crt->adpa_reg);
}
}
/* Check the status to see if both blue and green are on now */
- adpa = I915_READ(PCH_ADPA);
+ adpa = I915_READ(crt->adpa_reg);
if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
ret = true;
else
@@ -300,26 +300,27 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
+ struct intel_crt *crt = intel_attached_crt(connector);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 adpa;
bool ret;
u32 save_adpa;
- save_adpa = adpa = I915_READ(ADPA);
+ save_adpa = adpa = I915_READ(crt->adpa_reg);
DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
- I915_WRITE(ADPA, adpa);
+ I915_WRITE(crt->adpa_reg, adpa);
- if (wait_for((I915_READ(ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+ if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
1000)) {
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
- I915_WRITE(ADPA, save_adpa);
+ I915_WRITE(crt->adpa_reg, save_adpa);
}
/* Check the status to see if both blue and green are on now */
- adpa = I915_READ(ADPA);
+ adpa = I915_READ(crt->adpa_reg);
if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
ret = true;
else
@@ -665,11 +666,11 @@ static void intel_crt_reset(struct drm_connector *connector)
if (HAS_PCH_SPLIT(dev)) {
u32 adpa;
- adpa = I915_READ(PCH_ADPA);
+ adpa = I915_READ(crt->adpa_reg);
adpa &= ~ADPA_CRT_HOTPLUG_MASK;
adpa |= ADPA_HOTPLUG_BITS;
- I915_WRITE(PCH_ADPA, adpa);
- POSTING_READ(PCH_ADPA);
+ I915_WRITE(crt->adpa_reg, adpa);
+ POSTING_READ(crt->adpa_reg);
DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
crt->force_hotplug_required = 1;
@@ -684,7 +685,6 @@ static void intel_crt_reset(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
.mode_fixup = intel_crt_mode_fixup,
.mode_set = intel_crt_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_crt_connector_funcs = {
@@ -776,7 +776,7 @@ void intel_crt_init(struct drm_device *dev)
crt->base.disable = intel_disable_crt;
crt->base.enable = intel_enable_crt;
- if (IS_HASWELL(dev))
+ if (HAS_DDI(dev))
crt->base.get_hw_state = intel_ddi_get_hw_state;
else
crt->base.get_hw_state = intel_crt_get_hw_state;
@@ -800,10 +800,14 @@ void intel_crt_init(struct drm_device *dev)
dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
/*
- * TODO: find a proper way to discover whether we need to set the
- * polarity reversal bit or not, instead of relying on the BIOS.
+ * TODO: find a proper way to discover whether we need to set the
+ * polarity and link reversal bits or not, instead of relying on the
+ * BIOS.
*/
- if (HAS_PCH_LPT(dev))
- dev_priv->fdi_rx_polarity_reversed =
- !!(I915_READ(_FDI_RXA_CTL) & FDI_RX_POLARITY_REVERSED_LPT);
+ if (HAS_PCH_LPT(dev)) {
+ u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
+ FDI_RX_LINK_REVERSAL_OVERRIDE;
+
+ dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config;
+ }
}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 4bad0f72401..d64af5aa4a1 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -84,7 +84,8 @@ static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
* in either FDI or DP modes only, as HDMI connections will work with both
* of those
*/
-void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, bool use_fdi_mode)
+static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
+ bool use_fdi_mode)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
@@ -114,16 +115,17 @@ void intel_prepare_ddi(struct drm_device *dev)
{
int port;
- if (IS_HASWELL(dev)) {
- for (port = PORT_A; port < PORT_E; port++)
- intel_prepare_ddi_buffers(dev, port, false);
+ if (!HAS_DDI(dev))
+ return;
- /* DDI E is the suggested one to work in FDI mode, so program is as such by
- * default. It will have to be re-programmed in case a digital DP output
- * will be detected on it
- */
- intel_prepare_ddi_buffers(dev, PORT_E, true);
- }
+ for (port = PORT_A; port < PORT_E; port++)
+ intel_prepare_ddi_buffers(dev, port, false);
+
+ /* DDI E is the suggested one to work in FDI mode, so program it as such
+ * by default. It will have to be re-programmed if a digital DP
+ * output is detected on it
+ */
+ intel_prepare_ddi_buffers(dev, PORT_E, true);
}
static const long hsw_ddi_buf_ctl_values[] = {
@@ -178,10 +180,8 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
/* Enable the PCH Receiver FDI PLL */
- rx_ctl_val = FDI_RX_PLL_ENABLE | FDI_RX_ENHANCE_FRAME_ENABLE |
- ((intel_crtc->fdi_lanes - 1) << 19);
- if (dev_priv->fdi_rx_polarity_reversed)
- rx_ctl_val |= FDI_RX_POLARITY_REVERSED_LPT;
+ rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
+ FDI_RX_PLL_ENABLE | ((intel_crtc->fdi_lanes - 1) << 19);
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
POSTING_READ(_FDI_RXA_CTL);
udelay(220);
@@ -203,7 +203,10 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
DP_TP_CTL_LINK_TRAIN_PAT1 |
DP_TP_CTL_ENABLE);
- /* Configure and enable DDI_BUF_CTL for DDI E with next voltage */
+ /* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
+ * DDI E does not support port reversal; the functionality is
+ * achieved on the PCH side in FDI_RX_CTL, so no need to set the
+ * port reversal bit */
I915_WRITE(DDI_BUF_CTL(PORT_E),
DDI_BUF_CTL_ENABLE |
((intel_crtc->fdi_lanes - 1) << 1) |
@@ -675,10 +678,14 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n",
port_name(port), pipe_name(pipe));
+ intel_crtc->eld_vld = false;
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(encoder);
- intel_dp->DP = DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
+ intel_dp->DP = intel_dig_port->port_reversal |
+ DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
switch (intel_dp->lane_count) {
case 1:
intel_dp->DP |= DDI_PORT_WIDTH_X1;
@@ -985,7 +992,13 @@ void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
if (cpu_transcoder == TRANSCODER_EDP) {
switch (pipe) {
case PIPE_A:
- temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
+ /* Can only use the always-on power well for eDP when
+ * not using the panel fitter, and when not using motion
+ * blur mitigation (which we don't support). */
+ if (dev_priv->pch_pf_size)
+ temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
+ else
+ temp |= TRANS_DDI_EDP_INPUT_A_ON;
break;
case PIPE_B:
temp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
@@ -1069,7 +1082,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
if (port == PORT_A)
cpu_transcoder = TRANSCODER_EDP;
else
- cpu_transcoder = pipe;
+ cpu_transcoder = (enum transcoder) pipe;
tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
@@ -1285,34 +1298,58 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
static void intel_enable_ddi(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
+ uint32_t tmp;
if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(encoder);
+
/* In HDMI/DVI mode, the port width, and swing/emphasis values
* are ignored so nothing special needs to be done besides
* enabling the port.
*/
- I915_WRITE(DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE);
+ I915_WRITE(DDI_BUF_CTL(port),
+ intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE);
} else if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
ironlake_edp_backlight_on(intel_dp);
}
+
+ if (intel_crtc->eld_vld) {
+ tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
+ I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+ }
}
static void intel_disable_ddi(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
int type = intel_encoder->type;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
ironlake_edp_backlight_off(intel_dp);
}
+
+ tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
+ I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
}
int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
@@ -1452,11 +1489,11 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
.mode_fixup = intel_ddi_mode_fixup,
.mode_set = intel_ddi_mode_set,
- .disable = intel_encoder_noop,
};
void intel_ddi_init(struct drm_device *dev, enum port port)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
@@ -1497,6 +1534,8 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
intel_dig_port->port = port;
+ intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
+ DDI_BUF_PORT_REVERSAL;
if (hdmi_connector)
intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port);
else
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index da1ad9c80bb..a05ac2c91ba 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -154,8 +154,8 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
.vco = { .min = 1400000, .max = 2800000 },
.n = { .min = 1, .max = 6 },
.m = { .min = 70, .max = 120 },
- .m1 = { .min = 10, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
+ .m1 = { .min = 8, .max = 18 },
+ .m2 = { .min = 3, .max = 7 },
.p = { .min = 5, .max = 80 },
.p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 200000,
@@ -168,8 +168,8 @@ static const intel_limit_t intel_limits_i9xx_lvds = {
.vco = { .min = 1400000, .max = 2800000 },
.n = { .min = 1, .max = 6 },
.m = { .min = 70, .max = 120 },
- .m1 = { .min = 10, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
+ .m1 = { .min = 8, .max = 18 },
+ .m2 = { .min = 3, .max = 7 },
.p = { .min = 7, .max = 98 },
.p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 112000,
@@ -416,13 +416,11 @@ static const intel_limit_t intel_limits_vlv_dp = {
u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
{
- unsigned long flags;
- u32 val = 0;
+ WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
- spin_lock_irqsave(&dev_priv->dpio_lock, flags);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
DRM_ERROR("DPIO idle wait timed out\n");
- goto out_unlock;
+ return 0;
}
I915_WRITE(DPIO_REG, reg);
@@ -430,24 +428,20 @@ u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
DPIO_BYTE);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
DRM_ERROR("DPIO read wait timed out\n");
- goto out_unlock;
+ return 0;
}
- val = I915_READ(DPIO_DATA);
-out_unlock:
- spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
- return val;
+ return I915_READ(DPIO_DATA);
}
static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
u32 val)
{
- unsigned long flags;
+ WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
- spin_lock_irqsave(&dev_priv->dpio_lock, flags);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
DRM_ERROR("DPIO idle wait timed out\n");
- goto out_unlock;
+ return;
}
I915_WRITE(DPIO_DATA, val);
@@ -456,9 +450,6 @@ static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
DPIO_BYTE);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
DRM_ERROR("DPIO write wait timed out\n");
-
-out_unlock:
- spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
}
static void vlv_init_dpio(struct drm_device *dev)
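The DPIO accessors above (and the SBI accessors changed later in this patch) no longer take dev_priv->dpio_lock themselves; they only WARN if the caller does not already hold it as a mutex, so a caller such as vlv_update_pll() can wrap a whole read-modify-write sequence in a single critical section. A stand-alone pthread sketch of that convention; the owner-tracking wrapper and all names below are illustrative, not the driver's code:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sideband_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t sideband_owner;
static int sideband_locked;
static unsigned int fake_reg;

static void sideband_acquire(void)
{
        pthread_mutex_lock(&sideband_lock);
        sideband_owner = pthread_self();
        sideband_locked = 1;
}

static void sideband_release(void)
{
        sideband_locked = 0;
        pthread_mutex_unlock(&sideband_lock);
}

/* Rough analogue of WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock)). */
static void assert_sideband_held(void)
{
        assert(sideband_locked && pthread_equal(sideband_owner, pthread_self()));
}

/* The low-level helpers assume the lock; they never take it themselves. */
static unsigned int sideband_read(void)
{
        assert_sideband_held();
        return fake_reg;
}

static void sideband_write(unsigned int val)
{
        assert_sideband_held();
        fake_reg = val;
}

int main(void)
{
        sideband_acquire();                  /* caller owns the whole sequence */
        sideband_write(sideband_read() | 1); /* read-modify-write stays atomic */
        sideband_release();
        printf("reg = %u\n", fake_reg);
        return 0;
}

With the old scheme each helper locked and unlocked around a single access, so back-to-back accesses from one caller could interleave with another thread's.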
@@ -472,61 +463,14 @@ static void vlv_init_dpio(struct drm_device *dev)
POSTING_READ(DPIO_CTL);
}
-static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
-{
- DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
- return 1;
-}
-
-static const struct dmi_system_id intel_dual_link_lvds[] = {
- {
- .callback = intel_dual_link_lvds_callback,
- .ident = "Apple MacBook Pro (Core i5/i7 Series)",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
- },
- },
- { } /* terminating entry */
-};
-
-static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
- unsigned int reg)
-{
- unsigned int val;
-
- /* use the module option value if specified */
- if (i915_lvds_channel_mode > 0)
- return i915_lvds_channel_mode == 2;
-
- if (dmi_check_system(intel_dual_link_lvds))
- return true;
-
- if (dev_priv->lvds_val)
- val = dev_priv->lvds_val;
- else {
- /* BIOS should set the proper LVDS register value at boot, but
- * in reality, it doesn't set the value when the lid is closed;
- * we need to check "the value to be set" in VBT when LVDS
- * register is uninitialized.
- */
- val = I915_READ(reg);
- if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
- val = dev_priv->bios_lvds_val;
- dev_priv->lvds_val = val;
- }
- return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
-}
-
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
int refclk)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
+ if (intel_is_dual_link_lvds(dev)) {
/* LVDS dual channel */
if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
@@ -550,11 +494,10 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if (is_dual_link_lvds(dev_priv, LVDS))
+ if (intel_is_dual_link_lvds(dev))
/* LVDS with dual channel */
limit = &intel_limits_g4x_dual_channel_lvds;
else
@@ -686,19 +629,16 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
intel_clock_t clock;
int err = target;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
- (I915_READ(LVDS)) != 0) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
/*
- * For LVDS, if the panel is on, just rely on its current
- * settings for dual-channel. We haven't figured out how to
- * reliably set up different single/dual channel state, if we
- * even can.
+ * For LVDS just rely on its current settings for dual-channel.
+ * We haven't figured out how to reliably set up different
+ * single/dual channel state, if we even can.
*/
- if (is_dual_link_lvds(dev_priv, LVDS))
+ if (intel_is_dual_link_lvds(dev))
clock.p2 = limit->p2.p2_fast;
else
clock.p2 = limit->p2.p2_slow;
@@ -751,7 +691,6 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
intel_clock_t clock;
int max_n;
bool found;
@@ -766,8 +705,7 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
- if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
- LVDS_CLKB_POWER_UP)
+ if (intel_is_dual_link_lvds(dev))
clock.p2 = limit->p2.p2_fast;
else
clock.p2 = limit->p2.p2_slow;
@@ -1047,6 +985,51 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
}
}
+/*
+ * ibx_digital_port_connected - is the specified port connected?
+ * @dev_priv: i915 private structure
+ * @port: the port to test
+ *
+ * Returns true if @port is connected, false otherwise.
+ */
+bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
+{
+ u32 bit;
+
+ if (HAS_PCH_IBX(dev_priv->dev)) {
+ switch (port->port) {
+ case PORT_B:
+ bit = SDE_PORTB_HOTPLUG;
+ break;
+ case PORT_C:
+ bit = SDE_PORTC_HOTPLUG;
+ break;
+ case PORT_D:
+ bit = SDE_PORTD_HOTPLUG;
+ break;
+ default:
+ return true;
+ }
+ } else {
+ switch (port->port) {
+ case PORT_B:
+ bit = SDE_PORTB_HOTPLUG_CPT;
+ break;
+ case PORT_C:
+ bit = SDE_PORTC_HOTPLUG_CPT;
+ break;
+ case PORT_D:
+ bit = SDE_PORTD_HOTPLUG_CPT;
+ break;
+ default:
+ return true;
+ }
+ }
+
+ return I915_READ(SDEISR) & bit;
+}
+
static const char *state_string(bool enabled)
{
return enabled ? "on" : "off";
@@ -1125,8 +1108,8 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
- if (IS_HASWELL(dev_priv->dev)) {
- /* On Haswell, DDI is used instead of FDI_TX_CTL */
+ if (HAS_DDI(dev_priv->dev)) {
+ /* DDI does not have a specific FDI_TX register */
reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
val = I915_READ(reg);
cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
@@ -1170,7 +1153,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
return;
/* On Haswell, DDI ports are responsible for the FDI PLL setup */
- if (IS_HASWELL(dev_priv->dev))
+ if (HAS_DDI(dev_priv->dev))
return;
reg = FDI_TX_CTL(pipe);
@@ -1231,9 +1214,15 @@ void assert_pipe(struct drm_i915_private *dev_priv,
if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
state = true;
- reg = PIPECONF(cpu_transcoder);
- val = I915_READ(reg);
- cur_state = !!(val & PIPECONF_ENABLE);
+ if (IS_HASWELL(dev_priv->dev) && cpu_transcoder != TRANSCODER_EDP &&
+ !(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_ENABLE)) {
+ cur_state = false;
+ } else {
+ reg = PIPECONF(cpu_transcoder);
+ val = I915_READ(reg);
+ cur_state = !!(val & PIPECONF_ENABLE);
+ }
+
WARN(cur_state != state,
"pipe %c assertion failure (expected %s, current %s)\n",
pipe_name(pipe), state_string(state), state_string(cur_state));
@@ -1509,13 +1498,14 @@ static void
intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
enum intel_sbi_destination destination)
{
- unsigned long flags;
u32 tmp;
- spin_lock_irqsave(&dev_priv->dpio_lock, flags);
- if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
+ WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
+
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
+ 100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
- goto out_unlock;
+ return;
}
I915_WRITE(SBI_ADDR, (reg << 16));
@@ -1530,24 +1520,21 @@ intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
- goto out_unlock;
+ return;
}
-
-out_unlock:
- spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
}
static u32
intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
enum intel_sbi_destination destination)
{
- unsigned long flags;
u32 value = 0;
+ WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
- spin_lock_irqsave(&dev_priv->dpio_lock, flags);
- if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
+ 100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
- goto out_unlock;
+ return 0;
}
I915_WRITE(SBI_ADDR, (reg << 16));
@@ -1561,14 +1548,10 @@ intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
- goto out_unlock;
+ return 0;
}
- value = I915_READ(SBI_DATA);
-
-out_unlock:
- spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
- return value;
+ return I915_READ(SBI_DATA);
}
/**
@@ -1700,8 +1683,8 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
* make the BPC in transcoder be consistent with
* that in pipeconf reg.
*/
- val &= ~PIPE_BPC_MASK;
- val |= pipeconf_val & PIPE_BPC_MASK;
+ val &= ~PIPECONF_BPC_MASK;
+ val |= pipeconf_val & PIPECONF_BPC_MASK;
}
val &= ~TRANS_INTERLACE_MASK;
@@ -1728,7 +1711,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
BUG_ON(dev_priv->info->gen < 5);
/* FDI must be feeding us bits for PCH ports */
- assert_fdi_tx_enabled(dev_priv, cpu_transcoder);
+ assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
/* Workaround: set timing override bit. */
@@ -1816,11 +1799,11 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
{
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
- enum transcoder pch_transcoder;
+ enum pipe pch_transcoder;
int reg;
u32 val;
- if (IS_HASWELL(dev_priv->dev))
+ if (HAS_PCH_LPT(dev_priv->dev))
pch_transcoder = TRANSCODER_A;
else
pch_transcoder = pipe;
@@ -1836,7 +1819,8 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
if (pch_port) {
/* if driving the PCH, we need FDI enabled */
assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
- assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder);
+ assert_fdi_tx_pll_enabled(dev_priv,
+ (enum pipe) cpu_transcoder);
}
/* FIXME: assert CPU port conditions for SNB+ */
}
@@ -2017,18 +2001,29 @@ void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
* is assumed to be a power-of-two. */
-unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
- unsigned int bpp,
- unsigned int pitch)
+unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+ unsigned int tiling_mode,
+ unsigned int cpp,
+ unsigned int pitch)
{
- int tile_rows, tiles;
+ if (tiling_mode != I915_TILING_NONE) {
+ unsigned int tile_rows, tiles;
+
+ tile_rows = *y / 8;
+ *y %= 8;
+
+ tiles = *x / (512/cpp);
+ *x %= 512/cpp;
- tile_rows = *y / 8;
- *y %= 8;
- tiles = *x / (512/bpp);
- *x %= 512/bpp;
+ return tile_rows * pitch * 8 + tiles * 4096;
+ } else {
+ unsigned int offset;
- return tile_rows * pitch * 8 + tiles * 4096;
+ offset = *y * pitch + *x * cpp;
+ *y = 0;
+ *x = (offset & 4095) / cpp;
+ return offset & -4096;
+ }
}
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
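The intel_gen4_compute_page_offset() helper added above splits a framebuffer (x, y) position into a coarse surface offset plus a residual position inside it: whole 8-row, 512-byte-wide X tiles for tiled buffers, or a 4 KiB page boundary for linear ones. A stand-alone sketch of the same arithmetic; the pitch, bytes-per-pixel and coordinates in main() are made-up example values:

#include <stdbool.h>
#include <stdio.h>

/* Returns the coarse offset and rewrites *x/*y to the residual position
 * inside it. cpp is bytes per pixel, pitch is the stride in bytes. */
static unsigned long page_offset(int *x, int *y, bool tiled,
                                 unsigned int cpp, unsigned int pitch)
{
        if (tiled) {
                unsigned int tile_rows = *y / 8;     /* X tile: 8 rows high   */
                unsigned int tiles;

                *y %= 8;
                tiles = *x / (512 / cpp);            /* ... and 512 bytes wide */
                *x %= 512 / cpp;
                return tile_rows * pitch * 8 + tiles * 4096;
        } else {
                unsigned int offset = *y * pitch + *x * cpp;

                *y = 0;
                *x = (offset & 4095) / cpp;          /* keep x pixel-aligned */
                return offset & ~4095u;
        }
}

int main(void)
{
        int x = 100, y = 50;
        unsigned long off = page_offset(&x, &y, true, 4, 8192);

        /* 50 / 8 = 6 tile rows, 100 / (512 / 4) = 0 tiles across, so
         * off = 6 * 8192 * 8 = 393216 and the residual (x, y) is (100, 2). */
        printf("offset=%lu x=%d y=%d\n", off, x, y);
        return 0;
}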
@@ -2105,9 +2100,9 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (INTEL_INFO(dev)->gen >= 4) {
intel_crtc->dspaddr_offset =
- intel_gen4_compute_offset_xtiled(&x, &y,
- fb->bits_per_pixel / 8,
- fb->pitches[0]);
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
} else {
intel_crtc->dspaddr_offset = linear_offset;
@@ -2198,9 +2193,9 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
intel_crtc->dspaddr_offset =
- intel_gen4_compute_offset_xtiled(&x, &y,
- fb->bits_per_pixel / 8,
- fb->pitches[0]);
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
@@ -2242,10 +2237,6 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
bool was_interruptible = dev_priv->mm.interruptible;
int ret;
- wait_event(dev_priv->pending_flip_queue,
- atomic_read(&dev_priv->mm.wedged) ||
- atomic_read(&obj->pending_flip) == 0);
-
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
* current scanout is retired before unpinning the old
@@ -2350,43 +2341,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
-static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpa_ctl;
-
- DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
- dpa_ctl = I915_READ(DP_A);
- dpa_ctl &= ~DP_PLL_FREQ_MASK;
-
- if (clock < 200000) {
- u32 temp;
- dpa_ctl |= DP_PLL_FREQ_160MHZ;
- /* workaround for 160Mhz:
- 1) program 0x4600c bits 15:0 = 0x8124
- 2) program 0x46010 bit 0 = 1
- 3) program 0x46034 bit 24 = 1
- 4) program 0x64000 bit 14 = 1
- */
- temp = I915_READ(0x4600c);
- temp &= 0xffff0000;
- I915_WRITE(0x4600c, temp | 0x8124);
-
- temp = I915_READ(0x46010);
- I915_WRITE(0x46010, temp | 1);
-
- temp = I915_READ(0x46034);
- I915_WRITE(0x46034, temp | (1 << 24));
- } else {
- dpa_ctl |= DP_PLL_FREQ_270MHZ;
- }
- I915_WRITE(DP_A, dpa_ctl);
-
- POSTING_READ(DP_A);
- udelay(500);
-}
-
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -2815,7 +2769,7 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
temp = I915_READ(reg);
temp &= ~((0x7 << 19) | (0x7 << 16));
temp |= (intel_crtc->fdi_lanes - 1) << 19;
- temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
POSTING_READ(reg);
@@ -2828,18 +2782,14 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
POSTING_READ(reg);
udelay(200);
- /* On Haswell, the PLL configuration for ports and pipes is handled
- * separately, as part of DDI setup */
- if (!IS_HASWELL(dev)) {
- /* Enable CPU FDI TX PLL, always on for Ironlake */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- if ((temp & FDI_TX_PLL_ENABLE) == 0) {
- I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
+ /* Enable CPU FDI TX PLL, always on for Ironlake */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+ I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
- POSTING_READ(reg);
- udelay(100);
- }
+ POSTING_READ(reg);
+ udelay(100);
}
}
@@ -2889,7 +2839,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(0x7 << 16);
- temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
POSTING_READ(reg);
@@ -2918,7 +2868,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
}
/* BPC in FDI rx is consistent with that in PIPECONF */
temp &= ~(0x07 << 16);
- temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
I915_WRITE(reg, temp);
POSTING_READ(reg);
@@ -2929,10 +2879,12 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long flags;
bool pending;
- if (atomic_read(&dev_priv->mm.wedged))
+ if (i915_reset_in_progress(&dev_priv->gpu_error) ||
+ intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
return false;
spin_lock_irqsave(&dev->event_lock, flags);
@@ -2950,6 +2902,8 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
if (crtc->fb == NULL)
return;
+ WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
+
wait_event(dev_priv->pending_flip_queue,
!intel_crtc_has_pending_flip(crtc));
@@ -2992,6 +2946,8 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
u32 divsel, phaseinc, auxdiv, phasedir = 0;
u32 temp;
+ mutex_lock(&dev_priv->dpio_lock);
+
/* It is necessary to ungate the pixclk gate prior to programming
* the divisors, and gate it back when it is done.
*/
@@ -3066,6 +3022,8 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
udelay(24);
I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+
+ mutex_unlock(&dev_priv->dpio_lock);
}
/*
@@ -3146,7 +3104,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
if (HAS_PCH_CPT(dev) &&
(intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
- u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
+ u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(TRANS_DP_PORT_SEL_MASK |
@@ -3623,7 +3581,7 @@ static void haswell_crtc_off(struct drm_crtc *crtc)
/* Stop saying we're using TRANSCODER_EDP because some other CRTC might
* start using it. */
- intel_crtc->cpu_transcoder = intel_crtc->pipe;
+ intel_crtc->cpu_transcoder = (enum transcoder) intel_crtc->pipe;
intel_ddi_put_crtc_pll(crtc);
}
@@ -3664,6 +3622,11 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
intel_update_watermarks(dev);
intel_enable_pll(dev_priv, pipe);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_enable)
+ encoder->pre_enable(encoder);
+
intel_enable_pipe(dev_priv, pipe, false);
intel_enable_plane(dev_priv, plane, pipe);
@@ -3686,6 +3649,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
+ u32 pctl;
if (!intel_crtc->active)
@@ -3705,6 +3669,13 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_disable_plane(dev_priv, plane, pipe);
intel_disable_pipe(dev_priv, pipe);
+
+ /* Disable panel fitter if it is on this pipe. */
+ pctl = I915_READ(PFIT_CONTROL);
+ if ((pctl & PFIT_ENABLE) &&
+ ((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe)
+ I915_WRITE(PFIT_CONTROL, 0);
+
intel_disable_pll(dev_priv, pipe);
intel_crtc->active = false;
@@ -3767,19 +3738,17 @@ void intel_crtc_update_dpms(struct drm_crtc *crtc)
intel_crtc_update_sarea(crtc, enable);
}
-static void intel_crtc_noop(struct drm_crtc *crtc)
-{
-}
-
static void intel_crtc_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_connector *connector;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
/* crtc should still be enabled when we disable it. */
WARN_ON(!crtc->enabled);
+ intel_crtc->eld_vld = false;
dev_priv->display.crtc_disable(crtc);
intel_crtc_update_sarea(crtc, false);
dev_priv->display.off(crtc);
@@ -3817,10 +3786,6 @@ void intel_modeset_disable(struct drm_device *dev)
}
}
-void intel_encoder_noop(struct drm_encoder *encoder)
-{
-}
-
void intel_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
@@ -4012,16 +3977,8 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
return 133000;
}
-struct fdi_m_n {
- u32 tu;
- u32 gmch_m;
- u32 gmch_n;
- u32 link_m;
- u32 link_n;
-};
-
static void
-fdi_reduce_ratio(u32 *num, u32 *den)
+intel_reduce_ratio(uint32_t *num, uint32_t *den)
{
while (*num > 0xffffff || *den > 0xffffff) {
*num >>= 1;
@@ -4029,20 +3986,18 @@ fdi_reduce_ratio(u32 *num, u32 *den)
}
}
-static void
-ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
- int link_clock, struct fdi_m_n *m_n)
+void
+intel_link_compute_m_n(int bits_per_pixel, int nlanes,
+ int pixel_clock, int link_clock,
+ struct intel_link_m_n *m_n)
{
- m_n->tu = 64; /* default size */
-
- /* BUG_ON(pixel_clock > INT_MAX / 36); */
+ m_n->tu = 64;
m_n->gmch_m = bits_per_pixel * pixel_clock;
m_n->gmch_n = link_clock * nlanes * 8;
- fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
-
+ intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
m_n->link_m = pixel_clock;
m_n->link_n = link_clock;
- fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
+ intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
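intel_link_compute_m_n() above builds the data (GMCH) and link M/N ratios from bits per pixel, lane count, pixel clock and link clock, then shifts each ratio down until both of its terms fit in 24 bits. A stand-alone version of the same calculation; the clock numbers in main() are an illustrative DisplayPort example, not values from the patch:

#include <stdint.h>
#include <stdio.h>

struct link_m_n {
        uint32_t tu, gmch_m, gmch_n, link_m, link_n;
};

/* Shift both terms down together so each fits in a 24-bit register field. */
static void reduce_ratio(uint32_t *num, uint32_t *den)
{
        while (*num > 0xffffff || *den > 0xffffff) {
                *num >>= 1;
                *den >>= 1;
        }
}

static void compute_m_n(int bpp, int nlanes, int pixel_clock, int link_clock,
                        struct link_m_n *m_n)
{
        m_n->tu = 64;
        m_n->gmch_m = bpp * pixel_clock;        /* payload bits per unit time */
        m_n->gmch_n = link_clock * nlanes * 8;  /* bits the link can carry    */
        reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
        m_n->link_m = pixel_clock;
        m_n->link_n = link_clock;
        reduce_ratio(&m_n->link_m, &m_n->link_n);
}

int main(void)
{
        struct link_m_n m_n;

        /* 148.5 MHz pixel clock, 24 bpp, 2 lanes at a 270 MHz link clock. */
        compute_m_n(24, 2, 148500, 270000, &m_n);
        printf("gmch %u/%u  link %u/%u\n",
               (unsigned int)m_n.gmch_m, (unsigned int)m_n.gmch_n,
               (unsigned int)m_n.link_m, (unsigned int)m_n.link_n);
        return 0;
}

Shifting numerator and denominator together preserves the ratio to within rounding while guaranteeing both values fit the hardware's 24-bit fields.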
@@ -4289,51 +4244,6 @@ static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
}
}
-static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
- struct drm_display_mode *adjusted_mode)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- u32 temp;
-
- temp = I915_READ(LVDS);
- temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (pipe == 1) {
- temp |= LVDS_PIPEB_SELECT;
- } else {
- temp &= ~LVDS_PIPEB_SELECT;
- }
- /* set the corresponsding LVDS_BORDER bit */
- temp |= dev_priv->lvds_border_bits;
- /* Set the B0-B3 data pairs corresponding to whether we're going to
- * set the DPLLs for dual-channel mode or not.
- */
- if (clock->p2 == 7)
- temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
- else
- temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
-
- /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
- * appropriately here, but we need to look more thoroughly into how
- * panels behave in the two modes.
- */
- /* set the dithering flag on LVDS as needed */
- if (INTEL_INFO(dev)->gen >= 4) {
- if (dev_priv->lvds_dither)
- temp |= LVDS_ENABLE_DITHER;
- else
- temp &= ~LVDS_ENABLE_DITHER;
- }
- temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
- if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
- temp |= LVDS_HSYNC_POLARITY;
- if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
- temp |= LVDS_VSYNC_POLARITY;
- I915_WRITE(LVDS, temp);
-}
-
static void vlv_update_pll(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -4349,6 +4259,8 @@ static void vlv_update_pll(struct drm_crtc *crtc,
bool is_sdvo;
u32 temp;
+ mutex_lock(&dev_priv->dpio_lock);
+
is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
@@ -4432,6 +4344,8 @@ static void vlv_update_pll(struct drm_crtc *crtc,
temp |= (1 << 21);
intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
}
+
+ mutex_unlock(&dev_priv->dpio_lock);
}
static void i9xx_update_pll(struct drm_crtc *crtc,
@@ -4443,6 +4357,7 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
u32 dpll;
bool is_sdvo;
@@ -4511,12 +4426,9 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
POSTING_READ(DPLL(pipe));
udelay(150);
- /* The LVDS pin pair needs to be on before the DPLLs are enabled.
- * This is an exception to the general rule that mode_set doesn't turn
- * things on.
- */
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- intel_update_lvds(crtc, clock, adjusted_mode);
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_pll_enable)
+ encoder->pre_pll_enable(encoder);
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
intel_dp_set_m_n(crtc, mode, adjusted_mode);
@@ -4555,6 +4467,7 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
u32 dpll;
@@ -4588,12 +4501,9 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
POSTING_READ(DPLL(pipe));
udelay(150);
- /* The LVDS pin pair needs to be on before the DPLLs are enabled.
- * This is an exception to the general rule that mode_set doesn't turn
- * things on.
- */
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- intel_update_lvds(crtc, clock, adjusted_mode);
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_pll_enable)
+ encoder->pre_pll_enable(encoder);
I915_WRITE(DPLL(pipe), dpll);
@@ -4783,10 +4693,10 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
}
/* default to 8bpc */
- pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
+ pipeconf &= ~(PIPECONF_BPC_MASK | PIPECONF_DITHER_EN);
if (is_dp) {
if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
- pipeconf |= PIPECONF_BPP_6 |
+ pipeconf |= PIPECONF_6BPC |
PIPECONF_DITHER_EN |
PIPECONF_DITHER_TYPE_SP;
}
@@ -4794,7 +4704,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
- pipeconf |= PIPECONF_BPP_6 |
+ pipeconf |= PIPECONF_6BPC |
PIPECONF_ENABLE |
I965_PIPECONF_ACTIVE;
}
@@ -4981,6 +4891,8 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
if (!has_vga)
return;
+ mutex_lock(&dev_priv->dpio_lock);
+
/* XXX: Rip out SDV support once Haswell ships for real. */
if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
is_sdv = true;
@@ -5123,6 +5035,8 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
tmp |= SBI_DBUFF0_ENABLE;
intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
+
+ mutex_unlock(&dev_priv->dpio_lock);
}
/*
@@ -5177,19 +5091,19 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
val = I915_READ(PIPECONF(pipe));
- val &= ~PIPE_BPC_MASK;
+ val &= ~PIPECONF_BPC_MASK;
switch (intel_crtc->bpp) {
case 18:
- val |= PIPE_6BPC;
+ val |= PIPECONF_6BPC;
break;
case 24:
- val |= PIPE_8BPC;
+ val |= PIPECONF_8BPC;
break;
case 30:
- val |= PIPE_10BPC;
+ val |= PIPECONF_10BPC;
break;
case 36:
- val |= PIPE_12BPC;
+ val |= PIPECONF_12BPC;
break;
default:
/* Case prevented by intel_choose_pipe_bpp_dither. */
@@ -5206,10 +5120,80 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
else
val |= PIPECONF_PROGRESSIVE;
+ if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+ val |= PIPECONF_COLOR_RANGE_SELECT;
+ else
+ val &= ~PIPECONF_COLOR_RANGE_SELECT;
+
I915_WRITE(PIPECONF(pipe), val);
POSTING_READ(PIPECONF(pipe));
}
+/*
+ * Set up the pipe CSC unit.
+ *
+ * Currently only full range RGB to limited range RGB conversion
+ * is supported, but eventually this should handle various
+ * RGB<->YCbCr scenarios as well.
+ */
+static void intel_set_pipe_csc(struct drm_crtc *crtc,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ uint16_t coeff = 0x7800; /* 1.0 */
+
+ /*
+ * TODO: Check what kind of values actually come out of the pipe
+ * with these coeff/postoff values and adjust to get the best
+ * accuracy. Perhaps we even need to take the bpc value into
+ * consideration.
+ */
+
+ if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+ coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
+
+ /*
+ * GY/GU and RY/RU should be the other way around according
+ * to BSpec, but reality doesn't agree. Just set them up in
+ * a way that results in the correct picture.
+ */
+ I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
+ I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
+
+ I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
+ I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
+
+ I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
+ I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
+
+ I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
+ I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
+ I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
+
+ if (INTEL_INFO(dev)->gen > 6) {
+ uint16_t postoff = 0;
+
+ if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+ postoff = (16 * (1 << 13) / 255) & 0x1fff;
+
+ I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
+ I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
+ I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
+
+ I915_WRITE(PIPE_CSC_MODE(pipe), 0);
+ } else {
+ uint32_t mode = CSC_MODE_YUV_TO_RGB;
+
+ if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+ mode |= CSC_BLACK_SCREEN_OFFSET;
+
+ I915_WRITE(PIPE_CSC_MODE(pipe), mode);
+ }
+}
+
static void haswell_set_pipeconf(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
bool dither)
@@ -5400,7 +5384,7 @@ static void ironlake_set_m_n(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
struct intel_encoder *intel_encoder, *edp_encoder = NULL;
- struct fdi_m_n m_n = {0};
+ struct intel_link_m_n m_n = {0};
int target_clock, pixel_multiplier, lane, link_bw;
bool is_dp = false, is_cpu_edp = false;
@@ -5452,8 +5436,7 @@ static void ironlake_set_m_n(struct drm_crtc *crtc,
if (pixel_multiplier > 1)
link_bw *= pixel_multiplier;
- ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
- &m_n);
+ intel_link_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, &m_n);
I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m);
I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
@@ -5506,7 +5489,7 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
if (is_lvds) {
if ((intel_panel_use_ssc(dev_priv) &&
dev_priv->lvds_ssc_freq == 100) ||
- (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
+ intel_is_dual_link_lvds(dev))
factor = 25;
} else if (is_sdvo && is_tv)
factor = 20;
@@ -5581,7 +5564,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
bool ok, has_reduced_clock = false;
bool is_lvds = false, is_dp = false, is_cpu_edp = false;
struct intel_encoder *encoder;
- u32 temp;
int ret;
bool dither, fdi_config_ok;
@@ -5645,54 +5627,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
} else
intel_put_pch_pll(intel_crtc);
- /* The LVDS pin pair needs to be on before the DPLLs are enabled.
- * This is an exception to the general rule that mode_set doesn't turn
- * things on.
- */
- if (is_lvds) {
- temp = I915_READ(PCH_LVDS);
- temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (HAS_PCH_CPT(dev)) {
- temp &= ~PORT_TRANS_SEL_MASK;
- temp |= PORT_TRANS_SEL_CPT(pipe);
- } else {
- if (pipe == 1)
- temp |= LVDS_PIPEB_SELECT;
- else
- temp &= ~LVDS_PIPEB_SELECT;
- }
-
- /* set the corresponsding LVDS_BORDER bit */
- temp |= dev_priv->lvds_border_bits;
- /* Set the B0-B3 data pairs corresponding to whether we're going to
- * set the DPLLs for dual-channel mode or not.
- */
- if (clock.p2 == 7)
- temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
- else
- temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
-
- /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
- * appropriately here, but we need to look more thoroughly into how
- * panels behave in the two modes.
- */
- temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
- if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
- temp |= LVDS_HSYNC_POLARITY;
- if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
- temp |= LVDS_VSYNC_POLARITY;
- I915_WRITE(PCH_LVDS, temp);
- }
-
- if (is_dp && !is_cpu_edp) {
+ if (is_dp && !is_cpu_edp)
intel_dp_set_m_n(crtc, mode, adjusted_mode);
- } else {
- /* For non-DP output, clear any trans DP clock recovery setting.*/
- I915_WRITE(TRANSDATA_M1(pipe), 0);
- I915_WRITE(TRANSDATA_N1(pipe), 0);
- I915_WRITE(TRANSDPLINK_M1(pipe), 0);
- I915_WRITE(TRANSDPLINK_N1(pipe), 0);
- }
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_pll_enable)
+ encoder->pre_pll_enable(encoder);
if (intel_crtc->pch_pll) {
I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
@@ -5727,9 +5667,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
- if (is_cpu_edp)
- ironlake_set_pll_edp(crtc, adjusted_mode->clock);
-
ironlake_set_pipeconf(crtc, adjusted_mode, dither);
intel_wait_for_vblank(dev, pipe);
@@ -5747,6 +5684,35 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
return fdi_config_ok ? ret : -EINVAL;
}
+static void haswell_modeset_global_resources(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool enable = false;
+ struct intel_crtc *crtc;
+ struct intel_encoder *encoder;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ if (crtc->pipe != PIPE_A && crtc->base.enabled)
+ enable = true;
+ /* XXX: Should check for the edp transcoder here, but thanks to the
+ * init sequence that's not yet available. Just in case desktop eDP
+ * on PORT D is possible on haswell, too. */
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->type != INTEL_OUTPUT_EDP &&
+ encoder->connectors_active)
+ enable = true;
+ }
+
+ /* Even the eDP panel fitter is outside the always-on well. */
+ if (dev_priv->pch_pf_size)
+ enable = true;
+
+ intel_set_power_well(dev, enable);
+}
+
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -5759,20 +5725,13 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
int num_connectors = 0;
- intel_clock_t clock, reduced_clock;
- u32 dpll = 0, fp = 0, fp2 = 0;
- bool ok, has_reduced_clock = false;
- bool is_lvds = false, is_dp = false, is_cpu_edp = false;
+ bool is_dp = false, is_cpu_edp = false;
struct intel_encoder *encoder;
- u32 temp;
int ret;
bool dither;
for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
- case INTEL_OUTPUT_LVDS:
- is_lvds = true;
- break;
case INTEL_OUTPUT_DISPLAYPORT:
is_dp = true;
break;
@@ -5786,11 +5745,6 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
num_connectors++;
}
- if (is_cpu_edp)
- intel_crtc->cpu_transcoder = TRANSCODER_EDP;
- else
- intel_crtc->cpu_transcoder = pipe;
-
/* We are not sure yet this won't happen. */
WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
INTEL_PCH_TYPE(dev));
@@ -5806,147 +5760,32 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
return -EINVAL;
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
- ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
- &has_reduced_clock,
- &reduced_clock);
- if (!ok) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
- }
-
/* Ensure that the cursor is valid for the new mode before changing... */
intel_crtc_update_cursor(crtc, true);
/* determine panel color depth */
dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
adjusted_mode);
- if (is_lvds && dev_priv->lvds_dither)
- dither = true;
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
- fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
- if (has_reduced_clock)
- fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
- reduced_clock.m2;
-
- dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock,
- fp);
-
- /* CPU eDP is the only output that doesn't need a PCH PLL of its
- * own on pre-Haswell/LPT generation */
- if (!is_cpu_edp) {
- struct intel_pch_pll *pll;
-
- pll = intel_get_pch_pll(intel_crtc, dpll, fp);
- if (pll == NULL) {
- DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
- pipe);
- return -EINVAL;
- }
- } else
- intel_put_pch_pll(intel_crtc);
-
- /* The LVDS pin pair needs to be on before the DPLLs are
- * enabled. This is an exception to the general rule that
- * mode_set doesn't turn things on.
- */
- if (is_lvds) {
- temp = I915_READ(PCH_LVDS);
- temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (HAS_PCH_CPT(dev)) {
- temp &= ~PORT_TRANS_SEL_MASK;
- temp |= PORT_TRANS_SEL_CPT(pipe);
- } else {
- if (pipe == 1)
- temp |= LVDS_PIPEB_SELECT;
- else
- temp &= ~LVDS_PIPEB_SELECT;
- }
-
- /* set the corresponsding LVDS_BORDER bit */
- temp |= dev_priv->lvds_border_bits;
- /* Set the B0-B3 data pairs corresponding to whether
- * we're going to set the DPLLs for dual-channel mode or
- * not.
- */
- if (clock.p2 == 7)
- temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
- else
- temp &= ~(LVDS_B0B3_POWER_UP |
- LVDS_CLKB_POWER_UP);
-
- /* It would be nice to set 24 vs 18-bit mode
- * (LVDS_A3_POWER_UP) appropriately here, but we need to
- * look more thoroughly into how panels behave in the
- * two modes.
- */
- temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
- if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
- temp |= LVDS_HSYNC_POLARITY;
- if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
- temp |= LVDS_VSYNC_POLARITY;
- I915_WRITE(PCH_LVDS, temp);
- }
- }
-
- if (is_dp && !is_cpu_edp) {
+ if (is_dp && !is_cpu_edp)
intel_dp_set_m_n(crtc, mode, adjusted_mode);
- } else {
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
- /* For non-DP output, clear any trans DP clock recovery
- * setting.*/
- I915_WRITE(TRANSDATA_M1(pipe), 0);
- I915_WRITE(TRANSDATA_N1(pipe), 0);
- I915_WRITE(TRANSDPLINK_M1(pipe), 0);
- I915_WRITE(TRANSDPLINK_N1(pipe), 0);
- }
- }
intel_crtc->lowfreq_avail = false;
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
- if (intel_crtc->pch_pll) {
- I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(intel_crtc->pch_pll->pll_reg);
- udelay(150);
-
- /* The pixel multiplier can only be updated once the
- * DPLL is enabled and the clocks are stable.
- *
- * So write it again.
- */
- I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
- }
-
- if (intel_crtc->pch_pll) {
- if (is_lvds && has_reduced_clock && i915_powersave) {
- I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
- intel_crtc->lowfreq_avail = true;
- } else {
- I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
- }
- }
- }
intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
if (!is_dp || is_cpu_edp)
ironlake_set_m_n(crtc, mode, adjusted_mode);
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
- if (is_cpu_edp)
- ironlake_set_pll_edp(crtc, adjusted_mode->clock);
-
haswell_set_pipeconf(crtc, adjusted_mode, dither);
+ intel_set_pipe_csc(crtc, adjusted_mode);
+
/* Set up the display plane register */
- I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
+ I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
POSTING_READ(DSPCNTR(plane));
ret = intel_pipe_set_base(crtc, x, y, fb);
@@ -5972,6 +5811,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
int pipe = intel_crtc->pipe;
int ret;
+ if (IS_HASWELL(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
+ intel_crtc->cpu_transcoder = TRANSCODER_EDP;
+ else
+ intel_crtc->cpu_transcoder = pipe;
+
drm_vblank_pre_modeset(dev, pipe);
ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
@@ -6068,6 +5912,7 @@ static void haswell_write_eld(struct drm_connector *connector,
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t eldv;
uint32_t i;
int len;
@@ -6109,6 +5954,7 @@ static void haswell_write_eld(struct drm_connector *connector,
DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
eldv = AUDIO_ELD_VALID_A << (pipe * 4);
+ intel_crtc->eld_vld = true;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
@@ -6344,6 +6190,8 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
cntl |= CURSOR_MODE_DISABLE;
}
+ if (IS_HASWELL(dev))
+ cntl |= CURSOR_PIPE_CSC_ENABLE;
I915_WRITE(CURCNTR_IVB(pipe), cntl);
intel_crtc->cursor_visible = visible;
@@ -6700,6 +6548,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
if (encoder->crtc) {
crtc = encoder->crtc;
+ mutex_lock(&crtc->mutex);
+
old->dpms_mode = connector->dpms;
old->load_detect_temp = false;
@@ -6729,6 +6579,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
return false;
}
+ mutex_lock(&crtc->mutex);
intel_encoder->new_crtc = to_intel_crtc(crtc);
to_intel_connector(connector)->new_encoder = intel_encoder;
@@ -6756,13 +6607,15 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
+ mutex_unlock(&crtc->mutex);
return false;
}
- if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
+ if (intel_set_mode(crtc, mode, 0, 0, fb)) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
+ mutex_unlock(&crtc->mutex);
return false;
}
@@ -6777,27 +6630,31 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_encoder *intel_encoder =
intel_attached_encoder(connector);
struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_crtc *crtc = encoder->crtc;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector),
encoder->base.id, drm_get_encoder_name(encoder));
if (old->load_detect_temp) {
- struct drm_crtc *crtc = encoder->crtc;
-
to_intel_connector(connector)->new_encoder = NULL;
intel_encoder->new_crtc = NULL;
intel_set_mode(crtc, NULL, 0, 0, NULL);
- if (old->release_fb)
- old->release_fb->funcs->destroy(old->release_fb);
+ if (old->release_fb) {
+ drm_framebuffer_unregister_private(old->release_fb);
+ drm_framebuffer_unreference(old->release_fb);
+ }
+ mutex_unlock(&crtc->mutex);
return;
}
/* Switch crtc and encoder back off if necessary */
if (old->dpms_mode != DRM_MODE_DPMS_ON)
connector->funcs->dpms(connector, old->dpms_mode);
+
+ mutex_unlock(&crtc->mutex);
}
/* Returns the clock of the currently programmed mode of the given pipe. */
@@ -6993,11 +6850,6 @@ void intel_mark_busy(struct drm_device *dev)
void intel_mark_idle(struct drm_device *dev)
{
-}
-
-void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
-{
- struct drm_device *dev = obj->base.dev;
struct drm_crtc *crtc;
if (!i915_powersave)
@@ -7007,12 +6859,11 @@ void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
if (!crtc->fb)
continue;
- if (to_intel_framebuffer(crtc->fb)->obj == obj)
- intel_increase_pllclock(crtc);
+ intel_decrease_pllclock(crtc);
}
}
-void intel_mark_fb_idle(struct drm_i915_gem_object *obj)
+void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_crtc *crtc;
@@ -7025,7 +6876,7 @@ void intel_mark_fb_idle(struct drm_i915_gem_object *obj)
continue;
if (to_intel_framebuffer(crtc->fb)->obj == obj)
- intel_decrease_pllclock(crtc);
+ intel_increase_pllclock(crtc);
}
}
@@ -7109,9 +6960,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
obj = work->old_fb_obj;
- atomic_clear_mask(1 << intel_crtc->plane,
- &obj->pending_flip.counter);
- wake_up(&dev_priv->pending_flip_queue);
+ wake_up_all(&dev_priv->pending_flip_queue);
queue_work(dev_priv->wq, &work->work);
@@ -7474,11 +7323,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->enable_stall_check = true;
- /* Block clients from rendering to the new back buffer until
- * the flip occurs and the object is no longer visible.
- */
- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
atomic_inc(&intel_crtc->unpin_work_count);
+ intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
if (ret)
@@ -7494,7 +7340,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count);
- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
@@ -7514,7 +7359,6 @@ free_work:
static struct drm_crtc_helper_funcs intel_helper_funcs = {
.mode_set_base_atomic = intel_pipe_set_base_atomic,
.load_lut = intel_crtc_load_lut,
- .disable = intel_crtc_noop,
};
bool intel_encoder_check_is_cloned(struct intel_encoder *encoder)
@@ -7904,16 +7748,21 @@ intel_modeset_check_state(struct drm_device *dev)
}
}
-bool intel_set_mode(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *fb)
+int intel_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
+ struct drm_display_mode *adjusted_mode, *saved_mode, *saved_hwmode;
struct intel_crtc *intel_crtc;
unsigned disable_pipes, prepare_pipes, modeset_pipes;
- bool ret = true;
+ int ret = 0;
+
+ saved_mode = kmalloc(2 * sizeof(*saved_mode), GFP_KERNEL);
+ if (!saved_mode)
+ return -ENOMEM;
+ saved_hwmode = saved_mode + 1;
intel_modeset_affected_pipes(crtc, &modeset_pipes,
&prepare_pipes, &disable_pipes);
@@ -7924,8 +7773,8 @@ bool intel_set_mode(struct drm_crtc *crtc,
for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
intel_crtc_disable(&intel_crtc->base);
- saved_hwmode = crtc->hwmode;
- saved_mode = crtc->mode;
+ *saved_hwmode = crtc->hwmode;
+ *saved_mode = crtc->mode;
/* Hack: Because we don't (yet) support global modeset on multiple
* crtcs, we don't keep track of the new mode for more than one crtc.
@@ -7936,7 +7785,8 @@ bool intel_set_mode(struct drm_crtc *crtc,
if (modeset_pipes) {
adjusted_mode = intel_modeset_adjusted_mode(crtc, mode);
if (IS_ERR(adjusted_mode)) {
- return false;
+ ret = PTR_ERR(adjusted_mode);
+ goto out;
}
}
@@ -7962,11 +7812,11 @@ bool intel_set_mode(struct drm_crtc *crtc,
* on the DPLL.
*/
for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
- ret = !intel_crtc_mode_set(&intel_crtc->base,
- mode, adjusted_mode,
- x, y, fb);
- if (!ret)
- goto done;
+ ret = intel_crtc_mode_set(&intel_crtc->base,
+ mode, adjusted_mode,
+ x, y, fb);
+ if (ret)
+ goto done;
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -7987,16 +7837,23 @@ bool intel_set_mode(struct drm_crtc *crtc,
/* FIXME: add subpixel order */
done:
drm_mode_destroy(dev, adjusted_mode);
- if (!ret && crtc->enabled) {
- crtc->hwmode = saved_hwmode;
- crtc->mode = saved_mode;
+ if (ret && crtc->enabled) {
+ crtc->hwmode = *saved_hwmode;
+ crtc->mode = *saved_mode;
} else {
intel_modeset_check_state(dev);
}
+out:
+ kfree(saved_mode);
return ret;
}
+void intel_crtc_restore_mode(struct drm_crtc *crtc)
+{
+ intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb);
+}
+
#undef for_each_intel_crtc_masked
static void intel_set_config_free(struct intel_set_config *config)
@@ -8109,7 +7966,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
struct intel_encoder *encoder;
int count, ro;
- /* The upper layers ensure that we either disabl a crtc or have a list
+ /* The upper layers ensure that we either disable a crtc or have a list
* of connectors. For paranoia, double-check this. */
WARN_ON(!set->fb && (set->num_connectors != 0));
WARN_ON(set->fb && (set->num_connectors == 0));
@@ -8211,14 +8068,9 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
BUG_ON(!set->crtc);
BUG_ON(!set->crtc->helper_private);
- if (!set->mode)
- set->fb = NULL;
-
- /* The fb helper likes to play gross jokes with ->mode_set_config.
- * Unfortunately the crtc helper doesn't do much at all for this case,
- * so we have to cope with this madness until the fb helper is fixed up. */
- if (set->fb && set->num_connectors == 0)
- return 0;
+ /* Enforce sane interface api - has been abused by the fb helper. */
+ BUG_ON(!set->mode && set->fb);
+ BUG_ON(set->fb && set->num_connectors == 0);
if (set->fb) {
DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
@@ -8262,11 +8114,11 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
drm_mode_debug_printmodeline(set->mode);
}
- if (!intel_set_mode(set->crtc, set->mode,
- set->x, set->y, set->fb)) {
- DRM_ERROR("failed to set mode on [CRTC:%d]\n",
- set->crtc->base.id);
- ret = -EINVAL;
+ ret = intel_set_mode(set->crtc, set->mode,
+ set->x, set->y, set->fb);
+ if (ret) {
+ DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n",
+ set->crtc->base.id, ret);
goto fail;
}
} else if (config->fb_changed) {
@@ -8283,8 +8135,8 @@ fail:
/* Try to restore the config */
if (config->mode_changed &&
- !intel_set_mode(save_set.crtc, save_set.mode,
- save_set.x, save_set.y, save_set.fb))
+ intel_set_mode(save_set.crtc, save_set.mode,
+ save_set.x, save_set.y, save_set.fb))
DRM_ERROR("failed to restore config after modeset failure\n");
out_config:
@@ -8303,7 +8155,7 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
static void intel_cpu_pll_init(struct drm_device *dev)
{
- if (IS_HASWELL(dev))
+ if (HAS_DDI(dev))
intel_ddi_pll_init(dev);
}
@@ -8439,11 +8291,10 @@ static void intel_setup_outputs(struct drm_device *dev)
I915_WRITE(PFIT_CONTROL, 0);
}
- if (!(IS_HASWELL(dev) &&
- (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
+ if (!(HAS_DDI(dev) && (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
intel_crt_init(dev);
- if (IS_HASWELL(dev)) {
+ if (HAS_DDI(dev)) {
int found;
/* Haswell uses DDI functions to detect digital outputs */
@@ -8490,23 +8341,18 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
- int found;
-
/* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
- if (I915_READ(DP_C) & DP_DETECTED)
- intel_dp_init(dev, DP_C, PORT_C);
+ if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
+ intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
- if (I915_READ(SDVOB) & PORT_DETECTED) {
- /* SDVOB multiplex with HDMIB */
- found = intel_sdvo_init(dev, SDVOB, true);
- if (!found)
- intel_hdmi_init(dev, SDVOB, PORT_B);
- if (!found && (I915_READ(DP_B) & DP_DETECTED))
- intel_dp_init(dev, DP_B, PORT_B);
+ if (I915_READ(VLV_DISPLAY_BASE + SDVOB) & PORT_DETECTED) {
+ intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOB, PORT_B);
+ if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
+ intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
}
- if (I915_READ(SDVOC) & PORT_DETECTED)
- intel_hdmi_init(dev, SDVOC, PORT_C);
+ if (I915_READ(VLV_DISPLAY_BASE + SDVOC) & PORT_DETECTED)
+ intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOC, PORT_C);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
@@ -8666,14 +8512,15 @@ int intel_framebuffer_init(struct drm_device *dev,
if (mode_cmd->offsets[0] != 0)
return -EINVAL;
+ drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
+ intel_fb->obj = obj;
+
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
return ret;
}
- drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
- intel_fb->obj = obj;
return 0;
}
@@ -8703,7 +8550,7 @@ static void intel_init_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* We always want a DPMS function */
- if (IS_HASWELL(dev)) {
+ if (HAS_DDI(dev)) {
dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
@@ -8765,8 +8612,9 @@ static void intel_init_display(struct drm_device *dev)
} else if (IS_HASWELL(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
dev_priv->display.write_eld = haswell_write_eld;
- } else
- dev_priv->display.update_wm = NULL;
+ dev_priv->display.modeset_global_resources =
+ haswell_modeset_global_resources;
+ }
} else if (IS_G4X(dev)) {
dev_priv->display.write_eld = g4x_write_eld;
}
@@ -8888,6 +8736,18 @@ static struct intel_quirk intel_quirks[] = {
/* Acer Aspire 5734Z must invert backlight brightness */
{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
+
+ /* Acer/eMachines G725 */
+ { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
+
+ /* Acer/eMachines e725 */
+ { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
+
+ /* Acer/Packard Bell NCL20 */
+ { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
+
+ /* Acer Aspire 4736Z */
+ { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
};
static void intel_init_quirks(struct drm_device *dev)
@@ -8916,12 +8776,7 @@ static void i915_disable_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u8 sr1;
- u32 vga_reg;
-
- if (HAS_PCH_SPLIT(dev))
- vga_reg = CPU_VGACNTRL;
- else
- vga_reg = VGACNTRL;
+ u32 vga_reg = i915_vgacntrl_reg(dev);
vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
outb(SR01, VGA_SR_INDEX);
@@ -8936,10 +8791,7 @@ static void i915_disable_vga(struct drm_device *dev)
void intel_modeset_init_hw(struct drm_device *dev)
{
- /* We attempt to init the necessary power wells early in the initialization
- * time, so the subsystems that expect power to be enabled can work.
- */
- intel_init_power_wells(dev);
+ intel_init_power_well(dev);
intel_prepare_ddi(dev);
@@ -8981,7 +8833,7 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.max_width = 8192;
dev->mode_config.max_height = 8192;
}
- dev->mode_config.fb_base = dev_priv->mm.gtt_base_addr;
+ dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
@@ -8999,6 +8851,9 @@ void intel_modeset_init(struct drm_device *dev)
/* Just disable it once at startup */
i915_disable_vga(dev);
intel_setup_outputs(dev);
+
+ /* Just in case the BIOS is doing something questionable. */
+ intel_disable_fbc(dev);
}
static void
@@ -9180,20 +9035,14 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
* the crtc fixup. */
}
-static void i915_redisable_vga(struct drm_device *dev)
+void i915_redisable_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 vga_reg;
-
- if (HAS_PCH_SPLIT(dev))
- vga_reg = CPU_VGACNTRL;
- else
- vga_reg = VGACNTRL;
+ u32 vga_reg = i915_vgacntrl_reg(dev);
if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
- I915_WRITE(vga_reg, VGA_DISP_DISABLE);
- POSTING_READ(vga_reg);
+ i915_disable_vga(dev);
}
}
@@ -9209,7 +9058,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
struct intel_encoder *encoder;
struct intel_connector *connector;
- if (IS_HASWELL(dev)) {
+ if (HAS_DDI(dev)) {
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
if (tmp & TRANS_DDI_FUNC_ENABLE) {
@@ -9250,7 +9099,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
crtc->active ? "enabled" : "disabled");
}
- if (IS_HASWELL(dev))
+ if (HAS_DDI(dev))
intel_ddi_setup_hw_pll_state(dev);
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
@@ -9301,9 +9150,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
if (force_restore) {
for_each_pipe(pipe) {
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
- intel_set_mode(&crtc->base, &crtc->base.mode,
- crtc->base.x, crtc->base.y, crtc->base.fb);
+ intel_crtc_restore_mode(dev_priv->pipe_to_crtc_mapping[pipe]);
}
i915_redisable_vga(dev);
@@ -9367,6 +9214,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
flush_scheduled_work();
drm_mode_config_cleanup(dev);
+
+ intel_cleanup_overlay(dev);
}
/*
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index fb3715b4b09..f61cb7998c7 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -148,15 +148,6 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
return max_link_bw;
}
-static int
-intel_dp_link_clock(uint8_t link_bw)
-{
- if (link_bw == DP_LINK_BW_2_7)
- return 270000;
- else
- return 162000;
-}
-
/*
* The units on the numbers in the next two are... bizarre. Examples will
* make it clearer; this one parallels an example in the eDP spec.
@@ -191,7 +182,8 @@ intel_dp_adjust_dithering(struct intel_dp *intel_dp,
struct drm_display_mode *mode,
bool adjust_mode)
{
- int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
+ int max_link_clock =
+ drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
int max_rate, mode_rate;
@@ -330,6 +322,48 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
}
}
+static uint32_t
+intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t ch_ctl = intel_dp->output_reg + 0x10;
+ uint32_t status;
+ bool done;
+
+ if (IS_HASWELL(dev)) {
+ switch (intel_dig_port->port) {
+ case PORT_A:
+ ch_ctl = DPA_AUX_CH_CTL;
+ break;
+ case PORT_B:
+ ch_ctl = PCH_DPB_AUX_CH_CTL;
+ break;
+ case PORT_C:
+ ch_ctl = PCH_DPC_AUX_CH_CTL;
+ break;
+ case PORT_D:
+ ch_ctl = PCH_DPD_AUX_CH_CTL;
+ break;
+ default:
+ BUG();
+ }
+ }
+
+#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ if (has_aux_irq)
+ done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10);
+ else
+ done = wait_for_atomic(C, 10) == 0;
+ if (!done)
+ DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
+ has_aux_irq);
+#undef C
+
+ return status;
+}
+
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *send, int send_bytes,
@@ -341,11 +375,17 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4;
- int i;
- int recv_bytes;
+ int i, ret, recv_bytes;
uint32_t status;
uint32_t aux_clock_divider;
int try, precharge;
+ bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
+
+ /* dp aux is extremely sensitive to irq latency, hence request the
+ * lowest possible wakeup latency and so prevent the cpu from going into
+ * deep sleep states.
+ */
+ pm_qos_update_request(&dev_priv->pm_qos, 0);
if (IS_HASWELL(dev)) {
switch (intel_dig_port->port) {
@@ -379,7 +419,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
* clock divider.
*/
if (is_cpu_edp(intel_dp)) {
- if (IS_HASWELL(dev))
+ if (HAS_DDI(dev))
aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
else if (IS_VALLEYVIEW(dev))
aux_clock_divider = 100;
@@ -399,7 +439,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
/* Try to wait for any previous AUX channel activity */
for (try = 0; try < 3; try++) {
- status = I915_READ(ch_ctl);
+ status = I915_READ_NOTRACE(ch_ctl);
if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
break;
msleep(1);
@@ -408,7 +448,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
if (try == 3) {
WARN(1, "dp_aux_ch not started status 0x%08x\n",
I915_READ(ch_ctl));
- return -EBUSY;
+ ret = -EBUSY;
+ goto out;
}
/* Must try at least 3 times according to DP spec */
@@ -421,6 +462,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
/* Send the command and wait for it to complete */
I915_WRITE(ch_ctl,
DP_AUX_CH_CTL_SEND_BUSY |
+ (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
DP_AUX_CH_CTL_TIME_OUT_400us |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
@@ -428,12 +470,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_RECEIVE_ERROR);
- for (;;) {
- status = I915_READ(ch_ctl);
- if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
- break;
- udelay(100);
- }
+
+ status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
/* Clear done status and any errors */
I915_WRITE(ch_ctl,
@@ -451,7 +489,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
if ((status & DP_AUX_CH_CTL_DONE) == 0) {
DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
- return -EBUSY;
+ ret = -EBUSY;
+ goto out;
}
/* Check for timeout or receive error.
@@ -459,14 +498,16 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
*/
if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
/* Timeouts occur when the device isn't connected, so they're
* "normal" -- don't fill the kernel log with these */
if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto out;
}
/* Unload any bytes sent back from the other side */
@@ -479,7 +520,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
unpack_aux(I915_READ(ch_data + i),
recv + i, recv_bytes - i);
- return recv_bytes;
+ ret = recv_bytes;
+out:
+ pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
+
+ return ret;
}
/* Write data to the aux channel in native mode */
@@ -718,16 +763,35 @@ intel_dp_mode_fixup(struct drm_encoder *encoder,
return false;
bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
+
+ if (intel_dp->color_range_auto) {
+ /*
+ * See:
+ * CEA-861-E - 5.1 Default Encoding Parameters
+ * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
+ */
+ if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
+ intel_dp->color_range = DP_COLOR_RANGE_16_235;
+ else
+ intel_dp->color_range = 0;
+ }
+
+ if (intel_dp->color_range)
+ adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;
+
mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
for (clock = 0; clock <= max_clock; clock++) {
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
- int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
+ int link_bw_clock =
+ drm_dp_bw_code_to_link_rate(bws[clock]);
+ int link_avail = intel_dp_max_data_rate(link_bw_clock,
+ lane_count);
if (mode_rate <= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
- adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
+ adjusted_mode->clock = link_bw_clock;
DRM_DEBUG_KMS("DP link bw %02x lane "
"count %d clock %d bpp %d\n",
intel_dp->link_bw, intel_dp->lane_count,
@@ -742,39 +806,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder,
return false;
}
-struct intel_dp_m_n {
- uint32_t tu;
- uint32_t gmch_m;
- uint32_t gmch_n;
- uint32_t link_m;
- uint32_t link_n;
-};
-
-static void
-intel_reduce_ratio(uint32_t *num, uint32_t *den)
-{
- while (*num > 0xffffff || *den > 0xffffff) {
- *num >>= 1;
- *den >>= 1;
- }
-}
-
-static void
-intel_dp_compute_m_n(int bpp,
- int nlanes,
- int pixel_clock,
- int link_clock,
- struct intel_dp_m_n *m_n)
-{
- m_n->tu = 64;
- m_n->gmch_m = (pixel_clock * bpp) >> 3;
- m_n->gmch_n = link_clock * nlanes;
- intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
- m_n->link_m = pixel_clock;
- m_n->link_n = link_clock;
- intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
-}
-
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -785,7 +816,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int lane_count = 4;
- struct intel_dp_m_n m_n;
+ struct intel_link_m_n m_n;
int pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
@@ -808,8 +839,8 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
* the number of bytes_per_pixel post-LUT, which we always
* set up for 8-bits of R/G/B, or 3 bytes total.
*/
- intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
- mode->clock, adjusted_mode->clock, &m_n);
+ intel_link_compute_m_n(intel_crtc->bpp, lane_count,
+ mode->clock, adjusted_mode->clock, &m_n);
if (IS_HASWELL(dev)) {
I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
@@ -851,6 +882,32 @@ void intel_dp_init_link_config(struct intel_dp *intel_dp)
}
}
+static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpa_ctl;
+
+ DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
+ dpa_ctl = I915_READ(DP_A);
+ dpa_ctl &= ~DP_PLL_FREQ_MASK;
+
+ if (clock < 200000) {
+ /* For a long time we've carried around an ILK-DevA w/a for the
+ * 160MHz clock. If we're really unlucky, it's still required.
+ */
+ DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
+ dpa_ctl |= DP_PLL_FREQ_160MHZ;
+ } else {
+ dpa_ctl |= DP_PLL_FREQ_270MHZ;
+ }
+
+ I915_WRITE(DP_A, dpa_ctl);
+
+ POSTING_READ(DP_A);
+ udelay(500);
+}
+
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -926,7 +983,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
else
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
- intel_dp->DP |= intel_dp->color_range;
+ if (!HAS_PCH_SPLIT(dev))
+ intel_dp->DP |= intel_dp->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
@@ -950,6 +1008,9 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
} else {
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
}
+
+ if (is_cpu_edp(intel_dp))
+ ironlake_set_pll_edp(crtc, adjusted_mode->clock);
}
#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
@@ -1057,6 +1118,8 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
pp = ironlake_get_pp_control(dev_priv);
pp &= ~EDP_FORCE_VDD;
@@ -1543,7 +1606,7 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
}
static uint32_t
-intel_dp_signal_levels(uint8_t train_set)
+intel_gen4_signal_levels(uint8_t train_set)
{
uint32_t signal_levels = 0;
@@ -1641,7 +1704,7 @@ intel_gen7_edp_signal_levels(uint8_t train_set)
/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
static uint32_t
-intel_dp_signal_levels_hsw(uint8_t train_set)
+intel_hsw_signal_levels(uint8_t train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -1673,6 +1736,34 @@ intel_dp_signal_levels_hsw(uint8_t train_set)
}
}
+/* Properly updates "DP" with the correct signal levels. */
+static void
+intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ uint32_t signal_levels, mask;
+ uint8_t train_set = intel_dp->train_set[0];
+
+ if (IS_HASWELL(dev)) {
+ signal_levels = intel_hsw_signal_levels(train_set);
+ mask = DDI_BUF_EMP_MASK;
+ } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
+ signal_levels = intel_gen7_edp_signal_levels(train_set);
+ mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
+ } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+ signal_levels = intel_gen6_edp_signal_levels(train_set);
+ mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
+ } else {
+ signal_levels = intel_gen4_signal_levels(train_set);
+ mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
+ }
+
+ DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
+
+ *DP = (*DP & ~mask) | signal_levels;
+}
+
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t dp_reg_value,
@@ -1696,14 +1787,18 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
- temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
- I915_WRITE(DP_TP_CTL(port), temp);
- if (wait_for((I915_READ(DP_TP_STATUS(port)) &
- DP_TP_STATUS_IDLE_DONE), 1))
- DRM_ERROR("Timed out waiting for DP idle patterns\n");
+ if (port != PORT_A) {
+ temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
+ I915_WRITE(DP_TP_CTL(port), temp);
+
+ if (wait_for((I915_READ(DP_TP_STATUS(port)) &
+ DP_TP_STATUS_IDLE_DONE), 1))
+ DRM_ERROR("Timed out waiting for DP idle patterns\n");
+
+ temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+ }
- temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
break;
@@ -1791,7 +1886,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
int voltage_tries, loop_tries;
uint32_t DP = intel_dp->DP;
- if (IS_HASWELL(dev))
+ if (HAS_DDI(dev))
intel_ddi_prepare_link_retrain(encoder);
/* Write the link configuration data */
@@ -1809,24 +1904,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint8_t link_status[DP_LINK_STATUS_SIZE];
- uint32_t signal_levels;
-
- if (IS_HASWELL(dev)) {
- signal_levels = intel_dp_signal_levels_hsw(
- intel_dp->train_set[0]);
- DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
- } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
- signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
- DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
- } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
- signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
- DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
- } else {
- signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
- DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
- }
- DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
- signal_levels);
+
+ intel_dp_set_signal_levels(intel_dp, &DP);
/* Set training pattern 1 */
if (!intel_dp_set_link_train(intel_dp, DP,
@@ -1882,7 +1961,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
bool channel_eq = false;
int tries, cr_tries;
uint32_t DP = intel_dp->DP;
@@ -1892,8 +1970,6 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
cr_tries = 0;
channel_eq = false;
for (;;) {
- /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
- uint32_t signal_levels;
uint8_t link_status[DP_LINK_STATUS_SIZE];
if (cr_tries > 5) {
@@ -1902,19 +1978,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
break;
}
- if (IS_HASWELL(dev)) {
- signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]);
- DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
- } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
- signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
- DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
- } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
- signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
- DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
- } else {
- signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
- DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
- }
+ intel_dp_set_signal_levels(intel_dp, &DP);
/* channel eq pattern */
if (!intel_dp_set_link_train(intel_dp, DP,
@@ -1964,6 +2028,8 @@ intel_dp_link_down(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(intel_dig_port->base.base.crtc);
uint32_t DP = intel_dp->DP;
/*
@@ -1981,7 +2047,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
* intel_ddi_prepare_link_retrain will take care of redoing the link
* train.
*/
- if (IS_HASWELL(dev))
+ if (HAS_DDI(dev))
return;
if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
@@ -1998,7 +2064,8 @@ intel_dp_link_down(struct intel_dp *intel_dp)
}
POSTING_READ(intel_dp->output_reg);
- msleep(17);
+ /* We don't really know why we're doing this */
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
if (HAS_PCH_IBX(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
@@ -2018,19 +2085,14 @@ intel_dp_link_down(struct intel_dp *intel_dp)
/* Changes to enable or select take place the vblank
* after being written.
*/
- if (crtc == NULL) {
- /* We can arrive here never having been attached
- * to a CRTC, for instance, due to inheriting
- * random state from the BIOS.
- *
- * If the pipe is not running, play safe and
- * wait for the clocks to stabilise before
- * continuing.
- */
+ if (WARN_ON(crtc == NULL)) {
+ /* We should never try to disable a port without a crtc
+ * attached. For paranoia keep the code around for a
+ * bit. */
POSTING_READ(intel_dp->output_reg);
msleep(50);
} else
- intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
}
DP &= ~DP_AUDIO_OUTPUT_ENABLE;
@@ -2042,10 +2104,16 @@ intel_dp_link_down(struct intel_dp *intel_dp)
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
+ char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
+
if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
sizeof(intel_dp->dpcd)) == 0)
return false; /* aux transfer failed */
+ hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
+ 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
+ DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
+
if (intel_dp->dpcd[DP_DPCD_REV] == 0)
return false; /* DPCD not present */
@@ -2206,6 +2274,8 @@ static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum drm_connector_status status;
/* Can't disconnect eDP, but you can close the lid... */
@@ -2216,6 +2286,9 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
return status;
}
+ if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
+ return connector_status_disconnected;
+
return intel_dp_detect_dpcd(intel_dp);
}
@@ -2224,17 +2297,18 @@ g4x_dp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
uint32_t bit;
- switch (intel_dp->output_reg) {
- case DP_B:
- bit = DPB_HOTPLUG_LIVE_STATUS;
+ switch (intel_dig_port->port) {
+ case PORT_B:
+ bit = PORTB_HOTPLUG_LIVE_STATUS;
break;
- case DP_C:
- bit = DPC_HOTPLUG_LIVE_STATUS;
+ case PORT_C:
+ bit = PORTC_HOTPLUG_LIVE_STATUS;
break;
- case DP_D:
- bit = DPD_HOTPLUG_LIVE_STATUS;
+ case PORT_D:
+ bit = PORTD_HOTPLUG_LIVE_STATUS;
break;
default:
return connector_status_unknown;
@@ -2290,13 +2364,6 @@ intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *ada
return intel_ddc_get_modes(connector, adapter);
}
-
-/**
- * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
- *
- * \return true if DP port is connected.
- * \return false if DP port is disconnected.
- */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
@@ -2306,7 +2373,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
struct drm_device *dev = connector->dev;
enum drm_connector_status status;
struct edid *edid = NULL;
- char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
intel_dp->has_audio = false;
@@ -2315,10 +2381,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
else
status = g4x_dp_detect(intel_dp);
- hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
- 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
- DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
-
if (status != connector_status_connected)
return status;
@@ -2419,10 +2481,21 @@ intel_dp_set_property(struct drm_connector *connector,
}
if (property == dev_priv->broadcast_rgb_property) {
- if (val == !!intel_dp->color_range)
- return 0;
-
- intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
+ switch (val) {
+ case INTEL_BROADCAST_RGB_AUTO:
+ intel_dp->color_range_auto = true;
+ break;
+ case INTEL_BROADCAST_RGB_FULL:
+ intel_dp->color_range_auto = false;
+ intel_dp->color_range = 0;
+ break;
+ case INTEL_BROADCAST_RGB_LIMITED:
+ intel_dp->color_range_auto = false;
+ intel_dp->color_range = DP_COLOR_RANGE_16_235;
+ break;
+ default:
+ return -EINVAL;
+ }
goto done;
}
@@ -2445,11 +2518,8 @@ intel_dp_set_property(struct drm_connector *connector,
return -EINVAL;
done:
- if (intel_encoder->base.crtc) {
- struct drm_crtc *crtc = intel_encoder->base.crtc;
- intel_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y, crtc->fb);
- }
+ if (intel_encoder->base.crtc)
+ intel_crtc_restore_mode(intel_encoder->base.crtc);
return 0;
}
@@ -2491,7 +2561,6 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
.mode_fixup = intel_dp_mode_fixup,
.mode_set = intel_dp_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -2566,6 +2635,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
+ intel_dp->color_range_auto = true;
if (is_edp(intel_dp)) {
drm_mode_create_scaling_mode_property(connector->dev);
@@ -2755,7 +2825,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
- if (IS_HASWELL(dev))
+ if (HAS_DDI(dev))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -2767,15 +2837,15 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
name = "DPDDC-A";
break;
case PORT_B:
- dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS;
+ dev_priv->hotplug_supported_mask |= PORTB_HOTPLUG_INT_STATUS;
name = "DPDDC-B";
break;
case PORT_C:
- dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS;
+ dev_priv->hotplug_supported_mask |= PORTC_HOTPLUG_INT_STATUS;
name = "DPDDC-C";
break;
case PORT_D:
- dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS;
+ dev_priv->hotplug_supported_mask |= PORTD_HOTPLUG_INT_STATUS;
name = "DPDDC-D";
break;
default:
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8a1bd4a3ad0..07ebac6fe8c 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -109,6 +109,11 @@
* timings in the mode to prevent the crtc fixup from overwriting them.
* Currently only lvds needs that. */
#define INTEL_MODE_CRTC_TIMINGS_SET (0x20)
+/*
+ * Set when limited 16-235 (as opposed to full 0-255) RGB color range is
+ * to be used.
+ */
+#define INTEL_MODE_LIMITED_COLOR_RANGE (0x40)
static inline void
intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
@@ -153,6 +158,7 @@ struct intel_encoder {
bool cloneable;
bool connectors_active;
void (*hot_plug)(struct intel_encoder *);
+ void (*pre_pll_enable)(struct intel_encoder *);
void (*pre_enable)(struct intel_encoder *);
void (*enable)(struct intel_encoder *);
void (*disable)(struct intel_encoder *);
@@ -205,6 +211,7 @@ struct intel_crtc {
* some outputs connected to this crtc.
*/
bool active;
+ bool eld_vld;
bool primary_disabled; /* is the crtc obscured by a plane? */
bool lowfreq_avail;
struct intel_overlay *overlay;
@@ -228,6 +235,9 @@ struct intel_crtc {
/* We can share PLLs across outputs if the timings match */
struct intel_pch_pll *pch_pll;
uint32_t ddi_pll_sel;
+
+ /* reset counter value when the last flip was submitted */
+ unsigned int reset_counter;
};
struct intel_plane {
@@ -283,6 +293,9 @@ struct cxsr_latency {
#define DIP_LEN_AVI 13
#define DIP_AVI_PR_1 0
#define DIP_AVI_PR_2 1
+#define DIP_AVI_RGB_QUANT_RANGE_DEFAULT (0 << 2)
+#define DIP_AVI_RGB_QUANT_RANGE_LIMITED (1 << 2)
+#define DIP_AVI_RGB_QUANT_RANGE_FULL (2 << 2)
#define DIP_TYPE_SPD 0x83
#define DIP_VERSION_SPD 0x1
@@ -337,9 +350,11 @@ struct intel_hdmi {
u32 sdvox_reg;
int ddc_bus;
uint32_t color_range;
+ bool color_range_auto;
bool has_hdmi_sink;
bool has_audio;
enum hdmi_force_audio force_audio;
+ bool rgb_quant_range_selectable;
void (*write_infoframe)(struct drm_encoder *encoder,
struct dip_infoframe *frame);
void (*set_infoframes)(struct drm_encoder *encoder,
@@ -356,6 +371,7 @@ struct intel_dp {
bool has_audio;
enum hdmi_force_audio force_audio;
uint32_t color_range;
+ bool color_range_auto;
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
@@ -377,6 +393,7 @@ struct intel_dp {
struct intel_digital_port {
struct intel_encoder base;
enum port port;
+ u32 port_reversal;
struct intel_dp dp;
struct intel_hdmi hdmi;
};
@@ -439,10 +456,10 @@ extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_mark_busy(struct drm_device *dev);
-extern void intel_mark_idle(struct drm_device *dev);
extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
-extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
+extern void intel_mark_idle(struct drm_device *dev);
extern bool intel_lvds_init(struct drm_device *dev);
+extern bool intel_is_dual_link_lvds(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int output_reg,
enum port port);
extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
@@ -502,12 +519,12 @@ struct intel_set_config {
bool mode_changed;
};
-extern bool intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *old_fb);
+extern int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *old_fb);
extern void intel_modeset_disable(struct drm_device *dev);
+extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
-extern void intel_encoder_noop(struct drm_encoder *encoder);
extern void intel_encoder_destroy(struct drm_encoder *encoder);
extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder);
@@ -546,6 +563,9 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
return container_of(intel_hdmi, struct intel_digital_port, hdmi);
}
+bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port);
+
extern void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
@@ -589,6 +609,7 @@ extern int intel_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj);
extern int intel_fbdev_init(struct drm_device *dev);
+extern void intel_fbdev_initial_config(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
@@ -627,9 +648,10 @@ extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
-extern unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
- unsigned int bpp,
- unsigned int pitch);
+extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+ unsigned int tiling_mode,
+ unsigned int bpp,
+ unsigned int pitch);
extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -648,7 +670,8 @@ extern void intel_update_fbc(struct drm_device *dev);
extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
extern void intel_gpu_ips_teardown(void);
-extern void intel_init_power_wells(struct drm_device *dev);
+extern void intel_init_power_well(struct drm_device *dev);
+extern void intel_set_power_well(struct drm_device *dev, bool enable);
extern void intel_enable_gt_powersave(struct drm_device *dev);
extern void intel_disable_gt_powersave(struct drm_device *dev);
extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 15da99533e5..00e70dbe82d 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -345,7 +345,6 @@ static void intel_dvo_destroy(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
.mode_fixup = intel_dvo_mode_fixup,
.mode_set = intel_dvo_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 7b30b5c2c4e..981bdce3634 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -57,9 +57,10 @@ static struct fb_ops intelfb_ops = {
.fb_debug_leave = drm_fb_helper_debug_leave,
};
-static int intelfb_create(struct intel_fbdev *ifbdev,
+static int intelfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
struct drm_device *dev = ifbdev->helper.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct fb_info *info;
@@ -83,7 +84,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
size = mode_cmd.pitches[0] * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
- obj = i915_gem_alloc_object(dev, size);
+ obj = i915_gem_object_create_stolen(dev, size);
+ if (obj == NULL)
+ obj = i915_gem_alloc_object(dev, size);
if (!obj) {
DRM_ERROR("failed to allocate framebuffer\n");
ret = -ENOMEM;
@@ -133,14 +136,13 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
goto out_unpin;
}
info->apertures->ranges[0].base = dev->mode_config.fb_base;
- info->apertures->ranges[0].size =
- dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+ info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
info->fix.smem_len = size;
info->screen_base =
- ioremap_wc(dev_priv->mm.gtt_base_addr + obj->gtt_offset,
+ ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
size);
if (!info->screen_base) {
ret = -ENOSPC;
@@ -153,6 +155,13 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
+ /* If the object is shmemfs backed, it will have given us zeroed pages.
+ * If the object is stolen however, it will be full of whatever
+ * garbage was left in there.
+ */
+ if (ifbdev->ifb.obj->stolen)
+ memset_io(info->screen_base, 0, info->screen_size);
+
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
@@ -173,26 +182,10 @@ out:
return ret;
}
-static int intel_fb_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = intelfb_create(ifbdev, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.gamma_set = intel_crtc_fb_gamma_set,
.gamma_get = intel_crtc_fb_gamma_get,
- .fb_probe = intel_fb_find_or_create_single,
+ .fb_probe = intelfb_create,
};
static void intel_fbdev_destroy(struct drm_device *dev,
@@ -212,6 +205,7 @@ static void intel_fbdev_destroy(struct drm_device *dev,
drm_fb_helper_fini(&ifbdev->helper);
+ drm_framebuffer_unregister_private(&ifb->base);
drm_framebuffer_cleanup(&ifb->base);
if (ifb->obj) {
drm_gem_object_unreference_unlocked(&ifb->obj->base);
@@ -241,10 +235,18 @@ int intel_fbdev_init(struct drm_device *dev)
}
drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
- drm_fb_helper_initial_config(&ifbdev->helper, 32);
+
return 0;
}
+void intel_fbdev_initial_config(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ /* Due to peculiar init order wrt hpd handling this is separate. */
+ drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 32);
+}
+
void intel_fbdev_fini(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -280,7 +282,7 @@ void intel_fb_restore_mode(struct drm_device *dev)
struct drm_mode_config *config = &dev->mode_config;
struct drm_plane *plane;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
if (ret)
@@ -288,7 +290,8 @@ void intel_fb_restore_mode(struct drm_device *dev)
/* Be sure to shut off any planes that may be active */
list_for_each_entry(plane, &config->plane_list, head)
- plane->funcs->disable_plane(plane);
+ if (plane->enabled)
+ plane->funcs->disable_plane(plane);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 2ee9821b9d9..fa8ec4a2604 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -48,7 +48,7 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t enabled_bits;
- enabled_bits = IS_HASWELL(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
+ enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits,
"HDMI port enabled, expecting disabled\n");
@@ -331,6 +331,7 @@ static void intel_set_infoframe(struct drm_encoder *encoder,
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
.ver = DIP_VERSION_AVI,
@@ -340,7 +341,14 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
- avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode);
+ if (intel_hdmi->rgb_quant_range_selectable) {
+ if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+ avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
+ else
+ avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
+ }
+
+ avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);
intel_set_infoframe(encoder, &avi_if);
}
@@ -364,7 +372,8 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
u32 reg = VIDEO_DIP_CTL;
u32 val = I915_READ(reg);
u32 port;
@@ -391,11 +400,11 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
return;
}
- switch (intel_hdmi->sdvox_reg) {
- case SDVOB:
+ switch (intel_dig_port->port) {
+ case PORT_B:
port = VIDEO_DIP_PORT_B;
break;
- case SDVOC:
+ case PORT_C:
port = VIDEO_DIP_PORT_C;
break;
default:
@@ -428,7 +437,8 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
u32 port;
@@ -447,14 +457,14 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
return;
}
- switch (intel_hdmi->sdvox_reg) {
- case HDMIB:
+ switch (intel_dig_port->port) {
+ case PORT_B:
port = VIDEO_DIP_PORT_B;
break;
- case HDMIC:
+ case PORT_C:
port = VIDEO_DIP_PORT_C;
break;
- case HDMID:
+ case PORT_D:
port = VIDEO_DIP_PORT_D;
break;
default:
@@ -766,46 +776,38 @@ bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- return true;
-}
-
-static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
-{
- struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t bit;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- switch (intel_hdmi->sdvox_reg) {
- case SDVOB:
- bit = HDMIB_HOTPLUG_LIVE_STATUS;
- break;
- case SDVOC:
- bit = HDMIC_HOTPLUG_LIVE_STATUS;
- break;
- default:
- bit = 0;
- break;
+ if (intel_hdmi->color_range_auto) {
+ /* See CEA-861-E - 5.1 Default Encoding Parameters */
+ if (intel_hdmi->has_hdmi_sink &&
+ drm_match_cea_mode(adjusted_mode) > 1)
+ intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235;
+ else
+ intel_hdmi->color_range = 0;
}
- return I915_READ(PORT_HOTPLUG_STAT) & bit;
+ if (intel_hdmi->color_range)
+ adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;
+
+ return true;
}
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
+ struct drm_device *dev = connector->dev;
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_digital_port *intel_dig_port =
hdmi_to_dig_port(intel_hdmi);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct edid *edid;
enum drm_connector_status status = connector_status_disconnected;
- if (IS_G4X(connector->dev) && !g4x_hdmi_connected(intel_hdmi))
- return status;
-
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
+ intel_hdmi->rgb_quant_range_selectable = false;
edid = drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
@@ -817,6 +819,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
intel_hdmi->has_hdmi_sink =
drm_detect_hdmi_monitor(edid);
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
+ intel_hdmi->rgb_quant_range_selectable =
+ drm_rgb_quant_range_selectable(edid);
}
kfree(edid);
}
@@ -902,21 +906,29 @@ intel_hdmi_set_property(struct drm_connector *connector,
}
if (property == dev_priv->broadcast_rgb_property) {
- if (val == !!intel_hdmi->color_range)
- return 0;
-
- intel_hdmi->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
+ switch (val) {
+ case INTEL_BROADCAST_RGB_AUTO:
+ intel_hdmi->color_range_auto = true;
+ break;
+ case INTEL_BROADCAST_RGB_FULL:
+ intel_hdmi->color_range_auto = false;
+ intel_hdmi->color_range = 0;
+ break;
+ case INTEL_BROADCAST_RGB_LIMITED:
+ intel_hdmi->color_range_auto = false;
+ intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235;
+ break;
+ default:
+ return -EINVAL;
+ }
goto done;
}
return -EINVAL;
done:
- if (intel_dig_port->base.base.crtc) {
- struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
- intel_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y, crtc->fb);
- }
+ if (intel_dig_port->base.base.crtc)
+ intel_crtc_restore_mode(intel_dig_port->base.base.crtc);
return 0;
}
@@ -931,7 +943,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
.mode_fixup = intel_hdmi_mode_fixup,
.mode_set = intel_hdmi_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
@@ -957,6 +968,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
{
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
+ intel_hdmi->color_range_auto = true;
}
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
@@ -980,15 +992,15 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
switch (port) {
case PORT_B:
intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
- dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
+ dev_priv->hotplug_supported_mask |= PORTB_HOTPLUG_INT_STATUS;
break;
case PORT_C:
intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
- dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
+ dev_priv->hotplug_supported_mask |= PORTC_HOTPLUG_INT_STATUS;
break;
case PORT_D:
intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
- dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
+ dev_priv->hotplug_supported_mask |= PORTD_HOTPLUG_INT_STATUS;
break;
case PORT_A:
/* Internal port only for eDP. */
@@ -1013,7 +1025,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
intel_hdmi->set_infoframes = cpt_set_infoframes;
}
- if (IS_HASWELL(dev))
+ if (HAS_DDI(dev))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 3ef5af15b81..acf8aec9ada 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -63,6 +63,7 @@ intel_i2c_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
+ I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
}
static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
@@ -202,6 +203,68 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
algo->data = bus;
}
+#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 4)
+static int
+gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
+ u32 gmbus2_status,
+ u32 gmbus4_irq_en)
+{
+ int i;
+ int reg_offset = dev_priv->gpio_mmio_base;
+ u32 gmbus2 = 0;
+ DEFINE_WAIT(wait);
+
+ /* Important: The hw handles only the first bit, so set only one! Since
+ * we also need to check for NAKs besides the hw ready/idle signal, we
+ * need to wake up periodically and check that ourselves. */
+ I915_WRITE(GMBUS4 + reg_offset, gmbus4_irq_en);
+
+ for (i = 0; i < msecs_to_jiffies(50) + 1; i++) {
+ prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait,
+ TASK_UNINTERRUPTIBLE);
+
+ gmbus2 = I915_READ_NOTRACE(GMBUS2 + reg_offset);
+ if (gmbus2 & (GMBUS_SATOER | gmbus2_status))
+ break;
+
+ schedule_timeout(1);
+ }
+ finish_wait(&dev_priv->gmbus_wait_queue, &wait);
+
+ I915_WRITE(GMBUS4 + reg_offset, 0);
+
+ if (gmbus2 & GMBUS_SATOER)
+ return -ENXIO;
+ if (gmbus2 & gmbus2_status)
+ return 0;
+ return -ETIMEDOUT;
+}
+
+static int
+gmbus_wait_idle(struct drm_i915_private *dev_priv)
+{
+ int ret;
+ int reg_offset = dev_priv->gpio_mmio_base;
+
+#define C ((I915_READ_NOTRACE(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0)
+
+ if (!HAS_GMBUS_IRQ(dev_priv->dev))
+ return wait_for(C, 10);
+
+ /* Important: The hw handles only the first bit, so set only one! */
+ I915_WRITE(GMBUS4 + reg_offset, GMBUS_IDLE_EN);
+
+ ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10);
+
+ I915_WRITE(GMBUS4 + reg_offset, 0);
+
+ if (ret)
+ return 0;
+ else
+ return -ETIMEDOUT;
+#undef C
+}
+
static int
gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
u32 gmbus1_index)
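
The helper added above, gmbus_wait_hw_status(), folds the old pattern of waiting on GMBUS2 and then testing GMBUS_SATOER into one call with a fixed return contract: -ENXIO when the NAK bit is seen, 0 when the wanted status bit appears, -ETIMEDOUT otherwise. The userspace sketch below models just that contract with a fake register callback; the real helper arms an interrupt via GMBUS4 and sleeps on dev_priv->gmbus_wait_queue instead of busy-polling, and the bit values here are placeholders.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_SATOER  (1u << 10)   /* stand-in for the NAK indicator */
#define FAKE_HW_RDY  (1u << 11)   /* stand-in for GMBUS_HW_RDY */

/* Poll a caller-supplied "register" up to max_polls times. */
static int wait_hw_status(uint32_t (*read_status)(void *ctx), void *ctx,
			  uint32_t wanted, int max_polls)
{
	int i;

	for (i = 0; i < max_polls; i++) {
		uint32_t status = read_status(ctx);

		if (status & FAKE_SATOER)
			return -ENXIO;     /* slave NAKed the transfer */
		if (status & wanted)
			return 0;          /* hw reached the wanted state */
		/* the driver sleeps (schedule_timeout) between checks */
	}
	return -ETIMEDOUT;
}

/* Fake hardware that reports ready on the third read. */
static uint32_t ready_after_three(void *ctx)
{
	int *calls = ctx;
	return ++(*calls) >= 3 ? FAKE_HW_RDY : 0;
}

int main(void)
{
	int calls = 0;
	printf("ret=%d\n", wait_hw_status(ready_after_three, &calls, FAKE_HW_RDY, 10));
	return 0;
}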
@@ -219,15 +282,11 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
while (len) {
int ret;
u32 val, loop = 0;
- u32 gmbus2;
- ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
- (GMBUS_SATOER | GMBUS_HW_RDY),
- 50);
+ ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
+ GMBUS_HW_RDY_EN);
if (ret)
- return -ETIMEDOUT;
- if (gmbus2 & GMBUS_SATOER)
- return -ENXIO;
+ return ret;
val = I915_READ(GMBUS3 + reg_offset);
do {
@@ -261,7 +320,6 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
while (len) {
int ret;
- u32 gmbus2;
val = loop = 0;
do {
@@ -270,13 +328,10 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
I915_WRITE(GMBUS3 + reg_offset, val);
- ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
- (GMBUS_SATOER | GMBUS_HW_RDY),
- 50);
+ ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
+ GMBUS_HW_RDY_EN);
if (ret)
- return -ETIMEDOUT;
- if (gmbus2 & GMBUS_SATOER)
- return -ENXIO;
+ return ret;
}
return 0;
}
@@ -345,8 +400,6 @@ gmbus_xfer(struct i2c_adapter *adapter,
I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
for (i = 0; i < num; i++) {
- u32 gmbus2;
-
if (gmbus_is_index_read(msgs, i, num)) {
ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
i += 1; /* set i to the index of the read xfer */
@@ -361,13 +414,12 @@ gmbus_xfer(struct i2c_adapter *adapter,
if (ret == -ENXIO)
goto clear_err;
- ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
- (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE),
- 50);
+ ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
+ GMBUS_HW_WAIT_EN);
+ if (ret == -ENXIO)
+ goto clear_err;
if (ret)
goto timeout;
- if (gmbus2 & GMBUS_SATOER)
- goto clear_err;
}
/* Generate a STOP condition on the bus. Note that gmbus can't generate
@@ -380,8 +432,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
* We will re-enable it at the start of the next xfer,
* till then let it sleep.
*/
- if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
- 10)) {
+ if (gmbus_wait_idle(dev_priv)) {
DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
adapter->name);
ret = -ETIMEDOUT;
@@ -405,8 +456,7 @@ clear_err:
* it's slow responding and only answers on the 2nd retry.
*/
ret = -ENXIO;
- if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
- 10)) {
+ if (gmbus_wait_idle(dev_priv)) {
DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
adapter->name);
ret = -ETIMEDOUT;
@@ -465,10 +515,13 @@ int intel_setup_gmbus(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev))
dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
+ else if (IS_VALLEYVIEW(dev))
+ dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
else
dev_priv->gpio_mmio_base = 0;
mutex_init(&dev_priv->gmbus_mutex);
+ init_waitqueue_head(&dev_priv->gmbus_wait_queue);
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 17aee74258a..3d1d97488cc 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -51,7 +51,8 @@ struct intel_lvds_encoder {
u32 pfit_control;
u32 pfit_pgm_ratios;
- bool pfit_dirty;
+ bool is_dual_link;
+ u32 reg;
struct intel_lvds_connector *attached_connector;
};
@@ -71,15 +72,10 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 lvds_reg, tmp;
-
- if (HAS_PCH_SPLIT(dev)) {
- lvds_reg = PCH_LVDS;
- } else {
- lvds_reg = LVDS;
- }
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ u32 tmp;
- tmp = I915_READ(lvds_reg);
+ tmp = I915_READ(lvds_encoder->reg);
if (!(tmp & LVDS_PORT_EN))
return false;
@@ -92,6 +88,91 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
return true;
}
+/* The LVDS pin pair needs to be on before the DPLLs are enabled.
+ * This is an exception to the general rule that mode_set doesn't turn
+ * things on.
+ */
+static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
+{
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct drm_display_mode *fixed_mode =
+ lvds_encoder->attached_connector->base.panel.fixed_mode;
+ int pipe = intel_crtc->pipe;
+ u32 temp;
+
+ temp = I915_READ(lvds_encoder->reg);
+ temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~PORT_TRANS_SEL_MASK;
+ temp |= PORT_TRANS_SEL_CPT(pipe);
+ } else {
+ if (pipe == 1) {
+ temp |= LVDS_PIPEB_SELECT;
+ } else {
+ temp &= ~LVDS_PIPEB_SELECT;
+ }
+ }
+
+ /* set the corresponding LVDS_BORDER bit */
+ temp |= dev_priv->lvds_border_bits;
+ /* Set the B0-B3 data pairs corresponding to whether we're going to
+ * set the DPLLs for dual-channel mode or not.
+ */
+ if (lvds_encoder->is_dual_link)
+ temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ else
+ temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+ * appropriately here, but we need to look more thoroughly into how
+ * panels behave in the two modes.
+ */
+
+ /* Set the dithering flag on LVDS as needed. Note that there is no
+ * special lvds dither control bit on pch-split platforms; dithering is
+ * only controlled through the PIPECONF reg. */
+ if (INTEL_INFO(dev)->gen == 4) {
+ if (dev_priv->lvds_dither)
+ temp |= LVDS_ENABLE_DITHER;
+ else
+ temp &= ~LVDS_ENABLE_DITHER;
+ }
+ temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+ if (fixed_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ temp |= LVDS_HSYNC_POLARITY;
+ if (fixed_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ temp |= LVDS_VSYNC_POLARITY;
+
+ I915_WRITE(lvds_encoder->reg, temp);
+}
+
+static void intel_pre_enable_lvds(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_lvds_encoder *enc = to_lvds_encoder(&encoder->base);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (HAS_PCH_SPLIT(dev) || !enc->pfit_control)
+ return;
+
+ /*
+ * Enable automatic panel scaling so that non-native modes
+ * fill the screen. The panel fitter should only be
+ * adjusted whilst the pipe is disabled, according to
+ * register description and PRM.
+ */
+ DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
+ enc->pfit_control,
+ enc->pfit_pgm_ratios);
+
+ I915_WRITE(PFIT_PGM_RATIOS, enc->pfit_pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, enc->pfit_control);
+}
+
/**
* Sets the power state for the panel.
*/
@@ -101,38 +182,20 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 ctl_reg, lvds_reg, stat_reg;
+ u32 ctl_reg, stat_reg;
if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL;
- lvds_reg = PCH_LVDS;
stat_reg = PCH_PP_STATUS;
} else {
ctl_reg = PP_CONTROL;
- lvds_reg = LVDS;
stat_reg = PP_STATUS;
}
- I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
-
- if (lvds_encoder->pfit_dirty) {
- /*
- * Enable automatic panel scaling so that non-native modes
- * fill the screen. The panel fitter should only be
- * adjusted whilst the pipe is disabled, according to
- * register description and PRM.
- */
- DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
- lvds_encoder->pfit_control,
- lvds_encoder->pfit_pgm_ratios);
-
- I915_WRITE(PFIT_PGM_RATIOS, lvds_encoder->pfit_pgm_ratios);
- I915_WRITE(PFIT_CONTROL, lvds_encoder->pfit_control);
- lvds_encoder->pfit_dirty = false;
- }
+ I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN);
I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
- POSTING_READ(lvds_reg);
+ POSTING_READ(lvds_encoder->reg);
if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
DRM_ERROR("timed out waiting for panel to power on\n");
@@ -144,15 +207,13 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 ctl_reg, lvds_reg, stat_reg;
+ u32 ctl_reg, stat_reg;
if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL;
- lvds_reg = PCH_LVDS;
stat_reg = PCH_PP_STATUS;
} else {
ctl_reg = PP_CONTROL;
- lvds_reg = LVDS;
stat_reg = PP_STATUS;
}
@@ -162,13 +223,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
DRM_ERROR("timed out waiting for panel to power off\n");
- if (lvds_encoder->pfit_control) {
- I915_WRITE(PFIT_CONTROL, 0);
- lvds_encoder->pfit_dirty = true;
- }
-
- I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
- POSTING_READ(lvds_reg);
+ I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
+ POSTING_READ(lvds_encoder->reg);
}
static int intel_lvds_mode_valid(struct drm_connector *connector,
@@ -406,7 +462,6 @@ out:
pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) {
lvds_encoder->pfit_control = pfit_control;
lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios;
- lvds_encoder->pfit_dirty = true;
}
dev_priv->lvds_border_bits = border;
@@ -492,13 +547,14 @@ static const struct dmi_system_id intel_no_modeset_on_lid[] = {
};
/*
- * Lid events. Note the use of 'modeset_on_lid':
- * - we set it on lid close, and reset it on open
+ * Lid events. Note the use of 'modeset':
+ * - we set it to MODESET_ON_LID_OPEN on lid close,
+ * and set it to MODESET_DONE on open
* - we use it as a "only once" bit (ie we ignore
- * duplicate events where it was already properly
- * set/reset)
- * - the suspend/resume paths will also set it to
- * zero, since they restore the mode ("lid open").
+ * duplicate events where it was already properly set)
+ * - the suspend/resume paths will set it to
+ * MODESET_SUSPENDED and ignore the lid open event,
+ * because they restore the mode ("lid open").
*/
static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
void *unused)
@@ -512,6 +568,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
return NOTIFY_OK;
+ mutex_lock(&dev_priv->modeset_restore_lock);
+ if (dev_priv->modeset_restore == MODESET_SUSPENDED)
+ goto exit;
/*
* check and update the status of LVDS connector after receiving
* the LID notification event.
@@ -520,21 +579,24 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
/* Don't force modeset on machines where it causes a GPU lockup */
if (dmi_check_system(intel_no_modeset_on_lid))
- return NOTIFY_OK;
+ goto exit;
if (!acpi_lid_open()) {
- dev_priv->modeset_on_lid = 1;
- return NOTIFY_OK;
+ /* do modeset on next lid open event */
+ dev_priv->modeset_restore = MODESET_ON_LID_OPEN;
+ goto exit;
}
- if (!dev_priv->modeset_on_lid)
- return NOTIFY_OK;
-
- dev_priv->modeset_on_lid = 0;
+ if (dev_priv->modeset_restore == MODESET_DONE)
+ goto exit;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, true);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
+
+ dev_priv->modeset_restore = MODESET_DONE;
+exit:
+ mutex_unlock(&dev_priv->modeset_restore_lock);
return NOTIFY_OK;
}
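
The reworked lid handler above turns the old modeset_on_lid flag into a three-state modeset_restore value. The following compact model keeps the state names from the diff but is otherwise illustrative; it returns 1 only when a lid event should actually trigger a mode restore.

#include <stdio.h>

enum modeset_restore { MODESET_ON_LID_OPEN, MODESET_DONE, MODESET_SUSPENDED };

/* Returns 1 when the lid event should trigger a mode restore. */
static int lid_event(enum modeset_restore *state, int lid_open)
{
	if (*state == MODESET_SUSPENDED)
		return 0;                      /* suspend/resume restores the mode itself */
	if (!lid_open) {
		*state = MODESET_ON_LID_OPEN;  /* remember to restore on the next open */
		return 0;
	}
	if (*state == MODESET_DONE)
		return 0;                      /* already restored once */
	*state = MODESET_DONE;
	return 1;                              /* do the modeset now */
}

int main(void)
{
	enum modeset_restore state = MODESET_DONE;

	printf("%d ", lid_event(&state, 0));   /* close: 0, arm a restore */
	printf("%d ", lid_event(&state, 1));   /* open:  1, restore once */
	printf("%d\n", lid_event(&state, 1));  /* open:  0, nothing left to do */
	return 0;
}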
@@ -591,8 +653,7 @@ static int intel_lvds_set_property(struct drm_connector *connector,
* If the CRTC is enabled, the display will be changed
* according to the new panel fitting mode.
*/
- intel_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y, crtc->fb);
+ intel_crtc_restore_mode(crtc);
}
}
@@ -602,7 +663,6 @@ static int intel_lvds_set_property(struct drm_connector *connector,
static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
.mode_fixup = intel_lvds_mode_fixup,
.mode_set = intel_lvds_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
@@ -895,6 +955,66 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
return false;
}
+static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
+{
+ DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id intel_dual_link_lvds[] = {
+ {
+ .callback = intel_dual_link_lvds_callback,
+ .ident = "Apple MacBook Pro (Core i5/i7 Series)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
+ },
+ },
+ { } /* terminating entry */
+};
+
+bool intel_is_dual_link_lvds(struct drm_device *dev)
+{
+ struct intel_encoder *encoder;
+ struct intel_lvds_encoder *lvds_encoder;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->type == INTEL_OUTPUT_LVDS) {
+ lvds_encoder = to_lvds_encoder(&encoder->base);
+
+ return lvds_encoder->is_dual_link;
+ }
+ }
+
+ return false;
+}
+
+static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
+{
+ struct drm_device *dev = lvds_encoder->base.base.dev;
+ unsigned int val;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* use the module option value if specified */
+ if (i915_lvds_channel_mode > 0)
+ return i915_lvds_channel_mode == 2;
+
+ if (dmi_check_system(intel_dual_link_lvds))
+ return true;
+
+ /* BIOS should set the proper LVDS register value at boot, but
+ * in reality, it doesn't set the value when the lid is closed;
+ * we need to check "the value to be set" in VBT when LVDS
+ * register is uninitialized.
+ */
+ val = I915_READ(lvds_encoder->reg);
+ if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
+ val = dev_priv->bios_lvds_val;
+
+ return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
+}
+
static bool intel_lvds_supported(struct drm_device *dev)
{
/* With the introduction of the PCH we gained a dedicated
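
compute_is_dual_link_lvds() above encodes a simple precedence: an explicit i915_lvds_channel_mode module option wins, then the DMI quirk table, then the CLKB power bits read back from the LVDS register, with a fallback to the VBT value when the register looks uninitialised. A standalone sketch of that ordering follows; the bit masks are placeholders, not the real register layout.

#include <stdbool.h>
#include <stdint.h>

#define FAKE_CLKB_POWER_MASK  (3u << 4)   /* placeholder, not the hw layout */
#define FAKE_CLKB_POWER_UP    (3u << 4)

static bool is_dual_link(int channel_mode_opt,   /* 0 = auto, 1 = single, 2 = dual */
			 bool dmi_quirk_matched,
			 bool reg_uninitialised,
			 uint32_t lvds_reg_val,
			 uint32_t vbt_lvds_val)
{
	if (channel_mode_opt > 0)                 /* 1. module option */
		return channel_mode_opt == 2;
	if (dmi_quirk_matched)                    /* 2. DMI quirk (e.g. MacBookPro8,2) */
		return true;
	if (reg_uninitialised)                    /* 3. register value, VBT fallback */
		lvds_reg_val = vbt_lvds_val;
	return (lvds_reg_val & FAKE_CLKB_POWER_MASK) == FAKE_CLKB_POWER_UP;
}

int main(void)
{
	/* a module option forcing dual link overrides everything else */
	return is_dual_link(2, false, false, 0, 0) ? 0 : 1;
}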
@@ -980,6 +1100,8 @@ bool intel_lvds_init(struct drm_device *dev)
DRM_MODE_ENCODER_LVDS);
intel_encoder->enable = intel_enable_lvds;
+ intel_encoder->pre_enable = intel_pre_enable_lvds;
+ intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
intel_encoder->disable = intel_disable_lvds;
intel_encoder->get_hw_state = intel_lvds_get_hw_state;
intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -1001,6 +1123,12 @@ bool intel_lvds_init(struct drm_device *dev)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
+ if (HAS_PCH_SPLIT(dev)) {
+ lvds_encoder->reg = PCH_LVDS;
+ } else {
+ lvds_encoder->reg = LVDS;
+ }
+
/* create the scaling mode property */
drm_mode_create_scaling_mode_property(dev);
drm_object_attach_property(&connector->base,
@@ -1101,6 +1229,10 @@ bool intel_lvds_init(struct drm_device *dev)
goto failed;
out:
+ lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
+ DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
+ lvds_encoder->is_dual_link ? "dual" : "single");
+
/*
* Unlock registers and just
* leave them unlocked
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index b00f1c83adc..0e860f39933 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -28,7 +28,6 @@
#include <linux/fb.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
-#include <drm/drm_edid.h>
#include "intel_drv.h"
#include "i915_drv.h"
@@ -101,8 +100,9 @@ intel_attach_force_audio_property(struct drm_connector *connector)
}
static const struct drm_prop_enum_list broadcast_rgb_names[] = {
- { 0, "Full" },
- { 1, "Limited 16:235" },
+ { INTEL_BROADCAST_RGB_AUTO, "Automatic" },
+ { INTEL_BROADCAST_RGB_FULL, "Full" },
+ { INTEL_BROADCAST_RGB_LIMITED, "Limited 16:235" },
};
void
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 7741c22c934..4d338740f2c 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -347,7 +347,7 @@ static void intel_didl_outputs(struct drm_device *dev)
int i = 0;
handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
- if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
+ if (!handle || acpi_bus_get_device(handle, &acpi_dev))
return;
if (acpi_is_video_device(acpi_dev))
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index d7bc817f51a..67a2501d519 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -195,7 +195,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
else
- regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping,
+ regs = io_mapping_map_wc(dev_priv->gtt.mappable,
overlay->reg_bo->gtt_offset);
return regs;
@@ -1045,13 +1045,13 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
}
if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) {
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
mutex_lock(&dev->struct_mutex);
ret = intel_overlay_switch_off(overlay);
mutex_unlock(&dev->struct_mutex);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -1075,7 +1075,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
goto out_free;
}
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
mutex_lock(&dev->struct_mutex);
if (new_bo->tiling_mode) {
@@ -1157,7 +1157,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
goto out_unlock;
mutex_unlock(&dev->struct_mutex);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
kfree(params);
@@ -1165,7 +1165,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
out_unlock:
mutex_unlock(&dev->struct_mutex);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
drm_gem_object_unreference_unlocked(&new_bo->base);
out_free:
kfree(params);
@@ -1241,7 +1241,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
return -ENODEV;
}
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
mutex_lock(&dev->struct_mutex);
ret = -EINVAL;
@@ -1307,7 +1307,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
ret = 0;
out_unlock:
mutex_unlock(&dev->struct_mutex);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -1333,8 +1333,10 @@ void intel_setup_overlay(struct drm_device *dev)
overlay->dev = dev;
- reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
- if (!reg_bo)
+ reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
+ if (reg_bo == NULL)
+ reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
+ if (reg_bo == NULL)
goto out_free;
overlay->reg_bo = reg_bo;
@@ -1432,7 +1434,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
regs = (struct overlay_registers __iomem *)
overlay->reg_bo->phys_obj->handle->vaddr;
else
- regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
overlay->reg_bo->gtt_offset);
return regs;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index bee8cb6108a..a3730e0289e 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -321,6 +321,9 @@ void intel_panel_enable_backlight(struct drm_device *dev,
if (dev_priv->backlight_level == 0)
dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
+ dev_priv->backlight_enabled = true;
+ intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
+
if (INTEL_INFO(dev)->gen >= 4) {
uint32_t reg, tmp;
@@ -356,12 +359,12 @@ void intel_panel_enable_backlight(struct drm_device *dev,
}
set_level:
- /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1.
- * BLC_PWM_CPU_CTL may be cleared to zero automatically when these
- * registers are set.
+ /* Check the current backlight level and try to set again if it's zero.
+ * On some machines, BLC_PWM_CPU_CTL is cleared to zero automatically
+ * when BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1 are written.
*/
- dev_priv->backlight_enabled = true;
- intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
+ if (!intel_panel_get_backlight(dev))
+ intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
}
static void intel_panel_init_backlight(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 3280cffe50f..61fee7fcdc2 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -447,12 +447,6 @@ void intel_update_fbc(struct drm_device *dev)
dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
goto out_disable;
}
- if (intel_fb->obj->base.size > dev_priv->cfb_size) {
- DRM_DEBUG_KMS("framebuffer too large, disabling "
- "compression\n");
- dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
- goto out_disable;
- }
if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
(crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
DRM_DEBUG_KMS("mode incompatible with compression, "
@@ -486,6 +480,14 @@ void intel_update_fbc(struct drm_device *dev)
if (in_dbg_master())
goto out_disable;
+ if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
+ DRM_INFO("not enough stolen space for compressed buffer (need %zd bytes), disabling\n", intel_fb->obj->base.size);
+ DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
+ DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+ goto out_disable;
+ }
+
/* If the scanout has not changed, don't modify the FBC settings.
* Note that we make the fundamental assumption that the fb->obj
* cannot be unpinned (and have its GTT offset and fence revoked)
@@ -533,6 +535,7 @@ out_disable:
DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
intel_disable_fbc(dev);
}
+ i915_gem_stolen_cleanup_compression(dev);
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
@@ -2286,7 +2289,6 @@ err_unpin:
i915_gem_object_unpin(ctx);
err_unref:
drm_gem_object_unreference(&ctx->base);
- mutex_unlock(&dev->struct_mutex);
return NULL;
}
@@ -3581,6 +3583,19 @@ static void cpt_init_clock_gating(struct drm_device *dev)
}
}
+static void gen6_check_mch_setup(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ tmp = I915_READ(MCH_SSKPD);
+ if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) {
+ DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp);
+ DRM_INFO("This can cause pipe underruns and display issues.\n");
+ DRM_INFO("Please upgrade your BIOS to fix this.\n");
+ }
+}
+
static void gen6_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3673,6 +3688,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
cpt_init_clock_gating(dev);
+
+ gen6_check_mch_setup(dev);
}
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
@@ -3684,6 +3701,10 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
reg |= GEN7_FF_VS_SCHED_HW;
reg |= GEN7_FF_DS_SCHED_HW;
+ /* WaVSRefCountFullforceMissDisable */
+ if (IS_HASWELL(dev_priv->dev))
+ reg &= ~GEN7_FF_VS_REF_CNT_FFME;
+
I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
@@ -3854,6 +3875,8 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
cpt_init_clock_gating(dev);
+
+ gen6_check_mch_setup(dev);
}
static void valleyview_init_clock_gating(struct drm_device *dev)
@@ -4047,35 +4070,57 @@ void intel_init_clock_gating(struct drm_device *dev)
dev_priv->display.init_clock_gating(dev);
}
-/* Starting with Haswell, we have different power wells for
- * different parts of the GPU. This attempts to enable them all.
- */
-void intel_init_power_wells(struct drm_device *dev)
+void intel_set_power_well(struct drm_device *dev, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long power_wells[] = {
- HSW_PWR_WELL_CTL1,
- HSW_PWR_WELL_CTL2,
- HSW_PWR_WELL_CTL4
- };
- int i;
+ bool is_enabled, enable_requested;
+ uint32_t tmp;
if (!IS_HASWELL(dev))
return;
- mutex_lock(&dev->struct_mutex);
+ tmp = I915_READ(HSW_PWR_WELL_DRIVER);
+ is_enabled = tmp & HSW_PWR_WELL_STATE;
+ enable_requested = tmp & HSW_PWR_WELL_ENABLE;
- for (i = 0; i < ARRAY_SIZE(power_wells); i++) {
- int well = I915_READ(power_wells[i]);
+ if (enable) {
+ if (!enable_requested)
+ I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE);
- if ((well & HSW_PWR_WELL_STATE) == 0) {
- I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE);
- if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20))
- DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
+ if (!is_enabled) {
+ DRM_DEBUG_KMS("Enabling power well\n");
+ if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
+ HSW_PWR_WELL_STATE), 20))
+ DRM_ERROR("Timeout enabling power well\n");
+ }
+ } else {
+ if (enable_requested) {
+ I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
+ DRM_DEBUG_KMS("Requesting to disable the power well\n");
}
}
+}
- mutex_unlock(&dev->struct_mutex);
+/*
+ * Starting with Haswell, we have a "Power Down Well" that can be turned off
+ * when not needed anymore. We have 4 registers that can request the power well
+ * to be enabled, and it will only be disabled if none of the registers is
+ * requesting it to be enabled.
+ */
+void intel_init_power_well(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!IS_HASWELL(dev))
+ return;
+
+ /* For now, we need the power well to be always enabled. */
+ intel_set_power_well(dev, true);
+
+ /* We're taking over the BIOS, so clear any requests made by it since
+ * the driver is in charge now. */
+ if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE)
+ I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
/* Set up chip specific power management-related functions */
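
intel_set_power_well() above follows a small request/acknowledge handshake on HSW_PWR_WELL_DRIVER: write the enable request, then wait for the state bit, and clear the request to let the hardware power the well down once no other requester still wants it. The toy model below captures only that handshake; the bit positions, the fake register, and the instant "hardware" response are invented for the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REQ_BIT    (1u << 31)   /* invented: "enable requested" */
#define STATE_BIT  (1u << 30)   /* invented: "well is powered" */

static uint32_t pwr_well_reg;    /* stands in for HSW_PWR_WELL_DRIVER */

/* Fake hardware: the state bit simply follows the request bit. */
static uint32_t read_reg(void)
{
	if (pwr_well_reg & REQ_BIT)
		pwr_well_reg |= STATE_BIT;
	else
		pwr_well_reg &= ~STATE_BIT;
	return pwr_well_reg;
}

static void set_power_well(bool enable)
{
	uint32_t tmp = read_reg();
	bool requested = tmp & REQ_BIT;

	if (enable) {
		if (!requested)
			pwr_well_reg |= REQ_BIT;        /* file the request */
		if (!(read_reg() & STATE_BIT))
			fprintf(stderr, "timeout enabling power well\n");
	} else if (requested) {
		pwr_well_reg &= ~REQ_BIT;               /* withdraw the request */
	}
}

int main(void)
{
	set_power_well(true);
	printf("enabled=%d\n", !!(read_reg() & STATE_BIT));
	set_power_well(false);
	printf("enabled=%d\n", !!(read_reg() & STATE_BIT));
	return 0;
}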
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 42ff97d667d..1d5d613eb6b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -318,6 +318,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
* TLB invalidate requires a post-sync write.
*/
flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
/* Workaround: we must issue a pipe_control with CS-stall bit
* set before a pipe_control command that has the state cache
@@ -331,7 +332,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
intel_ring_emit(ring, flags);
- intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, scratch_addr);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
@@ -467,6 +468,9 @@ init_pipe_control(struct intel_ring_buffer *ring)
if (pc->cpu_page == NULL)
goto err_unpin;
+ DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
+ ring->name, pc->gtt_offset);
+
pc->obj = obj;
ring->private = pc;
return 0;
@@ -613,6 +617,13 @@ gen6_add_request(struct intel_ring_buffer *ring)
return 0;
}
+static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
+ u32 seqno)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return dev_priv->last_seqno < seqno;
+}
+
/**
* intel_ring_sync - sync the waiter to the signaller on seqno
*
@@ -643,11 +654,20 @@ gen6_ring_sync(struct intel_ring_buffer *waiter,
if (ret)
return ret;
- intel_ring_emit(waiter,
- dw1 | signaller->semaphore_register[waiter->id]);
- intel_ring_emit(waiter, seqno);
- intel_ring_emit(waiter, 0);
- intel_ring_emit(waiter, MI_NOOP);
+ /* If seqno wrap happened, omit the wait with no-ops */
+ if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
+ intel_ring_emit(waiter,
+ dw1 |
+ signaller->semaphore_register[waiter->id]);
+ intel_ring_emit(waiter, seqno);
+ intel_ring_emit(waiter, 0);
+ intel_ring_emit(waiter, MI_NOOP);
+ } else {
+ intel_ring_emit(waiter, MI_NOOP);
+ intel_ring_emit(waiter, MI_NOOP);
+ intel_ring_emit(waiter, MI_NOOP);
+ intel_ring_emit(waiter, MI_NOOP);
+ }
intel_ring_advance(waiter);
return 0;
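
The wrap test used above, dev_priv->last_seqno < seqno, reads as "the seqno we are asked to wait for is newer than anything handed out since the counter was reinitialised", which can only be true if it predates the wrap, so the semaphore wait is padded with MI_NOOPs instead. In miniature, with illustrative values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True when 'wait_for' must belong to the pre-wrap epoch. */
static bool seqno_wrapped(uint32_t last_assigned, uint32_t wait_for)
{
	return last_assigned < wait_for;
}

int main(void)
{
	printf("%d\n", seqno_wrapped(1, 0xfffffff0u));   /* 1: skip the semaphore wait */
	printf("%d\n", seqno_wrapped(100, 42));          /* 0: wait as usual */
	return 0;
}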
@@ -728,6 +748,12 @@ ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
+static void
+ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+ intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+}
+
static u32
pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
@@ -735,6 +761,13 @@ pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
return pc->cpu_page[0];
}
+static void
+pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+ struct pipe_control *pc = ring->private;
+ pc->cpu_page[0] = seqno;
+}
+
static bool
gen5_ring_get_irq(struct intel_ring_buffer *ring)
{
@@ -1164,7 +1197,11 @@ static int intel_init_ring_buffer(struct drm_device *dev,
return ret;
}
- obj = i915_gem_alloc_object(dev, ring->size);
+ obj = NULL;
+ if (!HAS_LLC(dev))
+ obj = i915_gem_object_create_stolen(dev, ring->size);
+ if (obj == NULL)
+ obj = i915_gem_alloc_object(dev, ring->size);
if (obj == NULL) {
DRM_ERROR("Failed to allocate ringbuffer\n");
ret = -ENOMEM;
@@ -1182,7 +1219,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto err_unpin;
ring->virtual_start =
- ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset,
+ ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
ring->size);
if (ring->virtual_start == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
@@ -1348,7 +1385,8 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
msleep(1);
- ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+ dev_priv->mm.interruptible);
if (ret)
return ret;
} while (!time_after(jiffies, end));
@@ -1410,14 +1448,35 @@ intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
}
+static int __intel_ring_begin(struct intel_ring_buffer *ring,
+ int bytes)
+{
+ int ret;
+
+ if (unlikely(ring->tail + bytes > ring->effective_size)) {
+ ret = intel_wrap_ring_buffer(ring);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ if (unlikely(ring->space < bytes)) {
+ ret = ring_wait_for_space(ring, bytes);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ring->space -= bytes;
+ return 0;
+}
+
int intel_ring_begin(struct intel_ring_buffer *ring,
int num_dwords)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
- int n = 4*num_dwords;
int ret;
- ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+ dev_priv->mm.interruptible);
if (ret)
return ret;
@@ -1426,20 +1485,21 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
if (ret)
return ret;
- if (unlikely(ring->tail + n > ring->effective_size)) {
- ret = intel_wrap_ring_buffer(ring);
- if (unlikely(ret))
- return ret;
- }
+ return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
+}
- if (unlikely(ring->space < n)) {
- ret = ring_wait_for_space(ring, n);
- if (unlikely(ret))
- return ret;
+void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ BUG_ON(ring->outstanding_lazy_request);
+
+ if (INTEL_INFO(ring->dev)->gen >= 6) {
+ I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
+ I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
}
- ring->space -= n;
- return 0;
+ ring->set_seqno(ring, seqno);
}
void intel_ring_advance(struct intel_ring_buffer *ring)
@@ -1447,7 +1507,7 @@ void intel_ring_advance(struct intel_ring_buffer *ring)
struct drm_i915_private *dev_priv = ring->dev->dev_private;
ring->tail &= ring->size - 1;
- if (dev_priv->stop_rings & intel_ring_flag(ring))
+ if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
return;
ring->write_tail(ring, ring->tail);
}
@@ -1604,6 +1664,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->irq_put = gen6_ring_put_irq;
ring->irq_enable_mask = GT_USER_INTERRUPT;
ring->get_seqno = gen6_ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
ring->sync_to = gen6_ring_sync;
ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
@@ -1614,6 +1675,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->add_request = pc_render_add_request;
ring->flush = gen4_render_ring_flush;
ring->get_seqno = pc_render_get_seqno;
+ ring->set_seqno = pc_render_set_seqno;
ring->irq_get = gen5_ring_get_irq;
ring->irq_put = gen5_ring_put_irq;
ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
@@ -1624,6 +1686,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
else
ring->flush = gen4_render_ring_flush;
ring->get_seqno = ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
if (IS_GEN2(dev)) {
ring->irq_get = i8xx_ring_get_irq;
ring->irq_put = i8xx_ring_put_irq;
@@ -1695,6 +1758,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
else
ring->flush = gen4_render_ring_flush;
ring->get_seqno = ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
if (IS_GEN2(dev)) {
ring->irq_get = i8xx_ring_get_irq;
ring->irq_put = i8xx_ring_put_irq;
@@ -1755,6 +1819,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
ring->flush = gen6_ring_flush;
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
@@ -1770,6 +1835,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
ring->flush = bsd_ring_flush;
ring->add_request = i9xx_add_request;
ring->get_seqno = ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
if (IS_GEN5(dev)) {
ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
ring->irq_get = gen5_ring_get_irq;
@@ -1799,6 +1865,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
ring->flush = blt_ring_flush;
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6af87cd0572..d66208c2c48 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -90,6 +90,8 @@ struct intel_ring_buffer {
*/
u32 (*get_seqno)(struct intel_ring_buffer *ring,
bool lazy_coherency);
+ void (*set_seqno)(struct intel_ring_buffer *ring,
+ u32 seqno);
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
u32 offset, u32 length,
unsigned flags);
@@ -178,6 +180,13 @@ intel_read_status_page(struct intel_ring_buffer *ring,
return ring->status_page.page_addr[reg];
}
+static inline void
+intel_write_status_page(struct intel_ring_buffer *ring,
+ int reg, u32 value)
+{
+ ring->status_page.page_addr[reg] = value;
+}
+
/**
* Reads a dword out of the status page, which is written to from the command
* queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
@@ -208,7 +217,7 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
}
void intel_ring_advance(struct intel_ring_buffer *ring);
int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
-
+void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
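The intel_ringbuffer.h hunks above pair the existing get_seqno hook with a new set_seqno hook and an intel_write_status_page() helper, so a ring's last-seen sequence number can be re-seeded (for example near the wrap point) as well as read. A minimal, self-contained sketch of that vtable-plus-status-page pattern follows; the names are purely illustrative, not the i915 API.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sketch: paired read/write accessors over a shared
 * "status page" array, exposed through per-ring function pointers.
 */
#define SKETCH_SEQNO_INDEX 0x20

struct sketch_ring {
        uint32_t status_page[64];
        uint32_t (*get_seqno)(struct sketch_ring *ring);
        void (*set_seqno)(struct sketch_ring *ring, uint32_t seqno);
};

static uint32_t sketch_get_seqno(struct sketch_ring *ring)
{
        return ring->status_page[SKETCH_SEQNO_INDEX];
}

static void sketch_set_seqno(struct sketch_ring *ring, uint32_t seqno)
{
        ring->status_page[SKETCH_SEQNO_INDEX] = seqno;
}

int main(void)
{
        struct sketch_ring ring = {
                .get_seqno = sketch_get_seqno,
                .set_seqno = sketch_set_seqno,
        };

        ring.set_seqno(&ring, 0xfffffff0);      /* seed near the wrap point */
        printf("seqno = 0x%08x\n", ring.get_seqno(&ring));
        return 0;
}
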
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index c275bf0fa36..d07a8cdf998 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -103,6 +103,7 @@ struct intel_sdvo {
* It is only valid when using TMDS encoding and 8 bit per color mode.
*/
uint32_t color_range;
+ bool color_range_auto;
/**
* This is set if we're going to treat the device as TV-out.
@@ -125,6 +126,7 @@ struct intel_sdvo {
bool is_hdmi;
bool has_hdmi_monitor;
bool has_hdmi_audio;
+ bool rgb_quant_range_selectable;
/**
* This is set if we detect output of sdvo device as LVDS and
@@ -946,7 +948,8 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
&tx_rate, 1);
}
-static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
+static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
+ const struct drm_display_mode *adjusted_mode)
{
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
@@ -955,6 +958,13 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
};
uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
+ if (intel_sdvo->rgb_quant_range_selectable) {
+ if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+ avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
+ else
+ avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
+ }
+
intel_dip_infoframe_csum(&avi_if);
/* sdvo spec says that the ecc is handled by the hw, and it looks like
@@ -1064,6 +1074,18 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode);
intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
+ if (intel_sdvo->color_range_auto) {
+ /* See CEA-861-E - 5.1 Default Encoding Parameters */
+ if (intel_sdvo->has_hdmi_monitor &&
+ drm_match_cea_mode(adjusted_mode) > 1)
+ intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235;
+ else
+ intel_sdvo->color_range = 0;
+ }
+
+ if (intel_sdvo->color_range)
+ adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;
+
return true;
}
@@ -1121,7 +1143,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
- intel_sdvo_set_avi_infoframe(intel_sdvo);
+ intel_sdvo_set_avi_infoframe(intel_sdvo, adjusted_mode);
} else
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
@@ -1153,7 +1175,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
/* The real mode polarity is set by the SDVO commands, using
* struct intel_sdvo_dtd. */
sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
- if (intel_sdvo->is_hdmi)
+ if (!HAS_PCH_SPLIT(dev) && intel_sdvo->is_hdmi)
sdvox |= intel_sdvo->color_range;
if (INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_BORDER_ENABLE;
@@ -1513,6 +1535,8 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
if (intel_sdvo->is_hdmi) {
intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
+ intel_sdvo->rgb_quant_range_selectable =
+ drm_rgb_quant_range_selectable(edid);
}
} else
status = connector_status_disconnected;
@@ -1564,6 +1588,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
intel_sdvo->has_hdmi_monitor = false;
intel_sdvo->has_hdmi_audio = false;
+ intel_sdvo->rgb_quant_range_selectable = false;
if ((intel_sdvo_connector->output_flag & response) == 0)
ret = connector_status_disconnected;
@@ -1897,10 +1922,21 @@ intel_sdvo_set_property(struct drm_connector *connector,
}
if (property == dev_priv->broadcast_rgb_property) {
- if (val == !!intel_sdvo->color_range)
- return 0;
-
- intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
+ switch (val) {
+ case INTEL_BROADCAST_RGB_AUTO:
+ intel_sdvo->color_range_auto = true;
+ break;
+ case INTEL_BROADCAST_RGB_FULL:
+ intel_sdvo->color_range_auto = false;
+ intel_sdvo->color_range = 0;
+ break;
+ case INTEL_BROADCAST_RGB_LIMITED:
+ intel_sdvo->color_range_auto = false;
+ intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235;
+ break;
+ default:
+ return -EINVAL;
+ }
goto done;
}
@@ -1997,11 +2033,8 @@ set_value:
done:
- if (intel_sdvo->base.base.crtc) {
- struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
- intel_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y, crtc->fb);
- }
+ if (intel_sdvo->base.base.crtc)
+ intel_crtc_restore_mode(intel_sdvo->base.base.crtc);
return 0;
#undef CHECK_PROPERTY
@@ -2010,7 +2043,6 @@ done:
static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
.mode_fixup = intel_sdvo_mode_fixup,
.mode_set = intel_sdvo_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
@@ -2200,13 +2232,16 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
}
static void
-intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
+intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *connector)
{
struct drm_device *dev = connector->base.base.dev;
intel_attach_force_audio_property(&connector->base.base);
- if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
+ if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) {
intel_attach_broadcast_rgb_property(&connector->base.base);
+ intel_sdvo->color_range_auto = true;
+ }
}
static bool
@@ -2254,7 +2289,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (intel_sdvo->is_hdmi)
- intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
+ intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector);
return true;
}
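The SDVO changes above add a "Broadcast RGB" auto mode: per CEA-861-E section 5.1, CEA modes other than VIC 1 default to limited-range RGB (16-235) on an HDMI sink, while everything else defaults to full range. A minimal sketch of that policy; pick_rgb_range() and its cea_vic parameter are illustrative stand-ins for the driver's drm_match_cea_mode() check, not real API.

#include <stdbool.h>
#include <stdio.h>

enum rgb_range { RGB_RANGE_FULL, RGB_RANGE_LIMITED };

/* CEA-861-E 5.1: CEA modes other than VIC 1 (640x480) default to limited. */
static enum rgb_range pick_rgb_range(bool hdmi_sink, int cea_vic)
{
        if (hdmi_sink && cea_vic > 1)
                return RGB_RANGE_LIMITED;
        return RGB_RANGE_FULL;
}

int main(void)
{
        /* VIC 16 = 1080p60: limited on HDMI; VIC 0 = non-CEA mode: full. */
        printf("%d %d\n", pick_rgb_range(true, 16), pick_rgb_range(true, 0));
        return 0;
}
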
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index d7b060e0a23..1b6eb76beb7 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -50,6 +50,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
u32 sprctl, sprscale = 0;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;
sprctl = I915_READ(SPRCTL(pipe));
@@ -89,6 +90,9 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
sprctl |= SPRITE_ENABLE;
+ if (IS_HASWELL(dev))
+ sprctl |= SPRITE_PIPE_CSC_ENABLE;
+
/* Sizes are 0 based */
src_w--;
src_h--;
@@ -103,27 +107,23 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
* when scaling is disabled.
*/
if (crtc_w != src_w || crtc_h != src_h) {
- if (!dev_priv->sprite_scaling_enabled) {
- dev_priv->sprite_scaling_enabled = true;
+ dev_priv->sprite_scaling_enabled |= 1 << pipe;
+
+ if (!scaling_was_enabled) {
intel_update_watermarks(dev);
intel_wait_for_vblank(dev, pipe);
}
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
- } else {
- if (dev_priv->sprite_scaling_enabled) {
- dev_priv->sprite_scaling_enabled = false;
- /* potentially re-enable LP watermarks */
- intel_update_watermarks(dev);
- }
- }
+ } else
+ dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
linear_offset = y * fb->pitches[0] + x * pixel_size;
sprsurf_offset =
- intel_gen4_compute_offset_xtiled(&x, &y,
- pixel_size, fb->pitches[0]);
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ pixel_size, fb->pitches[0]);
linear_offset -= sprsurf_offset;
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
@@ -141,6 +141,10 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(SPRCTL(pipe), sprctl);
I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
+
+ /* potentially re-enable LP watermarks */
+ if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
+ intel_update_watermarks(dev);
}
static void
@@ -150,6 +154,7 @@ ivb_disable_plane(struct drm_plane *plane)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe;
+ bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;
I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
/* Can't leave the scaler enabled... */
@@ -159,8 +164,11 @@ ivb_disable_plane(struct drm_plane *plane)
I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
POSTING_READ(SPRSURF(pipe));
- dev_priv->sprite_scaling_enabled = false;
- intel_update_watermarks(dev);
+ dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
+
+ /* potentially re-enable LP watermarks */
+ if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
+ intel_update_watermarks(dev);
}
static int
@@ -287,8 +295,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
linear_offset = y * fb->pitches[0] + x * pixel_size;
dvssurf_offset =
- intel_gen4_compute_offset_xtiled(&x, &y,
- pixel_size, fb->pitches[0]);
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ pixel_size, fb->pitches[0]);
linear_offset -= dvssurf_offset;
if (obj->tiling_mode != I915_TILING_NONE)
@@ -593,7 +601,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
if (!obj) {
@@ -606,7 +614,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
ret = intel_plane->update_colorkey(plane, set);
out_unlock:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -622,7 +630,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
if (!obj) {
@@ -635,7 +643,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
intel_plane->get_colorkey(plane, get);
out_unlock:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
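The sprite changes above turn sprite_scaling_enabled from a single bool into a per-pipe bitmask, so low-power watermarks are only re-enabled once no pipe has an active sprite scaler. A self-contained sketch of that bookkeeping, with illustrative names rather than the driver's fields:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t scaling_mask;   /* one bit per pipe */

/* Returns true when the last scaler went idle and LP watermarks may return. */
static bool set_pipe_scaling(int pipe, bool scaled)
{
        uint32_t old = scaling_mask;

        if (scaled)
                scaling_mask |= 1u << pipe;
        else
                scaling_mask &= ~(1u << pipe);

        return old && !scaling_mask;
}

int main(void)
{
        set_pipe_scaling(0, true);
        set_pipe_scaling(1, true);
        printf("%d\n", set_pipe_scaling(0, false)); /* 0: pipe 1 still scaling */
        printf("%d\n", set_pipe_scaling(1, false)); /* 1: all pipes idle */
        return 0;
}
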
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index ea93520c127..d808421c1c8 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1479,8 +1479,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
}
if (changed && crtc)
- intel_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y, crtc->fb);
+ intel_crtc_restore_mode(crtc);
out:
return ret;
}
@@ -1488,7 +1487,6 @@ out:
static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
.mode_fixup = intel_tv_mode_fixup,
.mode_set = intel_tv_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_tv_connector_funcs = {
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 2f486481d79..d2253f63948 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
#include <linux/fb.h>
@@ -120,9 +121,10 @@ static int mgag200fb_create_object(struct mga_fbdev *afbdev,
return ret;
}
-static int mgag200fb_create(struct mga_fbdev *mfbdev,
+static int mgag200fb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct mga_fbdev *mfbdev = (struct mga_fbdev *)helper;
struct drm_device *dev = mfbdev->helper.dev;
struct drm_mode_fb_cmd2 mode_cmd;
struct mga_device *mdev = dev->dev_private;
@@ -209,23 +211,6 @@ out:
return ret;
}
-static int mga_fb_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size
- *sizes)
-{
- struct mga_fbdev *mfbdev = (struct mga_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = mgag200fb_create(mfbdev, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
static int mga_fbdev_destroy(struct drm_device *dev,
struct mga_fbdev *mfbdev)
{
@@ -247,6 +232,7 @@ static int mga_fbdev_destroy(struct drm_device *dev,
}
drm_fb_helper_fini(&mfbdev->helper);
vfree(mfbdev->sysram);
+ drm_framebuffer_unregister_private(&mfb->base);
drm_framebuffer_cleanup(&mfb->base);
return 0;
@@ -255,7 +241,7 @@ static int mga_fbdev_destroy(struct drm_device *dev,
static struct drm_fb_helper_funcs mga_fb_helper_funcs = {
.gamma_set = mga_crtc_fb_gamma_set,
.gamma_get = mga_crtc_fb_gamma_get,
- .fb_probe = mga_fb_find_or_create_single,
+ .fb_probe = mgag200fb_create,
};
int mgag200_fbdev_init(struct mga_device *mdev)
@@ -277,6 +263,10 @@ int mgag200_fbdev_init(struct mga_device *mdev)
return ret;
}
drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(mdev->dev);
+
drm_fb_helper_initial_config(&mfbdev->helper, 32);
return 0;
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 70dd3c5529d..64297c72464 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -23,16 +23,8 @@ static void mga_user_framebuffer_destroy(struct drm_framebuffer *fb)
kfree(fb);
}
-static int mga_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- return 0;
-}
-
static const struct drm_framebuffer_funcs mga_fb_funcs = {
.destroy = mga_user_framebuffer_destroy,
- .create_handle = mga_user_framebuffer_create_handle,
};
int mgag200_framebuffer_init(struct drm_device *dev,
@@ -40,13 +32,15 @@ int mgag200_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
- int ret = drm_framebuffer_init(dev, &gfb->base, &mga_fb_funcs);
+ int ret;
+
+ drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+ gfb->obj = obj;
+ ret = drm_framebuffer_init(dev, &gfb->base, &mga_fb_funcs);
if (ret) {
DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
return ret;
}
- drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
- gfb->obj = obj;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 8a55beeb8bd..a7ff6d5a34b 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -11,8 +11,9 @@ config DRM_NOUVEAU
select FRAMEBUFFER_CONSOLE if !EXPERT
select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
- select ACPI_WMI if ACPI
- select MXM_WMI if ACPI
+ select X86_PLATFORM_DEVICES if ACPI && X86
+ select ACPI_WMI if ACPI && X86
+ select MXM_WMI if ACPI && X86
select POWER_SUPPLY
help
Choose this option for open-source nVidia support.
@@ -52,26 +53,3 @@ config DRM_NOUVEAU_BACKLIGHT
help
Say Y here if you want to control the backlight of your display
(e.g. a laptop panel).
-
-menu "I2C encoder or helper chips"
- depends on DRM && DRM_KMS_HELPER && I2C
-
-config DRM_I2C_CH7006
- tristate "Chrontel ch7006 TV encoder"
- default m if DRM_NOUVEAU
- help
- Support for Chrontel ch7006 and similar TV encoders, found
- on some nVidia video cards.
-
- This driver is currently only useful if you're also using
- the nouveau driver.
-
-config DRM_I2C_SIL164
- tristate "Silicon Image sil164 TMDS transmitter"
- default m if DRM_NOUVEAU
- help
- Support for sil164 and similar single-link (or dual-link
- when used in pairs) TMDS transmitters, used in some nVidia
- video cards.
-
-endmenu
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index ab25752a0b1..90f9140eeef 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -11,6 +11,7 @@ nouveau-y := core/core/client.o
nouveau-y += core/core/engctx.o
nouveau-y += core/core/engine.o
nouveau-y += core/core/enum.o
+nouveau-y += core/core/event.o
nouveau-y += core/core/falcon.o
nouveau-y += core/core/gpuobj.o
nouveau-y += core/core/handle.o
@@ -40,6 +41,11 @@ nouveau-y += core/subdev/bios/mxm.o
nouveau-y += core/subdev/bios/perf.o
nouveau-y += core/subdev/bios/pll.o
nouveau-y += core/subdev/bios/therm.o
+nouveau-y += core/subdev/bios/xpio.o
+nouveau-y += core/subdev/bus/nv04.o
+nouveau-y += core/subdev/bus/nv31.o
+nouveau-y += core/subdev/bus/nv50.o
+nouveau-y += core/subdev/bus/nvc0.o
nouveau-y += core/subdev/clock/nv04.o
nouveau-y += core/subdev/clock/nv40.o
nouveau-y += core/subdev/clock/nv50.o
@@ -85,9 +91,16 @@ nouveau-y += core/subdev/gpio/base.o
nouveau-y += core/subdev/gpio/nv10.o
nouveau-y += core/subdev/gpio/nv50.o
nouveau-y += core/subdev/gpio/nvd0.o
+nouveau-y += core/subdev/gpio/nve0.o
nouveau-y += core/subdev/i2c/base.o
+nouveau-y += core/subdev/i2c/anx9805.o
nouveau-y += core/subdev/i2c/aux.o
nouveau-y += core/subdev/i2c/bit.o
+nouveau-y += core/subdev/i2c/nv04.o
+nouveau-y += core/subdev/i2c/nv4e.o
+nouveau-y += core/subdev/i2c/nv50.o
+nouveau-y += core/subdev/i2c/nv94.o
+nouveau-y += core/subdev/i2c/nvd0.o
nouveau-y += core/subdev/ibus/nvc0.o
nouveau-y += core/subdev/ibus/nve0.o
nouveau-y += core/subdev/instmem/base.o
@@ -106,10 +119,15 @@ nouveau-y += core/subdev/mxm/mxms.o
nouveau-y += core/subdev/mxm/nv50.o
nouveau-y += core/subdev/therm/base.o
nouveau-y += core/subdev/therm/fan.o
+nouveau-y += core/subdev/therm/fannil.o
+nouveau-y += core/subdev/therm/fanpwm.o
+nouveau-y += core/subdev/therm/fantog.o
nouveau-y += core/subdev/therm/ic.o
+nouveau-y += core/subdev/therm/temp.o
nouveau-y += core/subdev/therm/nv40.o
nouveau-y += core/subdev/therm/nv50.o
-nouveau-y += core/subdev/therm/temp.o
+nouveau-y += core/subdev/therm/nva3.o
+nouveau-y += core/subdev/therm/nvd0.o
nouveau-y += core/subdev/timer/base.o
nouveau-y += core/subdev/timer/nv04.o
nouveau-y += core/subdev/vm/base.o
@@ -132,6 +150,7 @@ nouveau-y += core/engine/copy/nvc0.o
nouveau-y += core/engine/copy/nve0.o
nouveau-y += core/engine/crypt/nv84.o
nouveau-y += core/engine/crypt/nv98.o
+nouveau-y += core/engine/disp/base.o
nouveau-y += core/engine/disp/nv04.o
nouveau-y += core/engine/disp/nv50.o
nouveau-y += core/engine/disp/nv84.o
@@ -141,11 +160,13 @@ nouveau-y += core/engine/disp/nva3.o
nouveau-y += core/engine/disp/nvd0.o
nouveau-y += core/engine/disp/nve0.o
nouveau-y += core/engine/disp/dacnv50.o
+nouveau-y += core/engine/disp/dport.o
nouveau-y += core/engine/disp/hdanva3.o
nouveau-y += core/engine/disp/hdanvd0.o
nouveau-y += core/engine/disp/hdminv84.o
nouveau-y += core/engine/disp/hdminva3.o
nouveau-y += core/engine/disp/hdminvd0.o
+nouveau-y += core/engine/disp/piornv50.o
nouveau-y += core/engine/disp/sornv50.o
nouveau-y += core/engine/disp/sornv94.o
nouveau-y += core/engine/disp/sornvd0.o
@@ -194,7 +215,8 @@ nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
nouveau-y += nouveau_irq.o nouveau_vga.o nouveau_agp.o
nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o
nouveau-y += nouveau_prime.o nouveau_abi16.o
-nouveau-y += nv04_fence.o nv10_fence.o nv50_fence.o nv84_fence.o nvc0_fence.o
+nouveau-y += nv04_fence.o nv10_fence.o nv17_fence.o
+nouveau-y += nv50_fence.o nv84_fence.o nvc0_fence.o
# drm/kms
nouveau-y += nouveau_bios.o nouveau_fbcon.o nouveau_display.o
@@ -216,7 +238,10 @@ nouveau-y += nouveau_mem.o
# other random bits
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
+ifdef CONFIG_X86
nouveau-$(CONFIG_ACPI) += nouveau_acpi.o
+endif
nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
+nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o
obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o
diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/core/core/client.c
index 8bbb58f94a1..295c22165ea 100644
--- a/drivers/gpu/drm/nouveau/core/core/client.c
+++ b/drivers/gpu/drm/nouveau/core/core/client.c
@@ -99,3 +99,13 @@ nouveau_client_fini(struct nouveau_client *client, bool suspend)
nv_debug(client, "%s completed with %d\n", name[suspend], ret);
return ret;
}
+
+const char *
+nouveau_client_name(void *obj)
+{
+ const char *client_name = "unknown";
+ struct nouveau_client *client = nouveau_client(obj);
+ if (client)
+ client_name = client->name;
+ return client_name;
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/enum.c b/drivers/gpu/drm/nouveau/core/core/enum.c
index 7cc7133d82d..dd434790ccc 100644
--- a/drivers/gpu/drm/nouveau/core/core/enum.c
+++ b/drivers/gpu/drm/nouveau/core/core/enum.c
@@ -40,14 +40,15 @@ nouveau_enum_find(const struct nouveau_enum *en, u32 value)
return NULL;
}
-void
+const struct nouveau_enum *
nouveau_enum_print(const struct nouveau_enum *en, u32 value)
{
en = nouveau_enum_find(en, value);
if (en)
- printk("%s", en->name);
+ pr_cont("%s", en->name);
else
- printk("(unknown enum 0x%08x)", value);
+ pr_cont("(unknown enum 0x%08x)", value);
+ return en;
}
void
@@ -55,7 +56,7 @@ nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
{
while (bf->name) {
if (value & bf->mask) {
- printk(" %s", bf->name);
+ pr_cont(" %s", bf->name);
value &= ~bf->mask;
}
@@ -63,5 +64,5 @@ nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
}
if (value)
- printk(" (unknown bits 0x%08x)", value);
+ pr_cont(" (unknown bits 0x%08x)", value);
}
diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c
new file mode 100644
index 00000000000..6d01e0f0fc8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/event.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <core/os.h>
+#include <core/event.h>
+
+static void
+nouveau_event_put_locked(struct nouveau_event *event, int index,
+ struct nouveau_eventh *handler)
+{
+ if (!--event->index[index].refs)
+ event->disable(event, index);
+ list_del(&handler->head);
+}
+
+void
+nouveau_event_put(struct nouveau_event *event, int index,
+ struct nouveau_eventh *handler)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&event->lock, flags);
+ if (index < event->index_nr)
+ nouveau_event_put_locked(event, index, handler);
+ spin_unlock_irqrestore(&event->lock, flags);
+}
+
+void
+nouveau_event_get(struct nouveau_event *event, int index,
+ struct nouveau_eventh *handler)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&event->lock, flags);
+ if (index < event->index_nr) {
+ list_add(&handler->head, &event->index[index].list);
+ if (!event->index[index].refs++)
+ event->enable(event, index);
+ }
+ spin_unlock_irqrestore(&event->lock, flags);
+}
+
+void
+nouveau_event_trigger(struct nouveau_event *event, int index)
+{
+ struct nouveau_eventh *handler, *temp;
+ unsigned long flags;
+
+ if (index >= event->index_nr)
+ return;
+
+ spin_lock_irqsave(&event->lock, flags);
+ list_for_each_entry_safe(handler, temp, &event->index[index].list, head) {
+ if (handler->func(handler, index) == NVKM_EVENT_DROP) {
+ nouveau_event_put_locked(event, index, handler);
+ }
+ }
+ spin_unlock_irqrestore(&event->lock, flags);
+}
+
+void
+nouveau_event_destroy(struct nouveau_event **pevent)
+{
+ struct nouveau_event *event = *pevent;
+ if (event) {
+ kfree(event);
+ *pevent = NULL;
+ }
+}
+
+int
+nouveau_event_create(int index_nr, struct nouveau_event **pevent)
+{
+ struct nouveau_event *event;
+ int i;
+
+ event = *pevent = kzalloc(sizeof(*event) + index_nr *
+ sizeof(event->index[0]), GFP_KERNEL);
+ if (!event)
+ return -ENOMEM;
+
+ spin_lock_init(&event->lock);
+ for (i = 0; i < index_nr; i++)
+ INIT_LIST_HEAD(&event->index[i].list);
+ event->index_nr = index_nr;
+ return 0;
+}
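The new core/event.c introduces refcounted event sources: the hardware interrupt for an index is enabled when the first handler subscribes and disabled again when the last one is removed. A stripped-down sketch of that get/put pattern, omitting the spinlock and per-index handler lists; the names are illustrative, not the nouveau API.

#include <stdio.h>

struct sketch_event {
        int refs;
        void (*enable)(void);
        void (*disable)(void);
};

static void event_get(struct sketch_event *ev)
{
        if (!ev->refs++)        /* first subscriber turns the source on */
                ev->enable();
}

static void event_put(struct sketch_event *ev)
{
        if (!--ev->refs)        /* last subscriber turns it off again */
                ev->disable();
}

static void hw_enable(void)  { puts("irq enabled");  }
static void hw_disable(void) { puts("irq disabled"); }

int main(void)
{
        struct sketch_event ev = { .enable = hw_enable, .disable = hw_disable };

        event_get(&ev);         /* enables the source */
        event_get(&ev);         /* no hardware access */
        event_put(&ev);
        event_put(&ev);         /* disables the source */
        return 0;
}
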
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
index 283248c7b05..d6dc2a65ccd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
+#include <core/client.h>
#include <core/falcon.h>
#include <core/class.h>
#include <core/enum.h>
@@ -100,8 +101,9 @@ nva3_copy_intr(struct nouveau_subdev *subdev)
if (stat & 0x00000040) {
nv_error(falcon, "DISPATCH_ERROR [");
nouveau_enum_print(nva3_copy_isr_error_name, ssta);
- printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
- chid, inst << 12, subc, mthd, data);
+ pr_cont("] ch %d [0x%010llx %s] subc %d mthd 0x%04x data 0x%08x\n",
+ chid, inst << 12, nouveau_client_name(engctx), subc,
+ mthd, data);
nv_wo32(falcon, 0x004, 0x00000040);
stat &= ~0x00000040;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
index b9749051272..5bc021f471f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
+#include <core/client.h>
#include <core/os.h>
#include <core/enum.h>
#include <core/class.h>
@@ -126,10 +127,11 @@ nv84_crypt_intr(struct nouveau_subdev *subdev)
chid = pfifo->chid(pfifo, engctx);
if (stat) {
- nv_error(priv, "");
+ nv_error(priv, "%s", "");
nouveau_bitfield_print(nv84_crypt_intr_mask, stat);
- printk(" ch %d [0x%010llx] mthd 0x%04x data 0x%08x\n",
- chid, (u64)inst << 12, mthd, data);
+ pr_cont(" ch %d [0x%010llx %s] mthd 0x%04x data 0x%08x\n",
+ chid, (u64)inst << 12, nouveau_client_name(engctx),
+ mthd, data);
}
nv_wr32(priv, 0x102130, stat);
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
index 21986f3bf0c..8bf8955051d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
+#include <core/client.h>
#include <core/os.h>
#include <core/enum.h>
#include <core/class.h>
@@ -102,8 +103,9 @@ nv98_crypt_intr(struct nouveau_subdev *subdev)
if (stat & 0x00000040) {
nv_error(priv, "DISPATCH_ERROR [");
nouveau_enum_print(nv98_crypt_isr_error_name, ssta);
- printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
- chid, (u64)inst << 12, subc, mthd, data);
+ pr_cont("] ch %d [0x%010llx %s] subc %d mthd 0x%04x data 0x%08x\n",
+ chid, (u64)inst << 12, nouveau_client_name(engctx),
+ subc, mthd, data);
nv_wr32(priv, 0x087004, 0x00000040);
stat &= ~0x00000040;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/base.c b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
new file mode 100644
index 00000000000..7a5cae42834
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/disp.h>
+
+void
+_nouveau_disp_dtor(struct nouveau_object *object)
+{
+ struct nouveau_disp *disp = (void *)object;
+ nouveau_event_destroy(&disp->vblank);
+ nouveau_engine_destroy(&disp->base);
+}
+
+int
+nouveau_disp_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, int heads,
+ const char *intname, const char *extname,
+ int length, void **pobject)
+{
+ struct nouveau_disp *disp;
+ int ret;
+
+ ret = nouveau_engine_create_(parent, engine, oclass, true,
+ intname, extname, length, pobject);
+ disp = *pobject;
+ if (ret)
+ return ret;
+
+ return nouveau_event_create(heads, &disp->vblank);
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
new file mode 100644
index 00000000000..fa27b02ff82
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+#include <subdev/i2c.h>
+
+#include <engine/disp.h>
+
+#include "dport.h"
+
+#define DBG(fmt, args...) nv_debug(dp->disp, "DP:%04x:%04x: " fmt, \
+ dp->outp->hasht, dp->outp->hashm, ##args)
+#define ERR(fmt, args...) nv_error(dp->disp, "DP:%04x:%04x: " fmt, \
+ dp->outp->hasht, dp->outp->hashm, ##args)
+
+/******************************************************************************
+ * link training
+ *****************************************************************************/
+struct dp_state {
+ const struct nouveau_dp_func *func;
+ struct nouveau_disp *disp;
+ struct dcb_output *outp;
+ struct nvbios_dpout info;
+ u8 version;
+ struct nouveau_i2c_port *aux;
+ int head;
+ u8 dpcd[4];
+ int link_nr;
+ u32 link_bw;
+ u8 stat[6];
+ u8 conf[4];
+};
+
+static int
+dp_set_link_config(struct dp_state *dp)
+{
+ struct nouveau_disp *disp = dp->disp;
+ struct nouveau_bios *bios = nouveau_bios(disp);
+ struct nvbios_init init = {
+ .subdev = nv_subdev(dp->disp),
+ .bios = bios,
+ .offset = 0x0000,
+ .outp = dp->outp,
+ .crtc = dp->head,
+ .execute = 1,
+ };
+ u32 lnkcmp;
+ u8 sink[2];
+
+ DBG("%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
+
+ /* set desired link configuration on the sink */
+ sink[0] = dp->link_bw / 27000;
+ sink[1] = dp->link_nr;
+ if (dp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
+ sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
+
+ nv_wraux(dp->aux, DPCD_LC00, sink, 2);
+
+ /* set desired link configuration on the source */
+ if ((lnkcmp = dp->info.lnkcmp)) {
+ if (dp->version < 0x30) {
+ while ((dp->link_bw / 10) < nv_ro16(bios, lnkcmp))
+ lnkcmp += 4;
+ init.offset = nv_ro16(bios, lnkcmp + 2);
+ } else {
+ while ((dp->link_bw / 27000) < nv_ro08(bios, lnkcmp))
+ lnkcmp += 3;
+ init.offset = nv_ro16(bios, lnkcmp + 1);
+ }
+
+ nvbios_exec(&init);
+ }
+
+ return dp->func->lnk_ctl(dp->disp, dp->outp, dp->head,
+ dp->link_nr, dp->link_bw / 27000,
+ dp->dpcd[DPCD_RC02] &
+ DPCD_RC02_ENHANCED_FRAME_CAP);
+}
+
+static void
+dp_set_training_pattern(struct dp_state *dp, u8 pattern)
+{
+ u8 sink_tp;
+
+ DBG("training pattern %d\n", pattern);
+ dp->func->pattern(dp->disp, dp->outp, dp->head, pattern);
+
+ nv_rdaux(dp->aux, DPCD_LC02, &sink_tp, 1);
+ sink_tp &= ~DPCD_LC02_TRAINING_PATTERN_SET;
+ sink_tp |= pattern;
+ nv_wraux(dp->aux, DPCD_LC02, &sink_tp, 1);
+}
+
+static int
+dp_link_train_commit(struct dp_state *dp)
+{
+ int i;
+
+ for (i = 0; i < dp->link_nr; i++) {
+ u8 lane = (dp->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf;
+ u8 lpre = (lane & 0x0c) >> 2;
+ u8 lvsw = (lane & 0x03) >> 0;
+
+ dp->conf[i] = (lpre << 3) | lvsw;
+ if (lvsw == 3)
+ dp->conf[i] |= DPCD_LC03_MAX_SWING_REACHED;
+ if (lpre == 3)
+ dp->conf[i] |= DPCD_LC03_MAX_PRE_EMPHASIS_REACHED;
+
+ DBG("config lane %d %02x\n", i, dp->conf[i]);
+ dp->func->drv_ctl(dp->disp, dp->outp, dp->head, i, lvsw, lpre);
+ }
+
+ return nv_wraux(dp->aux, DPCD_LC03(0), dp->conf, 4);
+}
+
+static int
+dp_link_train_update(struct dp_state *dp, u32 delay)
+{
+ int ret;
+
+ udelay(delay);
+
+ ret = nv_rdaux(dp->aux, DPCD_LS02, dp->stat, 6);
+ if (ret)
+ return ret;
+
+ DBG("status %*ph\n", 6, dp->stat);
+ return 0;
+}
+
+static int
+dp_link_train_cr(struct dp_state *dp)
+{
+ bool cr_done = false, abort = false;
+ int voltage = dp->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
+ int tries = 0, i;
+
+ dp_set_training_pattern(dp, 1);
+
+ do {
+ if (dp_link_train_commit(dp) ||
+ dp_link_train_update(dp, 100))
+ break;
+
+ cr_done = true;
+ for (i = 0; i < dp->link_nr; i++) {
+ u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
+ if (!(lane & DPCD_LS02_LANE0_CR_DONE)) {
+ cr_done = false;
+ if (dp->conf[i] & DPCD_LC03_MAX_SWING_REACHED)
+ abort = true;
+ break;
+ }
+ }
+
+ if ((dp->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET) != voltage) {
+ voltage = dp->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
+ tries = 0;
+ }
+ } while (!cr_done && !abort && ++tries < 5);
+
+ return cr_done ? 0 : -1;
+}
+
+static int
+dp_link_train_eq(struct dp_state *dp)
+{
+ bool eq_done, cr_done = true;
+ int tries = 0, i;
+
+ dp_set_training_pattern(dp, 2);
+
+ do {
+ if (dp_link_train_update(dp, 400))
+ break;
+
+ eq_done = !!(dp->stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE);
+ for (i = 0; i < dp->link_nr && eq_done; i++) {
+ u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
+ if (!(lane & DPCD_LS02_LANE0_CR_DONE))
+ cr_done = false;
+ if (!(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
+ !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED))
+ eq_done = false;
+ }
+
+ if (dp_link_train_commit(dp))
+ break;
+ } while (!eq_done && cr_done && ++tries <= 5);
+
+ return eq_done ? 0 : -1;
+}
+
+static void
+dp_link_train_init(struct dp_state *dp, bool spread)
+{
+ struct nvbios_init init = {
+ .subdev = nv_subdev(dp->disp),
+ .bios = nouveau_bios(dp->disp),
+ .outp = dp->outp,
+ .crtc = dp->head,
+ .execute = 1,
+ };
+
+ /* set desired spread */
+ if (spread)
+ init.offset = dp->info.script[2];
+ else
+ init.offset = dp->info.script[3];
+ nvbios_exec(&init);
+
+ /* pre-train script */
+ init.offset = dp->info.script[0];
+ nvbios_exec(&init);
+}
+
+static void
+dp_link_train_fini(struct dp_state *dp)
+{
+ struct nvbios_init init = {
+ .subdev = nv_subdev(dp->disp),
+ .bios = nouveau_bios(dp->disp),
+ .outp = dp->outp,
+ .crtc = dp->head,
+ .execute = 1,
+ };
+
+ /* post-train script */
+ init.offset = dp->info.script[1],
+ nvbios_exec(&init);
+}
+
+int
+nouveau_dp_train(struct nouveau_disp *disp, const struct nouveau_dp_func *func,
+ struct dcb_output *outp, int head, u32 datarate)
+{
+ struct nouveau_bios *bios = nouveau_bios(disp);
+ struct nouveau_i2c *i2c = nouveau_i2c(disp);
+ struct dp_state _dp = {
+ .disp = disp,
+ .func = func,
+ .outp = outp,
+ .head = head,
+ }, *dp = &_dp;
+ const u32 bw_list[] = { 270000, 162000, 0 };
+ const u32 *link_bw = bw_list;
+ u8 hdr, cnt, len;
+ u32 data;
+ int ret;
+
+ /* find the bios displayport data relevant to this output */
+ data = nvbios_dpout_match(bios, outp->hasht, outp->hashm, &dp->version,
+ &hdr, &cnt, &len, &dp->info);
+ if (!data) {
+ ERR("bios data not found\n");
+ return -EINVAL;
+ }
+
+ /* acquire the aux channel and fetch some info about the display */
+ if (outp->location)
+ dp->aux = i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(outp->extdev));
+ else
+ dp->aux = i2c->find(i2c, NV_I2C_TYPE_DCBI2C(outp->i2c_index));
+ if (!dp->aux) {
+ ERR("no aux channel?!\n");
+ return -ENODEV;
+ }
+
+ ret = nv_rdaux(dp->aux, 0x00000, dp->dpcd, sizeof(dp->dpcd));
+ if (ret) {
+ ERR("failed to read DPCD\n");
+ return ret;
+ }
+
+ /* adjust required bandwidth for 8B/10B coding overhead */
+ datarate = (datarate / 8) * 10;
+
+ /* enable down-spreading and execute pre-train script from vbios */
+ dp_link_train_init(dp, dp->dpcd[3] & 0x01);
+
+ /* start off at highest link rate supported by encoder and display */
+ while (*link_bw > (dp->dpcd[1] * 27000))
+ link_bw++;
+
+ while (link_bw[0]) {
+ /* find minimum required lane count at this link rate */
+ dp->link_nr = dp->dpcd[2] & DPCD_RC02_MAX_LANE_COUNT;
+ while ((dp->link_nr >> 1) * link_bw[0] > datarate)
+ dp->link_nr >>= 1;
+
+ /* drop link rate to minimum with this lane count */
+ while ((link_bw[1] * dp->link_nr) > datarate)
+ link_bw++;
+ dp->link_bw = link_bw[0];
+
+ /* program selected link configuration */
+ ret = dp_set_link_config(dp);
+ if (ret == 0) {
+ /* attempt to train the link at this configuration */
+ memset(dp->stat, 0x00, sizeof(dp->stat));
+ if (!dp_link_train_cr(dp) &&
+ !dp_link_train_eq(dp))
+ break;
+ } else
+ if (ret >= 1) {
+ /* dp_set_link_config() handled training */
+ break;
+ }
+
+ /* retry at lower rate */
+ link_bw++;
+ }
+
+ /* finish link training */
+ dp_set_training_pattern(dp, 0);
+
+ /* execute post-train script from vbios */
+ dp_link_train_fini(dp);
+ return true;
+}
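nouveau_dp_train() above picks a link configuration by padding the required bandwidth for 8b/10b coding, capping the link rate at what the sink advertises, halving the lane count while the remaining lanes still carry the payload, and then dropping to the lowest rate that still fits. A self-contained sketch of that search; pick_link_config() and its units (KB/s, matching the driver's debug output) are illustrative only.

#include <stdint.h>
#include <stdio.h>

static void pick_link_config(uint32_t datarate_kbps, int max_lanes,
                             uint32_t max_bw_kbps)
{
        static const uint32_t bw_list[] = { 270000, 162000, 0 };
        const uint32_t *bw = bw_list;
        uint32_t datarate = (datarate_kbps / 8) * 10;   /* 8b/10b overhead */
        int lanes = max_lanes;

        while (*bw > max_bw_kbps)                       /* cap at sink's rate */
                bw++;
        if (!bw[0]) {
                puts("sink advertises no usable link rate");
                return;
        }

        while ((lanes >> 1) * bw[0] > datarate)         /* fewest lanes */
                lanes >>= 1;

        while (bw[1] && bw[1] * lanes > datarate)       /* lowest rate */
                bw++;

        printf("%d lanes at %u KB/s\n", lanes, bw[0]);
}

int main(void)
{
        /* e.g. 1080p60 at 24bpp: ~148500 kHz pixel clock * 3 bytes/pixel */
        pick_link_config(148500 * 3, 4, 270000);
        return 0;
}
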
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.h b/drivers/gpu/drm/nouveau/core/engine/disp/dport.h
new file mode 100644
index 00000000000..0e1bbd18ff6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.h
@@ -0,0 +1,78 @@
+#ifndef __NVKM_DISP_DPORT_H__
+#define __NVKM_DISP_DPORT_H__
+
+/* DPCD Receiver Capabilities */
+#define DPCD_RC00 0x00000
+#define DPCD_RC00_DPCD_REV 0xff
+#define DPCD_RC01 0x00001
+#define DPCD_RC01_MAX_LINK_RATE 0xff
+#define DPCD_RC02 0x00002
+#define DPCD_RC02_ENHANCED_FRAME_CAP 0x80
+#define DPCD_RC02_MAX_LANE_COUNT 0x1f
+#define DPCD_RC03 0x00003
+#define DPCD_RC03_MAX_DOWNSPREAD 0x01
+
+/* DPCD Link Configuration */
+#define DPCD_LC00 0x00100
+#define DPCD_LC00_LINK_BW_SET 0xff
+#define DPCD_LC01 0x00101
+#define DPCD_LC01_ENHANCED_FRAME_EN 0x80
+#define DPCD_LC01_LANE_COUNT_SET 0x1f
+#define DPCD_LC02 0x00102
+#define DPCD_LC02_TRAINING_PATTERN_SET 0x03
+#define DPCD_LC03(l) ((l) + 0x00103)
+#define DPCD_LC03_MAX_PRE_EMPHASIS_REACHED 0x20
+#define DPCD_LC03_PRE_EMPHASIS_SET 0x18
+#define DPCD_LC03_MAX_SWING_REACHED 0x04
+#define DPCD_LC03_VOLTAGE_SWING_SET 0x03
+
+/* DPCD Link/Sink Status */
+#define DPCD_LS02 0x00202
+#define DPCD_LS02_LANE1_SYMBOL_LOCKED 0x40
+#define DPCD_LS02_LANE1_CHANNEL_EQ_DONE 0x20
+#define DPCD_LS02_LANE1_CR_DONE 0x10
+#define DPCD_LS02_LANE0_SYMBOL_LOCKED 0x04
+#define DPCD_LS02_LANE0_CHANNEL_EQ_DONE 0x02
+#define DPCD_LS02_LANE0_CR_DONE 0x01
+#define DPCD_LS03 0x00203
+#define DPCD_LS03_LANE3_SYMBOL_LOCKED 0x40
+#define DPCD_LS03_LANE3_CHANNEL_EQ_DONE 0x20
+#define DPCD_LS03_LANE3_CR_DONE 0x10
+#define DPCD_LS03_LANE2_SYMBOL_LOCKED 0x04
+#define DPCD_LS03_LANE2_CHANNEL_EQ_DONE 0x02
+#define DPCD_LS03_LANE2_CR_DONE 0x01
+#define DPCD_LS04 0x00204
+#define DPCD_LS04_LINK_STATUS_UPDATED 0x80
+#define DPCD_LS04_DOWNSTREAM_PORT_STATUS_CHANGED 0x40
+#define DPCD_LS04_INTERLANE_ALIGN_DONE 0x01
+#define DPCD_LS06 0x00206
+#define DPCD_LS06_LANE1_PRE_EMPHASIS 0xc0
+#define DPCD_LS06_LANE1_VOLTAGE_SWING 0x30
+#define DPCD_LS06_LANE0_PRE_EMPHASIS 0x0c
+#define DPCD_LS06_LANE0_VOLTAGE_SWING 0x03
+#define DPCD_LS07 0x00207
+#define DPCD_LS07_LANE3_PRE_EMPHASIS 0xc0
+#define DPCD_LS07_LANE3_VOLTAGE_SWING 0x30
+#define DPCD_LS07_LANE2_PRE_EMPHASIS 0x0c
+#define DPCD_LS07_LANE2_VOLTAGE_SWING 0x03
+
+struct nouveau_disp;
+struct dcb_output;
+
+struct nouveau_dp_func {
+ int (*pattern)(struct nouveau_disp *, struct dcb_output *,
+ int head, int pattern);
+ int (*lnk_ctl)(struct nouveau_disp *, struct dcb_output *, int head,
+ int link_nr, int link_bw, bool enh_frame);
+ int (*drv_ctl)(struct nouveau_disp *, struct dcb_output *, int head,
+ int lane, int swing, int preem);
+};
+
+extern const struct nouveau_dp_func nv94_sor_dp_func;
+extern const struct nouveau_dp_func nvd0_sor_dp_func;
+extern const struct nouveau_dp_func nv50_pior_dp_func;
+
+int nouveau_dp_train(struct nouveau_disp *, const struct nouveau_dp_func *,
+ struct dcb_output *, int, u32);
+
+#endif
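dport.h above defines the DPCD link-status layout used by the training loops: registers 0x202/0x203 pack two lanes per byte, four status bits per lane, which is why the driver indexes stat[i >> 1] and shifts by (i & 1) * 4. A small sketch of that decode; the sample status bytes are made up.

#include <stdint.h>
#include <stdio.h>

#define LANE_CR_DONE            0x01
#define LANE_CHANNEL_EQ_DONE    0x02
#define LANE_SYMBOL_LOCKED      0x04

int main(void)
{
        /* stat[0] = DPCD 0x202 (lanes 0/1), stat[1] = DPCD 0x203 (lanes 2/3) */
        const uint8_t stat[2] = { 0x77, 0x17 };
        int i;

        for (i = 0; i < 4; i++) {
                uint8_t lane = (stat[i >> 1] >> ((i & 1) * 4)) & 0xf;

                printf("lane %d: cr=%d eq=%d lock=%d\n", i,
                       !!(lane & LANE_CR_DONE),
                       !!(lane & LANE_CHANNEL_EQ_DONE),
                       !!(lane & LANE_SYMBOL_LOCKED));
        }
        return 0;
}
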
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
index 1c919f2af89..05e903f08a3 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
@@ -24,21 +24,33 @@
#include <engine/disp.h>
+#include <core/event.h>
+#include <core/class.h>
+
struct nv04_disp_priv {
struct nouveau_disp base;
};
static struct nouveau_oclass
nv04_disp_sclass[] = {
+ { NV04_DISP_CLASS, &nouveau_object_ofuncs },
{},
};
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static void
+nv04_disp_vblank_enable(struct nouveau_event *event, int head)
+{
+ nv_wr32(event->priv, 0x600140 + (head * 0x2000) , 0x00000001);
+}
+
static void
-nv04_disp_intr_vblank(struct nv04_disp_priv *priv, int crtc)
+nv04_disp_vblank_disable(struct nouveau_event *event, int head)
{
- struct nouveau_disp *disp = &priv->base;
- if (disp->vblank.notify)
- disp->vblank.notify(disp->vblank.data, crtc);
+ nv_wr32(event->priv, 0x600140 + (head * 0x2000) , 0x00000000);
}
static void
@@ -49,25 +61,25 @@ nv04_disp_intr(struct nouveau_subdev *subdev)
u32 crtc1 = nv_rd32(priv, 0x602100);
if (crtc0 & 0x00000001) {
- nv04_disp_intr_vblank(priv, 0);
+ nouveau_event_trigger(priv->base.vblank, 0);
nv_wr32(priv, 0x600100, 0x00000001);
}
if (crtc1 & 0x00000001) {
- nv04_disp_intr_vblank(priv, 1);
+ nouveau_event_trigger(priv->base.vblank, 1);
nv_wr32(priv, 0x602100, 0x00000001);
}
}
static int
nv04_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
struct nv04_disp_priv *priv;
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, "DISPLAY",
+ ret = nouveau_disp_create(parent, engine, oclass, 2, "DISPLAY",
"display", &priv);
*pobject = nv_object(priv);
if (ret)
@@ -75,6 +87,9 @@ nv04_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nv04_disp_sclass;
nv_subdev(priv)->intr = nv04_disp_intr;
+ priv->base.vblank->priv = priv;
+ priv->base.vblank->enable = nv04_disp_vblank_enable;
+ priv->base.vblank->disable = nv04_disp_vblank_disable;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index ca1a7d76a95..5fa13267bd9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -27,7 +27,6 @@
#include <core/handle.h>
#include <core/class.h>
-#include <engine/software.h>
#include <engine/disp.h>
#include <subdev/bios.h>
@@ -37,7 +36,6 @@
#include <subdev/bios/pll.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
-#include <subdev/bar.h>
#include <subdev/clock.h>
#include "nv50.h"
@@ -335,7 +333,7 @@ nv50_disp_sync_ctor(struct nouveau_object *parent,
struct nv50_disp_dmac *dmac;
int ret;
- if (size < sizeof(*data) || args->head > 1)
+ if (size < sizeof(*args) || args->head > 1)
return -EINVAL;
ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
@@ -374,7 +372,7 @@ nv50_disp_ovly_ctor(struct nouveau_object *parent,
struct nv50_disp_dmac *dmac;
int ret;
- if (size < sizeof(*data) || args->head > 1)
+ if (size < sizeof(*args) || args->head > 1)
return -EINVAL;
ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
@@ -543,6 +541,18 @@ nv50_disp_curs_ofuncs = {
* Base display object
******************************************************************************/
+static void
+nv50_disp_base_vblank_enable(struct nouveau_event *event, int head)
+{
+ nv_mask(event->priv, 0x61002c, (1 << head), (1 << head));
+}
+
+static void
+nv50_disp_base_vblank_disable(struct nouveau_event *event, int head)
+{
+ nv_mask(event->priv, 0x61002c, (1 << head), (0 << head));
+}
+
static int
nv50_disp_base_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
@@ -559,6 +569,9 @@ nv50_disp_base_ctor(struct nouveau_object *parent,
if (ret)
return ret;
+ priv->base.vblank->priv = priv;
+ priv->base.vblank->enable = nv50_disp_base_vblank_enable;
+ priv->base.vblank->disable = nv50_disp_base_vblank_disable;
return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
}
@@ -613,7 +626,7 @@ nv50_disp_base_init(struct nouveau_object *object)
nv_wr32(priv, 0x6101e0 + (i * 0x04), tmp);
}
- /* ... EXT caps */
+ /* ... PIOR caps */
for (i = 0; i < 3; i++) {
tmp = nv_rd32(priv, 0x61e000 + (i * 0x800));
nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp);
@@ -665,6 +678,9 @@ nv50_disp_base_omthds[] = {
{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
{ DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
{ DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
{},
};
@@ -756,50 +772,6 @@ nv50_disp_intr_error(struct nv50_disp_priv *priv)
}
}
-static void
-nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
-{
- struct nouveau_bar *bar = nouveau_bar(priv);
- struct nouveau_disp *disp = &priv->base;
- struct nouveau_software_chan *chan, *temp;
- unsigned long flags;
-
- spin_lock_irqsave(&disp->vblank.lock, flags);
- list_for_each_entry_safe(chan, temp, &disp->vblank.list, vblank.head) {
- if (chan->vblank.crtc != crtc)
- continue;
-
- if (nv_device(priv)->chipset >= 0xc0) {
- nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
- bar->flush(bar);
- nv_wr32(priv, 0x06000c,
- upper_32_bits(chan->vblank.offset));
- nv_wr32(priv, 0x060010,
- lower_32_bits(chan->vblank.offset));
- nv_wr32(priv, 0x060014, chan->vblank.value);
- } else {
- nv_wr32(priv, 0x001704, chan->vblank.channel);
- nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
- bar->flush(bar);
- if (nv_device(priv)->chipset == 0x50) {
- nv_wr32(priv, 0x001570, chan->vblank.offset);
- nv_wr32(priv, 0x001574, chan->vblank.value);
- } else {
- nv_wr32(priv, 0x060010, chan->vblank.offset);
- nv_wr32(priv, 0x060014, chan->vblank.value);
- }
- }
-
- list_del(&chan->vblank.head);
- if (disp->vblank.put)
- disp->vblank.put(disp->vblank.data, crtc);
- }
- spin_unlock_irqrestore(&disp->vblank.lock, flags);
-
- if (disp->vblank.notify)
- disp->vblank.notify(disp->vblank.data, crtc);
-}
-
static u16
exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
@@ -811,8 +783,8 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
if (outp < 4) {
type = DCB_OUTPUT_ANALOG;
mask = 0;
- } else {
- outp -= 4;
+ } else
+ if (outp < 8) {
switch (ctrl & 0x00000f00) {
case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
@@ -824,6 +796,17 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
return 0x0000;
}
+ outp -= 4;
+ } else {
+ outp = outp - 8;
+ type = 0x0010;
+ mask = 0;
+ switch (ctrl & 0x00000f00) {
+ case 0x00000000: type |= priv->pior.type[outp]; break;
+ default:
+ nv_error(priv, "unknown PIOR mc 0x%08x\n", ctrl);
+ return 0x0000;
+ }
}
mask = 0x00c0 & (mask << 6);
@@ -834,6 +817,10 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
if (!data)
return 0x0000;
+ /* off-chip encoders require matching the exact encoder type */
+ if (dcb->location != 0)
+ type |= dcb->extdev << 8;
+
return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info);
}
@@ -848,9 +835,11 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
u32 ctrl = 0x00000000;
int i;
+ /* DAC */
for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
+ /* SOR */
if (!(ctrl & (1 << head))) {
if (nv_device(priv)->chipset < 0x90 ||
nv_device(priv)->chipset == 0x92 ||
@@ -865,6 +854,13 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
}
}
+ /* PIOR */
+ if (!(ctrl & (1 << head))) {
+ for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+ ctrl = nv_rd32(priv, 0x610b84 + (i * 8));
+ i += 8;
+ }
+
if (!(ctrl & (1 << head)))
return false;
i--;
@@ -894,13 +890,15 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
struct nvbios_outp info1;
struct nvbios_ocfg info2;
u8 ver, hdr, cnt, len;
- u16 data, conf;
u32 ctrl = 0x00000000;
+ u32 data, conf = ~0;
int i;
+ /* DAC */
for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
+ /* SOR */
if (!(ctrl & (1 << head))) {
if (nv_device(priv)->chipset < 0x90 ||
nv_device(priv)->chipset == 0x92 ||
@@ -915,34 +913,46 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
}
}
+ /* PIOR */
+ if (!(ctrl & (1 << head))) {
+ for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+ ctrl = nv_rd32(priv, 0x610b80 + (i * 8));
+ i += 8;
+ }
+
if (!(ctrl & (1 << head)))
- return 0x0000;
+ return conf;
i--;
data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1);
if (!data)
- return 0x0000;
-
- switch (outp->type) {
- case DCB_OUTPUT_TMDS:
- conf = (ctrl & 0x00000f00) >> 8;
- if (pclk >= 165000)
- conf |= 0x0100;
- break;
- case DCB_OUTPUT_LVDS:
- conf = priv->sor.lvdsconf;
- break;
- case DCB_OUTPUT_DP:
+ return conf;
+
+ if (outp->location == 0) {
+ switch (outp->type) {
+ case DCB_OUTPUT_TMDS:
+ conf = (ctrl & 0x00000f00) >> 8;
+ if (pclk >= 165000)
+ conf |= 0x0100;
+ break;
+ case DCB_OUTPUT_LVDS:
+ conf = priv->sor.lvdsconf;
+ break;
+ case DCB_OUTPUT_DP:
+ conf = (ctrl & 0x00000f00) >> 8;
+ break;
+ case DCB_OUTPUT_ANALOG:
+ default:
+ conf = 0x00ff;
+ break;
+ }
+ } else {
conf = (ctrl & 0x00000f00) >> 8;
- break;
- case DCB_OUTPUT_ANALOG:
- default:
- conf = 0x00ff;
- break;
+ pclk = pclk / 2;
}
data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
- if (data) {
+ if (data && id < 0xff) {
data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
if (data) {
struct nvbios_init init = {
@@ -954,32 +964,37 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
.execute = 1,
};
- if (nvbios_exec(&init))
- return 0x0000;
- return conf;
+ nvbios_exec(&init);
}
}
- return 0x0000;
+ return conf;
}
static void
-nv50_disp_intr_unk10(struct nv50_disp_priv *priv, u32 super)
+nv50_disp_intr_unk10_0(struct nv50_disp_priv *priv, int head)
{
- int head = ffs((super & 0x00000060) >> 5) - 1;
- if (head >= 0) {
- head = ffs((super & 0x00000180) >> 7) - 1;
- if (head >= 0)
- exec_script(priv, head, 1);
- }
+ exec_script(priv, head, 1);
+}
- nv_wr32(priv, 0x610024, 0x00000010);
- nv_wr32(priv, 0x610030, 0x80000000);
+static void
+nv50_disp_intr_unk20_0(struct nv50_disp_priv *priv, int head)
+{
+ exec_script(priv, head, 2);
+}
+
+static void
+nv50_disp_intr_unk20_1(struct nv50_disp_priv *priv, int head)
+{
+ struct nouveau_clock *clk = nouveau_clock(priv);
+ u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+ if (pclk)
+ clk->pll_set(clk, PLL_VPLL0 + head, pclk);
}
static void
-nv50_disp_intr_unk20_dp(struct nv50_disp_priv *priv,
- struct dcb_output *outp, u32 pclk)
+nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv,
+ struct dcb_output *outp, u32 pclk)
{
const int link = !(outp->sorconf.link & 1);
const int or = ffs(outp->or) - 1;
@@ -1085,53 +1100,54 @@ nv50_disp_intr_unk20_dp(struct nv50_disp_priv *priv,
}
static void
-nv50_disp_intr_unk20(struct nv50_disp_priv *priv, u32 super)
+nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
{
struct dcb_output outp;
- u32 addr, mask, data;
- int head;
+ u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+ u32 hval, hreg = 0x614200 + (head * 0x800);
+ u32 oval, oreg;
+ u32 conf = exec_clkcmp(priv, head, 0xff, pclk, &outp);
+ if (conf != ~0) {
+ if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) {
+ u32 soff = (ffs(outp.or) - 1) * 0x08;
+ u32 ctrl = nv_rd32(priv, 0x610798 + soff);
+ u32 datarate;
+
+ switch ((ctrl & 0x000f0000) >> 16) {
+ case 6: datarate = pclk * 30 / 8; break;
+ case 5: datarate = pclk * 24 / 8; break;
+ case 2:
+ default:
+ datarate = pclk * 18 / 8;
+ break;
+ }
- /* finish detaching encoder? */
- head = ffs((super & 0x00000180) >> 7) - 1;
- if (head >= 0)
- exec_script(priv, head, 2);
-
- /* check whether a vpll change is required */
- head = ffs((super & 0x00000600) >> 9) - 1;
- if (head >= 0) {
- u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
- if (pclk) {
- struct nouveau_clock *clk = nouveau_clock(priv);
- clk->pll_set(clk, PLL_VPLL0 + head, pclk);
+ nouveau_dp_train(&priv->base, priv->sor.dp,
+ &outp, head, datarate);
}
- nv_mask(priv, 0x614200 + head * 0x800, 0x0000000f, 0x00000000);
- }
-
- /* (re)attach the relevant OR to the head */
- head = ffs((super & 0x00000180) >> 7) - 1;
- if (head >= 0) {
- u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
- u32 conf = exec_clkcmp(priv, head, 0, pclk, &outp);
- if (conf) {
- if (outp.type == DCB_OUTPUT_ANALOG) {
- addr = 0x614280 + (ffs(outp.or) - 1) * 0x800;
- mask = 0xffffffff;
- data = 0x00000000;
- } else {
- if (outp.type == DCB_OUTPUT_DP)
- nv50_disp_intr_unk20_dp(priv, &outp, pclk);
- addr = 0x614300 + (ffs(outp.or) - 1) * 0x800;
- mask = 0x00000707;
- data = (conf & 0x0100) ? 0x0101 : 0x0000;
- }
-
- nv_mask(priv, addr, mask, data);
+ exec_clkcmp(priv, head, 0, pclk, &outp);
+
+ if (!outp.location && outp.type == DCB_OUTPUT_ANALOG) {
+ oreg = 0x614280 + (ffs(outp.or) - 1) * 0x800;
+ oval = 0x00000000;
+ hval = 0x00000000;
+ } else
+ if (!outp.location) {
+ if (outp.type == DCB_OUTPUT_DP)
+ nv50_disp_intr_unk20_2_dp(priv, &outp, pclk);
+ oreg = 0x614300 + (ffs(outp.or) - 1) * 0x800;
+ oval = (conf & 0x0100) ? 0x00000101 : 0x00000000;
+ hval = 0x00000000;
+ } else {
+ oreg = 0x614380 + (ffs(outp.or) - 1) * 0x800;
+ oval = 0x00000001;
+ hval = 0x00000001;
}
- }
- nv_wr32(priv, 0x610024, 0x00000020);
- nv_wr32(priv, 0x610030, 0x80000000);
+ nv_mask(priv, hreg, 0x0000000f, hval);
+ nv_mask(priv, oreg, 0x00000707, oval);
+ }
}
/* If programming a TMDS output on a SOR that can also be configured for
@@ -1143,7 +1159,7 @@ nv50_disp_intr_unk20(struct nv50_disp_priv *priv, u32 super)
* programmed for DisplayPort.
*/
static void
-nv50_disp_intr_unk40_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp)
+nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp)
{
struct nouveau_bios *bios = nouveau_bios(priv);
const int link = !(outp->sorconf.link & 1);
@@ -1157,35 +1173,79 @@ nv50_disp_intr_unk40_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp)
}
static void
-nv50_disp_intr_unk40(struct nv50_disp_priv *priv, u32 super)
+nv50_disp_intr_unk40_0(struct nv50_disp_priv *priv, int head)
{
- int head = ffs((super & 0x00000180) >> 7) - 1;
- if (head >= 0) {
- struct dcb_output outp;
- u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
- if (pclk && exec_clkcmp(priv, head, 1, pclk, &outp)) {
- if (outp.type == DCB_OUTPUT_TMDS)
- nv50_disp_intr_unk40_tmds(priv, &outp);
+ struct dcb_output outp;
+ u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+ if (exec_clkcmp(priv, head, 1, pclk, &outp) != ~0) {
+ if (outp.location == 0 && outp.type == DCB_OUTPUT_TMDS)
+ nv50_disp_intr_unk40_0_tmds(priv, &outp);
+ else
+ if (outp.location == 1 && outp.type == DCB_OUTPUT_DP) {
+ u32 soff = (ffs(outp.or) - 1) * 0x08;
+ u32 ctrl = nv_rd32(priv, 0x610b84 + soff);
+ u32 datarate;
+
+ switch ((ctrl & 0x000f0000) >> 16) {
+ case 6: datarate = pclk * 30 / 8; break;
+ case 5: datarate = pclk * 24 / 8; break;
+ case 2:
+ default:
+ datarate = pclk * 18 / 8;
+ break;
+ }
+
+ nouveau_dp_train(&priv->base, priv->pior.dp,
+ &outp, head, datarate);
}
}
-
- nv_wr32(priv, 0x610024, 0x00000040);
- nv_wr32(priv, 0x610030, 0x80000000);
}
-static void
-nv50_disp_intr_super(struct nv50_disp_priv *priv, u32 intr1)
+void
+nv50_disp_intr_supervisor(struct work_struct *work)
{
+ struct nv50_disp_priv *priv =
+ container_of(work, struct nv50_disp_priv, supervisor);
u32 super = nv_rd32(priv, 0x610030);
+ int head;
- nv_debug(priv, "supervisor 0x%08x 0x%08x\n", intr1, super);
+ nv_debug(priv, "supervisor 0x%08x 0x%08x\n", priv->super, super);
- if (intr1 & 0x00000010)
- nv50_disp_intr_unk10(priv, super);
- if (intr1 & 0x00000020)
- nv50_disp_intr_unk20(priv, super);
- if (intr1 & 0x00000040)
- nv50_disp_intr_unk40(priv, super);
+ if (priv->super & 0x00000010) {
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(super & (0x00000020 << head)))
+ continue;
+ if (!(super & (0x00000080 << head)))
+ continue;
+ nv50_disp_intr_unk10_0(priv, head);
+ }
+ } else
+ if (priv->super & 0x00000020) {
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(super & (0x00000080 << head)))
+ continue;
+ nv50_disp_intr_unk20_0(priv, head);
+ }
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(super & (0x00000200 << head)))
+ continue;
+ nv50_disp_intr_unk20_1(priv, head);
+ }
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(super & (0x00000080 << head)))
+ continue;
+ nv50_disp_intr_unk20_2(priv, head);
+ }
+ } else
+ if (priv->super & 0x00000040) {
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(super & (0x00000080 << head)))
+ continue;
+ nv50_disp_intr_unk40_0(priv, head);
+ }
+ }
+
+ nv_wr32(priv, 0x610030, 0x80000000);
}
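
The supervisor handler above is the heart of this rework: the interrupt handler now only latches which supervisor stage fired and defers the script execution and VPLL/link-training work to process context, where sleeping is allowed. A minimal kernel-style sketch of that latch-and-defer shape, using invented structure and function names rather than the driver's:

        #include <linux/kernel.h>
        #include <linux/interrupt.h>
        #include <linux/workqueue.h>
        #include <linux/types.h>

        struct my_disp {
                struct work_struct supervisor;
                u32 super;                      /* stage bits latched by the IRQ handler */
        };

        static void my_supervisor_work(struct work_struct *work)
        {
                struct my_disp *disp = container_of(work, struct my_disp, supervisor);

                /* act on disp->super per head; may sleep (scripts, DP training) */
        }

        static irqreturn_t my_disp_irq(int irq, void *arg)
        {
                struct my_disp *disp = arg;

                disp->super = 0x00000070;               /* whatever the hardware reported */
                schedule_work(&disp->supervisor);       /* final ack happens from the work item */
                return IRQ_HANDLED;
        }

        /* at init time: INIT_WORK(&disp->supervisor, my_supervisor_work); */
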
void
@@ -1201,19 +1261,21 @@ nv50_disp_intr(struct nouveau_subdev *subdev)
}
if (intr1 & 0x00000004) {
- nv50_disp_intr_vblank(priv, 0);
+ nouveau_event_trigger(priv->base.vblank, 0);
nv_wr32(priv, 0x610024, 0x00000004);
intr1 &= ~0x00000004;
}
if (intr1 & 0x00000008) {
- nv50_disp_intr_vblank(priv, 1);
+ nouveau_event_trigger(priv->base.vblank, 1);
nv_wr32(priv, 0x610024, 0x00000008);
intr1 &= ~0x00000008;
}
if (intr1 & 0x00000070) {
- nv50_disp_intr_super(priv, intr1);
+ priv->super = (intr1 & 0x00000070);
+ schedule_work(&priv->supervisor);
+ nv_wr32(priv, 0x610024, priv->super);
intr1 &= ~0x00000070;
}
}
@@ -1226,7 +1288,7 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv50_disp_priv *priv;
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
"display", &priv);
*pobject = nv_object(priv);
if (ret)
@@ -1235,16 +1297,17 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nv50_disp_base_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
+ INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
priv->sclass = nv50_disp_sclass;
priv->head.nr = 2;
priv->dac.nr = 3;
priv->sor.nr = 2;
+ priv->pior.nr = 3;
priv->dac.power = nv50_dac_power;
priv->dac.sense = nv50_dac_sense;
priv->sor.power = nv50_sor_power;
-
- INIT_LIST_HEAD(&priv->base.vblank.list);
- spin_lock_init(&priv->base.vblank.lock);
+ priv->pior.power = nv50_pior_power;
+ priv->pior.dp = &nv50_pior_dp_func;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
index a6bb931450f..1ae6ceb5670 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
@@ -3,16 +3,22 @@
#include <core/parent.h>
#include <core/namedb.h>
+#include <core/engctx.h>
#include <core/ramht.h>
+#include <core/event.h>
#include <engine/dmaobj.h>
#include <engine/disp.h>
-struct dcb_output;
+#include "dport.h"
struct nv50_disp_priv {
struct nouveau_disp base;
struct nouveau_oclass *sclass;
+
+ struct work_struct supervisor;
+ u32 super;
+
struct {
int nr;
} head;
@@ -26,23 +32,15 @@ struct nv50_disp_priv {
int (*power)(struct nv50_disp_priv *, int sor, u32 data);
int (*hda_eld)(struct nv50_disp_priv *, int sor, u8 *, u32);
int (*hdmi)(struct nv50_disp_priv *, int head, int sor, u32);
- int (*dp_train_init)(struct nv50_disp_priv *, int sor, int link,
- int head, u16 type, u16 mask, u32 data,
- struct dcb_output *);
- int (*dp_train_fini)(struct nv50_disp_priv *, int sor, int link,
- int head, u16 type, u16 mask, u32 data,
- struct dcb_output *);
- int (*dp_train)(struct nv50_disp_priv *, int sor, int link,
- u16 type, u16 mask, u32 data,
- struct dcb_output *);
- int (*dp_lnkctl)(struct nv50_disp_priv *, int sor, int link,
- int head, u16 type, u16 mask, u32 data,
- struct dcb_output *);
- int (*dp_drvctl)(struct nv50_disp_priv *, int sor, int link,
- int lane, u16 type, u16 mask, u32 data,
- struct dcb_output *);
u32 lvdsconf;
+ const struct nouveau_dp_func *dp;
} sor;
+ struct {
+ int nr;
+ int (*power)(struct nv50_disp_priv *, int ext, u32 data);
+ u8 type[3];
+ const struct nouveau_dp_func *dp;
+ } pior;
};
#define DAC_MTHD(n) (n), (n) + 0x03
@@ -81,6 +79,11 @@ int nvd0_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
int nvd0_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
struct dcb_output *);
+#define PIOR_MTHD(n) (n), (n) + 0x03
+
+int nv50_pior_mthd(struct nouveau_object *, u32, void *, u32);
+int nv50_pior_power(struct nv50_disp_priv *, int, u32);
+
struct nv50_disp_base {
struct nouveau_parent base;
struct nouveau_ramht *ramht;
@@ -124,6 +127,7 @@ extern struct nouveau_ofuncs nv50_disp_oimm_ofuncs;
extern struct nouveau_ofuncs nv50_disp_curs_ofuncs;
extern struct nouveau_ofuncs nv50_disp_base_ofuncs;
extern struct nouveau_oclass nv50_disp_cclass;
+void nv50_disp_intr_supervisor(struct work_struct *);
void nv50_disp_intr(struct nouveau_subdev *);
extern struct nouveau_omthds nv84_disp_base_omthds[];
@@ -137,6 +141,7 @@ extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs;
extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs;
extern struct nouveau_ofuncs nvd0_disp_base_ofuncs;
extern struct nouveau_oclass nvd0_disp_cclass;
+void nvd0_disp_intr_supervisor(struct work_struct *);
void nvd0_disp_intr(struct nouveau_subdev *);
#endif
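
The header change above collapses the five per-chipset dp_* callbacks into a single const function table, and both the SOR and the new PIOR structures carry a pointer to one, so common DP training code can be handed either table without caring which encoder type it is driving. A generic sketch of that shape (all names invented for illustration, not the nouveau_dp_func API itself):

        #include <stdbool.h>

        struct dp_ops {
                int (*pattern)(void *ctx, int head, int pattern);
                int (*lnk_ctl)(void *ctx, int head, int nr, int bw, bool enh);
                int (*drv_ctl)(void *ctx, int head, int lane, int vs, int pe);
        };

        struct output {
                const struct dp_ops *dp;        /* chipset constructor picks the table once */
        };

        static int train_set_pattern(struct output *out, void *ctx, int head, int pat)
        {
                return out->dp->pattern(ctx, head, pat);        /* common code, any encoder */
        }

        static int dummy_pattern(void *ctx, int head, int pattern)
        {
                (void)ctx; (void)head;
                return pattern;
        }

        int main(void)
        {
                static const struct dp_ops dummy = { .pattern = dummy_pattern };
                struct output out = { .dp = &dummy };

                return train_set_pattern(&out, 0, 0, 1) == 1 ? 0 : 1;
        }
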
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
index fc84eacdfbe..d8c74c0883a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
@@ -46,6 +46,9 @@ nv84_disp_base_omthds[] = {
{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
{ DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
{ DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
{},
};
@@ -63,7 +66,7 @@ nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv50_disp_priv *priv;
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
"display", &priv);
*pobject = nv_object(priv);
if (ret)
@@ -72,17 +75,18 @@ nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nv84_disp_base_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
+ INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
priv->sclass = nv84_disp_sclass;
priv->head.nr = 2;
priv->dac.nr = 3;
priv->sor.nr = 2;
+ priv->pior.nr = 3;
priv->dac.power = nv50_dac_power;
priv->dac.sense = nv50_dac_sense;
priv->sor.power = nv50_sor_power;
priv->sor.hdmi = nv84_hdmi_ctrl;
-
- INIT_LIST_HEAD(&priv->base.vblank.list);
- spin_lock_init(&priv->base.vblank.lock);
+ priv->pior.power = nv50_pior_power;
+ priv->pior.dp = &nv50_pior_dp_func;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
index ba9dfd4669a..a66f949c1f8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
@@ -44,14 +44,11 @@ nv94_disp_base_omthds[] = {
{ SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
{ SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN) , nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL) , nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd },
{ DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
{ DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
{},
};
@@ -69,7 +66,7 @@ nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv50_disp_priv *priv;
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
"display", &priv);
*pobject = nv_object(priv);
if (ret)
@@ -78,22 +75,19 @@ nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nv94_disp_base_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
+ INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
priv->sclass = nv94_disp_sclass;
priv->head.nr = 2;
priv->dac.nr = 3;
priv->sor.nr = 4;
+ priv->pior.nr = 3;
priv->dac.power = nv50_dac_power;
priv->dac.sense = nv50_dac_sense;
priv->sor.power = nv50_sor_power;
priv->sor.hdmi = nv84_hdmi_ctrl;
- priv->sor.dp_train = nv94_sor_dp_train;
- priv->sor.dp_train_init = nv94_sor_dp_train_init;
- priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
- priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl;
- priv->sor.dp_drvctl = nv94_sor_dp_drvctl;
-
- INIT_LIST_HEAD(&priv->base.vblank.list);
- spin_lock_init(&priv->base.vblank.lock);
+ priv->sor.dp = &nv94_sor_dp_func;
+ priv->pior.power = nv50_pior_power;
+ priv->pior.dp = &nv50_pior_dp_func;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
index 5d63902cded..6cf8eefac36 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
@@ -53,7 +53,7 @@ nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv50_disp_priv *priv;
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
"display", &priv);
*pobject = nv_object(priv);
if (ret)
@@ -62,17 +62,18 @@ nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nva0_disp_base_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
+ INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
priv->sclass = nva0_disp_sclass;
priv->head.nr = 2;
priv->dac.nr = 3;
priv->sor.nr = 2;
+ priv->pior.nr = 3;
priv->dac.power = nv50_dac_power;
priv->dac.sense = nv50_dac_sense;
priv->sor.power = nv50_sor_power;
priv->sor.hdmi = nv84_hdmi_ctrl;
-
- INIT_LIST_HEAD(&priv->base.vblank.list);
- spin_lock_init(&priv->base.vblank.lock);
+ priv->pior.power = nv50_pior_power;
+ priv->pior.dp = &nv50_pior_dp_func;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
index e9192ca389f..b75413169ea 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
@@ -45,14 +45,11 @@ nva3_disp_base_omthds[] = {
{ SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd },
{ SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN) , nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL) , nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd },
{ DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
{ DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
{},
};
@@ -70,7 +67,7 @@ nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv50_disp_priv *priv;
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
"display", &priv);
*pobject = nv_object(priv);
if (ret)
@@ -79,23 +76,20 @@ nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nva3_disp_base_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
+ INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
priv->sclass = nva3_disp_sclass;
priv->head.nr = 2;
priv->dac.nr = 3;
priv->sor.nr = 4;
+ priv->pior.nr = 3;
priv->dac.power = nv50_dac_power;
priv->dac.sense = nv50_dac_sense;
priv->sor.power = nv50_sor_power;
priv->sor.hda_eld = nva3_hda_eld;
priv->sor.hdmi = nva3_hdmi_ctrl;
- priv->sor.dp_train = nv94_sor_dp_train;
- priv->sor.dp_train_init = nv94_sor_dp_train_init;
- priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
- priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl;
- priv->sor.dp_drvctl = nv94_sor_dp_drvctl;
-
- INIT_LIST_HEAD(&priv->base.vblank.list);
- spin_lock_init(&priv->base.vblank.lock);
+ priv->sor.dp = &nv94_sor_dp_func;
+ priv->pior.power = nv50_pior_power;
+ priv->pior.dp = &nv50_pior_dp_func;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 9e38ebff5fb..788dd34ccb5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -27,12 +27,10 @@
#include <core/handle.h>
#include <core/class.h>
-#include <engine/software.h>
#include <engine/disp.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
-#include <subdev/bar.h>
#include <subdev/clock.h>
#include <subdev/bios.h>
@@ -230,7 +228,7 @@ nvd0_disp_sync_ctor(struct nouveau_object *parent,
struct nv50_disp_dmac *dmac;
int ret;
- if (size < sizeof(*data) || args->head >= priv->head.nr)
+ if (size < sizeof(*args) || args->head >= priv->head.nr)
return -EINVAL;
ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
@@ -270,7 +268,7 @@ nvd0_disp_ovly_ctor(struct nouveau_object *parent,
struct nv50_disp_dmac *dmac;
int ret;
- if (size < sizeof(*data) || args->head >= priv->head.nr)
+ if (size < sizeof(*args) || args->head >= priv->head.nr)
return -EINVAL;
ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
@@ -443,6 +441,18 @@ nvd0_disp_curs_ofuncs = {
* Base display object
******************************************************************************/
+static void
+nvd0_disp_base_vblank_enable(struct nouveau_event *event, int head)
+{
+ nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
+}
+
+static void
+nvd0_disp_base_vblank_disable(struct nouveau_event *event, int head)
+{
+ nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
+}
+
static int
nvd0_disp_base_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
@@ -459,6 +469,10 @@ nvd0_disp_base_ctor(struct nouveau_object *parent,
if (ret)
return ret;
+ priv->base.vblank->priv = priv;
+ priv->base.vblank->enable = nvd0_disp_base_vblank_enable;
+ priv->base.vblank->disable = nvd0_disp_base_vblank_disable;
+
return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
}
@@ -609,13 +623,24 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
}
static bool
-exec_script(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl, int id)
+exec_script(struct nv50_disp_priv *priv, int head, int id)
{
struct nouveau_bios *bios = nouveau_bios(priv);
struct nvbios_outp info;
struct dcb_output dcb;
u8 ver, hdr, cnt, len;
+ u32 ctrl = 0x00000000;
u16 data;
+ int outp;
+
+ for (outp = 0; !(ctrl & (1 << head)) && outp < 8; outp++) {
+ ctrl = nv_rd32(priv, 0x640180 + (outp * 0x20));
+ if (ctrl & (1 << head))
+ break;
+ }
+
+ if (outp == 8)
+ return false;
data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
if (data) {
@@ -635,21 +660,31 @@ exec_script(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl, int id)
}
static u32
-exec_clkcmp(struct nv50_disp_priv *priv, int head, int outp,
- u32 ctrl, int id, u32 pclk)
+exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
+ u32 pclk, struct dcb_output *dcb)
{
struct nouveau_bios *bios = nouveau_bios(priv);
struct nvbios_outp info1;
struct nvbios_ocfg info2;
- struct dcb_output dcb;
u8 ver, hdr, cnt, len;
- u16 data, conf;
+ u32 ctrl = 0x00000000;
+ u32 data, conf = ~0;
+ int outp;
- data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info1);
- if (data == 0x0000)
+ for (outp = 0; !(ctrl & (1 << head)) && outp < 8; outp++) {
+ ctrl = nv_rd32(priv, 0x660180 + (outp * 0x20));
+ if (ctrl & (1 << head))
+ break;
+ }
+
+ if (outp == 8)
return false;
- switch (dcb.type) {
+ data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1);
+ if (data == 0x0000)
+ return conf;
+
+ switch (dcb->type) {
case DCB_OUTPUT_TMDS:
conf = (ctrl & 0x00000f00) >> 8;
if (pclk >= 165000)
@@ -668,46 +703,52 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int outp,
}
data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
- if (data) {
+ if (data && id < 0xff) {
data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
if (data) {
struct nvbios_init init = {
.subdev = nv_subdev(priv),
.bios = bios,
.offset = data,
- .outp = &dcb,
+ .outp = dcb,
.crtc = head,
.execute = 1,
};
- if (nvbios_exec(&init))
- return 0x0000;
- return conf;
+ nvbios_exec(&init);
}
}
- return 0x0000;
+ return conf;
}
static void
-nvd0_display_unk1_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+nvd0_disp_intr_unk1_0(struct nv50_disp_priv *priv, int head)
{
- int i;
+ exec_script(priv, head, 1);
+}
- for (i = 0; mask && i < 8; i++) {
- u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20));
- if (mcc & (1 << head))
- exec_script(priv, head, i, mcc, 1);
- }
+static void
+nvd0_disp_intr_unk2_0(struct nv50_disp_priv *priv, int head)
+{
+ exec_script(priv, head, 2);
+}
- nv_wr32(priv, 0x6101d4, 0x00000000);
- nv_wr32(priv, 0x6109d4, 0x00000000);
- nv_wr32(priv, 0x6101d0, 0x80000000);
+static void
+nvd0_disp_intr_unk2_1(struct nv50_disp_priv *priv, int head)
+{
+ struct nouveau_clock *clk = nouveau_clock(priv);
+ u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+ if (pclk)
+ clk->pll_set(clk, PLL_VPLL0 + head, pclk);
+ nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
}
static void
-nvd0_display_unk2_calc_tu(struct nv50_disp_priv *priv, int head, int or)
+nvd0_disp_intr_unk2_2_tu(struct nv50_disp_priv *priv, int head,
+ struct dcb_output *outp)
{
+ const int or = ffs(outp->or) - 1;
const u32 ctrl = nv_rd32(priv, 0x660200 + (or * 0x020));
const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300));
const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
@@ -750,105 +791,102 @@ nvd0_display_unk2_calc_tu(struct nv50_disp_priv *priv, int head, int or)
}
static void
-nvd0_display_unk2_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
{
- u32 pclk;
- int i;
-
- for (i = 0; mask && i < 8; i++) {
- u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20));
- if (mcc & (1 << head))
- exec_script(priv, head, i, mcc, 2);
- }
+ struct dcb_output outp;
+ u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+ u32 conf = exec_clkcmp(priv, head, 0xff, pclk, &outp);
+ if (conf != ~0) {
+ u32 addr, data;
+
+ if (outp.type == DCB_OUTPUT_DP) {
+ u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300));
+ switch ((sync & 0x000003c0) >> 6) {
+ case 6: pclk = pclk * 30 / 8; break;
+ case 5: pclk = pclk * 24 / 8; break;
+ case 2:
+ default:
+ pclk = pclk * 18 / 8;
+ break;
+ }
- pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
- nv_debug(priv, "head %d pclk %d mask 0x%08x\n", head, pclk, mask);
- if (pclk && (mask & 0x00010000)) {
- struct nouveau_clock *clk = nouveau_clock(priv);
- clk->pll_set(clk, PLL_VPLL0 + head, pclk);
- }
+ nouveau_dp_train(&priv->base, priv->sor.dp,
+ &outp, head, pclk);
+ }
- nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
+ exec_clkcmp(priv, head, 0, pclk, &outp);
- for (i = 0; mask && i < 8; i++) {
- u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20)), cfg;
- if (mcp & (1 << head)) {
- if ((cfg = exec_clkcmp(priv, head, i, mcp, 0, pclk))) {
- u32 addr, mask, data = 0x00000000;
- if (i < 4) {
- addr = 0x612280 + ((i - 0) * 0x800);
- mask = 0xffffffff;
- } else {
- switch (mcp & 0x00000f00) {
- case 0x00000800:
- case 0x00000900:
- nvd0_display_unk2_calc_tu(priv, head, i - 4);
- break;
- default:
- break;
- }
-
- addr = 0x612300 + ((i - 4) * 0x800);
- mask = 0x00000707;
- if (cfg & 0x00000100)
- data = 0x00000101;
- }
- nv_mask(priv, addr, mask, data);
- }
- break;
+ if (outp.type == DCB_OUTPUT_ANALOG) {
+ addr = 0x612280 + (ffs(outp.or) - 1) * 0x800;
+ data = 0x00000000;
+ } else {
+ if (outp.type == DCB_OUTPUT_DP)
+ nvd0_disp_intr_unk2_2_tu(priv, head, &outp);
+ addr = 0x612300 + (ffs(outp.or) - 1) * 0x800;
+ data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
}
- }
- nv_wr32(priv, 0x6101d4, 0x00000000);
- nv_wr32(priv, 0x6109d4, 0x00000000);
- nv_wr32(priv, 0x6101d0, 0x80000000);
+ nv_mask(priv, addr, 0x00000707, data);
+ }
}
static void
-nvd0_display_unk4_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+nvd0_disp_intr_unk4_0(struct nv50_disp_priv *priv, int head)
{
- int pclk, i;
-
- pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+ struct dcb_output outp;
+ u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+ exec_clkcmp(priv, head, 1, pclk, &outp);
+}
- for (i = 0; mask && i < 8; i++) {
- u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20));
- if (mcp & (1 << head))
- exec_clkcmp(priv, head, i, mcp, 1, pclk);
+void
+nvd0_disp_intr_supervisor(struct work_struct *work)
+{
+ struct nv50_disp_priv *priv =
+ container_of(work, struct nv50_disp_priv, supervisor);
+ u32 mask[4];
+ int head;
+
+ nv_debug(priv, "supervisor %08x\n", priv->super);
+ for (head = 0; head < priv->head.nr; head++) {
+ mask[head] = nv_rd32(priv, 0x6101d4 + (head * 0x800));
+ nv_debug(priv, "head %d: 0x%08x\n", head, mask[head]);
}
- nv_wr32(priv, 0x6101d4, 0x00000000);
- nv_wr32(priv, 0x6109d4, 0x00000000);
- nv_wr32(priv, 0x6101d0, 0x80000000);
-}
-
-static void
-nvd0_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
-{
- struct nouveau_bar *bar = nouveau_bar(priv);
- struct nouveau_disp *disp = &priv->base;
- struct nouveau_software_chan *chan, *temp;
- unsigned long flags;
-
- spin_lock_irqsave(&disp->vblank.lock, flags);
- list_for_each_entry_safe(chan, temp, &disp->vblank.list, vblank.head) {
- if (chan->vblank.crtc != crtc)
- continue;
-
- nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
- bar->flush(bar);
- nv_wr32(priv, 0x06000c, upper_32_bits(chan->vblank.offset));
- nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset));
- nv_wr32(priv, 0x060014, chan->vblank.value);
-
- list_del(&chan->vblank.head);
- if (disp->vblank.put)
- disp->vblank.put(disp->vblank.data, crtc);
+ if (priv->super & 0x00000001) {
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(mask[head] & 0x00001000))
+ continue;
+ nvd0_disp_intr_unk1_0(priv, head);
+ }
+ } else
+ if (priv->super & 0x00000002) {
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(mask[head] & 0x00001000))
+ continue;
+ nvd0_disp_intr_unk2_0(priv, head);
+ }
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(mask[head] & 0x00010000))
+ continue;
+ nvd0_disp_intr_unk2_1(priv, head);
+ }
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(mask[head] & 0x00001000))
+ continue;
+ nvd0_disp_intr_unk2_2(priv, head);
+ }
+ } else
+ if (priv->super & 0x00000004) {
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(mask[head] & 0x00001000))
+ continue;
+ nvd0_disp_intr_unk4_0(priv, head);
+ }
}
- spin_unlock_irqrestore(&disp->vblank.lock, flags);
- if (disp->vblank.notify)
- disp->vblank.notify(disp->vblank.data, crtc);
+ for (head = 0; head < priv->head.nr; head++)
+ nv_wr32(priv, 0x6101d4 + (head * 0x800), 0x00000000);
+ nv_wr32(priv, 0x6101d0, 0x80000000);
}
void
@@ -884,27 +922,11 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
if (intr & 0x00100000) {
u32 stat = nv_rd32(priv, 0x6100ac);
- u32 mask = 0, crtc = ~0;
-
- while (!mask && ++crtc < priv->head.nr)
- mask = nv_rd32(priv, 0x6101d4 + (crtc * 0x800));
-
- if (stat & 0x00000001) {
- nv_wr32(priv, 0x6100ac, 0x00000001);
- nvd0_display_unk1_handler(priv, crtc, mask);
- stat &= ~0x00000001;
- }
-
- if (stat & 0x00000002) {
- nv_wr32(priv, 0x6100ac, 0x00000002);
- nvd0_display_unk2_handler(priv, crtc, mask);
- stat &= ~0x00000002;
- }
-
- if (stat & 0x00000004) {
- nv_wr32(priv, 0x6100ac, 0x00000004);
- nvd0_display_unk4_handler(priv, crtc, mask);
- stat &= ~0x00000004;
+ if (stat & 0x00000007) {
+ priv->super = (stat & 0x00000007);
+ schedule_work(&priv->supervisor);
+ nv_wr32(priv, 0x6100ac, priv->super);
+ stat &= ~0x00000007;
}
if (stat) {
@@ -920,7 +942,7 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
if (mask & intr) {
u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
if (stat & 0x00000001)
- nvd0_disp_intr_vblank(priv, i);
+ nouveau_event_trigger(priv->base.vblank, i);
nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
nv_rd32(priv, 0x6100c0 + (i * 0x800));
}
@@ -933,10 +955,11 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_object **pobject)
{
struct nv50_disp_priv *priv;
+ int heads = nv_rd32(parent, 0x022448);
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
- "display", &priv);
+ ret = nouveau_disp_create(parent, engine, oclass, heads,
+ "PDISP", "display", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -944,8 +967,9 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nvd0_disp_base_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nvd0_disp_intr;
+ INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
priv->sclass = nvd0_disp_sclass;
- priv->head.nr = nv_rd32(priv, 0x022448);
+ priv->head.nr = heads;
priv->dac.nr = 3;
priv->sor.nr = 4;
priv->dac.power = nv50_dac_power;
@@ -953,14 +977,7 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->sor.power = nv50_sor_power;
priv->sor.hda_eld = nvd0_hda_eld;
priv->sor.hdmi = nvd0_hdmi_ctrl;
- priv->sor.dp_train = nvd0_sor_dp_train;
- priv->sor.dp_train_init = nv94_sor_dp_train_init;
- priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
- priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl;
- priv->sor.dp_drvctl = nvd0_sor_dp_drvctl;
-
- INIT_LIST_HEAD(&priv->base.vblank.list);
- spin_lock_init(&priv->base.vblank.lock);
+ priv->sor.dp = &nvd0_sor_dp_func;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
index 259537c4587..20725b363d5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
@@ -51,10 +51,11 @@ nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_object **pobject)
{
struct nv50_disp_priv *priv;
+ int heads = nv_rd32(parent, 0x022448);
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
- "display", &priv);
+ ret = nouveau_disp_create(parent, engine, oclass, heads,
+ "PDISP", "display", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -62,8 +63,9 @@ nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nve0_disp_base_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nvd0_disp_intr;
+ INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
priv->sclass = nve0_disp_sclass;
- priv->head.nr = nv_rd32(priv, 0x022448);
+ priv->head.nr = heads;
priv->dac.nr = 3;
priv->sor.nr = 4;
priv->dac.power = nv50_dac_power;
@@ -71,14 +73,7 @@ nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->sor.power = nv50_sor_power;
priv->sor.hda_eld = nvd0_hda_eld;
priv->sor.hdmi = nvd0_hdmi_ctrl;
- priv->sor.dp_train = nvd0_sor_dp_train;
- priv->sor.dp_train_init = nv94_sor_dp_train_init;
- priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
- priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl;
- priv->sor.dp_drvctl = nvd0_sor_dp_drvctl;
-
- INIT_LIST_HEAD(&priv->base.vblank.list);
- spin_lock_init(&priv->base.vblank.lock);
+ priv->sor.dp = &nvd0_sor_dp_func;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c
new file mode 100644
index 00000000000..2c8ce351b52
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/timer.h>
+#include <subdev/i2c.h>
+
+#include "nv50.h"
+
+/******************************************************************************
+ * DisplayPort
+ *****************************************************************************/
+static struct nouveau_i2c_port *
+nv50_pior_dp_find(struct nouveau_disp *disp, struct dcb_output *outp)
+{
+ struct nouveau_i2c *i2c = nouveau_i2c(disp);
+ return i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(outp->extdev));
+}
+
+static int
+nv50_pior_dp_pattern(struct nouveau_disp *disp, struct dcb_output *outp,
+ int head, int pattern)
+{
+ struct nouveau_i2c_port *port;
+ int ret = -EINVAL;
+
+ port = nv50_pior_dp_find(disp, outp);
+ if (port) {
+ if (port->func->pattern)
+ ret = port->func->pattern(port, pattern);
+ else
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int
+nv50_pior_dp_lnk_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
+ int head, int lane_nr, int link_bw, bool enh)
+{
+ struct nouveau_i2c_port *port;
+ int ret = -EINVAL;
+
+ port = nv50_pior_dp_find(disp, outp);
+ if (port && port->func->lnk_ctl)
+ ret = port->func->lnk_ctl(port, lane_nr, link_bw, enh);
+
+ return ret;
+}
+
+static int
+nv50_pior_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
+ int head, int lane, int vsw, int pre)
+{
+ struct nouveau_i2c_port *port;
+ int ret = -EINVAL;
+
+ port = nv50_pior_dp_find(disp, outp);
+ if (port) {
+ if (port->func->drv_ctl)
+ ret = port->func->drv_ctl(port, lane, vsw, pre);
+ else
+ ret = 0;
+ }
+
+ return ret;
+}
+
+const struct nouveau_dp_func
+nv50_pior_dp_func = {
+ .pattern = nv50_pior_dp_pattern,
+ .lnk_ctl = nv50_pior_dp_lnk_ctl,
+ .drv_ctl = nv50_pior_dp_drv_ctl,
+};
+
+/******************************************************************************
+ * General PIOR handling
+ *****************************************************************************/
+int
+nv50_pior_power(struct nv50_disp_priv *priv, int or, u32 data)
+{
+ const u32 stat = data & NV50_DISP_PIOR_PWR_STATE;
+ const u32 soff = (or * 0x800);
+ nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000);
+ nv_mask(priv, 0x61e004 + soff, 0x80000101, 0x80000000 | stat);
+ nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000);
+ return 0;
+}
+
+int
+nv50_pior_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ const u8 type = (mthd & NV50_DISP_PIOR_MTHD_TYPE) >> 12;
+ const u8 or = (mthd & NV50_DISP_PIOR_MTHD_OR);
+ u32 *data = args;
+ int ret;
+
+ if (size < sizeof(u32))
+ return -EINVAL;
+
+ mthd &= ~NV50_DISP_PIOR_MTHD_TYPE;
+ mthd &= ~NV50_DISP_PIOR_MTHD_OR;
+ switch (mthd) {
+ case NV50_DISP_PIOR_PWR:
+ ret = priv->pior.power(priv, or, data[0]);
+ priv->pior.type[or] = type;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
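
nv50_pior_mthd() above recovers both the connector type and the OR index from the method offset itself before dispatching on the base method. A standalone illustration of that decode, with placeholder mask values standing in for the real NV50_DISP_PIOR_MTHD_* definitions (only the >>12 type shift is taken from the code above):

        #include <stdio.h>
        #include <stdint.h>

        #define MTHD_TYPE 0x0000f000    /* placeholder for NV50_DISP_PIOR_MTHD_TYPE */
        #define MTHD_OR   0x00000003    /* placeholder for NV50_DISP_PIOR_MTHD_OR */

        int main(void)
        {
                uint32_t mthd = 0x00008602;     /* hypothetical packed method */
                unsigned int type = (mthd & MTHD_TYPE) >> 12;
                unsigned int or_idx = mthd & MTHD_OR;

                printf("base 0x%04x type %u or %u\n",
                       mthd & ~(MTHD_TYPE | MTHD_OR), type, or_idx);
                return 0;
        }
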
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
index 39b6b67732d..ab1e918469a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -79,31 +79,6 @@ nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
priv->sor.lvdsconf = data & NV50_DISP_SOR_LVDS_SCRIPT_ID;
ret = 0;
break;
- case NV94_DISP_SOR_DP_TRAIN:
- switch (data & NV94_DISP_SOR_DP_TRAIN_OP) {
- case NV94_DISP_SOR_DP_TRAIN_OP_PATTERN:
- ret = priv->sor.dp_train(priv, or, link, type, mask, data, &outp);
- break;
- case NV94_DISP_SOR_DP_TRAIN_OP_INIT:
- ret = priv->sor.dp_train_init(priv, or, link, head, type, mask, data, &outp);
- break;
- case NV94_DISP_SOR_DP_TRAIN_OP_FINI:
- ret = priv->sor.dp_train_fini(priv, or, link, head, type, mask, data, &outp);
- break;
- default:
- break;
- }
- break;
- case NV94_DISP_SOR_DP_LNKCTL:
- ret = priv->sor.dp_lnkctl(priv, or, link, head, type, mask, data, &outp);
- break;
- case NV94_DISP_SOR_DP_DRVCTL(0):
- case NV94_DISP_SOR_DP_DRVCTL(1):
- case NV94_DISP_SOR_DP_DRVCTL(2):
- case NV94_DISP_SOR_DP_DRVCTL(3):
- ret = priv->sor.dp_drvctl(priv, or, link, (mthd & 0xc0) >> 6,
- type, mask, data, &outp);
- break;
default:
BUG_ON(1);
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
index f6edd009762..7ec4ee83fb6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
@@ -33,124 +33,53 @@
#include "nv50.h"
static inline u32
-nv94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+nv94_sor_soff(struct dcb_output *outp)
{
- static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
- static const u8 nv94[] = { 16, 8, 0, 24 };
- if (nv_device(priv)->chipset == 0xaf)
- return nvaf[lane];
- return nv94[lane];
+ return (ffs(outp->or) - 1) * 0x800;
}
-int
-nv94_sor_dp_train_init(struct nv50_disp_priv *priv, int or, int link, int head,
- u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+static inline u32
+nv94_sor_loff(struct dcb_output *outp)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
- struct nvbios_dpout info;
- u8 ver, hdr, cnt, len;
- u16 outp;
-
- outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
- if (outp) {
- struct nvbios_init init = {
- .subdev = nv_subdev(priv),
- .bios = bios,
- .outp = dcbo,
- .crtc = head,
- .execute = 1,
- };
-
- if (data & NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON)
- init.offset = info.script[2];
- else
- init.offset = info.script[3];
- nvbios_exec(&init);
-
- init.offset = info.script[0];
- nvbios_exec(&init);
- }
-
- return 0;
+ return nv94_sor_soff(outp) + !(outp->sorconf.link & 1) * 0x80;
}
-int
-nv94_sor_dp_train_fini(struct nv50_disp_priv *priv, int or, int link, int head,
- u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+static inline u32
+nv94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
- struct nvbios_dpout info;
- u8 ver, hdr, cnt, len;
- u16 outp;
-
- outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
- if (outp) {
- struct nvbios_init init = {
- .subdev = nv_subdev(priv),
- .bios = bios,
- .offset = info.script[1],
- .outp = dcbo,
- .crtc = head,
- .execute = 1,
- };
-
- nvbios_exec(&init);
- }
-
- return 0;
+ static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
+ static const u8 nv94[] = { 16, 8, 0, 24 };
+ if (nv_device(priv)->chipset == 0xaf)
+ return nvaf[lane];
+ return nv94[lane];
}
-int
-nv94_sor_dp_train(struct nv50_disp_priv *priv, int or, int link,
- u16 type, u16 mask, u32 data, struct dcb_output *info)
+static int
+nv94_sor_dp_pattern(struct nouveau_disp *disp, struct dcb_output *outp,
+ int head, int pattern)
{
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN);
- nv_mask(priv, 0x61c10c + loff, 0x0f000000, patt << 24);
+ struct nv50_disp_priv *priv = (void *)disp;
+ const u32 loff = nv94_sor_loff(outp);
+ nv_mask(priv, 0x61c10c + loff, 0x0f000000, pattern << 24);
return 0;
}
-int
-nv94_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
- u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+static int
+nv94_sor_dp_lnk_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
+ int head, int link_nr, int link_bw, bool enh_frame)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u32 soff = (or * 0x800);
- u16 link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8;
- u8 link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT);
+ struct nv50_disp_priv *priv = (void *)disp;
+ const u32 soff = nv94_sor_soff(outp);
+ const u32 loff = nv94_sor_loff(outp);
u32 dpctrl = 0x00000000;
u32 clksor = 0x00000000;
- u32 outp, lane = 0;
- u8 ver, hdr, cnt, len;
- struct nvbios_dpout info;
+ u32 lane = 0;
int i;
- /* -> 10Khz units */
- link_bw *= 2700;
-
- outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
- if (outp && info.lnkcmp) {
- struct nvbios_init init = {
- .subdev = nv_subdev(priv),
- .bios = bios,
- .offset = 0x0000,
- .outp = dcbo,
- .crtc = head,
- .execute = 1,
- };
-
- while (link_bw < nv_ro16(bios, info.lnkcmp))
- info.lnkcmp += 4;
- init.offset = nv_ro16(bios, info.lnkcmp + 2);
-
- nvbios_exec(&init);
- }
-
dpctrl |= ((1 << link_nr) - 1) << 16;
- if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH)
+ if (enh_frame)
dpctrl |= 0x00004000;
- if (link_bw > 16200)
+ if (link_bw > 0x06)
clksor |= 0x00040000;
for (i = 0; i < link_nr; i++)
@@ -162,24 +91,25 @@ nv94_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
return 0;
}
-int
-nv94_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
- u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+static int
+nv94_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
+ int head, int lane, int swing, int preem)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8;
- const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE);
+ struct nouveau_bios *bios = nouveau_bios(disp);
+ struct nv50_disp_priv *priv = (void *)disp;
+ const u32 loff = nv94_sor_loff(outp);
u32 addr, shift = nv94_sor_dp_lane_map(priv, lane);
u8 ver, hdr, cnt, len;
- struct nvbios_dpout outp;
+ struct nvbios_dpout info;
struct nvbios_dpcfg ocfg;
- addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp);
+ addr = nvbios_dpout_match(bios, outp->hasht, outp->hashm,
+ &ver, &hdr, &cnt, &len, &info);
if (!addr)
return -ENODEV;
- addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg);
+ addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem,
+ &ver, &hdr, &cnt, &len, &ocfg);
if (!addr)
return -EINVAL;
@@ -188,3 +118,10 @@ nv94_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
return 0;
}
+
+const struct nouveau_dp_func
+nv94_sor_dp_func = {
+ .pattern = nv94_sor_dp_pattern,
+ .lnk_ctl = nv94_sor_dp_lnk_ctl,
+ .drv_ctl = nv94_sor_dp_drv_ctl,
+};
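
The link-control rewrite above also changes units: link_bw used to be scaled to 10 kHz (hence the old "> 16200" test for rates above 1.62 GHz) and now appears to be the raw DPCD LINK_BW_SET code, so the same check becomes "> 0x06". A quick standalone conversion for reference; the 270 Mbps-per-code factor is standard DisplayPort, not taken from this patch:

        #include <stdio.h>

        int main(void)
        {
                static const unsigned int codes[] = { 0x06, 0x0a, 0x14 };
                unsigned int i;

                for (i = 0; i < sizeof(codes) / sizeof(codes[0]); i++)
                        printf("LINK_BW_SET 0x%02x -> %u Mbps per lane\n",
                               codes[i], codes[i] * 270);
                return 0;
        }
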
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
index c37ce7e29f5..9e1d435d728 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
@@ -33,59 +33,49 @@
#include "nv50.h"
static inline u32
+nvd0_sor_soff(struct dcb_output *outp)
+{
+ return (ffs(outp->or) - 1) * 0x800;
+}
+
+static inline u32
+nvd0_sor_loff(struct dcb_output *outp)
+{
+ return nvd0_sor_soff(outp) + !(outp->sorconf.link & 1) * 0x80;
+}
+
+static inline u32
nvd0_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
{
static const u8 nvd0[] = { 16, 8, 0, 24 };
return nvd0[lane];
}
-int
-nvd0_sor_dp_train(struct nv50_disp_priv *priv, int or, int link,
- u16 type, u16 mask, u32 data, struct dcb_output *info)
+static int
+nvd0_sor_dp_pattern(struct nouveau_disp *disp, struct dcb_output *outp,
+ int head, int pattern)
{
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN);
- nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * patt);
+ struct nv50_disp_priv *priv = (void *)disp;
+ const u32 loff = nvd0_sor_loff(outp);
+ nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
return 0;
}
-int
-nvd0_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
- u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+static int
+nvd0_sor_dp_lnk_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
+ int head, int link_nr, int link_bw, bool enh_frame)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u32 soff = (or * 0x800);
- const u8 link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8;
- const u8 link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT);
+ struct nv50_disp_priv *priv = (void *)disp;
+ const u32 soff = nvd0_sor_soff(outp);
+ const u32 loff = nvd0_sor_loff(outp);
u32 dpctrl = 0x00000000;
u32 clksor = 0x00000000;
- u32 outp, lane = 0;
- u8 ver, hdr, cnt, len;
- struct nvbios_dpout info;
+ u32 lane = 0;
int i;
- outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
- if (outp && info.lnkcmp) {
- struct nvbios_init init = {
- .subdev = nv_subdev(priv),
- .bios = bios,
- .offset = 0x0000,
- .outp = dcbo,
- .crtc = head,
- .execute = 1,
- };
-
- while (nv_ro08(bios, info.lnkcmp) < link_bw)
- info.lnkcmp += 3;
- init.offset = nv_ro16(bios, info.lnkcmp + 1);
-
- nvbios_exec(&init);
- }
-
clksor |= link_bw << 18;
dpctrl |= ((1 << link_nr) - 1) << 16;
- if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH)
+ if (enh_frame)
dpctrl |= 0x00004000;
for (i = 0; i < link_nr; i++)
@@ -97,24 +87,25 @@ nvd0_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
return 0;
}
-int
-nvd0_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
- u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+static int
+nvd0_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
+ int head, int lane, int swing, int preem)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8;
- const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE);
+ struct nouveau_bios *bios = nouveau_bios(disp);
+ struct nv50_disp_priv *priv = (void *)disp;
+ const u32 loff = nvd0_sor_loff(outp);
u32 addr, shift = nvd0_sor_dp_lane_map(priv, lane);
u8 ver, hdr, cnt, len;
- struct nvbios_dpout outp;
+ struct nvbios_dpout info;
struct nvbios_dpcfg ocfg;
- addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp);
+ addr = nvbios_dpout_match(bios, outp->hasht, outp->hashm,
+ &ver, &hdr, &cnt, &len, &info);
if (!addr)
return -ENODEV;
- addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg);
+ addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem,
+ &ver, &hdr, &cnt, &len, &ocfg);
if (!addr)
return -EINVAL;
@@ -124,3 +115,10 @@ nvd0_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
nv_mask(priv, 0x61c13c + loff, 0x00000000, 0x00000000);
return 0;
}
+
+const struct nouveau_dp_func
+nvd0_sor_dp_func = {
+ .pattern = nvd0_sor_dp_pattern,
+ .lnk_ctl = nvd0_sor_dp_lnk_ctl,
+ .drv_ctl = nvd0_sor_dp_drv_ctl,
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
index c2b9db33581..7341ebe131f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -22,8 +22,10 @@
* Authors: Ben Skeggs
*/
+#include <core/client.h>
#include <core/object.h>
#include <core/handle.h>
+#include <core/event.h>
#include <core/class.h>
#include <engine/dmaobj.h>
@@ -146,10 +148,25 @@ nouveau_fifo_chid(struct nouveau_fifo *priv, struct nouveau_object *object)
return -1;
}
+const char *
+nouveau_client_name_for_fifo_chid(struct nouveau_fifo *fifo, u32 chid)
+{
+ struct nouveau_fifo_chan *chan = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fifo->lock, flags);
+ if (chid >= fifo->min && chid <= fifo->max)
+ chan = (void *)fifo->channel[chid];
+ spin_unlock_irqrestore(&fifo->lock, flags);
+
+ return nouveau_client_name(chan);
+}
+
void
nouveau_fifo_destroy(struct nouveau_fifo *priv)
{
kfree(priv->channel);
+ nouveau_event_destroy(&priv->uevent);
nouveau_engine_destroy(&priv->base);
}
@@ -174,6 +191,10 @@ nouveau_fifo_create_(struct nouveau_object *parent,
if (!priv->channel)
return -ENOMEM;
+ ret = nouveau_event_create(1, &priv->uevent);
+ if (ret)
+ return ret;
+
priv->chid = nouveau_fifo_chid;
spin_lock_init(&priv->lock);
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index a47a8548f9e..f877bd524a9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -28,6 +28,7 @@
#include <core/namedb.h>
#include <core/handle.h>
#include <core/ramht.h>
+#include <core/event.h>
#include <subdev/instmem.h>
#include <subdev/instmem/nv04.h>
@@ -398,6 +399,98 @@ out:
return handled;
}
+static void
+nv04_fifo_cache_error(struct nouveau_device *device,
+ struct nv04_fifo_priv *priv, u32 chid, u32 get)
+{
+ u32 mthd, data;
+ int ptr;
+
+ /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my
+ * G80 chips, but CACHE1 isn't big enough for this much data.. Tests
+ * show that it wraps around to the start at GET=0x800.. No clue as to
+ * why..
+ */
+ ptr = (get & 0x7ff) >> 2;
+
+ if (device->card_type < NV_40) {
+ mthd = nv_rd32(priv, NV04_PFIFO_CACHE1_METHOD(ptr));
+ data = nv_rd32(priv, NV04_PFIFO_CACHE1_DATA(ptr));
+ } else {
+ mthd = nv_rd32(priv, NV40_PFIFO_CACHE1_METHOD(ptr));
+ data = nv_rd32(priv, NV40_PFIFO_CACHE1_DATA(ptr));
+ }
+
+ if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
+ const char *client_name =
+ nouveau_client_name_for_fifo_chid(&priv->base, chid);
+ nv_error(priv,
+ "CACHE_ERROR - ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
+ chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc,
+ data);
+ }
+
+ nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+ nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
+
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
+ nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
+ nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1);
+ nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0);
+
+ nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH,
+ nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+ nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+}
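
The extracted helper above keeps the original index math: per its comment, CACHE1's GET offset wraps at 0x800 bytes, and each method/data slot is 4 bytes apart (the recovery path bumps GET by 4 to skip one entry), so "(get & 0x7ff) >> 2" turns the byte offset into a slot index. A quick standalone check of that wrap behaviour:

        #include <stdio.h>

        int main(void)
        {
                unsigned int get;

                for (get = 0x7f8; get <= 0x804; get += 4)
                        printf("get 0x%03x -> slot %u\n", get, (get & 0x7ff) >> 2);
                return 0;
        }
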
+
+static void
+nv04_fifo_dma_pusher(struct nouveau_device *device, struct nv04_fifo_priv *priv,
+ u32 chid)
+{
+ const char *client_name;
+ u32 dma_get = nv_rd32(priv, 0x003244);
+ u32 dma_put = nv_rd32(priv, 0x003240);
+ u32 push = nv_rd32(priv, 0x003220);
+ u32 state = nv_rd32(priv, 0x003228);
+
+ client_name = nouveau_client_name_for_fifo_chid(&priv->base, chid);
+
+ if (device->card_type == NV_50) {
+ u32 ho_get = nv_rd32(priv, 0x003328);
+ u32 ho_put = nv_rd32(priv, 0x003320);
+ u32 ib_get = nv_rd32(priv, 0x003334);
+ u32 ib_put = nv_rd32(priv, 0x003330);
+
+ nv_error(priv,
+ "DMA_PUSHER - ch %d [%s] get 0x%02x%08x put 0x%02x%08x ib_get 0x%08x ib_put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
+ chid, client_name, ho_get, dma_get, ho_put, dma_put,
+ ib_get, ib_put, state, nv_dma_state_err(state), push);
+
+ /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
+ nv_wr32(priv, 0x003364, 0x00000000);
+ if (dma_get != dma_put || ho_get != ho_put) {
+ nv_wr32(priv, 0x003244, dma_put);
+ nv_wr32(priv, 0x003328, ho_put);
+ } else
+ if (ib_get != ib_put)
+ nv_wr32(priv, 0x003334, ib_put);
+ } else {
+ nv_error(priv,
+ "DMA_PUSHER - ch %d [%s] get 0x%08x put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
+ chid, client_name, dma_get, dma_put, state,
+ nv_dma_state_err(state), push);
+
+ if (dma_get != dma_put)
+ nv_wr32(priv, 0x003244, dma_put);
+ }
+
+ nv_wr32(priv, 0x003228, 0x00000000);
+ nv_wr32(priv, 0x003220, 0x00000001);
+ nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+}
+
void
nv04_fifo_intr(struct nouveau_subdev *subdev)
{
@@ -416,96 +509,12 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);
if (status & NV_PFIFO_INTR_CACHE_ERROR) {
- uint32_t mthd, data;
- int ptr;
-
- /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
- * wrapping on my G80 chips, but CACHE1 isn't big
- * enough for this much data.. Tests show that it
- * wraps around to the start at GET=0x800.. No clue
- * as to why..
- */
- ptr = (get & 0x7ff) >> 2;
-
- if (device->card_type < NV_40) {
- mthd = nv_rd32(priv,
- NV04_PFIFO_CACHE1_METHOD(ptr));
- data = nv_rd32(priv,
- NV04_PFIFO_CACHE1_DATA(ptr));
- } else {
- mthd = nv_rd32(priv,
- NV40_PFIFO_CACHE1_METHOD(ptr));
- data = nv_rd32(priv,
- NV40_PFIFO_CACHE1_DATA(ptr));
- }
-
- if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
- nv_error(priv, "CACHE_ERROR - Ch %d/%d "
- "Mthd 0x%04x Data 0x%08x\n",
- chid, (mthd >> 13) & 7, mthd & 0x1ffc,
- data);
- }
-
- nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
- nv_wr32(priv, NV03_PFIFO_INTR_0,
- NV_PFIFO_INTR_CACHE_ERROR);
-
- nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
- nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1);
- nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
- nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
- nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1);
- nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0);
-
- nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH,
- nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
- nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
-
+ nv04_fifo_cache_error(device, priv, chid, get);
status &= ~NV_PFIFO_INTR_CACHE_ERROR;
}
if (status & NV_PFIFO_INTR_DMA_PUSHER) {
- u32 dma_get = nv_rd32(priv, 0x003244);
- u32 dma_put = nv_rd32(priv, 0x003240);
- u32 push = nv_rd32(priv, 0x003220);
- u32 state = nv_rd32(priv, 0x003228);
-
- if (device->card_type == NV_50) {
- u32 ho_get = nv_rd32(priv, 0x003328);
- u32 ho_put = nv_rd32(priv, 0x003320);
- u32 ib_get = nv_rd32(priv, 0x003334);
- u32 ib_put = nv_rd32(priv, 0x003330);
-
- nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
- "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
- "State 0x%08x (err: %s) Push 0x%08x\n",
- chid, ho_get, dma_get, ho_put,
- dma_put, ib_get, ib_put, state,
- nv_dma_state_err(state),
- push);
-
- /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
- nv_wr32(priv, 0x003364, 0x00000000);
- if (dma_get != dma_put || ho_get != ho_put) {
- nv_wr32(priv, 0x003244, dma_put);
- nv_wr32(priv, 0x003328, ho_put);
- } else
- if (ib_get != ib_put) {
- nv_wr32(priv, 0x003334, ib_put);
- }
- } else {
- nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
- "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
- chid, dma_get, dma_put, state,
- nv_dma_state_err(state), push);
-
- if (dma_get != dma_put)
- nv_wr32(priv, 0x003244, dma_put);
- }
-
- nv_wr32(priv, 0x003228, 0x00000000);
- nv_wr32(priv, 0x003220, 0x00000001);
- nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+ nv04_fifo_dma_pusher(device, priv, chid);
status &= ~NV_PFIFO_INTR_DMA_PUSHER;
}
@@ -528,6 +537,12 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
status &= ~0x00000010;
nv_wr32(priv, 0x002100, 0x00000010);
}
+
+ if (status & 0x40000000) {
+ nouveau_event_trigger(priv->base.uevent, 0);
+ nv_wr32(priv, 0x002100, 0x40000000);
+ status &= ~0x40000000;
+ }
}
if (status) {
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index bd096364f68..840af617278 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -129,7 +129,8 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
/* do the kickoff... */
nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
if (!nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff)) {
- nv_error(priv, "channel %d unload timeout\n", chan->base.chid);
+ nv_error(priv, "channel %d [%s] unload timeout\n",
+ chan->base.chid, nouveau_client_name(chan));
if (suspend)
ret = -EBUSY;
}
@@ -480,7 +481,7 @@ nv50_fifo_init(struct nouveau_object *object)
nv_wr32(priv, 0x002044, 0x01003fff);
nv_wr32(priv, 0x002100, 0xffffffff);
- nv_wr32(priv, 0x002140, 0xffffffff);
+ nv_wr32(priv, 0x002140, 0xbfffffff);
for (i = 0; i < 128; i++)
nv_wr32(priv, 0x002600 + (i * 4), 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index 1eb1c512f50..094000e8787 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -26,6 +26,7 @@
#include <core/client.h>
#include <core/engctx.h>
#include <core/ramht.h>
+#include <core/event.h>
#include <core/class.h>
#include <core/math.h>
@@ -100,7 +101,8 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff);
nv_wr32(priv, 0x002520, save);
if (!done) {
- nv_error(priv, "channel %d unload timeout\n", chan->base.chid);
+ nv_error(priv, "channel %d [%s] unload timeout\n",
+ chan->base.chid, nouveau_client_name(chan));
if (suspend)
return -EBUSY;
}
@@ -378,6 +380,20 @@ nv84_fifo_cclass = {
* PFIFO engine
******************************************************************************/
+static void
+nv84_fifo_uevent_enable(struct nouveau_event *event, int index)
+{
+ struct nv84_fifo_priv *priv = event->priv;
+ nv_mask(priv, 0x002140, 0x40000000, 0x40000000);
+}
+
+static void
+nv84_fifo_uevent_disable(struct nouveau_event *event, int index)
+{
+ struct nv84_fifo_priv *priv = event->priv;
+ nv_mask(priv, 0x002140, 0x40000000, 0x00000000);
+}
+
static int
nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -401,6 +417,10 @@ nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
+ priv->base.uevent->enable = nv84_fifo_uevent_enable;
+ priv->base.uevent->disable = nv84_fifo_uevent_disable;
+ priv->base.uevent->priv = priv;
+
nv_subdev(priv)->unit = 0x00000100;
nv_subdev(priv)->intr = nv04_fifo_intr;
nv_engine(priv)->cclass = &nv84_fifo_cclass;
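The nv84 constructor above (and the nvc0/nve0 ones later in this series) all follow the same pattern: the FIFO's uevent is pointed at a pair of callbacks that gate a single interrupt-enable bit in the 0x002140 mask register, so the hardware interrupt is only armed while someone is listening. A minimal standalone model of that refcount-driven enable/disable behaviour; the types and the "register" are local stand-ins, not the real nouveau core API:

/* Hedged sketch: models how an event's enable/disable hooks gate one
 * interrupt mask bit.  All names here are invented stand-ins. */
#include <stdio.h>

struct fake_event {
	int refs;
	unsigned int *mask_reg;   /* stands in for PFIFO 0x002140 */
	unsigned int bit;         /* e.g. 0x40000000 on nv84 */
};

static void fake_event_get(struct fake_event *ev)
{
	if (ev->refs++ == 0)
		*ev->mask_reg |= ev->bit;    /* "enable" callback fires */
}

static void fake_event_put(struct fake_event *ev)
{
	if (--ev->refs == 0)
		*ev->mask_reg &= ~ev->bit;   /* "disable" callback fires */
}

int main(void)
{
	unsigned int mask = 0;
	struct fake_event ev = { .refs = 0, .mask_reg = &mask, .bit = 0x40000000 };

	fake_event_get(&ev);   /* first listener arms the IRQ bit */
	printf("mask after get: 0x%08x\n", mask);
	fake_event_put(&ev);   /* last listener disarms it again */
	printf("mask after put: 0x%08x\n", mask);
	return 0;
}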
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index b4365dde185..4f226afb559 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -27,6 +27,7 @@
#include <core/namedb.h>
#include <core/gpuobj.h>
#include <core/engctx.h>
+#include <core/event.h>
#include <core/class.h>
#include <core/math.h>
#include <core/enum.h>
@@ -149,7 +150,8 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
nv_wr32(priv, 0x002634, chan->base.chid);
if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
- nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
+ nv_error(priv, "channel %d [%s] kick timeout\n",
+ chan->base.chid, nouveau_client_name(chan));
if (suspend)
return -EBUSY;
}
@@ -333,17 +335,17 @@ nvc0_fifo_cclass = {
******************************************************************************/
static const struct nouveau_enum nvc0_fifo_fault_unit[] = {
- { 0x00, "PGRAPH" },
+ { 0x00, "PGRAPH", NULL, NVDEV_ENGINE_GR },
{ 0x03, "PEEPHOLE" },
{ 0x04, "BAR1" },
{ 0x05, "BAR3" },
- { 0x07, "PFIFO" },
- { 0x10, "PBSP" },
- { 0x11, "PPPP" },
+ { 0x07, "PFIFO", NULL, NVDEV_ENGINE_FIFO },
+ { 0x10, "PBSP", NULL, NVDEV_ENGINE_BSP },
+ { 0x11, "PPPP", NULL, NVDEV_ENGINE_PPP },
{ 0x13, "PCOUNTER" },
- { 0x14, "PVP" },
- { 0x15, "PCOPY0" },
- { 0x16, "PCOPY1" },
+ { 0x14, "PVP", NULL, NVDEV_ENGINE_VP },
+ { 0x15, "PCOPY0", NULL, NVDEV_ENGINE_COPY0 },
+ { 0x16, "PCOPY1", NULL, NVDEV_ENGINE_COPY1 },
{ 0x17, "PDAEMON" },
{}
};
@@ -402,6 +404,9 @@ nvc0_fifo_isr_vm_fault(struct nvc0_fifo_priv *priv, int unit)
u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10));
u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10));
u32 client = (stat & 0x00001f00) >> 8;
+ const struct nouveau_enum *en;
+ struct nouveau_engine *engine;
+ struct nouveau_object *engctx = NULL;
switch (unit) {
case 3: /* PEEPHOLE */
@@ -420,16 +425,26 @@ nvc0_fifo_isr_vm_fault(struct nvc0_fifo_priv *priv, int unit)
nv_error(priv, "%s fault at 0x%010llx [", (stat & 0x00000080) ?
"write" : "read", (u64)vahi << 32 | valo);
nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
- printk("] from ");
- nouveau_enum_print(nvc0_fifo_fault_unit, unit);
+ pr_cont("] from ");
+ en = nouveau_enum_print(nvc0_fifo_fault_unit, unit);
if (stat & 0x00000040) {
- printk("/");
+ pr_cont("/");
nouveau_enum_print(nvc0_fifo_fault_hubclient, client);
} else {
- printk("/GPC%d/", (stat & 0x1f000000) >> 24);
+ pr_cont("/GPC%d/", (stat & 0x1f000000) >> 24);
nouveau_enum_print(nvc0_fifo_fault_gpcclient, client);
}
- printk(" on channel 0x%010llx\n", (u64)inst << 12);
+
+ if (en && en->data2) {
+ engine = nouveau_engine(priv, en->data2);
+ if (engine)
+ engctx = nouveau_engctx_get(engine, inst);
+
+ }
+ pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12,
+ nouveau_client_name(engctx));
+
+ nouveau_engctx_put(engctx);
}
static int
@@ -484,10 +499,12 @@ nvc0_fifo_isr_subfifo_intr(struct nvc0_fifo_priv *priv, int unit)
if (show) {
nv_error(priv, "SUBFIFO%d:", unit);
nouveau_bitfield_print(nvc0_fifo_subfifo_intr, show);
- printk("\n");
- nv_error(priv, "SUBFIFO%d: ch %d subc %d mthd 0x%04x "
- "data 0x%08x\n",
- unit, chid, subc, mthd, data);
+ pr_cont("\n");
+ nv_error(priv,
+ "SUBFIFO%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
+ unit, chid,
+ nouveau_client_name_for_fifo_chid(&priv->base, chid),
+ subc, mthd, data);
}
nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
@@ -501,12 +518,34 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
u32 mask = nv_rd32(priv, 0x002140);
u32 stat = nv_rd32(priv, 0x002100) & mask;
+ if (stat & 0x00000001) {
+ u32 intr = nv_rd32(priv, 0x00252c);
+ nv_warn(priv, "INTR 0x00000001: 0x%08x\n", intr);
+ nv_wr32(priv, 0x002100, 0x00000001);
+ stat &= ~0x00000001;
+ }
+
if (stat & 0x00000100) {
- nv_warn(priv, "unknown status 0x00000100\n");
+ u32 intr = nv_rd32(priv, 0x00254c);
+ nv_warn(priv, "INTR 0x00000100: 0x%08x\n", intr);
nv_wr32(priv, 0x002100, 0x00000100);
stat &= ~0x00000100;
}
+ if (stat & 0x00010000) {
+ u32 intr = nv_rd32(priv, 0x00256c);
+ nv_warn(priv, "INTR 0x00010000: 0x%08x\n", intr);
+ nv_wr32(priv, 0x002100, 0x00010000);
+ stat &= ~0x00010000;
+ }
+
+ if (stat & 0x01000000) {
+ u32 intr = nv_rd32(priv, 0x00258c);
+ nv_warn(priv, "INTR 0x01000000: 0x%08x\n", intr);
+ nv_wr32(priv, 0x002100, 0x01000000);
+ stat &= ~0x01000000;
+ }
+
if (stat & 0x10000000) {
u32 units = nv_rd32(priv, 0x00259c);
u32 u = units;
@@ -536,11 +575,20 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
}
if (stat & 0x40000000) {
- nv_warn(priv, "unknown status 0x40000000\n");
- nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
+ u32 intr0 = nv_rd32(priv, 0x0025a4);
+ u32 intr1 = nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
+ nv_debug(priv, "INTR 0x40000000: 0x%08x 0x%08x\n",
+ intr0, intr1);
stat &= ~0x40000000;
}
+ if (stat & 0x80000000) {
+ u32 intr = nv_mask(priv, 0x0025a8, 0x00000000, 0x00000000);
+ nouveau_event_trigger(priv->base.uevent, 0);
+ nv_debug(priv, "INTR 0x80000000: 0x%08x\n", intr);
+ stat &= ~0x80000000;
+ }
+
if (stat) {
nv_fatal(priv, "unhandled status 0x%08x\n", stat);
nv_wr32(priv, 0x002100, stat);
@@ -548,6 +596,20 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
}
}
+static void
+nvc0_fifo_uevent_enable(struct nouveau_event *event, int index)
+{
+ struct nvc0_fifo_priv *priv = event->priv;
+ nv_mask(priv, 0x002140, 0x80000000, 0x80000000);
+}
+
+static void
+nvc0_fifo_uevent_disable(struct nouveau_event *event, int index)
+{
+ struct nvc0_fifo_priv *priv = event->priv;
+ nv_mask(priv, 0x002140, 0x80000000, 0x00000000);
+}
+
static int
nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -581,6 +643,10 @@ nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
+ priv->base.uevent->enable = nvc0_fifo_uevent_enable;
+ priv->base.uevent->disable = nvc0_fifo_uevent_disable;
+ priv->base.uevent->priv = priv;
+
nv_subdev(priv)->unit = 0x00000100;
nv_subdev(priv)->intr = nvc0_fifo_intr;
nv_engine(priv)->cclass = &nvc0_fifo_cclass;
@@ -639,7 +705,8 @@ nvc0_fifo_init(struct nouveau_object *object)
nv_wr32(priv, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
nv_wr32(priv, 0x002100, 0xffffffff);
- nv_wr32(priv, 0x002140, 0xbfffffff);
+ nv_wr32(priv, 0x002140, 0x3fffffff);
+ nv_wr32(priv, 0x002628, 0x00000001); /* makes mthd 0x20 work */
return 0;
}
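The data2 field added to nvc0_fifo_fault_unit above lets the fault handler map a faulting unit straight to an engine, fetch the engine context for the faulting instance, and print the owning client's name instead of only a raw channel address. A simplified, self-contained model of that lookup chain; the table contents and the client map are invented for illustration:

/* Hedged sketch: models the fault-unit -> engine -> client-name lookup.
 * Table entries and the "client" name are made up. */
#include <stdio.h>
#include <stddef.h>

struct unit_entry {
	unsigned int value;
	const char *name;
	int engine;     /* plays the role of en->data2; 0 = no engine */
};

static const struct unit_entry fault_units[] = {
	{ 0x00, "PGRAPH", 1 },
	{ 0x07, "PFIFO",  2 },
	{ 0x13, "PCOUNTER", 0 },
	{ 0, NULL, 0 }
};

static const struct unit_entry *find_unit(unsigned int value)
{
	const struct unit_entry *en;
	for (en = fault_units; en->name; en++)
		if (en->value == value)
			return en;
	return NULL;
}

static const char *client_for_engine(int engine)
{
	/* real code walks the engine's context list for the instance;
	 * here we just fake a name */
	return engine ? "Xorg[1234]" : "unknown";
}

int main(void)
{
	unsigned long long inst = 0x3f2000;
	const struct unit_entry *en = find_unit(0x00);

	if (en && en->engine)
		printf("fault from %s on channel 0x%010llx [%s]\n",
		       en->name, inst << 12, client_for_engine(en->engine));
	return 0;
}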
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index c930da99c2c..4419e40d88e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -27,6 +27,7 @@
#include <core/namedb.h>
#include <core/gpuobj.h>
#include <core/engctx.h>
+#include <core/event.h>
#include <core/class.h>
#include <core/math.h>
#include <core/enum.h>
@@ -184,7 +185,8 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
nv_wr32(priv, 0x002634, chan->base.chid);
if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
- nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
+ nv_error(priv, "channel %d [%s] kick timeout\n",
+ chan->base.chid, nouveau_client_name(chan));
if (suspend)
return -EBUSY;
}
@@ -412,20 +414,34 @@ nve0_fifo_isr_vm_fault(struct nve0_fifo_priv *priv, int unit)
u32 vahi = nv_rd32(priv, 0x2808 + (unit * 0x10));
u32 stat = nv_rd32(priv, 0x280c + (unit * 0x10));
u32 client = (stat & 0x00001f00) >> 8;
+ const struct nouveau_enum *en;
+ struct nouveau_engine *engine;
+ struct nouveau_object *engctx = NULL;
nv_error(priv, "PFIFO: %s fault at 0x%010llx [", (stat & 0x00000080) ?
"write" : "read", (u64)vahi << 32 | valo);
nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
- printk("] from ");
- nouveau_enum_print(nve0_fifo_fault_unit, unit);
+ pr_cont("] from ");
+ en = nouveau_enum_print(nve0_fifo_fault_unit, unit);
if (stat & 0x00000040) {
- printk("/");
+ pr_cont("/");
nouveau_enum_print(nve0_fifo_fault_hubclient, client);
} else {
- printk("/GPC%d/", (stat & 0x1f000000) >> 24);
+ pr_cont("/GPC%d/", (stat & 0x1f000000) >> 24);
nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
}
- printk(" on channel 0x%010llx\n", (u64)inst << 12);
+
+ if (en && en->data2) {
+ engine = nouveau_engine(priv, en->data2);
+ if (engine)
+ engctx = nouveau_engctx_get(engine, inst);
+
+ }
+
+ pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12,
+ nouveau_client_name(engctx));
+
+ nouveau_engctx_put(engctx);
}
static int
@@ -480,10 +496,12 @@ nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit)
if (show) {
nv_error(priv, "SUBFIFO%d:", unit);
nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
- printk("\n");
- nv_error(priv, "SUBFIFO%d: ch %d subc %d mthd 0x%04x "
- "data 0x%08x\n",
- unit, chid, subc, mthd, data);
+ pr_cont("\n");
+ nv_error(priv,
+ "SUBFIFO%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
+ unit, chid,
+ nouveau_client_name_for_fifo_chid(&priv->base, chid),
+ subc, mthd, data);
}
nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
@@ -537,6 +555,12 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
stat &= ~0x40000000;
}
+ if (stat & 0x80000000) {
+ nouveau_event_trigger(priv->base.uevent, 0);
+ nv_wr32(priv, 0x002100, 0x80000000);
+ stat &= ~0x80000000;
+ }
+
if (stat) {
nv_fatal(priv, "unhandled status 0x%08x\n", stat);
nv_wr32(priv, 0x002100, stat);
@@ -544,6 +568,20 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
}
}
+static void
+nve0_fifo_uevent_enable(struct nouveau_event *event, int index)
+{
+ struct nve0_fifo_priv *priv = event->priv;
+ nv_mask(priv, 0x002140, 0x80000000, 0x80000000);
+}
+
+static void
+nve0_fifo_uevent_disable(struct nouveau_event *event, int index)
+{
+ struct nve0_fifo_priv *priv = event->priv;
+ nv_mask(priv, 0x002140, 0x80000000, 0x00000000);
+}
+
static int
nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -567,6 +605,10 @@ nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
+ priv->base.uevent->enable = nve0_fifo_uevent_enable;
+ priv->base.uevent->disable = nve0_fifo_uevent_disable;
+ priv->base.uevent->priv = priv;
+
nv_subdev(priv)->unit = 0x00000100;
nv_subdev(priv)->intr = nve0_fifo_intr;
nv_engine(priv)->cclass = &nve0_fifo_cclass;
@@ -617,7 +659,7 @@ nve0_fifo_init(struct nouveau_object *object)
nv_wr32(priv, 0x002a00, 0xffffffff);
nv_wr32(priv, 0x002100, 0xffffffff);
- nv_wr32(priv, 0x002140, 0xbfffffff);
+ nv_wr32(priv, 0x002140, 0x3fffffff);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
index e30a9c5ff1f..ad13dcdd15f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
@@ -22,6 +22,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <core/client.h>
#include <core/os.h>
#include <core/class.h>
#include <core/handle.h>
@@ -1297,16 +1298,17 @@ nv04_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
if (show) {
- nv_error(priv, "");
+ nv_error(priv, "%s", "");
nouveau_bitfield_print(nv04_graph_intr_name, show);
- printk(" nsource:");
+ pr_cont(" nsource:");
nouveau_bitfield_print(nv04_graph_nsource, nsource);
- printk(" nstatus:");
+ pr_cont(" nstatus:");
nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
- printk("\n");
- nv_error(priv, "ch %d/%d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, subc, class, mthd, data);
+ pr_cont("\n");
+ nv_error(priv,
+ "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, nouveau_client_name(chan), subc, class, mthd,
+ data);
}
nouveau_namedb_put(handle);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
index 5c0f843ea24..23c143aaa55 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
@@ -22,6 +22,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <core/client.h>
#include <core/os.h>
#include <core/class.h>
#include <core/handle.h>
@@ -1193,16 +1194,17 @@ nv10_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
if (show) {
- nv_error(priv, "");
+ nv_error(priv, "%s", "");
nouveau_bitfield_print(nv10_graph_intr_name, show);
- printk(" nsource:");
+ pr_cont(" nsource:");
nouveau_bitfield_print(nv04_graph_nsource, nsource);
- printk(" nstatus:");
+ pr_cont(" nstatus:");
nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
- printk("\n");
- nv_error(priv, "ch %d/%d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, subc, class, mthd, data);
+ pr_cont("\n");
+ nv_error(priv,
+ "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, nouveau_client_name(chan), subc, class, mthd,
+ data);
}
nouveau_namedb_put(handle);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
index 5b20401bf91..0607b980174 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
@@ -1,3 +1,4 @@
+#include <core/client.h>
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
@@ -224,15 +225,17 @@ nv20_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
if (show) {
- nv_error(priv, "");
+ nv_error(priv, "%s", "");
nouveau_bitfield_print(nv10_graph_intr_name, show);
- printk(" nsource:");
+ pr_cont(" nsource:");
nouveau_bitfield_print(nv04_graph_nsource, nsource);
- printk(" nstatus:");
+ pr_cont(" nstatus:");
nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
- printk("\n");
- nv_error(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, subc, class, mthd, data);
+ pr_cont("\n");
+ nv_error(priv,
+ "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, nouveau_client_name(engctx), subc, class, mthd,
+ data);
}
nouveau_engctx_put(engctx);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
index 0b36dd3deeb..17049d5c723 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
+#include <core/client.h>
#include <core/os.h>
#include <core/class.h>
#include <core/handle.h>
@@ -321,16 +322,17 @@ nv40_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
if (show) {
- nv_error(priv, "");
+ nv_error(priv, "%s", "");
nouveau_bitfield_print(nv10_graph_intr_name, show);
- printk(" nsource:");
+ pr_cont(" nsource:");
nouveau_bitfield_print(nv04_graph_nsource, nsource);
- printk(" nstatus:");
+ pr_cont(" nstatus:");
nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
- printk("\n");
- nv_error(priv, "ch %d [0x%08x] subc %d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, inst << 4, subc, class, mthd, data);
+ pr_cont("\n");
+ nv_error(priv,
+ "ch %d [0x%08x %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst << 4, nouveau_client_name(engctx), subc,
+ class, mthd, data);
}
nouveau_engctx_put(engctx);
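The graph interrupt handlers above switch from bare printk() to pr_cont() for the fragments that continue a line started by nv_error(); without continuation semantics each fragment would start a new log message. A minimal userspace model of the same build-a-line-in-pieces idea; the buffer and function names are illustrative, not the kernel's log machinery:

/* Hedged sketch: why continuation fragments must append to the current
 * message instead of starting a fresh one. */
#include <stdio.h>
#include <string.h>

static char line[256];

static void log_start(const char *prefix, const char *msg)
{
	snprintf(line, sizeof(line), "[drm] %s: %s", prefix, msg);
}

static void log_cont(const char *frag)   /* plays the role of pr_cont() */
{
	strncat(line, frag, sizeof(line) - strlen(line) - 1);
}

int main(void)
{
	log_start("PGRAPH", "ERROR");        /* like nv_error(priv, ...) */
	log_cont(" nsource: DATA_ERROR");    /* like pr_cont(" nsource:") */
	log_cont(" nstatus: BAD_ARGUMENT");
	puts(line);                          /* one coherent log line */
	return 0;
}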
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index b1c3d835b4c..f2b1a7a124f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -24,6 +24,7 @@
#include <core/os.h>
#include <core/class.h>
+#include <core/client.h>
#include <core/handle.h>
#include <core/engctx.h>
#include <core/enum.h>
@@ -418,7 +419,7 @@ nv50_priv_mp_trap(struct nv50_graph_priv *priv, int tpid, int display)
nv_error(priv, "TRAP_MP_EXEC - "
"TP %d MP %d: ", tpid, i);
nouveau_enum_print(nv50_mp_exec_error_names, status);
- printk(" at %06x warp %d, opcode %08x %08x\n",
+ pr_cont(" at %06x warp %d, opcode %08x %08x\n",
pc&0xffffff, pc >> 24,
oplow, ophigh);
}
@@ -532,7 +533,7 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
static int
nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
- int chid, u64 inst)
+ int chid, u64 inst, struct nouveau_object *engctx)
{
u32 status = nv_rd32(priv, 0x400108);
u32 ustatus;
@@ -565,12 +566,11 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
nv_error(priv, "TRAP DISPATCH_FAULT\n");
if (display && (addr & 0x80000000)) {
- nv_error(priv, "ch %d [0x%010llx] "
- "subc %d class 0x%04x mthd 0x%04x "
- "data 0x%08x%08x "
- "400808 0x%08x 400848 0x%08x\n",
- chid, inst, subc, class, mthd, datah,
- datal, addr, r848);
+ nv_error(priv,
+ "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x%08x 400808 0x%08x 400848 0x%08x\n",
+ chid, inst,
+ nouveau_client_name(engctx), subc,
+ class, mthd, datah, datal, addr, r848);
} else
if (display) {
nv_error(priv, "no stuck command?\n");
@@ -591,11 +591,11 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
nv_error(priv, "TRAP DISPATCH_QUERY\n");
if (display && (addr & 0x80000000)) {
- nv_error(priv, "ch %d [0x%010llx] "
- "subc %d class 0x%04x mthd 0x%04x "
- "data 0x%08x 40084c 0x%08x\n",
- chid, inst, subc, class, mthd,
- data, addr);
+ nv_error(priv,
+ "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x 40084c 0x%08x\n",
+ chid, inst,
+ nouveau_client_name(engctx), subc,
+ class, mthd, data, addr);
} else
if (display) {
nv_error(priv, "no stuck command?\n");
@@ -623,7 +623,7 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
if (display) {
nv_error(priv, "TRAP_M2MF");
nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
- printk("\n");
+ pr_cont("\n");
nv_error(priv, "TRAP_M2MF %08x %08x %08x %08x\n",
nv_rd32(priv, 0x406804), nv_rd32(priv, 0x406808),
nv_rd32(priv, 0x40680c), nv_rd32(priv, 0x406810));
@@ -644,7 +644,7 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
if (display) {
nv_error(priv, "TRAP_VFETCH");
nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
- printk("\n");
+ pr_cont("\n");
nv_error(priv, "TRAP_VFETCH %08x %08x %08x %08x\n",
nv_rd32(priv, 0x400c00), nv_rd32(priv, 0x400c08),
nv_rd32(priv, 0x400c0c), nv_rd32(priv, 0x400c10));
@@ -661,7 +661,7 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
if (display) {
nv_error(priv, "TRAP_STRMOUT");
nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
- printk("\n");
+ pr_cont("\n");
nv_error(priv, "TRAP_STRMOUT %08x %08x %08x %08x\n",
nv_rd32(priv, 0x401804), nv_rd32(priv, 0x401808),
nv_rd32(priv, 0x40180c), nv_rd32(priv, 0x401810));
@@ -682,7 +682,7 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
if (display) {
nv_error(priv, "TRAP_CCACHE");
nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
- printk("\n");
+ pr_cont("\n");
nv_error(priv, "TRAP_CCACHE %08x %08x %08x %08x"
" %08x %08x %08x\n",
nv_rd32(priv, 0x405000), nv_rd32(priv, 0x405004),
@@ -774,11 +774,12 @@ nv50_graph_intr(struct nouveau_subdev *subdev)
u32 ecode = nv_rd32(priv, 0x400110);
nv_error(priv, "DATA_ERROR ");
nouveau_enum_print(nv50_data_error_names, ecode);
- printk("\n");
+ pr_cont("\n");
}
if (stat & 0x00200000) {
- if (!nv50_graph_trap_handler(priv, show, chid, (u64)inst << 12))
+ if (!nv50_graph_trap_handler(priv, show, chid, (u64)inst << 12,
+ engctx))
show &= ~0x00200000;
}
@@ -786,12 +787,13 @@ nv50_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x400500, 0x00010001);
if (show) {
- nv_error(priv, "");
+ nv_error(priv, "%s", "");
nouveau_bitfield_print(nv50_graph_intr_name, show);
- printk("\n");
- nv_error(priv, "ch %d [0x%010llx] subc %d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, (u64)inst << 12, subc, class, mthd, data);
+ pr_cont("\n");
+ nv_error(priv,
+ "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, (u64)inst << 12, nouveau_client_name(engctx),
+ subc, class, mthd, data);
}
if (nv_rd32(priv, 0x400824) & (1 << 31))
@@ -907,9 +909,8 @@ nv50_graph_init(struct nouveau_object *object)
nv_wr32(priv, 0x400828, 0x00000000);
nv_wr32(priv, 0x40082c, 0x00000000);
nv_wr32(priv, 0x400830, 0x00000000);
- nv_wr32(priv, 0x400724, 0x00000000);
nv_wr32(priv, 0x40032c, 0x00000000);
- nv_wr32(priv, 0x400320, 4); /* CTXCTL_CMD = NEWCTXDMA */
+ nv_wr32(priv, 0x400330, 0x00000000);
/* some unknown zcull magic */
switch (nv_device(priv)->chipset & 0xf0) {
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 45aff5f5085..0de0dd724af 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -433,10 +433,10 @@ nvc0_graph_intr(struct nouveau_subdev *subdev)
if (stat & 0x00000010) {
handle = nouveau_handle_get_class(engctx, class);
if (!handle || nv_call(handle->object, mthd, data)) {
- nv_error(priv, "ILLEGAL_MTHD ch %d [0x%010llx] "
- "subc %d class 0x%04x mthd 0x%04x "
- "data 0x%08x\n",
- chid, inst << 12, subc, class, mthd, data);
+ nv_error(priv,
+ "ILLEGAL_MTHD ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst << 12, nouveau_client_name(engctx),
+ subc, class, mthd, data);
}
nouveau_handle_put(handle);
nv_wr32(priv, 0x400100, 0x00000010);
@@ -444,9 +444,10 @@ nvc0_graph_intr(struct nouveau_subdev *subdev)
}
if (stat & 0x00000020) {
- nv_error(priv, "ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
- "class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, inst << 12, subc, class, mthd, data);
+ nv_error(priv,
+ "ILLEGAL_CLASS ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst << 12, nouveau_client_name(engctx), subc,
+ class, mthd, data);
nv_wr32(priv, 0x400100, 0x00000020);
stat &= ~0x00000020;
}
@@ -454,15 +455,16 @@ nvc0_graph_intr(struct nouveau_subdev *subdev)
if (stat & 0x00100000) {
nv_error(priv, "DATA_ERROR [");
nouveau_enum_print(nv50_data_error_names, code);
- printk("] ch %d [0x%010llx] subc %d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, inst << 12, subc, class, mthd, data);
+ pr_cont("] ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst << 12, nouveau_client_name(engctx), subc,
+ class, mthd, data);
nv_wr32(priv, 0x400100, 0x00100000);
stat &= ~0x00100000;
}
if (stat & 0x00200000) {
- nv_error(priv, "TRAP ch %d [0x%010llx]\n", chid, inst << 12);
+ nv_error(priv, "TRAP ch %d [0x%010llx %s]\n", chid, inst << 12,
+ nouveau_client_name(engctx));
nvc0_graph_trap_intr(priv);
nv_wr32(priv, 0x400100, 0x00200000);
stat &= ~0x00200000;
@@ -611,10 +613,8 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
static void
nvc0_graph_dtor_fw(struct nvc0_graph_fuc *fuc)
{
- if (fuc->data) {
- kfree(fuc->data);
- fuc->data = NULL;
- }
+ kfree(fuc->data);
+ fuc->data = NULL;
}
void
@@ -622,8 +622,7 @@ nvc0_graph_dtor(struct nouveau_object *object)
{
struct nvc0_graph_priv *priv = (void *)object;
- if (priv->data)
- kfree(priv->data);
+ kfree(priv->data);
nvc0_graph_dtor_fw(&priv->fuc409c);
nvc0_graph_dtor_fw(&priv->fuc409d);
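The destructor cleanups above rely on kfree() being a no-op for a NULL pointer, so the surrounding if-checks were dead weight; clearing the pointer afterwards still keeps a repeated dtor call harmless. The same shape in a self-contained form, with fake_fuc standing in for the firmware container:

/* Hedged sketch: free() (like kfree()) accepts NULL, so the guard goes;
 * nulling the pointer keeps the destructor re-entrant. */
#include <stdlib.h>

struct fake_fuc {
	void *data;
	size_t size;
};

static void fake_fuc_dtor(struct fake_fuc *fuc)
{
	free(fuc->data);   /* safe even if fuc->data is already NULL */
	fuc->data = NULL;
	fuc->size = 0;
}

int main(void)
{
	struct fake_fuc fuc = { .data = malloc(64), .size = 64 };
	fake_fuc_dtor(&fuc);
	fake_fuc_dtor(&fuc);   /* second call is harmless */
	return 0;
}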
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
index 9f82e9702b4..61cec0f6ff1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
@@ -78,15 +78,16 @@ nve0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
}
static void
-nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst)
+nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst,
+ struct nouveau_object *engctx)
{
u32 trap = nv_rd32(priv, 0x400108);
int rop;
if (trap & 0x00000001) {
u32 stat = nv_rd32(priv, 0x404000);
- nv_error(priv, "DISPATCH ch %d [0x%010llx] 0x%08x\n",
- chid, inst, stat);
+ nv_error(priv, "DISPATCH ch %d [0x%010llx %s] 0x%08x\n",
+ chid, inst, nouveau_client_name(engctx), stat);
nv_wr32(priv, 0x404000, 0xc0000000);
nv_wr32(priv, 0x400108, 0x00000001);
trap &= ~0x00000001;
@@ -94,8 +95,8 @@ nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst)
if (trap & 0x00000010) {
u32 stat = nv_rd32(priv, 0x405840);
- nv_error(priv, "SHADER ch %d [0x%010llx] 0x%08x\n",
- chid, inst, stat);
+ nv_error(priv, "SHADER ch %d [0x%010llx %s] 0x%08x\n",
+ chid, inst, nouveau_client_name(engctx), stat);
nv_wr32(priv, 0x405840, 0xc0000000);
nv_wr32(priv, 0x400108, 0x00000010);
trap &= ~0x00000010;
@@ -105,8 +106,10 @@ nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst)
for (rop = 0; rop < priv->rop_nr; rop++) {
u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070));
u32 statc = nv_rd32(priv, ROP_UNIT(rop, 0x144));
- nv_error(priv, "ROP%d ch %d [0x%010llx] 0x%08x 0x%08x\n",
- rop, chid, inst, statz, statc);
+ nv_error(priv,
+ "ROP%d ch %d [0x%010llx %s] 0x%08x 0x%08x\n",
+ rop, chid, inst, nouveau_client_name(engctx),
+ statz, statc);
nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
}
@@ -115,8 +118,8 @@ nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst)
}
if (trap) {
- nv_error(priv, "TRAP ch %d [0x%010llx] 0x%08x\n",
- chid, inst, trap);
+ nv_error(priv, "TRAP ch %d [0x%010llx %s] 0x%08x\n",
+ chid, inst, nouveau_client_name(engctx), trap);
nv_wr32(priv, 0x400108, trap);
}
}
@@ -145,10 +148,10 @@ nve0_graph_intr(struct nouveau_subdev *subdev)
if (stat & 0x00000010) {
handle = nouveau_handle_get_class(engctx, class);
if (!handle || nv_call(handle->object, mthd, data)) {
- nv_error(priv, "ILLEGAL_MTHD ch %d [0x%010llx] "
- "subc %d class 0x%04x mthd 0x%04x "
- "data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
+ nv_error(priv,
+ "ILLEGAL_MTHD ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, nouveau_client_name(engctx), subc,
+ class, mthd, data);
}
nouveau_handle_put(handle);
nv_wr32(priv, 0x400100, 0x00000010);
@@ -156,9 +159,10 @@ nve0_graph_intr(struct nouveau_subdev *subdev)
}
if (stat & 0x00000020) {
- nv_error(priv, "ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
- "class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
+ nv_error(priv,
+ "ILLEGAL_CLASS ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, nouveau_client_name(engctx), subc, class,
+ mthd, data);
nv_wr32(priv, 0x400100, 0x00000020);
stat &= ~0x00000020;
}
@@ -166,15 +170,15 @@ nve0_graph_intr(struct nouveau_subdev *subdev)
if (stat & 0x00100000) {
nv_error(priv, "DATA_ERROR [");
nouveau_enum_print(nv50_data_error_names, code);
- printk("] ch %d [0x%010llx] subc %d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
+ pr_cont("] ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, nouveau_client_name(engctx), subc, class,
+ mthd, data);
nv_wr32(priv, 0x400100, 0x00100000);
stat &= ~0x00100000;
}
if (stat & 0x00200000) {
- nve0_graph_trap_isr(priv, chid, inst);
+ nve0_graph_trap_isr(priv, chid, inst, engctx);
nv_wr32(priv, 0x400100, 0x00200000);
stat &= ~0x00200000;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index 9fd86375f4c..49ecbb859b2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
+#include <core/client.h>
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
@@ -231,8 +232,10 @@ nv31_mpeg_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x00b230, 0x00000001);
if (show) {
- nv_error(priv, "ch %d [0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
- chid, inst << 4, stat, type, mthd, data);
+ nv_error(priv,
+ "ch %d [0x%08x %s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ chid, inst << 4, nouveau_client_name(engctx), stat,
+ type, mthd, data);
}
nouveau_engctx_put(engctx);
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index b0e7e1c01ce..c48e7495377 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -28,6 +28,9 @@
#include <core/namedb.h>
#include <core/handle.h>
#include <core/gpuobj.h>
+#include <core/event.h>
+
+#include <subdev/bar.h>
#include <engine/software.h>
#include <engine/disp.h>
@@ -90,18 +93,11 @@ nv50_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
{
struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
struct nouveau_disp *disp = nouveau_disp(object);
- unsigned long flags;
u32 crtc = *(u32 *)args;
-
if (crtc > 1)
return -EINVAL;
- disp->vblank.get(disp->vblank.data, crtc);
-
- spin_lock_irqsave(&disp->vblank.lock, flags);
- list_add(&chan->base.vblank.head, &disp->vblank.list);
- chan->base.vblank.crtc = crtc;
- spin_unlock_irqrestore(&disp->vblank.lock, flags);
+ nouveau_event_get(disp->vblank, crtc, &chan->base.vblank.event);
return 0;
}
@@ -136,6 +132,29 @@ nv50_software_sclass[] = {
******************************************************************************/
static int
+nv50_software_vblsem_release(struct nouveau_eventh *event, int head)
+{
+ struct nouveau_software_chan *chan =
+ container_of(event, struct nouveau_software_chan, vblank.event);
+ struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
+ struct nouveau_bar *bar = nouveau_bar(priv);
+
+ nv_wr32(priv, 0x001704, chan->vblank.channel);
+ nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
+ bar->flush(bar);
+
+ if (nv_device(priv)->chipset == 0x50) {
+ nv_wr32(priv, 0x001570, chan->vblank.offset);
+ nv_wr32(priv, 0x001574, chan->vblank.value);
+ } else {
+ nv_wr32(priv, 0x060010, chan->vblank.offset);
+ nv_wr32(priv, 0x060014, chan->vblank.value);
+ }
+
+ return NVKM_EVENT_DROP;
+}
+
+static int
nv50_software_context_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -150,6 +169,7 @@ nv50_software_context_ctor(struct nouveau_object *parent,
return ret;
chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
+ chan->base.vblank.event.func = nv50_software_vblsem_release;
return 0;
}
@@ -170,8 +190,8 @@ nv50_software_cclass = {
static int
nv50_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
struct nv50_software_priv *priv;
int ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
index 282a1cd1bc2..a523eaad47e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -25,6 +25,9 @@
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
+#include <core/event.h>
+
+#include <subdev/bar.h>
#include <engine/software.h>
#include <engine/disp.h>
@@ -72,18 +75,12 @@ nvc0_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
{
struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
struct nouveau_disp *disp = nouveau_disp(object);
- unsigned long flags;
u32 crtc = *(u32 *)args;
if ((nv_device(object)->card_type < NV_E0 && crtc > 1) || crtc > 3)
return -EINVAL;
- disp->vblank.get(disp->vblank.data, crtc);
-
- spin_lock_irqsave(&disp->vblank.lock, flags);
- list_add(&chan->base.vblank.head, &disp->vblank.list);
- chan->base.vblank.crtc = crtc;
- spin_unlock_irqrestore(&disp->vblank.lock, flags);
+ nouveau_event_get(disp->vblank, crtc, &chan->base.vblank.event);
return 0;
}
@@ -118,6 +115,23 @@ nvc0_software_sclass[] = {
******************************************************************************/
static int
+nvc0_software_vblsem_release(struct nouveau_eventh *event, int head)
+{
+ struct nouveau_software_chan *chan =
+ container_of(event, struct nouveau_software_chan, vblank.event);
+ struct nvc0_software_priv *priv = (void *)nv_object(chan)->engine;
+ struct nouveau_bar *bar = nouveau_bar(priv);
+
+ nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
+ bar->flush(bar);
+ nv_wr32(priv, 0x06000c, upper_32_bits(chan->vblank.offset));
+ nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset));
+ nv_wr32(priv, 0x060014, chan->vblank.value);
+
+ return NVKM_EVENT_DROP;
+}
+
+static int
nvc0_software_context_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -132,6 +146,7 @@ nvc0_software_context_ctor(struct nouveau_object *parent,
return ret;
chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
+ chan->base.vblank.event.func = nvc0_software_vblsem_release;
return 0;
}
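Both software engines now register their vblank semaphore release as an event handler and return NVKM_EVENT_DROP, so the handler is removed automatically after firing once; the old per-display list, lock and crtc bookkeeping go away. A small standalone model of that one-shot dispatch; the list handling is simplified and all names are local:

/* Hedged sketch: a trigger that unlinks handlers returning "DROP",
 * mirroring the one-shot vblank semaphore release. */
#include <stdio.h>

#define EVENT_DROP 0
#define EVENT_KEEP 1

struct handler {
	int (*func)(struct handler *, int index);
	struct handler *next;
};

static int vblsem_release(struct handler *h, int index)
{
	printf("release semaphore for head %d\n", index);
	return EVENT_DROP;   /* fire once, then unregister */
}

static void event_trigger(struct handler **list, int index)
{
	struct handler **pp = list;
	while (*pp) {
		if ((*pp)->func(*pp, index) == EVENT_DROP)
			*pp = (*pp)->next;   /* unlink one-shot handler */
		else
			pp = &(*pp)->next;
	}
}

int main(void)
{
	struct handler h = { .func = vblsem_release, .next = NULL };
	struct handler *list = &h;

	event_trigger(&list, 0);   /* fires and drops */
	event_trigger(&list, 0);   /* nothing left to run */
	return 0;
}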
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 47c4b3a5bd3..92d3ab11d96 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -154,6 +154,14 @@ struct nve0_channel_ind_class {
u32 engine;
};
+/* 0046: NV04_DISP
+ */
+
+#define NV04_DISP_CLASS 0x00000046
+
+struct nv04_display_class {
+};
+
/* 5070: NV50_DISP
* 8270: NV84_DISP
* 8370: NVA0_DISP
@@ -190,25 +198,6 @@ struct nve0_channel_ind_class {
#define NV84_DISP_SOR_HDMI_PWR_REKEY 0x0000007f
#define NV50_DISP_SOR_LVDS_SCRIPT 0x00013000
#define NV50_DISP_SOR_LVDS_SCRIPT_ID 0x0000ffff
-#define NV94_DISP_SOR_DP_TRAIN 0x00016000
-#define NV94_DISP_SOR_DP_TRAIN_OP 0xf0000000
-#define NV94_DISP_SOR_DP_TRAIN_OP_PATTERN 0x00000000
-#define NV94_DISP_SOR_DP_TRAIN_OP_INIT 0x10000000
-#define NV94_DISP_SOR_DP_TRAIN_OP_FINI 0x20000000
-#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD 0x00000001
-#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF 0x00000000
-#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON 0x00000001
-#define NV94_DISP_SOR_DP_TRAIN_PATTERN 0x00000003
-#define NV94_DISP_SOR_DP_TRAIN_PATTERN_DISABLED 0x00000000
-#define NV94_DISP_SOR_DP_LNKCTL 0x00016040
-#define NV94_DISP_SOR_DP_LNKCTL_FRAME 0x80000000
-#define NV94_DISP_SOR_DP_LNKCTL_FRAME_STD 0x00000000
-#define NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH 0x80000000
-#define NV94_DISP_SOR_DP_LNKCTL_WIDTH 0x00001f00
-#define NV94_DISP_SOR_DP_LNKCTL_COUNT 0x00000007
-#define NV94_DISP_SOR_DP_DRVCTL(l) ((l) * 0x40 + 0x00016100)
-#define NV94_DISP_SOR_DP_DRVCTL_VS 0x00000300
-#define NV94_DISP_SOR_DP_DRVCTL_PE 0x00000003
#define NV50_DISP_DAC_MTHD 0x00020000
#define NV50_DISP_DAC_MTHD_TYPE 0x0000f000
@@ -230,6 +219,23 @@ struct nve0_channel_ind_class {
#define NV50_DISP_DAC_LOAD 0x0002000c
#define NV50_DISP_DAC_LOAD_VALUE 0x00000007
+#define NV50_DISP_PIOR_MTHD 0x00030000
+#define NV50_DISP_PIOR_MTHD_TYPE 0x0000f000
+#define NV50_DISP_PIOR_MTHD_OR 0x00000003
+
+#define NV50_DISP_PIOR_PWR 0x00030000
+#define NV50_DISP_PIOR_PWR_STATE 0x00000001
+#define NV50_DISP_PIOR_PWR_STATE_ON 0x00000001
+#define NV50_DISP_PIOR_PWR_STATE_OFF 0x00000000
+#define NV50_DISP_PIOR_TMDS_PWR 0x00032000
+#define NV50_DISP_PIOR_TMDS_PWR_STATE 0x00000001
+#define NV50_DISP_PIOR_TMDS_PWR_STATE_ON 0x00000001
+#define NV50_DISP_PIOR_TMDS_PWR_STATE_OFF 0x00000000
+#define NV50_DISP_PIOR_DP_PWR 0x00036000
+#define NV50_DISP_PIOR_DP_PWR_STATE 0x00000001
+#define NV50_DISP_PIOR_DP_PWR_STATE_ON 0x00000001
+#define NV50_DISP_PIOR_DP_PWR_STATE_OFF 0x00000000
+
struct nv50_display_class {
};
diff --git a/drivers/gpu/drm/nouveau/core/include/core/client.h b/drivers/gpu/drm/nouveau/core/include/core/client.h
index 63acc0346ff..c66eac51380 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/client.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/client.h
@@ -7,7 +7,7 @@ struct nouveau_client {
struct nouveau_namedb base;
struct nouveau_handle *root;
struct nouveau_object *device;
- char name[16];
+ char name[32];
u32 debug;
struct nouveau_vm *vm;
};
@@ -41,5 +41,6 @@ int nouveau_client_create_(const char *name, u64 device, const char *cfg,
int nouveau_client_init(struct nouveau_client *);
int nouveau_client_fini(struct nouveau_client *, bool suspend);
+const char *nouveau_client_name(void *obj);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
index e58b6f0984c..d351a4e5819 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -26,6 +26,7 @@ enum nv_subdev_type {
*/
NVDEV_SUBDEV_MXM,
NVDEV_SUBDEV_MC,
+ NVDEV_SUBDEV_BUS,
NVDEV_SUBDEV_TIMER,
NVDEV_SUBDEV_FB,
NVDEV_SUBDEV_LTCG,
diff --git a/drivers/gpu/drm/nouveau/core/include/core/enum.h b/drivers/gpu/drm/nouveau/core/include/core/enum.h
index e7b1e181943..4fc62bb8c1f 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/enum.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/enum.h
@@ -5,12 +5,13 @@ struct nouveau_enum {
u32 value;
const char *name;
const void *data;
+ u32 data2;
};
const struct nouveau_enum *
nouveau_enum_find(const struct nouveau_enum *, u32 value);
-void
+const struct nouveau_enum *
nouveau_enum_print(const struct nouveau_enum *en, u32 value);
struct nouveau_bitfield {
diff --git a/drivers/gpu/drm/nouveau/core/include/core/event.h b/drivers/gpu/drm/nouveau/core/include/core/event.h
new file mode 100644
index 00000000000..9e094408f14
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/event.h
@@ -0,0 +1,36 @@
+#ifndef __NVKM_EVENT_H__
+#define __NVKM_EVENT_H__
+
+/* return codes from event handlers */
+#define NVKM_EVENT_DROP 0
+#define NVKM_EVENT_KEEP 1
+
+struct nouveau_eventh {
+ struct list_head head;
+ int (*func)(struct nouveau_eventh *, int index);
+};
+
+struct nouveau_event {
+ spinlock_t lock;
+
+ void *priv;
+ void (*enable)(struct nouveau_event *, int index);
+ void (*disable)(struct nouveau_event *, int index);
+
+ int index_nr;
+ struct {
+ struct list_head list;
+ int refs;
+ } index[];
+};
+
+int nouveau_event_create(int index_nr, struct nouveau_event **);
+void nouveau_event_destroy(struct nouveau_event **);
+void nouveau_event_trigger(struct nouveau_event *, int index);
+
+void nouveau_event_get(struct nouveau_event *, int index,
+ struct nouveau_eventh *);
+void nouveau_event_put(struct nouveau_event *, int index,
+ struct nouveau_eventh *);
+
+#endif
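The core/event.h API added above is intentionally small: a provider creates an event with N indices, consumers register a nouveau_eventh whose func decides per trigger whether to stay registered, and the provider's enable/disable hooks run around the first get and last put on an index. A hedged consumer-side sketch written only against the declarations shown above; it assumes the nouveau core build environment (container_of via core/os.h), and the my_counter type and index value are invented:

/* Hedged sketch: a consumer of the event API declared above.  Only the
 * declarations from core/event.h are used; my_counter is illustrative. */
#include <core/os.h>
#include <core/event.h>

struct my_counter {
	struct nouveau_eventh handler;   /* embedded so container_of() works */
	int hits;
};

static int
my_counter_func(struct nouveau_eventh *event, int index)
{
	struct my_counter *ctr = container_of(event, struct my_counter, handler);
	ctr->hits++;
	return NVKM_EVENT_KEEP;          /* stay registered after each trigger */
}

static void
my_counter_attach(struct nouveau_event *event, struct my_counter *ctr, int index)
{
	ctr->hits = 0;
	ctr->handler.func = my_counter_func;
	nouveau_event_get(event, index, &ctr->handler);   /* may call ->enable */
}

static void
my_counter_detach(struct nouveau_event *event, struct my_counter *ctr, int index)
{
	nouveau_event_put(event, index, &ctr->handler);   /* may call ->disable */
}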
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h
index 106bb19fdd9..62e68baef08 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/object.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/object.h
@@ -136,7 +136,7 @@ static inline u8
nv_ro08(void *obj, u64 addr)
{
u8 data = nv_ofuncs(obj)->rd08(obj, addr);
- nv_spam(obj, "nv_ro08 0x%08x 0x%02x\n", addr, data);
+ nv_spam(obj, "nv_ro08 0x%08llx 0x%02x\n", addr, data);
return data;
}
@@ -144,7 +144,7 @@ static inline u16
nv_ro16(void *obj, u64 addr)
{
u16 data = nv_ofuncs(obj)->rd16(obj, addr);
- nv_spam(obj, "nv_ro16 0x%08x 0x%04x\n", addr, data);
+ nv_spam(obj, "nv_ro16 0x%08llx 0x%04x\n", addr, data);
return data;
}
@@ -152,28 +152,28 @@ static inline u32
nv_ro32(void *obj, u64 addr)
{
u32 data = nv_ofuncs(obj)->rd32(obj, addr);
- nv_spam(obj, "nv_ro32 0x%08x 0x%08x\n", addr, data);
+ nv_spam(obj, "nv_ro32 0x%08llx 0x%08x\n", addr, data);
return data;
}
static inline void
nv_wo08(void *obj, u64 addr, u8 data)
{
- nv_spam(obj, "nv_wo08 0x%08x 0x%02x\n", addr, data);
+ nv_spam(obj, "nv_wo08 0x%08llx 0x%02x\n", addr, data);
nv_ofuncs(obj)->wr08(obj, addr, data);
}
static inline void
nv_wo16(void *obj, u64 addr, u16 data)
{
- nv_spam(obj, "nv_wo16 0x%08x 0x%04x\n", addr, data);
+ nv_spam(obj, "nv_wo16 0x%08llx 0x%04x\n", addr, data);
nv_ofuncs(obj)->wr16(obj, addr, data);
}
static inline void
nv_wo32(void *obj, u64 addr, u32 data)
{
- nv_spam(obj, "nv_wo32 0x%08x 0x%08x\n", addr, data);
+ nv_spam(obj, "nv_wo32 0x%08llx 0x%08x\n", addr, data);
nv_ofuncs(obj)->wr32(obj, addr, data);
}
diff --git a/drivers/gpu/drm/nouveau/core/include/core/printk.h b/drivers/gpu/drm/nouveau/core/include/core/printk.h
index 1d629664f32..febed2ea5c8 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/printk.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/printk.h
@@ -15,7 +15,8 @@ struct nouveau_object;
#define NV_PRINTK_TRACE KERN_DEBUG
#define NV_PRINTK_SPAM KERN_DEBUG
-void nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
+void __printf(4, 5)
+nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
#define nv_printk(o,l,f,a...) do { \
if (NV_DBG_##l <= CONFIG_NOUVEAU_DEBUG) \
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
index 46948285f3e..28da6772c09 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -4,18 +4,11 @@
#include <core/object.h>
#include <core/engine.h>
#include <core/device.h>
+#include <core/event.h>
struct nouveau_disp {
struct nouveau_engine base;
-
- struct {
- struct list_head list;
- spinlock_t lock;
- void (*notify)(void *, int);
- void (*get)(void *, int);
- void (*put)(void *, int);
- void *data;
- } vblank;
+ struct nouveau_event *vblank;
};
static inline struct nouveau_disp *
@@ -24,16 +17,22 @@ nouveau_disp(void *obj)
return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_DISP];
}
-#define nouveau_disp_create(p,e,c,i,x,d) \
- nouveau_engine_create((p), (e), (c), true, (i), (x), (d))
-#define nouveau_disp_destroy(d) \
- nouveau_engine_destroy(&(d)->base)
+#define nouveau_disp_create(p,e,c,h,i,x,d) \
+ nouveau_disp_create_((p), (e), (c), (h), (i), (x), \
+ sizeof(**d), (void **)d)
+#define nouveau_disp_destroy(d) ({ \
+ struct nouveau_disp *disp = (d); \
+ _nouveau_disp_dtor(nv_object(disp)); \
+})
#define nouveau_disp_init(d) \
nouveau_engine_init(&(d)->base)
#define nouveau_disp_fini(d,s) \
nouveau_engine_fini(&(d)->base, (s))
-#define _nouveau_disp_dtor _nouveau_engine_dtor
+int nouveau_disp_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int heads,
+ const char *, const char *, int, void **);
+void _nouveau_disp_dtor(struct nouveau_object *);
#define _nouveau_disp_init _nouveau_engine_init
#define _nouveau_disp_fini _nouveau_engine_fini
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index f18846c8c6f..b46c197709f 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -65,6 +65,8 @@ struct nouveau_fifo_base {
struct nouveau_fifo {
struct nouveau_engine base;
+ struct nouveau_event *uevent;
+
struct nouveau_object **channel;
spinlock_t lock;
u16 min;
@@ -92,6 +94,8 @@ int nouveau_fifo_create_(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, int min, int max,
int size, void **);
void nouveau_fifo_destroy(struct nouveau_fifo *);
+const char *
+nouveau_client_name_for_fifo_chid(struct nouveau_fifo *fifo, u32 chid);
#define _nouveau_fifo_init _nouveau_engine_init
#define _nouveau_fifo_fini _nouveau_engine_fini
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/software.h b/drivers/gpu/drm/nouveau/core/include/engine/software.h
index c945691c856..45799487e57 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/software.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/software.h
@@ -3,17 +3,17 @@
#include <core/engine.h>
#include <core/engctx.h>
+#include <core/event.h>
struct nouveau_software_chan {
struct nouveau_engctx base;
struct {
- struct list_head head;
+ struct nouveau_eventh event;
u32 channel;
u32 ctxdma;
u64 offset;
u32 value;
- u32 crtc;
} vblank;
int (*flip)(void *);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
index b79025da581..123270e9813 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
@@ -16,6 +16,8 @@ enum dcb_output_type {
struct dcb_output {
int index; /* may not be raw dcb index if merging has happened */
+ u16 hasht;
+ u16 hashm;
enum dcb_output_type type;
uint8_t i2c_index;
uint8_t heads;
@@ -25,6 +27,7 @@ struct dcb_output {
uint8_t or;
uint8_t link;
bool duallink_possible;
+ uint8_t extdev;
union {
struct sor_conf {
int link;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
index e6563b5cb08..96d3364f6db 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
@@ -1,17 +1,22 @@
#ifndef __NVBIOS_GPIO_H__
#define __NVBIOS_GPIO_H__
-struct nouveau_bios;
-
enum dcb_gpio_func_name {
DCB_GPIO_PANEL_POWER = 0x01,
DCB_GPIO_TVDAC0 = 0x0c,
DCB_GPIO_TVDAC1 = 0x2d,
- DCB_GPIO_PWM_FAN = 0x09,
+ DCB_GPIO_FAN = 0x09,
DCB_GPIO_FAN_SENSE = 0x3d,
DCB_GPIO_UNUSED = 0xff
};
+#define DCB_GPIO_LOG_DIR 0x02
+#define DCB_GPIO_LOG_DIR_OUT 0x00
+#define DCB_GPIO_LOG_DIR_IN 0x02
+#define DCB_GPIO_LOG_VAL 0x01
+#define DCB_GPIO_LOG_VAL_LO 0x00
+#define DCB_GPIO_LOG_VAL_HI 0x01
+
struct dcb_gpio_func {
u8 func;
u8 line;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
index 5079bedfd98..10b57a19a7d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
@@ -15,7 +15,7 @@ struct dcb_i2c_entry {
enum dcb_i2c_type type;
u8 drive;
u8 sense;
- u32 data;
+ u8 share;
};
u16 dcb_i2c_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
index a2c4296fc5f..083541dbe9c 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
@@ -23,11 +23,27 @@ struct nvbios_therm_sensor {
struct nvbios_therm_threshold thrs_shutdown;
};
+/* no vbios has more than 6 */
+#define NOUVEAU_TEMP_FAN_TRIP_MAX 10
+struct nouveau_therm_trip_point {
+ int fan_duty;
+ int temp;
+ int hysteresis;
+};
+
struct nvbios_therm_fan {
u16 pwm_freq;
u8 min_duty;
u8 max_duty;
+
+ u16 bump_period;
+ u16 slow_down_period;
+
+ struct nouveau_therm_trip_point trip[NOUVEAU_TEMP_FAN_TRIP_MAX];
+ u8 nr_fan_trip;
+ u8 linear_min_temp;
+ u8 linear_max_temp;
};
enum nvbios_therm_domain {
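The new vbios fan fields above feed automatic fan control: either a table of trip points or a simple linear ramp between linear_min_temp and linear_max_temp, clamped to the min_duty/max_duty range. A small illustrative duty computation for the linear case only; the driver's actual policy is more involved, and the function name is made up:

/* Hedged sketch: linear fan duty between the vbios min/max temperatures,
 * clamped to the duty range.  Parameter names mirror nvbios_therm_fan. */
static int
linear_fan_duty(int temp, int linear_min_temp, int linear_max_temp,
		int min_duty, int max_duty)
{
	if (temp <= linear_min_temp)
		return min_duty;
	if (temp >= linear_max_temp)
		return max_duty;
	return min_duty + (max_duty - min_duty) *
	       (temp - linear_min_temp) /
	       (linear_max_temp - linear_min_temp);
}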
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/xpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/xpio.h
new file mode 100644
index 00000000000..360baab52e4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/xpio.h
@@ -0,0 +1,19 @@
+#ifndef __NVBIOS_XPIO_H__
+#define __NVBIOS_XPIO_H__
+
+#define NVBIOS_XPIO_FLAG_AUX 0x10
+#define NVBIOS_XPIO_FLAG_AUX0 0x00
+#define NVBIOS_XPIO_FLAG_AUX1 0x10
+
+struct nvbios_xpio {
+ u8 type;
+ u8 addr;
+ u8 flags;
+};
+
+u16 dcb_xpio_table(struct nouveau_bios *, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 dcb_xpio_parse(struct nouveau_bios *, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_xpio *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bus.h b/drivers/gpu/drm/nouveau/core/include/subdev/bus.h
new file mode 100644
index 00000000000..7d88ec4a6d0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bus.h
@@ -0,0 +1,41 @@
+#ifndef __NOUVEAU_BUS_H__
+#define __NOUVEAU_BUS_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+struct nouveau_bus_intr {
+ u32 stat;
+ u32 unit;
+};
+
+struct nouveau_bus {
+ struct nouveau_subdev base;
+};
+
+static inline struct nouveau_bus *
+nouveau_bus(void *obj)
+{
+ return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_BUS];
+}
+
+#define nouveau_bus_create(p, e, o, d) \
+ nouveau_subdev_create_((p), (e), (o), 0, "PBUS", "master", \
+ sizeof(**d), (void **)d)
+#define nouveau_bus_destroy(p) \
+ nouveau_subdev_destroy(&(p)->base)
+#define nouveau_bus_init(p) \
+ nouveau_subdev_init(&(p)->base)
+#define nouveau_bus_fini(p, s) \
+ nouveau_subdev_fini(&(p)->base, (s))
+
+#define _nouveau_bus_dtor _nouveau_subdev_dtor
+#define _nouveau_bus_init _nouveau_subdev_init
+#define _nouveau_bus_fini _nouveau_subdev_fini
+
+extern struct nouveau_oclass nv04_bus_oclass;
+extern struct nouveau_oclass nv31_bus_oclass;
+extern struct nouveau_oclass nv50_bus_oclass;
+extern struct nouveau_oclass nvc0_bus_oclass;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
index b75e8f18e52..c85b9f1579a 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
@@ -3,6 +3,7 @@
#include <core/subdev.h>
#include <core/device.h>
+#include <core/event.h>
#include <subdev/bios.h>
#include <subdev/bios/gpio.h>
@@ -10,28 +11,18 @@
struct nouveau_gpio {
struct nouveau_subdev base;
+ struct nouveau_event *events;
+
/* hardware interfaces */
void (*reset)(struct nouveau_gpio *, u8 func);
int (*drive)(struct nouveau_gpio *, int line, int dir, int out);
int (*sense)(struct nouveau_gpio *, int line);
- void (*irq_enable)(struct nouveau_gpio *, int line, bool);
/* software interfaces */
int (*find)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
struct dcb_gpio_func *);
int (*set)(struct nouveau_gpio *, int idx, u8 tag, u8 line, int state);
int (*get)(struct nouveau_gpio *, int idx, u8 tag, u8 line);
- int (*irq)(struct nouveau_gpio *, int idx, u8 tag, u8 line, bool on);
-
- /* interrupt handling */
- struct list_head isr;
- spinlock_t lock;
-
- void (*isr_run)(struct nouveau_gpio *, int idx, u32 mask);
- int (*isr_add)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
- void (*)(void *, int state), void *data);
- void (*isr_del)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
- void (*)(void *, int state), void *data);
};
static inline struct nouveau_gpio *
@@ -40,25 +31,23 @@ nouveau_gpio(void *obj)
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_GPIO];
}
-#define nouveau_gpio_create(p,e,o,d) \
- nouveau_gpio_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_gpio_destroy(p) \
- nouveau_subdev_destroy(&(p)->base)
+#define nouveau_gpio_create(p,e,o,l,d) \
+ nouveau_gpio_create_((p), (e), (o), (l), sizeof(**d), (void **)d)
+#define nouveau_gpio_destroy(p) ({ \
+ struct nouveau_gpio *gpio = (p); \
+ _nouveau_gpio_dtor(nv_object(gpio)); \
+})
#define nouveau_gpio_fini(p,s) \
nouveau_subdev_fini(&(p)->base, (s))
-int nouveau_gpio_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-int nouveau_gpio_init(struct nouveau_gpio *);
+int nouveau_gpio_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, int, void **);
+void _nouveau_gpio_dtor(struct nouveau_object *);
+int nouveau_gpio_init(struct nouveau_gpio *);
extern struct nouveau_oclass nv10_gpio_oclass;
extern struct nouveau_oclass nv50_gpio_oclass;
extern struct nouveau_oclass nvd0_gpio_oclass;
-
-void nv50_gpio_dtor(struct nouveau_object *);
-int nv50_gpio_init(struct nouveau_object *);
-int nv50_gpio_fini(struct nouveau_object *, bool);
-void nv50_gpio_intr(struct nouveau_subdev *);
-void nv50_gpio_irq_enable(struct nouveau_gpio *, int line, bool);
+extern struct nouveau_oclass nve0_gpio_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index b93ab01e378..888384c0bed 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -10,23 +10,59 @@
#define NV_I2C_PORT(n) (0x00 + (n))
#define NV_I2C_DEFAULT(n) (0x80 + (n))
+#define NV_I2C_TYPE_DCBI2C(n) (0x0000 | (n))
+#define NV_I2C_TYPE_EXTDDC(e) (0x0005 | (e) << 8)
+#define NV_I2C_TYPE_EXTAUX(e) (0x0006 | (e) << 8)
+
struct nouveau_i2c_port {
+ struct nouveau_object base;
struct i2c_adapter adapter;
- struct nouveau_i2c *i2c;
- struct i2c_algo_bit_data bit;
+
struct list_head head;
u8 index;
- u8 type;
- u32 dcb;
- u32 drive;
- u32 sense;
- u32 state;
+
+ const struct nouveau_i2c_func *func;
+};
+
+struct nouveau_i2c_func {
+ void (*acquire)(struct nouveau_i2c_port *);
+ void (*release)(struct nouveau_i2c_port *);
+
+ void (*drive_scl)(struct nouveau_i2c_port *, int);
+ void (*drive_sda)(struct nouveau_i2c_port *, int);
+ int (*sense_scl)(struct nouveau_i2c_port *);
+ int (*sense_sda)(struct nouveau_i2c_port *);
+
+ int (*aux)(struct nouveau_i2c_port *, u8, u32, u8 *, u8);
+ int (*pattern)(struct nouveau_i2c_port *, int pattern);
+ int (*lnk_ctl)(struct nouveau_i2c_port *, int nr, int bw, bool enh);
+ int (*drv_ctl)(struct nouveau_i2c_port *, int lane, int sw, int pe);
};
+#define nouveau_i2c_port_create(p,e,o,i,a,d) \
+ nouveau_i2c_port_create_((p), (e), (o), (i), (a), \
+ sizeof(**d), (void **)d)
+#define nouveau_i2c_port_destroy(p) ({ \
+ struct nouveau_i2c_port *port = (p); \
+ _nouveau_i2c_port_dtor(nv_object(port)); \
+})
+#define nouveau_i2c_port_init(p) \
+ nouveau_object_init(&(p)->base)
+#define nouveau_i2c_port_fini(p,s) \
+ nouveau_object_fini(&(p)->base, (s))
+
+int nouveau_i2c_port_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, u8,
+ const struct i2c_algorithm *, int, void **);
+void _nouveau_i2c_port_dtor(struct nouveau_object *);
+#define _nouveau_i2c_port_init nouveau_object_init
+#define _nouveau_i2c_port_fini nouveau_object_fini
+
struct nouveau_i2c {
struct nouveau_subdev base;
struct nouveau_i2c_port *(*find)(struct nouveau_i2c *, u8 index);
+ struct nouveau_i2c_port *(*find_type)(struct nouveau_i2c *, u16 type);
int (*identify)(struct nouveau_i2c *, int index,
const char *what, struct i2c_board_info *,
bool (*match)(struct nouveau_i2c_port *,
@@ -40,21 +76,76 @@ nouveau_i2c(void *obj)
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_I2C];
}
-extern struct nouveau_oclass nouveau_i2c_oclass;
+#define nouveau_i2c_create(p,e,o,s,d) \
+ nouveau_i2c_create_((p), (e), (o), (s), sizeof(**d), (void **)d)
+#define nouveau_i2c_destroy(p) ({ \
+ struct nouveau_i2c *i2c = (p); \
+ _nouveau_i2c_dtor(nv_object(i2c)); \
+})
+#define nouveau_i2c_init(p) ({ \
+ struct nouveau_i2c *i2c = (p); \
+ _nouveau_i2c_init(nv_object(i2c)); \
+})
+#define nouveau_i2c_fini(p,s) ({ \
+ struct nouveau_i2c *i2c = (p); \
+ _nouveau_i2c_fini(nv_object(i2c), (s)); \
+})
-void nouveau_i2c_drive_scl(void *, int);
-void nouveau_i2c_drive_sda(void *, int);
-int nouveau_i2c_sense_scl(void *);
-int nouveau_i2c_sense_sda(void *);
+int nouveau_i2c_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, struct nouveau_oclass *,
+ int, void **);
+void _nouveau_i2c_dtor(struct nouveau_object *);
+int _nouveau_i2c_init(struct nouveau_object *);
+int _nouveau_i2c_fini(struct nouveau_object *, bool);
-int nv_rdi2cr(struct nouveau_i2c_port *, u8 addr, u8 reg);
-int nv_wri2cr(struct nouveau_i2c_port *, u8 addr, u8 reg, u8 val);
-bool nv_probe_i2c(struct nouveau_i2c_port *, u8 addr);
-
-int nv_rdaux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size);
-int nv_wraux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size);
+extern struct nouveau_oclass nv04_i2c_oclass;
+extern struct nouveau_oclass nv4e_i2c_oclass;
+extern struct nouveau_oclass nv50_i2c_oclass;
+extern struct nouveau_oclass nv94_i2c_oclass;
+extern struct nouveau_oclass nvd0_i2c_oclass;
+extern struct nouveau_oclass nouveau_anx9805_sclass[];
extern const struct i2c_algorithm nouveau_i2c_bit_algo;
extern const struct i2c_algorithm nouveau_i2c_aux_algo;
+static inline int
+nv_rdi2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg)
+{
+ u8 val;
+ struct i2c_msg msgs[] = {
+ { .addr = addr, .flags = 0, .len = 1, .buf = &reg },
+ { .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = &val },
+ };
+
+ int ret = i2c_transfer(&port->adapter, msgs, 2);
+ if (ret != 2)
+ return -EIO;
+
+ return val;
+}
+
+static inline int
+nv_wri2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg, u8 val)
+{
+ u8 buf[2] = { reg, val };
+ struct i2c_msg msgs[] = {
+ { .addr = addr, .flags = 0, .len = 2, .buf = buf },
+ };
+
+ int ret = i2c_transfer(&port->adapter, msgs, 1);
+ if (ret != 1)
+ return -EIO;
+
+ return 0;
+}
+
+static inline bool
+nv_probe_i2c(struct nouveau_i2c_port *port, u8 addr)
+{
+ return nv_rdi2cr(port, addr, 0) >= 0;
+}
+
+int nv_rdaux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size);
+int nv_wraux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size);
+
#endif
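nv_rdi2cr()/nv_wri2cr() become static inlines built directly on i2c_transfer(), and nv_probe_i2c() is now just a read of register 0 that only checks for an ACK. A short usage sketch against those inlines; the 0x50 address is an illustrative EDID-style EEPROM address, and the caller is assumed to have obtained the port from the i2c subdev's find()/find_type() methods:

/* Hedged sketch: exercises only the inlines defined in the header above. */
static int
read_byte_zero(struct nouveau_i2c_port *port)
{
	const u8 addr = 0x50;   /* illustrative device address */

	if (!nv_probe_i2c(port, addr))   /* one read of reg 0, ACK check only */
		return -ENODEV;

	return nv_rdi2cr(port, addr, 0x00);   /* byte value, or -EIO on failure */
}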
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
index faee569fd45..6b17b614629 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
@@ -4,10 +4,10 @@
#include <core/device.h>
#include <core/subdev.h>
-enum nouveau_therm_fan_mode {
- FAN_CONTROL_NONE = 0,
- FAN_CONTROL_MANUAL = 1,
- FAN_CONTROL_NR,
+enum nouveau_therm_mode {
+ NOUVEAU_THERM_CTRL_NONE = 0,
+ NOUVEAU_THERM_CTRL_MANUAL = 1,
+ NOUVEAU_THERM_CTRL_AUTO = 2,
};
enum nouveau_therm_attr_type {
@@ -28,6 +28,11 @@ enum nouveau_therm_attr_type {
struct nouveau_therm {
struct nouveau_subdev base;
+ int (*pwm_ctrl)(struct nouveau_therm *, int line, bool);
+ int (*pwm_get)(struct nouveau_therm *, int line, u32 *, u32 *);
+ int (*pwm_set)(struct nouveau_therm *, int line, u32, u32);
+ int (*pwm_clock)(struct nouveau_therm *);
+
int (*fan_get)(struct nouveau_therm *);
int (*fan_set)(struct nouveau_therm *, int);
int (*fan_sense)(struct nouveau_therm *);
@@ -46,13 +51,29 @@ nouveau_therm(void *obj)
}
#define nouveau_therm_create(p,e,o,d) \
- nouveau_subdev_create((p), (e), (o), 0, "THERM", "therm", d)
-#define nouveau_therm_destroy(p) \
- nouveau_subdev_destroy(&(p)->base)
+ nouveau_therm_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_therm_destroy(p) ({ \
+ struct nouveau_therm *therm = (p); \
+ _nouveau_therm_dtor(nv_object(therm)); \
+})
+#define nouveau_therm_init(p) ({ \
+ struct nouveau_therm *therm = (p); \
+ _nouveau_therm_init(nv_object(therm)); \
+})
+#define nouveau_therm_fini(p,s) ({ \
+ struct nouveau_therm *therm = (p); \
+	_nouveau_therm_fini(nv_object(therm), (s)); \
+})
-#define _nouveau_therm_dtor _nouveau_subdev_dtor
+int nouveau_therm_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, void **);
+void _nouveau_therm_dtor(struct nouveau_object *);
+int _nouveau_therm_init(struct nouveau_object *);
+int _nouveau_therm_fini(struct nouveau_object *, bool);
extern struct nouveau_oclass nv40_therm_oclass;
extern struct nouveau_oclass nv50_therm_oclass;
+extern struct nouveau_oclass nva3_therm_oclass;
+extern struct nouveau_oclass nvd0_therm_oclass;
#endif
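
The new pwm_* hooks give the generic thermal code a single interface to the per-chipset fan PWM hardware. The sketch below is illustrative only and assumes pwm_get() reports the divider and current duty for a line and that pwm_ctrl(..., true) enables PWM control; it is not code from the commit:

/* Illustrative sketch, assumptions noted above: program a fan duty cycle
 * (in percent) through the pwm_* hooks added to struct nouveau_therm. */
#include <subdev/therm.h>

static int
example_set_fan_duty(struct nouveau_therm *therm, int line, int percent)
{
	u32 divs, duty;
	int ret;

	/* assumed semantics: read back divider and current duty */
	ret = therm->pwm_get(therm, line, &divs, &duty);
	if (ret)
		return ret;

	/* scale the requested percentage onto the divider range */
	duty = (percent * divs) / 100;

	/* assumed semantics: switch the line to PWM control */
	ret = therm->pwm_ctrl(therm, line, true);
	if (ret)
		return ret;

	return therm->pwm_set(therm, line, divs, duty);
}
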
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
index c24ec8ab3db..e465d158d35 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
@@ -10,6 +10,14 @@ struct nouveau_alarm {
void (*func)(struct nouveau_alarm *);
};
+static inline void
+nouveau_alarm_init(struct nouveau_alarm *alarm,
+ void (*func)(struct nouveau_alarm *))
+{
+ INIT_LIST_HEAD(&alarm->head);
+ alarm->func = func;
+}
+
bool nouveau_timer_wait_eq(void *, u64 nsec, u32 addr, u32 mask, u32 data);
bool nouveau_timer_wait_ne(void *, u64 nsec, u32 addr, u32 mask, u32 data);
bool nouveau_timer_wait_cb(void *, u64 nsec, bool (*func)(void *), void *data);
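
nouveau_alarm_init() only prepares the list head and records the callback; arming the alarm is still done through the timer subdev, whose alarm() hook is assumed here and is not part of this hunk. An illustrative sketch:

/* Illustrative sketch: set up a one-shot alarm callback with the new
 * nouveau_alarm_init() helper.  The embedding structure is hypothetical. */
#include <subdev/timer.h>

struct example_ctx {
	struct nouveau_alarm alarm;	/* must stay alive until it fires */
	int counter;
};

static void
example_alarm_handler(struct nouveau_alarm *alarm)
{
	struct example_ctx *ctx = container_of(alarm, struct example_ctx, alarm);
	ctx->counter++;
}

static void
example_setup_alarm(struct example_ctx *ctx)
{
	/* initialises the list head and records the callback; the timer
	 * subdev is then used (not shown) to actually schedule it */
	nouveau_alarm_init(&ctx->alarm, example_alarm_handler);
}
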
diff --git a/drivers/gpu/drm/nouveau/core/os.h b/drivers/gpu/drm/nouveau/core/os.h
index cfe3b9cad15..eb496033b55 100644
--- a/drivers/gpu/drm/nouveau/core/os.h
+++ b/drivers/gpu/drm/nouveau/core/os.h
@@ -16,6 +16,7 @@
#include <linux/vmalloc.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <linux/reboot.h>
#include <asm/unaligned.h>
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index f621f69fa1a..e816f06637a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -172,7 +172,7 @@ out:
nv_wr32(bios, pcireg, access);
}
-#if defined(CONFIG_ACPI)
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
#else
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
index 0fd87df99dd..2d9b9d7a799 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
@@ -107,6 +107,18 @@ dcb_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
return 0x0000;
}
+static inline u16
+dcb_outp_hasht(struct dcb_output *outp)
+{
+ return (outp->extdev << 8) | (outp->location << 4) | outp->type;
+}
+
+static inline u16
+dcb_outp_hashm(struct dcb_output *outp)
+{
+ return (outp->heads << 8) | (outp->link << 6) | outp->or;
+}
+
u16
dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
struct dcb_output *outp)
@@ -135,34 +147,28 @@ dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
case DCB_OUTPUT_DP:
outp->link = (conf & 0x00000030) >> 4;
outp->sorconf.link = outp->link; /*XXX*/
+ outp->extdev = 0x00;
+ if (outp->location != 0)
+ outp->extdev = (conf & 0x0000ff00) >> 8;
break;
default:
break;
}
}
+
+ outp->hasht = dcb_outp_hasht(outp);
+ outp->hashm = dcb_outp_hashm(outp);
}
return dcb;
}
-static inline u16
-dcb_outp_hasht(struct dcb_output *outp)
-{
- return outp->type;
-}
-
-static inline u16
-dcb_outp_hashm(struct dcb_output *outp)
-{
- return (outp->heads << 8) | (outp->link << 6) | outp->or;
-}
-
u16
dcb_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
u8 *ver, u8 *len, struct dcb_output *outp)
{
u16 dcb, idx = 0;
while ((dcb = dcb_outp_parse(bios, idx++, ver, len, outp))) {
- if (dcb_outp_hasht(outp) == type) {
+ if ((dcb_outp_hasht(outp) & 0x00ff) == (type & 0x00ff)) {
if ((dcb_outp_hashm(outp) & mask) == mask)
break;
}
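
Because only the low byte of the type is compared, outputs whose high byte now carries an external-encoder id still match their base type, while the mask check keeps the heads/link/OR filtering. A hypothetical lookup (constant and mask layout taken from the helpers above, not from the commit):

/* Illustrative sketch: find the first TMDS output on a given OR/link via
 * dcb_outp_match().  Mask layout follows dcb_outp_hashm(): heads in bits
 * 8..15, link in bits 6..7, OR in bits 0..3. */
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>

static u16
example_find_tmds(struct nouveau_bios *bios, u8 or, u8 link,
		  struct dcb_output *outp)
{
	u8 ver, len;
	u16 mask = (link << 6) | or;

	/* only the low byte of 'type' is compared, so an external encoder
	 * id in the high byte does not prevent a match */
	return dcb_outp_match(bios, DCB_OUTPUT_TMDS, mask, &ver, &len, outp);
}
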
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c b/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
index 5afb568b2d6..b2a676e5358 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
@@ -48,7 +48,7 @@ extdev_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
return extdev + *hdr;
}
-u16
+static u16
nvbios_extdev_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
{
u8 hdr, cnt;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
index c84e93fa6d9..172a4f99999 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
@@ -25,6 +25,7 @@
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/gpio.h>
+#include <subdev/bios/xpio.h>
u16
dcb_gpio_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
@@ -60,8 +61,14 @@ dcb_gpio_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
u16
dcb_gpio_entry(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len)
{
- u8 hdr, cnt;
- u16 gpio = !idx ? dcb_gpio_table(bios, ver, &hdr, &cnt, len) : 0x0000;
+ u8 hdr, cnt, xver; /* use gpio version for xpio entry parsing */
+ u16 gpio;
+
+ if (!idx--)
+ gpio = dcb_gpio_table(bios, ver, &hdr, &cnt, len);
+ else
+ gpio = dcb_xpio_table(bios, idx, &xver, &hdr, &cnt, len);
+
if (gpio && ent < cnt)
return gpio + hdr + (ent * *len);
return 0x0000;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
index ad577db8376..cfb9288c6d2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
@@ -70,12 +70,12 @@ dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info)
u8 ver, len;
u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
if (ent) {
- info->data = nv_ro32(bios, ent + 0);
- info->type = nv_ro08(bios, ent + 3);
+ info->type = nv_ro08(bios, ent + 3);
+ info->share = DCB_I2C_UNUSED;
if (ver < 0x30) {
info->type &= 0x07;
if (info->type == 0x07)
- info->type = 0xff;
+ info->type = DCB_I2C_UNUSED;
}
switch (info->type) {
@@ -88,7 +88,11 @@ dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info)
return 0;
case DCB_I2C_NVIO_BIT:
case DCB_I2C_NVIO_AUX:
- info->drive = nv_ro08(bios, ent + 0);
+ info->drive = nv_ro08(bios, ent + 0) & 0x0f;
+ if (nv_ro08(bios, ent + 1) & 0x01) {
+ info->share = nv_ro08(bios, ent + 1) >> 1;
+ info->share &= 0x0f;
+ }
return 0;
case DCB_I2C_UNUSED:
return 0;
@@ -121,7 +125,8 @@ dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info)
if (!info->sense) info->sense = 0x36;
}
- info->type = DCB_I2C_NV04_BIT;
+ info->type = DCB_I2C_NV04_BIT;
+ info->share = DCB_I2C_UNUSED;
return 0;
}
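
For reference, a hypothetical walk over the parsed entries that counts usable ports using the type/share semantics above; the upper bound of 16 and treating any non-zero return as "no such entry" are assumptions for the sketch, not part of the commit:

/* Illustrative sketch: count DCB I2C ports that are not DCB_I2C_UNUSED. */
#include <subdev/bios.h>
#include <subdev/bios/i2c.h>

static int
example_count_i2c_ports(struct nouveau_bios *bios)
{
	struct dcb_i2c_entry info;
	int nr = 0;
	u8 idx;

	for (idx = 0; idx < 16; idx++) {	/* 16 is an arbitrary bound */
		if (dcb_i2c_parse(bios, idx, &info))
			break;
		if (info.type != DCB_I2C_UNUSED)
			nr++;
	}
	return nr;
}
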
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 690ed438b2a..2cc1e6a5eb6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -231,6 +231,11 @@ init_i2c(struct nvbios_init *init, int index)
return NULL;
}
+ if (index == -2 && init->outp->location) {
+ index = NV_I2C_TYPE_EXTAUX(init->outp->extdev);
+ return i2c->find_type(i2c, index);
+ }
+
index = init->outp->i2c_index;
}
@@ -258,7 +263,7 @@ init_wri2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg, u8 val)
static int
init_rdauxr(struct nvbios_init *init, u32 addr)
{
- struct nouveau_i2c_port *port = init_i2c(init, -1);
+ struct nouveau_i2c_port *port = init_i2c(init, -2);
u8 data;
if (port && init_exec(init)) {
@@ -274,7 +279,7 @@ init_rdauxr(struct nvbios_init *init, u32 addr)
static int
init_wrauxr(struct nvbios_init *init, u32 addr, u8 data)
{
- struct nouveau_i2c_port *port = init_i2c(init, -1);
+ struct nouveau_i2c_port *port = init_i2c(init, -2);
if (port && init_exec(init))
return nv_wraux(port, addr, &data, 1);
return -ENODEV;
@@ -1816,7 +1821,7 @@ init_ram_restrict_zm_reg_group(struct nvbios_init *init)
u8 i, j;
trace("RAM_RESTRICT_ZM_REG_GROUP\t"
- "R[%08x] 0x%02x 0x%02x\n", addr, incr, num);
+ "R[0x%08x] 0x%02x 0x%02x\n", addr, incr, num);
init->offset += 7;
for (i = 0; i < num; i++) {
@@ -1849,7 +1854,7 @@ init_copy_zm_reg(struct nvbios_init *init)
u32 sreg = nv_ro32(bios, init->offset + 1);
u32 dreg = nv_ro32(bios, init->offset + 5);
- trace("COPY_ZM_REG\tR[0x%06x] = R[0x%06x]\n", sreg, dreg);
+ trace("COPY_ZM_REG\tR[0x%06x] = R[0x%06x]\n", dreg, sreg);
init->offset += 9;
init_wr32(init, dreg, init_rd32(init, sreg));
@@ -1866,7 +1871,7 @@ init_zm_reg_group(struct nvbios_init *init)
u32 addr = nv_ro32(bios, init->offset + 1);
u8 count = nv_ro08(bios, init->offset + 5);
- trace("ZM_REG_GROUP\tR[0x%06x] =\n");
+ trace("ZM_REG_GROUP\tR[0x%06x] =\n", addr);
init->offset += 6;
while (count--) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
index 862a08a2ae2..22a20573ed1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
@@ -55,7 +55,7 @@ therm_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
return therm + nv_ro08(bios, therm + 1);
}
-u16
+static u16
nvbios_therm_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
{
u8 hdr, cnt;
@@ -155,10 +155,15 @@ int
nvbios_therm_fan_parse(struct nouveau_bios *bios,
struct nvbios_therm_fan *fan)
{
+ struct nouveau_therm_trip_point *cur_trip = NULL;
u8 ver, len, i;
u16 entry;
+ uint8_t duty_lut[] = { 0, 0, 25, 0, 40, 0, 50, 0,
+ 75, 0, 85, 0, 100, 0, 100, 0 };
+
i = 0;
+ fan->nr_fan_trip = 0;
while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) {
s16 value = nv_ro16(bios, entry + 1);
@@ -167,9 +172,30 @@ nvbios_therm_fan_parse(struct nouveau_bios *bios,
fan->min_duty = value & 0xff;
fan->max_duty = (value & 0xff00) >> 8;
break;
+ case 0x24:
+ fan->nr_fan_trip++;
+ cur_trip = &fan->trip[fan->nr_fan_trip - 1];
+ cur_trip->hysteresis = value & 0xf;
+ cur_trip->temp = (value & 0xff0) >> 4;
+ cur_trip->fan_duty = duty_lut[(value & 0xf000) >> 12];
+ break;
+ case 0x25:
+ cur_trip = &fan->trip[fan->nr_fan_trip - 1];
+ cur_trip->fan_duty = value;
+ break;
case 0x26:
fan->pwm_freq = value;
break;
+ case 0x3b:
+ fan->bump_period = value;
+ break;
+ case 0x3c:
+ fan->slow_down_period = value;
+ break;
+ case 0x46:
+ fan->linear_min_temp = nv_ro08(bios, entry + 1);
+ fan->linear_max_temp = nv_ro08(bios, entry + 2);
+ break;
}
}
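
Entry type 0x24 packs a whole trip point into one 16-bit value: hysteresis in bits 0-3, temperature in bits 4-11, and a duty index in bits 12-15 resolved through duty_lut. A worked example (illustrative only, mirroring the parsing above):

/* Illustrative sketch: decode a hypothetical 0x24 trip-point value. */
static void
example_decode_trip(void)
{
	static const u8 duty_lut[] = { 0, 0, 25, 0, 40, 0, 50, 0,
				       75, 0, 85, 0, 100, 0, 100, 0 };
	u16 value = 0x4312;	/* example value, not from a real VBIOS */

	u8 hysteresis = value & 0xf;			  /* -> 2 */
	u8 temp       = (value & 0xff0) >> 4;		  /* -> 0x31 = 49 */
	u8 fan_duty   = duty_lut[(value & 0xf000) >> 12]; /* index 4 -> 40 */

	(void)hysteresis; (void)temp; (void)fan_duty;
}
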
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/xpio.c b/drivers/gpu/drm/nouveau/core/subdev/bios/xpio.c
new file mode 100644
index 00000000000..e9b8e5d30a7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/xpio.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/gpio.h>
+#include <subdev/bios/xpio.h>
+
+static u16
+dcb_xpiod_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ u16 data = dcb_gpio_table(bios, ver, hdr, cnt, len);
+ if (data && *ver >= 0x40 && *hdr >= 0x06) {
+ u16 xpio = nv_ro16(bios, data + 0x04);
+ if (xpio) {
+ *ver = nv_ro08(bios, data + 0x00);
+ *hdr = nv_ro08(bios, data + 0x01);
+ *cnt = nv_ro08(bios, data + 0x02);
+ *len = nv_ro08(bios, data + 0x03);
+ return xpio;
+ }
+ }
+ return 0x0000;
+}
+
+u16
+dcb_xpio_table(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ u16 data = dcb_xpiod_table(bios, ver, hdr, cnt, len);
+ if (data && idx < *cnt) {
+ u16 xpio = nv_ro16(bios, data + *hdr + (idx * *len));
+ if (xpio) {
+ *ver = nv_ro08(bios, data + 0x00);
+ *hdr = nv_ro08(bios, data + 0x01);
+ *cnt = nv_ro08(bios, data + 0x02);
+ *len = nv_ro08(bios, data + 0x03);
+ return xpio;
+ }
+ }
+ return 0x0000;
+}
+
+u16
+dcb_xpio_parse(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_xpio *info)
+{
+ u16 data = dcb_xpio_table(bios, idx, ver, hdr, cnt, len);
+ if (data && *len >= 6) {
+ info->type = nv_ro08(bios, data + 0x04);
+ info->addr = nv_ro08(bios, data + 0x05);
+ info->flags = nv_ro08(bios, data + 0x06);
+ }
+ return 0x0000;
+}
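
The new dcb_xpio_table()/dcb_xpio_parse() pair lets callers (such as the dcb_gpio_entry() change earlier in this series) treat XPIO tables like additional GPIO tables. An illustrative walk over the entries, using only the functions defined above:

/* Illustrative sketch: iterate the XPIO entries of a VBIOS and collect
 * their type/addr/flags via dcb_xpio_parse(). */
#include <subdev/bios.h>
#include <subdev/bios/xpio.h>

static void
example_walk_xpio(struct nouveau_bios *bios)
{
	struct nvbios_xpio info;
	u8 ver, hdr, cnt, len;
	u8 idx = 0;

	/* dcb_xpio_table() returns 0x0000 once idx runs past the table */
	while (dcb_xpio_table(bios, idx, &ver, &hdr, &cnt, &len)) {
		dcb_xpio_parse(bios, idx, &ver, &hdr, &cnt, &len, &info);
		/* info.type / info.addr / info.flags now describe entry idx */
		idx++;
	}
}
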
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c
new file mode 100644
index 00000000000..8c7f8057a18
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres <martin.peres@labri.fr>
+ * Ben Skeggs
+ */
+
+#include <subdev/bus.h>
+
+struct nv04_bus_priv {
+ struct nouveau_bus base;
+};
+
+static void
+nv04_bus_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_bus *pbus = nouveau_bus(subdev);
+ u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+
+ if (stat & 0x00000001) {
+ nv_error(pbus, "BUS ERROR\n");
+ stat &= ~0x00000001;
+ nv_wr32(pbus, 0x001100, 0x00000001);
+ }
+
+ if (stat & 0x00000110) {
+ subdev = nouveau_subdev(subdev, NVDEV_SUBDEV_GPIO);
+ if (subdev && subdev->intr)
+ subdev->intr(subdev);
+ stat &= ~0x00000110;
+ nv_wr32(pbus, 0x001100, 0x00000110);
+ }
+
+ if (stat) {
+ nv_error(pbus, "unknown intr 0x%08x\n", stat);
+ nv_mask(pbus, 0x001140, stat, 0x00000000);
+ }
+}
+
+static int
+nv04_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_bus_priv *priv;
+ int ret;
+
+ ret = nouveau_bus_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->intr = nv04_bus_intr;
+ return 0;
+}
+
+static int
+nv04_bus_init(struct nouveau_object *object)
+{
+ struct nv04_bus_priv *priv = (void *)object;
+
+ nv_wr32(priv, 0x001100, 0xffffffff);
+ nv_wr32(priv, 0x001140, 0x00000111);
+
+ return nouveau_bus_init(&priv->base);
+}
+
+struct nouveau_oclass
+nv04_bus_oclass = {
+ .handle = NV_SUBDEV(BUS, 0x04),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_bus_ctor,
+ .dtor = _nouveau_bus_dtor,
+ .init = nv04_bus_init,
+ .fini = _nouveau_bus_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c
new file mode 100644
index 00000000000..34132aef34e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres <martin.peres@labri.fr>
+ * Ben Skeggs
+ */
+
+#include <subdev/bus.h>
+
+struct nv31_bus_priv {
+ struct nouveau_bus base;
+};
+
+static void
+nv31_bus_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_bus *pbus = nouveau_bus(subdev);
+ u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+ u32 gpio = nv_rd32(pbus, 0x001104) & nv_rd32(pbus, 0x001144);
+
+ if (gpio) {
+ subdev = nouveau_subdev(pbus, NVDEV_SUBDEV_GPIO);
+ if (subdev && subdev->intr)
+ subdev->intr(subdev);
+ }
+
+ if (stat & 0x00000008) { /* NV41- */
+ u32 addr = nv_rd32(pbus, 0x009084);
+ u32 data = nv_rd32(pbus, 0x009088);
+
+ nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x\n",
+ (addr & 0x00000002) ? "write" : "read", data,
+ (addr & 0x00fffffc));
+
+ stat &= ~0x00000008;
+ nv_wr32(pbus, 0x001100, 0x00000008);
+ }
+
+ if (stat & 0x00070000) {
+ subdev = nouveau_subdev(pbus, NVDEV_SUBDEV_THERM);
+ if (subdev && subdev->intr)
+ subdev->intr(subdev);
+ stat &= ~0x00070000;
+ nv_wr32(pbus, 0x001100, 0x00070000);
+ }
+
+ if (stat) {
+ nv_error(pbus, "unknown intr 0x%08x\n", stat);
+ nv_mask(pbus, 0x001140, stat, 0x00000000);
+ }
+}
+
+static int
+nv31_bus_init(struct nouveau_object *object)
+{
+ struct nv31_bus_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_bus_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x001100, 0xffffffff);
+ nv_wr32(priv, 0x001140, 0x00070008);
+ return 0;
+}
+
+static int
+nv31_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv31_bus_priv *priv;
+ int ret;
+
+ ret = nouveau_bus_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->intr = nv31_bus_intr;
+ return 0;
+}
+
+struct nouveau_oclass
+nv31_bus_oclass = {
+ .handle = NV_SUBDEV(BUS, 0x31),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv31_bus_ctor,
+ .dtor = _nouveau_bus_dtor,
+ .init = nv31_bus_init,
+ .fini = _nouveau_bus_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c
new file mode 100644
index 00000000000..f5b2117fa8c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres <martin.peres@labri.fr>
+ * Ben Skeggs
+ */
+
+#include <subdev/bus.h>
+
+struct nv50_bus_priv {
+ struct nouveau_bus base;
+};
+
+static void
+nv50_bus_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_bus *pbus = nouveau_bus(subdev);
+ u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+
+ if (stat & 0x00000008) {
+ u32 addr = nv_rd32(pbus, 0x009084);
+ u32 data = nv_rd32(pbus, 0x009088);
+
+ nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x\n",
+ (addr & 0x00000002) ? "write" : "read", data,
+ (addr & 0x00fffffc));
+
+ stat &= ~0x00000008;
+ nv_wr32(pbus, 0x001100, 0x00000008);
+ }
+
+ if (stat & 0x00010000) {
+ subdev = nouveau_subdev(pbus, NVDEV_SUBDEV_THERM);
+ if (subdev && subdev->intr)
+ subdev->intr(subdev);
+ stat &= ~0x00010000;
+ nv_wr32(pbus, 0x001100, 0x00010000);
+ }
+
+ if (stat) {
+ nv_error(pbus, "unknown intr 0x%08x\n", stat);
+ nv_mask(pbus, 0x001140, stat, 0);
+ }
+}
+
+static int
+nv50_bus_init(struct nouveau_object *object)
+{
+ struct nv50_bus_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_bus_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x001100, 0xffffffff);
+ nv_wr32(priv, 0x001140, 0x00010008);
+ return 0;
+}
+
+static int
+nv50_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_bus_priv *priv;
+ int ret;
+
+ ret = nouveau_bus_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->intr = nv50_bus_intr;
+ return 0;
+}
+
+struct nouveau_oclass
+nv50_bus_oclass = {
+ .handle = NV_SUBDEV(BUS, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_bus_ctor,
+ .dtor = _nouveau_bus_dtor,
+ .init = nv50_bus_init,
+ .fini = _nouveau_bus_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c
new file mode 100644
index 00000000000..b192d624636
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres <martin.peres@labri.fr>
+ * Ben Skeggs
+ */
+
+#include <subdev/bus.h>
+
+struct nvc0_bus_priv {
+ struct nouveau_bus base;
+};
+
+static void
+nvc0_bus_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_bus *pbus = nouveau_bus(subdev);
+ u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+
+ if (stat & 0x0000000e) {
+ u32 addr = nv_rd32(pbus, 0x009084);
+ u32 data = nv_rd32(pbus, 0x009088);
+
+ nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x [ %s%s%s]\n",
+ (addr & 0x00000002) ? "write" : "read", data,
+ (addr & 0x00fffffc),
+ (stat & 0x00000002) ? "!ENGINE " : "",
+ (stat & 0x00000004) ? "IBUS " : "",
+ (stat & 0x00000008) ? "TIMEOUT " : "");
+
+ nv_wr32(pbus, 0x009084, 0x00000000);
+ nv_wr32(pbus, 0x001100, (stat & 0x0000000e));
+ stat &= ~0x0000000e;
+ }
+
+ if (stat) {
+ nv_error(pbus, "unknown intr 0x%08x\n", stat);
+ nv_mask(pbus, 0x001140, stat, 0x00000000);
+ }
+}
+
+static int
+nvc0_bus_init(struct nouveau_object *object)
+{
+ struct nvc0_bus_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_bus_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x001100, 0xffffffff);
+ nv_wr32(priv, 0x001140, 0x0000000e);
+ return 0;
+}
+
+static int
+nvc0_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_bus_priv *priv;
+ int ret;
+
+ ret = nouveau_bus_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->intr = nvc0_bus_intr;
+ return 0;
+}
+
+struct nouveau_oclass
+nvc0_bus_oclass = {
+ .handle = NV_SUBDEV(BUS, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_bus_ctor,
+ .dtor = _nouveau_bus_dtor,
+ .init = nvc0_bus_init,
+ .fini = _nouveau_bus_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/base.c b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
index f8a7ed4166c..3937ced5c75 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
@@ -66,6 +66,7 @@ static const u64 disable_map[] = {
[NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_MXM] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_MC] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_BUS] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_TIMER] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_FB] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_LTCG] = NV_DEVICE_DISABLE_CORE,
@@ -103,8 +104,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
struct nouveau_device *device;
struct nouveau_devobj *devobj;
struct nv_device_class *args = data;
- u64 disable, boot0, strap;
- u64 mmio_base, mmio_size;
+ u32 boot0, strap;
+ u64 disable, mmio_base, mmio_size;
void __iomem *map;
int ret, i, c;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
index 8626d0d6cbb..473c5c03d3c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
@@ -24,6 +24,7 @@
#include <subdev/device.h>
#include <subdev/bios.h>
+#include <subdev/bus.h>
#include <subdev/i2c.h>
#include <subdev/clock.h>
#include <subdev/devinit.h>
@@ -46,10 +47,11 @@ nv04_identify(struct nouveau_device *device)
case 0x04:
device->cname = "NV04";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv04_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv04_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -63,10 +65,11 @@ nv04_identify(struct nouveau_device *device)
case 0x05:
device->cname = "NV05";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv05_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv04_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
index 9c40b0fb23f..d0774f5bebe 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
@@ -24,6 +24,7 @@
#include <subdev/device.h>
#include <subdev/bios.h>
+#include <subdev/bus.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <subdev/clock.h>
@@ -48,10 +49,11 @@ nv10_identify(struct nouveau_device *device)
device->cname = "NV10";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -64,10 +66,11 @@ nv10_identify(struct nouveau_device *device)
device->cname = "NV15";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -82,10 +85,11 @@ nv10_identify(struct nouveau_device *device)
device->cname = "NV16";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -100,10 +104,11 @@ nv10_identify(struct nouveau_device *device)
device->cname = "nForce";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -118,10 +123,11 @@ nv10_identify(struct nouveau_device *device)
device->cname = "NV11";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -136,10 +142,11 @@ nv10_identify(struct nouveau_device *device)
device->cname = "NV17";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -154,10 +161,11 @@ nv10_identify(struct nouveau_device *device)
device->cname = "nForce2";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -172,10 +180,11 @@ nv10_identify(struct nouveau_device *device)
device->cname = "NV18";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
index 74f88f48e1c..ab920e0dc45 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
@@ -24,6 +24,7 @@
#include <subdev/device.h>
#include <subdev/bios.h>
+#include <subdev/bus.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <subdev/clock.h>
@@ -49,10 +50,11 @@ nv20_identify(struct nouveau_device *device)
device->cname = "NV20";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -67,10 +69,11 @@ nv20_identify(struct nouveau_device *device)
device->cname = "NV25";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -85,10 +88,11 @@ nv20_identify(struct nouveau_device *device)
device->cname = "NV28";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -103,10 +107,11 @@ nv20_identify(struct nouveau_device *device)
device->cname = "NV2A";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
index 0ac1b2c4f61..5f2110261b0 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
@@ -24,6 +24,7 @@
#include <subdev/device.h>
#include <subdev/bios.h>
+#include <subdev/bus.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <subdev/clock.h>
@@ -49,10 +50,11 @@ nv30_identify(struct nouveau_device *device)
device->cname = "NV30";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -67,10 +69,11 @@ nv30_identify(struct nouveau_device *device)
device->cname = "NV35";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv35_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -85,10 +88,11 @@ nv30_identify(struct nouveau_device *device)
device->cname = "NV31";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -104,10 +108,11 @@ nv30_identify(struct nouveau_device *device)
device->cname = "NV36";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv36_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -123,10 +128,11 @@ nv30_identify(struct nouveau_device *device)
device->cname = "NV34";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
index 41d59689a02..f3d55efe9ac 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
@@ -24,6 +24,8 @@
#include <subdev/device.h>
#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/vm.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <subdev/clock.h>
@@ -50,11 +52,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "NV40";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -70,11 +73,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "NV41";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -90,11 +94,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "NV42";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -110,11 +115,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "NV43";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -130,11 +136,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "NV45";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -150,11 +157,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "G70";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv47_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -170,11 +178,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "G71";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -190,11 +199,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "G73";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -210,11 +220,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "NV44";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -230,11 +241,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "G72";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -250,11 +262,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "NV44A";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -270,11 +283,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "C61";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -290,11 +304,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "C51";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv4e_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv4e_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -310,11 +325,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "C73";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -330,11 +346,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "C67";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -350,11 +367,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "C68";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
index 6ccfd8585ba..5ed2fa51ddc 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
@@ -24,6 +24,7 @@
#include <subdev/device.h>
#include <subdev/bios.h>
+#include <subdev/bus.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <subdev/clock.h>
@@ -57,12 +58,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "G80";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -79,12 +81,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "G84";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -104,12 +107,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "G86";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -129,12 +133,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "G92";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -154,12 +159,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "G94";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -179,12 +185,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "G96";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -204,12 +211,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "G98";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -229,12 +237,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "G200";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -254,12 +263,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "MCP77/MCP78";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -279,12 +289,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "MCP79/MCP7A";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -304,12 +315,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "GT215";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -330,12 +342,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "GT216";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -355,12 +368,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "GT218";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -380,12 +394,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "MCP89";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
index f0461685a42..4393eb4d656 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
@@ -24,6 +24,7 @@
#include <subdev/device.h>
#include <subdev/bios.h>
+#include <subdev/bus.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <subdev/clock.h>
@@ -57,12 +58,13 @@ nvc0_identify(struct nouveau_device *device)
device->cname = "GF100";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -85,12 +87,13 @@ nvc0_identify(struct nouveau_device *device)
device->cname = "GF104";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -113,12 +116,13 @@ nvc0_identify(struct nouveau_device *device)
device->cname = "GF106";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -141,12 +145,13 @@ nvc0_identify(struct nouveau_device *device)
device->cname = "GF114";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -169,12 +174,13 @@ nvc0_identify(struct nouveau_device *device)
device->cname = "GF116";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -197,12 +203,13 @@ nvc0_identify(struct nouveau_device *device)
device->cname = "GF108";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -225,12 +232,13 @@ nvc0_identify(struct nouveau_device *device)
device->cname = "GF110";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -253,12 +261,13 @@ nvc0_identify(struct nouveau_device *device)
device->cname = "GF119";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -282,4 +291,4 @@ nvc0_identify(struct nouveau_device *device)
}
return 0;
-}
+ }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
index 03a652876e7..5c12391619f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
@@ -24,6 +24,7 @@
#include <subdev/device.h>
#include <subdev/bios.h>
+#include <subdev/bus.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <subdev/clock.h>
@@ -56,13 +57,14 @@ nve0_identify(struct nouveau_device *device)
case 0xe4:
device->cname = "GK104";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -84,13 +86,14 @@ nve0_identify(struct nouveau_device *device)
case 0xe7:
device->cname = "GK107";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -112,13 +115,14 @@ nve0_identify(struct nouveau_device *device)
case 0xe6:
device->cname = "GK106";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
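The device tables above all follow one pattern: each chipset's identify routine fills device->oclass[] with pointers to generation-specific implementation classes. Across the chipsets shown, this series replaces the single generic nouveau_i2c_oclass with chip-specific ports (nv04/nv4e/nv50/nv94/nvd0), registers the new BUS subdev everywhere, and moves the newer parts from nv50_therm to the nva3/nvd0 thermal classes. A minimal, self-contained C sketch of that dispatch-table idea follows; every name in it (subdev_id, subdev_impl, chip_identify) is invented for illustration and is not the nouveau API.

#include <stdio.h>

/* hypothetical subdevice slots, mirroring the NVDEV_SUBDEV_* indices */
enum subdev_id { SUBDEV_I2C, SUBDEV_BUS, SUBDEV_THERM, SUBDEV_NR };

/* a "class" is just a name and a constructor in this sketch */
struct subdev_impl {
	const char *name;
	int (*ctor)(void);
};

static int generic_ctor(void) { return 0; }

static const struct subdev_impl i2c_nv94   = { "i2c/nv94",   generic_ctor };
static const struct subdev_impl bus_nvc0   = { "bus/nvc0",   generic_ctor };
static const struct subdev_impl therm_nva3 = { "therm/nva3", generic_ctor };

struct device {
	const char *cname;
	const struct subdev_impl *oclass[SUBDEV_NR];
};

/* per-chipset identify: fill the table with generation-specific classes */
static void chip_identify(struct device *dev, unsigned chipset)
{
	switch (chipset) {
	case 0xc0:
		dev->cname = "GF100";
		dev->oclass[SUBDEV_I2C]   = &i2c_nv94;
		dev->oclass[SUBDEV_BUS]   = &bus_nvc0;
		dev->oclass[SUBDEV_THERM] = &therm_nva3;
		break;
	}
}

int main(void)
{
	struct device dev = { 0 };
	chip_identify(&dev, 0xc0);
	for (int i = 0; i < SUBDEV_NR; i++)
		if (dev.oclass[i])
			printf("%s: %s\n", dev.cname, dev.oclass[i]->name);
	return 0;
}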
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
index ae7249b0979..4a857783841 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
@@ -78,12 +78,13 @@ nv50_devinit_init(struct nouveau_object *object)
if (ret)
return ret;
- /* if we ran the init tables, execute first script pointer for each
- * display table output entry that has a matching dcb entry.
+ /* if we ran the init tables, we have to execute the first script
+ * pointer of each dcb entry's display encoder table in order
+ * to properly initialise each encoder.
*/
- while (priv->base.post && ver) {
- u16 data = nvbios_outp_parse(bios, i++, &ver, &hdr, &cnt, &len, &info);
- if (data && dcb_outp_match(bios, info.type, info.mask, &ver, &len, &outp)) {
+ while (priv->base.post && dcb_outp_parse(bios, i, &ver, &hdr, &outp)) {
+ if (nvbios_outp_match(bios, outp.hasht, outp.hashm,
+ &ver, &hdr, &cnt, &len, &info)) {
struct nvbios_init init = {
.subdev = nv_subdev(priv),
.bios = bios,
@@ -95,7 +96,8 @@ nv50_devinit_init(struct nouveau_object *object)
nvbios_exec(&init);
}
- };
+ i++;
+ }
return 0;
}
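The devinit change inverts the old lookup: rather than walking the VBIOS output table and matching each entry against the DCB, the loop now walks DCB output entries and looks up the matching output-table record for each one, running that record's first init script; the index is advanced explicitly at the end of the body and the stray semicolon after the old while loop disappears. Roughly the same "iterate table A, find the match in table B, run it" shape in a standalone sketch; the types and tables below are invented for illustration.

#include <stdio.h>

struct dcb_entry  { int hash; const char *name; };
struct outp_entry { int hash; const char *script; };

static const struct dcb_entry dcb[] = {
	{ 0x01, "DP-0" }, { 0x02, "TMDS-1" },
};
static const struct outp_entry outp[] = {
	{ 0x02, "init TMDS encoder" }, { 0x01, "init DP encoder" },
};

/* look up the output-table record matching a DCB entry, if any */
static const struct outp_entry *outp_match(int hash)
{
	for (unsigned i = 0; i < sizeof(outp) / sizeof(outp[0]); i++)
		if (outp[i].hash == hash)
			return &outp[i];
	return NULL;
}

int main(void)
{
	/* walk the DCB (table A); for each entry, find and "run" the
	 * matching output script (table B), then advance explicitly */
	for (unsigned i = 0; i < sizeof(dcb) / sizeof(dcb[0]); i++) {
		const struct outp_entry *e = outp_match(dcb[i].hash);
		if (e)
			printf("%s: %s\n", dcb[i].name, e->script);
	}
	return 0;
}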
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index eac236ed19b..0772ec97816 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -22,8 +22,10 @@
* Authors: Ben Skeggs
*/
-#include <core/object.h>
+#include <core/client.h>
#include <core/enum.h>
+#include <core/engctx.h>
+#include <core/object.h>
#include <subdev/fb.h>
#include <subdev/bios.h>
@@ -303,17 +305,18 @@ static const struct nouveau_enum vm_client[] = {
};
static const struct nouveau_enum vm_engine[] = {
- { 0x00000000, "PGRAPH", NULL },
- { 0x00000001, "PVP", NULL },
+ { 0x00000000, "PGRAPH", NULL, NVDEV_ENGINE_GR },
+ { 0x00000001, "PVP", NULL, NVDEV_ENGINE_VP },
{ 0x00000004, "PEEPHOLE", NULL },
- { 0x00000005, "PFIFO", vm_pfifo_subclients },
+ { 0x00000005, "PFIFO", vm_pfifo_subclients, NVDEV_ENGINE_FIFO },
{ 0x00000006, "BAR", vm_bar_subclients },
- { 0x00000008, "PPPP", NULL },
- { 0x00000009, "PBSP", NULL },
- { 0x0000000a, "PCRYPT", NULL },
+ { 0x00000008, "PPPP", NULL, NVDEV_ENGINE_PPP },
+ { 0x00000008, "PMPEG", NULL, NVDEV_ENGINE_MPEG },
+ { 0x00000009, "PBSP", NULL, NVDEV_ENGINE_BSP },
+ { 0x0000000a, "PCRYPT", NULL, NVDEV_ENGINE_CRYPT },
{ 0x0000000b, "PCOUNTER", NULL },
{ 0x0000000c, "SEMAPHORE_BG", NULL },
- { 0x0000000d, "PCOPY", NULL },
+ { 0x0000000d, "PCOPY", NULL, NVDEV_ENGINE_COPY0 },
{ 0x0000000e, "PDAEMON", NULL },
{}
};
@@ -335,8 +338,10 @@ static void
nv50_fb_intr(struct nouveau_subdev *subdev)
{
struct nouveau_device *device = nv_device(subdev);
+ struct nouveau_engine *engine;
struct nv50_fb_priv *priv = (void *)subdev;
const struct nouveau_enum *en, *cl;
+ struct nouveau_object *engctx = NULL;
u32 trap[6], idx, chan;
u8 st0, st1, st2, st3;
int i;
@@ -367,36 +372,55 @@ nv50_fb_intr(struct nouveau_subdev *subdev)
}
chan = (trap[2] << 16) | trap[1];
- nv_error(priv, "trapped %s at 0x%02x%04x%04x on channel 0x%08x ",
+ en = nouveau_enum_find(vm_engine, st0);
+
+ if (en && en->data2) {
+ const struct nouveau_enum *orig_en = en;
+ while (en->name && en->value == st0 && en->data2) {
+ engine = nouveau_engine(subdev, en->data2);
+ if (engine) {
+ engctx = nouveau_engctx_get(engine, chan);
+ if (engctx)
+ break;
+ }
+ en++;
+ }
+ if (!engctx)
+ en = orig_en;
+ }
+
+ nv_error(priv, "trapped %s at 0x%02x%04x%04x on channel 0x%08x [%s] ",
(trap[5] & 0x00000100) ? "read" : "write",
- trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, chan);
+ trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, chan,
+ nouveau_client_name(engctx));
+
+ nouveau_engctx_put(engctx);
- en = nouveau_enum_find(vm_engine, st0);
if (en)
- printk("%s/", en->name);
+ pr_cont("%s/", en->name);
else
- printk("%02x/", st0);
+ pr_cont("%02x/", st0);
cl = nouveau_enum_find(vm_client, st2);
if (cl)
- printk("%s/", cl->name);
+ pr_cont("%s/", cl->name);
else
- printk("%02x/", st2);
+ pr_cont("%02x/", st2);
if (cl && cl->data) cl = nouveau_enum_find(cl->data, st3);
else if (en && en->data) cl = nouveau_enum_find(en->data, st3);
else cl = NULL;
if (cl)
- printk("%s", cl->name);
+ pr_cont("%s", cl->name);
else
- printk("%02x", st3);
+ pr_cont("%02x", st3);
- printk(" reason: ");
+ pr_cont(" reason: ");
en = nouveau_enum_find(vm_fault, st1);
if (en)
- printk("%s\n", en->name);
+ pr_cont("%s\n", en->name);
else
- printk("0x%08x\n", st1);
+ pr_cont("0x%08x\n", st1);
}
static int
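Two things change in the nv50 fault handler: the vm_engine entries gain an engine index in their fourth field, so the handler can look up the engine context that owns the faulting channel and print its client name, and the pieces of the multi-part message are now emitted with pr_cont so they continue the same log line. The underlying idiom of mapping a raw status field to a symbolic name with a hex fallback is easy to show standalone; the names below mirror, but are not, the nouveau_enum API.

#include <stdio.h>

struct name_entry { unsigned value; const char *name; };

static const struct name_entry vm_engine[] = {
	{ 0x00, "PGRAPH" }, { 0x05, "PFIFO" }, { 0x0d, "PCOPY" }, { 0 }
};

/* find a human-readable name for a raw status field, if we know one */
static const struct name_entry *enum_find(const struct name_entry *en,
					  unsigned value)
{
	while (en->name) {
		if (en->value == value)
			return en;
		en++;
	}
	return NULL;
}

int main(void)
{
	unsigned st0 = 0x05, st_unknown = 0x17;
	const struct name_entry *en;

	/* print the symbolic name when known, raw hex otherwise,
	 * building a single line out of several prints */
	en = enum_find(vm_engine, st0);
	printf("trapped on %s/", en ? en->name : "??");
	en = enum_find(vm_engine, st_unknown);
	if (en)
		printf("%s\n", en->name);
	else
		printf("%02x\n", st_unknown);
	return 0;
}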
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
index 9fb0f9b92d4..d422acc9af1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
@@ -102,135 +102,19 @@ nouveau_gpio_get(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line)
return ret;
}
-static int
-nouveau_gpio_irq(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line, bool on)
-{
- struct dcb_gpio_func func;
- int ret;
-
- ret = nouveau_gpio_find(gpio, idx, tag, line, &func);
- if (ret == 0) {
- if (idx == 0 && gpio->irq_enable)
- gpio->irq_enable(gpio, func.line, on);
- else
- ret = -ENODEV;
- }
-
- return ret;
-}
-
-struct gpio_isr {
- struct nouveau_gpio *gpio;
- struct list_head head;
- struct work_struct work;
- int idx;
- struct dcb_gpio_func func;
- void (*handler)(void *, int);
- void *data;
- bool inhibit;
-};
-
-static void
-nouveau_gpio_isr_bh(struct work_struct *work)
-{
- struct gpio_isr *isr = container_of(work, struct gpio_isr, work);
- struct nouveau_gpio *gpio = isr->gpio;
- unsigned long flags;
- int state;
-
- state = nouveau_gpio_get(gpio, isr->idx, isr->func.func,
- isr->func.line);
- if (state >= 0)
- isr->handler(isr->data, state);
-
- spin_lock_irqsave(&gpio->lock, flags);
- isr->inhibit = false;
- spin_unlock_irqrestore(&gpio->lock, flags);
-}
-
-static void
-nouveau_gpio_isr_run(struct nouveau_gpio *gpio, int idx, u32 line_mask)
-{
- struct gpio_isr *isr;
-
- if (idx != 0)
- return;
-
- spin_lock(&gpio->lock);
- list_for_each_entry(isr, &gpio->isr, head) {
- if (line_mask & (1 << isr->func.line)) {
- if (isr->inhibit)
- continue;
- isr->inhibit = true;
- schedule_work(&isr->work);
- }
- }
- spin_unlock(&gpio->lock);
-}
-
-static int
-nouveau_gpio_isr_add(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
- void (*handler)(void *, int), void *data)
-{
- struct gpio_isr *isr;
- unsigned long flags;
- int ret;
-
- isr = kzalloc(sizeof(*isr), GFP_KERNEL);
- if (!isr)
- return -ENOMEM;
-
- ret = nouveau_gpio_find(gpio, idx, tag, line, &isr->func);
- if (ret) {
- kfree(isr);
- return ret;
- }
-
- INIT_WORK(&isr->work, nouveau_gpio_isr_bh);
- isr->gpio = gpio;
- isr->handler = handler;
- isr->data = data;
- isr->idx = idx;
-
- spin_lock_irqsave(&gpio->lock, flags);
- list_add(&isr->head, &gpio->isr);
- spin_unlock_irqrestore(&gpio->lock, flags);
- return 0;
-}
-
-static void
-nouveau_gpio_isr_del(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
- void (*handler)(void *, int), void *data)
+void
+_nouveau_gpio_dtor(struct nouveau_object *object)
{
- struct gpio_isr *isr, *tmp;
- struct dcb_gpio_func func;
- unsigned long flags;
- LIST_HEAD(tofree);
- int ret;
-
- ret = nouveau_gpio_find(gpio, idx, tag, line, &func);
- if (ret == 0) {
- spin_lock_irqsave(&gpio->lock, flags);
- list_for_each_entry_safe(isr, tmp, &gpio->isr, head) {
- if (memcmp(&isr->func, &func, sizeof(func)) ||
- isr->idx != idx ||
- isr->handler != handler || isr->data != data)
- continue;
- list_move_tail(&isr->head, &tofree);
- }
- spin_unlock_irqrestore(&gpio->lock, flags);
-
- list_for_each_entry_safe(isr, tmp, &tofree, head) {
- flush_work(&isr->work);
- kfree(isr);
- }
- }
+ struct nouveau_gpio *gpio = (void *)object;
+ nouveau_event_destroy(&gpio->events);
+ nouveau_subdev_destroy(&gpio->base);
}
int
nouveau_gpio_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
- struct nouveau_oclass *oclass, int length, void **pobject)
+ struct nouveau_oclass *oclass, int lines,
+ int length, void **pobject)
{
struct nouveau_gpio *gpio;
int ret;
@@ -241,15 +125,13 @@ nouveau_gpio_create_(struct nouveau_object *parent,
if (ret)
return ret;
+ ret = nouveau_event_create(lines, &gpio->events);
+ if (ret)
+ return ret;
+
gpio->find = nouveau_gpio_find;
gpio->set = nouveau_gpio_set;
gpio->get = nouveau_gpio_get;
- gpio->irq = nouveau_gpio_irq;
- gpio->isr_run = nouveau_gpio_isr_run;
- gpio->isr_add = nouveau_gpio_isr_add;
- gpio->isr_del = nouveau_gpio_isr_del;
- INIT_LIST_HEAD(&gpio->isr);
- spin_lock_init(&gpio->lock);
return 0;
}
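This is the heart of the GPIO rework: the hand-rolled ISR list (spinlock, per-handler work items, inhibit flags) is deleted, and each GPIO object instead creates a generic event object with one slot per line. Consumers subscribe to a line, the chip-specific code only supplies enable/disable hooks, and its interrupt handler simply triggers the event for each pending line. A minimal userspace sketch of that shape follows; it uses a plain array of handler slots and invented names, whereas the real nouveau_event code also deals with locking and reference counting.

#include <stdio.h>

#define NR_LINES 32

typedef void (*line_handler)(void *data, int line);

struct line_event {
	line_handler handler[NR_LINES];
	void        *data[NR_LINES];
	void (*hw_enable)(int line);    /* provided by the hardware layer */
	void (*hw_disable)(int line);
};

static void event_subscribe(struct line_event *ev, int line,
			    line_handler fn, void *data)
{
	ev->handler[line] = fn;
	ev->data[line] = data;
	ev->hw_enable(line);
}

static void event_trigger(struct line_event *ev, int line)
{
	if (ev->handler[line])
		ev->handler[line](ev->data[line], line);
}

/* ---- hardware side ---- */
static void hw_enable(int line)  { printf("unmask gpio line %d\n", line); }
static void hw_disable(int line) { printf("mask gpio line %d\n", line); }

static void hpd_handler(void *data, int line)
{
	printf("hotplug event on line %d (%s)\n", line, (const char *)data);
}

int main(void)
{
	struct line_event ev = { .hw_enable = hw_enable,
				 .hw_disable = hw_disable };

	event_subscribe(&ev, 9, hpd_handler, "DP-1");
	event_trigger(&ev, 9);        /* what the IRQ handler would do */
	ev.hw_disable(9);             /* unsubscribe path */
	return 0;
}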
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c
index 168d16a9a8e..76d5d5465dd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c
@@ -24,7 +24,7 @@
*
*/
-#include <subdev/gpio.h>
+#include "priv.h"
struct nv10_gpio_priv {
struct nouveau_gpio base;
@@ -83,27 +83,36 @@ nv10_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
}
static void
-nv10_gpio_irq_enable(struct nouveau_gpio *gpio, int line, bool on)
-{
- u32 mask = 0x00010001 << line;
-
- nv_wr32(gpio, 0x001104, mask);
- nv_mask(gpio, 0x001144, mask, on ? mask : 0);
-}
-
-static void
nv10_gpio_intr(struct nouveau_subdev *subdev)
{
struct nv10_gpio_priv *priv = (void *)subdev;
u32 intr = nv_rd32(priv, 0x001104);
u32 hi = (intr & 0x0000ffff) >> 0;
u32 lo = (intr & 0xffff0000) >> 16;
+ int i;
- priv->base.isr_run(&priv->base, 0, hi | lo);
+ for (i = 0; (hi | lo) && i < 32; i++) {
+ if ((hi | lo) & (1 << i))
+ nouveau_event_trigger(priv->base.events, i);
+ }
nv_wr32(priv, 0x001104, intr);
}
+static void
+nv10_gpio_intr_enable(struct nouveau_event *event, int line)
+{
+ nv_wr32(event->priv, 0x001104, 0x00010001 << line);
+ nv_mask(event->priv, 0x001144, 0x00010001 << line, 0x00010001 << line);
+}
+
+static void
+nv10_gpio_intr_disable(struct nouveau_event *event, int line)
+{
+ nv_wr32(event->priv, 0x001104, 0x00010001 << line);
+ nv_mask(event->priv, 0x001144, 0x00010001 << line, 0x00000000);
+}
+
static int
nv10_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -112,14 +121,16 @@ nv10_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv10_gpio_priv *priv;
int ret;
- ret = nouveau_gpio_create(parent, engine, oclass, &priv);
+ ret = nouveau_gpio_create(parent, engine, oclass, 16, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
priv->base.drive = nv10_gpio_drive;
priv->base.sense = nv10_gpio_sense;
- priv->base.irq_enable = nv10_gpio_irq_enable;
+ priv->base.events->priv = priv;
+ priv->base.events->enable = nv10_gpio_intr_enable;
+ priv->base.events->disable = nv10_gpio_intr_disable;
nv_subdev(priv)->intr = nv10_gpio_intr;
return 0;
}
@@ -141,8 +152,6 @@ nv10_gpio_init(struct nouveau_object *object)
if (ret)
return ret;
- nv_wr32(priv, 0x001140, 0x00000000);
- nv_wr32(priv, 0x001100, 0xffffffff);
nv_wr32(priv, 0x001144, 0x00000000);
nv_wr32(priv, 0x001104, 0xffffffff);
return 0;
@@ -152,7 +161,6 @@ static int
nv10_gpio_fini(struct nouveau_object *object, bool suspend)
{
struct nv10_gpio_priv *priv = (void *)object;
- nv_wr32(priv, 0x001140, 0x00000000);
nv_wr32(priv, 0x001144, 0x00000000);
return nouveau_gpio_fini(&priv->base, suspend);
}
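The nv10 interrupt path shows the decoding pattern reused by the other GPIO implementations: the status register packs rising and falling edges into its high and low 16-bit halves, the handler ORs the two halves and triggers one event per set bit, and enable/disable write a 0x00010001 << line mask so both edge bits of a line are touched together. A standalone sketch of that bit-scan, with plain variables standing in for the MMIO registers:

#include <stdio.h>

/* fake "registers" standing in for the mmio reads/writes */
static unsigned gpio_stat = 0x00050002;   /* pending edge bits */
static unsigned gpio_mask = 0xffffffff;   /* per-line interrupt enables */

static void line_event(int line)
{
	printf("event on gpio line %d\n", line);
}

int main(void)
{
	/* split rising/falling halves, OR them, dispatch per set bit */
	unsigned hi = (gpio_stat & 0x0000ffff) >> 0;
	unsigned lo = (gpio_stat & 0xffff0000) >> 16;

	for (int i = 0; (hi | lo) && i < 16; i++) {
		if ((hi | lo) & (1u << i))
			line_event(i);
	}
	gpio_stat = 0;                 /* ack: write the pending bits back */

	/* disable line 3: clear both edge-enable bits in one go */
	gpio_mask &= ~(0x00010001u << 3);
	printf("mask now 0x%08x\n", gpio_mask);
	return 0;
}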
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
index bf13a1200f2..bf489dcf46e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
@@ -22,7 +22,7 @@
* Authors: Ben Skeggs
*/
-#include <subdev/gpio.h>
+#include "priv.h"
struct nv50_gpio_priv {
struct nouveau_gpio base;
@@ -95,21 +95,12 @@ nv50_gpio_sense(struct nouveau_gpio *gpio, int line)
}
void
-nv50_gpio_irq_enable(struct nouveau_gpio *gpio, int line, bool on)
-{
- u32 reg = line < 16 ? 0xe050 : 0xe070;
- u32 mask = 0x00010001 << (line & 0xf);
-
- nv_wr32(gpio, reg + 4, mask);
- nv_mask(gpio, reg + 0, mask, on ? mask : 0);
-}
-
-void
nv50_gpio_intr(struct nouveau_subdev *subdev)
{
struct nv50_gpio_priv *priv = (void *)subdev;
u32 intr0, intr1 = 0;
u32 hi, lo;
+ int i;
intr0 = nv_rd32(priv, 0xe054) & nv_rd32(priv, 0xe050);
if (nv_device(priv)->chipset >= 0x90)
@@ -117,13 +108,35 @@ nv50_gpio_intr(struct nouveau_subdev *subdev)
hi = (intr0 & 0x0000ffff) | (intr1 << 16);
lo = (intr0 >> 16) | (intr1 & 0xffff0000);
- priv->base.isr_run(&priv->base, 0, hi | lo);
+
+ for (i = 0; (hi | lo) && i < 32; i++) {
+ if ((hi | lo) & (1 << i))
+ nouveau_event_trigger(priv->base.events, i);
+ }
nv_wr32(priv, 0xe054, intr0);
if (nv_device(priv)->chipset >= 0x90)
nv_wr32(priv, 0xe074, intr1);
}
+void
+nv50_gpio_intr_enable(struct nouveau_event *event, int line)
+{
+ const u32 addr = line < 16 ? 0xe050 : 0xe070;
+ const u32 mask = 0x00010001 << (line & 0xf);
+ nv_wr32(event->priv, addr + 0x04, mask);
+ nv_mask(event->priv, addr + 0x00, mask, mask);
+}
+
+void
+nv50_gpio_intr_disable(struct nouveau_event *event, int line)
+{
+ const u32 addr = line < 16 ? 0xe050 : 0xe070;
+ const u32 mask = 0x00010001 << (line & 0xf);
+ nv_wr32(event->priv, addr + 0x04, mask);
+ nv_mask(event->priv, addr + 0x00, mask, 0x00000000);
+}
+
static int
nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -132,7 +145,9 @@ nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv50_gpio_priv *priv;
int ret;
- ret = nouveau_gpio_create(parent, engine, oclass, &priv);
+ ret = nouveau_gpio_create(parent, engine, oclass,
+ nv_device(parent)->chipset >= 0x90 ? 32 : 16,
+ &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -140,7 +155,9 @@ nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->base.reset = nv50_gpio_reset;
priv->base.drive = nv50_gpio_drive;
priv->base.sense = nv50_gpio_sense;
- priv->base.irq_enable = nv50_gpio_irq_enable;
+ priv->base.events->priv = priv;
+ priv->base.events->enable = nv50_gpio_intr_enable;
+ priv->base.events->disable = nv50_gpio_intr_disable;
nv_subdev(priv)->intr = nv50_gpio_intr;
return 0;
}
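nv50 extends the same scheme to up to 32 lines spread over two register banks (0xe050/0xe054 and, on chipsets from 0x90 on, 0xe070/0xe074): the enable/disable hooks first pick the bank from the line number, then apply the 0x00010001 mask shifted by the line's low nibble, and they ack the pending bits at +0x04 before touching the enable register at +0x00. The bank/mask computation in isolation, with placeholder base addresses rather than the real offsets:

#include <stdio.h>

/* compute which register bank and which bit pair a line maps to;
 * the base addresses are placeholders, not real hardware offsets */
static void line_to_reg(int line, unsigned *addr, unsigned *mask)
{
	*addr = line < 16 ? 0x1000 : 0x1020;     /* bank 0 or bank 1 */
	*mask = 0x00010001u << (line & 0xf);     /* hi+lo edge bits */
}

int main(void)
{
	for (int line = 0; line < 32; line += 13) {
		unsigned addr, mask;
		line_to_reg(line, &addr, &mask);
		printf("line %2d -> ack 0x%04x+0x04, enable 0x%04x, mask 0x%08x\n",
		       line, addr, addr, mask);
	}
	return 0;
}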
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
index 83e8b8f16e6..010431e3ace 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
@@ -22,13 +22,13 @@
* Authors: Ben Skeggs
*/
-#include <subdev/gpio.h>
+#include "priv.h"
struct nvd0_gpio_priv {
struct nouveau_gpio base;
};
-static void
+void
nvd0_gpio_reset(struct nouveau_gpio *gpio, u8 match)
{
struct nouveau_bios *bios = nouveau_bios(gpio);
@@ -57,7 +57,7 @@ nvd0_gpio_reset(struct nouveau_gpio *gpio, u8 match)
}
}
-static int
+int
nvd0_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
{
u32 data = ((dir ^ 1) << 13) | (out << 12);
@@ -66,7 +66,7 @@ nvd0_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
return 0;
}
-static int
+int
nvd0_gpio_sense(struct nouveau_gpio *gpio, int line)
{
return !!(nv_rd32(gpio, 0x00d610 + (line * 4)) & 0x00004000);
@@ -80,7 +80,7 @@ nvd0_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nvd0_gpio_priv *priv;
int ret;
- ret = nouveau_gpio_create(parent, engine, oclass, &priv);
+ ret = nouveau_gpio_create(parent, engine, oclass, 32, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -88,7 +88,9 @@ nvd0_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->base.reset = nvd0_gpio_reset;
priv->base.drive = nvd0_gpio_drive;
priv->base.sense = nvd0_gpio_sense;
- priv->base.irq_enable = nv50_gpio_irq_enable;
+ priv->base.events->priv = priv;
+ priv->base.events->enable = nv50_gpio_intr_enable;
+ priv->base.events->disable = nv50_gpio_intr_disable;
nv_subdev(priv)->intr = nv50_gpio_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c
new file mode 100644
index 00000000000..16b8c5bf5ef
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "priv.h"
+
+struct nve0_gpio_priv {
+ struct nouveau_gpio base;
+};
+
+void
+nve0_gpio_intr(struct nouveau_subdev *subdev)
+{
+ struct nve0_gpio_priv *priv = (void *)subdev;
+ u32 intr0 = nv_rd32(priv, 0xdc00) & nv_rd32(priv, 0xdc08);
+ u32 intr1 = nv_rd32(priv, 0xdc80) & nv_rd32(priv, 0xdc88);
+ u32 hi = (intr0 & 0x0000ffff) | (intr1 << 16);
+ u32 lo = (intr0 >> 16) | (intr1 & 0xffff0000);
+ int i;
+
+ for (i = 0; (hi | lo) && i < 32; i++) {
+ if ((hi | lo) & (1 << i))
+ nouveau_event_trigger(priv->base.events, i);
+ }
+
+ nv_wr32(priv, 0xdc00, intr0);
+ nv_wr32(priv, 0xdc88, intr1);
+}
+
+void
+nve0_gpio_intr_enable(struct nouveau_event *event, int line)
+{
+ const u32 addr = line < 16 ? 0xdc00 : 0xdc80;
+ const u32 mask = 0x00010001 << (line & 0xf);
+ nv_wr32(event->priv, addr + 0x08, mask);
+ nv_mask(event->priv, addr + 0x00, mask, mask);
+}
+
+void
+nve0_gpio_intr_disable(struct nouveau_event *event, int line)
+{
+ const u32 addr = line < 16 ? 0xdc00 : 0xdc80;
+ const u32 mask = 0x00010001 << (line & 0xf);
+ nv_wr32(event->priv, addr + 0x08, mask);
+ nv_mask(event->priv, addr + 0x00, mask, 0x00000000);
+}
+
+int
+nve0_gpio_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nve0_gpio_priv *priv = (void *)object;
+ nv_wr32(priv, 0xdc08, 0x00000000);
+ nv_wr32(priv, 0xdc88, 0x00000000);
+ return nouveau_gpio_fini(&priv->base, suspend);
+}
+
+int
+nve0_gpio_init(struct nouveau_object *object)
+{
+ struct nve0_gpio_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_gpio_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0xdc00, 0xffffffff);
+ nv_wr32(priv, 0xdc80, 0xffffffff);
+ return 0;
+}
+
+void
+nve0_gpio_dtor(struct nouveau_object *object)
+{
+ struct nve0_gpio_priv *priv = (void *)object;
+ nouveau_gpio_destroy(&priv->base);
+}
+
+static int
+nve0_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nve0_gpio_priv *priv;
+ int ret;
+
+ ret = nouveau_gpio_create(parent, engine, oclass, 32, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.reset = nvd0_gpio_reset;
+ priv->base.drive = nvd0_gpio_drive;
+ priv->base.sense = nvd0_gpio_sense;
+ priv->base.events->priv = priv;
+ priv->base.events->enable = nve0_gpio_intr_enable;
+ priv->base.events->disable = nve0_gpio_intr_disable;
+ nv_subdev(priv)->intr = nve0_gpio_intr;
+ return 0;
+}
+
+struct nouveau_oclass
+nve0_gpio_oclass = {
+ .handle = NV_SUBDEV(GPIO, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_gpio_ctor,
+ .dtor = nv50_gpio_dtor,
+ .init = nve0_gpio_init,
+ .fini = nve0_gpio_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h b/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h
new file mode 100644
index 00000000000..2ee1c895c78
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h
@@ -0,0 +1,17 @@
+#ifndef __NVKM_GPIO_H__
+#define __NVKM_GPIO_H__
+
+#include <subdev/gpio.h>
+
+void nv50_gpio_dtor(struct nouveau_object *);
+int nv50_gpio_init(struct nouveau_object *);
+int nv50_gpio_fini(struct nouveau_object *, bool);
+void nv50_gpio_intr(struct nouveau_subdev *);
+void nv50_gpio_intr_enable(struct nouveau_event *, int line);
+void nv50_gpio_intr_disable(struct nouveau_event *, int line);
+
+void nvd0_gpio_reset(struct nouveau_gpio *, u8);
+int nvd0_gpio_drive(struct nouveau_gpio *, int, int, int);
+int nvd0_gpio_sense(struct nouveau_gpio *, int);
+
+#endif
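The new priv.h exists so the nvd0 hooks and the nv50 interrupt/enable helpers can stop being static and be reused by other implementations (nve0 above) without exporting them through the public subdev/gpio.h header. The pattern is simply an implementation-private header shared between the .c files of one subsystem; sketched below with hypothetical names, header and user compressed into one listing so it builds standalone.

/* --- gpio_priv.h: shared only inside the gpio/ directory --- */
#ifndef GPIO_PRIV_H
#define GPIO_PRIV_H
int  shared_gpio_sense(int line);           /* defined by one implementation */
void shared_gpio_intr_enable(int line);     /* reused by a newer one */
#endif

/* --- gpio_newchip.c: reuses the older chip's helpers --- */
#include <stdio.h>

int  shared_gpio_sense(int line)       { return line & 1; }
void shared_gpio_intr_enable(int line) { printf("enable line %d\n", line); }

int main(void)
{
	shared_gpio_intr_enable(4);
	printf("line 4 reads %d\n", shared_gpio_sense(4));
	return 0;
}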
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c
new file mode 100644
index 00000000000..dec94e9d776
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c
@@ -0,0 +1,279 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/i2c.h>
+
+struct anx9805_i2c_port {
+ struct nouveau_i2c_port base;
+ u32 addr;
+ u32 ctrl;
+};
+
+static int
+anx9805_train(struct nouveau_i2c_port *port, int link_nr, int link_bw, bool enh)
+{
+ struct anx9805_i2c_port *chan = (void *)port;
+ struct nouveau_i2c_port *mast = (void *)nv_object(chan)->parent;
+ u8 tmp, i;
+
+ nv_wri2cr(mast, chan->addr, 0xa0, link_bw);
+ nv_wri2cr(mast, chan->addr, 0xa1, link_nr | (enh ? 0x80 : 0x00));
+ nv_wri2cr(mast, chan->addr, 0xa2, 0x01);
+ nv_wri2cr(mast, chan->addr, 0xa8, 0x01);
+
+ i = 0;
+ while ((tmp = nv_rdi2cr(mast, chan->addr, 0xa8)) & 0x01) {
+ mdelay(5);
+ if (i++ == 100) {
+ nv_error(port, "link training timed out\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ if (tmp & 0x70) {
+ nv_error(port, "link training failed: 0x%02x\n", tmp);
+ return -EIO;
+ }
+
+ return 1;
+}
+
+static int
+anx9805_aux(struct nouveau_i2c_port *port, u8 type, u32 addr, u8 *data, u8 size)
+{
+ struct anx9805_i2c_port *chan = (void *)port;
+ struct nouveau_i2c_port *mast = (void *)nv_object(chan)->parent;
+ int i, ret = -ETIMEDOUT;
+ u8 tmp;
+
+ tmp = nv_rdi2cr(mast, chan->ctrl, 0x07) & ~0x04;
+ nv_wri2cr(mast, chan->ctrl, 0x07, tmp | 0x04);
+ nv_wri2cr(mast, chan->ctrl, 0x07, tmp);
+ nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01);
+
+ nv_wri2cr(mast, chan->addr, 0xe4, 0x80);
+ for (i = 0; !(type & 1) && i < size; i++)
+ nv_wri2cr(mast, chan->addr, 0xf0 + i, data[i]);
+ nv_wri2cr(mast, chan->addr, 0xe5, ((size - 1) << 4) | type);
+ nv_wri2cr(mast, chan->addr, 0xe6, (addr & 0x000ff) >> 0);
+ nv_wri2cr(mast, chan->addr, 0xe7, (addr & 0x0ff00) >> 8);
+ nv_wri2cr(mast, chan->addr, 0xe8, (addr & 0xf0000) >> 16);
+ nv_wri2cr(mast, chan->addr, 0xe9, 0x01);
+
+ i = 0;
+ while ((tmp = nv_rdi2cr(mast, chan->addr, 0xe9)) & 0x01) {
+ mdelay(5);
+ if (i++ == 32)
+ goto done;
+ }
+
+ if ((tmp = nv_rdi2cr(mast, chan->ctrl, 0xf7)) & 0x01) {
+ ret = -EIO;
+ goto done;
+ }
+
+ for (i = 0; (type & 1) && i < size; i++)
+ data[i] = nv_rdi2cr(mast, chan->addr, 0xf0 + i);
+ ret = 0;
+done:
+ nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01);
+ return ret;
+}
+
+static const struct nouveau_i2c_func
+anx9805_aux_func = {
+ .aux = anx9805_aux,
+ .lnk_ctl = anx9805_train,
+};
+
+static int
+anx9805_aux_chan_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_i2c_port *mast = (void *)parent;
+ struct anx9805_i2c_port *chan;
+ int ret;
+
+ ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+ &nouveau_i2c_aux_algo, &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ switch ((oclass->handle & 0xff00) >> 8) {
+ case 0x0d:
+ chan->addr = 0x38;
+ chan->ctrl = 0x39;
+ break;
+ case 0x0e:
+ chan->addr = 0x3c;
+ chan->ctrl = 0x3b;
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ if (mast->adapter.algo == &i2c_bit_algo) {
+ struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
+ algo->udelay = max(algo->udelay, 40);
+ }
+
+ chan->base.func = &anx9805_aux_func;
+ return 0;
+}
+
+static struct nouveau_ofuncs
+anx9805_aux_ofuncs = {
+ .ctor = anx9805_aux_chan_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = _nouveau_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+};
+
+static int
+anx9805_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ struct anx9805_i2c_port *port = adap->algo_data;
+ struct nouveau_i2c_port *mast = (void *)nv_object(port)->parent;
+ struct i2c_msg *msg = msgs;
+ int ret = -ETIMEDOUT;
+ int i, j, cnt = num;
+ u8 seg = 0x00, off = 0x00, tmp;
+
+ tmp = nv_rdi2cr(mast, port->ctrl, 0x07) & ~0x10;
+ nv_wri2cr(mast, port->ctrl, 0x07, tmp | 0x10);
+ nv_wri2cr(mast, port->ctrl, 0x07, tmp);
+ nv_wri2cr(mast, port->addr, 0x43, 0x05);
+ mdelay(5);
+
+ while (cnt--) {
+ if ( (msg->flags & I2C_M_RD) && msg->addr == 0x50) {
+ nv_wri2cr(mast, port->addr, 0x40, msg->addr << 1);
+ nv_wri2cr(mast, port->addr, 0x41, seg);
+ nv_wri2cr(mast, port->addr, 0x42, off);
+ nv_wri2cr(mast, port->addr, 0x44, msg->len);
+ nv_wri2cr(mast, port->addr, 0x45, 0x00);
+ nv_wri2cr(mast, port->addr, 0x43, 0x01);
+ for (i = 0; i < msg->len; i++) {
+ j = 0;
+ while (nv_rdi2cr(mast, port->addr, 0x46) & 0x10) {
+ mdelay(5);
+ if (j++ == 32)
+ goto done;
+ }
+ msg->buf[i] = nv_rdi2cr(mast, port->addr, 0x47);
+ }
+ } else
+ if (!(msg->flags & I2C_M_RD)) {
+ if (msg->addr == 0x50 && msg->len == 0x01) {
+ off = msg->buf[0];
+ } else
+ if (msg->addr == 0x30 && msg->len == 0x01) {
+ seg = msg->buf[0];
+ } else
+ goto done;
+ } else {
+ goto done;
+ }
+ msg++;
+ }
+
+ ret = num;
+done:
+ nv_wri2cr(mast, port->addr, 0x43, 0x00);
+ return ret;
+}
+
+static u32
+anx9805_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm
+anx9805_i2c_algo = {
+ .master_xfer = anx9805_xfer,
+ .functionality = anx9805_func
+};
+
+static const struct nouveau_i2c_func
+anx9805_i2c_func = {
+};
+
+static int
+anx9805_ddc_port_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_i2c_port *mast = (void *)parent;
+ struct anx9805_i2c_port *port;
+ int ret;
+
+ ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+ &anx9805_i2c_algo, &port);
+ *pobject = nv_object(port);
+ if (ret)
+ return ret;
+
+ switch ((oclass->handle & 0xff00) >> 8) {
+ case 0x0d:
+ port->addr = 0x3d;
+ port->ctrl = 0x39;
+ break;
+ case 0x0e:
+ port->addr = 0x3f;
+ port->ctrl = 0x3b;
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ if (mast->adapter.algo == &i2c_bit_algo) {
+ struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
+ algo->udelay = max(algo->udelay, 40);
+ }
+
+ port->base.func = &anx9805_i2c_func;
+ return 0;
+}
+
+static struct nouveau_ofuncs
+anx9805_ddc_ofuncs = {
+ .ctor = anx9805_ddc_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = _nouveau_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+};
+
+struct nouveau_oclass
+nouveau_anx9805_sclass[] = {
+ { .handle = NV_I2C_TYPE_EXTDDC(0x0d), .ofuncs = &anx9805_ddc_ofuncs },
+ { .handle = NV_I2C_TYPE_EXTAUX(0x0d), .ofuncs = &anx9805_aux_ofuncs },
+ { .handle = NV_I2C_TYPE_EXTDDC(0x0e), .ofuncs = &anx9805_ddc_ofuncs },
+ { .handle = NV_I2C_TYPE_EXTAUX(0x0e), .ofuncs = &anx9805_aux_ofuncs },
+ {}
+};
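The new ANX9805 code drives an external DisplayPort encoder entirely over the parent I2C bus: AUX transfers and link training are started by writing the request into the encoder's registers, then polling a busy bit with a bounded retry loop (a short delay between reads and a hard cap on iterations before giving up with a timeout). That poll-with-timeout shape in a standalone sketch, with a fake device replacing the I2C register accessors:

#include <stdio.h>
#include <errno.h>

/* fake device: the "busy" bit clears after a few polls */
static int polls_left = 3;

static unsigned read_status(void)
{
	return polls_left-- > 0 ? 0x01 : 0x00;   /* bit 0 = busy */
}

static void start_transaction(void)
{
	printf("request written, waiting for completion\n");
}

/* poll the busy bit with an upper bound instead of waiting forever */
static int wait_idle(int max_tries)
{
	int i = 0;
	unsigned status;

	while ((status = read_status()) & 0x01) {
		/* the real code sleeps ~5ms between polls */
		if (i++ == max_tries) {
			fprintf(stderr, "transaction timed out (0x%02x)\n",
				status);
			return -ETIMEDOUT;
		}
	}
	return 0;
}

int main(void)
{
	start_transaction();
	if (wait_idle(100) == 0)
		printf("transaction complete\n");
	return 0;
}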
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
index dc27e794a85..5de074ad170 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
@@ -24,151 +24,40 @@
#include <subdev/i2c.h>
-/******************************************************************************
- * aux channel util functions
- *****************************************************************************/
-#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
-#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
-
-static void
-auxch_fini(struct nouveau_i2c *aux, int ch)
-{
- nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000);
-}
-
-static int
-auxch_init(struct nouveau_i2c *aux, int ch)
-{
- const u32 unksel = 1; /* nfi which to use, or if it matters.. */
- const u32 ureq = unksel ? 0x00100000 : 0x00200000;
- const u32 urep = unksel ? 0x01000000 : 0x02000000;
- u32 ctrl, timeout;
-
- /* wait up to 1ms for any previous transaction to be done... */
- timeout = 1000;
- do {
- ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
- udelay(1);
- if (!timeout--) {
- AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
- return -EBUSY;
- }
- } while (ctrl & 0x03010000);
-
- /* set some magic, and wait up to 1ms for it to appear */
- nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00300000, ureq);
- timeout = 1000;
- do {
- ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
- udelay(1);
- if (!timeout--) {
- AUX_ERR("magic wait 0x%08x\n", ctrl);
- auxch_fini(aux, ch);
- return -EBUSY;
- }
- } while ((ctrl & 0x03000000) != urep);
-
- return 0;
-}
-
-static int
-auxch_tx(struct nouveau_i2c *aux, int ch, u8 type, u32 addr, u8 *data, u8 size)
-{
- u32 ctrl, stat, timeout, retries;
- u32 xbuf[4] = {};
- int ret, i;
-
- AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
-
- ret = auxch_init(aux, ch);
- if (ret)
- goto out;
-
- stat = nv_rd32(aux, 0x00e4e8 + (ch * 0x50));
- if (!(stat & 0x10000000)) {
- AUX_DBG("sink not detected\n");
- ret = -ENXIO;
- goto out;
- }
-
- if (!(type & 1)) {
- memcpy(xbuf, data, size);
- for (i = 0; i < 16; i += 4) {
- AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
- nv_wr32(aux, 0x00e4c0 + (ch * 0x50) + i, xbuf[i / 4]);
- }
- }
-
- ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
- ctrl &= ~0x0001f0ff;
- ctrl |= type << 12;
- ctrl |= size - 1;
- nv_wr32(aux, 0x00e4e0 + (ch * 0x50), addr);
-
- /* retry transaction a number of times on failure... */
- ret = -EREMOTEIO;
- for (retries = 0; retries < 32; retries++) {
- /* reset, and delay a while if this is a retry */
- nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl);
- nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl);
- if (retries)
- udelay(400);
-
- /* transaction request, wait up to 1ms for it to complete */
- nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00010000 | ctrl);
-
- timeout = 1000;
- do {
- ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
- udelay(1);
- if (!timeout--) {
- AUX_ERR("tx req timeout 0x%08x\n", ctrl);
- goto out;
- }
- } while (ctrl & 0x00010000);
-
- /* read status, and check if transaction completed ok */
- stat = nv_mask(aux, 0x00e4e8 + (ch * 0x50), 0, 0);
- if (!(stat & 0x000f0f00)) {
- ret = 0;
- break;
- }
-
- AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
- }
-
- if (type & 1) {
- for (i = 0; i < 16; i += 4) {
- xbuf[i / 4] = nv_rd32(aux, 0x00e4d0 + (ch * 0x50) + i);
- AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
- }
- memcpy(data, xbuf, size);
- }
-
-out:
- auxch_fini(aux, ch);
- return ret;
-}
-
int
-nv_rdaux(struct nouveau_i2c_port *auxch, u32 addr, u8 *data, u8 size)
+nv_rdaux(struct nouveau_i2c_port *port, u32 addr, u8 *data, u8 size)
{
- return auxch_tx(auxch->i2c, auxch->drive, 9, addr, data, size);
+ if (port->func->aux) {
+ if (port->func->acquire)
+ port->func->acquire(port);
+ return port->func->aux(port, 9, addr, data, size);
+ }
+ return -ENODEV;
}
int
-nv_wraux(struct nouveau_i2c_port *auxch, u32 addr, u8 *data, u8 size)
+nv_wraux(struct nouveau_i2c_port *port, u32 addr, u8 *data, u8 size)
{
- return auxch_tx(auxch->i2c, auxch->drive, 8, addr, data, size);
+ if (port->func->aux) {
+ if (port->func->acquire)
+ port->func->acquire(port);
+ return port->func->aux(port, 8, addr, data, size);
+ }
+ return -ENODEV;
}
static int
aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
- struct nouveau_i2c_port *auxch = (struct nouveau_i2c_port *)adap;
+ struct nouveau_i2c_port *port = adap->algo_data;
struct i2c_msg *msg = msgs;
int ret, mcnt = num;
+ if (!port->func->aux)
+ return -ENODEV;
+ if ( port->func->acquire)
+ port->func->acquire(port);
+
while (mcnt--) {
u8 remaining = msg->len;
u8 *ptr = msg->buf;
@@ -185,8 +74,7 @@ aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
if (mcnt || remaining > 16)
cmd |= 4; /* MOT */
- ret = auxch_tx(auxch->i2c, auxch->drive, cmd,
- msg->addr, ptr, cnt);
+ ret = port->func->aux(port, cmd, msg->addr, ptr, cnt);
if (ret < 0)
return ret;
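
The hunks above replace the old hard-coded auxch_tx() path with dispatch through a per-port function table, running an optional acquire() hook before each transfer. A minimal standalone sketch of that pattern; only the shape (func->acquire then func->aux) mirrors the patch, every other name and type here is illustrative:

/* Minimal, self-contained sketch of the per-port dispatch shown above. */
#include <stdio.h>
#include <errno.h>

struct port;

struct port_func {
	void (*acquire)(struct port *);              /* optional pad routing */
	int  (*aux)(struct port *, int type, unsigned int addr,
		    unsigned char *data, unsigned char size);
};

struct port {
	const struct port_func *func;
};

static int rdaux(struct port *port, unsigned int addr,
		 unsigned char *data, unsigned char size)
{
	if (!port->func->aux)
		return -ENODEV;                /* port has no AUX engine */
	if (port->func->acquire)
		port->func->acquire(port);     /* e.g. switch pads to AUX */
	return port->func->aux(port, 9, addr, data, size);   /* 9 == read */
}

static int fake_aux(struct port *p, int type, unsigned int addr,
		    unsigned char *data, unsigned char size)
{
	(void)p; (void)type; (void)addr;
	while (size--)
		data[size] = 0xaa;             /* pretend the sink answered */
	return 0;
}

int main(void)
{
	static const struct port_func funcs = { .aux = fake_aux };
	struct port port = { .func = &funcs };
	unsigned char buf[4];

	printf("rdaux -> %d\n", rdaux(&port, 0x0000, buf, sizeof(buf)));
	return 0;
}
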
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index dbfc2abf0cf..a114a0ed7e9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2012 Red Hat Inc.
+ * Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -22,64 +22,136 @@
* Authors: Ben Skeggs
*/
-#include "core/option.h"
+#include <core/option.h>
-#include "subdev/i2c.h"
-#include "subdev/vga.h"
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/i2c.h>
+#include <subdev/i2c.h>
+#include <subdev/vga.h>
-int
-nv_rdi2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg)
+/******************************************************************************
+ * interface to linux i2c bit-banging algorithm
+ *****************************************************************************/
+
+#ifdef CONFIG_NOUVEAU_I2C_INTERNAL_DEFAULT
+#define CSTMSEL true
+#else
+#define CSTMSEL false
+#endif
+
+static int
+nouveau_i2c_pre_xfer(struct i2c_adapter *adap)
{
- u8 val;
- struct i2c_msg msgs[] = {
- { .addr = addr, .flags = 0, .len = 1, .buf = &reg },
- { .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = &val },
- };
+ struct i2c_algo_bit_data *bit = adap->algo_data;
+ struct nouveau_i2c_port *port = bit->data;
+ if (port->func->acquire)
+ port->func->acquire(port);
+ return 0;
+}
- int ret = i2c_transfer(&port->adapter, msgs, 2);
- if (ret != 2)
- return -EIO;
+static void
+nouveau_i2c_setscl(void *data, int state)
+{
+ struct nouveau_i2c_port *port = data;
+ port->func->drive_scl(port, state);
+}
- return val;
+static void
+nouveau_i2c_setsda(void *data, int state)
+{
+ struct nouveau_i2c_port *port = data;
+ port->func->drive_sda(port, state);
}
-int
-nv_wri2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg, u8 val)
+static int
+nouveau_i2c_getscl(void *data)
{
- struct i2c_msg msgs[] = {
- { .addr = addr, .flags = 0, .len = 1, .buf = &reg },
- { .addr = addr, .flags = 0, .len = 1, .buf = &val },
- };
+ struct nouveau_i2c_port *port = data;
+ return port->func->sense_scl(port);
+}
- int ret = i2c_transfer(&port->adapter, msgs, 2);
- if (ret != 2)
- return -EIO;
+static int
+nouveau_i2c_getsda(void *data)
+{
+ struct nouveau_i2c_port *port = data;
+ return port->func->sense_sda(port);
+}
- return 0;
+/******************************************************************************
+ * base i2c "port" class implementation
+ *****************************************************************************/
+
+void
+_nouveau_i2c_port_dtor(struct nouveau_object *object)
+{
+ struct nouveau_i2c_port *port = (void *)object;
+ i2c_del_adapter(&port->adapter);
+ nouveau_object_destroy(&port->base);
}
-bool
-nv_probe_i2c(struct nouveau_i2c_port *port, u8 addr)
+int
+nouveau_i2c_port_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, u8 index,
+ const struct i2c_algorithm *algo,
+ int size, void **pobject)
{
- u8 buf[] = { 0 };
- struct i2c_msg msgs[] = {
- {
- .addr = addr,
- .flags = 0,
- .len = 1,
- .buf = buf,
- },
- {
- .addr = addr,
- .flags = I2C_M_RD,
- .len = 1,
- .buf = buf,
- }
- };
+ struct nouveau_device *device = nv_device(parent);
+ struct nouveau_i2c *i2c = (void *)engine;
+ struct nouveau_i2c_port *port;
+ int ret;
- return i2c_transfer(&port->adapter, msgs, 2) == 2;
+ ret = nouveau_object_create_(parent, engine, oclass, 0, size, pobject);
+ port = *pobject;
+ if (ret)
+ return ret;
+
+ snprintf(port->adapter.name, sizeof(port->adapter.name),
+ "nouveau-%s-%d", device->name, index);
+ port->adapter.owner = THIS_MODULE;
+ port->adapter.dev.parent = &device->pdev->dev;
+ port->index = index;
+ i2c_set_adapdata(&port->adapter, i2c);
+
+ if (algo == &nouveau_i2c_bit_algo &&
+ !nouveau_boolopt(device->cfgopt, "NvI2C", CSTMSEL)) {
+ struct i2c_algo_bit_data *bit;
+
+ bit = kzalloc(sizeof(*bit), GFP_KERNEL);
+ if (!bit)
+ return -ENOMEM;
+
+ bit->udelay = 10;
+ bit->timeout = usecs_to_jiffies(2200);
+ bit->data = port;
+ bit->pre_xfer = nouveau_i2c_pre_xfer;
+ bit->setsda = nouveau_i2c_setsda;
+ bit->setscl = nouveau_i2c_setscl;
+ bit->getsda = nouveau_i2c_getsda;
+ bit->getscl = nouveau_i2c_getscl;
+
+ port->adapter.algo_data = bit;
+ ret = i2c_bit_add_bus(&port->adapter);
+ } else {
+ port->adapter.algo_data = port;
+ port->adapter.algo = algo;
+ ret = i2c_add_adapter(&port->adapter);
+ }
+
+ /* drop port's i2c subdev refcount, i2c handles this itself */
+ if (ret == 0) {
+ list_add_tail(&port->head, &i2c->ports);
+ atomic_dec(&engine->refcount);
+ }
+
+ return ret;
}
+/******************************************************************************
+ * base i2c subdev class implementation
+ *****************************************************************************/
+
static struct nouveau_i2c_port *
nouveau_i2c_find(struct nouveau_i2c *i2c, u8 index)
{
@@ -103,29 +175,23 @@ nouveau_i2c_find(struct nouveau_i2c *i2c, u8 index)
list_for_each_entry(port, &i2c->ports, head) {
if (port->index == index)
- break;
+ return port;
}
- if (&port->head == &i2c->ports)
- return NULL;
+ return NULL;
+}
- if (nv_device(i2c)->card_type >= NV_50 && (port->dcb & 0x00000100)) {
- u32 reg = 0x00e500, val;
- if (port->type == 6) {
- reg += port->drive * 0x50;
- val = 0x2002;
- } else {
- reg += ((port->dcb & 0x1e00) >> 9) * 0x50;
- val = 0xe001;
- }
+static struct nouveau_i2c_port *
+nouveau_i2c_find_type(struct nouveau_i2c *i2c, u16 type)
+{
+ struct nouveau_i2c_port *port;
- /* nfi, but neither auxch or i2c work if it's 1 */
- nv_mask(i2c, reg + 0x0c, 0x00000001, 0x00000000);
- /* nfi, but switches auxch vs normal i2c */
- nv_mask(i2c, reg + 0x00, 0x0000f003, val);
+ list_for_each_entry(port, &i2c->ports, head) {
+ if (nv_hclass(port) == type)
+ return port;
}
- return port;
+ return NULL;
}
static int
@@ -155,109 +221,86 @@ nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
return -ENODEV;
}
-void
-nouveau_i2c_drive_scl(void *data, int state)
+int
+_nouveau_i2c_fini(struct nouveau_object *object, bool suspend)
{
- struct nouveau_i2c_port *port = data;
+ struct nouveau_i2c *i2c = (void *)object;
+ struct nouveau_i2c_port *port;
+ int ret;
- if (port->type == DCB_I2C_NV04_BIT) {
- u8 val = nv_rdvgac(port->i2c, 0, port->drive);
- if (state) val |= 0x20;
- else val &= 0xdf;
- nv_wrvgac(port->i2c, 0, port->drive, val | 0x01);
- } else
- if (port->type == DCB_I2C_NV4E_BIT) {
- nv_mask(port->i2c, port->drive, 0x2f, state ? 0x21 : 0x01);
- } else
- if (port->type == DCB_I2C_NVIO_BIT) {
- if (state) port->state |= 0x01;
- else port->state &= 0xfe;
- nv_wr32(port->i2c, port->drive, 4 | port->state);
+ list_for_each_entry(port, &i2c->ports, head) {
+ ret = nv_ofuncs(port)->fini(nv_object(port), suspend);
+ if (ret && suspend)
+ goto fail;
}
-}
-
-void
-nouveau_i2c_drive_sda(void *data, int state)
-{
- struct nouveau_i2c_port *port = data;
- if (port->type == DCB_I2C_NV04_BIT) {
- u8 val = nv_rdvgac(port->i2c, 0, port->drive);
- if (state) val |= 0x10;
- else val &= 0xef;
- nv_wrvgac(port->i2c, 0, port->drive, val | 0x01);
- } else
- if (port->type == DCB_I2C_NV4E_BIT) {
- nv_mask(port->i2c, port->drive, 0x1f, state ? 0x11 : 0x01);
- } else
- if (port->type == DCB_I2C_NVIO_BIT) {
- if (state) port->state |= 0x02;
- else port->state &= 0xfd;
- nv_wr32(port->i2c, port->drive, 4 | port->state);
+ return nouveau_subdev_fini(&i2c->base, suspend);
+fail:
+ list_for_each_entry_continue_reverse(port, &i2c->ports, head) {
+ nv_ofuncs(port)->init(nv_object(port));
}
+
+ return ret;
}
int
-nouveau_i2c_sense_scl(void *data)
+_nouveau_i2c_init(struct nouveau_object *object)
{
- struct nouveau_i2c_port *port = data;
- struct nouveau_device *device = nv_device(port->i2c);
-
- if (port->type == DCB_I2C_NV04_BIT) {
- return !!(nv_rdvgac(port->i2c, 0, port->sense) & 0x04);
- } else
- if (port->type == DCB_I2C_NV4E_BIT) {
- return !!(nv_rd32(port->i2c, port->sense) & 0x00040000);
- } else
- if (port->type == DCB_I2C_NVIO_BIT) {
- if (device->card_type < NV_D0)
- return !!(nv_rd32(port->i2c, port->sense) & 0x01);
- else
- return !!(nv_rd32(port->i2c, port->sense) & 0x10);
+ struct nouveau_i2c *i2c = (void *)object;
+ struct nouveau_i2c_port *port;
+ int ret;
+
+ ret = nouveau_subdev_init(&i2c->base);
+ if (ret == 0) {
+ list_for_each_entry(port, &i2c->ports, head) {
+ ret = nv_ofuncs(port)->init(nv_object(port));
+ if (ret)
+ goto fail;
+ }
}
- return 0;
+ return ret;
+fail:
+ list_for_each_entry_continue_reverse(port, &i2c->ports, head) {
+ nv_ofuncs(port)->fini(nv_object(port), false);
+ }
+
+ return ret;
}
-int
-nouveau_i2c_sense_sda(void *data)
+void
+_nouveau_i2c_dtor(struct nouveau_object *object)
{
- struct nouveau_i2c_port *port = data;
- struct nouveau_device *device = nv_device(port->i2c);
-
- if (port->type == DCB_I2C_NV04_BIT) {
- return !!(nv_rdvgac(port->i2c, 0, port->sense) & 0x08);
- } else
- if (port->type == DCB_I2C_NV4E_BIT) {
- return !!(nv_rd32(port->i2c, port->sense) & 0x00080000);
- } else
- if (port->type == DCB_I2C_NVIO_BIT) {
- if (device->card_type < NV_D0)
- return !!(nv_rd32(port->i2c, port->sense) & 0x02);
- else
- return !!(nv_rd32(port->i2c, port->sense) & 0x20);
+ struct nouveau_i2c *i2c = (void *)object;
+ struct nouveau_i2c_port *port, *temp;
+
+ list_for_each_entry_safe(port, temp, &i2c->ports, head) {
+ nouveau_object_ref(NULL, (struct nouveau_object **)&port);
}
- return 0;
+ nouveau_subdev_destroy(&i2c->base);
}
-static const u32 nv50_i2c_port[] = {
- 0x00e138, 0x00e150, 0x00e168, 0x00e180,
- 0x00e254, 0x00e274, 0x00e764, 0x00e780,
- 0x00e79c, 0x00e7b8
+static struct nouveau_oclass *
+nouveau_i2c_extdev_sclass[] = {
+ nouveau_anx9805_sclass,
};
-static int
-nouveau_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+int
+nouveau_i2c_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass,
+ struct nouveau_oclass *sclass,
+ int length, void **pobject)
{
- struct nouveau_device *device = nv_device(parent);
struct nouveau_bios *bios = nouveau_bios(parent);
- struct nouveau_i2c_port *port;
struct nouveau_i2c *i2c;
+ struct nouveau_object *object;
struct dcb_i2c_entry info;
- int ret, i = -1;
+ int ret, i, j, index = -1;
+ struct dcb_output outp;
+ u8 ver, hdr;
+ u32 data;
ret = nouveau_subdev_create(parent, engine, oclass, 0,
"I2C", "i2c", &i2c);
@@ -266,142 +309,60 @@ nouveau_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
i2c->find = nouveau_i2c_find;
+ i2c->find_type = nouveau_i2c_find_type;
i2c->identify = nouveau_i2c_identify;
INIT_LIST_HEAD(&i2c->ports);
- while (!dcb_i2c_parse(bios, ++i, &info)) {
+ while (!dcb_i2c_parse(bios, ++index, &info)) {
if (info.type == DCB_I2C_UNUSED)
continue;
- port = kzalloc(sizeof(*port), GFP_KERNEL);
- if (!port) {
- nv_error(i2c, "failed port memory alloc at %d\n", i);
- break;
- }
-
- port->type = info.type;
- switch (port->type) {
- case DCB_I2C_NV04_BIT:
- port->drive = info.drive;
- port->sense = info.sense;
- break;
- case DCB_I2C_NV4E_BIT:
- port->drive = 0x600800 + info.drive;
- port->sense = port->drive;
- break;
- case DCB_I2C_NVIO_BIT:
- port->drive = info.drive & 0x0f;
- if (device->card_type < NV_D0) {
- if (port->drive >= ARRAY_SIZE(nv50_i2c_port))
- break;
- port->drive = nv50_i2c_port[port->drive];
- port->sense = port->drive;
- } else {
- port->drive = 0x00d014 + (port->drive * 0x20);
- port->sense = port->drive;
+ oclass = sclass;
+ do {
+ ret = -EINVAL;
+ if (oclass->handle == info.type) {
+ ret = nouveau_object_ctor(*pobject, *pobject,
+ oclass, &info,
+ index, &object);
}
+ } while (ret && (++oclass)->handle);
+ }
+
+ /* in addition to the busses specified in the i2c table, there
+ * may be ddc/aux channels hiding behind external tmds/dp/etc
+ * transmitters.
+ */
+ index = ((index + 0x0f) / 0x10) * 0x10;
+ i = -1;
+ while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &outp))) {
+ if (!outp.location || !outp.extdev)
+ continue;
+
+ switch (outp.type) {
+ case DCB_OUTPUT_TMDS:
+ info.type = NV_I2C_TYPE_EXTDDC(outp.extdev);
break;
- case DCB_I2C_NVIO_AUX:
- port->drive = info.drive & 0x0f;
- port->sense = port->drive;
- port->adapter.algo = &nouveau_i2c_aux_algo;
+ case DCB_OUTPUT_DP:
+ info.type = NV_I2C_TYPE_EXTAUX(outp.extdev);
break;
default:
- break;
- }
-
- if (!port->adapter.algo && !port->drive) {
- nv_error(i2c, "I2C%d: type %d index %x/%x unknown\n",
- i, port->type, port->drive, port->sense);
- kfree(port);
continue;
}
- snprintf(port->adapter.name, sizeof(port->adapter.name),
- "nouveau-%s-%d", device->name, i);
- port->adapter.owner = THIS_MODULE;
- port->adapter.dev.parent = &device->pdev->dev;
- port->i2c = i2c;
- port->index = i;
- port->dcb = info.data;
- i2c_set_adapdata(&port->adapter, i2c);
-
- if (port->adapter.algo != &nouveau_i2c_aux_algo) {
- nouveau_i2c_drive_scl(port, 0);
- nouveau_i2c_drive_sda(port, 1);
- nouveau_i2c_drive_scl(port, 1);
-
-#ifdef CONFIG_NOUVEAU_I2C_INTERNAL_DEFAULT
- if (nouveau_boolopt(device->cfgopt, "NvI2C", true)) {
-#else
- if (nouveau_boolopt(device->cfgopt, "NvI2C", false)) {
-#endif
- port->adapter.algo = &nouveau_i2c_bit_algo;
- ret = i2c_add_adapter(&port->adapter);
- } else {
- port->adapter.algo_data = &port->bit;
- port->bit.udelay = 10;
- port->bit.timeout = usecs_to_jiffies(2200);
- port->bit.data = port;
- port->bit.setsda = nouveau_i2c_drive_sda;
- port->bit.setscl = nouveau_i2c_drive_scl;
- port->bit.getsda = nouveau_i2c_sense_sda;
- port->bit.getscl = nouveau_i2c_sense_scl;
- ret = i2c_bit_add_bus(&port->adapter);
- }
- } else {
- port->adapter.algo = &nouveau_i2c_aux_algo;
- ret = i2c_add_adapter(&port->adapter);
- }
-
- if (ret) {
- nv_error(i2c, "I2C%d: failed register: %d\n", i, ret);
- kfree(port);
- continue;
+ ret = -ENODEV;
+ j = -1;
+ while (ret && ++j < ARRAY_SIZE(nouveau_i2c_extdev_sclass)) {
+ parent = nv_object(i2c->find(i2c, outp.i2c_index));
+ oclass = nouveau_i2c_extdev_sclass[j];
+ do {
+ if (oclass->handle != info.type)
+ continue;
+ ret = nouveau_object_ctor(parent, *pobject,
+ oclass, NULL,
+ index++, &object);
+ } while (ret && (++oclass)->handle);
}
-
- list_add_tail(&port->head, &i2c->ports);
}
return 0;
}
-
-static void
-nouveau_i2c_dtor(struct nouveau_object *object)
-{
- struct nouveau_i2c *i2c = (void *)object;
- struct nouveau_i2c_port *port, *temp;
-
- list_for_each_entry_safe(port, temp, &i2c->ports, head) {
- i2c_del_adapter(&port->adapter);
- list_del(&port->head);
- kfree(port);
- }
-
- nouveau_subdev_destroy(&i2c->base);
-}
-
-static int
-nouveau_i2c_init(struct nouveau_object *object)
-{
- struct nouveau_i2c *i2c = (void *)object;
- return nouveau_subdev_init(&i2c->base);
-}
-
-static int
-nouveau_i2c_fini(struct nouveau_object *object, bool suspend)
-{
- struct nouveau_i2c *i2c = (void *)object;
- return nouveau_subdev_fini(&i2c->base, suspend);
-}
-
-struct nouveau_oclass
-nouveau_i2c_oclass = {
- .handle = NV_SUBDEV(I2C, 0x00),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nouveau_i2c_ctor,
- .dtor = nouveau_i2c_dtor,
- .init = nouveau_i2c_init,
- .fini = nouveau_i2c_fini,
- },
-};
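
The reworked _nouveau_i2c_init()/_nouveau_i2c_fini() above walk the subdev's port list and, if one port fails, unwind the ones already handled in reverse order. A standalone sketch of that init-with-rollback pattern, using a plain array where the driver uses list_for_each_entry()/..._continue_reverse(); all names here are made up:

/* Sketch of init-with-rollback: bring units up in order and, on failure,
 * tear down the ones already initialised, newest first. */
#include <stdio.h>

#define NPORTS 4

static int  port_init(int i) { printf("init %d\n", i); return (i == 2) ? -1 : 0; }
static void port_fini(int i) { printf("fini %d\n", i); }

static int init_all(void)
{
	int i, ret = 0;

	for (i = 0; i < NPORTS; i++) {
		ret = port_init(i);
		if (ret)
			break;
	}
	if (ret) {
		while (--i >= 0)        /* roll back in reverse order */
			port_fini(i);
	}
	return ret;
}

int main(void)
{
	return init_all() ? 1 : 0;
}
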
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
index 1c4c9a5c8e2..a6e72d3b06b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
@@ -32,25 +32,25 @@
static inline void
i2c_drive_scl(struct nouveau_i2c_port *port, int state)
{
- nouveau_i2c_drive_scl(port, state);
+ port->func->drive_scl(port, state);
}
static inline void
i2c_drive_sda(struct nouveau_i2c_port *port, int state)
{
- nouveau_i2c_drive_sda(port, state);
+ port->func->drive_sda(port, state);
}
static inline int
i2c_sense_scl(struct nouveau_i2c_port *port)
{
- return nouveau_i2c_sense_scl(port);
+ return port->func->sense_scl(port);
}
static inline int
i2c_sense_sda(struct nouveau_i2c_port *port)
{
- return nouveau_i2c_sense_sda(port);
+ return port->func->sense_sda(port);
}
static void
@@ -77,9 +77,8 @@ i2c_start(struct nouveau_i2c_port *port)
{
int ret = 0;
- port->state = i2c_sense_scl(port);
- port->state |= i2c_sense_sda(port) << 1;
- if (port->state != 3) {
+ if (!i2c_sense_scl(port) ||
+ !i2c_sense_sda(port)) {
i2c_drive_scl(port, 0);
i2c_drive_sda(port, 1);
if (!i2c_raise_scl(port))
@@ -184,10 +183,13 @@ i2c_addr(struct nouveau_i2c_port *port, struct i2c_msg *msg)
static int
i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
- struct nouveau_i2c_port *port = (struct nouveau_i2c_port *)adap;
+ struct nouveau_i2c_port *port = adap->algo_data;
struct i2c_msg *msg = msgs;
int ret = 0, mcnt = num;
+ if (port->func->acquire)
+ port->func->acquire(port);
+
while (!ret && mcnt--) {
u8 remaining = msg->len;
u8 *ptr = msg->buf;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c
new file mode 100644
index 00000000000..2ad18840fe6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/i2c.h>
+#include <subdev/vga.h>
+
+struct nv04_i2c_priv {
+ struct nouveau_i2c base;
+};
+
+struct nv04_i2c_port {
+ struct nouveau_i2c_port base;
+ u8 drive;
+ u8 sense;
+};
+
+static void
+nv04_i2c_drive_scl(struct nouveau_i2c_port *base, int state)
+{
+ struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv04_i2c_port *port = (void *)base;
+ u8 val = nv_rdvgac(priv, 0, port->drive);
+ if (state) val |= 0x20;
+ else val &= 0xdf;
+ nv_wrvgac(priv, 0, port->drive, val | 0x01);
+}
+
+static void
+nv04_i2c_drive_sda(struct nouveau_i2c_port *base, int state)
+{
+ struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv04_i2c_port *port = (void *)base;
+ u8 val = nv_rdvgac(priv, 0, port->drive);
+ if (state) val |= 0x10;
+ else val &= 0xef;
+ nv_wrvgac(priv, 0, port->drive, val | 0x01);
+}
+
+static int
+nv04_i2c_sense_scl(struct nouveau_i2c_port *base)
+{
+ struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv04_i2c_port *port = (void *)base;
+ return !!(nv_rdvgac(priv, 0, port->sense) & 0x04);
+}
+
+static int
+nv04_i2c_sense_sda(struct nouveau_i2c_port *base)
+{
+ struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv04_i2c_port *port = (void *)base;
+ return !!(nv_rdvgac(priv, 0, port->sense) & 0x08);
+}
+
+static const struct nouveau_i2c_func
+nv04_i2c_func = {
+ .drive_scl = nv04_i2c_drive_scl,
+ .drive_sda = nv04_i2c_drive_sda,
+ .sense_scl = nv04_i2c_sense_scl,
+ .sense_sda = nv04_i2c_sense_sda,
+};
+
+static int
+nv04_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct dcb_i2c_entry *info = data;
+ struct nv04_i2c_port *port;
+ int ret;
+
+ ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+ &nouveau_i2c_bit_algo, &port);
+ *pobject = nv_object(port);
+ if (ret)
+ return ret;
+
+ port->base.func = &nv04_i2c_func;
+ port->drive = info->drive;
+ port->sense = info->sense;
+ return 0;
+}
+
+static struct nouveau_oclass
+nv04_i2c_sclass[] = {
+ { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NV04_BIT),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_i2c_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = _nouveau_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+ },
+ },
+ {}
+};
+
+static int
+nv04_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_i2c_priv *priv;
+ int ret;
+
+ ret = nouveau_i2c_create(parent, engine, oclass, nv04_i2c_sclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_oclass
+nv04_i2c_oclass = {
+ .handle = NV_SUBDEV(I2C, 0x04),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_i2c_ctor,
+ .dtor = _nouveau_i2c_dtor,
+ .init = _nouveau_i2c_init,
+ .fini = _nouveau_i2c_fini,
+ },
+};
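
The new nv04.c follows the layout every chipset file in this series uses: a chip-specific port struct embeds the generic nouveau_i2c_port as its base, and the const func table's callbacks cast that base pointer back to the outer type. A toy, self-contained version of the embedding; the register values and names are invented for illustration:

/* Sketch of base-struct embedding: generic callbacks receive the base
 * pointer and the chip-specific implementation recovers its own type.
 * Works because the base is the first member of the outer struct. */
#include <stdio.h>

struct base_port {
	int index;                          /* generic per-port state */
};

struct nv04_like_port {
	struct base_port base;              /* must be first for the cast */
	unsigned char drive, sense;         /* chip-specific register indices */
};

static int sense_scl(struct base_port *base)
{
	struct nv04_like_port *port = (struct nv04_like_port *)base;

	printf("port %d: sensing SCL via reg 0x%02x\n",
	       base->index, port->sense);
	return 1;                           /* pretend the line is high */
}

int main(void)
{
	struct nv04_like_port port = {
		.base  = { .index = 0 },
		.drive = 0x3f,              /* invented values */
		.sense = 0x3e,
	};

	return !sense_scl(&port.base);
}
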
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c
new file mode 100644
index 00000000000..f501ae25dbb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/i2c.h>
+#include <subdev/vga.h>
+
+struct nv4e_i2c_priv {
+ struct nouveau_i2c base;
+};
+
+struct nv4e_i2c_port {
+ struct nouveau_i2c_port base;
+ u32 addr;
+};
+
+static void
+nv4e_i2c_drive_scl(struct nouveau_i2c_port *base, int state)
+{
+ struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv4e_i2c_port *port = (void *)base;
+ nv_mask(priv, port->addr, 0x2f, state ? 0x21 : 0x01);
+}
+
+static void
+nv4e_i2c_drive_sda(struct nouveau_i2c_port *base, int state)
+{
+ struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv4e_i2c_port *port = (void *)base;
+ nv_mask(priv, port->addr, 0x1f, state ? 0x11 : 0x01);
+}
+
+static int
+nv4e_i2c_sense_scl(struct nouveau_i2c_port *base)
+{
+ struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv4e_i2c_port *port = (void *)base;
+ return !!(nv_rd32(priv, port->addr) & 0x00040000);
+}
+
+static int
+nv4e_i2c_sense_sda(struct nouveau_i2c_port *base)
+{
+ struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv4e_i2c_port *port = (void *)base;
+ return !!(nv_rd32(priv, port->addr) & 0x00080000);
+}
+
+static const struct nouveau_i2c_func
+nv4e_i2c_func = {
+ .drive_scl = nv4e_i2c_drive_scl,
+ .drive_sda = nv4e_i2c_drive_sda,
+ .sense_scl = nv4e_i2c_sense_scl,
+ .sense_sda = nv4e_i2c_sense_sda,
+};
+
+static int
+nv4e_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct dcb_i2c_entry *info = data;
+ struct nv4e_i2c_port *port;
+ int ret;
+
+ ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+ &nouveau_i2c_bit_algo, &port);
+ *pobject = nv_object(port);
+ if (ret)
+ return ret;
+
+ port->base.func = &nv4e_i2c_func;
+ port->addr = 0x600800 + info->drive;
+ return 0;
+}
+
+static struct nouveau_oclass
+nv4e_i2c_sclass[] = {
+ { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NV4E_BIT),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv4e_i2c_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = _nouveau_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+ },
+ },
+ {}
+};
+
+static int
+nv4e_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv4e_i2c_priv *priv;
+ int ret;
+
+ ret = nouveau_i2c_create(parent, engine, oclass, nv4e_i2c_sclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_oclass
+nv4e_i2c_oclass = {
+ .handle = NV_SUBDEV(I2C, 0x4e),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv4e_i2c_ctor,
+ .dtor = _nouveau_i2c_dtor,
+ .init = _nouveau_i2c_init,
+ .fini = _nouveau_i2c_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c
new file mode 100644
index 00000000000..378dfa324e5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+void
+nv50_i2c_drive_scl(struct nouveau_i2c_port *base, int state)
+{
+ struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv50_i2c_port *port = (void *)base;
+ if (state) port->state |= 0x01;
+ else port->state &= 0xfe;
+ nv_wr32(priv, port->addr, port->state);
+}
+
+void
+nv50_i2c_drive_sda(struct nouveau_i2c_port *base, int state)
+{
+ struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv50_i2c_port *port = (void *)base;
+ if (state) port->state |= 0x02;
+ else port->state &= 0xfd;
+ nv_wr32(priv, port->addr, port->state);
+}
+
+int
+nv50_i2c_sense_scl(struct nouveau_i2c_port *base)
+{
+ struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv50_i2c_port *port = (void *)base;
+ return !!(nv_rd32(priv, port->addr) & 0x00000001);
+}
+
+int
+nv50_i2c_sense_sda(struct nouveau_i2c_port *base)
+{
+ struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv50_i2c_port *port = (void *)base;
+ return !!(nv_rd32(priv, port->addr) & 0x00000002);
+}
+
+static const struct nouveau_i2c_func
+nv50_i2c_func = {
+ .drive_scl = nv50_i2c_drive_scl,
+ .drive_sda = nv50_i2c_drive_sda,
+ .sense_scl = nv50_i2c_sense_scl,
+ .sense_sda = nv50_i2c_sense_sda,
+};
+
+const u32 nv50_i2c_addr[] = {
+ 0x00e138, 0x00e150, 0x00e168, 0x00e180,
+ 0x00e254, 0x00e274, 0x00e764, 0x00e780,
+ 0x00e79c, 0x00e7b8
+};
+const int nv50_i2c_addr_nr = ARRAY_SIZE(nv50_i2c_addr);
+
+static int
+nv50_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct dcb_i2c_entry *info = data;
+ struct nv50_i2c_port *port;
+ int ret;
+
+ ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+ &nouveau_i2c_bit_algo, &port);
+ *pobject = nv_object(port);
+ if (ret)
+ return ret;
+
+ if (info->drive >= nv50_i2c_addr_nr)
+ return -EINVAL;
+
+ port->base.func = &nv50_i2c_func;
+ port->state = 0x00000007;
+ port->addr = nv50_i2c_addr[info->drive];
+ return 0;
+}
+
+int
+nv50_i2c_port_init(struct nouveau_object *object)
+{
+ struct nv50_i2c_priv *priv = (void *)object->engine;
+ struct nv50_i2c_port *port = (void *)object;
+ nv_wr32(priv, port->addr, port->state);
+ return nouveau_i2c_port_init(&port->base);
+}
+
+static struct nouveau_oclass
+nv50_i2c_sclass[] = {
+ { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_i2c_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = nv50_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+ },
+ },
+ {}
+};
+
+static int
+nv50_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_i2c_priv *priv;
+ int ret;
+
+ ret = nouveau_i2c_create(parent, engine, oclass, nv50_i2c_sclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_oclass
+nv50_i2c_oclass = {
+ .handle = NV_SUBDEV(I2C, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_i2c_ctor,
+ .dtor = _nouveau_i2c_dtor,
+ .init = _nouveau_i2c_init,
+ .fini = _nouveau_i2c_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
new file mode 100644
index 00000000000..4e5ba48ebf5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
@@ -0,0 +1,32 @@
+#ifndef __NV50_I2C_H__
+#define __NV50_I2C_H__
+
+#include <subdev/i2c.h>
+
+struct nv50_i2c_priv {
+ struct nouveau_i2c base;
+};
+
+struct nv50_i2c_port {
+ struct nouveau_i2c_port base;
+ u32 addr;
+ u32 ctrl;
+ u32 data;
+ u32 state;
+};
+
+extern const u32 nv50_i2c_addr[];
+extern const int nv50_i2c_addr_nr;
+int nv50_i2c_port_init(struct nouveau_object *);
+int nv50_i2c_sense_scl(struct nouveau_i2c_port *);
+int nv50_i2c_sense_sda(struct nouveau_i2c_port *);
+void nv50_i2c_drive_scl(struct nouveau_i2c_port *, int state);
+void nv50_i2c_drive_sda(struct nouveau_i2c_port *, int state);
+
+int nv94_aux_port_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+void nv94_i2c_acquire(struct nouveau_i2c_port *);
+void nv94_i2c_release(struct nouveau_i2c_port *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
new file mode 100644
index 00000000000..61b771670bf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
+#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
+
+static void
+auxch_fini(struct nouveau_i2c *aux, int ch)
+{
+ nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000);
+}
+
+static int
+auxch_init(struct nouveau_i2c *aux, int ch)
+{
+ const u32 unksel = 1; /* nfi which to use, or if it matters.. */
+ const u32 ureq = unksel ? 0x00100000 : 0x00200000;
+ const u32 urep = unksel ? 0x01000000 : 0x02000000;
+ u32 ctrl, timeout;
+
+ /* wait up to 1ms for any previous transaction to be done... */
+ timeout = 1000;
+ do {
+ ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
+ udelay(1);
+ if (!timeout--) {
+ AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
+ return -EBUSY;
+ }
+ } while (ctrl & 0x03010000);
+
+ /* set some magic, and wait up to 1ms for it to appear */
+ nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00300000, ureq);
+ timeout = 1000;
+ do {
+ ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
+ udelay(1);
+ if (!timeout--) {
+ AUX_ERR("magic wait 0x%08x\n", ctrl);
+ auxch_fini(aux, ch);
+ return -EBUSY;
+ }
+ } while ((ctrl & 0x03000000) != urep);
+
+ return 0;
+}
+
+int
+nv94_aux(struct nouveau_i2c_port *base, u8 type, u32 addr, u8 *data, u8 size)
+{
+ struct nouveau_i2c *aux = nouveau_i2c(base);
+ struct nv50_i2c_port *port = (void *)base;
+ u32 ctrl, stat, timeout, retries;
+ u32 xbuf[4] = {};
+ int ch = port->addr;
+ int ret, i;
+
+ AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
+
+ ret = auxch_init(aux, ch);
+ if (ret)
+ goto out;
+
+ stat = nv_rd32(aux, 0x00e4e8 + (ch * 0x50));
+ if (!(stat & 0x10000000)) {
+ AUX_DBG("sink not detected\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ if (!(type & 1)) {
+ memcpy(xbuf, data, size);
+ for (i = 0; i < 16; i += 4) {
+ AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
+ nv_wr32(aux, 0x00e4c0 + (ch * 0x50) + i, xbuf[i / 4]);
+ }
+ }
+
+ ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
+ ctrl &= ~0x0001f0ff;
+ ctrl |= type << 12;
+ ctrl |= size - 1;
+ nv_wr32(aux, 0x00e4e0 + (ch * 0x50), addr);
+
+ /* retry transaction a number of times on failure... */
+ ret = -EREMOTEIO;
+ for (retries = 0; retries < 32; retries++) {
+ /* reset, and delay a while if this is a retry */
+ nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl);
+ nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl);
+ if (retries)
+ udelay(400);
+
+ /* transaction request, wait up to 1ms for it to complete */
+ nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00010000 | ctrl);
+
+ timeout = 1000;
+ do {
+ ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
+ udelay(1);
+ if (!timeout--) {
+ AUX_ERR("tx req timeout 0x%08x\n", ctrl);
+ goto out;
+ }
+ } while (ctrl & 0x00010000);
+
+ /* read status, and check if transaction completed ok */
+ stat = nv_mask(aux, 0x00e4e8 + (ch * 0x50), 0, 0);
+ if (!(stat & 0x000f0f00)) {
+ ret = 0;
+ break;
+ }
+
+ AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
+ }
+
+ if (type & 1) {
+ for (i = 0; i < 16; i += 4) {
+ xbuf[i / 4] = nv_rd32(aux, 0x00e4d0 + (ch * 0x50) + i);
+ AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
+ }
+ memcpy(data, xbuf, size);
+ }
+
+out:
+ auxch_fini(aux, ch);
+ return ret;
+}
+
+void
+nv94_i2c_acquire(struct nouveau_i2c_port *base)
+{
+ struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv50_i2c_port *port = (void *)base;
+ if (port->ctrl) {
+ nv_mask(priv, port->ctrl + 0x0c, 0x00000001, 0x00000000);
+ nv_mask(priv, port->ctrl + 0x00, 0x0000f003, port->data);
+ }
+}
+
+void
+nv94_i2c_release(struct nouveau_i2c_port *base)
+{
+}
+
+static const struct nouveau_i2c_func
+nv94_i2c_func = {
+ .acquire = nv94_i2c_acquire,
+ .release = nv94_i2c_release,
+ .drive_scl = nv50_i2c_drive_scl,
+ .drive_sda = nv50_i2c_drive_sda,
+ .sense_scl = nv50_i2c_sense_scl,
+ .sense_sda = nv50_i2c_sense_sda,
+};
+
+static int
+nv94_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct dcb_i2c_entry *info = data;
+ struct nv50_i2c_port *port;
+ int ret;
+
+ ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+ &nouveau_i2c_bit_algo, &port);
+ *pobject = nv_object(port);
+ if (ret)
+ return ret;
+
+ if (info->drive >= nv50_i2c_addr_nr)
+ return -EINVAL;
+
+ port->base.func = &nv94_i2c_func;
+ port->state = 7;
+ port->addr = nv50_i2c_addr[info->drive];
+ if (info->share != DCB_I2C_UNUSED) {
+ port->ctrl = 0x00e500 + (info->share * 0x50);
+ port->data = 0x0000e001;
+ }
+ return 0;
+}
+
+static const struct nouveau_i2c_func
+nv94_aux_func = {
+ .acquire = nv94_i2c_acquire,
+ .release = nv94_i2c_release,
+ .aux = nv94_aux,
+};
+
+int
+nv94_aux_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct dcb_i2c_entry *info = data;
+ struct nv50_i2c_port *port;
+ int ret;
+
+ ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+ &nouveau_i2c_aux_algo, &port);
+ *pobject = nv_object(port);
+ if (ret)
+ return ret;
+
+ port->base.func = &nv94_aux_func;
+ port->addr = info->drive;
+ if (info->share != DCB_I2C_UNUSED) {
+ port->ctrl = 0x00e500 + (info->drive * 0x50);
+ port->data = 0x00002002;
+ }
+
+ return 0;
+}
+
+static struct nouveau_oclass
+nv94_i2c_sclass[] = {
+ { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv94_i2c_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = nv50_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+ },
+ },
+ { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv94_aux_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = _nouveau_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+ },
+ },
+ {}
+};
+
+static int
+nv94_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_i2c_priv *priv;
+ int ret;
+
+ ret = nouveau_i2c_create(parent, engine, oclass, nv94_i2c_sclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_oclass
+nv94_i2c_oclass = {
+ .handle = NV_SUBDEV(I2C, 0x94),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv94_i2c_ctor,
+ .dtor = _nouveau_i2c_dtor,
+ .init = _nouveau_i2c_init,
+ .fini = _nouveau_i2c_fini,
+ },
+};
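
nv94_aux() above wraps a busy-wait with a roughly 1ms timeout inside an outer loop of up to 32 retries, resetting and backing off between attempts. The control flow reduces to a small standalone sketch; the simulated "hardware" helpers are invented and the delays are only noted in comments:

/* Sketch of bounded polling inside a bounded retry loop, as in nv94_aux(). */
#include <stdio.h>
#include <errno.h>

static int busy_polls;                       /* stands in for the ctrl reg */

static void kick(void)      { busy_polls = 5; }     /* start a request */
static int  busy(void)      { return busy_polls-- > 0; }
static int  status_ok(void) { return 1; }           /* no AUX errors */

static int aux_transaction(void)
{
	int retries, timeout;

	for (retries = 0; retries < 32; retries++) {
		/* reset the engine, then kick the request; the real code
		 * adds udelay(400) before each retry */
		kick();

		timeout = 1000;                  /* ~1ms at 1us per poll */
		while (busy()) {
			/* udelay(1) per iteration in the real code */
			if (!timeout--)
				return -EBUSY;   /* give up on the transfer */
		}

		if (status_ok())
			return 0;                /* transaction completed */
	}
	return -EREMOTEIO;                       /* retries exhausted */
}

int main(void)
{
	printf("aux transaction: %d\n", aux_transaction());
	return 0;
}
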
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
new file mode 100644
index 00000000000..f761b8a610f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+static int
+nvd0_i2c_sense_scl(struct nouveau_i2c_port *base)
+{
+ struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv50_i2c_port *port = (void *)base;
+ return !!(nv_rd32(priv, port->addr) & 0x00000010);
+}
+
+static int
+nvd0_i2c_sense_sda(struct nouveau_i2c_port *base)
+{
+ struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv50_i2c_port *port = (void *)base;
+ return !!(nv_rd32(priv, port->addr) & 0x00000020);
+}
+
+static const struct nouveau_i2c_func
+nvd0_i2c_func = {
+ .acquire = nv94_i2c_acquire,
+ .release = nv94_i2c_release,
+ .drive_scl = nv50_i2c_drive_scl,
+ .drive_sda = nv50_i2c_drive_sda,
+ .sense_scl = nvd0_i2c_sense_scl,
+ .sense_sda = nvd0_i2c_sense_sda,
+};
+
+static int
+nvd0_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct dcb_i2c_entry *info = data;
+ struct nv50_i2c_port *port;
+ int ret;
+
+ ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+ &nouveau_i2c_bit_algo, &port);
+ *pobject = nv_object(port);
+ if (ret)
+ return ret;
+
+ port->base.func = &nvd0_i2c_func;
+ port->state = 0x00000007;
+ port->addr = 0x00d014 + (info->drive * 0x20);
+ if (info->share != DCB_I2C_UNUSED) {
+ port->ctrl = 0x00e500 + (info->share * 0x50);
+ port->data = 0x0000e001;
+ }
+ return 0;
+}
+
+static struct nouveau_oclass
+nvd0_i2c_sclass[] = {
+ { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvd0_i2c_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = nv50_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+ },
+ },
+ { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv94_aux_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = _nouveau_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+ },
+ },
+ {}
+};
+
+static int
+nvd0_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_i2c_priv *priv;
+ int ret;
+
+ ret = nouveau_i2c_create(parent, engine, oclass, nvd0_i2c_sclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_oclass
+nvd0_i2c_oclass = {
+ .handle = NV_SUBDEV(I2C, 0xd0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvd0_i2c_ctor,
+ .dtor = _nouveau_i2c_dtor,
+ .init = _nouveau_i2c_init,
+ .fini = _nouveau_i2c_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
index 23ebe477a6f..89da8fa7ea0 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
@@ -37,7 +37,7 @@ nv04_mc_intr[] = {
{ 0x00100000, NVDEV_SUBDEV_TIMER },
{ 0x01000000, NVDEV_ENGINE_DISP }, /* NV04- PCRTC0 */
{ 0x02000000, NVDEV_ENGINE_DISP }, /* NV11- PCRTC1 */
- { 0x10000000, NVDEV_SUBDEV_GPIO }, /* PBUS */
+ { 0x10000000, NVDEV_SUBDEV_BUS },
{ 0x80000000, NVDEV_ENGINE_SW },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index 8d759f83032..5965add6dae 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -38,6 +38,7 @@ nv50_mc_intr[] = {
{ 0x00100000, NVDEV_SUBDEV_TIMER },
{ 0x00200000, NVDEV_SUBDEV_GPIO },
{ 0x04000000, NVDEV_ENGINE_DISP },
+ { 0x10000000, NVDEV_SUBDEV_BUS },
{ 0x80000000, NVDEV_ENGINE_SW },
{ 0x0000d101, NVDEV_SUBDEV_FB },
{},
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index ceb5c83f945..3a80b29dce0 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -35,10 +35,12 @@ nv98_mc_intr[] = {
{ 0x00001000, NVDEV_ENGINE_GR },
{ 0x00004000, NVDEV_ENGINE_CRYPT }, /* NV84:NVA3 */
{ 0x00008000, NVDEV_ENGINE_BSP },
+ { 0x00080000, NVDEV_SUBDEV_THERM }, /* NVA3:NVC0 */
{ 0x00100000, NVDEV_SUBDEV_TIMER },
{ 0x00200000, NVDEV_SUBDEV_GPIO },
{ 0x00400000, NVDEV_ENGINE_COPY0 }, /* NVA3- */
{ 0x04000000, NVDEV_ENGINE_DISP },
+ { 0x10000000, NVDEV_SUBDEV_BUS },
{ 0x80000000, NVDEV_ENGINE_SW },
{ 0x0040d101, NVDEV_SUBDEV_FB },
{},
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index 92796682722..42bbf72023a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -36,11 +36,13 @@ nvc0_mc_intr[] = {
{ 0x00000100, NVDEV_ENGINE_FIFO },
{ 0x00001000, NVDEV_ENGINE_GR },
{ 0x00008000, NVDEV_ENGINE_BSP },
+ { 0x00040000, NVDEV_SUBDEV_THERM },
{ 0x00020000, NVDEV_ENGINE_VP },
{ 0x00100000, NVDEV_SUBDEV_TIMER },
{ 0x00200000, NVDEV_SUBDEV_GPIO },
{ 0x02000000, NVDEV_SUBDEV_LTCG },
{ 0x04000000, NVDEV_ENGINE_DISP },
+ { 0x10000000, NVDEV_SUBDEV_BUS },
{ 0x40000000, NVDEV_SUBDEV_IBUS },
{ 0x80000000, NVDEV_ENGINE_SW },
{},
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c
index 839ca1edc13..4bde7f7f7b8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c
@@ -156,15 +156,15 @@ mxms_foreach(struct nouveau_mxm *mxm, u8 types,
nv_debug(mxm, "%4s: ", mxms_desc_name[type]);
for (j = headerlen - 1; j >= 0; j--)
- printk("%02x", dump[j]);
- printk("\n");
+ pr_cont("%02x", dump[j]);
+ pr_cont("\n");
dump += headerlen;
for (i = 0; i < entries; i++, dump += recordlen) {
nv_debug(mxm, " ");
for (j = recordlen - 1; j >= 0; j--)
- printk("%02x", dump[j]);
- printk("\n");
+ pr_cont("%02x", dump[j]);
+ pr_cont("\n");
}
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
index 1674c74a76c..f794dc89a3b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
@@ -29,6 +29,134 @@
#include "priv.h"
+static int
+nouveau_therm_update_trip(struct nouveau_therm *therm)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ struct nouveau_therm_trip_point *trip = priv->fan->bios.trip,
+ *cur_trip = NULL,
+ *last_trip = priv->last_trip;
+ u8 temp = therm->temp_get(therm);
+ u16 duty, i;
+
+ /* look for the trip point corresponding to the current temperature */
+ cur_trip = NULL;
+ for (i = 0; i < priv->fan->bios.nr_fan_trip; i++) {
+ if (temp >= trip[i].temp)
+ cur_trip = &trip[i];
+ }
+
+ /* account for the hysteresis cycle */
+ if (last_trip && temp <= (last_trip->temp) &&
+ temp > (last_trip->temp - last_trip->hysteresis))
+ cur_trip = last_trip;
+
+ if (cur_trip) {
+ duty = cur_trip->fan_duty;
+ priv->last_trip = cur_trip;
+ } else {
+ duty = 0;
+ priv->last_trip = NULL;
+ }
+
+ return duty;
+}
+
+static int
+nouveau_therm_update_linear(struct nouveau_therm *therm)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ u8 linear_min_temp = priv->fan->bios.linear_min_temp;
+ u8 linear_max_temp = priv->fan->bios.linear_max_temp;
+ u8 temp = therm->temp_get(therm);
+ u16 duty;
+
+ /* handle the non-linear part first */
+ if (temp < linear_min_temp)
+ return priv->fan->bios.min_duty;
+ else if (temp > linear_max_temp)
+ return priv->fan->bios.max_duty;
+
+ /* we are in the linear zone */
+ duty = (temp - linear_min_temp);
+ duty *= (priv->fan->bios.max_duty - priv->fan->bios.min_duty);
+ duty /= (linear_max_temp - linear_min_temp);
+ duty += priv->fan->bios.min_duty;
+
+ return duty;
+}
+
+static void
+nouveau_therm_update(struct nouveau_therm *therm, int mode)
+{
+ struct nouveau_timer *ptimer = nouveau_timer(therm);
+ struct nouveau_therm_priv *priv = (void *)therm;
+ unsigned long flags;
+ int duty;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (mode < 0)
+ mode = priv->mode;
+ priv->mode = mode;
+
+ switch (mode) {
+ case NOUVEAU_THERM_CTRL_MANUAL:
+ duty = nouveau_therm_fan_get(therm);
+ if (duty < 0)
+ duty = 100;
+ break;
+ case NOUVEAU_THERM_CTRL_AUTO:
+ if (priv->fan->bios.nr_fan_trip)
+ duty = nouveau_therm_update_trip(therm);
+ else
+ duty = nouveau_therm_update_linear(therm);
+ break;
+ case NOUVEAU_THERM_CTRL_NONE:
+ default:
+ goto done;
+ }
+
+ nv_debug(therm, "FAN target request: %d%%\n", duty);
+ nouveau_therm_fan_set(therm, (mode != NOUVEAU_THERM_CTRL_AUTO), duty);
+
+done:
+ if (list_empty(&priv->alarm.head) && (mode == NOUVEAU_THERM_CTRL_AUTO))
+ ptimer->alarm(ptimer, 1000000000ULL, &priv->alarm);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void
+nouveau_therm_alarm(struct nouveau_alarm *alarm)
+{
+ struct nouveau_therm_priv *priv =
+ container_of(alarm, struct nouveau_therm_priv, alarm);
+ nouveau_therm_update(&priv->base, -1);
+}
+
+int
+nouveau_therm_mode(struct nouveau_therm *therm, int mode)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ struct nouveau_device *device = nv_device(therm);
+ static const char *name[] = {
+ "disabled",
+ "manual",
+ "automatic"
+ };
+
+ /* The default PDAEMON ucode interferes with fan management */
+ if ((mode >= ARRAY_SIZE(name)) ||
+ (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0))
+ return -EINVAL;
+
+ if (priv->mode == mode)
+ return 0;
+
+ nv_info(therm, "Thermal management: %s\n", name[mode]);
+ nouveau_therm_update(therm, mode);
+ return 0;
+}
+
int
nouveau_therm_attr_get(struct nouveau_therm *therm,
enum nouveau_therm_attr_type type)
@@ -37,11 +165,11 @@ nouveau_therm_attr_get(struct nouveau_therm *therm,
switch (type) {
case NOUVEAU_THERM_ATTR_FAN_MIN_DUTY:
- return priv->bios_fan.min_duty;
+ return priv->fan->bios.min_duty;
case NOUVEAU_THERM_ATTR_FAN_MAX_DUTY:
- return priv->bios_fan.max_duty;
+ return priv->fan->bios.max_duty;
case NOUVEAU_THERM_ATTR_FAN_MODE:
- return priv->fan.mode;
+ return priv->mode;
case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST:
return priv->bios_sensor.thrs_fan_boost.temp;
case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST:
@@ -73,42 +201,50 @@ nouveau_therm_attr_set(struct nouveau_therm *therm,
case NOUVEAU_THERM_ATTR_FAN_MIN_DUTY:
if (value < 0)
value = 0;
- if (value > priv->bios_fan.max_duty)
- value = priv->bios_fan.max_duty;
- priv->bios_fan.min_duty = value;
+ if (value > priv->fan->bios.max_duty)
+ value = priv->fan->bios.max_duty;
+ priv->fan->bios.min_duty = value;
return 0;
case NOUVEAU_THERM_ATTR_FAN_MAX_DUTY:
if (value < 0)
value = 0;
- if (value < priv->bios_fan.min_duty)
- value = priv->bios_fan.min_duty;
- priv->bios_fan.max_duty = value;
+ if (value < priv->fan->bios.min_duty)
+ value = priv->fan->bios.min_duty;
+ priv->fan->bios.max_duty = value;
return 0;
case NOUVEAU_THERM_ATTR_FAN_MODE:
- return nouveau_therm_fan_set_mode(therm, value);
+ return nouveau_therm_mode(therm, value);
case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST:
priv->bios_sensor.thrs_fan_boost.temp = value;
+ priv->sensor.program_alarms(therm);
return 0;
case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST:
priv->bios_sensor.thrs_fan_boost.hysteresis = value;
+ priv->sensor.program_alarms(therm);
return 0;
case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK:
priv->bios_sensor.thrs_down_clock.temp = value;
+ priv->sensor.program_alarms(therm);
return 0;
case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST:
priv->bios_sensor.thrs_down_clock.hysteresis = value;
+ priv->sensor.program_alarms(therm);
return 0;
case NOUVEAU_THERM_ATTR_THRS_CRITICAL:
priv->bios_sensor.thrs_critical.temp = value;
+ priv->sensor.program_alarms(therm);
return 0;
case NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST:
priv->bios_sensor.thrs_critical.hysteresis = value;
+ priv->sensor.program_alarms(therm);
return 0;
case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN:
priv->bios_sensor.thrs_shutdown.temp = value;
+ priv->sensor.program_alarms(therm);
return 0;
case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST:
priv->bios_sensor.thrs_shutdown.hysteresis = value;
+ priv->sensor.program_alarms(therm);
return 0;
}
@@ -116,7 +252,7 @@ nouveau_therm_attr_set(struct nouveau_therm *therm,
}
int
-nouveau_therm_init(struct nouveau_object *object)
+_nouveau_therm_init(struct nouveau_object *object)
{
struct nouveau_therm *therm = (void *)object;
struct nouveau_therm_priv *priv = (void *)therm;
@@ -126,19 +262,69 @@ nouveau_therm_init(struct nouveau_object *object)
if (ret)
return ret;
- if (priv->fan.percent >= 0)
- therm->fan_set(therm, priv->fan.percent);
-
+ if (priv->suspend >= 0)
+ nouveau_therm_mode(therm, priv->mode);
+ priv->sensor.program_alarms(therm);
return 0;
}
int
-nouveau_therm_fini(struct nouveau_object *object, bool suspend)
+_nouveau_therm_fini(struct nouveau_object *object, bool suspend)
{
struct nouveau_therm *therm = (void *)object;
struct nouveau_therm_priv *priv = (void *)therm;
- priv->fan.percent = therm->fan_get(therm);
+ if (suspend) {
+ priv->suspend = priv->mode;
+ priv->mode = NOUVEAU_THERM_CTRL_NONE;
+ }
return nouveau_subdev_fini(&therm->base, suspend);
}
+
+int
+nouveau_therm_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass,
+ int length, void **pobject)
+{
+ struct nouveau_therm_priv *priv;
+ int ret;
+
+ ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PTHERM",
+ "therm", length, pobject);
+ priv = *pobject;
+ if (ret)
+ return ret;
+
+ nouveau_alarm_init(&priv->alarm, nouveau_therm_alarm);
+ spin_lock_init(&priv->lock);
+ spin_lock_init(&priv->sensor.alarm_program_lock);
+
+ priv->base.fan_get = nouveau_therm_fan_user_get;
+ priv->base.fan_set = nouveau_therm_fan_user_set;
+ priv->base.fan_sense = nouveau_therm_fan_sense;
+ priv->base.attr_get = nouveau_therm_attr_get;
+ priv->base.attr_set = nouveau_therm_attr_set;
+ priv->mode = priv->suspend = -1; /* undefined */
+ return 0;
+}
+
+int
+nouveau_therm_preinit(struct nouveau_therm *therm)
+{
+ nouveau_therm_ic_ctor(therm);
+ nouveau_therm_sensor_ctor(therm);
+ nouveau_therm_fan_ctor(therm);
+
+ nouveau_therm_mode(therm, NOUVEAU_THERM_CTRL_NONE);
+ return 0;
+}
+
+void
+_nouveau_therm_dtor(struct nouveau_object *object)
+{
+ struct nouveau_therm_priv *priv = (void *)object;
+ kfree(priv->fan);
+ nouveau_subdev_destroy(&priv->base.base);
+}
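
The automatic mode added above picks a fan duty either from the BIOS trip-point table, with a hysteresis window so the fan does not oscillate around a threshold, or, when no trip points exist, from a linear ramp between a minimum and maximum temperature. Both policies fit in a short standalone sketch; the temperatures and duty cycles below are made up:

/* Sketch of the trip-point and linear fan policies shown above. */
#include <stdio.h>

struct trip { int temp, hysteresis, duty; };

static const struct trip trips[] = {
	{ 40, 3, 30 }, { 60, 3, 55 }, { 80, 3, 100 },
};
static const struct trip *last_trip;

static int duty_from_trips(int temp)
{
	const struct trip *cur = NULL;
	unsigned int i;

	/* highest trip point at or below the current temperature */
	for (i = 0; i < sizeof(trips) / sizeof(trips[0]); i++)
		if (temp >= trips[i].temp)
			cur = &trips[i];

	/* hysteresis: stick with the previous trip while the temperature
	 * stays inside its (temp - hysteresis, temp] window */
	if (last_trip && temp <= last_trip->temp &&
	    temp > last_trip->temp - last_trip->hysteresis)
		cur = last_trip;

	last_trip = cur;
	return cur ? cur->duty : 0;
}

static int duty_linear(int temp, int tmin, int tmax, int dmin, int dmax)
{
	if (temp < tmin)
		return dmin;
	if (temp > tmax)
		return dmax;
	return dmin + (temp - tmin) * (dmax - dmin) / (tmax - tmin);
}

int main(void)
{
	printf("trip: %d%%  linear: %d%%\n",
	       duty_from_trips(59), duty_linear(59, 40, 85, 30, 100));
	return 0;
}
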
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
index 52317868518..c728380d3d6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
@@ -27,90 +27,107 @@
#include <core/object.h>
#include <core/device.h>
+
#include <subdev/gpio.h>
#include <subdev/timer.h>
-int
-nouveau_therm_fan_get(struct nouveau_therm *therm)
+static int
+nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target)
{
+ struct nouveau_therm *therm = fan->parent;
struct nouveau_therm_priv *priv = (void *)therm;
- struct nouveau_gpio *gpio = nouveau_gpio(therm);
- struct dcb_gpio_func func;
- int card_type = nv_device(therm)->card_type;
- u32 divs, duty;
- int ret;
-
- if (!priv->fan.pwm_get)
- return -ENODEV;
+ struct nouveau_timer *ptimer = nouveau_timer(priv);
+ unsigned long flags;
+ int ret = 0;
+ int duty;
+
+ /* update target fan speed, restricting to allowed range */
+ spin_lock_irqsave(&fan->lock, flags);
+ if (target < 0)
+ target = fan->percent;
+ target = max_t(u8, target, fan->bios.min_duty);
+ target = min_t(u8, target, fan->bios.max_duty);
+ if (fan->percent != target) {
+ nv_debug(therm, "FAN target: %d\n", target);
+ fan->percent = target;
+ }
- ret = gpio->find(gpio, 0, DCB_GPIO_PWM_FAN, 0xff, &func);
- if (ret == 0) {
- ret = priv->fan.pwm_get(therm, func.line, &divs, &duty);
- if (ret == 0 && divs) {
- divs = max(divs, duty);
- if (card_type <= NV_40 || (func.log[0] & 1))
- duty = divs - duty;
- return (duty * 100) / divs;
- }
+ /* check that we're not already at the target duty cycle */
+ duty = fan->get(therm);
+ if (duty == target)
+ goto done;
+
+ /* smooth out the fanspeed increase/decrease */
+ if (!immediate && duty >= 0) {
+ /* the constant "3" is a rough approximation taken from
+ * nvidia's behaviour.
+ * it is meant to bump the fan speed more incrementally
+ */
+ if (duty < target)
+ duty = min(duty + 3, target);
+ else if (duty > target)
+ duty = max(duty - 3, target);
+ } else {
+ duty = target;
+ }
- return gpio->get(gpio, 0, func.func, func.line) * 100;
+ nv_debug(therm, "FAN update: %d\n", duty);
+ ret = fan->set(therm, duty);
+ if (ret)
+ goto done;
+
+ /* schedule next fan update, if not at target speed already */
+ if (list_empty(&fan->alarm.head) && target != duty) {
+ u16 bump_period = fan->bios.bump_period;
+ u16 slow_down_period = fan->bios.slow_down_period;
+ u64 delay;
+
+ if (duty > target)
+ delay = slow_down_period;
+ else if (duty == target)
+ delay = min(bump_period, slow_down_period);
+ else
+ delay = bump_period;
+
+ ptimer->alarm(ptimer, delay * 1000 * 1000, &fan->alarm);
}
- return -ENODEV;
+done:
+ spin_unlock_irqrestore(&fan->lock, flags);
+ return ret;
+}
+
+static void
+nouveau_fan_alarm(struct nouveau_alarm *alarm)
+{
+ struct nouveau_fan *fan = container_of(alarm, struct nouveau_fan, alarm);
+ nouveau_fan_update(fan, false, -1);
}
int
-nouveau_therm_fan_set(struct nouveau_therm *therm, int percent)
+nouveau_therm_fan_get(struct nouveau_therm *therm)
{
struct nouveau_therm_priv *priv = (void *)therm;
- struct nouveau_gpio *gpio = nouveau_gpio(therm);
- struct dcb_gpio_func func;
- int card_type = nv_device(therm)->card_type;
- u32 divs, duty;
- int ret;
-
- if (priv->fan.mode == FAN_CONTROL_NONE)
- return -EINVAL;
-
- if (!priv->fan.pwm_set)
- return -ENODEV;
-
- if (percent < priv->bios_fan.min_duty)
- percent = priv->bios_fan.min_duty;
- if (percent > priv->bios_fan.max_duty)
- percent = priv->bios_fan.max_duty;
-
- ret = gpio->find(gpio, 0, DCB_GPIO_PWM_FAN, 0xff, &func);
- if (ret == 0) {
- divs = priv->bios_perf_fan.pwm_divisor;
- if (priv->bios_fan.pwm_freq) {
- divs = 1;
- if (priv->fan.pwm_clock)
- divs = priv->fan.pwm_clock(therm);
- divs /= priv->bios_fan.pwm_freq;
- }
-
- duty = ((divs * percent) + 99) / 100;
- if (card_type <= NV_40 || (func.log[0] & 1))
- duty = divs - duty;
-
- ret = priv->fan.pwm_set(therm, func.line, divs, duty);
- return ret;
- }
+ return priv->fan->get(therm);
+}
- return -ENODEV;
+int
+nouveau_therm_fan_set(struct nouveau_therm *therm, bool immediate, int percent)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ return nouveau_fan_update(priv->fan, immediate, percent);
}
int
nouveau_therm_fan_sense(struct nouveau_therm *therm)
{
+ struct nouveau_therm_priv *priv = (void *)therm;
struct nouveau_timer *ptimer = nouveau_timer(therm);
struct nouveau_gpio *gpio = nouveau_gpio(therm);
- struct dcb_gpio_func func;
u32 cycles, cur, prev;
u64 start, end, tach;
- if (gpio->find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff, &func))
+ if (priv->fan->tach.func == DCB_GPIO_UNUSED)
return -ENODEV;
/* Time a complete rotation and extrapolate to RPM:
@@ -118,12 +135,12 @@ nouveau_therm_fan_sense(struct nouveau_therm *therm)
* We get 4 changes (0 -> 1 -> 0 -> 1) per complete rotation.
*/
start = ptimer->read(ptimer);
- prev = gpio->get(gpio, 0, func.func, func.line);
+ prev = gpio->get(gpio, 0, priv->fan->tach.func, priv->fan->tach.line);
cycles = 0;
do {
usleep_range(500, 1000); /* supports 0 < rpm < 7500 */
- cur = gpio->get(gpio, 0, func.func, func.line);
+ cur = gpio->get(gpio, 0, priv->fan->tach.func, priv->fan->tach.line);
if (prev != cur) {
if (!start)
start = ptimer->read(ptimer);
@@ -142,34 +159,6 @@ nouveau_therm_fan_sense(struct nouveau_therm *therm)
}
int
-nouveau_therm_fan_set_mode(struct nouveau_therm *therm,
- enum nouveau_therm_fan_mode mode)
-{
- struct nouveau_therm_priv *priv = (void *)therm;
-
- if (priv->fan.mode == mode)
- return 0;
-
- if (mode < FAN_CONTROL_NONE || mode >= FAN_CONTROL_NR)
- return -EINVAL;
-
- switch (mode)
- {
- case FAN_CONTROL_NONE:
- nv_info(therm, "switch fan to no-control mode\n");
- break;
- case FAN_CONTROL_MANUAL:
- nv_info(therm, "switch fan to manual mode\n");
- break;
- case FAN_CONTROL_NR:
- break;
- }
-
- priv->fan.mode = mode;
- return 0;
-}
-
-int
nouveau_therm_fan_user_get(struct nouveau_therm *therm)
{
return nouveau_therm_fan_get(therm);
@@ -180,55 +169,86 @@ nouveau_therm_fan_user_set(struct nouveau_therm *therm, int percent)
{
struct nouveau_therm_priv *priv = (void *)therm;
- if (priv->fan.mode != FAN_CONTROL_MANUAL)
+ if (priv->mode != NOUVEAU_THERM_CTRL_MANUAL)
return -EINVAL;
- return nouveau_therm_fan_set(therm, percent);
+ return nouveau_therm_fan_set(therm, true, percent);
}
-void
+static void
nouveau_therm_fan_set_defaults(struct nouveau_therm *therm)
{
struct nouveau_therm_priv *priv = (void *)therm;
- priv->bios_fan.pwm_freq = 0;
- priv->bios_fan.min_duty = 0;
- priv->bios_fan.max_duty = 100;
+ priv->fan->bios.pwm_freq = 0;
+ priv->fan->bios.min_duty = 0;
+ priv->fan->bios.max_duty = 100;
+ priv->fan->bios.bump_period = 500;
+ priv->fan->bios.slow_down_period = 2000;
+ priv->fan->bios.linear_min_temp = 40;
+ priv->fan->bios.linear_max_temp = 85;
}
-
static void
nouveau_therm_fan_safety_checks(struct nouveau_therm *therm)
{
struct nouveau_therm_priv *priv = (void *)therm;
- if (priv->bios_fan.min_duty > 100)
- priv->bios_fan.min_duty = 100;
- if (priv->bios_fan.max_duty > 100)
- priv->bios_fan.max_duty = 100;
+ if (priv->fan->bios.min_duty > 100)
+ priv->fan->bios.min_duty = 100;
+ if (priv->fan->bios.max_duty > 100)
+ priv->fan->bios.max_duty = 100;
- if (priv->bios_fan.min_duty > priv->bios_fan.max_duty)
- priv->bios_fan.min_duty = priv->bios_fan.max_duty;
-}
-
-int nouveau_fan_pwm_clock_dummy(struct nouveau_therm *therm)
-{
- return 1;
+ if (priv->fan->bios.min_duty > priv->fan->bios.max_duty)
+ priv->fan->bios.min_duty = priv->fan->bios.max_duty;
}
int
nouveau_therm_fan_ctor(struct nouveau_therm *therm)
{
struct nouveau_therm_priv *priv = (void *)therm;
+ struct nouveau_gpio *gpio = nouveau_gpio(therm);
struct nouveau_bios *bios = nouveau_bios(therm);
+ struct dcb_gpio_func func;
+ int ret;
+ /* attempt to locate a drivable fan, and determine control method */
+ ret = gpio->find(gpio, 0, DCB_GPIO_FAN, 0xff, &func);
+ if (ret == 0) {
+ if (func.log[0] & DCB_GPIO_LOG_DIR_IN) {
+ nv_debug(therm, "GPIO_FAN is in input mode\n");
+ ret = -EINVAL;
+ } else {
+ ret = nouveau_fanpwm_create(therm, &func);
+ if (ret != 0)
+ ret = nouveau_fantog_create(therm, &func);
+ }
+ }
+
+ /* no controllable fan found, create a dummy fan module */
+ if (ret != 0) {
+ ret = nouveau_fannil_create(therm);
+ if (ret)
+ return ret;
+ }
+
+ nv_info(therm, "FAN control: %s\n", priv->fan->type);
+
+ /* attempt to detect a tachometer connection */
+ ret = gpio->find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff, &priv->fan->tach);
+ if (ret)
+ priv->fan->tach.func = DCB_GPIO_UNUSED;
+
+ /* initialise fan bump/slow update handling */
+ priv->fan->parent = therm;
+ nouveau_alarm_init(&priv->fan->alarm, nouveau_fan_alarm);
+ spin_lock_init(&priv->fan->lock);
+
+ /* other random init... */
nouveau_therm_fan_set_defaults(therm);
- nvbios_perf_fan_parse(bios, &priv->bios_perf_fan);
- if (nvbios_therm_fan_parse(bios, &priv->bios_fan))
+ nvbios_perf_fan_parse(bios, &priv->fan->perf);
+ if (nvbios_therm_fan_parse(bios, &priv->fan->bios))
nv_error(therm, "parsing the thermal table failed\n");
nouveau_therm_fan_safety_checks(therm);
-
- nouveau_therm_fan_set_mode(therm, FAN_CONTROL_NONE);
-
return 0;
}
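A note on the smoothing logic introduced above: nouveau_fan_update() no longer jumps straight to the requested duty cycle. Unless immediate is set it clamps the target to the BIOS min/max duty, moves at most 3 percentage points per call, and re-arms a timer alarm after bios.bump_period (ramping up) or bios.slow_down_period (ramping down), in milliseconds going by the 500/2000 defaults set later in this file. A minimal standalone sketch of just that stepping policy, with made-up duty values and the two periods passed in as plain integers:

#include <stdio.h>

/* one smoothing step, as in nouveau_fan_update(): move duty toward
 * target by at most 3 points and report how long to wait before the
 * next step (times in milliseconds, like bump/slow_down_period) */
static int fan_step(int duty, int target, int bump_ms, int slow_ms, int *delay_ms)
{
        if (duty < target)
                duty = (duty + 3 < target) ? duty + 3 : target;
        else if (duty > target)
                duty = (duty - 3 > target) ? duty - 3 : target;

        if (duty > target)
                *delay_ms = slow_ms;    /* still slowing down */
        else if (duty < target)
                *delay_ms = bump_ms;    /* still speeding up */
        else
                *delay_ms = 0;          /* reached target, no reschedule */
        return duty;
}

int main(void)
{
        int duty = 30, target = 55, delay;

        do {
                duty = fan_step(duty, target, 500, 2000, &delay);
                printf("duty=%d%% next update in %dms\n", duty, delay);
        } while (duty != target);
        return 0;
}

Compiled on its own this converges from 30% to 55% in 3-point steps, which is the ramp the alarm callback produces in the driver.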
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fannil.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fannil.c
new file mode 100644
index 00000000000..b78c182e1d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fannil.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "priv.h"
+
+static int
+nouveau_fannil_get(struct nouveau_therm *therm)
+{
+ return -ENODEV;
+}
+
+static int
+nouveau_fannil_set(struct nouveau_therm *therm, int percent)
+{
+ return -ENODEV;
+}
+
+int
+nouveau_fannil_create(struct nouveau_therm *therm)
+{
+ struct nouveau_therm_priv *tpriv = (void *)therm;
+ struct nouveau_fan *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ tpriv->fan = priv;
+ if (!priv)
+ return -ENOMEM;
+
+ priv->type = "none / external";
+ priv->get = nouveau_fannil_get;
+ priv->set = nouveau_fannil_set;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c
new file mode 100644
index 00000000000..5f71db8e899
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ * Martin Peres
+ */
+
+#include <core/option.h>
+#include <subdev/gpio.h>
+
+#include "priv.h"
+
+struct nouveau_fanpwm_priv {
+ struct nouveau_fan base;
+ struct dcb_gpio_func func;
+};
+
+static int
+nouveau_fanpwm_get(struct nouveau_therm *therm)
+{
+ struct nouveau_therm_priv *tpriv = (void *)therm;
+ struct nouveau_fanpwm_priv *priv = (void *)tpriv->fan;
+ struct nouveau_gpio *gpio = nouveau_gpio(therm);
+ int card_type = nv_device(therm)->card_type;
+ u32 divs, duty;
+ int ret;
+
+ ret = therm->pwm_get(therm, priv->func.line, &divs, &duty);
+ if (ret == 0 && divs) {
+ divs = max(divs, duty);
+ if (card_type <= NV_40 || (priv->func.log[0] & 1))
+ duty = divs - duty;
+ return (duty * 100) / divs;
+ }
+
+ return gpio->get(gpio, 0, priv->func.func, priv->func.line) * 100;
+}
+
+static int
+nouveau_fanpwm_set(struct nouveau_therm *therm, int percent)
+{
+ struct nouveau_therm_priv *tpriv = (void *)therm;
+ struct nouveau_fanpwm_priv *priv = (void *)tpriv->fan;
+ int card_type = nv_device(therm)->card_type;
+ u32 divs, duty;
+ int ret;
+
+ divs = priv->base.perf.pwm_divisor;
+ if (priv->base.bios.pwm_freq) {
+ divs = 1;
+ if (therm->pwm_clock)
+ divs = therm->pwm_clock(therm);
+ divs /= priv->base.bios.pwm_freq;
+ }
+
+ duty = ((divs * percent) + 99) / 100;
+ if (card_type <= NV_40 || (priv->func.log[0] & 1))
+ duty = divs - duty;
+
+ ret = therm->pwm_set(therm, priv->func.line, divs, duty);
+ if (ret == 0)
+ ret = therm->pwm_ctrl(therm, priv->func.line, true);
+ return ret;
+}
+
+int
+nouveau_fanpwm_create(struct nouveau_therm *therm, struct dcb_gpio_func *func)
+{
+ struct nouveau_device *device = nv_device(therm);
+ struct nouveau_therm_priv *tpriv = (void *)therm;
+ struct nouveau_fanpwm_priv *priv;
+ u32 divs, duty;
+
+ if (!nouveau_boolopt(device->cfgopt, "NvFanPWM", func->param) ||
+ !therm->pwm_ctrl ||
+ therm->pwm_get(therm, func->line, &divs, &duty) == -ENODEV)
+ return -ENODEV;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ tpriv->fan = &priv->base;
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.type = "PWM";
+ priv->base.get = nouveau_fanpwm_get;
+ priv->base.set = nouveau_fanpwm_set;
+ priv->func = *func;
+ return 0;
+}
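For reference, the divider/duty arithmetic in nouveau_fanpwm_set() above: the divider comes from the BIOS perf table unless the thermal table supplies a PWM frequency, in which case it is pwm_clock / pwm_freq; the duty is rounded up, then inverted on NV40-and-earlier cards or when the GPIO logic bit is set. A small standalone sketch of that arithmetic, with hypothetical clock and frequency values and a plain bool standing in for the inversion check:

#include <stdio.h>
#include <stdbool.h>

/* divider/duty computation for a requested fan percentage, following
 * the arithmetic in nouveau_fanpwm_set(); all inputs are hypothetical */
static void pwm_compute(unsigned perf_divisor, unsigned pwm_clock_hz,
                        unsigned pwm_freq_hz, int percent, bool invert,
                        unsigned *divs, unsigned *duty)
{
        *divs = perf_divisor;                   /* BIOS perf-table divider */
        if (pwm_freq_hz)                        /* thermal table overrides it */
                *divs = pwm_clock_hz / pwm_freq_hz;

        *duty = ((*divs * percent) + 99) / 100; /* round up */
        if (invert)                             /* active-low PWM line */
                *duty = *divs - *duty;
}

int main(void)
{
        unsigned divs, duty;

        /* assumed example: 27 MHz PWM clock, 25 kHz PWM frequency, 40% fan */
        pwm_compute(0, 27000000, 25000, 40, false, &divs, &duty);
        printf("divs=%u duty=%u\n", divs, duty);
        return 0;
}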
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c
new file mode 100644
index 00000000000..e601773ee47
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2012 The Nouveau community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include "priv.h"
+
+#include <core/object.h>
+#include <core/device.h>
+
+#include <subdev/gpio.h>
+#include <subdev/timer.h>
+
+struct nouveau_fantog_priv {
+ struct nouveau_fan base;
+ struct nouveau_alarm alarm;
+ spinlock_t lock;
+ u32 period_us;
+ u32 percent;
+ struct dcb_gpio_func func;
+};
+
+static void
+nouveau_fantog_update(struct nouveau_fantog_priv *priv, int percent)
+{
+ struct nouveau_therm_priv *tpriv = (void *)priv->base.parent;
+ struct nouveau_timer *ptimer = nouveau_timer(tpriv);
+ struct nouveau_gpio *gpio = nouveau_gpio(tpriv);
+ unsigned long flags;
+ int duty;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (percent < 0)
+ percent = priv->percent;
+ priv->percent = percent;
+
+ duty = !gpio->get(gpio, 0, DCB_GPIO_FAN, 0xff);
+ gpio->set(gpio, 0, DCB_GPIO_FAN, 0xff, duty);
+
+ if (list_empty(&priv->alarm.head) && percent != (duty * 100)) {
+ u64 next_change = (percent * priv->period_us) / 100;
+ if (!duty)
+ next_change = priv->period_us - next_change;
+ ptimer->alarm(ptimer, next_change * 1000, &priv->alarm);
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void
+nouveau_fantog_alarm(struct nouveau_alarm *alarm)
+{
+ struct nouveau_fantog_priv *priv =
+ container_of(alarm, struct nouveau_fantog_priv, alarm);
+ nouveau_fantog_update(priv, -1);
+}
+
+static int
+nouveau_fantog_get(struct nouveau_therm *therm)
+{
+ struct nouveau_therm_priv *tpriv = (void *)therm;
+ struct nouveau_fantog_priv *priv = (void *)tpriv->fan;
+ return priv->percent;
+}
+
+static int
+nouveau_fantog_set(struct nouveau_therm *therm, int percent)
+{
+ struct nouveau_therm_priv *tpriv = (void *)therm;
+ struct nouveau_fantog_priv *priv = (void *)tpriv->fan;
+ if (therm->pwm_ctrl)
+ therm->pwm_ctrl(therm, priv->func.line, false);
+ nouveau_fantog_update(priv, percent);
+ return 0;
+}
+
+int
+nouveau_fantog_create(struct nouveau_therm *therm, struct dcb_gpio_func *func)
+{
+ struct nouveau_therm_priv *tpriv = (void *)therm;
+ struct nouveau_fantog_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ tpriv->fan = &priv->base;
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.type = "toggle";
+ priv->base.get = nouveau_fantog_get;
+ priv->base.set = nouveau_fantog_set;
+ nouveau_alarm_init(&priv->alarm, nouveau_fantog_alarm);
+ priv->period_us = 100000; /* 10Hz */
+ priv->percent = 100;
+ priv->func = *func;
+ spin_lock_init(&priv->lock);
+ return 0;
+}
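The toggle-mode fan above has no hardware PWM at all: nouveau_fantog_update() flips the GPIO itself on a fixed 100000 us period (10 Hz) and splits that period between on and off time in proportion to the requested percentage. A tiny sketch of the on/off timing split under the same 10 Hz assumption:

#include <stdio.h>

#define PERIOD_US 100000        /* 10 Hz, as in nouveau_fantog_create() */

/* how long to stay in the state we just switched to, as in
 * nouveau_fantog_update(): on-time is percent% of the period,
 * off-time is the remainder */
static unsigned next_change_us(int percent, int switched_to_high)
{
        unsigned on_time = (unsigned)percent * PERIOD_US / 100;

        return switched_to_high ? on_time : PERIOD_US - on_time;
}

int main(void)
{
        printf("30%% fan: on for %uus, off for %uus\n",
               next_change_us(30, 1), next_change_us(30, 0));
        return 0;
}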
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
index e512ff0aae6..e24090bac19 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
@@ -31,7 +31,7 @@ static bool
probe_monitoring_device(struct nouveau_i2c_port *i2c,
struct i2c_board_info *info)
{
- struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c->i2c);
+ struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c);
struct i2c_client *client;
request_module("%s%s", I2C_MODULE_PREFIX, info->type);
@@ -53,6 +53,31 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c,
return true;
}
+static struct i2c_board_info
+nv_board_infos[] = {
+ { I2C_BOARD_INFO("w83l785ts", 0x2d) },
+ { I2C_BOARD_INFO("w83781d", 0x2d) },
+ { I2C_BOARD_INFO("adt7473", 0x2e) },
+ { I2C_BOARD_INFO("adt7473", 0x2d) },
+ { I2C_BOARD_INFO("adt7473", 0x2c) },
+ { I2C_BOARD_INFO("f75375", 0x2e) },
+ { I2C_BOARD_INFO("lm99", 0x4c) },
+ { I2C_BOARD_INFO("lm90", 0x4c) },
+ { I2C_BOARD_INFO("lm90", 0x4d) },
+ { I2C_BOARD_INFO("adm1021", 0x18) },
+ { I2C_BOARD_INFO("adm1021", 0x19) },
+ { I2C_BOARD_INFO("adm1021", 0x1a) },
+ { I2C_BOARD_INFO("adm1021", 0x29) },
+ { I2C_BOARD_INFO("adm1021", 0x2a) },
+ { I2C_BOARD_INFO("adm1021", 0x2b) },
+ { I2C_BOARD_INFO("adm1021", 0x4c) },
+ { I2C_BOARD_INFO("adm1021", 0x4d) },
+ { I2C_BOARD_INFO("adm1021", 0x4e) },
+ { I2C_BOARD_INFO("lm63", 0x18) },
+ { I2C_BOARD_INFO("lm63", 0x4e) },
+ { }
+};
+
void
nouveau_therm_ic_ctor(struct nouveau_therm *therm)
{
@@ -60,29 +85,6 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
struct nouveau_bios *bios = nouveau_bios(therm);
struct nouveau_i2c *i2c = nouveau_i2c(therm);
struct nvbios_extdev_func extdev_entry;
- struct i2c_board_info info[] = {
- { I2C_BOARD_INFO("w83l785ts", 0x2d) },
- { I2C_BOARD_INFO("w83781d", 0x2d) },
- { I2C_BOARD_INFO("adt7473", 0x2e) },
- { I2C_BOARD_INFO("adt7473", 0x2d) },
- { I2C_BOARD_INFO("adt7473", 0x2c) },
- { I2C_BOARD_INFO("f75375", 0x2e) },
- { I2C_BOARD_INFO("lm99", 0x4c) },
- { I2C_BOARD_INFO("lm90", 0x4c) },
- { I2C_BOARD_INFO("lm90", 0x4d) },
- { I2C_BOARD_INFO("adm1021", 0x18) },
- { I2C_BOARD_INFO("adm1021", 0x19) },
- { I2C_BOARD_INFO("adm1021", 0x1a) },
- { I2C_BOARD_INFO("adm1021", 0x29) },
- { I2C_BOARD_INFO("adm1021", 0x2a) },
- { I2C_BOARD_INFO("adm1021", 0x2b) },
- { I2C_BOARD_INFO("adm1021", 0x4c) },
- { I2C_BOARD_INFO("adm1021", 0x4d) },
- { I2C_BOARD_INFO("adm1021", 0x4e) },
- { I2C_BOARD_INFO("lm63", 0x18) },
- { I2C_BOARD_INFO("lm63", 0x4e) },
- { }
- };
if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_LM89, &extdev_entry)) {
struct i2c_board_info board[] = {
@@ -111,6 +113,6 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
/* The vbios doesn't provide the address of an existing monitoring
device. Let's try our static list.
*/
- i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", info,
- probe_monitoring_device);
+ i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
+ nv_board_infos, probe_monitoring_device);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
index fcf2cfe731d..0f5363edb96 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
@@ -25,6 +25,10 @@
#include "priv.h"
+struct nv40_therm_priv {
+ struct nouveau_therm_priv base;
+};
+
static int
nv40_sensor_setup(struct nouveau_therm *therm)
{
@@ -34,6 +38,7 @@ nv40_sensor_setup(struct nouveau_therm *therm)
if (device->chipset >= 0x46) {
nv_mask(therm, 0x15b8, 0x80000000, 0);
nv_wr32(therm, 0x15b0, 0x80003fff);
+ mdelay(10); /* wait for the temperature to stabilize */
return nv_rd32(therm, 0x15b4) & 0x3fff;
} else {
nv_wr32(therm, 0x15b0, 0xff);
@@ -75,7 +80,20 @@ nv40_temp_get(struct nouveau_therm *therm)
return core_temp;
}
-int
+static int
+nv40_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable)
+{
+ u32 mask = enable ? 0x80000000 : 0x00000000;
+ if (line == 2) nv_mask(therm, 0x0010f0, 0x80000000, mask);
+ else if (line == 9) nv_mask(therm, 0x0015f4, 0x80000000, mask);
+ else {
+ nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int
nv40_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
{
if (line == 2) {
@@ -101,15 +119,15 @@ nv40_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
return -EINVAL;
}
-int
+static int
nv40_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
{
if (line == 2) {
- nv_wr32(therm, 0x0010f0, 0x80000000 | (duty << 16) | divs);
+ nv_mask(therm, 0x0010f0, 0x7fff7fff, (duty << 16) | divs);
} else
if (line == 9) {
nv_wr32(therm, 0x0015f8, divs);
- nv_wr32(therm, 0x0015f4, duty | 0x80000000);
+ nv_mask(therm, 0x0015f4, 0x7fffffff, duty);
} else {
nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
return -ENODEV;
@@ -118,37 +136,51 @@ nv40_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
return 0;
}
+static void
+nv40_therm_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_therm *therm = nouveau_therm(subdev);
+ uint32_t stat = nv_rd32(therm, 0x1100);
+
+ /* interrupt handling would go here; for now the IRQs are just acked and reported */
+
+ /* ack all IRQs */
+ nv_wr32(therm, 0x1100, 0x70000);
+
+ nv_error(therm, "THERM received an IRQ: stat = %x\n", stat);
+}
+
static int
nv40_therm_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct nouveau_therm_priv *priv;
- struct nouveau_therm *therm;
+ struct nv40_therm_priv *priv;
int ret;
ret = nouveau_therm_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
- therm = (void *) priv;
if (ret)
return ret;
- nouveau_therm_ic_ctor(therm);
- nouveau_therm_sensor_ctor(therm);
- nouveau_therm_fan_ctor(therm);
+ priv->base.base.pwm_ctrl = nv40_fan_pwm_ctrl;
+ priv->base.base.pwm_get = nv40_fan_pwm_get;
+ priv->base.base.pwm_set = nv40_fan_pwm_set;
+ priv->base.base.temp_get = nv40_temp_get;
+ priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
+ nv_subdev(priv)->intr = nv40_therm_intr;
+ return nouveau_therm_preinit(&priv->base.base);
+}
- priv->fan.pwm_get = nv40_fan_pwm_get;
- priv->fan.pwm_set = nv40_fan_pwm_set;
+static int
+nv40_therm_init(struct nouveau_object *object)
+{
+ struct nouveau_therm *therm = (void *)object;
- therm->temp_get = nv40_temp_get;
- therm->fan_get = nouveau_therm_fan_user_get;
- therm->fan_set = nouveau_therm_fan_user_set;
- therm->fan_sense = nouveau_therm_fan_sense;
- therm->attr_get = nouveau_therm_attr_get;
- therm->attr_set = nouveau_therm_attr_set;
+ nv40_sensor_setup(therm);
- return 0;
+ return _nouveau_therm_init(object);
}
struct nouveau_oclass
@@ -157,7 +189,7 @@ nv40_therm_oclass = {
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv40_therm_ctor,
.dtor = _nouveau_therm_dtor,
- .init = nouveau_therm_init,
- .fini = nouveau_therm_fini,
+ .init = nv40_therm_init,
+ .fini = _nouveau_therm_fini,
},
-};
\ No newline at end of file
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
index 9360ddd469e..86632cbd65c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
@@ -25,6 +25,10 @@
#include "priv.h"
+struct nv50_therm_priv {
+ struct nouveau_therm_priv base;
+};
+
static int
pwm_info(struct nouveau_therm *therm, int *line, int *ctrl, int *indx)
{
@@ -51,6 +55,16 @@ pwm_info(struct nouveau_therm *therm, int *line, int *ctrl, int *indx)
}
int
+nv50_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable)
+{
+ u32 data = enable ? 0x00000001 : 0x00000000;
+ int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
+ if (ret == 0)
+ nv_mask(therm, ctrl, 0x00010001 << line, data << line);
+ return ret;
+}
+
+int
nv50_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
{
int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
@@ -73,7 +87,6 @@ nv50_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
if (ret)
return ret;
- nv_mask(therm, ctrl, 0x00010001 << line, 0x00000001 << line);
nv_wr32(therm, 0x00e114 + (id * 8), divs);
nv_wr32(therm, 0x00e118 + (id * 8), duty | 0x80000000);
return 0;
@@ -111,38 +124,178 @@ nv50_temp_get(struct nouveau_therm *therm)
return nv_rd32(therm, 0x20400);
}
+static void
+nv50_therm_program_alarms(struct nouveau_therm *therm)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+
+ /* enable RISING and FALLING IRQs for shutdown, THRS 0, 1, 2 and 4 */
+ nv_wr32(therm, 0x20000, 0x000003ff);
+
+ /* shutdown: the computer should be shut down when this is reached */
+ nv_wr32(therm, 0x20484, sensor->thrs_shutdown.hysteresis);
+ nv_wr32(therm, 0x20480, sensor->thrs_shutdown.temp);
+
+ /* THRS_1 : fan boost */
+ nv_wr32(therm, 0x204c4, sensor->thrs_fan_boost.temp);
+
+ /* THRS_2 : critical */
+ nv_wr32(therm, 0x204c0, sensor->thrs_critical.temp);
+
+ /* THRS_4 : down clock */
+ nv_wr32(therm, 0x20414, sensor->thrs_down_clock.temp);
+ spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+
+ nv_info(therm,
+ "Programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
+ sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
+ sensor->thrs_down_clock.temp,
+ sensor->thrs_down_clock.hysteresis,
+ sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
+ sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
+
+}
+
+/* must be called with alarm_program_lock held */
+static void
+nv50_therm_threshold_hyst_emulation(struct nouveau_therm *therm,
+ uint32_t thrs_reg, u8 status_bit,
+ const struct nvbios_therm_threshold *thrs,
+ enum nouveau_therm_thrs thrs_name)
+{
+ enum nouveau_therm_thrs_direction direction;
+ enum nouveau_therm_thrs_state prev_state, new_state;
+ int temp, cur;
+
+ prev_state = nouveau_therm_sensor_get_threshold_state(therm, thrs_name);
+ temp = nv_rd32(therm, thrs_reg);
+
+ /* program the next threshold */
+ if (temp == thrs->temp) {
+ nv_wr32(therm, thrs_reg, thrs->temp - thrs->hysteresis);
+ new_state = NOUVEAU_THERM_THRS_HIGHER;
+ } else {
+ nv_wr32(therm, thrs_reg, thrs->temp);
+ new_state = NOUVEAU_THERM_THRS_LOWER;
+ }
+
+ /* fix the state (in case someone reprogrammed the alarms) */
+ cur = therm->temp_get(therm);
+ if (new_state == NOUVEAU_THERM_THRS_LOWER && cur > thrs->temp)
+ new_state = NOUVEAU_THERM_THRS_HIGHER;
+ else if (new_state == NOUVEAU_THERM_THRS_HIGHER &&
+ cur < thrs->temp - thrs->hysteresis)
+ new_state = NOUVEAU_THERM_THRS_LOWER;
+ nouveau_therm_sensor_set_threshold_state(therm, thrs_name, new_state);
+
+ /* find the direction */
+ if (prev_state < new_state)
+ direction = NOUVEAU_THERM_THRS_RISING;
+ else if (prev_state > new_state)
+ direction = NOUVEAU_THERM_THRS_FALLING;
+ else
+ return;
+
+ /* advertise a change in direction */
+ nouveau_therm_sensor_event(therm, thrs_name, direction);
+}
+
+static void
+nv50_therm_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_therm *therm = nouveau_therm(subdev);
+ struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+ unsigned long flags;
+ uint32_t intr;
+
+ spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+
+ intr = nv_rd32(therm, 0x20100);
+
+ /* THRS_4: downclock */
+ if (intr & 0x002) {
+ nv50_therm_threshold_hyst_emulation(therm, 0x20414, 24,
+ &sensor->thrs_down_clock,
+ NOUVEAU_THERM_THRS_DOWNCLOCK);
+ intr &= ~0x002;
+ }
+
+ /* shutdown */
+ if (intr & 0x004) {
+ nv50_therm_threshold_hyst_emulation(therm, 0x20480, 20,
+ &sensor->thrs_shutdown,
+ NOUVEAU_THERM_THRS_SHUTDOWN);
+ intr &= ~0x004;
+ }
+
+ /* THRS_1 : fan boost */
+ if (intr & 0x008) {
+ nv50_therm_threshold_hyst_emulation(therm, 0x204c4, 21,
+ &sensor->thrs_fan_boost,
+ NOUVEAU_THERM_THRS_FANBOOST);
+ intr &= ~0x008;
+ }
+
+ /* THRS_2 : critical */
+ if (intr & 0x010) {
+ nv50_therm_threshold_hyst_emulation(therm, 0x204c0, 22,
+ &sensor->thrs_critical,
+ NOUVEAU_THERM_THRS_CRITICAL);
+ intr &= ~0x010;
+ }
+
+ if (intr)
+ nv_error(therm, "unhandled intr 0x%08x\n", intr);
+
+ /* ACK everything */
+ nv_wr32(therm, 0x20100, 0xffffffff);
+ nv_wr32(therm, 0x1100, 0x10000); /* PBUS */
+
+ spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+}
+
static int
nv50_therm_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct nouveau_therm_priv *priv;
- struct nouveau_therm *therm;
+ struct nv50_therm_priv *priv;
int ret;
ret = nouveau_therm_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
- therm = (void *) priv;
if (ret)
return ret;
- nouveau_therm_ic_ctor(therm);
- nouveau_therm_sensor_ctor(therm);
- nouveau_therm_fan_ctor(therm);
+ priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
+ priv->base.base.pwm_get = nv50_fan_pwm_get;
+ priv->base.base.pwm_set = nv50_fan_pwm_set;
+ priv->base.base.pwm_clock = nv50_fan_pwm_clock;
+ priv->base.base.temp_get = nv50_temp_get;
+ priv->base.sensor.program_alarms = nv50_therm_program_alarms;
+ nv_subdev(priv)->intr = nv50_therm_intr;
- priv->fan.pwm_get = nv50_fan_pwm_get;
- priv->fan.pwm_set = nv50_fan_pwm_set;
- priv->fan.pwm_clock = nv50_fan_pwm_clock;
+ /* init the thresholds */
+ nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+ NOUVEAU_THERM_THRS_SHUTDOWN,
+ NOUVEAU_THERM_THRS_LOWER);
+ nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+ NOUVEAU_THERM_THRS_FANBOOST,
+ NOUVEAU_THERM_THRS_LOWER);
+ nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+ NOUVEAU_THERM_THRS_CRITICAL,
+ NOUVEAU_THERM_THRS_LOWER);
+ nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+ NOUVEAU_THERM_THRS_DOWNCLOCK,
+ NOUVEAU_THERM_THRS_LOWER);
- therm->temp_get = nv50_temp_get;
- therm->fan_get = nouveau_therm_fan_user_get;
- therm->fan_set = nouveau_therm_fan_user_set;
- therm->fan_sense = nouveau_therm_fan_sense;
- therm->attr_get = nouveau_therm_attr_get;
- therm->attr_set = nouveau_therm_attr_set;
-
- return 0;
+ return nouveau_therm_preinit(&priv->base.base);
}
struct nouveau_oclass
@@ -151,7 +304,7 @@ nv50_therm_oclass = {
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_therm_ctor,
.dtor = _nouveau_therm_dtor,
- .init = nouveau_therm_init,
- .fini = nouveau_therm_fini,
+ .init = _nouveau_therm_init,
+ .fini = _nouveau_therm_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
new file mode 100644
index 00000000000..2dcc5437116
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/gpio.h>
+
+#include "priv.h"
+
+struct nva3_therm_priv {
+ struct nouveau_therm_priv base;
+};
+
+int
+nva3_therm_fan_sense(struct nouveau_therm *therm)
+{
+ u32 tach = nv_rd32(therm, 0x00e728) & 0x0000ffff;
+ u32 ctrl = nv_rd32(therm, 0x00e720);
+ if (ctrl & 0x00000001)
+ return tach * 60;
+ return -ENODEV;
+}
+
+static int
+nva3_therm_init(struct nouveau_object *object)
+{
+ struct nva3_therm_priv *priv = (void *)object;
+ struct dcb_gpio_func *tach = &priv->base.fan->tach;
+ int ret;
+
+ ret = nouveau_therm_init(&priv->base.base);
+ if (ret)
+ return ret;
+
+ /* enable fan tach, count revolutions per second */
+ nv_mask(priv, 0x00e720, 0x00000003, 0x00000002);
+ if (tach->func != DCB_GPIO_UNUSED) {
+ nv_wr32(priv, 0x00e724, nv_device(priv)->crystal * 1000);
+ nv_mask(priv, 0x00e720, 0x001f0000, tach->line << 16);
+ nv_mask(priv, 0x00e720, 0x00000001, 0x00000001);
+ }
+ nv_mask(priv, 0x00e720, 0x00000002, 0x00000000);
+
+ return 0;
+}
+
+static int
+nva3_therm_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nva3_therm_priv *priv;
+ int ret;
+
+ ret = nouveau_therm_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
+ priv->base.base.pwm_get = nv50_fan_pwm_get;
+ priv->base.base.pwm_set = nv50_fan_pwm_set;
+ priv->base.base.pwm_clock = nv50_fan_pwm_clock;
+ priv->base.base.temp_get = nv50_temp_get;
+ priv->base.base.fan_sense = nva3_therm_fan_sense;
+ priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
+ return nouveau_therm_preinit(&priv->base.base);
+}
+
+struct nouveau_oclass
+nva3_therm_oclass = {
+ .handle = NV_SUBDEV(THERM, 0xa3),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nva3_therm_ctor,
+ .dtor = _nouveau_therm_dtor,
+ .init = nva3_therm_init,
+ .fini = _nouveau_therm_fini,
+ },
+};
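nva3_therm_fan_sense() above simply scales a hardware revolutions-per-second counter by 60, while the GPIO fallback in fan.c times level changes on the FAN_SENSE line, four changes per complete rotation. A toy sketch of both conversions; the GPIO formula here only encodes that stated 4-edges-per-rotation relation, not the driver's exact fixed-point arithmetic:

#include <stdio.h>

/* nva3+: the hardware counter already reports revolutions per second */
static int rpm_from_hw_counter(unsigned revs_per_second)
{
        return revs_per_second * 60;
}

/* older parts: edges counted on the FAN_SENSE GPIO over a measured
 * window; 4 level changes correspond to one complete rotation */
static int rpm_from_edges(unsigned edges, unsigned window_ms)
{
        if (!window_ms)
                return -1;
        return (edges * 60000u) / (4 * window_ms);
}

int main(void)
{
        printf("hw: %d rpm\n", rpm_from_hw_counter(25));      /* 1500 rpm */
        printf("gpio: %d rpm\n", rpm_from_edges(100, 1000));  /* 1500 rpm */
        return 0;
}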
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
new file mode 100644
index 00000000000..d7d30ee8332
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "priv.h"
+
+struct nvd0_therm_priv {
+ struct nouveau_therm_priv base;
+};
+
+static int
+pwm_info(struct nouveau_therm *therm, int line)
+{
+ u32 gpio = nv_rd32(therm, 0x00d610 + (line * 0x04));
+ switch (gpio & 0x000000c0) {
+ case 0x00000000: /* normal mode, possibly pwm forced off by us */
+ case 0x00000040: /* nvio special */
+ switch (gpio & 0x0000001f) {
+ case 0x19: return 1;
+ case 0x1c: return 0;
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+
+ nv_error(therm, "GPIO %d unknown PWM: 0x%08x\n", line, gpio);
+ return -ENODEV;
+}
+
+static int
+nvd0_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable)
+{
+ u32 data = enable ? 0x00000040 : 0x00000000;
+ int indx = pwm_info(therm, line);
+ if (indx < 0)
+ return indx;
+
+ nv_mask(therm, 0x00d610 + (line * 0x04), 0x000000c0, data);
+ return 0;
+}
+
+static int
+nvd0_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
+{
+ int indx = pwm_info(therm, line);
+ if (indx < 0)
+ return indx;
+
+ if (nv_rd32(therm, 0x00d610 + (line * 0x04)) & 0x00000040) {
+ *divs = nv_rd32(therm, 0x00e114 + (indx * 8));
+ *duty = nv_rd32(therm, 0x00e118 + (indx * 8));
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int
+nvd0_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
+{
+ int indx = pwm_info(therm, line);
+ if (indx < 0)
+ return indx;
+
+ nv_wr32(therm, 0x00e114 + (indx * 8), divs);
+ nv_wr32(therm, 0x00e118 + (indx * 8), duty | 0x80000000);
+ return 0;
+}
+
+static int
+nvd0_fan_pwm_clock(struct nouveau_therm *therm)
+{
+ return (nv_device(therm)->crystal * 1000) / 20;
+}
+
+static int
+nvd0_therm_init(struct nouveau_object *object)
+{
+ struct nvd0_therm_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_therm_init(&priv->base.base);
+ if (ret)
+ return ret;
+
+ /* enable fan tach, count revolutions per second */
+ nv_mask(priv, 0x00e720, 0x00000003, 0x00000002);
+ if (priv->base.fan->tach.func != DCB_GPIO_UNUSED) {
+ nv_mask(priv, 0x00d79c, 0x000000ff, priv->base.fan->tach.line);
+ nv_wr32(priv, 0x00e724, nv_device(priv)->crystal * 1000);
+ nv_mask(priv, 0x00e720, 0x00000001, 0x00000001);
+ }
+ nv_mask(priv, 0x00e720, 0x00000002, 0x00000000);
+
+ return 0;
+}
+
+static int
+nvd0_therm_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvd0_therm_priv *priv;
+ int ret;
+
+ ret = nouveau_therm_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.base.pwm_ctrl = nvd0_fan_pwm_ctrl;
+ priv->base.base.pwm_get = nvd0_fan_pwm_get;
+ priv->base.base.pwm_set = nvd0_fan_pwm_set;
+ priv->base.base.pwm_clock = nvd0_fan_pwm_clock;
+ priv->base.base.temp_get = nv50_temp_get;
+ priv->base.base.fan_sense = nva3_therm_fan_sense;
+ priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
+ return nouveau_therm_preinit(&priv->base.base);
+}
+
+struct nouveau_oclass
+nvd0_therm_oclass = {
+ .handle = NV_SUBDEV(THERM, 0xd0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvd0_therm_ctor,
+ .dtor = _nouveau_therm_dtor,
+ .init = nvd0_therm_init,
+ .fini = _nouveau_therm_fini,
+ },
+};
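The pwm_info() decode above maps the per-line GPIO mux register at 0x00d610 + line * 4 to a PWM controller index: bits 7:6 select the mode (0x00 normal, 0x40 nvio special) and bits 4:0 the function, where 0x1c is controller 0 and 0x19 is controller 1. A sketch of that decode, taking the raw register value as a parameter instead of reading hardware:

#include <stdio.h>

/* map a raw 0x00d610+ GPIO mux value to a PWM controller index,
 * following the nvd0 pwm_info() decode above; -1 means not a PWM line */
static int nvd0_pwm_index(unsigned gpio)
{
        unsigned mode = gpio & 0x000000c0;
        unsigned func = gpio & 0x0000001f;

        if (mode == 0x00000000 || mode == 0x00000040) {
                if (func == 0x1c)
                        return 0;
                if (func == 0x19)
                        return 1;
        }
        return -1;
}

int main(void)
{
        printf("%d %d %d\n",
               nvd0_pwm_index(0x00000059),      /* nvio mode, func 0x19 -> 1 */
               nvd0_pwm_index(0x0000001c),      /* normal mode, func 0x1c -> 0 */
               nvd0_pwm_index(0x00000080));     /* other mode -> -1 */
        return 0;
}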
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
index 1c3cd6abc36..06b98706b3f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
@@ -1,3 +1,6 @@
+#ifndef __NVTHERM_PRIV_H__
+#define __NVTHERM_PRIV_H__
+
/*
* Copyright 2012 The Nouveau community
*
@@ -25,33 +28,81 @@
#include <subdev/therm.h>
#include <subdev/bios/extdev.h>
+#include <subdev/bios/gpio.h>
#include <subdev/bios/perf.h>
#include <subdev/bios/therm.h>
+#include <subdev/timer.h>
+
+struct nouveau_fan {
+ struct nouveau_therm *parent;
+ const char *type;
+
+ struct nvbios_therm_fan bios;
+ struct nvbios_perf_fan perf;
+
+ struct nouveau_alarm alarm;
+ spinlock_t lock;
+ int percent;
+
+ int (*get)(struct nouveau_therm *therm);
+ int (*set)(struct nouveau_therm *therm, int percent);
+
+ struct dcb_gpio_func tach;
+};
+
+enum nouveau_therm_thrs_direction {
+ NOUVEAU_THERM_THRS_FALLING = 0,
+ NOUVEAU_THERM_THRS_RISING = 1
+};
+
+enum nouveau_therm_thrs_state {
+ NOUVEAU_THERM_THRS_LOWER = 0,
+ NOUVEAU_THERM_THRS_HIGHER = 1
+};
+
+enum nouveau_therm_thrs {
+ NOUVEAU_THERM_THRS_FANBOOST = 0,
+ NOUVEAU_THERM_THRS_DOWNCLOCK = 1,
+ NOUVEAU_THERM_THRS_CRITICAL = 2,
+ NOUVEAU_THERM_THRS_SHUTDOWN = 3,
+ NOUVEAU_THERM_THRS_NR
+};
struct nouveau_therm_priv {
struct nouveau_therm base;
+ /* automatic thermal management */
+ struct nouveau_alarm alarm;
+ spinlock_t lock;
+ struct nouveau_therm_trip_point *last_trip;
+ int mode;
+ int suspend;
+
/* bios */
struct nvbios_therm_sensor bios_sensor;
- struct nvbios_therm_fan bios_fan;
- struct nvbios_perf_fan bios_perf_fan;
/* fan priv */
+ struct nouveau_fan *fan;
+
+ /* alarms priv */
struct {
- enum nouveau_therm_fan_mode mode;
- int percent;
+ spinlock_t alarm_program_lock;
+ struct nouveau_alarm therm_poll_alarm;
+ enum nouveau_therm_thrs_state alarm_state[NOUVEAU_THERM_THRS_NR];
+ void (*program_alarms)(struct nouveau_therm *);
+ } sensor;
- int (*pwm_get)(struct nouveau_therm *, int line, u32*, u32*);
- int (*pwm_set)(struct nouveau_therm *, int line, u32, u32);
- int (*pwm_clock)(struct nouveau_therm *);
- } fan;
+ /* what should be done if the card overheats */
+ struct {
+ void (*downclock)(struct nouveau_therm *, bool active);
+ void (*pause)(struct nouveau_therm *, bool active);
+ } emergency;
/* ic */
struct i2c_client *ic;
};
-int nouveau_therm_init(struct nouveau_object *object);
-int nouveau_therm_fini(struct nouveau_object *object, bool suspend);
+int nouveau_therm_mode(struct nouveau_therm *therm, int mode);
int nouveau_therm_attr_get(struct nouveau_therm *therm,
enum nouveau_therm_attr_type type);
int nouveau_therm_attr_set(struct nouveau_therm *therm,
@@ -63,11 +114,35 @@ int nouveau_therm_sensor_ctor(struct nouveau_therm *therm);
int nouveau_therm_fan_ctor(struct nouveau_therm *therm);
int nouveau_therm_fan_get(struct nouveau_therm *therm);
-int nouveau_therm_fan_set(struct nouveau_therm *therm, int percent);
+int nouveau_therm_fan_set(struct nouveau_therm *therm, bool now, int percent);
int nouveau_therm_fan_user_get(struct nouveau_therm *therm);
int nouveau_therm_fan_user_set(struct nouveau_therm *therm, int percent);
-int nouveau_therm_fan_set_mode(struct nouveau_therm *therm,
- enum nouveau_therm_fan_mode mode);
-
int nouveau_therm_fan_sense(struct nouveau_therm *therm);
+
+int nouveau_therm_preinit(struct nouveau_therm *);
+
+void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm,
+ enum nouveau_therm_thrs thrs,
+ enum nouveau_therm_thrs_state st);
+enum nouveau_therm_thrs_state
+nouveau_therm_sensor_get_threshold_state(struct nouveau_therm *therm,
+ enum nouveau_therm_thrs thrs);
+void nouveau_therm_sensor_event(struct nouveau_therm *therm,
+ enum nouveau_therm_thrs thrs,
+ enum nouveau_therm_thrs_direction dir);
+void nouveau_therm_program_alarms_polling(struct nouveau_therm *therm);
+
+int nv50_fan_pwm_ctrl(struct nouveau_therm *, int, bool);
+int nv50_fan_pwm_get(struct nouveau_therm *, int, u32 *, u32 *);
+int nv50_fan_pwm_set(struct nouveau_therm *, int, u32, u32);
+int nv50_fan_pwm_clock(struct nouveau_therm *);
+int nv50_temp_get(struct nouveau_therm *therm);
+
+int nva3_therm_fan_sense(struct nouveau_therm *);
+
+int nouveau_fanpwm_create(struct nouveau_therm *, struct dcb_gpio_func *);
+int nouveau_fantog_create(struct nouveau_therm *, struct dcb_gpio_func *);
+int nouveau_fannil_create(struct nouveau_therm *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
index 204282301fb..b37624af829 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -58,11 +58,171 @@ static void
nouveau_therm_temp_safety_checks(struct nouveau_therm *therm)
{
struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvbios_therm_sensor *s = &priv->bios_sensor;
if (!priv->bios_sensor.slope_div)
priv->bios_sensor.slope_div = 1;
if (!priv->bios_sensor.offset_den)
priv->bios_sensor.offset_den = 1;
+
+ /* enforce a minimum hysteresis on thresholds */
+ s->thrs_fan_boost.hysteresis = max_t(u8, s->thrs_fan_boost.hysteresis, 2);
+ s->thrs_down_clock.hysteresis = max_t(u8, s->thrs_down_clock.hysteresis, 2);
+ s->thrs_critical.hysteresis = max_t(u8, s->thrs_critical.hysteresis, 2);
+ s->thrs_shutdown.hysteresis = max_t(u8, s->thrs_shutdown.hysteresis, 2);
+}
+
+/* must be called with alarm_program_lock held */
+void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm,
+ enum nouveau_therm_thrs thrs,
+ enum nouveau_therm_thrs_state st)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ priv->sensor.alarm_state[thrs] = st;
+}
+
+/* must be called with alarm_program_lock held */
+enum nouveau_therm_thrs_state
+nouveau_therm_sensor_get_threshold_state(struct nouveau_therm *therm,
+ enum nouveau_therm_thrs thrs)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ return priv->sensor.alarm_state[thrs];
+}
+
+static void
+nv_poweroff_work(struct work_struct *work)
+{
+ orderly_poweroff(true);
+ kfree(work);
+}
+
+void nouveau_therm_sensor_event(struct nouveau_therm *therm,
+ enum nouveau_therm_thrs thrs,
+ enum nouveau_therm_thrs_direction dir)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ bool active;
+ const char *thresholds[] = {
+ "fanboost", "downclock", "critical", "shutdown"
+ };
+ uint8_t temperature = therm->temp_get(therm);
+
+ if (thrs < 0 || thrs > 3)
+ return;
+
+ if (dir == NOUVEAU_THERM_THRS_FALLING)
+ nv_info(therm, "temperature (%u C) went below the '%s' threshold\n",
+ temperature, thresholds[thrs]);
+ else
+ nv_info(therm, "temperature (%u C) hit the '%s' threshold\n",
+ temperature, thresholds[thrs]);
+
+ active = (dir == NOUVEAU_THERM_THRS_RISING);
+ switch (thrs) {
+ case NOUVEAU_THERM_THRS_FANBOOST:
+ if (active) {
+ nouveau_therm_fan_set(therm, true, 100);
+ nouveau_therm_mode(therm, NOUVEAU_THERM_CTRL_AUTO);
+ }
+ break;
+ case NOUVEAU_THERM_THRS_DOWNCLOCK:
+ if (priv->emergency.downclock)
+ priv->emergency.downclock(therm, active);
+ break;
+ case NOUVEAU_THERM_THRS_CRITICAL:
+ if (priv->emergency.pause)
+ priv->emergency.pause(therm, active);
+ break;
+ case NOUVEAU_THERM_THRS_SHUTDOWN:
+ if (active) {
+ struct work_struct *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (work) {
+ INIT_WORK(work, nv_poweroff_work);
+ schedule_work(work);
+ }
+ }
+ break;
+ case NOUVEAU_THERM_THRS_NR:
+ break;
+ }
+
+}
+
+/* must be called with alarm_program_lock held */
+static void
+nouveau_therm_threshold_hyst_polling(struct nouveau_therm *therm,
+ const struct nvbios_therm_threshold *thrs,
+ enum nouveau_therm_thrs thrs_name)
+{
+ enum nouveau_therm_thrs_direction direction;
+ enum nouveau_therm_thrs_state prev_state, new_state;
+ int temp = therm->temp_get(therm);
+
+ prev_state = nouveau_therm_sensor_get_threshold_state(therm, thrs_name);
+
+ if (temp >= thrs->temp && prev_state == NOUVEAU_THERM_THRS_LOWER) {
+ direction = NOUVEAU_THERM_THRS_RISING;
+ new_state = NOUVEAU_THERM_THRS_HIGHER;
+ } else if (temp <= thrs->temp - thrs->hysteresis &&
+ prev_state == NOUVEAU_THERM_THRS_HIGHER) {
+ direction = NOUVEAU_THERM_THRS_FALLING;
+ new_state = NOUVEAU_THERM_THRS_LOWER;
+ } else
+ return; /* nothing to do */
+
+ nouveau_therm_sensor_set_threshold_state(therm, thrs_name, new_state);
+ nouveau_therm_sensor_event(therm, thrs_name, direction);
+}
+
+static void
+alarm_timer_callback(struct nouveau_alarm *alarm)
+{
+ struct nouveau_therm_priv *priv =
+ container_of(alarm, struct nouveau_therm_priv, sensor.therm_poll_alarm);
+ struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+ struct nouveau_timer *ptimer = nouveau_timer(priv);
+ struct nouveau_therm *therm = &priv->base;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+
+ nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_fan_boost,
+ NOUVEAU_THERM_THRS_FANBOOST);
+
+ nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_down_clock,
+ NOUVEAU_THERM_THRS_DOWNCLOCK);
+
+ nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_critical,
+ NOUVEAU_THERM_THRS_CRITICAL);
+
+ nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown,
+ NOUVEAU_THERM_THRS_SHUTDOWN);
+
+ /* schedule the next poll in one second */
+ if (list_empty(&alarm->head))
+ ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm);
+
+ spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+}
+
+void
+nouveau_therm_program_alarms_polling(struct nouveau_therm *therm)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+
+ nv_info(therm,
+ "programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
+ sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
+ sensor->thrs_down_clock.temp,
+ sensor->thrs_down_clock.hysteresis,
+ sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
+ sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
+
+ alarm_timer_callback(&priv->sensor.therm_poll_alarm);
}
int
@@ -71,6 +231,8 @@ nouveau_therm_sensor_ctor(struct nouveau_therm *therm)
struct nouveau_therm_priv *priv = (void *)therm;
struct nouveau_bios *bios = nouveau_bios(therm);
+ nouveau_alarm_init(&priv->sensor.therm_poll_alarm, alarm_timer_callback);
+
nouveau_therm_temp_set_defaults(therm);
if (nvbios_therm_sensor_parse(bios, NVBIOS_THERM_DOMAIN_CORE,
&priv->bios_sensor))
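The polling path above only reacts on real crossings: a threshold moves to the HIGHER state (and emits a RISING event) when the temperature reaches thrs->temp while the recorded state was LOWER, and falls back to LOWER (FALLING) only once it drops to thrs->temp - hysteresis. A compact userspace sketch of that state machine, with temperatures fed in by hand and the event callback replaced by a printf:

#include <stdio.h>

enum state { LOWER, HIGHER };

struct thrs { int temp, hyst; enum state st; };

/* one evaluation of a threshold against the current temperature,
 * mimicking nouveau_therm_threshold_hyst_polling() above */
static void thrs_poll(struct thrs *t, int cur, const char *name)
{
        enum state new_st = t->st;

        if (cur >= t->temp && t->st == LOWER)
                new_st = HIGHER;                /* rising crossing */
        else if (cur <= t->temp - t->hyst && t->st == HIGHER)
                new_st = LOWER;                 /* falling crossing */

        if (new_st != t->st)
                printf("%s: %s (temp=%d)\n", name,
                       new_st == HIGHER ? "rising" : "falling", cur);
        t->st = new_st;
}

int main(void)
{
        struct thrs fanboost = { 90, 2, LOWER };
        int samples[] = { 85, 91, 89, 87, 92 };

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                thrs_poll(&fanboost, samples[i], "fanboost");
        return 0;
}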
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
index c26ca9bef67..8e1bae4f12e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
@@ -79,7 +79,7 @@ nv04_timer_alarm_trigger(struct nouveau_timer *ptimer)
/* execute any pending alarm handlers */
list_for_each_entry_safe(alarm, atemp, &exec, head) {
- list_del(&alarm->head);
+ list_del_init(&alarm->head);
alarm->func(alarm);
}
}
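The one-line list_del() to list_del_init() change above matters because the new therm code only re-arms an alarm when list_empty(&alarm->head) is true; after a plain list_del() the removed entry's pointers are left poisoned rather than pointing at itself, so it would never read as empty again and the fan/sensor alarms would stop rescheduling. A toy illustration using simplified userspace stand-ins for the list helpers (not the kernel's linux/list.h):

#include <stdio.h>

/* simplified re-implementations of the kernel list helpers, only to
 * illustrate the list_del() -> list_del_init() change above */
struct list_head { struct list_head *next, *prev; };

static void list_del(struct list_head *e)       /* unlink only; e keeps stale links */
{
        e->prev->next = e->next;
        e->next->prev = e->prev;
}

static void list_del_init(struct list_head *e)  /* unlink and point e at itself */
{
        list_del(e);
        e->next = e->prev = e;
}

static int list_empty(const struct list_head *h)
{
        return h->next == h;
}

int main(void)
{
        struct list_head head = { &head, &head }, alarm;

        /* queue the "alarm" on the pending list */
        alarm.next = head.next; alarm.prev = &head;
        head.next->prev = &alarm; head.next = &alarm;

        list_del_init(&alarm);
        /* with plain list_del() this test would stay false and the
         * therm code would never re-arm the alarm */
        printf("alarm can be re-armed: %d\n", list_empty(&alarm));
        return 0;
}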
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h
index d0da230d770..74acf0f8778 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.h
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -3,7 +3,7 @@
#define ROM_BIOS_PAGE 4096
-#if defined(CONFIG_ACPI)
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
bool nouveau_is_optimus(void);
bool nouveau_is_v1_dsm(void);
void nouveau_register_dsm_handler(void);
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index f65b20a375f..5d940302d2a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -84,6 +84,8 @@ nv40_backlight_init(struct drm_connector *connector)
props.max_brightness = 31;
bd = backlight_device_register("nv_backlight", &connector->kdev, drm,
&nv40_bl_ops, &props);
+ if (IS_ERR(bd))
+ return PTR_ERR(bd);
drm->backlight = bd;
bd->props.brightness = nv40_get_intensity(bd);
backlight_update_status(bd);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 865eddfa30a..50a6dd02f7c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -678,23 +678,6 @@ int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head,
return 0;
}
-static void parse_bios_version(struct drm_device *dev, struct nvbios *bios, uint16_t offset)
-{
- /*
- * offset + 0 (8 bits): Micro version
- * offset + 1 (8 bits): Minor version
- * offset + 2 (8 bits): Chip version
- * offset + 3 (8 bits): Major version
- */
- struct nouveau_drm *drm = nouveau_drm(dev);
-
- bios->major_version = bios->data[offset + 3];
- bios->chip_version = bios->data[offset + 2];
- NV_INFO(drm, "Bios version %02x.%02x.%02x.%02x\n",
- bios->data[offset + 3], bios->data[offset + 2],
- bios->data[offset + 1], bios->data[offset]);
-}
-
static void parse_script_table_pointers(struct nvbios *bios, uint16_t offset)
{
/*
@@ -710,12 +693,6 @@ static void parse_script_table_pointers(struct nvbios *bios, uint16_t offset)
*/
bios->init_script_tbls_ptr = ROM16(bios->data[offset]);
- bios->macro_index_tbl_ptr = ROM16(bios->data[offset + 2]);
- bios->macro_tbl_ptr = ROM16(bios->data[offset + 4]);
- bios->condition_tbl_ptr = ROM16(bios->data[offset + 6]);
- bios->io_condition_tbl_ptr = ROM16(bios->data[offset + 8]);
- bios->io_flag_condition_tbl_ptr = ROM16(bios->data[offset + 10]);
- bios->init_function_tbl_ptr = ROM16(bios->data[offset + 12]);
}
static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
@@ -765,25 +742,6 @@ static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
return 0;
}
-static int parse_bit_C_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
-{
- /*
- * offset + 8 (16 bits): PLL limits table pointer
- *
- * There's more in here, but that's unknown.
- */
- struct nouveau_drm *drm = nouveau_drm(dev);
-
- if (bitentry->length < 10) {
- NV_ERROR(drm, "Do not understand BIT C table\n");
- return -EINVAL;
- }
-
- bios->pll_limit_tbl_ptr = ROM16(bios->data[bitentry->offset + 8]);
-
- return 0;
-}
-
static int parse_bit_display_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
{
/*
@@ -821,12 +779,6 @@ static int parse_bit_init_tbl_entry(struct drm_device *dev, struct nvbios *bios,
}
parse_script_table_pointers(bios, bitentry->offset);
-
- if (bitentry->length >= 16)
- bios->some_script_ptr = ROM16(bios->data[bitentry->offset + 14]);
- if (bitentry->length >= 18)
- bios->init96_tbl_ptr = ROM16(bios->data[bitentry->offset + 16]);
-
return 0;
}
@@ -852,8 +804,6 @@ static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
return -EINVAL;
}
- parse_bios_version(dev, bios, bitentry->offset);
-
/*
* bit 4 seems to indicate a mobile bios (doesn't suffer from BMP's
* Quadro identity crisis), other bits possibly as for BMP feature byte
@@ -1078,9 +1028,6 @@ parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset)
return ret;
if (bios->major_version >= 0x60) /* g80+ */
parse_bit_table(bios, bitoffset, &BIT_TABLE('A', A));
- ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('C', C));
- if (ret)
- return ret;
parse_bit_table(bios, bitoffset, &BIT_TABLE('D', display));
ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('I', init));
if (ret)
@@ -1228,8 +1175,6 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
*/
bios->feature_byte = bmp[9];
- parse_bios_version(dev, bios, offset + 10);
-
if (bmp_version_major < 5 || bmp_version_minor < 0x10)
bios->old_style_init = true;
legacy_scripts_offset = 18;
@@ -1276,8 +1221,10 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
bios->fp.lvdsmanufacturerpointer = ROM16(bmp[117]);
bios->fp.fpxlatemanufacturertableptr = ROM16(bmp[119]);
}
+#if 0
if (bmplength > 143)
bios->pll_limit_tbl_ptr = ROM16(bmp[142]);
+#endif
if (bmplength > 157)
bios->fp.duallink_transition_clk = ROM16(bmp[156]) * 10;
@@ -1522,6 +1469,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
}
case DCB_OUTPUT_DP:
entry->dpconf.sor.link = (conf & 0x00000030) >> 4;
+ entry->extdev = (conf & 0x0000ff00) >> 8;
switch ((conf & 0x00e00000) >> 21) {
case 0:
entry->dpconf.link_bw = 162000;
@@ -1543,8 +1491,10 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
}
break;
case DCB_OUTPUT_TMDS:
- if (dcb->version >= 0x40)
+ if (dcb->version >= 0x40) {
entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4;
+ entry->extdev = (conf & 0x0000ff00) >> 8;
+ }
else if (dcb->version >= 0x30)
entry->tmdsconf.slave_addr = (conf & 0x00000700) >> 8;
else if (dcb->version >= 0x22)
@@ -1937,9 +1887,9 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
if (conn[0] != 0xff) {
NV_INFO(drm, "DCB conn %02d: ", idx);
if (olddcb_conntab(dev)[3] < 4)
- printk("%04x\n", ROM16(conn[0]));
+ pr_cont("%04x\n", ROM16(conn[0]));
else
- printk("%08x\n", ROM32(conn[0]));
+ pr_cont("%08x\n", ROM32(conn[0]));
}
}
dcb_fake_connectors(bios);
@@ -2052,45 +2002,29 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
static bool NVInitVBIOS(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvbios *bios = &drm->vbios;
-
- memset(bios, 0, sizeof(struct nvbios));
- spin_lock_init(&bios->lock);
- bios->dev = dev;
-
- bios->data = nouveau_bios(drm->device)->data;
- bios->length = nouveau_bios(drm->device)->size;
- return true;
-}
+ struct nouveau_bios *bios = nouveau_bios(drm->device);
+ struct nvbios *legacy = &drm->vbios;
+
+ memset(legacy, 0, sizeof(struct nvbios));
+ spin_lock_init(&legacy->lock);
+ legacy->dev = dev;
+
+ legacy->data = bios->data;
+ legacy->length = bios->size;
+ legacy->major_version = bios->version.major;
+ legacy->chip_version = bios->version.chip;
+ if (bios->bit_offset) {
+ legacy->type = NVBIOS_BIT;
+ legacy->offset = bios->bit_offset;
+ return !parse_bit_structure(legacy, legacy->offset + 6);
+ } else
+ if (bios->bmp_offset) {
+ legacy->type = NVBIOS_BMP;
+ legacy->offset = bios->bmp_offset;
+ return !parse_bmp_structure(dev, legacy, legacy->offset);
+ }
-static int nouveau_parse_vbios_struct(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvbios *bios = &drm->vbios;
- const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' };
- const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 };
- int offset;
-
- offset = findstr(bios->data, bios->length,
- bit_signature, sizeof(bit_signature));
- if (offset) {
- NV_INFO(drm, "BIT BIOS found\n");
- bios->type = NVBIOS_BIT;
- bios->offset = offset;
- return parse_bit_structure(bios, offset + 6);
- }
-
- offset = findstr(bios->data, bios->length,
- bmp_signature, sizeof(bmp_signature));
- if (offset) {
- NV_INFO(drm, "BMP BIOS found\n");
- bios->type = NVBIOS_BMP;
- bios->offset = offset;
- return parse_bmp_structure(dev, bios, offset);
- }
-
- NV_ERROR(drm, "No known BIOS signature found\n");
- return -ENODEV;
+ return false;
}
int
@@ -2146,10 +2080,6 @@ nouveau_bios_init(struct drm_device *dev)
if (!NVInitVBIOS(dev))
return -ENODEV;
- ret = nouveau_parse_vbios_struct(dev);
- if (ret)
- return ret;
-
ret = parse_dcb_table(dev, bios);
if (ret)
return ret;
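
The hunks above drop the driver's own BIT/BMP signature scan (nouveau_parse_vbios_struct() and its findstr() calls) now that the core nouveau_bios subdev records the image offsets itself. For reference, the scan being retired amounted to the following; a minimal self-contained restatement, with find_signature() standing in for the removed findstr():

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* The removed code looked for these two patterns; a BIT image was
 * then parsed from offset + 6, a BMP image from the offset itself. */
static const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' };
static const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 };

/* Return the offset of the first match of `sig` in `rom`, or 0. */
static size_t
find_signature(const uint8_t *rom, size_t len,
	       const uint8_t *sig, size_t siglen)
{
	size_t i;

	if (len < siglen)
		return 0;
	for (i = 0; i <= len - siglen; i++) {
		if (!memcmp(&rom[i], sig, siglen))
			return i;
	}
	return 0;
}

int main(void)
{
	uint8_t rom[64] = { 0 };

	memcpy(&rom[16], bit_signature, sizeof(bit_signature));
	return find_signature(rom, sizeof(rom), bit_signature,
			      sizeof(bit_signature)) == 16 ? 0 : 1;
}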
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index f68c54ca422..7ccd28f11ad 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -107,20 +107,10 @@ struct nvbios {
bool old_style_init;
uint16_t init_script_tbls_ptr;
uint16_t extra_init_script_tbl_ptr;
- uint16_t macro_index_tbl_ptr;
- uint16_t macro_tbl_ptr;
- uint16_t condition_tbl_ptr;
- uint16_t io_condition_tbl_ptr;
- uint16_t io_flag_condition_tbl_ptr;
- uint16_t init_function_tbl_ptr;
-
- uint16_t pll_limit_tbl_ptr;
+
uint16_t ram_restrict_tbl_ptr;
uint8_t ram_restrict_group_count;
- uint16_t some_script_ptr; /* BIT I + 14 */
- uint16_t init96_tbl_ptr; /* BIT I + 16 */
-
struct dcb_table dcb;
struct {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 1699a9083a2..11ca82148ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -301,17 +301,18 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
struct ttm_buffer_object *bo = &nvbo->bo;
int ret;
+ ret = ttm_bo_reserve(bo, false, false, false, 0);
+ if (ret)
+ goto out;
+
if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
1 << bo->mem.mem_type, memtype);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (nvbo->pin_refcnt++)
- return 0;
-
- ret = ttm_bo_reserve(bo, false, false, false, 0);
- if (ret)
goto out;
nouveau_bo_placement_set(nvbo, memtype, 0);
@@ -329,10 +330,8 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
break;
}
}
- ttm_bo_unreserve(bo);
out:
- if (unlikely(ret))
- nvbo->pin_refcnt--;
+ ttm_bo_unreserve(bo);
return ret;
}
@@ -343,13 +342,13 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
struct ttm_buffer_object *bo = &nvbo->bo;
int ret;
- if (--nvbo->pin_refcnt)
- return 0;
-
ret = ttm_bo_reserve(bo, false, false, false, 0);
if (ret)
return ret;
+ if (--nvbo->pin_refcnt)
+ goto out;
+
nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
ret = nouveau_bo_validate(nvbo, false, false);
@@ -366,6 +365,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
}
}
+out:
ttm_bo_unreserve(bo);
return ret;
}
@@ -562,7 +562,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
struct nouveau_fence *fence = NULL;
int ret;
- ret = nouveau_fence_new(chan, &fence);
+ ret = nouveau_fence_new(chan, false, &fence);
if (ret)
return ret;
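
The nouveau_bo_pin()/nouveau_bo_unpin() hunks above reorder things so that the ttm reservation is taken before pin_refcnt is touched and every exit funnels through a single unreserve. A minimal user-space analogue of that pattern, with a plain mutex standing in for the reservation and illustrative names throughout:

#include <pthread.h>

struct example_bo {
	pthread_mutex_t resv;	/* stands in for the ttm reservation */
	int pin_refcnt;		/* only read/written with resv held  */
};

static int example_bo_pin(struct example_bo *bo)
{
	int ret = 0;

	pthread_mutex_lock(&bo->resv);		/* reserve first ...        */
	if (bo->pin_refcnt++)			/* ... then touch the count */
		goto out;
	/* first pin: place/validate the buffer here */
out:
	pthread_mutex_unlock(&bo->resv);	/* single exit path */
	return ret;
}

static int example_bo_unpin(struct example_bo *bo)
{
	pthread_mutex_lock(&bo->resv);
	if (--bo->pin_refcnt == 0) {
		/* last unpin: allow the buffer to move again */
	}
	pthread_mutex_unlock(&bo->resv);
	return 0;
}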
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 25ca37989d2..653dbbbd4fa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -28,10 +28,11 @@ struct nouveau_bo {
struct nouveau_drm_tile *tile;
struct drm_gem_object *gem;
+
+ /* protected by the ttm reservation lock */
int pin_refcnt;
struct ttm_bo_kmap_obj dma_buf_vmap;
- int vmapping_count;
};
static inline struct nouveau_bo *
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 174300b6a02..eaa80a2b81e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -51,14 +51,15 @@ nouveau_channel_idle(struct nouveau_channel *chan)
struct nouveau_fence *fence = NULL;
int ret;
- ret = nouveau_fence_new(chan, &fence);
+ ret = nouveau_fence_new(chan, false, &fence);
if (!ret) {
ret = nouveau_fence_wait(fence, false, false);
nouveau_fence_unref(&fence);
}
if (ret)
- NV_ERROR(cli, "failed to idle channel 0x%08x\n", chan->handle);
+ NV_ERROR(cli, "failed to idle channel 0x%08x [%s]\n",
+ chan->handle, cli->base.name);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index e620ba8271b..4dd7ae2ac6c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -55,8 +55,6 @@ MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (default: enabled)");
static int nouveau_duallink = 1;
module_param_named(duallink, nouveau_duallink, int, 0400);
-static void nouveau_connector_hotplug(void *, int);
-
struct nouveau_encoder *
find_encoder(struct drm_connector *connector, int type)
{
@@ -100,22 +98,6 @@ static void
nouveau_connector_destroy(struct drm_connector *connector)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
- struct nouveau_gpio *gpio;
- struct nouveau_drm *drm;
- struct drm_device *dev;
-
- if (!nv_connector)
- return;
-
- dev = nv_connector->base.dev;
- drm = nouveau_drm(dev);
- gpio = nouveau_gpio(drm->device);
-
- if (gpio && nv_connector->hpd != DCB_GPIO_UNUSED) {
- gpio->isr_del(gpio, 0, nv_connector->hpd, 0xff,
- nouveau_connector_hotplug, connector);
- }
-
kfree(nv_connector->edid);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
@@ -130,7 +112,6 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
- struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
struct nouveau_i2c_port *port = NULL;
int i, panel = -ENODEV;
@@ -160,8 +141,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
continue;
nv_encoder = nouveau_encoder(obj_to_encoder(obj));
- if (nv_encoder->dcb->i2c_index < 0xf)
- port = i2c->find(i2c, nv_encoder->dcb->i2c_index);
+ port = nv_encoder->i2c;
if (port && nv_probe_i2c(port, 0x50)) {
*pnv_encoder = nv_encoder;
break;
@@ -399,9 +379,10 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
struct edid *edid =
(struct edid *)nouveau_bios_embedded_edid(dev);
if (edid) {
- nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
- *(nv_connector->edid) = *edid;
- status = connector_status_connected;
+ nv_connector->edid =
+ kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
+ if (nv_connector->edid)
+ status = connector_status_connected;
}
}
@@ -911,6 +892,37 @@ nouveau_connector_funcs_lvds = {
.force = nouveau_connector_force
};
+static void
+nouveau_connector_hotplug_work(struct work_struct *work)
+{
+ struct nouveau_connector *nv_connector =
+ container_of(work, struct nouveau_connector, hpd_work);
+ struct drm_connector *connector = &nv_connector->base;
+ struct drm_device *dev = connector->dev;
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
+ bool plugged = gpio->get(gpio, 0, nv_connector->hpd.func, 0xff);
+
+ NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un",
+ drm_get_connector_name(connector));
+
+ if (plugged)
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ else
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+
+ drm_helper_hpd_irq_event(dev);
+}
+
+static int
+nouveau_connector_hotplug(struct nouveau_eventh *event, int index)
+{
+ struct nouveau_connector *nv_connector =
+ container_of(event, struct nouveau_connector, hpd_func);
+ schedule_work(&nv_connector->hpd_work);
+ return NVKM_EVENT_KEEP;
+}
+
static int
drm_conntype_from_dcb(enum dcb_connector_type dcb)
{
@@ -961,6 +973,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
return ERR_PTR(-ENOMEM);
connector = &nv_connector->base;
+ INIT_WORK(&nv_connector->hpd_work, nouveau_connector_hotplug_work);
nv_connector->index = index;
/* attempt to parse vbios connector type and hotplug gpio */
@@ -975,8 +988,11 @@ nouveau_connector_create(struct drm_device *dev, int index)
if (olddcb_conntab(dev)[3] >= 4)
entry |= (u32)ROM16(nv_connector->dcb[2]) << 16;
- nv_connector->hpd = ffs((entry & 0x07033000) >> 12);
- nv_connector->hpd = hpd[nv_connector->hpd];
+ ret = gpio->find(gpio, 0, hpd[ffs((entry & 0x07033000) >> 12)],
+ DCB_GPIO_UNUSED, &nv_connector->hpd);
+ nv_connector->hpd_func.func = nouveau_connector_hotplug;
+ if (ret)
+ nv_connector->hpd.func = DCB_GPIO_UNUSED;
nv_connector->type = nv_connector->dcb[0];
if (drm_conntype_from_dcb(nv_connector->type) ==
@@ -999,7 +1015,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
}
} else {
nv_connector->type = DCB_CONNECTOR_NONE;
- nv_connector->hpd = DCB_GPIO_UNUSED;
+ nv_connector->hpd.func = DCB_GPIO_UNUSED;
}
/* no vbios data, or an unknown dcb connector type - attempt to
@@ -1126,31 +1142,9 @@ nouveau_connector_create(struct drm_device *dev, int index)
}
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
- if (gpio && nv_connector->hpd != DCB_GPIO_UNUSED) {
- ret = gpio->isr_add(gpio, 0, nv_connector->hpd, 0xff,
- nouveau_connector_hotplug, connector);
- if (ret == 0)
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- }
+ if (nv_connector->hpd.func != DCB_GPIO_UNUSED)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
drm_sysfs_connector_add(connector);
return connector;
}
-
-static void
-nouveau_connector_hotplug(void *data, int plugged)
-{
- struct drm_connector *connector = data;
- struct drm_device *dev = connector->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
-
- NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un",
- drm_get_connector_name(connector));
-
- if (plugged)
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- else
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-
- drm_helper_hpd_irq_event(dev);
-}
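
Hotplug handling in the connector code above moves from a GPIO ISR that called the DRM helpers directly to a two-stage scheme: the event callback runs in interrupt context and only schedules a work item, and the work item re-reads the line and notifies DRM from process context. A stripped-down skeleton of that split (kernel-style sketch, not buildable on its own; names are illustrative):

#include <linux/workqueue.h>

struct example_connector {
	struct work_struct hpd_work;
	/* gpio line, struct drm_connector, ... */
};

/* Process context: may sleep, may take modeset locks. */
static void example_hpd_work(struct work_struct *work)
{
	struct example_connector *conn =
		container_of(work, struct example_connector, hpd_work);

	/* re-read the hotplug GPIO, DPMS the connector on or off,
	 * then call drm_helper_hpd_irq_event(dev) to notify userspace */
	(void)conn;
}

/* Interrupt/event context: do the minimum and get out. */
static int example_hpd_event(struct example_connector *conn)
{
	schedule_work(&conn->hpd_work);
	return 0;
}

/* At connector creation: INIT_WORK(&conn->hpd_work, example_hpd_work); */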
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 20eb84cce9e..6e399aad491 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -30,6 +30,11 @@
#include <drm/drm_edid.h>
#include "nouveau_crtc.h"
+#include <core/event.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/gpio.h>
+
struct nouveau_i2c_port;
enum nouveau_underscan_type {
@@ -61,7 +66,10 @@ struct nouveau_connector {
enum dcb_connector_type type;
u8 index;
u8 *dcb;
- u8 hpd;
+
+ struct dcb_gpio_func hpd;
+ struct work_struct hpd_work;
+ struct nouveau_eventh hpd_func;
int dithering_mode;
int dithering_depth;
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
new file mode 100644
index 00000000000..5392e07edfc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2009 Red Hat <bskeggs@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ * Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "nouveau_debugfs.h"
+#include "nouveau_drm.h"
+
+static int
+nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct nouveau_drm *drm = nouveau_drm(node->minor->dev);
+ int i;
+
+ for (i = 0; i < drm->vbios.length; i++)
+ seq_printf(m, "%c", drm->vbios.data[i]);
+ return 0;
+}
+
+static struct drm_info_list nouveau_debugfs_list[] = {
+ { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
+};
+#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
+
+int
+nouveau_debugfs_init(struct drm_minor *minor)
+{
+ drm_debugfs_create_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
+ minor->debugfs_root, minor);
+ return 0;
+}
+
+void
+nouveau_debugfs_takedown(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
+ minor);
+}
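
With debugfs mounted in the usual place, the file registered above appears as /sys/kernel/debug/dri/<minor>/vbios.rom. A small user-space reader, assuming minor 0 and that mount point:

#include <stdio.h>
#include <stdlib.h>

/* Dump the VBIOS image exposed by the debugfs entry above.  The path
 * assumes debugfs is mounted at /sys/kernel/debug and the card is
 * DRM minor 0. */
int main(void)
{
	const char *path = "/sys/kernel/debug/dri/0/vbios.rom";
	FILE *in = fopen(path, "rb");
	int c;

	if (!in) {
		perror(path);
		return EXIT_FAILURE;
	}
	while ((c = fgetc(in)) != EOF)
		putchar(c);
	fclose(in);
	return EXIT_SUCCESS;
}

Run it as root and redirect the output, e.g. ./a.out > vbios.rom.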
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.h b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
new file mode 100644
index 00000000000..a62af6fb5f9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
@@ -0,0 +1,22 @@
+#ifndef __NOUVEAU_DEBUGFS_H__
+#define __NOUVEAU_DEBUGFS_H__
+
+#include <drm/drmP.h>
+
+#if defined(CONFIG_DEBUG_FS)
+extern int nouveau_debugfs_init(struct drm_minor *);
+extern void nouveau_debugfs_takedown(struct drm_minor *);
+#else
+static inline int
+nouveau_debugfs_init(struct drm_minor *minor)
+{
+ return 0;
+}
+
+static inline void nouveau_debugfs_takedown(struct drm_minor *minor)
+{
+}
+
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 508b00a2ce0..4610c3a29bb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -41,6 +41,8 @@
#include <subdev/gpio.h>
#include <engine/disp.h>
+#include <core/class.h>
+
static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
{
@@ -78,11 +80,6 @@ nouveau_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *fb = &nv_fb->base;
int ret;
- ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
- if (ret) {
- return ret;
- }
-
drm_helper_mode_fill_fb_struct(fb, mode_cmd);
nv_fb->nvbo = nvbo;
@@ -125,6 +122,11 @@ nouveau_framebuffer_init(struct drm_device *dev,
}
}
+ ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
+ if (ret) {
+ return ret;
+ }
+
return 0;
}
@@ -231,8 +233,10 @@ nouveau_display_init(struct drm_device *dev)
/* enable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct nouveau_connector *conn = nouveau_connector(connector);
- if (gpio)
- gpio->irq(gpio, 0, conn->hpd, 0xff, true);
+ if (gpio && conn->hpd.func != DCB_GPIO_UNUSED) {
+ nouveau_event_get(gpio->events, conn->hpd.line,
+ &conn->hpd_func);
+ }
}
return ret;
@@ -249,37 +253,20 @@ nouveau_display_fini(struct drm_device *dev)
/* disable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct nouveau_connector *conn = nouveau_connector(connector);
- if (gpio)
- gpio->irq(gpio, 0, conn->hpd, 0xff, false);
+ if (gpio && conn->hpd.func != DCB_GPIO_UNUSED) {
+ nouveau_event_put(gpio->events, conn->hpd.line,
+ &conn->hpd_func);
+ }
}
drm_kms_helper_poll_disable(dev);
disp->fini(dev);
}
-static void
-nouveau_display_vblank_notify(void *data, int crtc)
-{
- drm_handle_vblank(data, crtc);
-}
-
-static void
-nouveau_display_vblank_get(void *data, int crtc)
-{
- drm_vblank_get(data, crtc);
-}
-
-static void
-nouveau_display_vblank_put(void *data, int crtc)
-{
- drm_vblank_put(data, crtc);
-}
-
int
nouveau_display_create(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_disp *pdisp = nouveau_disp(drm->device);
struct nouveau_display *disp;
u32 pclass = dev->pdev->class >> 8;
int ret, gen;
@@ -288,11 +275,6 @@ nouveau_display_create(struct drm_device *dev)
if (!disp)
return -ENOMEM;
- pdisp->vblank.data = dev;
- pdisp->vblank.notify = nouveau_display_vblank_notify;
- pdisp->vblank.get = nouveau_display_vblank_get;
- pdisp->vblank.put = nouveau_display_vblank_put;
-
drm_mode_config_init(dev);
drm_mode_create_scaling_mode_property(dev);
drm_mode_create_dvi_i_properties(dev);
@@ -316,17 +298,13 @@ nouveau_display_create(struct drm_device *dev)
drm_property_create_range(dev, 0, "underscan vborder", 0, 128);
if (gen >= 1) {
+ /* -90..+90 */
disp->vibrant_hue_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "vibrant hue", 2);
- disp->vibrant_hue_property->values[0] = 0;
- disp->vibrant_hue_property->values[1] = 180; /* -90..+90 */
+ drm_property_create_range(dev, 0, "vibrant hue", 0, 180);
+ /* -100..+100 */
disp->color_vibrance_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "color vibrance", 2);
- disp->color_vibrance_property->values[0] = 0;
- disp->color_vibrance_property->values[1] = 200; /* -100..+100 */
+ drm_property_create_range(dev, 0, "color vibrance", 0, 200);
}
dev->mode_config.funcs = &nouveau_mode_config_funcs;
@@ -478,39 +456,6 @@ nouveau_display_resume(struct drm_device *dev)
}
}
-int
-nouveau_vblank_enable(struct drm_device *dev, int crtc)
-{
- struct nouveau_device *device = nouveau_dev(dev);
-
- if (device->card_type >= NV_D0)
- nv_mask(device, 0x6100c0 + (crtc * 0x800), 1, 1);
- else
- if (device->card_type >= NV_50)
- nv_mask(device, NV50_PDISPLAY_INTR_EN_1, 0,
- NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc));
- else
- NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0,
- NV_PCRTC_INTR_0_VBLANK);
-
- return 0;
-}
-
-void
-nouveau_vblank_disable(struct drm_device *dev, int crtc)
-{
- struct nouveau_device *device = nouveau_dev(dev);
-
- if (device->card_type >= NV_D0)
- nv_mask(device, 0x6100c0 + (crtc * 0x800), 1, 0);
- else
- if (device->card_type >= NV_50)
- nv_mask(device, NV50_PDISPLAY_INTR_EN_1,
- NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0);
- else
- NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 0);
-}
-
static int
nouveau_page_flip_reserve(struct nouveau_bo *old_bo,
struct nouveau_bo *new_bo)
@@ -595,7 +540,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
}
FIRE_RING (chan);
- ret = nouveau_fence_new(chan, pfence);
+ ret = nouveau_fence_new(chan, false, pfence);
if (ret)
goto fail;
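
The property hunks above also show the replacement of the open-coded two-step (drm_property_create() with DRM_MODE_PROP_RANGE and two values, then filling values[0]/values[1]) by the one-call range helper, exactly as the + lines read. A minimal sketch of the same call, assuming only that the property name and bounds are the driver's own choice:

#include <drm/drm_crtc.h>

/* Create a range property in one call; the helper returns NULL on
 * allocation failure. */
static struct drm_property *
example_create_hue_property(struct drm_device *dev)
{
	return drm_property_create_range(dev, 0, "vibrant hue", 0, 180);
}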
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 722548bb3bd..1ea3e4734b6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -59,9 +59,6 @@ void nouveau_display_fini(struct drm_device *dev);
int nouveau_display_suspend(struct drm_device *dev);
void nouveau_display_resume(struct drm_device *dev);
-int nouveau_vblank_enable(struct drm_device *dev, int crtc);
-void nouveau_vblank_disable(struct drm_device *dev, int crtc);
-
int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event);
int nouveau_finish_page_flip(struct nouveau_channel *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 5c2e22932d1..690d5930ce3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -191,7 +191,7 @@ WIND_RING(struct nouveau_channel *chan)
#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG 0x00000002
#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL 0x00000004
#define NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD 0x00001000
-#define NV84_SUBCHAN_NOTIFY_INTR 0x00000020
+#define NV84_SUBCHAN_UEVENT 0x00000020
#define NV84_SUBCHAN_WRCACHE_FLUSH 0x00000024
#define NV10_SUBCHAN_REF_CNT 0x00000050
#define NVSW_SUBCHAN_PAGE_FLIP 0x00000054
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 59838651ee8..36fd2250056 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -35,300 +35,6 @@
#include <subdev/gpio.h>
#include <subdev/i2c.h>
-/******************************************************************************
- * link training
- *****************************************************************************/
-struct dp_state {
- struct nouveau_i2c_port *auxch;
- struct nouveau_object *core;
- struct dcb_output *dcb;
- int crtc;
- u8 *dpcd;
- int link_nr;
- u32 link_bw;
- u8 stat[6];
- u8 conf[4];
-};
-
-static void
-dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct dcb_output *dcb = dp->dcb;
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 moff = (dp->crtc << 3) | (link << 2) | or;
- u8 sink[2];
- u32 data;
-
- NV_DEBUG(drm, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
-
- /* set desired link configuration on the source */
- data = ((dp->link_bw / 27000) << 8) | dp->link_nr;
- if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)
- data |= NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH;
-
- nv_call(dp->core, NV94_DISP_SOR_DP_LNKCTL + moff, data);
-
- /* inform the sink of the new configuration */
- sink[0] = dp->link_bw / 27000;
- sink[1] = dp->link_nr;
- if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)
- sink[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-
- nv_wraux(dp->auxch, DP_LINK_BW_SET, sink, 2);
-}
-
-static void
-dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 pattern)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct dcb_output *dcb = dp->dcb;
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 moff = (dp->crtc << 3) | (link << 2) | or;
- u8 sink_tp;
-
- NV_DEBUG(drm, "training pattern %d\n", pattern);
-
- nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, pattern);
-
- nv_rdaux(dp->auxch, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
- sink_tp &= ~DP_TRAINING_PATTERN_MASK;
- sink_tp |= pattern;
- nv_wraux(dp->auxch, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
-}
-
-static int
-dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct dcb_output *dcb = dp->dcb;
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 moff = (dp->crtc << 3) | (link << 2) | or;
- int i;
-
- for (i = 0; i < dp->link_nr; i++) {
- u8 lane = (dp->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf;
- u8 lpre = (lane & 0x0c) >> 2;
- u8 lvsw = (lane & 0x03) >> 0;
-
- dp->conf[i] = (lpre << 3) | lvsw;
- if (lvsw == DP_TRAIN_VOLTAGE_SWING_1200)
- dp->conf[i] |= DP_TRAIN_MAX_SWING_REACHED;
- if ((lpre << 3) == DP_TRAIN_PRE_EMPHASIS_9_5)
- dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
-
- NV_DEBUG(drm, "config lane %d %02x\n", i, dp->conf[i]);
-
- nv_call(dp->core, NV94_DISP_SOR_DP_DRVCTL(i) + moff, (lvsw << 8) | lpre);
- }
-
- return nv_wraux(dp->auxch, DP_TRAINING_LANE0_SET, dp->conf, 4);
-}
-
-static int
-dp_link_train_update(struct drm_device *dev, struct dp_state *dp, u32 delay)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- int ret;
-
- udelay(delay);
-
- ret = nv_rdaux(dp->auxch, DP_LANE0_1_STATUS, dp->stat, 6);
- if (ret)
- return ret;
-
- NV_DEBUG(drm, "status %*ph\n", 6, dp->stat);
- return 0;
-}
-
-static int
-dp_link_train_cr(struct drm_device *dev, struct dp_state *dp)
-{
- bool cr_done = false, abort = false;
- int voltage = dp->conf[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
- int tries = 0, i;
-
- dp_set_training_pattern(dev, dp, DP_TRAINING_PATTERN_1);
-
- do {
- if (dp_link_train_commit(dev, dp) ||
- dp_link_train_update(dev, dp, 100))
- break;
-
- cr_done = true;
- for (i = 0; i < dp->link_nr; i++) {
- u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
- if (!(lane & DP_LANE_CR_DONE)) {
- cr_done = false;
- if (dp->conf[i] & DP_TRAIN_MAX_SWING_REACHED)
- abort = true;
- break;
- }
- }
-
- if ((dp->conf[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) {
- voltage = dp->conf[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
- tries = 0;
- }
- } while (!cr_done && !abort && ++tries < 5);
-
- return cr_done ? 0 : -1;
-}
-
-static int
-dp_link_train_eq(struct drm_device *dev, struct dp_state *dp)
-{
- bool eq_done, cr_done = true;
- int tries = 0, i;
-
- dp_set_training_pattern(dev, dp, DP_TRAINING_PATTERN_2);
-
- do {
- if (dp_link_train_update(dev, dp, 400))
- break;
-
- eq_done = !!(dp->stat[2] & DP_INTERLANE_ALIGN_DONE);
- for (i = 0; i < dp->link_nr && eq_done; i++) {
- u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
- if (!(lane & DP_LANE_CR_DONE))
- cr_done = false;
- if (!(lane & DP_LANE_CHANNEL_EQ_DONE) ||
- !(lane & DP_LANE_SYMBOL_LOCKED))
- eq_done = false;
- }
-
- if (dp_link_train_commit(dev, dp))
- break;
- } while (!eq_done && cr_done && ++tries <= 5);
-
- return eq_done ? 0 : -1;
-}
-
-static void
-dp_link_train_init(struct drm_device *dev, struct dp_state *dp, bool spread)
-{
- struct dcb_output *dcb = dp->dcb;
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 moff = (dp->crtc << 3) | (link << 2) | or;
-
- nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, (spread ?
- NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON :
- NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF) |
- NV94_DISP_SOR_DP_TRAIN_OP_INIT);
-}
-
-static void
-dp_link_train_fini(struct drm_device *dev, struct dp_state *dp)
-{
- struct dcb_output *dcb = dp->dcb;
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 moff = (dp->crtc << 3) | (link << 2) | or;
-
- nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff,
- NV94_DISP_SOR_DP_TRAIN_OP_FINI);
-}
-
-static bool
-nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
- struct nouveau_object *core)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nouveau_connector *nv_connector =
- nouveau_encoder_connector_get(nv_encoder);
- struct drm_device *dev = encoder->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
- struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
- const u32 bw_list[] = { 270000, 162000, 0 };
- const u32 *link_bw = bw_list;
- struct dp_state dp;
-
- dp.auxch = i2c->find(i2c, nv_encoder->dcb->i2c_index);
- if (!dp.auxch)
- return false;
-
- dp.core = core;
- dp.dcb = nv_encoder->dcb;
- dp.crtc = nv_crtc->index;
- dp.dpcd = nv_encoder->dp.dpcd;
-
- /* adjust required bandwidth for 8B/10B coding overhead */
- datarate = (datarate / 8) * 10;
-
- /* some sinks toggle hotplug in response to some of the actions
- * we take during link training (DP_SET_POWER is one), we need
- * to ignore them for the moment to avoid races.
- */
- gpio->irq(gpio, 0, nv_connector->hpd, 0xff, false);
-
- /* enable down-spreading and execute pre-train script from vbios */
- dp_link_train_init(dev, &dp, nv_encoder->dp.dpcd[3] & 1);
-
- /* start off at highest link rate supported by encoder and display */
- while (*link_bw > nv_encoder->dp.link_bw)
- link_bw++;
-
- while (link_bw[0]) {
- /* find minimum required lane count at this link rate */
- dp.link_nr = nv_encoder->dp.link_nr;
- while ((dp.link_nr >> 1) * link_bw[0] > datarate)
- dp.link_nr >>= 1;
-
- /* drop link rate to minimum with this lane count */
- while ((link_bw[1] * dp.link_nr) > datarate)
- link_bw++;
- dp.link_bw = link_bw[0];
-
- /* program selected link configuration */
- dp_set_link_config(dev, &dp);
-
- /* attempt to train the link at this configuration */
- memset(dp.stat, 0x00, sizeof(dp.stat));
- if (!dp_link_train_cr(dev, &dp) &&
- !dp_link_train_eq(dev, &dp))
- break;
-
- /* retry at lower rate */
- link_bw++;
- }
-
- /* finish link training */
- dp_set_training_pattern(dev, &dp, DP_TRAINING_PATTERN_DISABLE);
-
- /* execute post-train script from vbios */
- dp_link_train_fini(dev, &dp);
-
- /* re-enable hotplug detect */
- gpio->irq(gpio, 0, nv_connector->hpd, 0xff, true);
- return true;
-}
-
-void
-nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
- struct nouveau_object *core)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
- struct nouveau_i2c_port *auxch;
- u8 status;
-
- auxch = i2c->find(i2c, nv_encoder->dcb->i2c_index);
- if (!auxch)
- return;
-
- if (mode == DRM_MODE_DPMS_ON)
- status = DP_SET_POWER_D0;
- else
- status = DP_SET_POWER_D3;
-
- nv_wraux(auxch, DP_SET_POWER, &status, 1);
-
- if (mode == DRM_MODE_DPMS_ON)
- nouveau_dp_link_train(encoder, datarate, core);
-}
-
static void
nouveau_dp_probe_oui(struct drm_device *dev, struct nouveau_i2c_port *auxch,
u8 *dpcd)
@@ -355,12 +61,11 @@ nouveau_dp_detect(struct drm_encoder *encoder)
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
struct nouveau_i2c_port *auxch;
u8 *dpcd = nv_encoder->dp.dpcd;
int ret;
- auxch = i2c->find(i2c, nv_encoder->dcb->i2c_index);
+ auxch = nv_encoder->i2c;
if (!auxch)
return false;
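
The link-training code removed above (it now lives behind the core display engine) contained the lane/rate haggling: inflate the stream rate by 10/8 for 8b/10b coding, start at the highest rate both ends support, halve the lane count while half still carries the payload, and fall back to a lower rate on training failure. The lane-count arithmetic, restated as self-contained C with an illustrative function name (rates in kB/s per lane, as in the removed bw_list):

#include <stdio.h>
#include <stdint.h>

/* Pick the smallest lane count at a given per-lane rate that still
 * carries `datarate` (kB/s of payload), mirroring the removed loop:
 * the requirement is first inflated by 10/8 for 8b/10b coding. */
static int dp_min_lanes(uint32_t datarate, uint32_t link_bw, int max_lanes)
{
	int nr = max_lanes;

	datarate = (datarate / 8) * 10;		/* coding overhead      */
	while ((nr >> 1) * link_bw > datarate)	/* is half still enough? */
		nr >>= 1;
	return nr;
}

int main(void)
{
	/* 1080p@60, 24bpp: 148500 kHz * 24 / 8 = 445500 kB/s payload,
	 * which needs all four lanes at 270000 kB/s per lane. */
	printf("%d lanes @ 270000\n", dp_min_lanes(445500, 270000, 4));
	return 0;
}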
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 5e7aef23825..d1099365bfc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -34,6 +34,8 @@
#include <subdev/device.h>
#include <subdev/vm.h>
+#include <engine/disp.h>
+
#include "nouveau_drm.h"
#include "nouveau_irq.h"
#include "nouveau_dma.h"
@@ -48,6 +50,7 @@
#include "nouveau_abi16.h"
#include "nouveau_fbcon.h"
#include "nouveau_fence.h"
+#include "nouveau_debugfs.h"
MODULE_PARM_DESC(config, "option string to pass to driver core");
static char *nouveau_config;
@@ -68,6 +71,32 @@ module_param_named(modeset, nouveau_modeset, int, 0400);
static struct drm_driver driver;
+static int
+nouveau_drm_vblank_enable(struct drm_device *dev, int head)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_disp *pdisp = nouveau_disp(drm->device);
+ nouveau_event_get(pdisp->vblank, head, &drm->vblank);
+ return 0;
+}
+
+static void
+nouveau_drm_vblank_disable(struct drm_device *dev, int head)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_disp *pdisp = nouveau_disp(drm->device);
+ nouveau_event_put(pdisp->vblank, head, &drm->vblank);
+}
+
+static int
+nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head)
+{
+ struct nouveau_drm *drm =
+ container_of(event, struct nouveau_drm, vblank);
+ drm_handle_vblank(drm->dev, head);
+ return NVKM_EVENT_KEEP;
+}
+
static u64
nouveau_name(struct pci_dev *pdev)
{
@@ -132,7 +161,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
/* initialise synchronisation routines */
if (device->card_type < NV_10) ret = nv04_fence_create(drm);
- else if (device->card_type < NV_50) ret = nv10_fence_create(drm);
+ else if (device->chipset < 0x17) ret = nv10_fence_create(drm);
+ else if (device->card_type < NV_50) ret = nv17_fence_create(drm);
else if (device->chipset < 0x84) ret = nv50_fence_create(drm);
else if (device->card_type < NV_C0) ret = nv84_fence_create(drm);
else ret = nvc0_fence_create(drm);
@@ -262,6 +292,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = drm;
drm->dev = dev;
+ drm->vblank.func = nouveau_drm_vblank_handler;
INIT_LIST_HEAD(&drm->clients);
spin_lock_init(&drm->tile.lock);
@@ -401,7 +432,7 @@ nouveau_drm_remove(struct pci_dev *pdev)
nouveau_object_debug();
}
-int
+static int
nouveau_do_suspend(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
@@ -472,7 +503,7 @@ int nouveau_pmops_suspend(struct device *dev)
return 0;
}
-int
+static int
nouveau_do_resume(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
@@ -546,10 +577,11 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
struct pci_dev *pdev = dev->pdev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli;
- char name[16];
+ char name[32], tmpname[TASK_COMM_LEN];
int ret;
- snprintf(name, sizeof(name), "%d", pid_nr(fpriv->pid));
+ get_task_comm(tmpname, current);
+ snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
ret = nouveau_cli_create(pdev, name, sizeof(*cli), (void **)&cli);
if (ret)
@@ -639,22 +671,32 @@ driver = {
.postclose = nouveau_drm_postclose,
.lastclose = nouveau_vga_lastclose,
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = nouveau_debugfs_init,
+ .debugfs_cleanup = nouveau_debugfs_takedown,
+#endif
+
.irq_preinstall = nouveau_irq_preinstall,
.irq_postinstall = nouveau_irq_postinstall,
.irq_uninstall = nouveau_irq_uninstall,
.irq_handler = nouveau_irq_handler,
.get_vblank_counter = drm_vblank_count,
- .enable_vblank = nouveau_vblank_enable,
- .disable_vblank = nouveau_vblank_disable,
+ .enable_vblank = nouveau_drm_vblank_enable,
+ .disable_vblank = nouveau_drm_vblank_disable,
.ioctls = nouveau_ioctls,
.fops = &nouveau_driver_fops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = nouveau_gem_prime_export,
- .gem_prime_import = nouveau_gem_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_pin = nouveau_gem_prime_pin,
+ .gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
+ .gem_prime_vmap = nouveau_gem_prime_vmap,
+ .gem_prime_vunmap = nouveau_gem_prime_vunmap,
.gem_init_object = nouveau_gem_object_new,
.gem_free_object = nouveau_gem_object_del,
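
Several handlers introduced in this series (the vblank handler above, the connector hotplug handler, the fence uevent handler further down) recover their owning object from an embedded struct nouveau_eventh via container_of(). For readers unfamiliar with the idiom, a self-contained user-space rendering of it:

#include <stddef.h>
#include <stdio.h>

/* container_of: given a pointer to a member, recover the pointer to
 * the structure that embeds it.  This is the user-space spelling of
 * the kernel macro used by the handlers above. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct eventh { int (*func)(struct eventh *, int); };

struct owner {
	int id;
	struct eventh handler;	/* embedded, as drm->vblank is */
};

static int handler_fn(struct eventh *event, int index)
{
	struct owner *o = container_of(event, struct owner, handler);

	printf("event %d for owner %d\n", index, o->id);
	return 0;
}

int main(void)
{
	struct owner o = { .id = 7, .handler = { .func = handler_fn } };

	return o.handler.func(&o.handler, 3) ? 1 : 0;
}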
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index aa89eb938b4..b25df374c90 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -13,6 +13,7 @@
#define DRIVER_PATCHLEVEL 0
#include <core/client.h>
+#include <core/event.h>
#include <subdev/vm.h>
@@ -112,6 +113,7 @@ struct nouveau_drm {
struct nvbios vbios;
struct nouveau_display *display;
struct backlight_device *backlight;
+ struct nouveau_eventh vblank;
/* power management */
struct nouveau_pm *pm;
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index d0d95bd511a..e24341229d5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -36,19 +36,12 @@
struct nouveau_i2c_port;
-struct dp_train_func {
- void (*link_set)(struct drm_device *, struct dcb_output *, int crtc,
- int nr, u32 bw, bool enhframe);
- void (*train_set)(struct drm_device *, struct dcb_output *, u8 pattern);
- void (*train_adj)(struct drm_device *, struct dcb_output *,
- u8 lane, u8 swing, u8 preem);
-};
-
struct nouveau_encoder {
struct drm_encoder_slave base;
struct dcb_output *dcb;
int or;
+ struct nouveau_i2c_port *i2c;
/* different to drm_encoder.crtc, this reflects what's
* actually programmed on the hw, not the proposed crtc */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 67a1a069de2..b0353178158 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -251,9 +251,10 @@ nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *fbcon)
}
static int
-nouveau_fbcon_create(struct nouveau_fbdev *fbcon,
+nouveau_fbcon_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct nouveau_fbdev *fbcon = (struct nouveau_fbdev *)helper;
struct drm_device *dev = fbcon->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_device *device = nv_device(drm->device);
@@ -388,23 +389,6 @@ out:
return ret;
}
-static int
-nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct nouveau_fbdev *fbcon = (struct nouveau_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = nouveau_fbcon_create(fbcon, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
void
nouveau_fbcon_output_poll_changed(struct drm_device *dev)
{
@@ -433,6 +417,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
nouveau_fb->nvbo = NULL;
}
drm_fb_helper_fini(&fbcon->helper);
+ drm_framebuffer_unregister_private(&nouveau_fb->base);
drm_framebuffer_cleanup(&nouveau_fb->base);
return 0;
}
@@ -449,7 +434,7 @@ void nouveau_fbcon_gpu_lockup(struct fb_info *info)
static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
.gamma_set = nouveau_fbcon_gamma_set,
.gamma_get = nouveau_fbcon_gamma_get,
- .fb_probe = nouveau_fbcon_find_or_create_single,
+ .fb_probe = nouveau_fbcon_create,
};
@@ -490,6 +475,9 @@ nouveau_fbcon_init(struct drm_device *dev)
else
preferred_bpp = 32;
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 1d049be79f7..6c946837a0a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -33,14 +33,14 @@
#include "nouveau_dma.h"
#include "nouveau_fence.h"
+#include <engine/fifo.h>
+
void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
struct nouveau_fence *fence, *fnext;
spin_lock(&fctx->lock);
list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
- if (fence->work)
- fence->work(fence->priv, false);
fence->channel = NULL;
list_del(&fence->head);
nouveau_fence_unref(&fence);
@@ -59,17 +59,14 @@ nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
static void
nouveau_fence_update(struct nouveau_channel *chan)
{
- struct nouveau_fence_priv *priv = chan->drm->fence;
struct nouveau_fence_chan *fctx = chan->fence;
struct nouveau_fence *fence, *fnext;
spin_lock(&fctx->lock);
list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
- if (priv->read(chan) < fence->sequence)
+ if (fctx->read(chan) < fence->sequence)
break;
- if (fence->work)
- fence->work(fence->priv, true);
fence->channel = NULL;
list_del(&fence->head);
nouveau_fence_unref(&fence);
@@ -80,7 +77,6 @@ nouveau_fence_update(struct nouveau_channel *chan)
int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
- struct nouveau_fence_priv *priv = chan->drm->fence;
struct nouveau_fence_chan *fctx = chan->fence;
int ret;
@@ -88,7 +84,7 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
fence->timeout = jiffies + (3 * DRM_HZ);
fence->sequence = ++fctx->sequence;
- ret = priv->emit(fence);
+ ret = fctx->emit(fence);
if (!ret) {
kref_get(&fence->kref);
spin_lock(&fctx->lock);
@@ -107,13 +103,87 @@ nouveau_fence_done(struct nouveau_fence *fence)
return !fence->channel;
}
+struct nouveau_fence_uevent {
+ struct nouveau_eventh handler;
+ struct nouveau_fence_priv *priv;
+};
+
+static int
+nouveau_fence_wait_uevent_handler(struct nouveau_eventh *event, int index)
+{
+ struct nouveau_fence_uevent *uevent =
+ container_of(event, struct nouveau_fence_uevent, handler);
+ wake_up_all(&uevent->priv->waiting);
+ return NVKM_EVENT_KEEP;
+}
+
+static int
+nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr)
+
+{
+ struct nouveau_channel *chan = fence->channel;
+ struct nouveau_fifo *pfifo = nouveau_fifo(chan->drm->device);
+ struct nouveau_fence_priv *priv = chan->drm->fence;
+ struct nouveau_fence_uevent uevent = {
+ .handler.func = nouveau_fence_wait_uevent_handler,
+ .priv = priv,
+ };
+ int ret = 0;
+
+ nouveau_event_get(pfifo->uevent, 0, &uevent.handler);
+
+ if (fence->timeout) {
+ unsigned long timeout = fence->timeout - jiffies;
+
+ if (time_before(jiffies, fence->timeout)) {
+ if (intr) {
+ ret = wait_event_interruptible_timeout(
+ priv->waiting,
+ nouveau_fence_done(fence),
+ timeout);
+ } else {
+ ret = wait_event_timeout(priv->waiting,
+ nouveau_fence_done(fence),
+ timeout);
+ }
+ }
+
+ if (ret >= 0) {
+ fence->timeout = jiffies + ret;
+ if (time_after_eq(jiffies, fence->timeout))
+ ret = -EBUSY;
+ }
+ } else {
+ if (intr) {
+ ret = wait_event_interruptible(priv->waiting,
+ nouveau_fence_done(fence));
+ } else {
+ wait_event(priv->waiting, nouveau_fence_done(fence));
+ }
+ }
+
+ nouveau_event_put(pfifo->uevent, 0, &uevent.handler);
+ if (unlikely(ret < 0))
+ return ret;
+
+ return 0;
+}
+
int
nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
+ struct nouveau_channel *chan = fence->channel;
+ struct nouveau_fence_priv *priv = chan ? chan->drm->fence : NULL;
unsigned long sleep_time = NSEC_PER_MSEC / 1000;
ktime_t t;
int ret = 0;
+ while (priv && priv->uevent && lazy && !nouveau_fence_done(fence)) {
+ ret = nouveau_fence_wait_uevent(fence, intr);
+ if (ret < 0)
+ return ret;
+ }
+
while (!nouveau_fence_done(fence)) {
if (fence->timeout && time_after_eq(jiffies, fence->timeout)) {
ret = -EBUSY;
@@ -143,14 +213,14 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
int
nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
- struct nouveau_fence_priv *priv = chan->drm->fence;
+ struct nouveau_fence_chan *fctx = chan->fence;
struct nouveau_channel *prev;
int ret = 0;
prev = fence ? fence->channel : NULL;
if (prev) {
if (unlikely(prev != chan && !nouveau_fence_done(fence))) {
- ret = priv->sync(fence, prev, chan);
+ ret = fctx->sync(fence, prev, chan);
if (unlikely(ret))
ret = nouveau_fence_wait(fence, true, false);
}
@@ -182,7 +252,8 @@ nouveau_fence_ref(struct nouveau_fence *fence)
}
int
-nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence)
+nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
+ struct nouveau_fence **pfence)
{
struct nouveau_fence *fence;
int ret = 0;
@@ -193,13 +264,13 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence)
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (!fence)
return -ENOMEM;
+
+ fence->sysmem = sysmem;
kref_init(&fence->kref);
- if (chan) {
- ret = nouveau_fence_emit(fence, chan);
- if (ret)
- nouveau_fence_unref(&fence);
- }
+ ret = nouveau_fence_emit(fence, chan);
+ if (ret)
+ nouveau_fence_unref(&fence);
*pfence = fence;
return ret;
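
nouveau_fence_wait_uevent() above keeps an absolute deadline in fence->timeout and re-derives it after every sleep: only the remaining budget is handed to wait_event_timeout(), and the unused part that the macro returns becomes the new deadline (nothing left means -EBUSY). Just that bookkeeping, pulled out as a kernel-style sketch (not buildable standalone; names are illustrative):

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

/* `deadline` is an absolute jiffies value; each sleep gets only the
 * remaining budget, and whatever wait_event_timeout() leaves over
 * becomes the new deadline. */
static long wait_with_deadline(wait_queue_head_t *wq, bool (*done)(void *),
			       void *cookie, unsigned long *deadline)
{
	long ret = 0;

	if (time_before(jiffies, *deadline))
		ret = wait_event_timeout(*wq, done(cookie),
					 *deadline - jiffies);
	if (ret >= 0) {
		*deadline = jiffies + ret;	/* shrink the budget */
		if (time_after_eq(jiffies, *deadline))
			ret = -EBUSY;		/* budget exhausted  */
	}
	return ret;
}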
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index cdb83acdffe..c89943407b5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -7,15 +7,15 @@ struct nouveau_fence {
struct list_head head;
struct kref kref;
+ bool sysmem;
+
struct nouveau_channel *channel;
unsigned long timeout;
u32 sequence;
-
- void (*work)(void *priv, bool signalled);
- void *priv;
};
-int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **);
+int nouveau_fence_new(struct nouveau_channel *, bool sysmem,
+ struct nouveau_fence **);
struct nouveau_fence *
nouveau_fence_ref(struct nouveau_fence *);
void nouveau_fence_unref(struct nouveau_fence **);
@@ -29,6 +29,13 @@ struct nouveau_fence_chan {
struct list_head pending;
struct list_head flip;
+ int (*emit)(struct nouveau_fence *);
+ int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
+ struct nouveau_channel *);
+ u32 (*read)(struct nouveau_channel *);
+ int (*emit32)(struct nouveau_channel *, u64, u32);
+ int (*sync32)(struct nouveau_channel *, u64, u32);
+
spinlock_t lock;
u32 sequence;
};
@@ -39,10 +46,9 @@ struct nouveau_fence_priv {
void (*resume)(struct nouveau_drm *);
int (*context_new)(struct nouveau_channel *);
void (*context_del)(struct nouveau_channel *);
- int (*emit)(struct nouveau_fence *);
- int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
- struct nouveau_channel *);
- u32 (*read)(struct nouveau_channel *);
+
+ wait_queue_head_t waiting;
+ bool uevent;
};
#define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence)
@@ -60,13 +66,31 @@ u32 nv10_fence_read(struct nouveau_channel *);
void nv10_fence_context_del(struct nouveau_channel *);
void nv10_fence_destroy(struct nouveau_drm *);
int nv10_fence_create(struct nouveau_drm *);
+
+int nv17_fence_create(struct nouveau_drm *);
void nv17_fence_resume(struct nouveau_drm *drm);
int nv50_fence_create(struct nouveau_drm *);
int nv84_fence_create(struct nouveau_drm *);
int nvc0_fence_create(struct nouveau_drm *);
-u64 nvc0_fence_crtc(struct nouveau_channel *, int crtc);
int nouveau_flip_complete(void *chan);
+struct nv84_fence_chan {
+ struct nouveau_fence_chan base;
+ struct nouveau_vma vma;
+ struct nouveau_vma vma_gart;
+ struct nouveau_vma dispc_vma[4];
+};
+
+struct nv84_fence_priv {
+ struct nouveau_fence_priv base;
+ struct nouveau_bo *bo;
+ struct nouveau_bo *bo_gart;
+ u32 *suspend;
+};
+
+u64 nv84_fence_crtc(struct nouveau_channel *, int);
+int nv84_fence_context_new(struct nouveau_channel *);
+
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 8bf695c52f9..b4b4d0c1f4a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -24,8 +24,6 @@
*
*/
-#include <linux/dma-buf.h>
-
#include <subdev/fb.h>
#include "nouveau_drm.h"
@@ -205,6 +203,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_fb *pfb = nouveau_fb(drm->device);
struct drm_nouveau_gem_new *req = data;
struct nouveau_bo *nvbo = NULL;
@@ -213,7 +212,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;
if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
- NV_ERROR(drm, "bad page flags: 0x%08x\n", req->info.tile_flags);
+ NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
return -EINVAL;
}
@@ -315,16 +314,18 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
struct drm_nouveau_gem_pushbuf_bo *pbbo,
int nr_buffers, struct validate_op *op)
{
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
struct drm_device *dev = chan->drm->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
uint32_t sequence;
int trycnt = 0;
int ret, i;
+ struct nouveau_bo *res_bo = NULL;
sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
retry:
if (++trycnt > 100000) {
- NV_ERROR(drm, "%s failed and gave up.\n", __func__);
+ NV_ERROR(cli, "%s failed and gave up.\n", __func__);
return -EINVAL;
}
@@ -335,14 +336,19 @@ retry:
gem = drm_gem_object_lookup(dev, file_priv, b->handle);
if (!gem) {
- NV_ERROR(drm, "Unknown handle 0x%08x\n", b->handle);
+ NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle);
validate_fini(op, NULL);
return -ENOENT;
}
nvbo = gem->driver_private;
+ if (nvbo == res_bo) {
+ res_bo = NULL;
+ drm_gem_object_unreference_unlocked(gem);
+ continue;
+ }
if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
- NV_ERROR(drm, "multiple instances of buffer %d on "
+ NV_ERROR(cli, "multiple instances of buffer %d on "
"validation list\n", b->handle);
drm_gem_object_unreference_unlocked(gem);
validate_fini(op, NULL);
@@ -352,15 +358,19 @@ retry:
ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
if (ret) {
validate_fini(op, NULL);
- if (unlikely(ret == -EAGAIN))
- ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
- drm_gem_object_unreference_unlocked(gem);
+ if (unlikely(ret == -EAGAIN)) {
+ sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
+ ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
+ sequence);
+ if (!ret)
+ res_bo = nvbo;
+ }
if (unlikely(ret)) {
+ drm_gem_object_unreference_unlocked(gem);
if (ret != -ERESTARTSYS)
- NV_ERROR(drm, "fail reserve\n");
+ NV_ERROR(cli, "fail reserve\n");
return ret;
}
- goto retry;
}
b->user_priv = (uint64_t)(unsigned long)nvbo;
@@ -376,12 +386,14 @@ retry:
if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
list_add_tail(&nvbo->entry, &op->gart_list);
else {
- NV_ERROR(drm, "invalid valid domains: 0x%08x\n",
+ NV_ERROR(cli, "invalid valid domains: 0x%08x\n",
b->valid_domains);
list_add_tail(&nvbo->entry, &op->both_list);
validate_fini(op, NULL);
return -EINVAL;
}
+ if (nvbo == res_bo)
+ goto retry;
}
return 0;
@@ -407,8 +419,9 @@ validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
}
static int
-validate_list(struct nouveau_channel *chan, struct list_head *list,
- struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
+validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
+ struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
+ uint64_t user_pbbo_ptr)
{
struct nouveau_drm *drm = chan->drm;
struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
@@ -421,7 +434,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
ret = validate_sync(chan, nvbo);
if (unlikely(ret)) {
- NV_ERROR(drm, "fail pre-validate sync\n");
+ NV_ERROR(cli, "fail pre-validate sync\n");
return ret;
}
@@ -429,20 +442,20 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
b->write_domains,
b->valid_domains);
if (unlikely(ret)) {
- NV_ERROR(drm, "fail set_domain\n");
+ NV_ERROR(cli, "fail set_domain\n");
return ret;
}
ret = nouveau_bo_validate(nvbo, true, false);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(drm, "fail ttm_validate\n");
+ NV_ERROR(cli, "fail ttm_validate\n");
return ret;
}
ret = validate_sync(chan, nvbo);
if (unlikely(ret)) {
- NV_ERROR(drm, "fail post-validate sync\n");
+ NV_ERROR(cli, "fail post-validate sync\n");
return ret;
}
@@ -478,7 +491,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
uint64_t user_buffers, int nr_buffers,
struct validate_op *op, int *apply_relocs)
{
- struct nouveau_drm *drm = chan->drm;
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
int ret, relocs = 0;
INIT_LIST_HEAD(&op->vram_list);
@@ -491,32 +504,32 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(drm, "validate_init\n");
+ NV_ERROR(cli, "validate_init\n");
return ret;
}
- ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
+ ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(drm, "validate vram_list\n");
+ NV_ERROR(cli, "validate vram_list\n");
validate_fini(op, NULL);
return ret;
}
relocs += ret;
- ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
+ ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(drm, "validate gart_list\n");
+ NV_ERROR(cli, "validate gart_list\n");
validate_fini(op, NULL);
return ret;
}
relocs += ret;
- ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
+ ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(drm, "validate both_list\n");
+ NV_ERROR(cli, "validate both_list\n");
validate_fini(op, NULL);
return ret;
}
@@ -545,11 +558,10 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
}
static int
-nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
+nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
struct drm_nouveau_gem_pushbuf *req,
struct drm_nouveau_gem_pushbuf_bo *bo)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
int ret = 0;
unsigned i;
@@ -565,7 +577,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
uint32_t data;
if (unlikely(r->bo_index > req->nr_buffers)) {
- NV_ERROR(drm, "reloc bo index invalid\n");
+ NV_ERROR(cli, "reloc bo index invalid\n");
ret = -EINVAL;
break;
}
@@ -575,7 +587,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
continue;
if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
- NV_ERROR(drm, "reloc container bo index invalid\n");
+ NV_ERROR(cli, "reloc container bo index invalid\n");
ret = -EINVAL;
break;
}
@@ -583,7 +595,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
if (unlikely(r->reloc_bo_offset + 4 >
nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
- NV_ERROR(drm, "reloc outside of bo\n");
+ NV_ERROR(cli, "reloc outside of bo\n");
ret = -EINVAL;
break;
}
@@ -592,7 +604,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
&nvbo->kmap);
if (ret) {
- NV_ERROR(drm, "failed kmap for reloc\n");
+ NV_ERROR(cli, "failed kmap for reloc\n");
break;
}
nvbo->validate_mapped = true;
@@ -617,7 +629,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
ret = ttm_bo_wait(&nvbo->bo, false, false, false);
spin_unlock(&nvbo->bo.bdev->fence_lock);
if (ret) {
- NV_ERROR(drm, "reloc wait_idle failed: %d\n", ret);
+ NV_ERROR(cli, "reloc wait_idle failed: %d\n", ret);
break;
}
@@ -633,6 +645,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_abi16_chan *temp;
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_nouveau_gem_pushbuf *req = data;
@@ -662,19 +675,19 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
goto out_next;
if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
- NV_ERROR(drm, "pushbuf push count exceeds limit: %d max %d\n",
+ NV_ERROR(cli, "pushbuf push count exceeds limit: %d max %d\n",
req->nr_push, NOUVEAU_GEM_MAX_PUSH);
return nouveau_abi16_put(abi16, -EINVAL);
}
if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
- NV_ERROR(drm, "pushbuf bo count exceeds limit: %d max %d\n",
+ NV_ERROR(cli, "pushbuf bo count exceeds limit: %d max %d\n",
req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
return nouveau_abi16_put(abi16, -EINVAL);
}
if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
- NV_ERROR(drm, "pushbuf reloc count exceeds limit: %d max %d\n",
+ NV_ERROR(cli, "pushbuf reloc count exceeds limit: %d max %d\n",
req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
return nouveau_abi16_put(abi16, -EINVAL);
}
@@ -692,7 +705,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
/* Ensure all push buffers are on validate list */
for (i = 0; i < req->nr_push; i++) {
if (push[i].bo_index >= req->nr_buffers) {
- NV_ERROR(drm, "push %d buffer not in list\n", i);
+ NV_ERROR(cli, "push %d buffer not in list\n", i);
ret = -EINVAL;
goto out_prevalid;
}
@@ -703,15 +716,15 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
req->nr_buffers, &op, &do_reloc);
if (ret) {
if (ret != -ERESTARTSYS)
- NV_ERROR(drm, "validate: %d\n", ret);
+ NV_ERROR(cli, "validate: %d\n", ret);
goto out_prevalid;
}
/* Apply any relocations that are required */
if (do_reloc) {
- ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
+ ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
if (ret) {
- NV_ERROR(drm, "reloc apply: %d\n", ret);
+ NV_ERROR(cli, "reloc apply: %d\n", ret);
goto out;
}
}
@@ -719,7 +732,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (chan->dma.ib_max) {
ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
if (ret) {
- NV_ERROR(drm, "nv50cal_space: %d\n", ret);
+ NV_ERROR(cli, "nv50cal_space: %d\n", ret);
goto out;
}
@@ -734,7 +747,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (nv_device(drm->device)->chipset >= 0x25) {
ret = RING_SPACE(chan, req->nr_push * 2);
if (ret) {
- NV_ERROR(drm, "cal_space: %d\n", ret);
+ NV_ERROR(cli, "cal_space: %d\n", ret);
goto out;
}
@@ -748,7 +761,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
} else {
ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
if (ret) {
- NV_ERROR(drm, "jmp_space: %d\n", ret);
+ NV_ERROR(cli, "jmp_space: %d\n", ret);
goto out;
}
@@ -784,9 +797,9 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
}
}
- ret = nouveau_fence_new(chan, &fence);
+ ret = nouveau_fence_new(chan, false, &fence);
if (ret) {
- NV_ERROR(drm, "error fencing pushbuf: %d\n", ret);
+ NV_ERROR(cli, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
goto out;
}
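
validate_init() above replaces the old "drop everything, ttm_bo_wait_unreserved(), retry" dance: on -EAGAIN the validation sequence is bumped, the contended buffer is reserved through ttm_bo_reserve_slowpath(), remembered in res_bo, and the list walk restarts, skipping that buffer when it comes around again. A conceptual user-space model of the same back-off-and-retry shape, with try-locked mutexes standing in for ttm reservations (no sequence ticketing here, so only the control flow carries over):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct model_bo { pthread_mutex_t resv; bool held; };

static void release_all(struct model_bo *bo, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (bo[i].held) {
			pthread_mutex_unlock(&bo[i].resv);
			bo[i].held = false;
		}
	}
}

static void reserve_list(struct model_bo *bo, size_t n)
{
	struct model_bo *res_bo = NULL;	/* buffer taken via the slowpath */
	size_t i;

retry:
	for (i = 0; i < n; i++) {
		if (&bo[i] == res_bo)
			continue;		/* already held, skip it */
		if (pthread_mutex_trylock(&bo[i].resv) == 0) {
			bo[i].held = true;
			continue;
		}
		/* contention: back off completely, block on the one
		 * buffer that stopped us, then restart the walk */
		release_all(bo, n);
		pthread_mutex_lock(&bo[i].resv);
		bo[i].held = true;
		res_bo = &bo[i];
		goto retry;
	}
}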
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 5c1049236d2..8d7a3f0aeb8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -35,9 +35,11 @@ extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
struct drm_file *);
-extern struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj, int flags);
-extern struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf);
+extern int nouveau_gem_prime_pin(struct drm_gem_object *);
+extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *);
+extern struct drm_gem_object *nouveau_gem_prime_import_sg_table(
+ struct drm_device *, size_t size, struct sg_table *);
+extern void *nouveau_gem_prime_vmap(struct drm_gem_object *);
+extern void nouveau_gem_prime_vunmap(struct drm_gem_object *, void *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index a701ff5ffa5..bb54098c6d9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -409,6 +409,81 @@ static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp,
NULL, 0);
static ssize_t
+nouveau_hwmon_show_temp1_auto_point1_pwm(struct device *d,
+ struct device_attribute *a, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", 100);
+}
+static SENSOR_DEVICE_ATTR(temp1_auto_point1_pwm, S_IRUGO,
+ nouveau_hwmon_show_temp1_auto_point1_pwm, NULL, 0);
+
+static ssize_t
+nouveau_hwmon_temp1_auto_point1_temp(struct device *d,
+ struct device_attribute *a, char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
+ struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+ long value;
+
+ if (kstrtol(buf, 10, &value) == -EINVAL)
+ return count;
+
+ therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST,
+ value / 1000);
+
+ return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_auto_point1_temp, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_temp1_auto_point1_temp,
+ nouveau_hwmon_set_temp1_auto_point1_temp, 0);
+
+static ssize_t
+nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d,
+ struct device_attribute *a, char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,
+ struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+ long value;
+
+ if (kstrtol(buf, 10, &value) == -EINVAL)
+ return count;
+
+ therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST,
+ value / 1000);
+
+ return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_auto_point1_temp_hyst, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_temp1_auto_point1_temp_hyst,
+ nouveau_hwmon_set_temp1_auto_point1_temp_hyst, 0);
+
+static ssize_t
nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf)
{
struct drm_device *dev = dev_get_drvdata(d);
@@ -439,6 +514,38 @@ static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, nouveau_hwmon_max_temp,
0);
static ssize_t
+nouveau_hwmon_max_temp_hyst(struct device *d, struct device_attribute *a,
+ char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_max_temp_hyst(struct device *d, struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+ long value;
+
+ if (kstrtol(buf, 10, &value) == -EINVAL)
+ return count;
+
+ therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST,
+ value / 1000);
+
+ return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_max_temp_hyst,
+ nouveau_hwmon_set_max_temp_hyst, 0);
+
+static ssize_t
nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a,
char *buf)
{
@@ -471,6 +578,107 @@ static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR,
nouveau_hwmon_set_critical_temp,
0);
+static ssize_t
+nouveau_hwmon_critical_temp_hyst(struct device *d, struct device_attribute *a,
+ char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_critical_temp_hyst(struct device *d,
+ struct device_attribute *a,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+ long value;
+
+ if (kstrtol(buf, 10, &value) == -EINVAL)
+ return count;
+
+ therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST,
+ value / 1000);
+
+ return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_critical_temp_hyst,
+ nouveau_hwmon_set_critical_temp_hyst, 0);
+static ssize_t
+nouveau_hwmon_emergency_temp(struct device *d, struct device_attribute *a,
+ char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_emergency_temp(struct device *d, struct device_attribute *a,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+ long value;
+
+ if (kstrtol(buf, 10, &value) == -EINVAL)
+ return count;
+
+ therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN, value / 1000);
+
+ return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_emergency_temp,
+ nouveau_hwmon_set_emergency_temp,
+ 0);
+
+static ssize_t
+nouveau_hwmon_emergency_temp_hyst(struct device *d, struct device_attribute *a,
+ char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_emergency_temp_hyst(struct device *d,
+ struct device_attribute *a,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+ long value;
+
+ if (kstrtol(buf, 10, &value) == -EINVAL)
+ return count;
+
+ therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST,
+ value / 1000);
+
+ return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_emergency_hyst, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_emergency_temp_hyst,
+ nouveau_hwmon_set_emergency_temp_hyst,
+ 0);
+
static ssize_t nouveau_hwmon_show_name(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -490,7 +698,7 @@ static SENSOR_DEVICE_ATTR(update_rate, S_IRUGO,
NULL, 0);
static ssize_t
-nouveau_hwmon_show_fan0_input(struct device *d, struct device_attribute *attr,
+nouveau_hwmon_show_fan1_input(struct device *d, struct device_attribute *attr,
char *buf)
{
struct drm_device *dev = dev_get_drvdata(d);
@@ -499,7 +707,7 @@ nouveau_hwmon_show_fan0_input(struct device *d, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%d\n", therm->fan_sense(therm));
}
-static SENSOR_DEVICE_ATTR(fan0_input, S_IRUGO, nouveau_hwmon_show_fan0_input,
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, nouveau_hwmon_show_fan1_input,
NULL, 0);
static ssize_t
@@ -665,14 +873,21 @@ static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO | S_IWUSR,
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point1_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point1_temp_hyst.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
&sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp1_emergency.dev_attr.attr,
+ &sensor_dev_attr_temp1_emergency_hyst.dev_attr.attr,
&sensor_dev_attr_name.dev_attr.attr,
&sensor_dev_attr_update_rate.dev_attr.attr,
NULL
};
static struct attribute *hwmon_fan_rpm_attributes[] = {
- &sensor_dev_attr_fan0_input.dev_attr.attr,
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
NULL
};
static struct attribute *hwmon_pwm_fan_attributes[] = {
@@ -717,7 +932,7 @@ nouveau_hwmon_init(struct drm_device *dev)
dev_set_drvdata(hwmon_dev, dev);
/* default sysfs entries */
- ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
+ ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_attrgroup);
if (ret) {
if (ret)
goto error;
@@ -728,7 +943,7 @@ nouveau_hwmon_init(struct drm_device *dev)
* the gpio entries for pwm fan control even when there's no
* actual fan connected to it... therm table? */
if (therm->fan_get && therm->fan_get(therm) >= 0) {
- ret = sysfs_create_group(&dev->pdev->dev.kobj,
+ ret = sysfs_create_group(&hwmon_dev->kobj,
&hwmon_pwm_fan_attrgroup);
if (ret)
goto error;
@@ -736,7 +951,7 @@ nouveau_hwmon_init(struct drm_device *dev)
/* if the card can read the fan rpm */
if (therm->fan_sense(therm) >= 0) {
- ret = sysfs_create_group(&dev->pdev->dev.kobj,
+ ret = sysfs_create_group(&hwmon_dev->kobj,
&hwmon_fan_rpm_attrgroup);
if (ret)
goto error;
@@ -764,10 +979,10 @@ nouveau_hwmon_fini(struct drm_device *dev)
struct nouveau_pm *pm = nouveau_pm(dev);
if (pm->hwmon) {
- sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
- sysfs_remove_group(&dev->pdev->dev.kobj,
+ sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup);
+ sysfs_remove_group(&pm->hwmon->kobj,
&hwmon_pwm_fan_attrgroup);
- sysfs_remove_group(&dev->pdev->dev.kobj,
+ sysfs_remove_group(&pm->hwmon->kobj,
&hwmon_fan_rpm_attrgroup);
hwmon_device_unregister(pm->hwmon);
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index b8e05ae3821..f53e10874ca 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -22,126 +22,42 @@
* Authors: Dave Airlie
*/
-#include <linux/dma-buf.h>
-
#include <drm/drmP.h>
#include "nouveau_drm.h"
#include "nouveau_gem.h"
-static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attachment,
- enum dma_data_direction dir)
+struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
- struct nouveau_bo *nvbo = attachment->dmabuf->priv;
- struct drm_device *dev = nvbo->gem->dev;
+ struct nouveau_bo *nvbo = nouveau_gem_object(obj);
int npages = nvbo->bo.num_pages;
- struct sg_table *sg;
- int nents;
-
- mutex_lock(&dev->struct_mutex);
- sg = drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
- nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
- mutex_unlock(&dev->struct_mutex);
- return sg;
-}
-
-static void nouveau_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
- struct sg_table *sg, enum dma_data_direction dir)
-{
- dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
- sg_free_table(sg);
- kfree(sg);
-}
-
-static void nouveau_gem_dmabuf_release(struct dma_buf *dma_buf)
-{
- struct nouveau_bo *nvbo = dma_buf->priv;
-
- if (nvbo->gem->export_dma_buf == dma_buf) {
- nvbo->gem->export_dma_buf = NULL;
- drm_gem_object_unreference_unlocked(nvbo->gem);
- }
-}
-
-static void *nouveau_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
-{
- return NULL;
-}
-
-static void nouveau_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
-}
-static void *nouveau_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
-{
- return NULL;
+ return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
}
-static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+void *nouveau_gem_prime_vmap(struct drm_gem_object *obj)
{
-
-}
-
-static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
-{
- return -EINVAL;
-}
-
-static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
-{
- struct nouveau_bo *nvbo = dma_buf->priv;
- struct drm_device *dev = nvbo->gem->dev;
+ struct nouveau_bo *nvbo = nouveau_gem_object(obj);
int ret;
- mutex_lock(&dev->struct_mutex);
- if (nvbo->vmapping_count) {
- nvbo->vmapping_count++;
- goto out_unlock;
- }
-
ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
&nvbo->dma_buf_vmap);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
+ if (ret)
return ERR_PTR(ret);
- }
- nvbo->vmapping_count = 1;
-out_unlock:
- mutex_unlock(&dev->struct_mutex);
+
return nvbo->dma_buf_vmap.virtual;
}
-static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+void nouveau_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
- struct nouveau_bo *nvbo = dma_buf->priv;
- struct drm_device *dev = nvbo->gem->dev;
+ struct nouveau_bo *nvbo = nouveau_gem_object(obj);
- mutex_lock(&dev->struct_mutex);
- nvbo->vmapping_count--;
- if (nvbo->vmapping_count == 0) {
- ttm_bo_kunmap(&nvbo->dma_buf_vmap);
- }
- mutex_unlock(&dev->struct_mutex);
+ ttm_bo_kunmap(&nvbo->dma_buf_vmap);
}
-static const struct dma_buf_ops nouveau_dmabuf_ops = {
- .map_dma_buf = nouveau_gem_map_dma_buf,
- .unmap_dma_buf = nouveau_gem_unmap_dma_buf,
- .release = nouveau_gem_dmabuf_release,
- .kmap = nouveau_gem_kmap,
- .kmap_atomic = nouveau_gem_kmap_atomic,
- .kunmap = nouveau_gem_kunmap,
- .kunmap_atomic = nouveau_gem_kunmap_atomic,
- .mmap = nouveau_gem_prime_mmap,
- .vmap = nouveau_gem_prime_vmap,
- .vunmap = nouveau_gem_prime_vunmap,
-};
-
-static int
-nouveau_prime_new(struct drm_device *dev,
- size_t size,
- struct sg_table *sg,
- struct nouveau_bo **pnvbo)
+struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
+ size_t size,
+ struct sg_table *sg)
{
struct nouveau_bo *nvbo;
u32 flags = 0;
@@ -150,24 +66,22 @@ nouveau_prime_new(struct drm_device *dev,
flags = TTM_PL_FLAG_TT;
ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
- sg, pnvbo);
+ sg, &nvbo);
if (ret)
- return ret;
- nvbo = *pnvbo;
+ return ERR_PTR(ret);
nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
if (!nvbo->gem) {
- nouveau_bo_ref(NULL, pnvbo);
- return -ENOMEM;
+ nouveau_bo_ref(NULL, &nvbo);
+ return ERR_PTR(-ENOMEM);
}
nvbo->gem->driver_private = nvbo;
- return 0;
+ return nvbo->gem;
}
-struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj, int flags)
+int nouveau_gem_prime_pin(struct drm_gem_object *obj)
{
struct nouveau_bo *nvbo = nouveau_gem_object(obj);
int ret = 0;
@@ -175,52 +89,7 @@ struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
/* pin buffer into GTT */
ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
if (ret)
- return ERR_PTR(-EINVAL);
-
- return dma_buf_export(nvbo, &nouveau_dmabuf_ops, obj->size, flags);
-}
-
-struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct dma_buf_attachment *attach;
- struct sg_table *sg;
- struct nouveau_bo *nvbo;
- int ret;
-
- if (dma_buf->ops == &nouveau_dmabuf_ops) {
- nvbo = dma_buf->priv;
- if (nvbo->gem) {
- if (nvbo->gem->dev == dev) {
- drm_gem_object_reference(nvbo->gem);
- dma_buf_put(dma_buf);
- return nvbo->gem;
- }
- }
- }
- /* need to attach */
- attach = dma_buf_attach(dma_buf, dev->dev);
- if (IS_ERR(attach))
- return ERR_PTR(PTR_ERR(attach));
-
- sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
- goto fail_detach;
- }
-
- ret = nouveau_prime_new(dev, dma_buf->size, sg, &nvbo);
- if (ret)
- goto fail_unmap;
-
- nvbo->gem->import_attach = attach;
-
- return nvbo->gem;
+ return -EINVAL;
-fail_unmap:
- dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
-fail_detach:
- dma_buf_detach(dma_buf, attach);
- return ERR_PTR(ret);
+ return 0;
}
-
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 39ffc07f906..7e24cdf1cb3 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -490,8 +490,8 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
/* BIOS scripts usually take care of the backlight, thanks
* Apple for your consistency.
*/
- if (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
- dev->pci_device == 0x0329) {
+ if (dev->pci_device == 0x0174 || dev->pci_device == 0x0179 ||
+ dev->pci_device == 0x0189 || dev->pci_device == 0x0329) {
if (mode == DRM_MODE_DPMS_ON) {
nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 2cd6fb8c548..ad48444c385 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -22,6 +22,9 @@
* Author: Ben Skeggs
*/
+#include <core/object.h>
+#include <core/class.h>
+
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -31,6 +34,8 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
+#include <subdev/i2c.h>
+
int
nv04_display_early_init(struct drm_device *dev)
{
@@ -53,6 +58,7 @@ int
nv04_display_create(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
struct dcb_table *dcb = &drm->vbios.dcb;
struct drm_connector *connector, *ct;
struct drm_encoder *encoder;
@@ -71,6 +77,11 @@ nv04_display_create(struct drm_device *dev)
nouveau_hw_save_vga_fonts(dev, 1);
+ ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, 0xd1500000,
+ NV04_DISP_CLASS, NULL, 0, &disp->core);
+ if (ret)
+ return ret;
+
nv04_crtc_create(dev, 0);
if (nv_two_heads(dev))
nv04_crtc_create(dev, 1);
@@ -114,6 +125,11 @@ nv04_display_create(struct drm_device *dev)
}
}
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ nv_encoder->i2c = i2c->find(i2c, nv_encoder->dcb->i2c_index);
+ }
+
/* Save previous state */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
crtc->funcs->save(crtc);
@@ -140,7 +156,7 @@ nv04_display_destroy(struct drm_device *dev)
.crtc = crtc,
};
- crtc->funcs->set_config(&modeset);
+ drm_mode_set_config_internal(&modeset);
}
/* Restore state */
diff --git a/drivers/gpu/drm/nouveau/nv04_display.h b/drivers/gpu/drm/nouveau/nv04_display.h
index 45322802e37..a0a031dad13 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.h
+++ b/drivers/gpu/drm/nouveau/nv04_display.h
@@ -80,6 +80,7 @@ struct nv04_display {
struct nv04_mode_state saved_reg;
uint32_t saved_vga_font[4][16384];
uint32_t dac_users[4];
+ struct nouveau_object *core;
};
static inline struct nv04_display *
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index a220b94ba9f..94eadd1dd10 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -78,6 +78,9 @@ nv04_fence_context_new(struct nouveau_channel *chan)
struct nv04_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (fctx) {
nouveau_fence_context_new(&fctx->base);
+ fctx->base.emit = nv04_fence_emit;
+ fctx->base.sync = nv04_fence_sync;
+ fctx->base.read = nv04_fence_read;
chan->fence = fctx;
return 0;
}
@@ -104,8 +107,5 @@ nv04_fence_create(struct nouveau_drm *drm)
priv->base.dtor = nv04_fence_destroy;
priv->base.context_new = nv04_fence_context_new;
priv->base.context_del = nv04_fence_context_del;
- priv->base.emit = nv04_fence_emit;
- priv->base.sync = nv04_fence_sync;
- priv->base.read = nv04_fence_read;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
index 62e826a139b..4a69ccdef9b 100644
--- a/drivers/gpu/drm/nouveau/nv04_tv.c
+++ b/drivers/gpu/drm/nouveau/nv04_tv.c
@@ -184,14 +184,23 @@ static const struct drm_encoder_funcs nv04_tv_funcs = {
.destroy = nv04_tv_destroy,
};
+static const struct drm_encoder_helper_funcs nv04_tv_helper_funcs = {
+ .dpms = nv04_tv_dpms,
+ .save = drm_i2c_encoder_save,
+ .restore = drm_i2c_encoder_restore,
+ .mode_fixup = drm_i2c_encoder_mode_fixup,
+ .prepare = nv04_tv_prepare,
+ .commit = nv04_tv_commit,
+ .mode_set = nv04_tv_mode_set,
+ .detect = drm_i2c_encoder_detect,
+};
+
int
nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
{
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
struct drm_device *dev = connector->dev;
- struct drm_encoder_helper_funcs *hfuncs;
- struct drm_encoder_slave_funcs *sfuncs;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
struct nouveau_i2c_port *port = i2c->find(i2c, entry->i2c_index);
@@ -207,17 +216,11 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
if (!nv_encoder)
return -ENOMEM;
- hfuncs = kzalloc(sizeof(*hfuncs), GFP_KERNEL);
- if (!hfuncs) {
- ret = -ENOMEM;
- goto fail_free;
- }
-
/* Initialize the common members */
encoder = to_drm_encoder(nv_encoder);
drm_encoder_init(dev, encoder, &nv04_tv_funcs, DRM_MODE_ENCODER_TVDAC);
- drm_encoder_helper_add(encoder, hfuncs);
+ drm_encoder_helper_add(encoder, &nv04_tv_helper_funcs);
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
@@ -230,30 +233,14 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
if (ret < 0)
goto fail_cleanup;
- /* Fill the function pointers */
- sfuncs = get_slave_funcs(encoder);
-
- *hfuncs = (struct drm_encoder_helper_funcs) {
- .dpms = nv04_tv_dpms,
- .save = sfuncs->save,
- .restore = sfuncs->restore,
- .mode_fixup = sfuncs->mode_fixup,
- .prepare = nv04_tv_prepare,
- .commit = nv04_tv_commit,
- .mode_set = nv04_tv_mode_set,
- .detect = sfuncs->detect,
- };
-
/* Attach it to the specified connector. */
- sfuncs->create_resources(encoder, connector);
+ get_slave_funcs(encoder)->create_resources(encoder, connector);
drm_mode_connector_attach_encoder(connector, encoder);
return 0;
fail_cleanup:
drm_encoder_cleanup(encoder);
- kfree(hfuncs);
-fail_free:
kfree(nv_encoder);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 03017f24d59..06f434f03fb 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -27,18 +27,7 @@
#include "nouveau_drm.h"
#include "nouveau_dma.h"
-#include "nouveau_fence.h"
-
-struct nv10_fence_chan {
- struct nouveau_fence_chan base;
-};
-
-struct nv10_fence_priv {
- struct nouveau_fence_priv base;
- struct nouveau_bo *bo;
- spinlock_t lock;
- u32 sequence;
-};
+#include "nv10_fence.h"
int
nv10_fence_emit(struct nouveau_fence *fence)
@@ -61,45 +50,6 @@ nv10_fence_sync(struct nouveau_fence *fence,
return -ENODEV;
}
-int
-nv17_fence_sync(struct nouveau_fence *fence,
- struct nouveau_channel *prev, struct nouveau_channel *chan)
-{
- struct nv10_fence_priv *priv = chan->drm->fence;
- u32 value;
- int ret;
-
- if (!mutex_trylock(&prev->cli->mutex))
- return -EBUSY;
-
- spin_lock(&priv->lock);
- value = priv->sequence;
- priv->sequence += 2;
- spin_unlock(&priv->lock);
-
- ret = RING_SPACE(prev, 5);
- if (!ret) {
- BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
- OUT_RING (prev, NvSema);
- OUT_RING (prev, 0);
- OUT_RING (prev, value + 0);
- OUT_RING (prev, value + 1);
- FIRE_RING (prev);
- }
-
- if (!ret && !(ret = RING_SPACE(chan, 5))) {
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
- OUT_RING (chan, NvSema);
- OUT_RING (chan, 0);
- OUT_RING (chan, value + 1);
- OUT_RING (chan, value + 2);
- FIRE_RING (chan);
- }
-
- mutex_unlock(&prev->cli->mutex);
- return 0;
-}
-
u32
nv10_fence_read(struct nouveau_channel *chan)
{
@@ -115,39 +65,20 @@ nv10_fence_context_del(struct nouveau_channel *chan)
kfree(fctx);
}
-static int
+int
nv10_fence_context_new(struct nouveau_channel *chan)
{
- struct nv10_fence_priv *priv = chan->drm->fence;
struct nv10_fence_chan *fctx;
- int ret = 0;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
return -ENOMEM;
nouveau_fence_context_new(&fctx->base);
-
- if (priv->bo) {
- struct ttm_mem_reg *mem = &priv->bo->bo.mem;
- struct nouveau_object *object;
- u32 start = mem->start * PAGE_SIZE;
- u32 limit = mem->start + mem->size - 1;
-
- ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
- NvSema, 0x0002,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
- .start = start,
- .limit = limit,
- }, sizeof(struct nv_dma_class),
- &object);
- }
-
- if (ret)
- nv10_fence_context_del(chan);
- return ret;
+ fctx->base.emit = nv10_fence_emit;
+ fctx->base.read = nv10_fence_read;
+ fctx->base.sync = nv10_fence_sync;
+ return 0;
}
void
@@ -162,18 +93,10 @@ nv10_fence_destroy(struct nouveau_drm *drm)
kfree(priv);
}
-void nv17_fence_resume(struct nouveau_drm *drm)
-{
- struct nv10_fence_priv *priv = drm->fence;
-
- nouveau_bo_wr32(priv->bo, 0, priv->sequence);
-}
-
int
nv10_fence_create(struct nouveau_drm *drm)
{
struct nv10_fence_priv *priv;
- int ret = 0;
priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -182,33 +105,6 @@ nv10_fence_create(struct nouveau_drm *drm)
priv->base.dtor = nv10_fence_destroy;
priv->base.context_new = nv10_fence_context_new;
priv->base.context_del = nv10_fence_context_del;
- priv->base.emit = nv10_fence_emit;
- priv->base.read = nv10_fence_read;
- priv->base.sync = nv10_fence_sync;
spin_lock_init(&priv->lock);
-
- if (nv_device(drm->device)->chipset >= 0x17) {
- ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, &priv->bo);
- if (!ret) {
- ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
- if (!ret) {
- ret = nouveau_bo_map(priv->bo);
- if (ret)
- nouveau_bo_unpin(priv->bo);
- }
- if (ret)
- nouveau_bo_ref(NULL, &priv->bo);
- }
-
- if (ret == 0) {
- nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
- priv->base.sync = nv17_fence_sync;
- priv->base.resume = nv17_fence_resume;
- }
- }
-
- if (ret)
- nv10_fence_destroy(drm);
- return ret;
+ return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.h b/drivers/gpu/drm/nouveau/nv10_fence.h
new file mode 100644
index 00000000000..e5d9204826c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_fence.h
@@ -0,0 +1,19 @@
+#ifndef __NV10_FENCE_H_
+#define __NV10_FENCE_H_
+
+#include <core/os.h>
+#include "nouveau_fence.h"
+#include "nouveau_bo.h"
+
+struct nv10_fence_chan {
+ struct nouveau_fence_chan base;
+};
+
+struct nv10_fence_priv {
+ struct nouveau_fence_priv base;
+ struct nouveau_bo *bo;
+ spinlock_t lock;
+ u32 sequence;
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
new file mode 100644
index 00000000000..8e47a9bae8c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <core/object.h>
+#include <core/class.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+#include "nv10_fence.h"
+
+int
+nv17_fence_sync(struct nouveau_fence *fence,
+ struct nouveau_channel *prev, struct nouveau_channel *chan)
+{
+ struct nv10_fence_priv *priv = chan->drm->fence;
+ u32 value;
+ int ret;
+
+ if (!mutex_trylock(&prev->cli->mutex))
+ return -EBUSY;
+
+ spin_lock(&priv->lock);
+ value = priv->sequence;
+ priv->sequence += 2;
+ spin_unlock(&priv->lock);
+
+ ret = RING_SPACE(prev, 5);
+ if (!ret) {
+ BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
+ OUT_RING (prev, NvSema);
+ OUT_RING (prev, 0);
+ OUT_RING (prev, value + 0);
+ OUT_RING (prev, value + 1);
+ FIRE_RING (prev);
+ }
+
+ if (!ret && !(ret = RING_SPACE(chan, 5))) {
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
+ OUT_RING (chan, NvSema);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, value + 1);
+ OUT_RING (chan, value + 2);
+ FIRE_RING (chan);
+ }
+
+ mutex_unlock(&prev->cli->mutex);
+ return 0;
+}
+
+static int
+nv17_fence_context_new(struct nouveau_channel *chan)
+{
+ struct nv10_fence_priv *priv = chan->drm->fence;
+ struct nv10_fence_chan *fctx;
+ struct ttm_mem_reg *mem = &priv->bo->bo.mem;
+ struct nouveau_object *object;
+ u32 start = mem->start * PAGE_SIZE;
+ u32 limit = mem->start + mem->size - 1;
+ int ret = 0;
+
+ fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+
+ nouveau_fence_context_new(&fctx->base);
+ fctx->base.emit = nv10_fence_emit;
+ fctx->base.read = nv10_fence_read;
+ fctx->base.sync = nv17_fence_sync;
+
+ ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
+ NvSema, 0x0002,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = start,
+ .limit = limit,
+ }, sizeof(struct nv_dma_class),
+ &object);
+ if (ret)
+ nv10_fence_context_del(chan);
+ return ret;
+}
+
+void
+nv17_fence_resume(struct nouveau_drm *drm)
+{
+ struct nv10_fence_priv *priv = drm->fence;
+
+ nouveau_bo_wr32(priv->bo, 0, priv->sequence);
+}
+
+int
+nv17_fence_create(struct nouveau_drm *drm)
+{
+ struct nv10_fence_priv *priv;
+ int ret = 0;
+
+ priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.dtor = nv10_fence_destroy;
+ priv->base.resume = nv17_fence_resume;
+ priv->base.context_new = nv17_fence_context_new;
+ priv->base.context_del = nv10_fence_context_del;
+ spin_lock_init(&priv->lock);
+
+ ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, NULL, &priv->bo);
+ if (!ret) {
+ ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+ if (!ret) {
+ ret = nouveau_bo_map(priv->bo);
+ if (ret)
+ nouveau_bo_unpin(priv->bo);
+ }
+ if (ret)
+ nouveau_bo_ref(NULL, &priv->bo);
+ }
+
+ if (ret) {
+ nv10_fence_destroy(drm);
+ return ret;
+ }
+
+ nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 2ca276ada50..977e42be205 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -768,7 +768,7 @@ static int nv17_tv_set_property(struct drm_encoder *encoder,
.crtc = crtc,
};
- crtc->funcs->set_config(&modeset);
+ drm_mode_set_config_internal(&modeset);
}
}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 35874085a61..a6237c9cbbc 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -43,6 +43,7 @@
#include <subdev/timer.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
+#include <subdev/i2c.h>
#define EVO_DMA_NR 9
@@ -128,6 +129,11 @@ struct nv50_dmac {
struct nv50_chan base;
dma_addr_t handle;
u32 *ptr;
+
+ /* Protects against concurrent pushbuf access to this channel, lock is
+ * grabbed by evo_wait (if the pushbuf reservation is successful) and
+ * dropped again by evo_kick. */
+ struct mutex lock;
};
static void
@@ -271,6 +277,8 @@ nv50_dmac_create(struct nouveau_object *core, u32 bclass, u8 head,
u32 pushbuf = *(u32 *)data;
int ret;
+ mutex_init(&dmac->lock);
+
dmac->ptr = pci_alloc_consistent(nv_device(core)->pdev, PAGE_SIZE,
&dmac->handle);
if (!dmac->ptr)
@@ -395,11 +403,13 @@ evo_wait(void *evoc, int nr)
struct nv50_dmac *dmac = evoc;
u32 put = nv_ro32(dmac->base.user, 0x0000) / 4;
+ mutex_lock(&dmac->lock);
if (put + nr >= (PAGE_SIZE / 4) - 8) {
dmac->ptr[put] = 0x20000000;
nv_wo32(dmac->base.user, 0x0000, 0x00000000);
if (!nv_wait(dmac->base.user, 0x0004, ~0, 0x00000000)) {
+ mutex_unlock(&dmac->lock);
NV_ERROR(dmac->base.user, "channel stalled\n");
return NULL;
}
@@ -415,6 +425,7 @@ evo_kick(u32 *push, void *evoc)
{
struct nv50_dmac *dmac = evoc;
nv_wo32(dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
+ mutex_unlock(&dmac->lock);
}
#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
@@ -423,7 +434,10 @@ evo_kick(u32 *push, void *evoc)
static bool
evo_sync_wait(void *data)
{
- return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
+ if (nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000)
+ return true;
+ usleep_range(1, 2);
+ return false;
}
static int
@@ -502,7 +516,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (ret)
return ret;
- if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
+ if (nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) {
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
OUT_RING (chan, sync->sem.offset);
@@ -512,24 +526,36 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
OUT_RING (chan, sync->sem.offset ^ 0x10);
OUT_RING (chan, 0x74b1e000);
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- if (nv_mclass(chan->object) < NV84_CHANNEL_DMA_CLASS)
- OUT_RING (chan, NvSema);
- else
- OUT_RING (chan, chan->vram);
+ OUT_RING (chan, NvSema);
+ } else
+ if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
+ u64 offset = nv84_fence_crtc(chan, nv_crtc->index);
+ offset += sync->sem.offset;
+
+ BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset));
+ OUT_RING (chan, 0xf00d0000 | sync->sem.value);
+ OUT_RING (chan, 0x00000002);
+ BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset ^ 0x10));
+ OUT_RING (chan, 0x74b1e000);
+ OUT_RING (chan, 0x00000001);
} else {
- u64 offset = nvc0_fence_crtc(chan, nv_crtc->index);
+ u64 offset = nv84_fence_crtc(chan, nv_crtc->index);
offset += sync->sem.offset;
BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 0xf00d0000 | sync->sem.value);
- OUT_RING (chan, 0x1002);
+ OUT_RING (chan, 0x00001002);
BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset ^ 0x10));
OUT_RING (chan, 0x74b1e000);
- OUT_RING (chan, 0x1001);
+ OUT_RING (chan, 0x00001001);
}
FIRE_RING (chan);
@@ -1493,9 +1519,6 @@ nv50_dac_disconnect(struct drm_encoder *encoder)
evo_mthd(push, 0x0180 + (or * 0x020), 1);
evo_data(push, 0x00000000);
}
-
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
evo_kick(push, mast);
}
}
@@ -1542,20 +1565,23 @@ static const struct drm_encoder_funcs nv50_dac_func = {
static int
nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
- struct drm_device *dev = connector->dev;
+ struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
+ int type = DRM_MODE_ENCODER_DAC;
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
if (!nv_encoder)
return -ENOMEM;
nv_encoder->dcb = dcbe;
nv_encoder->or = ffs(dcbe->or) - 1;
+ nv_encoder->i2c = i2c->find(i2c, dcbe->i2c_index);
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
- drm_encoder_init(dev, encoder, &nv50_dac_func, DRM_MODE_ENCODER_DAC);
+ drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type);
drm_encoder_helper_add(encoder, &nv50_dac_hfunc);
drm_mode_connector_attach_encoder(connector, encoder);
@@ -1664,9 +1690,6 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
}
nv_call(disp->core, NV50_DISP_SOR_PWR + or, (mode == DRM_MODE_DPMS_ON));
-
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
- nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, disp->core);
}
static bool
@@ -1709,9 +1732,6 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
evo_mthd(push, 0x0200 + (or * 0x20), 1);
evo_data(push, 0x00000000);
}
-
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
evo_kick(push, mast);
}
@@ -1723,14 +1743,6 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
}
static void
-nv50_sor_prepare(struct drm_encoder *encoder)
-{
- nv50_sor_disconnect(encoder);
- if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP)
- evo_sync(encoder->dev);
-}
-
-static void
nv50_sor_commit(struct drm_encoder *encoder)
{
}
@@ -1825,8 +1837,13 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
push = evo_wait(nv50_mast(dev), 8);
if (push) {
if (nv50_vers(mast) < NVD0_DISP_CLASS) {
+ u32 ctrl = (depth << 16) | (proto << 8) | owner;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ctrl |= 0x00001000;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ctrl |= 0x00002000;
evo_mthd(push, 0x0600 + (nv_encoder->or * 0x040), 1);
- evo_data(push, (depth << 16) | (proto << 8) | owner);
+ evo_data(push, ctrl);
} else {
u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
u32 syncs = 0x00000001;
@@ -1862,7 +1879,7 @@ nv50_sor_destroy(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs nv50_sor_hfunc = {
.dpms = nv50_sor_dpms,
.mode_fixup = nv50_sor_mode_fixup,
- .prepare = nv50_sor_prepare,
+ .prepare = nv50_sor_disconnect,
.commit = nv50_sor_commit,
.mode_set = nv50_sor_mode_set,
.disable = nv50_sor_disconnect,
@@ -1876,21 +1893,33 @@ static const struct drm_encoder_funcs nv50_sor_func = {
static int
nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
- struct drm_device *dev = connector->dev;
+ struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
+ int type;
+
+ switch (dcbe->type) {
+ case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break;
+ case DCB_OUTPUT_TMDS:
+ case DCB_OUTPUT_DP:
+ default:
+ type = DRM_MODE_ENCODER_TMDS;
+ break;
+ }
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
if (!nv_encoder)
return -ENOMEM;
nv_encoder->dcb = dcbe;
nv_encoder->or = ffs(dcbe->or) - 1;
+ nv_encoder->i2c = i2c->find(i2c, dcbe->i2c_index);
nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
- drm_encoder_init(dev, encoder, &nv50_sor_func, DRM_MODE_ENCODER_TMDS);
+ drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type);
drm_encoder_helper_add(encoder, &nv50_sor_hfunc);
drm_mode_connector_attach_encoder(connector, encoder);
@@ -1898,6 +1927,181 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
}
/******************************************************************************
+ * PIOR
+ *****************************************************************************/
+
+static void
+nv50_pior_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ u32 mthd = (nv_encoder->dcb->type << 12) | nv_encoder->or;
+ u32 ctrl = (mode == DRM_MODE_DPMS_ON);
+ nv_call(disp->core, NV50_DISP_PIOR_PWR + mthd, ctrl);
+}
+
+static bool
+nv50_pior_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (nv_connector && nv_connector->native_mode) {
+ if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
+ int id = adjusted_mode->base.id;
+ *adjusted_mode = *nv_connector->native_mode;
+ adjusted_mode->base.id = id;
+ }
+ }
+
+ adjusted_mode->clock *= 2;
+ return true;
+}
+
+static void
+nv50_pior_commit(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_pior_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct nouveau_connector *nv_connector;
+ u8 owner = 1 << nv_crtc->index;
+ u8 proto, depth;
+ u32 *push;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ switch (nv_connector->base.display_info.bpc) {
+ case 10: depth = 0x6; break;
+ case 8: depth = 0x5; break;
+ case 6: depth = 0x2; break;
+ default: depth = 0x0; break;
+ }
+
+ switch (nv_encoder->dcb->type) {
+ case DCB_OUTPUT_TMDS:
+ case DCB_OUTPUT_DP:
+ proto = 0x0;
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ nv50_pior_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ push = evo_wait(mast, 8);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ u32 ctrl = (depth << 16) | (proto << 8) | owner;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ctrl |= 0x00001000;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ctrl |= 0x00002000;
+ evo_mthd(push, 0x0700 + (nv_encoder->or * 0x040), 1);
+ evo_data(push, ctrl);
+ }
+
+ evo_kick(push, mast);
+ }
+
+ nv_encoder->crtc = encoder->crtc;
+}
+
+static void
+nv50_pior_disconnect(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ const int or = nv_encoder->or;
+ u32 *push;
+
+ if (nv_encoder->crtc) {
+ nv50_crtc_prepare(nv_encoder->crtc);
+
+ push = evo_wait(mast, 4);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0700 + (or * 0x040), 1);
+ evo_data(push, 0x00000000);
+ }
+ evo_kick(push, mast);
+ }
+ }
+
+ nv_encoder->crtc = NULL;
+}
+
+static void
+nv50_pior_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static const struct drm_encoder_helper_funcs nv50_pior_hfunc = {
+ .dpms = nv50_pior_dpms,
+ .mode_fixup = nv50_pior_mode_fixup,
+ .prepare = nv50_pior_disconnect,
+ .commit = nv50_pior_commit,
+ .mode_set = nv50_pior_mode_set,
+ .disable = nv50_pior_disconnect,
+ .get_crtc = nv50_display_crtc_get,
+};
+
+static const struct drm_encoder_funcs nv50_pior_func = {
+ .destroy = nv50_pior_destroy,
+};
+
+static int
+nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
+{
+ struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+ struct nouveau_i2c_port *ddc = NULL;
+ struct nouveau_encoder *nv_encoder;
+ struct drm_encoder *encoder;
+ int type;
+
+ switch (dcbe->type) {
+ case DCB_OUTPUT_TMDS:
+ ddc = i2c->find_type(i2c, NV_I2C_TYPE_EXTDDC(dcbe->extdev));
+ type = DRM_MODE_ENCODER_TMDS;
+ break;
+ case DCB_OUTPUT_DP:
+ ddc = i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(dcbe->extdev));
+ type = DRM_MODE_ENCODER_TMDS;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+ if (!nv_encoder)
+ return -ENOMEM;
+ nv_encoder->dcb = dcbe;
+ nv_encoder->or = ffs(dcbe->or) - 1;
+ nv_encoder->i2c = ddc;
+
+ encoder = to_drm_encoder(nv_encoder);
+ encoder->possible_crtcs = dcbe->heads;
+ encoder->possible_clones = 0;
+ drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type);
+ drm_encoder_helper_add(encoder, &nv50_pior_hfunc);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+ return 0;
+}
+
+/******************************************************************************
* Init
*****************************************************************************/
void
@@ -1913,7 +2117,7 @@ nv50_display_init(struct drm_device *dev)
evo_mthd(push, 0x0088, 1);
evo_data(push, NvEvoSync);
evo_kick(push, nv50_mast(dev));
- return evo_sync(dev);
+ return 0;
}
return -EBUSY;
@@ -2019,25 +2223,28 @@ nv50_display_create(struct drm_device *dev)
if (IS_ERR(connector))
continue;
- if (dcbe->location != DCB_LOC_ON_CHIP) {
- NV_WARN(drm, "skipping off-chip encoder %d/%d\n",
- dcbe->type, ffs(dcbe->or) - 1);
- continue;
+ if (dcbe->location == DCB_LOC_ON_CHIP) {
+ switch (dcbe->type) {
+ case DCB_OUTPUT_TMDS:
+ case DCB_OUTPUT_LVDS:
+ case DCB_OUTPUT_DP:
+ ret = nv50_sor_create(connector, dcbe);
+ break;
+ case DCB_OUTPUT_ANALOG:
+ ret = nv50_dac_create(connector, dcbe);
+ break;
+ default:
+ ret = -ENODEV;
+ break;
+ }
+ } else {
+ ret = nv50_pior_create(connector, dcbe);
}
- switch (dcbe->type) {
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_LVDS:
- case DCB_OUTPUT_DP:
- nv50_sor_create(connector, dcbe);
- break;
- case DCB_OUTPUT_ANALOG:
- nv50_dac_create(connector, dcbe);
- break;
- default:
- NV_WARN(drm, "skipping unsupported encoder %d/%d\n",
- dcbe->type, ffs(dcbe->or) - 1);
- continue;
+ if (ret) {
+ NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
+ dcbe->location, dcbe->type,
+ ffs(dcbe->or) - 1, ret);
}
}
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index d889f3ac0d4..f9701e567db 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -27,27 +27,16 @@
#include "nouveau_drm.h"
#include "nouveau_dma.h"
-#include "nouveau_fence.h"
+#include "nv10_fence.h"
#include "nv50_display.h"
-struct nv50_fence_chan {
- struct nouveau_fence_chan base;
-};
-
-struct nv50_fence_priv {
- struct nouveau_fence_priv base;
- struct nouveau_bo *bo;
- spinlock_t lock;
- u32 sequence;
-};
-
static int
nv50_fence_context_new(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->drm->dev;
- struct nv50_fence_priv *priv = chan->drm->fence;
- struct nv50_fence_chan *fctx;
+ struct nv10_fence_priv *priv = chan->drm->fence;
+ struct nv10_fence_chan *fctx;
struct ttm_mem_reg *mem = &priv->bo->bo.mem;
struct nouveau_object *object;
int ret, i;
@@ -57,6 +46,9 @@ nv50_fence_context_new(struct nouveau_channel *chan)
return -ENOMEM;
nouveau_fence_context_new(&fctx->base);
+ fctx->base.emit = nv10_fence_emit;
+ fctx->base.read = nv10_fence_read;
+ fctx->base.sync = nv17_fence_sync;
ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
NvSema, 0x0002,
@@ -91,7 +83,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
int
nv50_fence_create(struct nouveau_drm *drm)
{
- struct nv50_fence_priv *priv;
+ struct nv10_fence_priv *priv;
int ret = 0;
priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -99,11 +91,9 @@ nv50_fence_create(struct nouveau_drm *drm)
return -ENOMEM;
priv->base.dtor = nv10_fence_destroy;
+ priv->base.resume = nv17_fence_resume;
priv->base.context_new = nv50_fence_context_new;
priv->base.context_del = nv10_fence_context_del;
- priv->base.emit = nv10_fence_emit;
- priv->base.read = nv10_fence_read;
- priv->base.sync = nv17_fence_sync;
spin_lock_init(&priv->lock);
ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
@@ -119,13 +109,11 @@ nv50_fence_create(struct nouveau_drm *drm)
nouveau_bo_ref(NULL, &priv->bo);
}
- if (ret == 0) {
- nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
- priv->base.sync = nv17_fence_sync;
- priv->base.resume = nv17_fence_resume;
+ if (ret) {
+ nv10_fence_destroy(drm);
+ return ret;
}
- if (ret)
- nv10_fence_destroy(drm);
+ nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index c686650584b..9fd475c8982 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -23,6 +23,7 @@
*/
#include <core/object.h>
+#include <core/client.h>
#include <core/class.h>
#include <engine/fifo.h>
@@ -33,79 +34,115 @@
#include "nv50_display.h"
-struct nv84_fence_chan {
- struct nouveau_fence_chan base;
-};
-
-struct nv84_fence_priv {
- struct nouveau_fence_priv base;
- struct nouveau_gpuobj *mem;
-};
+u64
+nv84_fence_crtc(struct nouveau_channel *chan, int crtc)
+{
+ struct nv84_fence_chan *fctx = chan->fence;
+ return fctx->dispc_vma[crtc].offset;
+}
static int
-nv84_fence_emit(struct nouveau_fence *fence)
+nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- struct nouveau_channel *chan = fence->channel;
- struct nouveau_fifo_chan *fifo = (void *)chan->object;
- int ret = RING_SPACE(chan, 7);
+ int ret = RING_SPACE(chan, 8);
if (ret == 0) {
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, NvSema);
- BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(fifo->chid * 16));
- OUT_RING (chan, lower_32_bits(fifo->chid * 16));
- OUT_RING (chan, fence->sequence);
+ OUT_RING (chan, chan->vram);
+ BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5);
+ OUT_RING (chan, upper_32_bits(virtual));
+ OUT_RING (chan, lower_32_bits(virtual));
+ OUT_RING (chan, sequence);
OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
+ OUT_RING (chan, 0x00000000);
FIRE_RING (chan);
}
return ret;
}
-
static int
-nv84_fence_sync(struct nouveau_fence *fence,
- struct nouveau_channel *prev, struct nouveau_channel *chan)
+nv84_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- struct nouveau_fifo_chan *fifo = (void *)prev->object;
int ret = RING_SPACE(chan, 7);
if (ret == 0) {
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, NvSema);
+ OUT_RING (chan, chan->vram);
BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(fifo->chid * 16));
- OUT_RING (chan, lower_32_bits(fifo->chid * 16));
- OUT_RING (chan, fence->sequence);
+ OUT_RING (chan, upper_32_bits(virtual));
+ OUT_RING (chan, lower_32_bits(virtual));
+ OUT_RING (chan, sequence);
OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
FIRE_RING (chan);
}
return ret;
}
+static int
+nv84_fence_emit(struct nouveau_fence *fence)
+{
+ struct nouveau_channel *chan = fence->channel;
+ struct nv84_fence_chan *fctx = chan->fence;
+ struct nouveau_fifo_chan *fifo = (void *)chan->object;
+ u64 addr = fifo->chid * 16;
+
+ if (fence->sysmem)
+ addr += fctx->vma_gart.offset;
+ else
+ addr += fctx->vma.offset;
+
+ return fctx->base.emit32(chan, addr, fence->sequence);
+}
+
+static int
+nv84_fence_sync(struct nouveau_fence *fence,
+ struct nouveau_channel *prev, struct nouveau_channel *chan)
+{
+ struct nv84_fence_chan *fctx = chan->fence;
+ struct nouveau_fifo_chan *fifo = (void *)prev->object;
+ u64 addr = fifo->chid * 16;
+
+ if (fence->sysmem)
+ addr += fctx->vma_gart.offset;
+ else
+ addr += fctx->vma.offset;
+
+ return fctx->base.sync32(chan, addr, fence->sequence);
+}
+
static u32
nv84_fence_read(struct nouveau_channel *chan)
{
struct nouveau_fifo_chan *fifo = (void *)chan->object;
struct nv84_fence_priv *priv = chan->drm->fence;
- return nv_ro32(priv->mem, fifo->chid * 16);
+ return nouveau_bo_rd32(priv->bo, fifo->chid * 16/4);
}
static void
nv84_fence_context_del(struct nouveau_channel *chan)
{
+ struct drm_device *dev = chan->drm->dev;
+ struct nv84_fence_priv *priv = chan->drm->fence;
struct nv84_fence_chan *fctx = chan->fence;
+ int i;
+
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+ nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
+ }
+
+ nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
+ nouveau_bo_vma_del(priv->bo, &fctx->vma);
nouveau_fence_context_del(&fctx->base);
chan->fence = NULL;
kfree(fctx);
}
-static int
+int
nv84_fence_context_new(struct nouveau_channel *chan)
{
- struct drm_device *dev = chan->drm->dev;
struct nouveau_fifo_chan *fifo = (void *)chan->object;
+ struct nouveau_client *client = nouveau_client(fifo);
struct nv84_fence_priv *priv = chan->drm->fence;
struct nv84_fence_chan *fctx;
- struct nouveau_object *object;
int ret, i;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
@@ -113,44 +150,74 @@ nv84_fence_context_new(struct nouveau_channel *chan)
return -ENOMEM;
nouveau_fence_context_new(&fctx->base);
+ fctx->base.emit = nv84_fence_emit;
+ fctx->base.sync = nv84_fence_sync;
+ fctx->base.read = nv84_fence_read;
+ fctx->base.emit32 = nv84_fence_emit32;
+ fctx->base.sync32 = nv84_fence_sync32;
- ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
- NvSema, 0x0002,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
- .start = priv->mem->addr,
- .limit = priv->mem->addr +
- priv->mem->size - 1,
- }, sizeof(struct nv_dma_class),
- &object);
-
- /* dma objects for display sync channel semaphore blocks */
- for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+ ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma);
+ if (ret == 0) {
+ ret = nouveau_bo_vma_add(priv->bo_gart, client->vm,
+ &fctx->vma_gart);
+ }
- ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
- NvEvoSema0 + i, 0x003d,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
- .start = bo->bo.offset,
- .limit = bo->bo.offset + 0xfff,
- }, sizeof(struct nv_dma_class),
- &object);
+ /* map display semaphore buffers into channel's vm */
+ for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
+ struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
+ ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
}
+ nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000);
+
if (ret)
nv84_fence_context_del(chan);
- nv_wo32(priv->mem, fifo->chid * 16, 0x00000000);
return ret;
}
+static bool
+nv84_fence_suspend(struct nouveau_drm *drm)
+{
+ struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
+ struct nv84_fence_priv *priv = drm->fence;
+ int i;
+
+ priv->suspend = vmalloc((pfifo->max + 1) * sizeof(u32));
+ if (priv->suspend) {
+ for (i = 0; i <= pfifo->max; i++)
+ priv->suspend[i] = nouveau_bo_rd32(priv->bo, i*4);
+ }
+
+ return priv->suspend != NULL;
+}
+
+static void
+nv84_fence_resume(struct nouveau_drm *drm)
+{
+ struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
+ struct nv84_fence_priv *priv = drm->fence;
+ int i;
+
+ if (priv->suspend) {
+ for (i = 0; i <= pfifo->max; i++)
+ nouveau_bo_wr32(priv->bo, i*4, priv->suspend[i]);
+ vfree(priv->suspend);
+ priv->suspend = NULL;
+ }
+}
+
static void
nv84_fence_destroy(struct nouveau_drm *drm)
{
struct nv84_fence_priv *priv = drm->fence;
- nouveau_gpuobj_ref(NULL, &priv->mem);
+ nouveau_bo_unmap(priv->bo_gart);
+ if (priv->bo_gart)
+ nouveau_bo_unpin(priv->bo_gart);
+ nouveau_bo_ref(NULL, &priv->bo_gart);
+ nouveau_bo_unmap(priv->bo);
+ if (priv->bo)
+ nouveau_bo_unpin(priv->bo);
+ nouveau_bo_ref(NULL, &priv->bo);
drm->fence = NULL;
kfree(priv);
}
@@ -160,7 +227,6 @@ nv84_fence_create(struct nouveau_drm *drm)
{
struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
struct nv84_fence_priv *priv;
- u32 chan = pfifo->max + 1;
int ret;
priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -168,14 +234,42 @@ nv84_fence_create(struct nouveau_drm *drm)
return -ENOMEM;
priv->base.dtor = nv84_fence_destroy;
+ priv->base.suspend = nv84_fence_suspend;
+ priv->base.resume = nv84_fence_resume;
priv->base.context_new = nv84_fence_context_new;
priv->base.context_del = nv84_fence_context_del;
- priv->base.emit = nv84_fence_emit;
- priv->base.sync = nv84_fence_sync;
- priv->base.read = nv84_fence_read;
- ret = nouveau_gpuobj_new(drm->device, NULL, chan * 16, 0x1000, 0,
- &priv->mem);
+ init_waitqueue_head(&priv->base.waiting);
+ priv->base.uevent = true;
+
+ ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
+ TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
+ if (ret == 0) {
+ ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+ if (ret == 0) {
+ ret = nouveau_bo_map(priv->bo);
+ if (ret)
+ nouveau_bo_unpin(priv->bo);
+ }
+ if (ret)
+ nouveau_bo_ref(NULL, &priv->bo);
+ }
+
+ if (ret == 0)
+ ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
+ TTM_PL_FLAG_TT, 0, 0, NULL,
+ &priv->bo_gart);
+ if (ret == 0) {
+ ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT);
+ if (ret == 0) {
+ ret = nouveau_bo_map(priv->bo_gart);
+ if (ret)
+ nouveau_bo_unpin(priv->bo_gart);
+ }
+ if (ret)
+ nouveau_bo_ref(NULL, &priv->bo_gart);
+ }
+
if (ret)
nv84_fence_destroy(drm);
return ret;
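
The create path above runs the same allocate/pin/map sequence, with unwinding on failure, once for the VRAM buffer and once for the GART buffer. As a reading aid only, here is a minimal sketch of that idiom factored into a single helper; nv84_fence_bo_init() is a hypothetical name and not part of the patch, while the nouveau_bo_* calls and their arguments are taken from the hunk above.

#include "nouveau_drm.h"	/* assumed driver-internal headers */
#include "nouveau_bo.h"

/* hypothetical helper, mirroring the allocate/pin/map pattern used above */
static int
nv84_fence_bo_init(struct nouveau_drm *drm, u32 size, u32 domain,
		   struct nouveau_bo **pbo)
{
	int ret;

	/* allocate the backing object in the requested memory domain */
	ret = nouveau_bo_new(drm->dev, size, 0, domain, 0, 0, NULL, pbo);
	if (ret)
		return ret;

	/* pin it so its GPU address stays fixed for the fence engine */
	ret = nouveau_bo_pin(*pbo, domain);
	if (ret == 0) {
		/* map it so the CPU can read/write the per-channel sequences */
		ret = nouveau_bo_map(*pbo);
		if (ret)
			nouveau_bo_unpin(*pbo);
	}
	if (ret)
		nouveau_bo_ref(NULL, pbo);
	return ret;
}

With such a helper the two blocks in nv84_fence_create() would reduce to two calls; the patch keeps them open-coded.
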
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index 2a56b1b551c..9566267fbc4 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -34,203 +34,57 @@
#include "nv50_display.h"
-struct nvc0_fence_priv {
- struct nouveau_fence_priv base;
- struct nouveau_bo *bo;
- u32 *suspend;
-};
-
-struct nvc0_fence_chan {
- struct nouveau_fence_chan base;
- struct nouveau_vma vma;
- struct nouveau_vma dispc_vma[4];
-};
-
-u64
-nvc0_fence_crtc(struct nouveau_channel *chan, int crtc)
-{
- struct nvc0_fence_chan *fctx = chan->fence;
- return fctx->dispc_vma[crtc].offset;
-}
-
static int
-nvc0_fence_emit(struct nouveau_fence *fence)
+nvc0_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- struct nouveau_channel *chan = fence->channel;
- struct nvc0_fence_chan *fctx = chan->fence;
- struct nouveau_fifo_chan *fifo = (void *)chan->object;
- u64 addr = fctx->vma.offset + fifo->chid * 16;
- int ret;
-
- ret = RING_SPACE(chan, 5);
+ int ret = RING_SPACE(chan, 6);
if (ret == 0) {
- BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(addr));
- OUT_RING (chan, lower_32_bits(addr));
- OUT_RING (chan, fence->sequence);
+ BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5);
+ OUT_RING (chan, upper_32_bits(virtual));
+ OUT_RING (chan, lower_32_bits(virtual));
+ OUT_RING (chan, sequence);
OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
+ OUT_RING (chan, 0x00000000);
FIRE_RING (chan);
}
-
return ret;
}
static int
-nvc0_fence_sync(struct nouveau_fence *fence,
- struct nouveau_channel *prev, struct nouveau_channel *chan)
+nvc0_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- struct nvc0_fence_chan *fctx = chan->fence;
- struct nouveau_fifo_chan *fifo = (void *)prev->object;
- u64 addr = fctx->vma.offset + fifo->chid * 16;
- int ret;
-
- ret = RING_SPACE(chan, 5);
+ int ret = RING_SPACE(chan, 5);
if (ret == 0) {
BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(addr));
- OUT_RING (chan, lower_32_bits(addr));
- OUT_RING (chan, fence->sequence);
+ OUT_RING (chan, upper_32_bits(virtual));
+ OUT_RING (chan, lower_32_bits(virtual));
+ OUT_RING (chan, sequence);
OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL |
NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
FIRE_RING (chan);
}
-
return ret;
}
-static u32
-nvc0_fence_read(struct nouveau_channel *chan)
-{
- struct nouveau_fifo_chan *fifo = (void *)chan->object;
- struct nvc0_fence_priv *priv = chan->drm->fence;
- return nouveau_bo_rd32(priv->bo, fifo->chid * 16/4);
-}
-
-static void
-nvc0_fence_context_del(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->drm->dev;
- struct nvc0_fence_priv *priv = chan->drm->fence;
- struct nvc0_fence_chan *fctx = chan->fence;
- int i;
-
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
- nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
- }
-
- nouveau_bo_vma_del(priv->bo, &fctx->vma);
- nouveau_fence_context_del(&fctx->base);
- chan->fence = NULL;
- kfree(fctx);
-}
-
static int
nvc0_fence_context_new(struct nouveau_channel *chan)
{
- struct nouveau_fifo_chan *fifo = (void *)chan->object;
- struct nouveau_client *client = nouveau_client(fifo);
- struct nvc0_fence_priv *priv = chan->drm->fence;
- struct nvc0_fence_chan *fctx;
- int ret, i;
-
- fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
- if (!fctx)
- return -ENOMEM;
-
- nouveau_fence_context_new(&fctx->base);
-
- ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma);
- if (ret)
- nvc0_fence_context_del(chan);
-
- /* map display semaphore buffers into channel's vm */
- for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
- ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
+ int ret = nv84_fence_context_new(chan);
+ if (ret == 0) {
+ struct nv84_fence_chan *fctx = chan->fence;
+ fctx->base.emit32 = nvc0_fence_emit32;
+ fctx->base.sync32 = nvc0_fence_sync32;
}
-
- nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000);
return ret;
}
-static bool
-nvc0_fence_suspend(struct nouveau_drm *drm)
-{
- struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
- struct nvc0_fence_priv *priv = drm->fence;
- int i;
-
- priv->suspend = vmalloc((pfifo->max + 1) * sizeof(u32));
- if (priv->suspend) {
- for (i = 0; i <= pfifo->max; i++)
- priv->suspend[i] = nouveau_bo_rd32(priv->bo, i);
- }
-
- return priv->suspend != NULL;
-}
-
-static void
-nvc0_fence_resume(struct nouveau_drm *drm)
-{
- struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
- struct nvc0_fence_priv *priv = drm->fence;
- int i;
-
- if (priv->suspend) {
- for (i = 0; i <= pfifo->max; i++)
- nouveau_bo_wr32(priv->bo, i, priv->suspend[i]);
- vfree(priv->suspend);
- priv->suspend = NULL;
- }
-}
-
-static void
-nvc0_fence_destroy(struct nouveau_drm *drm)
-{
- struct nvc0_fence_priv *priv = drm->fence;
- nouveau_bo_unmap(priv->bo);
- if (priv->bo)
- nouveau_bo_unpin(priv->bo);
- nouveau_bo_ref(NULL, &priv->bo);
- drm->fence = NULL;
- kfree(priv);
-}
-
int
nvc0_fence_create(struct nouveau_drm *drm)
{
- struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
- struct nvc0_fence_priv *priv;
- int ret;
-
- priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->base.dtor = nvc0_fence_destroy;
- priv->base.suspend = nvc0_fence_suspend;
- priv->base.resume = nvc0_fence_resume;
- priv->base.context_new = nvc0_fence_context_new;
- priv->base.context_del = nvc0_fence_context_del;
- priv->base.emit = nvc0_fence_emit;
- priv->base.sync = nvc0_fence_sync;
- priv->base.read = nvc0_fence_read;
-
- ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
- TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
+ int ret = nv84_fence_create(drm);
if (ret == 0) {
- ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
- if (ret == 0) {
- ret = nouveau_bo_map(priv->bo);
- if (ret)
- nouveau_bo_unpin(priv->bo);
- }
- if (ret)
- nouveau_bo_ref(NULL, &priv->bo);
+ struct nv84_fence_priv *priv = drm->fence;
+ priv->base.context_new = nvc0_fence_context_new;
}
-
- if (ret)
- nvc0_fence_destroy(drm);
return ret;
}
diff --git a/drivers/staging/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 09f65dc3d2c..09f65dc3d2c 100644
--- a/drivers/staging/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
diff --git a/drivers/staging/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile
index d85e058f284..d85e058f284 100644
--- a/drivers/staging/omapdrm/Makefile
+++ b/drivers/gpu/drm/omapdrm/Makefile
diff --git a/drivers/gpu/drm/omapdrm/TODO b/drivers/gpu/drm/omapdrm/TODO
new file mode 100644
index 00000000000..4d8c18aa5dd
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/TODO
@@ -0,0 +1,23 @@
+TODO
+. Where should we do eviction (detach_pages())? We aren't necessarily
+ accessing the pages via a GART, so maybe we need some other threshold
+ to put a cap on the # of pages that can be pinned.
+ . Use mm_shrinker to trigger unpinning pages.
+ . This is mainly theoretical since most of these devices don't actually
+ have swap or a hard drive.
+. GEM/shmem backed pages can have existing mappings (kernel linear map,
+ etc.), which isn't really ideal.
+. Revisit GEM sync object infrastructure.. TTM has some framework for this
+ already. Possibly this could be refactored out and made more common?
+ There should be some way to do this with less wheel-reinvention.
+ . This can be handled by the dma-buf fence/reservation stuff when it
+ lands
+
+Userspace:
+. git://anongit.freedesktop.org/xorg/driver/xf86-video-omap
+
+Currently tested on
+. OMAP3530 beagleboard
+. OMAP4430 pandaboard
+. OMAP4460 pandaboard
+. OMAP5432 uEVM
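
The shrinker item in the TODO above refers to the kernel's generic mm shrinker hook. The following is a hypothetical sketch of how omapdrm could wire into that interface as it exists in this kernel generation; omap_gem_purgeable_count(), omap_gem_evict_pages() and omap_gem_shrinker_init() are invented placeholder names, not functions added by this patch.

#include <linux/shrinker.h>

/* placeholder: count of pages that could be detached from idle GEM objects */
static int omap_gem_purgeable_count(void)
{
	return 0;
}

/* placeholder: detach pages from up to @nr idle, unpinned GEM objects */
static void omap_gem_evict_pages(unsigned long nr)
{
}

static int omap_gem_shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
	/* nr_to_scan == 0 is a query: just report how much we could free */
	if (sc->nr_to_scan)
		omap_gem_evict_pages(sc->nr_to_scan);
	return omap_gem_purgeable_count();
}

static struct shrinker omap_gem_shrinker = {
	.shrink = omap_gem_shrink,
	.seeks  = DEFAULT_SEEKS,
};

/* register once at driver load; pair with unregister_shrinker() on unload */
void omap_gem_shrinker_init(void)
{
	register_shrinker(&omap_gem_shrinker);
}

A real implementation would walk the driver's GEM object list under a lock and call its page-detach path for idle, unpinned objects.
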
diff --git a/drivers/staging/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 8979c80adb5..c451c41a7a7 100644
--- a/drivers/staging/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_connector.c
+ * drivers/gpu/drm/omapdrm/omap_connector.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
diff --git a/drivers/staging/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 32109c09357..bec66a490b8 100644
--- a/drivers/staging/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_crtc.c
+ * drivers/gpu/drm/omapdrm/omap_crtc.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
@@ -274,17 +274,16 @@ static void page_flip_worker(struct work_struct *work)
struct omap_crtc *omap_crtc =
container_of(work, struct omap_crtc, page_flip_work);
struct drm_crtc *crtc = &omap_crtc->base;
- struct drm_device *dev = crtc->dev;
struct drm_display_mode *mode = &crtc->mode;
struct drm_gem_object *bo;
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&crtc->mutex);
omap_plane_mode_set(omap_crtc->plane, crtc, crtc->fb,
0, 0, mode->hdisplay, mode->vdisplay,
crtc->x << 16, crtc->y << 16,
mode->hdisplay << 16, mode->vdisplay << 16,
vblank_cb, crtc);
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&crtc->mutex);
bo = omap_framebuffer_bo(crtc->fb, 0);
drm_gem_object_unreference_unlocked(bo);
@@ -417,7 +416,7 @@ static void apply_worker(struct work_struct *work)
* the callbacks and list modification all serialized
* with respect to modesetting ioctls from userspace.
*/
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&crtc->mutex);
dispc_runtime_get();
/*
@@ -462,16 +461,15 @@ static void apply_worker(struct work_struct *work)
out:
dispc_runtime_put();
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&crtc->mutex);
}
int omap_crtc_apply(struct drm_crtc *crtc,
struct omap_drm_apply *apply)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ WARN_ON(!mutex_is_locked(&crtc->mutex));
/* no need to queue it again if it is already queued: */
if (apply->queued)
diff --git a/drivers/staging/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
index 2f122e00b51..c27f59da7f2 100644
--- a/drivers/staging/omapdrm/omap_debugfs.c
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_debugfs.c
+ * drivers/gpu/drm/omapdrm/omap_debugfs.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob.clark@linaro.org>
@@ -57,21 +57,11 @@ static int fb_show(struct seq_file *m, void *arg)
struct drm_device *dev = node->minor->dev;
struct omap_drm_private *priv = dev->dev_private;
struct drm_framebuffer *fb;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->mode_config.mutex);
- if (ret)
- return ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret) {
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
- }
seq_printf(m, "fbcon ");
omap_framebuffer_describe(priv->fbdev->fb, m);
+ mutex_lock(&dev->mode_config.fb_lock);
list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
if (fb == priv->fbdev->fb)
continue;
@@ -79,9 +69,7 @@ static int fb_show(struct seq_file *m, void *arg)
seq_printf(m, "user ");
omap_framebuffer_describe(fb, m);
}
-
- mutex_unlock(&dev->struct_mutex);
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&dev->mode_config.fb_lock);
return 0;
}
diff --git a/drivers/staging/omapdrm/omap_dmm_priv.h b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
index 58bcd6ae025..58bcd6ae025 100644
--- a/drivers/staging/omapdrm/omap_dmm_priv.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 9b794c933c8..9b794c933c8 100644
--- a/drivers/staging/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.h b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
index 4fdd61e54bd..4fdd61e54bd 100644
--- a/drivers/staging/omapdrm/omap_dmm_tiler.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
diff --git a/drivers/staging/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 480dc343446..079c54c6f94 100644
--- a/drivers/staging/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_drv.c
+ * drivers/gpu/drm/omapdrm/omap_drv.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
@@ -452,9 +452,9 @@ static void dev_lastclose(struct drm_device *dev)
}
}
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
ret = drm_fb_helper_restore_fbdev_mode(priv->fbdev);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
if (ret)
DBG("failed to restore crtc mode");
}
diff --git a/drivers/staging/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index f921027e750..d4f997bb4ac 100644
--- a/drivers/staging/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_drv.h
+ * drivers/gpu/drm/omapdrm/omap_drv.h
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
@@ -25,8 +25,8 @@
#include <linux/types.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/omap_drm.h>
#include <linux/platform_data/omap_drm.h>
-#include "omap_drm.h"
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
diff --git a/drivers/staging/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 25fc0c7b4f6..21d126d0317 100644
--- a/drivers/staging/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_encoder.c
+ * drivers/gpu/drm/omapdrm/omap_encoder.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
diff --git a/drivers/staging/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index bb496994214..8031402e795 100644
--- a/drivers/staging/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_fb.c
+ * drivers/gpu/drm/omapdrm/omap_fb.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
@@ -423,14 +423,6 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
}
fb = &omap_fb->base;
- ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
- if (ret) {
- dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
- goto fail;
- }
-
- DBG("create: FB ID: %d (%p)", fb->base.id, fb);
-
omap_fb->format = format;
for (i = 0; i < n; i++) {
@@ -461,6 +453,14 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+ ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
+ if (ret) {
+ dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+ goto fail;
+ }
+
+ DBG("create: FB ID: %d (%p)", fb->base.id, fb);
+
return fb;
fail:
diff --git a/drivers/staging/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 70f2d6ed2ed..b11ce609fcc 100644
--- a/drivers/staging/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_fbdev.c
+ * drivers/gpu/drm/omapdrm/omap_fbdev.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
@@ -131,9 +131,6 @@ static struct fb_ops omap_fb_ops = {
.fb_pan_display = omap_fbdev_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
-
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
static int omap_fbdev_create(struct drm_fb_helper *helper,
@@ -275,8 +272,10 @@ fail:
if (ret) {
if (fbi)
framebuffer_release(fbi);
- if (fb)
+ if (fb) {
+ drm_framebuffer_unregister_private(fb);
drm_framebuffer_remove(fb);
+ }
}
return ret;
@@ -294,25 +293,10 @@ static void omap_crtc_fb_gamma_get(struct drm_crtc *crtc,
DBG("fbdev: get gamma");
}
-static int omap_fbdev_probe(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = omap_fbdev_create(helper, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
static struct drm_fb_helper_funcs omap_fb_helper_funcs = {
.gamma_set = omap_crtc_fb_gamma_set,
.gamma_get = omap_crtc_fb_gamma_get,
- .fb_probe = omap_fbdev_probe,
+ .fb_probe = omap_fbdev_create,
};
static struct drm_fb_helper *get_fb(struct fb_info *fbi)
@@ -365,6 +349,10 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
}
drm_fb_helper_single_add_all_connectors(helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
drm_fb_helper_initial_config(helper, 32);
priv->fbdev = helper;
@@ -398,8 +386,10 @@ void omap_fbdev_free(struct drm_device *dev)
fbdev = to_omap_fbdev(priv->fbdev);
/* this will free the backing object */
- if (fbdev->fb)
+ if (fbdev->fb) {
+ drm_framebuffer_unregister_private(fbdev->fb);
drm_framebuffer_remove(fbdev->fb);
+ }
kfree(fbdev);
diff --git a/drivers/staging/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 518d03d4d4f..ebbdf4132e9 100644
--- a/drivers/staging/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_gem.c
+ * drivers/gpu/drm/omapdrm/omap_gem.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob.clark@linaro.org>
diff --git a/drivers/staging/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index a3236abfca3..ac74d1bc67b 100644
--- a/drivers/staging/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_gem_dmabuf.c
+ * drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob.clark@linaro.org>
diff --git a/drivers/staging/omapdrm/omap_gem_helpers.c b/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
index ffb8cceaeb4..e4a66a35fc6 100644
--- a/drivers/staging/omapdrm/omap_gem_helpers.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_gem_helpers.c
+ * drivers/gpu/drm/omapdrm/omap_gem_helpers.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob.clark@linaro.org>
diff --git a/drivers/staging/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index 2629ba7be6c..e01303ee00c 100644
--- a/drivers/staging/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_irq.c
+ * drivers/gpu/drm/omapdrm/omap_irq.c
*
* Copyright (C) 2012 Texas Instruments
* Author: Rob Clark <rob.clark@linaro.org>
diff --git a/drivers/staging/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index c063476db3b..2882cda6ea1 100644
--- a/drivers/staging/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_plane.c
+ * drivers/gpu/drm/omapdrm/omap_plane.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob.clark@linaro.org>
diff --git a/drivers/staging/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c
index efb60951054..efb60951054 100644
--- a/drivers/staging/omapdrm/tcm-sita.c
+++ b/drivers/gpu/drm/omapdrm/tcm-sita.c
diff --git a/drivers/staging/omapdrm/tcm-sita.h b/drivers/gpu/drm/omapdrm/tcm-sita.h
index 0444f868671..0444f868671 100644
--- a/drivers/staging/omapdrm/tcm-sita.h
+++ b/drivers/gpu/drm/omapdrm/tcm-sita.h
diff --git a/drivers/staging/omapdrm/tcm.h b/drivers/gpu/drm/omapdrm/tcm.h
index a8d5ce47686..a8d5ce47686 100644
--- a/drivers/staging/omapdrm/tcm.h
+++ b/drivers/gpu/drm/omapdrm/tcm.h
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index ea92bbe3ed3..970f8e92dbb 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -1,31 +1,8 @@
-config DRM_RADEON_KMS
- bool "Enable modesetting on radeon by default - NEW DRIVER"
+config DRM_RADEON_UMS
+ bool "Enable userspace modesetting on radeon (DEPRECATED)"
depends on DRM_RADEON
- select BACKLIGHT_CLASS_DEVICE
help
- Choose this option if you want kernel modesetting enabled by default.
+ Choose this option if you still need userspace modesetting.
- This is a completely new driver. It's only part of the existing drm
- for compatibility reasons. It requires an entirely different graphics
- stack above it and works very differently from the old drm stack.
- i.e. don't enable this unless you know what you are doing it may
- cause issues or bugs compared to the previous userspace driver stack.
-
- When kernel modesetting is enabled the IOCTL of radeon/drm
- driver are considered as invalid and an error message is printed
- in the log and they return failure.
-
- KMS enabled userspace will use new API to talk with the radeon/drm
- driver. The new API provide functions to create/destroy/share/mmap
- buffer object which are then managed by the kernel memory manager
- (here TTM). In order to submit command to the GPU the userspace
- provide a buffer holding the command stream, along this buffer
- userspace have to provide a list of buffer object used by the
- command stream. The kernel radeon driver will then place buffer
- in GPU accessible memory and will update command stream to reflect
- the position of the different buffers.
-
- The kernel will also perform security check on command stream
- provided by the user, we want to catch and forbid any illegal use
- of the GPU such as DMA into random system memory or into memory
- not owned by the process supplying the command stream.
+ Userspace modesetting has been deprecated for quite some time now, so
+ enable this only if you have ancient versions of the DDX drivers.
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index a6598fd6642..bf172522ea6 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -56,8 +56,12 @@ $(obj)/r600_cs.o: $(obj)/r600_reg_safe.h
$(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h $(obj)/cayman_reg_safe.h
-radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
- radeon_irq.o r300_cmdbuf.o r600_cp.o
+radeon-y := radeon_drv.o
+
+# add UMS driver
+radeon-$(CONFIG_DRM_RADEON_UMS)+= radeon_cp.o radeon_state.o radeon_mem.o \
+ radeon_irq.o r300_cmdbuf.o r600_cp.o r600_blit.o
+
# add KMS driver
radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \
@@ -67,7 +71,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \
radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
- r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
+ r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 5ce9bf51a8d..46a9c377285 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1238,6 +1238,8 @@ static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
static void atom_index_iio(struct atom_context *ctx, int base)
{
ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
+ if (!ctx->iio)
+ return;
while (CU8(base) == ATOM_IIO_START) {
ctx->iio[CU8(base + 1)] = base + 2;
base += 2;
@@ -1287,6 +1289,10 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
+ if (!ctx->iio) {
+ atom_destroy(ctx);
+ return NULL;
+ }
str = CSTR(CU16(base + ATOM_ROM_MSG_PTR));
while (*str && ((*str == '\n') || (*str == '\r')))
@@ -1335,8 +1341,7 @@ int atom_asic_init(struct atom_context *ctx)
void atom_destroy(struct atom_context *ctx)
{
- if (ctx->iio)
- kfree(ctx->iio);
+ kfree(ctx->iio);
kfree(ctx);
}
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 9175615bbd8..21a892c6ab9 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -252,8 +252,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
radeon_crtc->enabled = true;
/* adjust pm to dpms changes BEFORE enabling crtcs */
radeon_pm_compute_clocks(rdev);
- if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
- atombios_powergate_crtc(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
@@ -271,8 +269,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_DISABLE);
radeon_crtc->enabled = false;
- if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
- atombios_powergate_crtc(crtc, ATOM_ENABLE);
/* adjust pm to dpms changes AFTER disabling crtcs */
radeon_pm_compute_clocks(rdev);
break;
@@ -1844,6 +1840,8 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
int i;
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ if (ASIC_IS_DCE6(rdev))
+ atombios_powergate_crtc(crtc, ATOM_ENABLE);
for (i = 0; i < rdev->num_crtc; i++) {
if (rdev->mode_info.crtcs[i] &&
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index a2d478e8692..3c38ea46531 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -403,6 +403,19 @@ void evergreen_pm_misc(struct radeon_device *rdev)
rdev->pm.current_vddc = voltage->voltage;
DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
}
+
+ /* starting with BTC, there is one state that is used for both
+ * MH and SH. The difference is that we always use the high clock index for
+ * mclk and vddci.
+ */
+ if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
+ (rdev->family >= CHIP_BARTS) &&
+ rdev->pm.active_crtc_count &&
+ ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
+ (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
+ voltage = &rdev->pm.power_state[req_ps_idx].
+ clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].voltage;
+
 /* 0xff01 is a flag rather than an actual voltage */
if (voltage->vddci == 0xff01)
return;
@@ -2308,32 +2321,8 @@ int evergreen_mc_init(struct radeon_device *rdev)
return 0;
}
-bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+void evergreen_print_gpu_status_regs(struct radeon_device *rdev)
{
- u32 srbm_status;
- u32 grbm_status;
- u32 grbm_status_se0, grbm_status_se1;
-
- srbm_status = RREG32(SRBM_STATUS);
- grbm_status = RREG32(GRBM_STATUS);
- grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
- grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
- if (!(grbm_status & GUI_ACTIVE)) {
- radeon_ring_lockup_update(ring);
- return false;
- }
- /* force CP activities */
- radeon_ring_force_activity(rdev, ring);
- return radeon_ring_test_lockup(rdev, ring);
-}
-
-static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev)
-{
- u32 grbm_reset = 0;
-
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return;
-
dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
RREG32(GRBM_STATUS));
dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
@@ -2342,6 +2331,8 @@ static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev)
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
RREG32(SRBM_STATUS));
+ dev_info(rdev->dev, " SRBM_STATUS2 = 0x%08X\n",
+ RREG32(SRBM_STATUS2));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
@@ -2350,112 +2341,283 @@ static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev)
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+ if (rdev->family >= CHIP_CAYMAN) {
+ dev_info(rdev->dev, " R_00D834_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG + 0x800));
+ }
+}
- /* Disable CP parsing/prefetching */
- WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+bool evergreen_is_display_hung(struct radeon_device *rdev)
+{
+ u32 crtc_hung = 0;
+ u32 crtc_status[6];
+ u32 i, j, tmp;
- /* reset all the gfx blocks */
- grbm_reset = (SOFT_RESET_CP |
- SOFT_RESET_CB |
- SOFT_RESET_DB |
- SOFT_RESET_PA |
- SOFT_RESET_SC |
- SOFT_RESET_SPI |
- SOFT_RESET_SH |
- SOFT_RESET_SX |
- SOFT_RESET_TC |
- SOFT_RESET_TA |
- SOFT_RESET_VC |
- SOFT_RESET_VGT);
-
- dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
- WREG32(GRBM_SOFT_RESET, grbm_reset);
- (void)RREG32(GRBM_SOFT_RESET);
- udelay(50);
- WREG32(GRBM_SOFT_RESET, 0);
- (void)RREG32(GRBM_SOFT_RESET);
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN) {
+ crtc_status[i] = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ crtc_hung |= (1 << i);
+ }
+ }
- dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
- RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
- RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
- RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
- RREG32(SRBM_STATUS));
- dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
- RREG32(CP_STALLED_STAT1));
- dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
- RREG32(CP_STALLED_STAT2));
- dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
- RREG32(CP_BUSY_STAT));
- dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
- RREG32(CP_STAT));
+ for (j = 0; j < 10; j++) {
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (crtc_hung & (1 << i)) {
+ tmp = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ if (tmp != crtc_status[i])
+ crtc_hung &= ~(1 << i);
+ }
+ }
+ if (crtc_hung == 0)
+ return false;
+ udelay(100);
+ }
+
+ return true;
}
-static void evergreen_gpu_soft_reset_dma(struct radeon_device *rdev)
+static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
{
+ u32 reset_mask = 0;
u32 tmp;
- if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
- return;
+ /* GRBM_STATUS */
+ tmp = RREG32(GRBM_STATUS);
+ if (tmp & (PA_BUSY | SC_BUSY |
+ SH_BUSY | SX_BUSY |
+ TA_BUSY | VGT_BUSY |
+ DB_BUSY | CB_BUSY |
+ SPI_BUSY | VGT_BUSY_NO_DMA))
+ reset_mask |= RADEON_RESET_GFX;
- dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
+ if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
+ CP_BUSY | CP_COHERENCY_BUSY))
+ reset_mask |= RADEON_RESET_CP;
- /* Disable DMA */
- tmp = RREG32(DMA_RB_CNTL);
- tmp &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL, tmp);
+ if (tmp & GRBM_EE_BUSY)
+ reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
- /* Reset dma */
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
+ /* DMA_STATUS_REG */
+ tmp = RREG32(DMA_STATUS_REG);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA;
- dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
+ /* SRBM_STATUS2 */
+ tmp = RREG32(SRBM_STATUS2);
+ if (tmp & DMA_BUSY)
+ reset_mask |= RADEON_RESET_DMA;
+
+ /* SRBM_STATUS */
+ tmp = RREG32(SRBM_STATUS);
+ if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
+ reset_mask |= RADEON_RESET_RLC;
+
+ if (tmp & IH_BUSY)
+ reset_mask |= RADEON_RESET_IH;
+
+ if (tmp & SEM_BUSY)
+ reset_mask |= RADEON_RESET_SEM;
+
+ if (tmp & GRBM_RQ_PENDING)
+ reset_mask |= RADEON_RESET_GRBM;
+
+ if (tmp & VMC_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
+
+ if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
+ MCC_BUSY | MCD_BUSY))
+ reset_mask |= RADEON_RESET_MC;
+
+ if (evergreen_is_display_hung(rdev))
+ reset_mask |= RADEON_RESET_DISPLAY;
+
+ /* VM_L2_STATUS */
+ tmp = RREG32(VM_L2_STATUS);
+ if (tmp & L2_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
+
+ return reset_mask;
}
-static int evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+static void evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
struct evergreen_mc_save save;
-
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
-
- if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
- reset_mask &= ~RADEON_RESET_DMA;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 tmp;
if (reset_mask == 0)
- return 0;
+ return;
dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+ evergreen_print_gpu_status_regs(rdev);
+
+ /* Disable CP parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+ if (reset_mask & RADEON_RESET_DMA) {
+ /* Disable DMA */
+ tmp = RREG32(DMA_RB_CNTL);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, tmp);
+ }
+
+ udelay(50);
+
evergreen_mc_stop(rdev, &save);
if (evergreen_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
- if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
- evergreen_gpu_soft_reset_gfx(rdev);
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
+ grbm_soft_reset |= SOFT_RESET_DB |
+ SOFT_RESET_CB |
+ SOFT_RESET_PA |
+ SOFT_RESET_SC |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SX |
+ SOFT_RESET_SH |
+ SOFT_RESET_TC |
+ SOFT_RESET_TA |
+ SOFT_RESET_VC |
+ SOFT_RESET_VGT;
+ }
+
+ if (reset_mask & RADEON_RESET_CP) {
+ grbm_soft_reset |= SOFT_RESET_CP |
+ SOFT_RESET_VGT;
+
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+ }
if (reset_mask & RADEON_RESET_DMA)
- evergreen_gpu_soft_reset_dma(rdev);
+ srbm_soft_reset |= SOFT_RESET_DMA;
+
+ if (reset_mask & RADEON_RESET_DISPLAY)
+ srbm_soft_reset |= SOFT_RESET_DC;
+
+ if (reset_mask & RADEON_RESET_RLC)
+ srbm_soft_reset |= SOFT_RESET_RLC;
+
+ if (reset_mask & RADEON_RESET_SEM)
+ srbm_soft_reset |= SOFT_RESET_SEM;
+
+ if (reset_mask & RADEON_RESET_IH)
+ srbm_soft_reset |= SOFT_RESET_IH;
+
+ if (reset_mask & RADEON_RESET_GRBM)
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+
+ if (reset_mask & RADEON_RESET_VMC)
+ srbm_soft_reset |= SOFT_RESET_VMC;
+
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (reset_mask & RADEON_RESET_MC)
+ srbm_soft_reset |= SOFT_RESET_MC;
+ }
+
+ if (grbm_soft_reset) {
+ tmp = RREG32(GRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+ }
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(SRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+ }
/* Wait a little for things to settle down */
udelay(50);
evergreen_mc_resume(rdev, &save);
- return 0;
+ udelay(50);
+
+ evergreen_print_gpu_status_regs(rdev);
}
int evergreen_asic_reset(struct radeon_device *rdev)
{
- return evergreen_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
- RADEON_RESET_COMPUTE |
- RADEON_RESET_DMA));
+ u32 reset_mask;
+
+ reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+ if (reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, true);
+
+ evergreen_gpu_soft_reset(rdev, reset_mask);
+
+ reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+ if (!reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, false);
+
+ return 0;
+}
+
+/**
+ * evergreen_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_CP))) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force CP activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * evergreen_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & RADEON_RESET_DMA)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
/* Interrupts */
@@ -3280,14 +3442,14 @@ void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring = &rdev->ring[fence->ring];
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
/* write the fence */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
radeon_ring_write(ring, addr & 0xfffffffc);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
radeon_ring_write(ring, fence->seq);
/* generate an interrupt */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
/* flush HDP */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
radeon_ring_write(ring, 1);
}
@@ -3310,7 +3472,7 @@ void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
while ((next_rptr & 7) != 5)
next_rptr++;
next_rptr += 3;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
radeon_ring_write(ring, next_rptr);
@@ -3320,8 +3482,8 @@ void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
* Pad as necessary with NOPs.
*/
while ((ring->wptr & 7) != 5)
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
@@ -3380,7 +3542,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
if (cur_size_in_dw > 0xFFFFF)
cur_size_in_dw = 0xFFFFF;
size_in_dw -= cur_size_in_dw;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
radeon_ring_write(ring, dst_offset & 0xfffffffc);
radeon_ring_write(ring, src_offset & 0xfffffffc);
radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
@@ -3488,7 +3650,7 @@ static int evergreen_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
DMA_RB_RPTR, DMA_RB_WPTR,
- 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
if (r)
return r;
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index ee4cff534f1..99fb13286fd 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -36,9 +36,6 @@
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc);
-static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc);
-
struct evergreen_cs_track {
u32 group_size;
u32 nbanks;
@@ -1009,223 +1006,35 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
}
/**
- * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet
- * @parser: parser structure holding parsing context.
- * @pkt: where to store packet informations
- *
- * Assume that chunk_ib_index is properly set. Will return -EINVAL
- * if packet is bigger than remaining ib size. or if packets is unknown.
- **/
-static int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- unsigned idx)
-{
- struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
- uint32_t header;
-
- if (idx >= ib_chunk->length_dw) {
- DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
- idx, ib_chunk->length_dw);
- return -EINVAL;
- }
- header = radeon_get_ib_value(p, idx);
- pkt->idx = idx;
- pkt->type = CP_PACKET_GET_TYPE(header);
- pkt->count = CP_PACKET_GET_COUNT(header);
- pkt->one_reg_wr = 0;
- switch (pkt->type) {
- case PACKET_TYPE0:
- pkt->reg = CP_PACKET0_GET_REG(header);
- break;
- case PACKET_TYPE3:
- pkt->opcode = CP_PACKET3_GET_OPCODE(header);
- break;
- case PACKET_TYPE2:
- pkt->count = -1;
- break;
- default:
- DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
- return -EINVAL;
- }
- if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
- DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
- pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
- return -EINVAL;
- }
- return 0;
-}
-
-/**
- * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3
+ * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
* @parser: parser structure holding parsing context.
- * @data: pointer to relocation data
- * @offset_start: starting offset
- * @offset_mask: offset mask (to align start offset on)
- * @reloc: reloc informations
*
- * Check next packet is relocation packet3, do bo validation and compute
- * GPU offset using the provided start.
- **/
-static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc)
-{
- struct radeon_cs_chunk *relocs_chunk;
- struct radeon_cs_packet p3reloc;
- unsigned idx;
- int r;
-
- if (p->chunk_relocs_idx == -1) {
- DRM_ERROR("No relocation chunk !\n");
- return -EINVAL;
- }
- *cs_reloc = NULL;
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
- r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
- if (r) {
- return r;
- }
- p->idx += p3reloc.count + 2;
- if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
- DRM_ERROR("No packet3 for relocation for packet at %d.\n",
- p3reloc.idx);
- return -EINVAL;
- }
- idx = radeon_get_ib_value(p, p3reloc.idx + 1);
- if (idx >= relocs_chunk->length_dw) {
- DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
- idx, relocs_chunk->length_dw);
- return -EINVAL;
- }
- /* FIXME: we assume reloc size is 4 dwords */
- *cs_reloc = p->relocs_ptr[(idx / 4)];
- return 0;
-}
-
-/**
- * evergreen_cs_packet_next_is_pkt3_nop() - test if the next packet is NOP
- * @p: structure holding the parser context.
- *
- * Check if the next packet is a relocation packet3.
- **/
-static bool evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
-{
- struct radeon_cs_packet p3reloc;
- int r;
-
- r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
- if (r) {
- return false;
- }
- if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
- return false;
- }
- return true;
-}
-
-/**
- * evergreen_cs_packet_next_vline() - parse userspace VLINE packet
- * @parser: parser structure holding parsing context.
- *
- * Userspace sends a special sequence for VLINE waits.
- * PACKET0 - VLINE_START_END + value
- * PACKET3 - WAIT_REG_MEM poll vline status reg
- * RELOC (P3) - crtc_id in reloc.
- *
- * This function parses this and relocates the VLINE START END
- * and WAIT_REG_MEM packets to the correct crtc.
- * It also detects a switched off crtc and nulls out the
- * wait in that case.
+ * This is an Evergreen(+)-specific function for parsing VLINE packets.
+ * The real work is done by the r600_cs_common_vline_parse() function.
+ * Here we just set up the ASIC-specific register tables and call
+ * the common implementation.
*/
static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
- struct drm_mode_object *obj;
- struct drm_crtc *crtc;
- struct radeon_crtc *radeon_crtc;
- struct radeon_cs_packet p3reloc, wait_reg_mem;
- int crtc_id;
- int r;
- uint32_t header, h_idx, reg, wait_reg_mem_info;
- volatile uint32_t *ib;
-
- ib = p->ib.ptr;
-
- /* parse the WAIT_REG_MEM */
- r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
- if (r)
- return r;
-
- /* check its a WAIT_REG_MEM */
- if (wait_reg_mem.type != PACKET_TYPE3 ||
- wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
- DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
- return -EINVAL;
- }
-
- wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
- /* bit 4 is reg (0) or mem (1) */
- if (wait_reg_mem_info & 0x10) {
- DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
- return -EINVAL;
- }
- /* waiting for value to be equal */
- if ((wait_reg_mem_info & 0x7) != 0x3) {
- DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
- return -EINVAL;
- }
- if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
- DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
- return -EINVAL;
- }
-
- if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
- DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
- return -EINVAL;
- }
-
- /* jump over the NOP */
- r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
- if (r)
- return r;
-
- h_idx = p->idx - 2;
- p->idx += wait_reg_mem.count + 2;
- p->idx += p3reloc.count + 2;
- header = radeon_get_ib_value(p, h_idx);
- crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
- reg = CP_PACKET0_GET_REG(header);
- obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
- DRM_ERROR("cannot find crtc %d\n", crtc_id);
- return -EINVAL;
- }
- crtc = obj_to_crtc(obj);
- radeon_crtc = to_radeon_crtc(crtc);
- crtc_id = radeon_crtc->crtc_id;
-
- if (!crtc->enabled) {
- /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
- ib[h_idx + 2] = PACKET2(0);
- ib[h_idx + 3] = PACKET2(0);
- ib[h_idx + 4] = PACKET2(0);
- ib[h_idx + 5] = PACKET2(0);
- ib[h_idx + 6] = PACKET2(0);
- ib[h_idx + 7] = PACKET2(0);
- ib[h_idx + 8] = PACKET2(0);
- } else {
- switch (reg) {
- case EVERGREEN_VLINE_START_END:
- header &= ~R600_CP_PACKET0_REG_MASK;
- header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2;
- ib[h_idx] = header;
- ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2;
- break;
- default:
- DRM_ERROR("unknown crtc reloc\n");
- return -EINVAL;
- }
- }
- return 0;
+ static uint32_t vline_start_end[6] = {
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC5_REGISTER_OFFSET
+ };
+ static uint32_t vline_status[6] = {
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET
+ };
+
+ return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
}
static int evergreen_packet0_check(struct radeon_cs_parser *p,
@@ -1347,7 +1156,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_LSTMP_RING_BASE:
case SQ_PSTMP_RING_BASE:
case SQ_VSTMP_RING_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1376,7 +1185,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_Z_INFO:
track->db_z_info = radeon_get_ib_value(p, idx);
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1418,7 +1227,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_dirty = true;
break;
case DB_Z_READ_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1430,7 +1239,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_dirty = true;
break;
case DB_Z_WRITE_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1442,7 +1251,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_dirty = true;
break;
case DB_STENCIL_READ_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1454,7 +1263,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_dirty = true;
break;
case DB_STENCIL_WRITE_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1477,7 +1286,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case VGT_STRMOUT_BUFFER_BASE_1:
case VGT_STRMOUT_BUFFER_BASE_2:
case VGT_STRMOUT_BUFFER_BASE_3:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1499,7 +1308,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->streamout_dirty = true;
break;
case CP_COHER_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
"0x%04X\n", reg);
@@ -1563,7 +1372,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
tmp = (reg - CB_COLOR0_INFO) / 0x3c;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1581,7 +1390,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1642,7 +1451,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR5_ATTRIB:
case CB_COLOR6_ATTRIB:
case CB_COLOR7_ATTRIB:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1670,7 +1479,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR9_ATTRIB:
case CB_COLOR10_ATTRIB:
case CB_COLOR11_ATTRIB:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1703,7 +1512,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR6_FMASK:
case CB_COLOR7_FMASK:
tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
@@ -1720,7 +1529,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR6_CMASK:
case CB_COLOR7_CMASK:
tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
@@ -1758,7 +1567,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR5_BASE:
case CB_COLOR6_BASE:
case CB_COLOR7_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1774,7 +1583,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR9_BASE:
case CB_COLOR10_BASE:
case CB_COLOR11_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1787,7 +1596,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->cb_dirty = true;
break;
case DB_HTILE_DATA_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1905,7 +1714,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_ALU_CONST_CACHE_LS_13:
case SQ_ALU_CONST_CACHE_LS_14:
case SQ_ALU_CONST_CACHE_LS_15:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1919,7 +1728,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONFIG_REG "
"0x%04X\n", reg);
@@ -1933,7 +1742,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -2018,7 +1827,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad SET PREDICATION\n");
return -EINVAL;
@@ -2064,7 +1873,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad INDEX_BASE\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad INDEX_BASE\n");
return -EINVAL;
@@ -2091,7 +1900,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
@@ -2119,7 +1928,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DRAW_INDEX_2\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad DRAW_INDEX_2\n");
return -EINVAL;
@@ -2210,7 +2019,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DISPATCH_INDIRECT\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad DISPATCH_INDIRECT\n");
return -EINVAL;
@@ -2231,7 +2040,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x10) {
uint64_t offset;
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad WAIT_REG_MEM\n");
return -EINVAL;
@@ -2243,6 +2052,9 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
ib[idx+2] = upper_32_bits(offset) & 0xff;
+ } else if (idx_value & 0x100) {
+ DRM_ERROR("cannot use PFP on REG wait\n");
+ return -EINVAL;
}
break;
case PACKET3_CP_DMA:
@@ -2282,7 +2094,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
/* src address space is memory */
if (((info & 0x60000000) >> 29) == 0) {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad CP DMA SRC\n");
return -EINVAL;
@@ -2320,7 +2132,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
if (((info & 0x00300000) >> 20) == 0) {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad CP DMA DST\n");
return -EINVAL;
@@ -2354,7 +2166,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* 0xffffffff/0x0 is flush all cache flag */
if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
radeon_get_ib_value(p, idx + 2) != 0) {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad SURFACE_SYNC\n");
return -EINVAL;
@@ -2370,7 +2182,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if (pkt->count) {
uint64_t offset;
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad EVENT_WRITE\n");
return -EINVAL;
@@ -2391,7 +2203,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad EVENT_WRITE_EOP\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad EVENT_WRITE_EOP\n");
return -EINVAL;
@@ -2413,7 +2225,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad EVENT_WRITE_EOS\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad EVENT_WRITE_EOS\n");
return -EINVAL;
@@ -2480,7 +2292,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
case SQ_TEX_VTX_VALID_TEXTURE:
/* tex base */
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad SET_RESOURCE (tex)\n");
return -EINVAL;
@@ -2511,13 +2323,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if ((tex_dim == SQ_TEX_DIM_2D_MSAA || tex_dim == SQ_TEX_DIM_2D_ARRAY_MSAA) &&
!mip_address &&
- !evergreen_cs_packet_next_is_pkt3_nop(p)) {
+ !radeon_cs_packet_next_is_pkt3_nop(p)) {
/* MIP_ADDRESS should point to FMASK for an MSAA texture.
* It should be 0 if FMASK is disabled. */
moffset = 0;
mipmap = NULL;
} else {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad SET_RESOURCE (tex)\n");
return -EINVAL;
@@ -2536,7 +2348,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
{
uint64_t offset64;
/* vtx base */
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad SET_RESOURCE (vtx)\n");
return -EINVAL;
@@ -2618,7 +2430,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* Updating memory at DST_ADDRESS. */
if (idx_value & 0x1) {
u64 offset;
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
return -EINVAL;
@@ -2637,7 +2449,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* Reading data from SRC_ADDRESS. */
if (((idx_value >> 1) & 0x3) == 2) {
u64 offset;
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
return -EINVAL;
@@ -2662,7 +2474,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad MEM_WRITE (invalid count)\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
return -EINVAL;
@@ -2691,7 +2503,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x1) {
u64 offset;
/* SRC is memory. */
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad COPY_DW (missing src reloc)\n");
return -EINVAL;
@@ -2715,7 +2527,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x2) {
u64 offset;
/* DST is memory. */
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
return -EINVAL;
@@ -2819,7 +2631,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
p->track = track;
}
do {
- r = evergreen_cs_packet_parse(p, &pkt, p->idx);
+ r = radeon_cs_packet_parse(p, &pkt, p->idx);
if (r) {
kfree(p->track);
p->track = NULL;
@@ -2827,12 +2639,12 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
}
p->idx += pkt.count + 2;
switch (pkt.type) {
- case PACKET_TYPE0:
+ case RADEON_PACKET_TYPE0:
r = evergreen_cs_parse_packet0(p, &pkt);
break;
- case PACKET_TYPE2:
+ case RADEON_PACKET_TYPE2:
break;
- case PACKET_TYPE3:
+ case RADEON_PACKET_TYPE3:
r = evergreen_packet3_check(p, &pkt);
break;
default:
@@ -2858,16 +2670,6 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
return 0;
}
-/*
- * DMA
- */
-
-#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
-#define GET_DMA_COUNT(h) ((h) & 0x000fffff)
-#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
-#define GET_DMA_NEW(h) (((h) & 0x04000000) >> 26)
-#define GET_DMA_MISC(h) (((h) & 0x0700000) >> 20)
-
/**
* evergreen_dma_cs_parse() - parse the DMA IB
* @p: parser structure holding parsing context.
@@ -2881,9 +2683,9 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
{
struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
- u32 header, cmd, count, tiled, new_cmd, misc;
+ u32 header, cmd, count, sub_cmd;
volatile u32 *ib = p->ib.ptr;
- u32 idx, idx_value;
+ u32 idx;
u64 src_offset, dst_offset, dst2_offset;
int r;
@@ -2897,9 +2699,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, idx);
cmd = GET_DMA_CMD(header);
count = GET_DMA_COUNT(header);
- tiled = GET_DMA_T(header);
- new_cmd = GET_DMA_NEW(header);
- misc = GET_DMA_MISC(header);
+ sub_cmd = GET_DMA_SUB_CMD(header);
switch (cmd) {
case DMA_PACKET_WRITE:
@@ -2908,19 +2708,27 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("bad DMA_PACKET_WRITE\n");
return -EINVAL;
}
- if (tiled) {
+ switch (sub_cmd) {
+ /* tiled */
+ case 8:
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset <<= 8;
ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
p->idx += count + 7;
- } else {
+ break;
+ /* linear */
+ case 0:
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
p->idx += count + 3;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, header);
+ return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
@@ -2939,338 +2747,330 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("bad DMA_PACKET_COPY\n");
return -EINVAL;
}
- if (tiled) {
- idx_value = radeon_get_ib_value(p, idx + 2);
- if (new_cmd) {
- switch (misc) {
- case 0:
- /* L2T, frame to fields */
- if (idx_value & (1 << 31)) {
- DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- r = r600_dma_cs_next_reloc(p, &dst2_reloc);
- if (r) {
- DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset <<= 8;
- dst2_offset = radeon_get_ib_value(p, idx+2);
- dst2_offset <<= 8;
- src_offset = radeon_get_ib_value(p, idx+8);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
- ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- p->idx += 10;
- break;
- case 1:
- /* L2T, T2L partial */
- if (p->family < CHIP_CAYMAN) {
- DRM_ERROR("L2T, T2L Partial is cayman only !\n");
- return -EINVAL;
- }
- /* detile bit */
- if (idx_value & (1 << 31)) {
- /* tiled src, linear dst */
- ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
-
- ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- } else {
- /* linear src, tiled dst */
- ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- }
- p->idx += 12;
- break;
- case 3:
- /* L2T, broadcast */
- if (idx_value & (1 << 31)) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- r = r600_dma_cs_next_reloc(p, &dst2_reloc);
- if (r) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset <<= 8;
- dst2_offset = radeon_get_ib_value(p, idx+2);
- dst2_offset <<= 8;
- src_offset = radeon_get_ib_value(p, idx+8);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
- ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- p->idx += 10;
- break;
- case 4:
- /* L2T, T2L */
- /* detile bit */
- if (idx_value & (1 << 31)) {
- /* tiled src, linear dst */
- src_offset = radeon_get_ib_value(p, idx+1);
- src_offset <<= 8;
- ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
-
- dst_offset = radeon_get_ib_value(p, idx+7);
- dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
- ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- } else {
- /* linear src, tiled dst */
- src_offset = radeon_get_ib_value(p, idx+7);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
- ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset <<= 8;
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- }
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- p->idx += 9;
- break;
- case 5:
- /* T2T partial */
- if (p->family < CHIP_CAYMAN) {
- DRM_ERROR("L2T, T2L Partial is cayman only !\n");
- return -EINVAL;
- }
- ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
- ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- p->idx += 13;
- break;
- case 7:
- /* L2T, broadcast */
- if (idx_value & (1 << 31)) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- r = r600_dma_cs_next_reloc(p, &dst2_reloc);
- if (r) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset <<= 8;
- dst2_offset = radeon_get_ib_value(p, idx+2);
- dst2_offset <<= 8;
- src_offset = radeon_get_ib_value(p, idx+8);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
- ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- p->idx += 10;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
- return -EINVAL;
- }
+ switch (sub_cmd) {
+ /* Copy L2L, DW aligned */
+ case 0x00:
+ /* L2L, dw */
+ src_offset = radeon_get_ib_value(p, idx+2);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 5;
+ break;
+ /* Copy L2T/T2L */
+ case 0x08:
+ /* detile bit */
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ /* tiled src, linear dst */
+ src_offset = radeon_get_ib_value(p, idx+1);
+ src_offset <<= 8;
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ dst_offset = radeon_get_ib_value(p, idx + 7);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
} else {
- switch (misc) {
- case 0:
- /* detile bit */
- if (idx_value & (1 << 31)) {
- /* tiled src, linear dst */
- src_offset = radeon_get_ib_value(p, idx+1);
- src_offset <<= 8;
- ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
-
- dst_offset = radeon_get_ib_value(p, idx+7);
- dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
- ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- } else {
- /* linear src, tiled dst */
- src_offset = radeon_get_ib_value(p, idx+7);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
- ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset <<= 8;
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- }
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- p->idx += 9;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
- return -EINVAL;
- }
+ /* linear src, tiled dst */
+ src_offset = radeon_get_ib_value(p, idx+7);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
}
- } else {
- if (new_cmd) {
- switch (misc) {
- case 0:
- /* L2L, byte */
- src_offset = radeon_get_ib_value(p, idx+2);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
- if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
- src_offset + count, radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
- dst_offset + count, radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- p->idx += 5;
- break;
- case 1:
- /* L2L, partial */
- if (p->family < CHIP_CAYMAN) {
- DRM_ERROR("L2L Partial is cayman only !\n");
- return -EINVAL;
- }
- ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
-
- p->idx += 9;
- break;
- case 4:
- /* L2L, dw, broadcast */
- r = r600_dma_cs_next_reloc(p, &dst2_reloc);
- if (r) {
- DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
- dst2_offset = radeon_get_ib_value(p, idx+2);
- dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
- src_offset = radeon_get_ib_value(p, idx+3);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- p->idx += 7;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
- return -EINVAL;
- }
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ p->idx += 9;
+ break;
+ /* Copy L2L, byte aligned */
+ case 0x40:
+ /* L2L, byte */
+ src_offset = radeon_get_ib_value(p, idx+2);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+ if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
+ src_offset + count, radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
+ dst_offset + count, radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 5;
+ break;
+ /* Copy L2L, partial */
+ case 0x41:
+ /* L2L, partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("L2L Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+
+ p->idx += 9;
+ break;
+ /* Copy L2L, DW aligned, broadcast */
+ case 0x44:
+ /* L2L, dw, broadcast */
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
+ src_offset = radeon_get_ib_value(p, idx+3);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 7;
+ break;
+ /* Copy L2T Frame to Field */
+ case 0x48:
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset <<= 8;
+ src_offset = radeon_get_ib_value(p, idx+8);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ /* Copy L2T/T2L, partial */
+ case 0x49:
+ /* L2T, T2L partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ /* detile bit */
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ /* tiled src, linear dst */
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ } else {
+ /* linear src, tiled dst */
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ }
+ p->idx += 12;
+ break;
+ /* Copy L2T broadcast */
+ case 0x4b:
+ /* L2T, broadcast */
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset <<= 8;
+ src_offset = radeon_get_ib_value(p, idx+8);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ /* Copy L2T/T2L (tile units) */
+ case 0x4c:
+ /* L2T, T2L */
+ /* detile bit */
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ /* tiled src, linear dst */
+ src_offset = radeon_get_ib_value(p, idx+1);
+ src_offset <<= 8;
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ dst_offset = radeon_get_ib_value(p, idx+7);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
} else {
- /* L2L, dw */
- src_offset = radeon_get_ib_value(p, idx+2);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ /* linear src, tiled dst */
+ src_offset = radeon_get_ib_value(p, idx+7);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- p->idx += 5;
+ dst_offset <<= 8;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
}
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ p->idx += 9;
+ break;
+ /* Copy T2T, partial (tile units) */
+ case 0x4d:
+ /* T2T partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+ ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ p->idx += 13;
+ break;
+ /* Copy L2T broadcast (tile units) */
+ case 0x4f:
+ /* L2T, broadcast */
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset <<= 8;
+ src_offset = radeon_get_ib_value(p, idx+8);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, header);
+ return -EINVAL;
}
break;
case DMA_PACKET_CONSTANT_FILL:
@@ -3583,19 +3383,19 @@ int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
do {
pkt.idx = idx;
- pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
- pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
+ pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
+ pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
pkt.one_reg_wr = 0;
switch (pkt.type) {
- case PACKET_TYPE0:
+ case RADEON_PACKET_TYPE0:
dev_err(rdev->dev, "Packet0 not allowed!\n");
ret = -EINVAL;
break;
- case PACKET_TYPE2:
+ case RADEON_PACKET_TYPE2:
idx += 1;
break;
- case PACKET_TYPE3:
- pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
+ case RADEON_PACKET_TYPE3:
+ pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
idx += pkt.count + 2;
break;
@@ -3623,88 +3423,79 @@ int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
{
u32 idx = 0;
- u32 header, cmd, count, tiled, new_cmd, misc;
+ u32 header, cmd, count, sub_cmd;
do {
header = ib->ptr[idx];
cmd = GET_DMA_CMD(header);
count = GET_DMA_COUNT(header);
- tiled = GET_DMA_T(header);
- new_cmd = GET_DMA_NEW(header);
- misc = GET_DMA_MISC(header);
+ sub_cmd = GET_DMA_SUB_CMD(header);
switch (cmd) {
case DMA_PACKET_WRITE:
- if (tiled)
+ switch (sub_cmd) {
+ /* tiled */
+ case 8:
idx += count + 7;
- else
+ break;
+ /* linear */
+ case 0:
idx += count + 3;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, ib->ptr[idx]);
+ return -EINVAL;
+ }
break;
case DMA_PACKET_COPY:
- if (tiled) {
- if (new_cmd) {
- switch (misc) {
- case 0:
- /* L2T, frame to fields */
- idx += 10;
- break;
- case 1:
- /* L2T, T2L partial */
- idx += 12;
- break;
- case 3:
- /* L2T, broadcast */
- idx += 10;
- break;
- case 4:
- /* L2T, T2L */
- idx += 9;
- break;
- case 5:
- /* T2T partial */
- idx += 13;
- break;
- case 7:
- /* L2T, broadcast */
- idx += 10;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
- return -EINVAL;
- }
- } else {
- switch (misc) {
- case 0:
- idx += 9;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
- return -EINVAL;
- }
- }
- } else {
- if (new_cmd) {
- switch (misc) {
- case 0:
- /* L2L, byte */
- idx += 5;
- break;
- case 1:
- /* L2L, partial */
- idx += 9;
- break;
- case 4:
- /* L2L, dw, broadcast */
- idx += 7;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
- return -EINVAL;
- }
- } else {
- /* L2L, dw */
- idx += 5;
- }
+ switch (sub_cmd) {
+ /* Copy L2L, DW aligned */
+ case 0x00:
+ idx += 5;
+ break;
+ /* Copy L2T/T2L */
+ case 0x08:
+ idx += 9;
+ break;
+ /* Copy L2L, byte aligned */
+ case 0x40:
+ idx += 5;
+ break;
+ /* Copy L2L, partial */
+ case 0x41:
+ idx += 9;
+ break;
+ /* Copy L2L, DW aligned, broadcast */
+ case 0x44:
+ idx += 7;
+ break;
+ /* Copy L2T Frame to Field */
+ case 0x48:
+ idx += 10;
+ break;
+ /* Copy L2T/T2L, partial */
+ case 0x49:
+ idx += 12;
+ break;
+ /* Copy L2T broadcast */
+ case 0x4b:
+ idx += 10;
+ break;
+ /* Copy L2T/T2L (tile units) */
+ case 0x4c:
+ idx += 9;
+ break;
+ /* Copy T2T, partial (tile units) */
+ case 0x4d:
+ idx += 13;
+ break;
+ /* Copy L2T broadcast (tile units) */
+ case 0x4f:
+ idx += 10;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, ib->ptr[idx]);
+ return -EINVAL;
}
break;
case DMA_PACKET_CONSTANT_FILL:
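
The rework above replaces the old tiled/new_cmd/misc triple with a single sub_cmd
field taken from bits 27:20 of the async-DMA packet header, matching the GET_DMA_*
macros added to evergreend.h further down in this diff. A minimal, illustrative
decode of that layout (the struct and helper name below are invented for the
sketch and are not part of the patch):

#include <stdint.h>

struct dma_header_fields {
	uint32_t cmd;      /* bits 31:28: packet type (WRITE, COPY, CONSTANT_FILL, ...) */
	uint32_t sub_cmd;  /* bits 27:20: replaces the old tiled/new_cmd/misc bits */
	uint32_t count;    /* bits 19:0:  dword count */
};

static struct dma_header_fields decode_dma_header(uint32_t header)
{
	struct dma_header_fields f = {
		.cmd     = (header & 0xf0000000) >> 28,
		.sub_cmd = (header & 0x0ff00000) >> 20,
		.count   =  header & 0x000fffff,
	};
	return f;
}

With this split, the DMA_PACKET_COPY sub-commands 0x00, 0x08, 0x40, 0x41, 0x44,
0x48, 0x49, 0x4b, 0x4c, 0x4d and 0x4f map one-to-one onto the switch cases in
evergreen_dma_cs_parse() and evergreen_dma_ib_parse() above.
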
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 327c08b5418..4fdecc2b404 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -24,6 +24,7 @@
* Authors: Christian König
* Rafał Miłecki
*/
+#include <linux/hdmi.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
@@ -54,79 +55,18 @@ static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t cloc
}
/*
- * calculate the crc for a given info frame
- */
-static void evergreen_hdmi_infoframe_checksum(uint8_t packetType,
- uint8_t versionNumber,
- uint8_t length,
- uint8_t *frame)
-{
- int i;
- frame[0] = packetType + versionNumber + length;
- for (i = 1; i <= length; i++)
- frame[0] += frame[i];
- frame[0] = 0x100 - frame[0];
-}
-
-/*
* build a HDMI Video Info Frame
*/
-static void evergreen_hdmi_videoinfoframe(
- struct drm_encoder *encoder,
- uint8_t color_format,
- int active_information_present,
- uint8_t active_format_aspect_ratio,
- uint8_t scan_information,
- uint8_t colorimetry,
- uint8_t ex_colorimetry,
- uint8_t quantization,
- int ITC,
- uint8_t picture_aspect_ratio,
- uint8_t video_format_identification,
- uint8_t pixel_repetition,
- uint8_t non_uniform_picture_scaling,
- uint8_t bar_info_data_valid,
- uint16_t top_bar,
- uint16_t bottom_bar,
- uint16_t left_bar,
- uint16_t right_bar
-)
+static void evergreen_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
+ void *buffer, size_t size)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
+ uint8_t *frame = buffer + 3;
- uint8_t frame[14];
-
- frame[0x0] = 0;
- frame[0x1] =
- (scan_information & 0x3) |
- ((bar_info_data_valid & 0x3) << 2) |
- ((active_information_present & 0x1) << 4) |
- ((color_format & 0x3) << 5);
- frame[0x2] =
- (active_format_aspect_ratio & 0xF) |
- ((picture_aspect_ratio & 0x3) << 4) |
- ((colorimetry & 0x3) << 6);
- frame[0x3] =
- (non_uniform_picture_scaling & 0x3) |
- ((quantization & 0x3) << 2) |
- ((ex_colorimetry & 0x7) << 4) |
- ((ITC & 0x1) << 7);
- frame[0x4] = (video_format_identification & 0x7F);
- frame[0x5] = (pixel_repetition & 0xF);
- frame[0x6] = (top_bar & 0xFF);
- frame[0x7] = (top_bar >> 8);
- frame[0x8] = (bottom_bar & 0xFF);
- frame[0x9] = (bottom_bar >> 8);
- frame[0xA] = (left_bar & 0xFF);
- frame[0xB] = (left_bar >> 8);
- frame[0xC] = (right_bar & 0xFF);
- frame[0xD] = (right_bar >> 8);
-
- evergreen_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
/* Our header values (type, version, length) should be alright, Intel
* is using the same. Checksum function also seems to be OK, it works
* fine for audio infoframe. However calculated value is always lower
@@ -154,7 +94,10 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+ struct hdmi_avi_infoframe frame;
uint32_t offset;
+ ssize_t err;
/* Silent, r600_hdmi_enable will raise WARN for us */
if (!dig->afmt->enabled)
@@ -200,9 +143,19 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
WREG32(HDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
- evergreen_hdmi_videoinfoframe(encoder, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ if (err < 0) {
+ DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
+ return;
+ }
+
+ err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
+ return;
+ }
+ evergreen_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
evergreen_hdmi_update_ACR(encoder, mode->clock);
/* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
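
The evergreen_hdmi.c hunks above drop the hand-rolled infoframe builder and
checksum helper in favour of the generic AVI infoframe helpers from
<linux/hdmi.h> plus drm_hdmi_avi_infoframe_from_display_mode(). A condensed
sketch of the resulting flow inside evergreen_hdmi_setmode(), with the error
messages omitted; it only restates calls already present in the hunk:

	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	struct hdmi_avi_infoframe frame;
	ssize_t err;

	/* fill the frame from the current display mode */
	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
	if (err < 0)
		return;

	/* pack header, payload and checksum into buffer */
	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
	if (err < 0)
		return;

	/* hand the packed bytes to the hardware-specific writer, which starts
	 * reading at buffer + 3, past the type/version/length header bytes */
	evergreen_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
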
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 034f4c22e5d..f585be16e2d 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -223,6 +223,7 @@
#define EVERGREEN_CRTC_STATUS 0x6e8c
# define EVERGREEN_CRTC_V_BLANK (1 << 0)
#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
+#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0
#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 0bfd0e9e469..982d25ad9af 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -729,6 +729,18 @@
#define WAIT_UNTIL 0x8040
#define SRBM_STATUS 0x0E50
+#define RLC_RQ_PENDING (1 << 3)
+#define GRBM_RQ_PENDING (1 << 5)
+#define VMC_BUSY (1 << 8)
+#define MCB_BUSY (1 << 9)
+#define MCB_NON_DISPLAY_BUSY (1 << 10)
+#define MCC_BUSY (1 << 11)
+#define MCD_BUSY (1 << 12)
+#define SEM_BUSY (1 << 14)
+#define RLC_BUSY (1 << 15)
+#define IH_BUSY (1 << 17)
+#define SRBM_STATUS2 0x0EC4
+#define DMA_BUSY (1 << 5)
#define SRBM_SOFT_RESET 0x0E60
#define SRBM_SOFT_RESET_ALL_MASK 0x00FEEFA6
#define SOFT_RESET_BIF (1 << 1)
@@ -924,20 +936,23 @@
#define CAYMAN_DMA1_CNTL 0xd82c
/* async DMA packets */
-#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
- (((t) & 0x1) << 23) | \
- (((s) & 0x1) << 22) | \
- (((n) & 0xFFFFF) << 0))
+#define DMA_PACKET(cmd, sub_cmd, n) ((((cmd) & 0xF) << 28) | \
+ (((sub_cmd) & 0xFF) << 20) |\
+ (((n) & 0xFFFFF) << 0))
+#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h) ((h) & 0x000fffff)
+#define GET_DMA_SUB_CMD(h) (((h) & 0x0ff00000) >> 20)
+
/* async DMA Packet types */
-#define DMA_PACKET_WRITE 0x2
-#define DMA_PACKET_COPY 0x3
-#define DMA_PACKET_INDIRECT_BUFFER 0x4
-#define DMA_PACKET_SEMAPHORE 0x5
-#define DMA_PACKET_FENCE 0x6
-#define DMA_PACKET_TRAP 0x7
-#define DMA_PACKET_SRBM_WRITE 0x9
-#define DMA_PACKET_CONSTANT_FILL 0xd
-#define DMA_PACKET_NOP 0xf
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_SRBM_WRITE 0x9
+#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_NOP 0xf
/* PCIE link stuff */
#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
@@ -980,16 +995,7 @@
/*
* PM4
*/
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
+#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
(((reg) >> 2) & 0xFFFF) | \
((n) & 0x3FFF) << 16)
#define CP_PACKET2 0x80000000
@@ -998,7 +1004,7 @@
#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
-#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
(((op) & 0xFF) << 8) | \
((n) & 0x3FFF) << 16)
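
With the per-file PACKET_TYPE* and CP_PACKET_GET_* defines deleted, PACKET0()
and PACKET3() here are built on the shared RADEON_PACKET_TYPE* constants. A
worked example of the bit packing, assuming RADEON_PACKET_TYPE3 keeps the value
3 of the define it replaces (the opcode 0x3D is arbitrary, chosen only to show
the layout):

	u32 hdr = PACKET3(0x3D, 3);
	/*     = (3 << 30) | ((0x3D & 0xFF) << 8) | ((3 & 0x3FFF) << 16)
	 *     = 0xC0000000 | 0x00003D00 | 0x00030000
	 *     = 0xC0033D00
	 */
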
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 835992d8d06..7cead763be9 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -34,6 +34,8 @@
#include "ni_reg.h"
#include "cayman_blit_shaders.h"
+extern bool evergreen_is_display_hung(struct radeon_device *rdev);
+extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
@@ -1310,120 +1312,90 @@ void cayman_dma_fini(struct radeon_device *rdev)
radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
}
-static void cayman_gpu_soft_reset_gfx(struct radeon_device *rdev)
+static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
{
- u32 grbm_reset = 0;
+ u32 reset_mask = 0;
+ u32 tmp;
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return;
+ /* GRBM_STATUS */
+ tmp = RREG32(GRBM_STATUS);
+ if (tmp & (PA_BUSY | SC_BUSY |
+ SH_BUSY | SX_BUSY |
+ TA_BUSY | VGT_BUSY |
+ DB_BUSY | CB_BUSY |
+ GDS_BUSY | SPI_BUSY |
+ IA_BUSY | IA_BUSY_NO_DMA))
+ reset_mask |= RADEON_RESET_GFX;
- dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
- RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
- RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
- RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
- RREG32(SRBM_STATUS));
- dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
- RREG32(CP_STALLED_STAT1));
- dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
- RREG32(CP_STALLED_STAT2));
- dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
- RREG32(CP_BUSY_STAT));
- dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
- RREG32(CP_STAT));
+ if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
+ CP_BUSY | CP_COHERENCY_BUSY))
+ reset_mask |= RADEON_RESET_CP;
- /* Disable CP parsing/prefetching */
- WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+ if (tmp & GRBM_EE_BUSY)
+ reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
- /* reset all the gfx blocks */
- grbm_reset = (SOFT_RESET_CP |
- SOFT_RESET_CB |
- SOFT_RESET_DB |
- SOFT_RESET_GDS |
- SOFT_RESET_PA |
- SOFT_RESET_SC |
- SOFT_RESET_SPI |
- SOFT_RESET_SH |
- SOFT_RESET_SX |
- SOFT_RESET_TC |
- SOFT_RESET_TA |
- SOFT_RESET_VGT |
- SOFT_RESET_IA);
-
- dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
- WREG32(GRBM_SOFT_RESET, grbm_reset);
- (void)RREG32(GRBM_SOFT_RESET);
- udelay(50);
- WREG32(GRBM_SOFT_RESET, 0);
- (void)RREG32(GRBM_SOFT_RESET);
-
- dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
- RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
- RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
- RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
- RREG32(SRBM_STATUS));
- dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
- RREG32(CP_STALLED_STAT1));
- dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
- RREG32(CP_STALLED_STAT2));
- dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
- RREG32(CP_BUSY_STAT));
- dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
- RREG32(CP_STAT));
+ /* DMA_STATUS_REG 0 */
+ tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA;
-}
+ /* DMA_STATUS_REG 1 */
+ tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA1;
-static void cayman_gpu_soft_reset_dma(struct radeon_device *rdev)
-{
- u32 tmp;
+ /* SRBM_STATUS2 */
+ tmp = RREG32(SRBM_STATUS2);
+ if (tmp & DMA_BUSY)
+ reset_mask |= RADEON_RESET_DMA;
- if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
- return;
+ if (tmp & DMA1_BUSY)
+ reset_mask |= RADEON_RESET_DMA1;
- dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
+ /* SRBM_STATUS */
+ tmp = RREG32(SRBM_STATUS);
+ if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
+ reset_mask |= RADEON_RESET_RLC;
- /* dma0 */
- tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
- tmp &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+ if (tmp & IH_BUSY)
+ reset_mask |= RADEON_RESET_IH;
- /* dma1 */
- tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
- tmp &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+ if (tmp & SEM_BUSY)
+ reset_mask |= RADEON_RESET_SEM;
- /* Reset dma */
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
+ if (tmp & GRBM_RQ_PENDING)
+ reset_mask |= RADEON_RESET_GRBM;
+
+ if (tmp & VMC_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
+
+ if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
+ MCC_BUSY | MCD_BUSY))
+ reset_mask |= RADEON_RESET_MC;
- dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
+ if (evergreen_is_display_hung(rdev))
+ reset_mask |= RADEON_RESET_DISPLAY;
+ /* VM_L2_STATUS */
+ tmp = RREG32(VM_L2_STATUS);
+ if (tmp & L2_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
+
+ return reset_mask;
}
-static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+static void cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
struct evergreen_mc_save save;
-
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
-
- if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
- reset_mask &= ~RADEON_RESET_DMA;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 tmp;
if (reset_mask == 0)
- return 0;
+ return;
dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+ evergreen_print_gpu_status_regs(rdev);
dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
RREG32(0x14F8));
dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
@@ -1433,29 +1405,158 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
RREG32(0x14DC));
+ /* Disable CP parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+ if (reset_mask & RADEON_RESET_DMA) {
+ /* dma0 */
+ tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+ }
+
+ if (reset_mask & RADEON_RESET_DMA1) {
+ /* dma1 */
+ tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+ }
+
+ udelay(50);
+
evergreen_mc_stop(rdev, &save);
if (evergreen_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
- if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
- cayman_gpu_soft_reset_gfx(rdev);
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
+ grbm_soft_reset = SOFT_RESET_CB |
+ SOFT_RESET_DB |
+ SOFT_RESET_GDS |
+ SOFT_RESET_PA |
+ SOFT_RESET_SC |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SH |
+ SOFT_RESET_SX |
+ SOFT_RESET_TC |
+ SOFT_RESET_TA |
+ SOFT_RESET_VGT |
+ SOFT_RESET_IA;
+ }
+
+ if (reset_mask & RADEON_RESET_CP) {
+ grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
+
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+ }
if (reset_mask & RADEON_RESET_DMA)
- cayman_gpu_soft_reset_dma(rdev);
+ srbm_soft_reset |= SOFT_RESET_DMA;
+
+ if (reset_mask & RADEON_RESET_DMA1)
+ srbm_soft_reset |= SOFT_RESET_DMA1;
+
+ if (reset_mask & RADEON_RESET_DISPLAY)
+ srbm_soft_reset |= SOFT_RESET_DC;
+
+ if (reset_mask & RADEON_RESET_RLC)
+ srbm_soft_reset |= SOFT_RESET_RLC;
+
+ if (reset_mask & RADEON_RESET_SEM)
+ srbm_soft_reset |= SOFT_RESET_SEM;
+
+ if (reset_mask & RADEON_RESET_IH)
+ srbm_soft_reset |= SOFT_RESET_IH;
+
+ if (reset_mask & RADEON_RESET_GRBM)
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+
+ if (reset_mask & RADEON_RESET_VMC)
+ srbm_soft_reset |= SOFT_RESET_VMC;
+
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (reset_mask & RADEON_RESET_MC)
+ srbm_soft_reset |= SOFT_RESET_MC;
+ }
+
+ if (grbm_soft_reset) {
+ tmp = RREG32(GRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+ }
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(SRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+ }
/* Wait a little for things to settle down */
udelay(50);
evergreen_mc_resume(rdev, &save);
- return 0;
+ udelay(50);
+
+ evergreen_print_gpu_status_regs(rdev);
}
int cayman_asic_reset(struct radeon_device *rdev)
{
- return cayman_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
- RADEON_RESET_COMPUTE |
- RADEON_RESET_DMA));
+ u32 reset_mask;
+
+ reset_mask = cayman_gpu_check_soft_reset(rdev);
+
+ if (reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, true);
+
+ cayman_gpu_soft_reset(rdev, reset_mask);
+
+ reset_mask = cayman_gpu_check_soft_reset(rdev);
+
+ if (!reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, false);
+
+ return 0;
+}
+
+/**
+ * cayman_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_CP))) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force CP activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
/**
@@ -1464,18 +1565,20 @@ int cayman_asic_reset(struct radeon_device *rdev)
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
- * Check if the async DMA engine is locked up (cayman-SI).
+ * Check if the async DMA engine is locked up.
* Returns true if the engine appears to be locked up, false if not.
*/
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
- u32 dma_status_reg;
+ u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
+ u32 mask;
if (ring->idx == R600_RING_TYPE_DMA_INDEX)
- dma_status_reg = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+ mask = RADEON_RESET_DMA;
else
- dma_status_reg = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
- if (dma_status_reg & DMA_IDLE) {
+ mask = RADEON_RESET_DMA1;
+
+ if (!(reset_mask & mask)) {
radeon_ring_lockup_update(ring);
return false;
}
@@ -1843,19 +1946,21 @@ uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
* cayman_vm_set_page - update the page tables using the CP
*
* @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
* @flags: access flags
*
- * Update the page tables using the CP (cayman-si).
+ * Update the page tables using the CP (cayman/TN).
*/
-void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+void cayman_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
- struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
uint64_t value;
unsigned ndw;
@@ -1866,9 +1971,9 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
if (ndw > 0x3FFF)
ndw = 0x3FFF;
- radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ ib->ptr[ib->length_dw++] = PACKET3(PACKET3_ME_WRITE, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
for (; ndw > 1; ndw -= 2, --count, pe += 8) {
if (flags & RADEON_VM_PAGE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
@@ -1880,8 +1985,8 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
}
addr += incr;
value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
}
} else {
@@ -1891,9 +1996,9 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
ndw = 0xFFFFE;
/* for non-physically contiguous pages (system) */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & RADEON_VM_PAGE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
@@ -1905,10 +2010,12 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
}
addr += incr;
value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
}
+ while (ib->length_dw & 0x7)
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
}
}
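
[Editor's note] The tail of the DMA path in cayman_vm_set_page() above pads ib->length_dw up to a multiple of 8 dwords with NOP packets before the IB is submitted. A minimal sketch of just that padding step follows; DMA_NOP is a placeholder dword standing in for the real DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0) encoding, and the buffer sizes are arbitrary.

/* Minimal sketch of the 8-dword NOP padding at the end of the DMA path
 * in cayman_vm_set_page() above. DMA_NOP is a placeholder value; the
 * real encoding comes from DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0).
 */
#include <stdint.h>
#include <stdio.h>

#define DMA_NOP 0x00000000u	/* illustrative NOP dword, not the real encoding */

struct ib_sketch {
	uint32_t ptr[64];
	unsigned int length_dw;
};

static void pad_ib_to_8_dwords(struct ib_sketch *ib)
{
	/* Pad with NOPs until length_dw is a multiple of 8, which is the
	 * alignment the hunk above enforces for the async DMA IB. */
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = DMA_NOP;
}

int main(void)
{
	struct ib_sketch ib = { .length_dw = 13 };

	pad_ib_to_8_dwords(&ib);
	printf("padded length: %u dwords\n", ib.length_dw);	/* prints 16 */
	return 0;
}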
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 48e5022ee92..079dee202a9 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -49,6 +49,16 @@
#define RINGID(x) (((x) & 0x3) << 0)
#define VMID(x) (((x) & 0x7) << 0)
#define SRBM_STATUS 0x0E50
+#define RLC_RQ_PENDING (1 << 3)
+#define GRBM_RQ_PENDING (1 << 5)
+#define VMC_BUSY (1 << 8)
+#define MCB_BUSY (1 << 9)
+#define MCB_NON_DISPLAY_BUSY (1 << 10)
+#define MCC_BUSY (1 << 11)
+#define MCD_BUSY (1 << 12)
+#define SEM_BUSY (1 << 14)
+#define RLC_BUSY (1 << 15)
+#define IH_BUSY (1 << 17)
#define SRBM_SOFT_RESET 0x0E60
#define SOFT_RESET_BIF (1 << 1)
@@ -68,6 +78,10 @@
#define SOFT_RESET_REGBB (1 << 22)
#define SOFT_RESET_ORB (1 << 23)
+#define SRBM_STATUS2 0x0EC4
+#define DMA_BUSY (1 << 5)
+#define DMA1_BUSY (1 << 6)
+
#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
#define RESPONSE_TYPE_MASK 0x000000F0
@@ -474,16 +488,7 @@
/*
* PM4
*/
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
+#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
(((reg) >> 2) & 0xFFFF) | \
((n) & 0x3FFF) << 16)
#define CP_PACKET2 0x80000000
@@ -492,7 +497,7 @@
#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
-#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
(((op) & 0xFF) << 8) | \
((n) & 0x3FFF) << 16)
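
[Editor's note] The hunk above drops the per-file PACKET_TYPE*/CP_PACKET_GET_* definitions in favour of shared RADEON_PACKET_TYPE* constants, but the PM4 header layout itself is unchanged: type in bits 30-31, count in bits 16-29, and a register dword offset (type 0) or opcode (type 3) in the low bits. The standalone sketch below uses exactly the shifts and masks visible in this hunk; the packet contents are arbitrary.

/* Standalone sketch of the PM4 header layout used by the PACKET0()/
 * PACKET3() macros above: type in bits 30-31, count in bits 16-29,
 * register (dword offset) or opcode in the low bits.
 */
#include <stdint.h>
#include <stdio.h>

#define PKT_TYPE0 0u
#define PKT_TYPE3 3u

static uint32_t pkt0(uint32_t reg, uint32_t n)	/* mirrors PACKET0(reg, n) */
{
	return (PKT_TYPE0 << 30) | ((reg >> 2) & 0xFFFF) | ((n & 0x3FFF) << 16);
}

static uint32_t pkt3(uint32_t op, uint32_t n)	/* mirrors PACKET3(op, n) */
{
	return (PKT_TYPE3 << 30) | ((op & 0xFF) << 8) | ((n & 0x3FFF) << 16);
}

int main(void)
{
	uint32_t h = pkt3(0x10 /* arbitrary opcode */, 2);

	/* Decode with the same shifts the removed CP_PACKET_GET_* macros used. */
	printf("type=%u opcode=0x%02X count=%u\n",
	       (h >> 30) & 3, (h >> 8) & 0xFF, (h >> 16) & 0x3FFF);
	printf("PACKET0 header for reg 0x1234, n=0: 0x%08X\n", pkt0(0x1234, 0));
	return 0;
}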
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 8ff7cac222d..9db58530be3 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1215,11 +1215,11 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
struct radeon_cs_reloc *reloc;
u32 value;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1233,7 +1233,7 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
if (reg == RADEON_SRC_PITCH_OFFSET) {
DRM_ERROR("Cannot src blit from microtiled surface\n");
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return -EINVAL;
}
tile_flags |= RADEON_DST_TILE_MICRO;
@@ -1263,16 +1263,16 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
if (c > 16) {
DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return -EINVAL;
}
track->num_arrays = c;
for (i = 0; i < (c - 1); i+=2, idx+=3) {
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
idx_value = radeon_get_ib_value(p, idx);
@@ -1281,11 +1281,11 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
track->arrays[i + 0].esize = idx_value >> 8;
track->arrays[i + 0].robj = reloc->robj;
track->arrays[i + 0].esize &= 0x7F;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
@@ -1294,11 +1294,11 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
track->arrays[i + 1].esize &= 0x7F;
}
if (c & 1) {
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
idx_value = radeon_get_ib_value(p, idx);
@@ -1355,67 +1355,6 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p,
return 0;
}
-void r100_cs_dump_packet(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt)
-{
- volatile uint32_t *ib;
- unsigned i;
- unsigned idx;
-
- ib = p->ib.ptr;
- idx = pkt->idx;
- for (i = 0; i <= (pkt->count + 1); i++, idx++) {
- DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
- }
-}
-
-/**
- * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
- * @parser: parser structure holding parsing context.
- * @pkt: where to store packet informations
- *
- * Assume that chunk_ib_index is properly set. Will return -EINVAL
- * if packet is bigger than remaining ib size. or if packets is unknown.
- **/
-int r100_cs_packet_parse(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- unsigned idx)
-{
- struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
- uint32_t header;
-
- if (idx >= ib_chunk->length_dw) {
- DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
- idx, ib_chunk->length_dw);
- return -EINVAL;
- }
- header = radeon_get_ib_value(p, idx);
- pkt->idx = idx;
- pkt->type = CP_PACKET_GET_TYPE(header);
- pkt->count = CP_PACKET_GET_COUNT(header);
- switch (pkt->type) {
- case PACKET_TYPE0:
- pkt->reg = CP_PACKET0_GET_REG(header);
- pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
- break;
- case PACKET_TYPE3:
- pkt->opcode = CP_PACKET3_GET_OPCODE(header);
- break;
- case PACKET_TYPE2:
- pkt->count = -1;
- break;
- default:
- DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
- return -EINVAL;
- }
- if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
- DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
- pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
- return -EINVAL;
- }
- return 0;
-}
-
/**
* r100_cs_packet_next_vline() - parse userspace VLINE packet
* @parser: parser structure holding parsing context.
@@ -1444,7 +1383,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
ib = p->ib.ptr;
/* parse the wait until */
- r = r100_cs_packet_parse(p, &waitreloc, p->idx);
+ r = radeon_cs_packet_parse(p, &waitreloc, p->idx);
if (r)
return r;
@@ -1461,7 +1400,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
}
/* jump over the NOP */
- r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
+ r = radeon_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
if (r)
return r;
@@ -1471,7 +1410,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 5);
- reg = CP_PACKET0_GET_REG(header);
+ reg = R100_CP_PACKET0_GET_REG(header);
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
@@ -1506,54 +1445,6 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
return 0;
}
-/**
- * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
- * @parser: parser structure holding parsing context.
- * @data: pointer to relocation data
- * @offset_start: starting offset
- * @offset_mask: offset mask (to align start offset on)
- * @reloc: reloc informations
- *
- * Check next packet is relocation packet3, do bo validation and compute
- * GPU offset using the provided start.
- **/
-int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc)
-{
- struct radeon_cs_chunk *relocs_chunk;
- struct radeon_cs_packet p3reloc;
- unsigned idx;
- int r;
-
- if (p->chunk_relocs_idx == -1) {
- DRM_ERROR("No relocation chunk !\n");
- return -EINVAL;
- }
- *cs_reloc = NULL;
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
- r = r100_cs_packet_parse(p, &p3reloc, p->idx);
- if (r) {
- return r;
- }
- p->idx += p3reloc.count + 2;
- if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
- DRM_ERROR("No packet3 for relocation for packet at %d.\n",
- p3reloc.idx);
- r100_cs_dump_packet(p, &p3reloc);
- return -EINVAL;
- }
- idx = radeon_get_ib_value(p, p3reloc.idx + 1);
- if (idx >= relocs_chunk->length_dw) {
- DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
- idx, relocs_chunk->length_dw);
- r100_cs_dump_packet(p, &p3reloc);
- return -EINVAL;
- }
- /* FIXME: we assume reloc size is 4 dwords */
- *cs_reloc = p->relocs_ptr[(idx / 4)];
- return 0;
-}
-
static int r100_get_vtx_size(uint32_t vtx_fmt)
{
int vtx_size;
@@ -1631,7 +1522,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
break;
@@ -1644,11 +1535,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
return r;
break;
case RADEON_RB3D_DEPTHOFFSET:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->zb.robj = reloc->robj;
@@ -1657,11 +1548,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_RB3D_COLOROFFSET:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->cb[0].robj = reloc->robj;
@@ -1673,11 +1564,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_PP_TXOFFSET_1:
case RADEON_PP_TXOFFSET_2:
i = (reg - RADEON_PP_TXOFFSET_0) / 24;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -1700,11 +1591,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_PP_CUBIC_OFFSET_T0_3:
case RADEON_PP_CUBIC_OFFSET_T0_4:
i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->textures[0].cube_info[i].offset = idx_value;
@@ -1718,11 +1609,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_PP_CUBIC_OFFSET_T1_3:
case RADEON_PP_CUBIC_OFFSET_T1_4:
i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->textures[1].cube_info[i].offset = idx_value;
@@ -1736,11 +1627,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_PP_CUBIC_OFFSET_T2_3:
case RADEON_PP_CUBIC_OFFSET_T2_4:
i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->textures[2].cube_info[i].offset = idx_value;
@@ -1754,11 +1645,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->zb_dirty = true;
break;
case RADEON_RB3D_COLORPITCH:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -1825,11 +1716,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->zb_dirty = true;
break;
case RADEON_RB3D_ZPASS_ADDR:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
@@ -1986,10 +1877,10 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
return r;
break;
case PACKET3_INDX_BUFFER:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
@@ -2000,10 +1891,10 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
break;
case 0x23:
/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
@@ -2100,37 +1991,36 @@ int r100_cs_parse(struct radeon_cs_parser *p)
r100_cs_track_clear(p->rdev, track);
p->track = track;
do {
- r = r100_cs_packet_parse(p, &pkt, p->idx);
+ r = radeon_cs_packet_parse(p, &pkt, p->idx);
if (r) {
return r;
}
p->idx += pkt.count + 2;
switch (pkt.type) {
- case PACKET_TYPE0:
- if (p->rdev->family >= CHIP_R200)
- r = r100_cs_parse_packet0(p, &pkt,
- p->rdev->config.r100.reg_safe_bm,
- p->rdev->config.r100.reg_safe_bm_size,
- &r200_packet0_check);
- else
- r = r100_cs_parse_packet0(p, &pkt,
- p->rdev->config.r100.reg_safe_bm,
- p->rdev->config.r100.reg_safe_bm_size,
- &r100_packet0_check);
- break;
- case PACKET_TYPE2:
- break;
- case PACKET_TYPE3:
- r = r100_packet3_check(p, &pkt);
- break;
- default:
- DRM_ERROR("Unknown packet type %d !\n",
- pkt.type);
- return -EINVAL;
+ case RADEON_PACKET_TYPE0:
+ if (p->rdev->family >= CHIP_R200)
+ r = r100_cs_parse_packet0(p, &pkt,
+ p->rdev->config.r100.reg_safe_bm,
+ p->rdev->config.r100.reg_safe_bm_size,
+ &r200_packet0_check);
+ else
+ r = r100_cs_parse_packet0(p, &pkt,
+ p->rdev->config.r100.reg_safe_bm,
+ p->rdev->config.r100.reg_safe_bm_size,
+ &r100_packet0_check);
+ break;
+ case RADEON_PACKET_TYPE2:
+ break;
+ case RADEON_PACKET_TYPE3:
+ r = r100_packet3_check(p, &pkt);
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d !\n",
+ pkt.type);
+ return -EINVAL;
}
- if (r) {
+ if (r)
return r;
- }
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index 6a603b378ad..eb40888bdfc 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -81,10 +81,6 @@ struct r100_cs_track {
int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track);
-int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc);
-void r100_cs_dump_packet(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt);
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
index eab91760fae..f0f8ee69f48 100644
--- a/drivers/gpu/drm/radeon/r100d.h
+++ b/drivers/gpu/drm/radeon/r100d.h
@@ -64,17 +64,6 @@
REG_SET(PACKET3_IT_OPCODE, (op)) | \
REG_SET(PACKET3_COUNT, (n)))
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
-#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-
/* Registers */
#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 98143a5c5b7..b3807edb193 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -162,7 +162,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
break;
@@ -175,11 +175,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
return r;
break;
case RADEON_RB3D_DEPTHOFFSET:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->zb.robj = reloc->robj;
@@ -188,11 +188,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_RB3D_COLOROFFSET:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->cb[0].robj = reloc->robj;
@@ -207,11 +207,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_PP_TXOFFSET_4:
case R200_PP_TXOFFSET_5:
i = (reg - R200_PP_TXOFFSET_0) / 24;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -260,11 +260,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_PP_CUBIC_OFFSET_F5_5:
i = (reg - R200_PP_TXOFFSET_0) / 24;
face = (reg - ((i * 24) + R200_PP_TXOFFSET_0)) / 4;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->textures[i].cube_info[face - 1].offset = idx_value;
@@ -278,11 +278,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->zb_dirty = true;
break;
case RADEON_RB3D_COLORPITCH:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -355,11 +355,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->zb_dirty = true;
break;
case RADEON_RB3D_ZPASS_ADDR:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index d0ba6023a1f..c60350e6872 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -615,7 +615,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
break;
@@ -630,11 +630,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_RB3D_COLOROFFSET2:
case R300_RB3D_COLOROFFSET3:
i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->cb[i].robj = reloc->robj;
@@ -643,11 +643,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case R300_ZB_DEPTHOFFSET:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->zb.robj = reloc->robj;
@@ -672,11 +672,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_TX_OFFSET_0+56:
case R300_TX_OFFSET_0+60:
i = (reg - R300_TX_OFFSET_0) >> 2;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -745,11 +745,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
/* RB3D_COLORPITCH2 */
/* RB3D_COLORPITCH3 */
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -830,11 +830,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case 0x4F24:
/* ZB_DEPTHPITCH */
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1045,11 +1045,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->tex_dirty = true;
break;
case R300_ZB_ZPASS_ADDR:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
@@ -1087,11 +1087,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->cb_dirty = true;
break;
case R300_RB3D_AARESOLVE_OFFSET:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->aa.robj = reloc->robj;
@@ -1156,10 +1156,10 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
return r;
break;
case PACKET3_INDX_BUFFER:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
@@ -1257,21 +1257,21 @@ int r300_cs_parse(struct radeon_cs_parser *p)
r100_cs_track_clear(p->rdev, track);
p->track = track;
do {
- r = r100_cs_packet_parse(p, &pkt, p->idx);
+ r = radeon_cs_packet_parse(p, &pkt, p->idx);
if (r) {
return r;
}
p->idx += pkt.count + 2;
switch (pkt.type) {
- case PACKET_TYPE0:
+ case RADEON_PACKET_TYPE0:
r = r100_cs_parse_packet0(p, &pkt,
p->rdev->config.r300.reg_safe_bm,
p->rdev->config.r300.reg_safe_bm_size,
&r300_packet0_check);
break;
- case PACKET_TYPE2:
+ case RADEON_PACKET_TYPE2:
break;
- case PACKET_TYPE3:
+ case RADEON_PACKET_TYPE3:
r = r300_packet3_check(p, &pkt);
break;
default:
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 002ab038d2a..865e2c9980d 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -29,6 +29,8 @@
*
* Authors:
* Nicolai Haehnle <prefect_@gmx.net>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
index 1f519a5ffb8..ff229a00d27 100644
--- a/drivers/gpu/drm/radeon/r300d.h
+++ b/drivers/gpu/drm/radeon/r300d.h
@@ -65,17 +65,6 @@
REG_SET(PACKET3_IT_OPCODE, (op)) | \
REG_SET(PACKET3_COUNT, (n)))
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
-#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-
/* Registers */
#define R_000148_MC_FB_LOCATION 0x000148
#define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0)
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index ec576aaafb7..c0dc8d3ba0b 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -355,6 +355,7 @@
# define AVIVO_D1CRTC_V_BLANK (1 << 0)
#define AVIVO_D1CRTC_STATUS_POSITION 0x60a0
#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
+#define AVIVO_D1CRTC_STATUS_HV_COUNT 0x60ac
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
#define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index becb03e8b32..6d4b5611daf 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -94,6 +94,12 @@ MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");
+static const u32 crtc_offsets[2] =
+{
+ 0,
+ AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
+};
+
int r600_debugfs_mc_info_init(struct radeon_device *rdev);
/* r600,rv610,rv630,rv620,rv635,rv670 */
@@ -103,6 +109,19 @@ void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
+/**
+ * r600_get_xclk - get the xclk
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Returns the reference clock used by the gfx engine
+ * (r6xx, IGPs, APUs).
+ */
+u32 r600_get_xclk(struct radeon_device *rdev)
+{
+ return rdev->clock.spll.reference_freq;
+}
+
/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
@@ -1254,169 +1273,301 @@ void r600_vram_scratch_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->vram_scratch.robj);
}
-/* We doesn't check that the GPU really needs a reset we simply do the
- * reset, it's up to the caller to determine if the GPU needs one. We
- * might add an helper function to check that.
- */
-static void r600_gpu_soft_reset_gfx(struct radeon_device *rdev)
+void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
{
- u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
- S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
- S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
- S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
- S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
- S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
- S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
- S_008010_GUI_ACTIVE(1);
- u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
- S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
- S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
- S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
- S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
- S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
- S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
- S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
- u32 tmp;
+ u32 tmp = RREG32(R600_BIOS_3_SCRATCH);
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return;
+ if (hung)
+ tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+ else
+ tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+
+ WREG32(R600_BIOS_3_SCRATCH, tmp);
+}
+static void r600_print_gpu_status_regs(struct radeon_device *rdev)
+{
dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
- RREG32(R_008010_GRBM_STATUS));
+ RREG32(R_008010_GRBM_STATUS));
dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
- RREG32(R_008014_GRBM_STATUS2));
+ RREG32(R_008014_GRBM_STATUS2));
dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
- RREG32(R_000E50_SRBM_STATUS));
+ RREG32(R_000E50_SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
- RREG32(CP_STALLED_STAT1));
+ RREG32(CP_STALLED_STAT1));
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
- RREG32(CP_STALLED_STAT2));
+ RREG32(CP_STALLED_STAT2));
dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
- RREG32(CP_BUSY_STAT));
+ RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
- RREG32(CP_STAT));
-
- /* Disable CP parsing/prefetching */
- WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+ RREG32(CP_STAT));
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+}
- /* Check if any of the rendering block is busy and reset it */
- if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
- (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
- tmp = S_008020_SOFT_RESET_CR(1) |
- S_008020_SOFT_RESET_DB(1) |
- S_008020_SOFT_RESET_CB(1) |
- S_008020_SOFT_RESET_PA(1) |
- S_008020_SOFT_RESET_SC(1) |
- S_008020_SOFT_RESET_SMX(1) |
- S_008020_SOFT_RESET_SPI(1) |
- S_008020_SOFT_RESET_SX(1) |
- S_008020_SOFT_RESET_SH(1) |
- S_008020_SOFT_RESET_TC(1) |
- S_008020_SOFT_RESET_TA(1) |
- S_008020_SOFT_RESET_VC(1) |
- S_008020_SOFT_RESET_VGT(1);
- dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(R_008020_GRBM_SOFT_RESET, tmp);
- RREG32(R_008020_GRBM_SOFT_RESET);
- mdelay(15);
- WREG32(R_008020_GRBM_SOFT_RESET, 0);
+static bool r600_is_display_hung(struct radeon_device *rdev)
+{
+ u32 crtc_hung = 0;
+ u32 crtc_status[2];
+ u32 i, j, tmp;
+
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
+ crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ crtc_hung |= (1 << i);
+ }
}
- /* Reset CP (we always reset CP) */
- tmp = S_008020_SOFT_RESET_CP(1);
- dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(R_008020_GRBM_SOFT_RESET, tmp);
- RREG32(R_008020_GRBM_SOFT_RESET);
- mdelay(15);
- WREG32(R_008020_GRBM_SOFT_RESET, 0);
- dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
- RREG32(R_008010_GRBM_STATUS));
- dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
- RREG32(R_008014_GRBM_STATUS2));
- dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
- RREG32(R_000E50_SRBM_STATUS));
- dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
- RREG32(CP_STALLED_STAT1));
- dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
- RREG32(CP_STALLED_STAT2));
- dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
- RREG32(CP_BUSY_STAT));
- dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
- RREG32(CP_STAT));
+ for (j = 0; j < 10; j++) {
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (crtc_hung & (1 << i)) {
+ tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ if (tmp != crtc_status[i])
+ crtc_hung &= ~(1 << i);
+ }
+ }
+ if (crtc_hung == 0)
+ return false;
+ udelay(100);
+ }
+ return true;
}
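
[Editor's note] r600_is_display_hung() above records the HV counter of every enabled CRTC (indexed through the crtc_offsets[] delta table added earlier in this file), then re-reads the counters up to ten times with a 100 microsecond delay; any CRTC whose counter never advances is treated as hung. The userspace sketch below keeps only that sample-and-poll logic; read_hv_count() is a stand-in for the RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]) read, and it assumes every CRTC is enabled.

/* Minimal sketch of the sample-and-poll hang check in
 * r600_is_display_hung() above: a counter that never moves across ten
 * polls means the display engine is stuck.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_CRTC 2

static uint32_t read_hv_count(int crtc)		/* fake register read */
{
	static uint32_t fake[MAX_CRTC];

	return crtc == 0 ? fake[0]++ : fake[1];	/* CRTC 1 never advances */
}

static bool display_is_hung(int num_crtc)
{
	uint32_t status[MAX_CRTC];
	unsigned int hung = 0;
	int i, j;

	for (i = 0; i < num_crtc; i++) {
		status[i] = read_hv_count(i);	/* initial sample */
		hung |= 1u << i;		/* assume every CRTC is enabled */
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < num_crtc; i++) {
			if ((hung & (1u << i)) && read_hv_count(i) != status[i])
				hung &= ~(1u << i);	/* counter moved: not hung */
		}
		if (!hung)
			return false;
		usleep(100);			/* matches the udelay(100) above */
	}
	return true;				/* something never moved */
}

int main(void)
{
	printf("hung: %d\n", display_is_hung(MAX_CRTC));
	return 0;
}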
-static void r600_gpu_soft_reset_dma(struct radeon_device *rdev)
+static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
{
+ u32 reset_mask = 0;
u32 tmp;
- if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
- return;
+ /* GRBM_STATUS */
+ tmp = RREG32(R_008010_GRBM_STATUS);
+ if (rdev->family >= CHIP_RV770) {
+ if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
+ G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
+ G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
+ G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
+ G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
+ reset_mask |= RADEON_RESET_GFX;
+ } else {
+ if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
+ G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
+ G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
+ G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
+ G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
+ reset_mask |= RADEON_RESET_GFX;
+ }
- dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
+ if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
+ G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
+ reset_mask |= RADEON_RESET_CP;
- /* Disable DMA */
- tmp = RREG32(DMA_RB_CNTL);
- tmp &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL, tmp);
+ if (G_008010_GRBM_EE_BUSY(tmp))
+ reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
- /* Reset dma */
- if (rdev->family >= CHIP_RV770)
- WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
- else
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
+ /* DMA_STATUS_REG */
+ tmp = RREG32(DMA_STATUS_REG);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA;
- dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
+ /* SRBM_STATUS */
+ tmp = RREG32(R_000E50_SRBM_STATUS);
+ if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
+ reset_mask |= RADEON_RESET_RLC;
+
+ if (G_000E50_IH_BUSY(tmp))
+ reset_mask |= RADEON_RESET_IH;
+
+ if (G_000E50_SEM_BUSY(tmp))
+ reset_mask |= RADEON_RESET_SEM;
+
+ if (G_000E50_GRBM_RQ_PENDING(tmp))
+ reset_mask |= RADEON_RESET_GRBM;
+
+ if (G_000E50_VMC_BUSY(tmp))
+ reset_mask |= RADEON_RESET_VMC;
+
+ if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
+ G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
+ G_000E50_MCDW_BUSY(tmp))
+ reset_mask |= RADEON_RESET_MC;
+
+ if (r600_is_display_hung(rdev))
+ reset_mask |= RADEON_RESET_DISPLAY;
+
+ return reset_mask;
}
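
[Editor's note] r600_gpu_check_soft_reset() above builds its reset mask by testing individual busy/pending bits in GRBM_STATUS, DMA_STATUS_REG and SRBM_STATUS and translating each one into a RADEON_RESET_* flag. The sketch below shows only that bit-to-flag mapping; the status bits and flag names are made up for illustration and are not the real register layout.

/* Standalone sketch of the status-bits -> reset-mask mapping done by
 * r600_gpu_check_soft_reset() above. Bit positions are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define STATUS_GFX_BUSY	(1u << 0)	/* hypothetical busy bits */
#define STATUS_CP_BUSY	(1u << 1)
#define STATUS_DMA_BUSY	(1u << 2)

#define RESET_GFX	(1u << 0)	/* hypothetical reset flags */
#define RESET_CP	(1u << 1)
#define RESET_DMA	(1u << 2)

static uint32_t check_soft_reset(uint32_t status)
{
	uint32_t reset_mask = 0;

	/* Each busy/pending bit selects the block that needs a soft reset. */
	if (status & STATUS_GFX_BUSY)
		reset_mask |= RESET_GFX;
	if (status & STATUS_CP_BUSY)
		reset_mask |= RESET_CP;
	if (status & STATUS_DMA_BUSY)
		reset_mask |= RESET_DMA;

	return reset_mask;
}

int main(void)
{
	printf("reset mask: 0x%08X\n",
	       check_soft_reset(STATUS_CP_BUSY | STATUS_DMA_BUSY));
	return 0;
}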
-static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
struct rv515_mc_save save;
-
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
-
- if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
- reset_mask &= ~RADEON_RESET_DMA;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 tmp;
if (reset_mask == 0)
- return 0;
+ return;
dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+ r600_print_gpu_status_regs(rdev);
+
+ /* Disable CP parsing/prefetching */
+ if (rdev->family >= CHIP_RV770)
+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
+ else
+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+
+ /* disable the RLC */
+ WREG32(RLC_CNTL, 0);
+
+ if (reset_mask & RADEON_RESET_DMA) {
+ /* Disable DMA */
+ tmp = RREG32(DMA_RB_CNTL);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, tmp);
+ }
+
+ mdelay(50);
+
rv515_mc_stop(rdev, &save);
if (r600_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
- if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
- r600_gpu_soft_reset_gfx(rdev);
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
+ if (rdev->family >= CHIP_RV770)
+ grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
+ S_008020_SOFT_RESET_CB(1) |
+ S_008020_SOFT_RESET_PA(1) |
+ S_008020_SOFT_RESET_SC(1) |
+ S_008020_SOFT_RESET_SPI(1) |
+ S_008020_SOFT_RESET_SX(1) |
+ S_008020_SOFT_RESET_SH(1) |
+ S_008020_SOFT_RESET_TC(1) |
+ S_008020_SOFT_RESET_TA(1) |
+ S_008020_SOFT_RESET_VC(1) |
+ S_008020_SOFT_RESET_VGT(1);
+ else
+ grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
+ S_008020_SOFT_RESET_DB(1) |
+ S_008020_SOFT_RESET_CB(1) |
+ S_008020_SOFT_RESET_PA(1) |
+ S_008020_SOFT_RESET_SC(1) |
+ S_008020_SOFT_RESET_SMX(1) |
+ S_008020_SOFT_RESET_SPI(1) |
+ S_008020_SOFT_RESET_SX(1) |
+ S_008020_SOFT_RESET_SH(1) |
+ S_008020_SOFT_RESET_TC(1) |
+ S_008020_SOFT_RESET_TA(1) |
+ S_008020_SOFT_RESET_VC(1) |
+ S_008020_SOFT_RESET_VGT(1);
+ }
+
+ if (reset_mask & RADEON_RESET_CP) {
+ grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
+ S_008020_SOFT_RESET_VGT(1);
+
+ srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
+ }
+
+ if (reset_mask & RADEON_RESET_DMA) {
+ if (rdev->family >= CHIP_RV770)
+ srbm_soft_reset |= RV770_SOFT_RESET_DMA;
+ else
+ srbm_soft_reset |= SOFT_RESET_DMA;
+ }
+
+ if (reset_mask & RADEON_RESET_RLC)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);
+
+ if (reset_mask & RADEON_RESET_SEM)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);
+
+ if (reset_mask & RADEON_RESET_IH)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);
+
+ if (reset_mask & RADEON_RESET_GRBM)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
+
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (reset_mask & RADEON_RESET_MC)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
+ }
+
+ if (reset_mask & RADEON_RESET_VMC)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);
+
+ if (grbm_soft_reset) {
+ tmp = RREG32(R_008020_GRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(R_008020_GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(R_008020_GRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32(R_008020_GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(R_008020_GRBM_SOFT_RESET);
+ }
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(SRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
- if (reset_mask & RADEON_RESET_DMA)
- r600_gpu_soft_reset_dma(rdev);
+ tmp &= ~srbm_soft_reset;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+ }
/* Wait a little for things to settle down */
mdelay(1);
rv515_mc_resume(rdev, &save);
+ udelay(50);
+
+ r600_print_gpu_status_regs(rdev);
+}
+
+int r600_asic_reset(struct radeon_device *rdev)
+{
+ u32 reset_mask;
+
+ reset_mask = r600_gpu_check_soft_reset(rdev);
+
+ if (reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, true);
+
+ r600_gpu_soft_reset(rdev, reset_mask);
+
+ reset_mask = r600_gpu_check_soft_reset(rdev);
+
+ if (!reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, false);
+
return 0;
}
-bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+/**
+ * r600_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
- u32 srbm_status;
- u32 grbm_status;
- u32 grbm_status2;
-
- srbm_status = RREG32(R_000E50_SRBM_STATUS);
- grbm_status = RREG32(R_008010_GRBM_STATUS);
- grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
- if (!G_008010_GUI_ACTIVE(grbm_status)) {
+ u32 reset_mask = r600_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_CP))) {
radeon_ring_lockup_update(ring);
return false;
}
@@ -1431,15 +1582,14 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
- * Check if the async DMA engine is locked up (r6xx-evergreen).
+ * Check if the async DMA engine is locked up.
* Returns true if the engine appears to be locked up, false if not.
*/
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
- u32 dma_status_reg;
+ u32 reset_mask = r600_gpu_check_soft_reset(rdev);
- dma_status_reg = RREG32(DMA_STATUS_REG);
- if (dma_status_reg & DMA_IDLE) {
+ if (!(reset_mask & RADEON_RESET_DMA)) {
radeon_ring_lockup_update(ring);
return false;
}
@@ -1448,13 +1598,6 @@ bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}
-int r600_asic_reset(struct radeon_device *rdev)
-{
- return r600_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
- RADEON_RESET_COMPUTE |
- RADEON_RESET_DMA));
-}
-
u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 tiling_pipe_num,
u32 max_rb_num,
@@ -4318,14 +4461,14 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
}
/**
- * r600_get_gpu_clock - return GPU clock counter snapshot
+ * r600_get_gpu_clock_counter - return GPU clock counter snapshot
*
* @rdev: radeon_device pointer
*
* Fetches a GPU clock counter snapshot (R6xx-cayman).
* Returns the 64 bit clock counter snapshot.
*/
-uint64_t r600_get_gpu_clock(struct radeon_device *rdev)
+uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
{
uint64_t clock;
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index 77da1f9c0b8..f651881eb0a 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -22,6 +22,8 @@
*
* Authors:
* Alex Deucher <alexander.deucher@amd.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
@@ -488,37 +490,6 @@ set_default_state(drm_radeon_private_t *dev_priv)
ADVANCE_RING();
}
-/* 23 bits of float fractional data */
-#define I2F_FRAC_BITS 23
-#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
-
-/*
- * Converts unsigned integer into 32-bit IEEE floating point representation.
- * Will be exact from 0 to 2^24. Above that, we round towards zero
- * as the fractional bits will not fit in a float. (It would be better to
- * round towards even as the fpu does, but that is slower.)
- */
-__pure uint32_t int2float(uint32_t x)
-{
- uint32_t msb, exponent, fraction;
-
- /* Zero is special */
- if (!x) return 0;
-
- /* Get location of the most significant bit */
- msb = __fls(x);
-
- /*
- * Use a rotate instead of a shift because that works both leftwards
- * and rightwards due to the mod(32) behaviour. This means we don't
- * need to check to see if we are above 2^24 or not.
- */
- fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
- exponent = (127 + msb) << I2F_FRAC_BITS;
-
- return fraction + exponent;
-}
-
static int r600_nomm_get_vb(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index e082dca6fee..9fb5780a552 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -31,6 +31,37 @@
#include "r600_blit_shaders.h"
#include "radeon_blit_common.h"
+/* 23 bits of float fractional data */
+#define I2F_FRAC_BITS 23
+#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
+
+/*
+ * Converts unsigned integer into 32-bit IEEE floating point representation.
+ * Will be exact from 0 to 2^24. Above that, we round towards zero
+ * as the fractional bits will not fit in a float. (It would be better to
+ * round towards even as the fpu does, but that is slower.)
+ */
+__pure uint32_t int2float(uint32_t x)
+{
+ uint32_t msb, exponent, fraction;
+
+ /* Zero is special */
+ if (!x) return 0;
+
+ /* Get location of the most significant bit */
+ msb = __fls(x);
+
+ /*
+ * Use a rotate instead of a shift because that works both leftwards
+ * and rightwards due to the mod(32) behaviour. This means we don't
+ * need to check to see if we are above 2^24 or not.
+ */
+ fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
+ exponent = (127 + msb) << I2F_FRAC_BITS;
+
+ return fraction + exponent;
+}
+
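
[Editor's note] The int2float() helper moved into r600_blit_kms.c above builds an IEEE-754 single from an unsigned integer using a rotate. A quick userspace check of the same arithmetic is sketched below: __fls() and ror32() are kernel helpers, so small stand-ins are provided, while the conversion itself is copied from the hunk. For inputs up to 2^24 the result should match the FPU's uint-to-float conversion bit for bit.

/* Userspace check of the int2float() conversion shown above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define I2F_FRAC_BITS	23
#define I2F_MASK	((1u << I2F_FRAC_BITS) - 1)

static uint32_t fls_stand_in(uint32_t x)	/* index of the highest set bit */
{
	return 31 - (uint32_t)__builtin_clz(x);
}

static uint32_t ror32_stand_in(uint32_t x, uint32_t r)
{
	r &= 31;
	return r ? (x >> r) | (x << (32 - r)) : x;
}

static uint32_t int2float_sketch(uint32_t x)
{
	uint32_t msb, exponent, fraction;

	if (!x)
		return 0;
	msb = fls_stand_in(x);
	fraction = ror32_stand_in(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
	exponent = (127 + msb) << I2F_FRAC_BITS;
	return fraction + exponent;
}

int main(void)
{
	uint32_t tests[] = { 1, 5, 255, 1u << 20, (1u << 24) - 1 };

	for (unsigned int i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
		float f = (float)tests[i];
		uint32_t fpu_bits;

		memcpy(&fpu_bits, &f, sizeof(fpu_bits));
		printf("%8u -> 0x%08X (fpu 0x%08X)\n",
		       tests[i], int2float_sketch(tests[i]), fpu_bits);
	}
	return 0;
}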
/* emits 21 on rv770+, 23 on r600 */
static void
set_render_target(struct radeon_device *rdev, int format,
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index be85f75aedd..1c51c08b1fd 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -24,6 +24,8 @@
* Authors:
* Dave Airlie <airlied@redhat.com>
* Alex Deucher <alexander.deucher@amd.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <linux/module.h>
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 9b2512bf1a4..01a3ec83f28 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -31,12 +31,7 @@
#include "r600d.h"
#include "r600_reg_safe.h"
-static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc);
-static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc);
-typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
-static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
+static int r600_nomm;
extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
@@ -784,170 +779,29 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
}
/**
- * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
- * @parser: parser structure holding parsing context.
- * @pkt: where to store packet informations
- *
- * Assume that chunk_ib_index is properly set. Will return -EINVAL
- * if packet is bigger than remaining ib size. or if packets is unknown.
- **/
-static int r600_cs_packet_parse(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- unsigned idx)
-{
- struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
- uint32_t header;
-
- if (idx >= ib_chunk->length_dw) {
- DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
- idx, ib_chunk->length_dw);
- return -EINVAL;
- }
- header = radeon_get_ib_value(p, idx);
- pkt->idx = idx;
- pkt->type = CP_PACKET_GET_TYPE(header);
- pkt->count = CP_PACKET_GET_COUNT(header);
- pkt->one_reg_wr = 0;
- switch (pkt->type) {
- case PACKET_TYPE0:
- pkt->reg = CP_PACKET0_GET_REG(header);
- break;
- case PACKET_TYPE3:
- pkt->opcode = CP_PACKET3_GET_OPCODE(header);
- break;
- case PACKET_TYPE2:
- pkt->count = -1;
- break;
- default:
- DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
- return -EINVAL;
- }
- if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
- DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
- pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
- return -EINVAL;
- }
- return 0;
-}
-
-/**
- * r600_cs_packet_next_reloc_mm() - parse next packet which should be reloc packet3
- * @parser: parser structure holding parsing context.
- * @data: pointer to relocation data
- * @offset_start: starting offset
- * @offset_mask: offset mask (to align start offset on)
- * @reloc: reloc informations
- *
- * Check next packet is relocation packet3, do bo validation and compute
- * GPU offset using the provided start.
- **/
-static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc)
-{
- struct radeon_cs_chunk *relocs_chunk;
- struct radeon_cs_packet p3reloc;
- unsigned idx;
- int r;
-
- if (p->chunk_relocs_idx == -1) {
- DRM_ERROR("No relocation chunk !\n");
- return -EINVAL;
- }
- *cs_reloc = NULL;
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
- r = r600_cs_packet_parse(p, &p3reloc, p->idx);
- if (r) {
- return r;
- }
- p->idx += p3reloc.count + 2;
- if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
- DRM_ERROR("No packet3 for relocation for packet at %d.\n",
- p3reloc.idx);
- return -EINVAL;
- }
- idx = radeon_get_ib_value(p, p3reloc.idx + 1);
- if (idx >= relocs_chunk->length_dw) {
- DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
- idx, relocs_chunk->length_dw);
- return -EINVAL;
- }
- /* FIXME: we assume reloc size is 4 dwords */
- *cs_reloc = p->relocs_ptr[(idx / 4)];
- return 0;
-}
-
-/**
- * r600_cs_packet_next_reloc_nomm() - parse next packet which should be reloc packet3
+ * r600_cs_packet_parse_vline() - parse userspace VLINE packet
* @parser: parser structure holding parsing context.
- * @data: pointer to relocation data
- * @offset_start: starting offset
- * @offset_mask: offset mask (to align start offset on)
- * @reloc: reloc informations
*
- * Check next packet is relocation packet3, do bo validation and compute
- * GPU offset using the provided start.
- **/
-static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc)
-{
- struct radeon_cs_chunk *relocs_chunk;
- struct radeon_cs_packet p3reloc;
- unsigned idx;
- int r;
-
- if (p->chunk_relocs_idx == -1) {
- DRM_ERROR("No relocation chunk !\n");
- return -EINVAL;
- }
- *cs_reloc = NULL;
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
- r = r600_cs_packet_parse(p, &p3reloc, p->idx);
- if (r) {
- return r;
- }
- p->idx += p3reloc.count + 2;
- if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
- DRM_ERROR("No packet3 for relocation for packet at %d.\n",
- p3reloc.idx);
- return -EINVAL;
- }
- idx = radeon_get_ib_value(p, p3reloc.idx + 1);
- if (idx >= relocs_chunk->length_dw) {
- DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
- idx, relocs_chunk->length_dw);
- return -EINVAL;
- }
- *cs_reloc = p->relocs;
- (*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
- (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
- return 0;
-}
-
-/**
- * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
- * @parser: parser structure holding parsing context.
- *
- * Check next packet is relocation packet3, do bo validation and compute
- * GPU offset using the provided start.
- **/
-static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+ * This is an R600-specific function for parsing VLINE packets.
+ * The real work is done by the r600_cs_common_vline_parse() function.
+ * Here we just set up the ASIC-specific register tables and call
+ * the common implementation.
+ */
+static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
- struct radeon_cs_packet p3reloc;
- int r;
+ static uint32_t vline_start_end[2] = {AVIVO_D1MODE_VLINE_START_END,
+ AVIVO_D2MODE_VLINE_START_END};
+ static uint32_t vline_status[2] = {AVIVO_D1MODE_VLINE_STATUS,
+ AVIVO_D2MODE_VLINE_STATUS};
- r = r600_cs_packet_parse(p, &p3reloc, p->idx);
- if (r) {
- return 0;
- }
- if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
- return 0;
- }
- return 1;
+ return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
}
/**
- * r600_cs_packet_next_vline() - parse userspace VLINE packet
+ * r600_cs_common_vline_parse() - common vline parser
* @parser: parser structure holding parsing context.
+ * @vline_start_end: table of vline_start_end registers
+ * @vline_status: table of vline_status registers
*
* Userspace sends a special sequence for VLINE waits.
* PACKET0 - VLINE_START_END + value
@@ -957,9 +811,16 @@ static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
* This function parses this and relocates the VLINE START END
* and WAIT_REG_MEM packets to the correct crtc.
* It also detects a switched off crtc and nulls out the
- * wait in that case.
+ * wait in that case. This function is common to all ASICs that
+ * are R600 and newer; the parsing algorithm is the same, only the
+ * registers involved differ.
+ *
+ * The caller is the ASIC-specific function, which passes the parser
+ * context and the ASIC-specific register tables.
*/
-static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
+int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
+ uint32_t *vline_start_end,
+ uint32_t *vline_status)
{
struct drm_mode_object *obj;
struct drm_crtc *crtc;
@@ -973,12 +834,12 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
ib = p->ib.ptr;
/* parse the WAIT_REG_MEM */
- r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
+ r = radeon_cs_packet_parse(p, &wait_reg_mem, p->idx);
if (r)
return r;
/* check its a WAIT_REG_MEM */
- if (wait_reg_mem.type != PACKET_TYPE3 ||
+ if (wait_reg_mem.type != RADEON_PACKET_TYPE3 ||
wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
return -EINVAL;
@@ -987,7 +848,12 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
/* bit 4 is reg (0) or mem (1) */
if (wait_reg_mem_info & 0x10) {
- DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
+ DRM_ERROR("vline WAIT_REG_MEM waiting on MEM instead of REG\n");
+ return -EINVAL;
+ }
+ /* bit 8 is me (0) or pfp (1) */
+ if (wait_reg_mem_info & 0x100) {
+ DRM_ERROR("vline WAIT_REG_MEM waiting on PFP instead of ME\n");
return -EINVAL;
}
/* waiting for value to be equal */
@@ -995,18 +861,18 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
return -EINVAL;
}
- if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
+ if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != vline_status[0]) {
DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
return -EINVAL;
}
- if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
+ if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != RADEON_VLINE_STAT) {
DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
return -EINVAL;
}
/* jump over the NOP */
- r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
+ r = radeon_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
if (r)
return r;
@@ -1016,7 +882,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
- reg = CP_PACKET0_GET_REG(header);
+ reg = R600_CP_PACKET0_GET_REG(header);
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
@@ -1028,7 +894,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
crtc_id = radeon_crtc->crtc_id;
if (!crtc->enabled) {
- /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
+ /* CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
ib[h_idx + 2] = PACKET2(0);
ib[h_idx + 3] = PACKET2(0);
ib[h_idx + 4] = PACKET2(0);
@@ -1036,20 +902,15 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
ib[h_idx + 6] = PACKET2(0);
ib[h_idx + 7] = PACKET2(0);
ib[h_idx + 8] = PACKET2(0);
- } else if (crtc_id == 1) {
- switch (reg) {
- case AVIVO_D1MODE_VLINE_START_END:
- header &= ~R600_CP_PACKET0_REG_MASK;
- header |= AVIVO_D2MODE_VLINE_START_END >> 2;
- break;
- default:
- DRM_ERROR("unknown crtc reloc\n");
- return -EINVAL;
- }
+ } else if (reg == vline_start_end[0]) {
+ header &= ~R600_CP_PACKET0_REG_MASK;
+ header |= vline_start_end[crtc_id] >> 2;
ib[h_idx] = header;
- ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
+ ib[h_idx + 4] = vline_status[crtc_id] >> 2;
+ } else {
+ DRM_ERROR("unknown crtc reloc\n");
+ return -EINVAL;
}
-
return 0;
}
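
[Editor's note] The rework above splits VLINE parsing into a thin per-ASIC wrapper that owns a small register table and a common parser that only indexes that table by CRTC. The sketch below shows just that table-driven split; the register offsets are placeholders, not the real AVIVO_D1/D2 values, and the parsing work itself is reduced to a printf.

/* Sketch of the table-driven split introduced above: each ASIC wrapper
 * supplies its register tables, the shared parser indexes them by CRTC.
 */
#include <stdint.h>
#include <stdio.h>

static int common_vline_parse(int crtc_id,
			      const uint32_t *vline_start_end,
			      const uint32_t *vline_status)
{
	/* The common code never hard-codes D1/D2 registers; it picks the
	 * right one for the CRTC from the caller-supplied tables. */
	printf("patching VLINE_START_END=0x%04X VLINE_STATUS=0x%04X\n",
	       vline_start_end[crtc_id], vline_status[crtc_id]);
	return 0;
}

static int r600_style_vline_parse(int crtc_id)
{
	/* Placeholder offsets standing in for the AVIVO_D1/D2 registers
	 * used by the real r600_cs_packet_parse_vline(). */
	static const uint32_t vline_start_end[2] = { 0x6538, 0x6d38 };
	static const uint32_t vline_status[2]    = { 0x653c, 0x6d3c };

	return common_vline_parse(crtc_id, vline_start_end, vline_status);
}

int main(void)
{
	return r600_style_vline_parse(1);
}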
@@ -1155,8 +1016,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case R_028010_DB_DEPTH_INFO:
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
- r600_cs_packet_next_is_pkt3_nop(p)) {
- r = r600_cs_packet_next_reloc(p, &reloc);
+ radeon_cs_packet_next_is_pkt3_nop(p)) {
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1198,7 +1059,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case VGT_STRMOUT_BUFFER_BASE_1:
case VGT_STRMOUT_BUFFER_BASE_2:
case VGT_STRMOUT_BUFFER_BASE_3:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1221,7 +1082,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->streamout_dirty = true;
break;
case CP_COHER_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
"0x%04X\n", reg);
@@ -1256,8 +1117,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_0280B8_CB_COLOR6_INFO:
case R_0280BC_CB_COLOR7_INFO:
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
- r600_cs_packet_next_is_pkt3_nop(p)) {
- r = r600_cs_packet_next_reloc(p, &reloc);
+ radeon_cs_packet_next_is_pkt3_nop(p)) {
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
@@ -1320,7 +1181,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_0280F8_CB_COLOR6_FRAG:
case R_0280FC_CB_COLOR7_FRAG:
tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
- if (!r600_cs_packet_next_is_pkt3_nop(p)) {
+ if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
if (!track->cb_color_base_last[tmp]) {
dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
return -EINVAL;
@@ -1329,7 +1190,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
ib[idx] = track->cb_color_base_last[tmp];
} else {
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
@@ -1351,7 +1212,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_0280D8_CB_COLOR6_TILE:
case R_0280DC_CB_COLOR7_TILE:
tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
- if (!r600_cs_packet_next_is_pkt3_nop(p)) {
+ if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
if (!track->cb_color_base_last[tmp]) {
dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
return -EINVAL;
@@ -1360,7 +1221,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
ib[idx] = track->cb_color_base_last[tmp];
} else {
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
@@ -1395,7 +1256,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR5_BASE:
case CB_COLOR6_BASE:
case CB_COLOR7_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1410,7 +1271,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->cb_dirty = true;
break;
case DB_DEPTH_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1423,7 +1284,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_dirty = true;
break;
case DB_HTILE_DATA_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1493,7 +1354,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_ALU_CONST_CACHE_VS_13:
case SQ_ALU_CONST_CACHE_VS_14:
case SQ_ALU_CONST_CACHE_VS_15:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1502,7 +1363,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
case SX_MEMORY_EXPORT_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONFIG_REG "
"0x%04X\n", reg);
@@ -1788,7 +1649,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad SET PREDICATION\n");
return -EINVAL;
@@ -1829,7 +1690,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
}
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
@@ -1881,7 +1742,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x10) {
uint64_t offset;
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad WAIT_REG_MEM\n");
return -EINVAL;
@@ -1893,6 +1754,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
ib[idx+2] = upper_32_bits(offset) & 0xff;
+ } else if (idx_value & 0x100) {
+ DRM_ERROR("cannot use PFP on REG wait\n");
+ return -EINVAL;
}
break;
case PACKET3_CP_DMA:
@@ -1915,7 +1779,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
/* src address space is memory */
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad CP DMA SRC\n");
return -EINVAL;
@@ -1945,7 +1809,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("CP DMA DAIC only supported for registers\n");
return -EINVAL;
}
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad CP DMA DST\n");
return -EINVAL;
@@ -1975,7 +1839,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* 0xffffffff/0x0 is flush all cache flag */
if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
radeon_get_ib_value(p, idx + 2) != 0) {
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad SURFACE_SYNC\n");
return -EINVAL;
@@ -1991,7 +1855,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if (pkt->count) {
uint64_t offset;
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad EVENT_WRITE\n");
return -EINVAL;
@@ -2012,7 +1876,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad EVENT_WRITE_EOP\n");
return -EINVAL;
}
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad EVENT_WRITE\n");
return -EINVAL;
@@ -2078,7 +1942,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
case SQ_TEX_VTX_VALID_TEXTURE:
/* tex base */
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad SET_RESOURCE\n");
return -EINVAL;
@@ -2092,7 +1956,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
texture = reloc->robj;
/* tex mip base */
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad SET_RESOURCE\n");
return -EINVAL;
@@ -2113,7 +1977,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
{
uint64_t offset64;
/* vtx base */
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad SET_RESOURCE\n");
return -EINVAL;
@@ -2214,7 +2078,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
{
u64 offset;
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
return -EINVAL;
@@ -2258,7 +2122,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* Updating memory at DST_ADDRESS. */
if (idx_value & 0x1) {
u64 offset;
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
return -EINVAL;
@@ -2277,7 +2141,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* Reading data from SRC_ADDRESS. */
if (((idx_value >> 1) & 0x3) == 2) {
u64 offset;
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
return -EINVAL;
@@ -2302,7 +2166,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad MEM_WRITE (invalid count)\n");
return -EINVAL;
}
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
return -EINVAL;
@@ -2331,7 +2195,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x1) {
u64 offset;
/* SRC is memory. */
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad COPY_DW (missing src reloc)\n");
return -EINVAL;
@@ -2355,7 +2219,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x2) {
u64 offset;
/* DST is memory. */
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
return -EINVAL;
@@ -2410,7 +2274,7 @@ int r600_cs_parse(struct radeon_cs_parser *p)
p->track = track;
}
do {
- r = r600_cs_packet_parse(p, &pkt, p->idx);
+ r = radeon_cs_packet_parse(p, &pkt, p->idx);
if (r) {
kfree(p->track);
p->track = NULL;
@@ -2418,12 +2282,12 @@ int r600_cs_parse(struct radeon_cs_parser *p)
}
p->idx += pkt.count + 2;
switch (pkt.type) {
- case PACKET_TYPE0:
+ case RADEON_PACKET_TYPE0:
r = r600_cs_parse_packet0(p, &pkt);
break;
- case PACKET_TYPE2:
+ case RADEON_PACKET_TYPE2:
break;
- case PACKET_TYPE3:
+ case RADEON_PACKET_TYPE3:
r = r600_packet3_check(p, &pkt);
break;
default:
@@ -2449,17 +2313,7 @@ int r600_cs_parse(struct radeon_cs_parser *p)
return 0;
}
-static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
-{
- if (p->chunk_relocs_idx == -1) {
- return 0;
- }
- p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
- if (p->relocs == NULL) {
- return -ENOMEM;
- }
- return 0;
-}
+#ifdef CONFIG_DRM_RADEON_UMS
/**
* cs_parser_fini() - clean parser states
@@ -2485,6 +2339,18 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
kfree(parser->chunks_array);
}
+static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
+{
+ if (p->chunk_relocs_idx == -1) {
+ return 0;
+ }
+ p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+ if (p->relocs == NULL) {
+ return -ENOMEM;
+ }
+ return 0;
+}
+
int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
unsigned family, u32 *ib, int *l)
{
@@ -2543,9 +2409,11 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
void r600_cs_legacy_init(void)
{
- r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
+ r600_nomm = 1;
}
+#endif
+
/*
* DMA
*/
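The vline fixup above replaces the old per-CRTC switch with a lookup into vline_start_end[]/vline_status[] tables indexed by crtc_id; the tables themselves live elsewhere in the driver and are handed to r600_cs_common_vline_parse(). A minimal standalone sketch of that table-driven pattern, using illustrative register values rather than the real AVIVO offsets:

/*
 * Standalone sketch (not part of the patch) of the table-driven vline
 * register fixup used above; register values are illustrative placeholders.
 */
#include <stdio.h>
#include <stdint.h>

#define CP_PACKET0_REG_MASK 0xFFFF              /* analogous to R600_CP_PACKET0_REG_MASK */

static const uint32_t vline_start_end[2] = { 0x6538, 0x6d38 };   /* CRTC0, CRTC1 (illustrative) */
static const uint32_t vline_status[2]    = { 0x653c, 0x6d3c };   /* CRTC0, CRTC1 (illustrative) */

/* retarget a PACKET0 header at the vline registers of the given CRTC */
static int fixup_vline(uint32_t *ib, unsigned h_idx, int crtc_id)
{
	uint32_t header = ib[h_idx];
	uint32_t reg = (header & CP_PACKET0_REG_MASK) << 2;

	if (reg != vline_start_end[0])
		return -1;                               /* unknown crtc reloc */
	header &= ~CP_PACKET0_REG_MASK;
	header |= vline_start_end[crtc_id] >> 2;
	ib[h_idx] = header;
	ib[h_idx + 4] = vline_status[crtc_id] >> 2;
	return 0;
}

int main(void)
{
	uint32_t ib[8] = { vline_start_end[0] >> 2 };    /* header aimed at CRTC0 */

	fixup_vline(ib, 0, 1);                           /* retarget to CRTC1 */
	printf("patched header: 0x%08x\n", ib[0]);
	return 0;
}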
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index ff80efe9cb7..21ecc0e12dc 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -23,6 +23,7 @@
*
 * Authors: Christian König
*/
+#include <linux/hdmi.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
@@ -121,79 +122,18 @@ static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
}
/*
- * calculate the crc for a given info frame
- */
-static void r600_hdmi_infoframe_checksum(uint8_t packetType,
- uint8_t versionNumber,
- uint8_t length,
- uint8_t *frame)
-{
- int i;
- frame[0] = packetType + versionNumber + length;
- for (i = 1; i <= length; i++)
- frame[0] += frame[i];
- frame[0] = 0x100 - frame[0];
-}
-
-/*
* build a HDMI Video Info Frame
*/
-static void r600_hdmi_videoinfoframe(
- struct drm_encoder *encoder,
- enum r600_hdmi_color_format color_format,
- int active_information_present,
- uint8_t active_format_aspect_ratio,
- uint8_t scan_information,
- uint8_t colorimetry,
- uint8_t ex_colorimetry,
- uint8_t quantization,
- int ITC,
- uint8_t picture_aspect_ratio,
- uint8_t video_format_identification,
- uint8_t pixel_repetition,
- uint8_t non_uniform_picture_scaling,
- uint8_t bar_info_data_valid,
- uint16_t top_bar,
- uint16_t bottom_bar,
- uint16_t left_bar,
- uint16_t right_bar
-)
+static void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
+ void *buffer, size_t size)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
+ uint8_t *frame = buffer + 3;
- uint8_t frame[14];
-
- frame[0x0] = 0;
- frame[0x1] =
- (scan_information & 0x3) |
- ((bar_info_data_valid & 0x3) << 2) |
- ((active_information_present & 0x1) << 4) |
- ((color_format & 0x3) << 5);
- frame[0x2] =
- (active_format_aspect_ratio & 0xF) |
- ((picture_aspect_ratio & 0x3) << 4) |
- ((colorimetry & 0x3) << 6);
- frame[0x3] =
- (non_uniform_picture_scaling & 0x3) |
- ((quantization & 0x3) << 2) |
- ((ex_colorimetry & 0x7) << 4) |
- ((ITC & 0x1) << 7);
- frame[0x4] = (video_format_identification & 0x7F);
- frame[0x5] = (pixel_repetition & 0xF);
- frame[0x6] = (top_bar & 0xFF);
- frame[0x7] = (top_bar >> 8);
- frame[0x8] = (bottom_bar & 0xFF);
- frame[0x9] = (bottom_bar >> 8);
- frame[0xA] = (left_bar & 0xFF);
- frame[0xB] = (left_bar >> 8);
- frame[0xC] = (right_bar & 0xFF);
- frame[0xD] = (right_bar >> 8);
-
- r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
/* Our header values (type, version, length) should be alright, Intel
* is using the same. Checksum function also seems to be OK, it works
* fine for audio infoframe. However calculated value is always lower
@@ -215,39 +155,15 @@ static void r600_hdmi_videoinfoframe(
/*
* build a Audio Info Frame
*/
-static void r600_hdmi_audioinfoframe(
- struct drm_encoder *encoder,
- uint8_t channel_count,
- uint8_t coding_type,
- uint8_t sample_size,
- uint8_t sample_frequency,
- uint8_t format,
- uint8_t channel_allocation,
- uint8_t level_shift,
- int downmix_inhibit
-)
+static void r600_hdmi_update_audio_infoframe(struct drm_encoder *encoder,
+ const void *buffer, size_t size)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
-
- uint8_t frame[11];
-
- frame[0x0] = 0;
- frame[0x1] = (channel_count & 0x7) | ((coding_type & 0xF) << 4);
- frame[0x2] = (sample_size & 0x3) | ((sample_frequency & 0x7) << 2);
- frame[0x3] = format;
- frame[0x4] = channel_allocation;
- frame[0x5] = ((level_shift & 0xF) << 3) | ((downmix_inhibit & 0x1) << 7);
- frame[0x6] = 0;
- frame[0x7] = 0;
- frame[0x8] = 0;
- frame[0x9] = 0;
- frame[0xA] = 0;
-
- r600_hdmi_infoframe_checksum(0x84, 0x01, 0x0A, frame);
+ const u8 *frame = buffer + 3;
WREG32(HDMI0_AUDIO_INFO0 + offset,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
@@ -320,7 +236,10 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+ struct hdmi_avi_infoframe frame;
uint32_t offset;
+ ssize_t err;
/* Silent, r600_hdmi_enable will raise WARN for us */
if (!dig->afmt->enabled)
@@ -371,9 +290,19 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */
- r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ if (err < 0) {
+ DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
+ return;
+ }
+ err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
+ return;
+ }
+
+ r600_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
r600_hdmi_update_ACR(encoder, mode->clock);
/* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
@@ -395,8 +324,11 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct r600_audio audio = r600_audio_status(rdev);
+ uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
+ struct hdmi_audio_infoframe frame;
uint32_t offset;
uint32_t iec;
+ ssize_t err;
if (!dig->afmt || !dig->afmt->enabled)
return;
@@ -462,9 +394,21 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
iec |= 0x5 << 16;
WREG32_P(HDMI0_60958_1 + offset, iec, ~0x5000f);
- r600_hdmi_audioinfoframe(encoder, audio.channels - 1, 0, 0, 0, 0, 0, 0,
- 0);
+ err = hdmi_audio_infoframe_init(&frame);
+ if (err < 0) {
+ DRM_ERROR("failed to setup audio infoframe\n");
+ return;
+ }
+
+ frame.channels = audio.channels;
+
+ err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ DRM_ERROR("failed to pack audio infoframe\n");
+ return;
+ }
+ r600_hdmi_update_audio_infoframe(encoder, buffer, sizeof(buffer));
r600_hdmi_audio_workaround(encoder);
}
@@ -544,7 +488,6 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
/* Called for ATOM_ENCODER_MODE_HDMI only */
if (!dig || !dig->afmt) {
- WARN_ON(1);
return;
}
if (!dig->afmt->enabled)
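The hand-rolled r600_hdmi_videoinfoframe()/r600_hdmi_audioinfoframe() builders above are replaced by the generic <linux/hdmi.h> helpers: the frame is filled from the display mode, packed (header plus checksum) into a byte buffer, and only the register write remains driver-specific. Note the "buffer + 3" offset in the new update functions: the packed layout is type, version, length, checksum, payload, and the HDMI0_* registers are fed starting at the checksum byte. A hedged sketch of the helper flow in isolation (kernel context and include paths assumed; this mirrors the diff, it is not the driver code itself):

/*
 * Hedged sketch: build and pack an AVI infoframe with the generic helpers,
 * then hand the packed bytes to a chip-specific writer such as
 * r600_hdmi_update_avi_infoframe() above.
 */
#include <linux/hdmi.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>

static int example_write_avi_infoframe(struct drm_encoder *encoder,
					struct drm_display_mode *mode)
{
	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	struct hdmi_avi_infoframe frame;
	ssize_t err;

	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
	if (err < 0)
		return err;

	/* hdmi_avi_infoframe_pack() also computes the checksum byte */
	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
	if (err < 0)
		return err;

	r600_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
	return 0;
}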
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 4a53402b185..a42ba11a3be 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -182,6 +182,8 @@
#define CP_COHER_BASE 0x85F8
#define CP_DEBUG 0xC1FC
#define R_0086D8_CP_ME_CNTL 0x86D8
+#define S_0086D8_CP_PFP_HALT(x) (((x) & 1)<<26)
+#define C_0086D8_CP_PFP_HALT(x) ((x) & 0xFBFFFFFF)
#define S_0086D8_CP_ME_HALT(x) (((x) & 1)<<28)
#define C_0086D8_CP_ME_HALT(x) ((x) & 0xEFFFFFFF)
#define CP_ME_RAM_DATA 0xC160
@@ -1143,19 +1145,10 @@
/*
* PM4
*/
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
+#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
(((reg) >> 2) & 0xFFFF) | \
((n) & 0x3FFF) << 16)
-#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
(((op) & 0xFF) << 8) | \
((n) & 0x3FFF) << 16)
@@ -1328,6 +1321,7 @@
#define G_008010_VC_BUSY(x) (((x) >> 11) & 1)
#define G_008010_DB03_CLEAN(x) (((x) >> 12) & 1)
#define G_008010_CB03_CLEAN(x) (((x) >> 13) & 1)
+#define G_008010_TA_BUSY(x) (((x) >> 14) & 1)
#define G_008010_VGT_BUSY_NO_DMA(x) (((x) >> 16) & 1)
#define G_008010_VGT_BUSY(x) (((x) >> 17) & 1)
#define G_008010_TA03_BUSY(x) (((x) >> 18) & 1)
@@ -1395,6 +1389,7 @@
#define G_000E50_MCDW_BUSY(x) (((x) >> 13) & 1)
#define G_000E50_SEM_BUSY(x) (((x) >> 14) & 1)
#define G_000E50_RLC_BUSY(x) (((x) >> 15) & 1)
+#define G_000E50_IH_BUSY(x) (((x) >> 17) & 1)
#define G_000E50_BIF_BUSY(x) (((x) >> 29) & 1)
#define R_000E60_SRBM_SOFT_RESET 0x0E60
#define S_000E60_SOFT_RESET_BIF(x) (((x) & 1) << 1)
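The PM4 packet-type constants and header accessors are removed from r600d.h above because this series moves them into common radeon code (the RADEON_PACKET_* and RADEON_CP_PACKET_GET_* names); only the PACKET0()/PACKET3() composition macros stay here. A standalone sketch of how those macros compose and decode a header (type in bits 31:30, count in bits 29:16, register dword or opcode in the low bits), with the constants redefined locally for illustration:

/*
 * Standalone sketch (not from the patch) of PM4 header composition,
 * matching the PACKET0()/PACKET3() macros in r600d.h.
 */
#include <stdio.h>
#include <stdint.h>

#define RADEON_PACKET_TYPE0 0u     /* local illustrative copies */
#define RADEON_PACKET_TYPE3 3u

#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) |		\
			 (((reg) >> 2) & 0xFFFF) |		\
			 ((n) & 0x3FFF) << 16)
#define PACKET3(op, n)  ((RADEON_PACKET_TYPE3 << 30) |		\
			 (((op) & 0xFF) << 8) |			\
			 ((n) & 0x3FFF) << 16)

/* decode helpers mirroring RADEON_CP_PACKET_GET_TYPE()/GET_COUNT() */
#define GET_TYPE(h)  (((h) >> 30) & 3)
#define GET_COUNT(h) (((h) >> 16) & 0x3FFF)

int main(void)
{
	uint32_t p0 = PACKET0(0x85F8 /* CP_COHER_BASE */, 0);
	uint32_t p3 = PACKET3(0x10 /* illustrative opcode */, 4);

	printf("PACKET0 header 0x%08x: type %u, count %u\n",
	       p0, GET_TYPE(p0), GET_COUNT(p0));
	printf("PACKET3 header 0x%08x: type %u, count %u\n",
	       p3, GET_TYPE(p3), GET_COUNT(p3));
	return 0;
}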
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index a08f657329a..8263af3fd83 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -136,6 +136,15 @@ extern int radeon_lockup_timeout;
#define RADEON_RESET_GFX (1 << 0)
#define RADEON_RESET_COMPUTE (1 << 1)
#define RADEON_RESET_DMA (1 << 2)
+#define RADEON_RESET_CP (1 << 3)
+#define RADEON_RESET_GRBM (1 << 4)
+#define RADEON_RESET_DMA1 (1 << 5)
+#define RADEON_RESET_RLC (1 << 6)
+#define RADEON_RESET_SEM (1 << 7)
+#define RADEON_RESET_IH (1 << 8)
+#define RADEON_RESET_VMC (1 << 9)
+#define RADEON_RESET_MC (1 << 10)
+#define RADEON_RESET_DISPLAY (1 << 11)
/*
* Errata workarounds.
@@ -341,7 +350,6 @@ struct radeon_bo {
struct drm_gem_object gem_base;
struct ttm_bo_kmap_obj dma_buf_vmap;
- int vmapping_count;
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
@@ -771,6 +779,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, struct radeon_vm *vm,
unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
+void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
struct radeon_ib *const_ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
@@ -1169,6 +1178,10 @@ struct radeon_asic {
bool (*gui_idle)(struct radeon_device *rdev);
/* wait for mc_idle */
int (*mc_wait_for_idle)(struct radeon_device *rdev);
+ /* get the reference clock */
+ u32 (*get_xclk)(struct radeon_device *rdev);
+ /* get the gpu clock counter */
+ uint64_t (*get_gpu_clock_counter)(struct radeon_device *rdev);
/* gart */
struct {
void (*tlb_flush)(struct radeon_device *rdev);
@@ -1179,7 +1192,9 @@ struct radeon_asic {
void (*fini)(struct radeon_device *rdev);
u32 pt_ring_index;
- void (*set_page)(struct radeon_device *rdev, uint64_t pe,
+ void (*set_page)(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
} vm;
@@ -1757,6 +1772,7 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
#define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA))
#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
(rdev->flags & RADEON_IS_IGP))
+#define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND))
/*
* BIOS helpers.
@@ -1801,7 +1817,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
-#define radeon_asic_vm_set_page(rdev, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
@@ -1847,10 +1863,13 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc))
#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
+#define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev))
+#define radeon_get_gpu_clock_counter(rdev) (rdev)->asic->get_gpu_clock_counter((rdev))
/* Common functions */
/* AGP */
extern int radeon_gpu_reset(struct radeon_device *rdev);
+extern void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung);
extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
@@ -1972,6 +1991,19 @@ static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
static inline void radeon_acpi_fini(struct radeon_device *rdev) { }
#endif
+int radeon_cs_packet_parse(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx);
+bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p);
+void radeon_cs_dump_packet(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt);
+int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc,
+ int nomm);
+int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
+ uint32_t *vline_start_end,
+ uint32_t *vline_status);
+
#include "radeon_object.h"
#endif
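The new get_xclk/get_gpu_clock_counter callbacks are reached through the radeon_get_xclk()/radeon_get_gpu_clock_counter() macros above, so each family's asic table (r600_get_xclk vs. rv770_get_xclk vs. si_get_xclk in radeon_asic.c below) supplies its own implementation. A simplified standalone model of that function-pointer dispatch, not the real driver structures:

/*
 * Standalone sketch of the asic callback dispatch pattern; structure and
 * function names are stand-ins, only the indirection mirrors the driver.
 */
#include <stdio.h>
#include <stdint.h>

struct radeon_device;                        /* forward declaration; defined below */

struct radeon_asic_model {
	uint32_t (*get_xclk)(struct radeon_device *rdev);
	uint64_t (*get_gpu_clock_counter)(struct radeon_device *rdev);
};

struct radeon_device {
	const struct radeon_asic_model *asic;
};

#define radeon_get_xclk(rdev)              ((rdev)->asic->get_xclk((rdev)))
#define radeon_get_gpu_clock_counter(rdev) ((rdev)->asic->get_gpu_clock_counter((rdev)))

/* illustrative implementations standing in for r600_get_xclk() etc. */
static uint32_t fake_r600_get_xclk(struct radeon_device *rdev) { (void)rdev; return 27000; }
static uint64_t fake_r600_get_gpu_clock_counter(struct radeon_device *rdev) { (void)rdev; return 123456789ULL; }

static const struct radeon_asic_model fake_r600_asic = {
	.get_xclk = fake_r600_get_xclk,
	.get_gpu_clock_counter = fake_r600_get_gpu_clock_counter,
};

int main(void)
{
	struct radeon_device rdev = { .asic = &fake_r600_asic };

	printf("xclk: %u, gpu clock counter: %llu\n",
	       radeon_get_xclk(&rdev),
	       (unsigned long long)radeon_get_gpu_clock_counter(&rdev));
	return 0;
}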
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 0b202c07fe5..aba0a893ea9 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -934,6 +934,8 @@ static struct radeon_asic r600_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
+ .get_xclk = &r600_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -946,7 +948,7 @@ static struct radeon_asic r600_asic = {
.cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &r600_gpu_is_lockup,
+ .is_lockup = &r600_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &r600_dma_ring_ib_execute,
@@ -1018,6 +1020,8 @@ static struct radeon_asic rs780_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
+ .get_xclk = &r600_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1030,7 +1034,7 @@ static struct radeon_asic rs780_asic = {
.cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &r600_gpu_is_lockup,
+ .is_lockup = &r600_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &r600_dma_ring_ib_execute,
@@ -1102,6 +1106,8 @@ static struct radeon_asic rv770_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
+ .get_xclk = &rv770_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1114,7 +1120,7 @@ static struct radeon_asic rv770_asic = {
.cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &r600_gpu_is_lockup,
+ .is_lockup = &r600_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &r600_dma_ring_ib_execute,
@@ -1186,6 +1192,8 @@ static struct radeon_asic evergreen_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &rv770_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1198,7 +1206,7 @@ static struct radeon_asic evergreen_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &evergreen_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &evergreen_dma_ring_ib_execute,
@@ -1207,7 +1215,7 @@ static struct radeon_asic evergreen_asic = {
.cs_parse = &evergreen_dma_cs_parse,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
- .is_lockup = &r600_dma_is_lockup,
+ .is_lockup = &evergreen_dma_is_lockup,
}
},
.irq = {
@@ -1270,6 +1278,8 @@ static struct radeon_asic sumo_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &r600_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1282,7 +1292,7 @@ static struct radeon_asic sumo_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &evergreen_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &evergreen_dma_ring_ib_execute,
@@ -1291,7 +1301,7 @@ static struct radeon_asic sumo_asic = {
.cs_parse = &evergreen_dma_cs_parse,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
- .is_lockup = &r600_dma_is_lockup,
+ .is_lockup = &evergreen_dma_is_lockup,
}
},
.irq = {
@@ -1354,6 +1364,8 @@ static struct radeon_asic btc_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &rv770_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1366,7 +1378,7 @@ static struct radeon_asic btc_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &evergreen_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &evergreen_dma_ring_ib_execute,
@@ -1375,7 +1387,7 @@ static struct radeon_asic btc_asic = {
.cs_parse = &evergreen_dma_cs_parse,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
- .is_lockup = &r600_dma_is_lockup,
+ .is_lockup = &evergreen_dma_is_lockup,
}
},
.irq = {
@@ -1438,6 +1450,8 @@ static struct radeon_asic cayman_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &rv770_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1445,7 +1459,7 @@ static struct radeon_asic cayman_asic = {
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
.set_page = &cayman_vm_set_page,
},
.ring = {
@@ -1457,7 +1471,7 @@ static struct radeon_asic cayman_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
@@ -1468,7 +1482,7 @@ static struct radeon_asic cayman_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
@@ -1479,7 +1493,7 @@ static struct radeon_asic cayman_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[R600_RING_TYPE_DMA_INDEX] = {
@@ -1565,6 +1579,8 @@ static struct radeon_asic trinity_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &r600_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1572,7 +1588,7 @@ static struct radeon_asic trinity_asic = {
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
.set_page = &cayman_vm_set_page,
},
.ring = {
@@ -1584,7 +1600,7 @@ static struct radeon_asic trinity_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
@@ -1595,7 +1611,7 @@ static struct radeon_asic trinity_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
@@ -1606,7 +1622,7 @@ static struct radeon_asic trinity_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[R600_RING_TYPE_DMA_INDEX] = {
@@ -1692,6 +1708,8 @@ static struct radeon_asic si_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &si_get_xclk,
+ .get_gpu_clock_counter = &si_get_gpu_clock_counter,
.gart = {
.tlb_flush = &si_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1699,7 +1717,7 @@ static struct radeon_asic si_asic = {
.vm = {
.init = &si_vm_init,
.fini = &si_vm_fini,
- .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
.set_page = &si_vm_set_page,
},
.ring = {
@@ -1711,7 +1729,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &si_gpu_is_lockup,
+ .is_lockup = &si_gfx_is_lockup,
.vm_flush = &si_vm_flush,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
@@ -1722,7 +1740,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &si_gpu_is_lockup,
+ .is_lockup = &si_gfx_is_lockup,
.vm_flush = &si_vm_flush,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
@@ -1733,7 +1751,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &si_gpu_is_lockup,
+ .is_lockup = &si_gfx_is_lockup,
.vm_flush = &si_vm_flush,
},
[R600_RING_TYPE_DMA_INDEX] = {
@@ -1744,7 +1762,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
- .is_lockup = &cayman_dma_is_lockup,
+ .is_lockup = &si_dma_is_lockup,
.vm_flush = &si_dma_vm_flush,
},
[CAYMAN_RING_TYPE_DMA1_INDEX] = {
@@ -1755,7 +1773,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
- .is_lockup = &cayman_dma_is_lockup,
+ .is_lockup = &si_dma_is_lockup,
.vm_flush = &si_dma_vm_flush,
}
},
@@ -1944,9 +1962,13 @@ int radeon_asic_init(struct radeon_device *rdev)
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
+ case CHIP_OLAND:
rdev->asic = &si_asic;
/* set num crtcs */
- rdev->num_crtc = 6;
+ if (rdev->family == CHIP_OLAND)
+ rdev->num_crtc = 2;
+ else
+ rdev->num_crtc = 6;
break;
default:
/* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 15d70e61307..3535f73ad3e 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -319,7 +319,7 @@ void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
bool emit_wait);
void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
-bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
@@ -389,7 +389,8 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct radeon_sa_bo *vb);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
-uint64_t r600_get_gpu_clock(struct radeon_device *rdev);
+u32 r600_get_xclk(struct radeon_device *rdev);
+uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
/*
* rv770,rv730,rv710,rv740
@@ -407,6 +408,7 @@ int rv770_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
+u32 rv770_get_xclk(struct radeon_device *rdev);
/*
* evergreen
@@ -422,7 +424,8 @@ int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
int evergreen_suspend(struct radeon_device *rdev);
int evergreen_resume(struct radeon_device *rdev);
-bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int evergreen_asic_reset(struct radeon_device *rdev);
void evergreen_bandwidth_update(struct radeon_device *rdev);
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
@@ -473,13 +476,16 @@ int cayman_vm_init(struct radeon_device *rdev);
void cayman_vm_fini(struct radeon_device *rdev);
void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
-void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+void cayman_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
struct radeon_ib *ib);
+bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
@@ -496,23 +502,27 @@ int si_init(struct radeon_device *rdev);
void si_fini(struct radeon_device *rdev);
int si_suspend(struct radeon_device *rdev);
int si_resume(struct radeon_device *rdev);
-bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int si_asic_reset(struct radeon_device *rdev);
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int si_irq_set(struct radeon_device *rdev);
int si_irq_process(struct radeon_device *rdev);
int si_vm_init(struct radeon_device *rdev);
void si_vm_fini(struct radeon_device *rdev);
-void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+void si_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
-uint64_t si_get_gpu_clock(struct radeon_device *rdev);
int si_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+u32 si_get_xclk(struct radeon_device *rdev);
+uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 15f5ded65e0..d96070bf838 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -43,6 +43,12 @@ struct atpx_verify_interface {
u32 function_bits; /* supported functions bit vector */
} __packed;
+struct atpx_px_params {
+ u16 size; /* structure size in bytes (includes size field) */
+ u32 valid_flags; /* which flags are valid */
+ u32 flags; /* flags */
+} __packed;
+
struct atpx_power_control {
u16 size;
u8 dgpu_state;
@@ -123,9 +129,61 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
}
/**
+ * radeon_atpx_validate - validate ATPX functions
+ *
+ * @atpx: radeon atpx struct
+ *
+ * Validate that required functions are enabled (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_validate(struct radeon_atpx *atpx)
+{
+ /* make sure required functions are enabled */
+ /* dGPU power control is required */
+ atpx->functions.power_cntl = true;
+
+ if (atpx->functions.px_params) {
+ union acpi_object *info;
+ struct atpx_px_params output;
+ size_t size;
+ u32 valid_bits;
+
+ info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL);
+ if (!info)
+ return -EIO;
+
+ memset(&output, 0, sizeof(output));
+
+ size = *(u16 *) info->buffer.pointer;
+ if (size < 10) {
+ printk("ATPX buffer is too small: %zu\n", size);
+ kfree(info);
+ return -EINVAL;
+ }
+ size = min(sizeof(output), size);
+
+ memcpy(&output, info->buffer.pointer, size);
+
+ valid_bits = output.flags & output.valid_flags;
+ /* if separate mux flag is set, mux controls are required */
+ if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
+ atpx->functions.i2c_mux_cntl = true;
+ atpx->functions.disp_mux_cntl = true;
+ }
+ /* if any outputs are muxed, mux controls are required */
+ if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
+ ATPX_TV_SIGNAL_MUXED |
+ ATPX_DFP_SIGNAL_MUXED))
+ atpx->functions.disp_mux_cntl = true;
+
+ kfree(info);
+ }
+ return 0;
+}
+
+/**
* radeon_atpx_verify_interface - verify ATPX
*
- * @handle: acpi handle
* @atpx: radeon atpx struct
*
* Execute the ATPX_FUNCTION_VERIFY_INTERFACE ATPX function
@@ -406,8 +464,19 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
*/
static int radeon_atpx_init(void)
{
+ int r;
+
/* set up the ATPX handle */
- return radeon_atpx_verify_interface(&radeon_atpx_priv.atpx);
+ r = radeon_atpx_verify_interface(&radeon_atpx_priv.atpx);
+ if (r)
+ return r;
+
+ /* validate the atpx setup */
+ r = radeon_atpx_validate(&radeon_atpx_priv.atpx);
+ if (r)
+ return r;
+
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 9143fc45e35..efc4f6441ef 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -27,6 +27,8 @@
* Authors:
* Kevin E. Martin <martin@valinux.com>
* Gareth Hughes <gareth@valinux.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <linux/module.h>
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 5407459e56d..70d38241b08 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -29,9 +29,6 @@
#include "radeon_reg.h"
#include "radeon.h"
-void r100_cs_dump_packet(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt);
-
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
struct drm_device *ddev = p->rdev->ddev;
@@ -128,18 +125,6 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
return 0;
}
-static void radeon_cs_sync_to(struct radeon_cs_parser *p,
- struct radeon_fence *fence)
-{
- struct radeon_fence *other;
-
- if (!fence)
- return;
-
- other = p->ib.sync_to[fence->ring];
- p->ib.sync_to[fence->ring] = radeon_fence_later(fence, other);
-}
-
static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
int i;
@@ -148,7 +133,7 @@ static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
if (!p->relocs[i].robj)
continue;
- radeon_cs_sync_to(p, p->relocs[i].robj->tbo.sync_obj);
+ radeon_ib_sync_to(&p->ib, p->relocs[i].robj->tbo.sync_obj);
}
}
@@ -203,7 +188,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->chunks[i].length_dw = user_chunk.length_dw;
p->chunks[i].kdata = NULL;
p->chunks[i].chunk_id = user_chunk.chunk_id;
-
+ p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
p->chunk_relocs_idx = i;
}
@@ -226,9 +211,6 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return -EINVAL;
}
- p->chunks[i].length_dw = user_chunk.length_dw;
- p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
-
cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
(p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
@@ -478,8 +460,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
goto out;
}
radeon_cs_sync_rings(parser);
- radeon_cs_sync_to(parser, vm->fence);
- radeon_cs_sync_to(parser, radeon_vm_grab_id(rdev, vm, parser->ring));
+ radeon_ib_sync_to(&parser->ib, vm->fence);
+ radeon_ib_sync_to(&parser->ib, radeon_vm_grab_id(
+ rdev, vm, parser->ring));
if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib_idx != -1)) {
@@ -648,3 +631,152 @@ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
idx_value = ibc->kpage[new_page][pg_offset/4];
return idx_value;
}
+
+/**
+ * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
+ * @p: parser structure holding parsing context.
+ * @pkt: where to store packet information
+ * @idx: index of the packet header in the ib chunk
+ *
+ * Assumes that chunk_ib_idx is properly set. Returns -EINVAL if the
+ * packet is bigger than the remaining ib size or if the packet type is
+ * unknown.
+ **/
+int radeon_cs_packet_parse(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx)
+{
+ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ struct radeon_device *rdev = p->rdev;
+ uint32_t header;
+
+ if (idx >= ib_chunk->length_dw) {
+ DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+ idx, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ header = radeon_get_ib_value(p, idx);
+ pkt->idx = idx;
+ pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
+ pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
+ pkt->one_reg_wr = 0;
+ switch (pkt->type) {
+ case RADEON_PACKET_TYPE0:
+ if (rdev->family < CHIP_R600) {
+ pkt->reg = R100_CP_PACKET0_GET_REG(header);
+ pkt->one_reg_wr =
+ RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
+ } else
+ pkt->reg = R600_CP_PACKET0_GET_REG(header);
+ break;
+ case RADEON_PACKET_TYPE3:
+ pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
+ break;
+ case RADEON_PACKET_TYPE2:
+ pkt->count = -1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
+ return -EINVAL;
+ }
+ if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
+ DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
+ pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
+ * @p: structure holding the parser context.
+ *
+ * Check if the next packet is NOP relocation packet3.
+ **/
+bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet p3reloc;
+ int r;
+
+ r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r)
+ return false;
+ if (p3reloc.type != RADEON_PACKET_TYPE3)
+ return false;
+ if (p3reloc.opcode != RADEON_PACKET3_NOP)
+ return false;
+ return true;
+}
+
+/**
+ * radeon_cs_dump_packet() - dump raw packet context
+ * @p: structure holding the parser context.
+ * @pkt: structure holding the packet.
+ *
+ * Used mostly for debugging and error reporting.
+ **/
+void radeon_cs_dump_packet(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt)
+{
+ volatile uint32_t *ib;
+ unsigned i;
+ unsigned idx;
+
+ ib = p->ib.ptr;
+ idx = pkt->idx;
+ for (i = 0; i <= (pkt->count + 1); i++, idx++)
+ DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
+}
+
+/**
+ * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
+ * @p: parser structure holding parsing context.
+ * @cs_reloc: where to store the resulting reloc information
+ * @nomm: non-zero when running without a memory manager (legacy UMS path)
+ *
+ * Check if the next packet is a relocation packet3 and, if so, return the
+ * matching reloc entry through @cs_reloc. On the legacy @nomm path the GPU
+ * offset is taken directly from the relocation chunk data.
+ **/
+int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc,
+ int nomm)
+{
+ struct radeon_cs_chunk *relocs_chunk;
+ struct radeon_cs_packet p3reloc;
+ unsigned idx;
+ int r;
+
+ if (p->chunk_relocs_idx == -1) {
+ DRM_ERROR("No relocation chunk !\n");
+ return -EINVAL;
+ }
+ *cs_reloc = NULL;
+ relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r)
+ return r;
+ p->idx += p3reloc.count + 2;
+ if (p3reloc.type != RADEON_PACKET_TYPE3 ||
+ p3reloc.opcode != RADEON_PACKET3_NOP) {
+ DRM_ERROR("No packet3 for relocation for packet at %d.\n",
+ p3reloc.idx);
+ radeon_cs_dump_packet(p, &p3reloc);
+ return -EINVAL;
+ }
+ idx = radeon_get_ib_value(p, p3reloc.idx + 1);
+ if (idx >= relocs_chunk->length_dw) {
+ DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+ idx, relocs_chunk->length_dw);
+ radeon_cs_dump_packet(p, &p3reloc);
+ return -EINVAL;
+ }
+ /* FIXME: we assume reloc size is 4 dwords */
+ if (nomm) {
+ *cs_reloc = p->relocs;
+ (*cs_reloc)->lobj.gpu_offset =
+ (u64)relocs_chunk->kdata[idx + 3] << 32;
+ (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
+ } else
+ *cs_reloc = p->relocs_ptr[(idx / 4)];
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 0d67674b64b..b097d5b4ff3 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -246,8 +246,14 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int i = 0;
struct drm_crtc *crtc_p;
- /* avivo cursor image can't end on 128 pixel boundary or
+ /*
+ * avivo cursor image can't end on 128 pixel boundary or
* go past the end of the frame if both crtcs are enabled
+ *
+ * NOTE: It is safe to access crtc->enabled of other crtcs
+ * without holding either the mode_config lock or the other
+ * crtc's lock as long as write access to this flag _always_
+ * grabs all locks.
*/
list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) {
if (crtc_p->enabled)
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0d6562bb0c9..44b8034a400 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -93,6 +93,7 @@ static const char radeon_family_name[][16] = {
"TAHITI",
"PITCAIRN",
"VERDE",
+ "OLAND",
"LAST",
};
@@ -758,6 +759,11 @@ int radeon_atombios_init(struct radeon_device *rdev)
atom_card_info->pll_write = cail_pll_write;
rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
+ if (!rdev->mode_info.atom_context) {
+ radeon_atombios_fini(rdev);
+ return -ENOMEM;
+ }
+
mutex_init(&rdev->mode_info.atom_context->mutex);
radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
atom_allocate_fb_scratch(rdev->mode_info.atom_context);
@@ -777,9 +783,11 @@ void radeon_atombios_fini(struct radeon_device *rdev)
{
if (rdev->mode_info.atom_context) {
kfree(rdev->mode_info.atom_context->scratch);
- kfree(rdev->mode_info.atom_context);
}
+ kfree(rdev->mode_info.atom_context);
+ rdev->mode_info.atom_context = NULL;
kfree(rdev->mode_info.atom_card_info);
+ rdev->mode_info.atom_card_info = NULL;
}
/* COMBIOS */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 05c96fa0b05..e38fd559f1a 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1089,12 +1089,12 @@ radeon_framebuffer_init(struct drm_device *dev,
{
int ret;
rfb->obj = obj;
+ drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
if (ret) {
rfb->obj = NULL;
return ret;
}
- drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index d9bf96ee299..167758488ed 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -118,20 +118,32 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
int radeon_mode_dumb_destroy(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle);
-struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj,
- int flags);
-struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf);
+struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
+ size_t size,
+ struct sg_table *sg);
+int radeon_gem_prime_pin(struct drm_gem_object *obj);
+void *radeon_gem_prime_vmap(struct drm_gem_object *obj);
+void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor);
void radeon_debugfs_cleanup(struct drm_minor *minor);
#endif
+/* atpx handler */
+#if defined(CONFIG_VGA_SWITCHEROO)
+void radeon_register_atpx_handler(void);
+void radeon_unregister_atpx_handler(void);
+#else
+static inline void radeon_register_atpx_handler(void) {}
+static inline void radeon_unregister_atpx_handler(void) {}
+#endif
int radeon_no_wb;
-int radeon_modeset = -1;
+int radeon_modeset = 1;
int radeon_dynclks = -1;
int radeon_r4xx_atom = 0;
int radeon_agpmode = 0;
@@ -199,6 +211,14 @@ module_param_named(msi, radeon_msi, int, 0444);
MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)");
module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444);
+static struct pci_device_id pciidlist[] = {
+ radeon_PCI_IDS
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+#ifdef CONFIG_DRM_RADEON_UMS
+
static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -227,14 +247,6 @@ static int radeon_resume(struct drm_device *dev)
return 0;
}
-static struct pci_device_id pciidlist[] = {
- radeon_PCI_IDS
-};
-
-#if defined(CONFIG_DRM_RADEON_KMS)
-MODULE_DEVICE_TABLE(pci, pciidlist);
-#endif
-
static const struct file_operations radeon_driver_old_fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -284,6 +296,8 @@ static struct drm_driver driver_old = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+#endif
+
static struct drm_driver kms_driver;
static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
@@ -397,8 +411,13 @@ static struct drm_driver kms_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = radeon_gem_prime_export,
- .gem_prime_import = radeon_gem_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_pin = radeon_gem_prime_pin,
+ .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
+ .gem_prime_vmap = radeon_gem_prime_vmap,
+ .gem_prime_vunmap = radeon_gem_prime_vunmap,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -411,10 +430,12 @@ static struct drm_driver kms_driver = {
static struct drm_driver *driver;
static struct pci_driver *pdriver;
+#ifdef CONFIG_DRM_RADEON_UMS
static struct pci_driver radeon_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
};
+#endif
static struct pci_driver radeon_kms_pci_driver = {
.name = DRIVER_NAME,
@@ -427,28 +448,6 @@ static struct pci_driver radeon_kms_pci_driver = {
static int __init radeon_init(void)
{
- driver = &driver_old;
- pdriver = &radeon_pci_driver;
- driver->num_ioctls = radeon_max_ioctl;
-#ifdef CONFIG_VGA_CONSOLE
- if (vgacon_text_force() && radeon_modeset == -1) {
- DRM_INFO("VGACON disable radeon kernel modesetting.\n");
- driver = &driver_old;
- pdriver = &radeon_pci_driver;
- driver->driver_features &= ~DRIVER_MODESET;
- radeon_modeset = 0;
- }
-#endif
- /* if enabled by default */
- if (radeon_modeset == -1) {
-#ifdef CONFIG_DRM_RADEON_KMS
- DRM_INFO("radeon defaulting to kernel modesetting.\n");
- radeon_modeset = 1;
-#else
- DRM_INFO("radeon defaulting to userspace modesetting.\n");
- radeon_modeset = 0;
-#endif
- }
if (radeon_modeset == 1) {
DRM_INFO("radeon kernel modesetting enabled.\n");
driver = &kms_driver;
@@ -456,9 +455,21 @@ static int __init radeon_init(void)
driver->driver_features |= DRIVER_MODESET;
driver->num_ioctls = radeon_max_kms_ioctl;
radeon_register_atpx_handler();
+
+ } else {
+#ifdef CONFIG_DRM_RADEON_UMS
+ DRM_INFO("radeon userspace modesetting enabled.\n");
+ driver = &driver_old;
+ pdriver = &radeon_pci_driver;
+ driver->driver_features &= ~DRIVER_MODESET;
+ driver->num_ioctls = radeon_max_ioctl;
+#else
+ DRM_ERROR("No UMS support in radeon module!\n");
+ return -EINVAL;
+#endif
}
- /* if the vga console setting is enabled still
- * let modprobe override it */
+
+ /* let modprobe override vga console setting */
return drm_pci_init(driver, pdriver);
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index e7fdf163a8c..b369d42f7de 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -113,6 +113,9 @@
#define DRIVER_MINOR 33
#define DRIVER_PATCHLEVEL 0
+/* The rest of the file is DEPRECATED! */
+#ifdef CONFIG_DRM_RADEON_UMS
+
enum radeon_cp_microcode_version {
UCODE_R100,
UCODE_R200,
@@ -418,8 +421,6 @@ extern int radeon_driver_open(struct drm_device *dev,
struct drm_file *file_priv);
extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
-extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg);
extern int radeon_master_create(struct drm_device *dev, struct drm_master *master);
extern void radeon_master_destroy(struct drm_device *dev, struct drm_master *master);
@@ -462,15 +463,6 @@ extern void r600_blit_swap(struct drm_device *dev,
int sx, int sy, int dx, int dy,
int w, int h, int src_pitch, int dst_pitch, int cpp);
-/* atpx handler */
-#if defined(CONFIG_VGA_SWITCHEROO)
-void radeon_register_atpx_handler(void);
-void radeon_unregister_atpx_handler(void);
-#else
-static inline void radeon_register_atpx_handler(void) {}
-static inline void radeon_unregister_atpx_handler(void) {}
-#endif
-
/* Flags for stats.boxes
*/
#define RADEON_BOX_DMA_IDLE 0x1
@@ -2167,4 +2159,6 @@ extern void radeon_commit_ring(drm_radeon_private_t *dev_priv);
} while (0)
+#endif /* CONFIG_DRM_RADEON_UMS */
+
#endif /* __RADEON_DRV_H__ */
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index d1fafeabea0..2d91123f275 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -91,6 +91,7 @@ enum radeon_family {
CHIP_TAHITI,
CHIP_PITCAIRN,
CHIP_VERDE,
+ CHIP_OLAND,
CHIP_LAST,
};
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index cc8489d8c6d..b1746741bc5 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -187,9 +187,10 @@ out_unref:
return ret;
}
-static int radeonfb_create(struct radeon_fbdev *rfbdev,
+static int radeonfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
struct radeon_device *rdev = rfbdev->rdev;
struct fb_info *info;
struct drm_framebuffer *fb = NULL;
@@ -293,28 +294,13 @@ out_unref:
}
if (fb && ret) {
drm_gem_object_unreference(gobj);
+ drm_framebuffer_unregister_private(fb);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
return ret;
}
-static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = radeonfb_create(rfbdev, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
void radeon_fb_output_poll_changed(struct radeon_device *rdev)
{
drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
@@ -339,6 +325,7 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
rfb->obj = NULL;
}
drm_fb_helper_fini(&rfbdev->helper);
+ drm_framebuffer_unregister_private(&rfb->base);
drm_framebuffer_cleanup(&rfb->base);
return 0;
@@ -347,7 +334,7 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
.gamma_set = radeon_crtc_fb_gamma_set,
.gamma_get = radeon_crtc_fb_gamma_get,
- .fb_probe = radeon_fb_find_or_create_single,
+ .fb_probe = radeonfb_create,
};
int radeon_fbdev_init(struct radeon_device *rdev)
@@ -377,6 +364,10 @@ int radeon_fbdev_init(struct radeon_device *rdev)
}
drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(rdev->ddev);
+
drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 6e24f84755b..2c1341f63dc 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -929,6 +929,7 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
*/
static int radeon_vm_update_pdes(struct radeon_device *rdev,
struct radeon_vm *vm,
+ struct radeon_ib *ib,
uint64_t start, uint64_t end)
{
static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
@@ -971,7 +972,7 @@ retry:
((last_pt + incr * count) != pt)) {
if (count) {
- radeon_asic_vm_set_page(rdev, last_pde,
+ radeon_asic_vm_set_page(rdev, ib, last_pde,
last_pt, count, incr,
RADEON_VM_PAGE_VALID);
}
@@ -985,7 +986,7 @@ retry:
}
if (count) {
- radeon_asic_vm_set_page(rdev, last_pde, last_pt, count,
+ radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
incr, RADEON_VM_PAGE_VALID);
}
@@ -1009,6 +1010,7 @@ retry:
*/
static void radeon_vm_update_ptes(struct radeon_device *rdev,
struct radeon_vm *vm,
+ struct radeon_ib *ib,
uint64_t start, uint64_t end,
uint64_t dst, uint32_t flags)
{
@@ -1038,7 +1040,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
if ((last_pte + 8 * count) != pte) {
if (count) {
- radeon_asic_vm_set_page(rdev, last_pte,
+ radeon_asic_vm_set_page(rdev, ib, last_pte,
last_dst, count,
RADEON_GPU_PAGE_SIZE,
flags);
@@ -1056,7 +1058,8 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
}
if (count) {
- radeon_asic_vm_set_page(rdev, last_pte, last_dst, count,
+ radeon_asic_vm_set_page(rdev, ib, last_pte,
+ last_dst, count,
RADEON_GPU_PAGE_SIZE, flags);
}
}
@@ -1080,8 +1083,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
struct ttm_mem_reg *mem)
{
unsigned ridx = rdev->asic->vm.pt_ring_index;
- struct radeon_ring *ring = &rdev->ring[ridx];
- struct radeon_semaphore *sem = NULL;
+ struct radeon_ib ib;
struct radeon_bo_va *bo_va;
unsigned nptes, npdes, ndw;
uint64_t addr;
@@ -1124,25 +1126,13 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
bo_va->valid = false;
}
- if (vm->fence && radeon_fence_signaled(vm->fence)) {
- radeon_fence_unref(&vm->fence);
- }
-
- if (vm->fence && vm->fence->ring != ridx) {
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- return r;
- }
- }
-
nptes = radeon_bo_ngpu_pages(bo);
/* assume two extra pdes in case the mapping overlaps the borders */
npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;
- /* estimate number of dw needed */
- /* semaphore, fence and padding */
- ndw = 32;
+ /* padding, etc. */
+ ndw = 64;
if (RADEON_VM_BLOCK_SIZE > 11)
/* reserve space for one header for every 2k dwords */
@@ -1161,33 +1151,31 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
/* reserve space for pde addresses */
ndw += npdes * 2;
- r = radeon_ring_lock(rdev, ring, ndw);
- if (r) {
- return r;
- }
+ /* update too big for an IB */
+ if (ndw > 0xfffff)
+ return -ENOMEM;
- if (sem && radeon_fence_need_sync(vm->fence, ridx)) {
- radeon_semaphore_sync_rings(rdev, sem, vm->fence->ring, ridx);
- radeon_fence_note_sync(vm->fence, ridx);
- }
+ r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4);
+ ib.length_dw = 0;
- r = radeon_vm_update_pdes(rdev, vm, bo_va->soffset, bo_va->eoffset);
+ r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
if (r) {
- radeon_ring_unlock_undo(rdev, ring);
+ radeon_ib_free(rdev, &ib);
return r;
}
- radeon_vm_update_ptes(rdev, vm, bo_va->soffset, bo_va->eoffset,
+ radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
addr, bo_va->flags);
- radeon_fence_unref(&vm->fence);
- r = radeon_fence_emit(rdev, &vm->fence, ridx);
+ radeon_ib_sync_to(&ib, vm->fence);
+ r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
- radeon_ring_unlock_undo(rdev, ring);
+ radeon_ib_free(rdev, &ib);
return r;
}
- radeon_ring_unlock_commit(rdev, ring);
- radeon_semaphore_free(rdev, &sem, vm->fence);
+ radeon_fence_unref(&vm->fence);
+ vm->fence = radeon_fence_ref(ib.fence);
+ radeon_ib_free(rdev, &ib);
radeon_fence_unref(&vm->last_flush);
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index e7710339a6a..8d68e972789 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -28,6 +28,8 @@
* Authors:
* Keith Whitwell <keith@tungstengraphics.com>
 * Michel Dänzer <michel@daenzer.net>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 9c312f9afb6..c75cb2c6ba7 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -185,11 +185,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (info->request == RADEON_INFO_TIMESTAMP) {
if (rdev->family >= CHIP_R600) {
value_ptr64 = (uint64_t*)((unsigned long)info->value);
- if (rdev->family >= CHIP_TAHITI) {
- value64 = si_get_gpu_clock(rdev);
- } else {
- value64 = r600_get_gpu_clock(rdev);
- }
+ value64 = radeon_get_gpu_clock_counter(rdev);
if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) {
DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
@@ -282,7 +278,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
break;
case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
/* return clock value in KHz */
- value = rdev->clock.spll.reference_freq * 10;
+ if (rdev->asic->get_xclk)
+ value = radeon_get_xclk(rdev) * 10;
+ else
+ value = rdev->clock.spll.reference_freq * 10;
break;
case RADEON_INFO_NUM_BACKENDS:
if (rdev->family >= CHIP_TAHITI)
diff --git a/drivers/gpu/drm/radeon/radeon_mem.c b/drivers/gpu/drm/radeon/radeon_mem.c
index b9f06724163..d54d2d7c903 100644
--- a/drivers/gpu/drm/radeon/radeon_mem.c
+++ b/drivers/gpu/drm/radeon/radeon_mem.c
@@ -27,6 +27,8 @@
*
* Authors:
* Keith Whitwell <keith@tungstengraphics.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 0bfa656aa87..338fd6a74e8 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -169,7 +169,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
/* starting with BTC, there is one state that is used for both
* MH and SH. Difference is that we always use the high clock index for
- * mclk.
+ * mclk and vddci.
*/
if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
(rdev->family >= CHIP_BARTS) &&
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 26c23bb651c..4940af7e75e 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -28,199 +28,71 @@
#include "radeon.h"
#include <drm/radeon_drm.h>
-#include <linux/dma-buf.h>
-
-static struct sg_table *radeon_gem_map_dma_buf(struct dma_buf_attachment *attachment,
- enum dma_data_direction dir)
+struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
- struct radeon_bo *bo = attachment->dmabuf->priv;
- struct drm_device *dev = bo->rdev->ddev;
+ struct radeon_bo *bo = gem_to_radeon_bo(obj);
int npages = bo->tbo.num_pages;
- struct sg_table *sg;
- int nents;
-
- mutex_lock(&dev->struct_mutex);
- sg = drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
- nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
- mutex_unlock(&dev->struct_mutex);
- return sg;
-}
-
-static void radeon_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
- struct sg_table *sg, enum dma_data_direction dir)
-{
- dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
- sg_free_table(sg);
- kfree(sg);
-}
-
-static void radeon_gem_dmabuf_release(struct dma_buf *dma_buf)
-{
- struct radeon_bo *bo = dma_buf->priv;
-
- if (bo->gem_base.export_dma_buf == dma_buf) {
- DRM_ERROR("unreference dmabuf %p\n", &bo->gem_base);
- bo->gem_base.export_dma_buf = NULL;
- drm_gem_object_unreference_unlocked(&bo->gem_base);
- }
-}
-
-static void *radeon_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
-{
- return NULL;
-}
-
-static void radeon_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
-
-}
-static void *radeon_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
-{
- return NULL;
-}
-
-static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
+ return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}
-static int radeon_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+void *radeon_gem_prime_vmap(struct drm_gem_object *obj)
{
- return -EINVAL;
-}
-
-static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
-{
- struct radeon_bo *bo = dma_buf->priv;
- struct drm_device *dev = bo->rdev->ddev;
+ struct radeon_bo *bo = gem_to_radeon_bo(obj);
int ret;
- mutex_lock(&dev->struct_mutex);
- if (bo->vmapping_count) {
- bo->vmapping_count++;
- goto out_unlock;
- }
-
ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
&bo->dma_buf_vmap);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
+ if (ret)
return ERR_PTR(ret);
- }
- bo->vmapping_count = 1;
-out_unlock:
- mutex_unlock(&dev->struct_mutex);
+
return bo->dma_buf_vmap.virtual;
}
-static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
- struct radeon_bo *bo = dma_buf->priv;
- struct drm_device *dev = bo->rdev->ddev;
+ struct radeon_bo *bo = gem_to_radeon_bo(obj);
- mutex_lock(&dev->struct_mutex);
- bo->vmapping_count--;
- if (bo->vmapping_count == 0) {
- ttm_bo_kunmap(&bo->dma_buf_vmap);
- }
- mutex_unlock(&dev->struct_mutex);
+ ttm_bo_kunmap(&bo->dma_buf_vmap);
}
-const static struct dma_buf_ops radeon_dmabuf_ops = {
- .map_dma_buf = radeon_gem_map_dma_buf,
- .unmap_dma_buf = radeon_gem_unmap_dma_buf,
- .release = radeon_gem_dmabuf_release,
- .kmap = radeon_gem_kmap,
- .kmap_atomic = radeon_gem_kmap_atomic,
- .kunmap = radeon_gem_kunmap,
- .kunmap_atomic = radeon_gem_kunmap_atomic,
- .mmap = radeon_gem_prime_mmap,
- .vmap = radeon_gem_prime_vmap,
- .vunmap = radeon_gem_prime_vunmap,
-};
-
-static int radeon_prime_create(struct drm_device *dev,
- size_t size,
- struct sg_table *sg,
- struct radeon_bo **pbo)
+
+struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
+ size_t size,
+ struct sg_table *sg)
{
struct radeon_device *rdev = dev->dev_private;
struct radeon_bo *bo;
int ret;
ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
- RADEON_GEM_DOMAIN_GTT, sg, pbo);
+ RADEON_GEM_DOMAIN_GTT, sg, &bo);
if (ret)
- return ret;
- bo = *pbo;
+ return ERR_PTR(ret);
bo->gem_base.driver_private = bo;
mutex_lock(&rdev->gem.mutex);
list_add_tail(&bo->list, &rdev->gem.objects);
mutex_unlock(&rdev->gem.mutex);
- return 0;
+ return &bo->gem_base;
}
-struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj,
- int flags)
+int radeon_gem_prime_pin(struct drm_gem_object *obj)
{
struct radeon_bo *bo = gem_to_radeon_bo(obj);
int ret = 0;
ret = radeon_bo_reserve(bo, false);
if (unlikely(ret != 0))
- return ERR_PTR(ret);
+ return ret;
/* pin buffer into GTT */
ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
if (ret) {
radeon_bo_unreserve(bo);
- return ERR_PTR(ret);
+ return ret;
}
radeon_bo_unreserve(bo);
- return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
-}
-struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct dma_buf_attachment *attach;
- struct sg_table *sg;
- struct radeon_bo *bo;
- int ret;
-
- if (dma_buf->ops == &radeon_dmabuf_ops) {
- bo = dma_buf->priv;
- if (bo->gem_base.dev == dev) {
- drm_gem_object_reference(&bo->gem_base);
- dma_buf_put(dma_buf);
- return &bo->gem_base;
- }
- }
-
- /* need to attach */
- attach = dma_buf_attach(dma_buf, dev->dev);
- if (IS_ERR(attach))
- return ERR_CAST(attach);
-
- sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
- goto fail_detach;
- }
-
- ret = radeon_prime_create(dev, dma_buf->size, sg, &bo);
- if (ret)
- goto fail_unmap;
-
- bo->gem_base.import_attach = attach;
-
- return &bo->gem_base;
-
-fail_unmap:
- dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
-fail_detach:
- dma_buf_detach(dma_buf, attach);
- return ERR_PTR(ret);
+ return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 5d8f735d6aa..7e2c2b7cf18 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -3706,4 +3706,19 @@
#define RV530_GB_PIPE_SELECT2 0x4124
+#define RADEON_CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define RADEON_CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define RADEON_CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
+#define RADEON_CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+#define R100_CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
+#define R600_CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
+#define RADEON_PACKET_TYPE0 0
+#define RADEON_PACKET_TYPE1 1
+#define RADEON_PACKET_TYPE2 2
+#define RADEON_PACKET_TYPE3 3
+
+#define RADEON_PACKET3_NOP 0x10
+
+#define RADEON_VLINE_STAT (1 << 12)
+
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index cd72062d5a9..8d58e268ff6 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -109,6 +109,25 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
}
/**
+ * radeon_ib_sync_to - sync to fence before executing the IB
+ *
+ * @ib: IB object to add fence to
+ * @fence: fence to sync to
+ *
+ * Sync to the fence before executing the IB
+ */
+void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence)
+{
+ struct radeon_fence *other;
+
+ if (!fence)
+ return;
+
+ other = ib->sync_to[fence->ring];
+ ib->sync_to[fence->ring] = radeon_fence_later(fence, other);
+}
+
+/**
* radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
*
* @rdev: radeon_device pointer
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 8e9057b6a36..4d20910899d 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -25,6 +25,8 @@
* Authors:
* Gareth Hughes <gareth@valinux.com>
* Kevin E. Martin <martin@valinux.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/radeon/rv515d.h b/drivers/gpu/drm/radeon/rv515d.h
index 590309a710b..6927a200daf 100644
--- a/drivers/gpu/drm/radeon/rv515d.h
+++ b/drivers/gpu/drm/radeon/rv515d.h
@@ -205,17 +205,6 @@
REG_SET(PACKET3_IT_OPCODE, (op)) | \
REG_SET(PACKET3_COUNT, (n)))
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
-#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-
/* Registers */
#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 1b2444f4d8f..d63fe1d0f53 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -43,6 +43,31 @@ static void rv770_gpu_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
static void rv770_pcie_gen2_enable(struct radeon_device *rdev);
+#define PCIE_BUS_CLK 10000
+#define TCLK (PCIE_BUS_CLK / 10)
+
+/**
+ * rv770_get_xclk - get the xclk
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Returns the reference clock used by the gfx engine
+ * (r7xx-cayman).
+ */
+u32 rv770_get_xclk(struct radeon_device *rdev)
+{
+ u32 reference_clock = rdev->clock.spll.reference_freq;
+ u32 tmp = RREG32(CG_CLKPIN_CNTL);
+
+ if (tmp & MUX_TCLK_TO_XCLK)
+ return TCLK;
+
+ if (tmp & XTALIN_DIVIDE)
+ return reference_clock / 4;
+
+ return reference_clock;
+}
+
u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 20e29d23d34..c55f950a4af 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -128,6 +128,10 @@
#define GUI_ACTIVE (1<<31)
#define GRBM_STATUS2 0x8014
+#define CG_CLKPIN_CNTL 0x660
+# define MUX_TCLK_TO_XCLK (1 << 8)
+# define XTALIN_DIVIDE (1 << 9)
+
#define CG_MULT_THERMAL_STATUS 0x740
#define ASIC_T(x) ((x) << 16)
#define ASIC_T_MASK 0x3FF0000
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index ae8b48205a6..80979ed951e 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -38,6 +38,7 @@
#define SI_CE_UCODE_SIZE 2144
#define SI_RLC_UCODE_SIZE 2048
#define SI_MC_UCODE_SIZE 7769
+#define OLAND_MC_UCODE_SIZE 7863
MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
MODULE_FIRMWARE("radeon/TAHITI_me.bin");
@@ -54,6 +55,11 @@ MODULE_FIRMWARE("radeon/VERDE_me.bin");
MODULE_FIRMWARE("radeon/VERDE_ce.bin");
MODULE_FIRMWARE("radeon/VERDE_mc.bin");
MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
+MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
+MODULE_FIRMWARE("radeon/OLAND_me.bin");
+MODULE_FIRMWARE("radeon/OLAND_ce.bin");
+MODULE_FIRMWARE("radeon/OLAND_mc.bin");
+MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
extern int r600_ih_ring_alloc(struct radeon_device *rdev);
extern void r600_ih_ring_fini(struct radeon_device *rdev);
@@ -61,6 +67,35 @@ extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
+extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
+extern bool evergreen_is_display_hung(struct radeon_device *rdev);
+
+#define PCIE_BUS_CLK 10000
+#define TCLK (PCIE_BUS_CLK / 10)
+
+/**
+ * si_get_xclk - get the xclk
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Returns the reference clock used by the gfx engine
+ * (SI).
+ */
+u32 si_get_xclk(struct radeon_device *rdev)
+{
+ u32 reference_clock = rdev->clock.spll.reference_freq;
+ u32 tmp;
+
+ tmp = RREG32(CG_CLKPIN_CNTL_2);
+ if (tmp & MUX_TCLK_TO_XCLK)
+ return TCLK;
+
+ tmp = RREG32(CG_CLKPIN_CNTL);
+ if (tmp & XTALIN_DIVIDE)
+ return reference_clock / 4;
+
+ return reference_clock;
+}
/* get temperature in millidegrees */
int si_get_temp(struct radeon_device *rdev)
@@ -200,6 +235,45 @@ static const u32 verde_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
{0x0000009f, 0x00a37400}
};
+static const u32 oland_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
+ {0x0000006f, 0x03044000},
+ {0x00000070, 0x0480c018},
+ {0x00000071, 0x00000040},
+ {0x00000072, 0x01000000},
+ {0x00000074, 0x000000ff},
+ {0x00000075, 0x00143400},
+ {0x00000076, 0x08ec0800},
+ {0x00000077, 0x040000cc},
+ {0x00000079, 0x00000000},
+ {0x0000007a, 0x21000409},
+ {0x0000007c, 0x00000000},
+ {0x0000007d, 0xe8000000},
+ {0x0000007e, 0x044408a8},
+ {0x0000007f, 0x00000003},
+ {0x00000080, 0x00000000},
+ {0x00000081, 0x01000000},
+ {0x00000082, 0x02000000},
+ {0x00000083, 0x00000000},
+ {0x00000084, 0xe3f3e4f4},
+ {0x00000085, 0x00052024},
+ {0x00000087, 0x00000000},
+ {0x00000088, 0x66036603},
+ {0x00000089, 0x01000000},
+ {0x0000008b, 0x1c0a0000},
+ {0x0000008c, 0xff010000},
+ {0x0000008e, 0xffffefff},
+ {0x0000008f, 0xfff3efff},
+ {0x00000090, 0xfff3efbf},
+ {0x00000094, 0x00101101},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x00000099, 0x00006000},
+ {0x0000009a, 0x00001000},
+ {0x0000009f, 0x00a17730}
+};
+
/* ucode loading */
static int si_mc_load_microcode(struct radeon_device *rdev)
{
@@ -228,6 +302,11 @@ static int si_mc_load_microcode(struct radeon_device *rdev)
ucode_size = SI_MC_UCODE_SIZE;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
+ case CHIP_OLAND:
+ io_mc_regs = (u32 *)&oland_io_mc_regs;
+ ucode_size = OLAND_MC_UCODE_SIZE;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
}
running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
@@ -322,6 +401,15 @@ static int si_init_microcode(struct radeon_device *rdev)
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
mc_req_size = SI_MC_UCODE_SIZE * 4;
break;
+ case CHIP_OLAND:
+ chip_name = "OLAND";
+ rlc_chip_name = "OLAND";
+ pfp_req_size = SI_PFP_UCODE_SIZE * 4;
+ me_req_size = SI_PM4_UCODE_SIZE * 4;
+ ce_req_size = SI_CE_UCODE_SIZE * 4;
+ rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+ mc_req_size = OLAND_MC_UCODE_SIZE * 4;
+ break;
default: BUG();
}
@@ -1125,7 +1213,8 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
}
WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
- } else if (rdev->family == CHIP_VERDE) {
+ } else if ((rdev->family == CHIP_VERDE) ||
+ (rdev->family == CHIP_OLAND)) {
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
switch (reg_offset) {
case 0: /* non-AA compressed depth or any compressed stencil */
@@ -1570,6 +1659,23 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
break;
+ case CHIP_OLAND:
+ rdev->config.si.max_shader_engines = 1;
+ rdev->config.si.max_tile_pipes = 4;
+ rdev->config.si.max_cu_per_sh = 6;
+ rdev->config.si.max_sh_per_se = 1;
+ rdev->config.si.max_backends_per_se = 2;
+ rdev->config.si.max_texture_channel_caches = 4;
+ rdev->config.si.max_gprs = 256;
+ rdev->config.si.max_gs_threads = 16;
+ rdev->config.si.max_hw_contexts = 8;
+
+ rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
+ rdev->config.si.sc_prim_fifo_size_backend = 0x40;
+ rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
+ break;
}
/* Initialize HDP */
@@ -2106,154 +2212,275 @@ static int si_cp_resume(struct radeon_device *rdev)
return 0;
}
-bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+static u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
{
- u32 srbm_status;
- u32 grbm_status, grbm_status2;
- u32 grbm_status_se0, grbm_status_se1;
-
- srbm_status = RREG32(SRBM_STATUS);
- grbm_status = RREG32(GRBM_STATUS);
- grbm_status2 = RREG32(GRBM_STATUS2);
- grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
- grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
- if (!(grbm_status & GUI_ACTIVE)) {
- radeon_ring_lockup_update(ring);
- return false;
- }
- /* force CP activities */
- radeon_ring_force_activity(rdev, ring);
- return radeon_ring_test_lockup(rdev, ring);
-}
+ u32 reset_mask = 0;
+ u32 tmp;
-static void si_gpu_soft_reset_gfx(struct radeon_device *rdev)
-{
- u32 grbm_reset = 0;
+ /* GRBM_STATUS */
+ tmp = RREG32(GRBM_STATUS);
+ if (tmp & (PA_BUSY | SC_BUSY |
+ BCI_BUSY | SX_BUSY |
+ TA_BUSY | VGT_BUSY |
+ DB_BUSY | CB_BUSY |
+ GDS_BUSY | SPI_BUSY |
+ IA_BUSY | IA_BUSY_NO_DMA))
+ reset_mask |= RADEON_RESET_GFX;
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return;
+ if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
+ CP_BUSY | CP_COHERENCY_BUSY))
+ reset_mask |= RADEON_RESET_CP;
- dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
- RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
- RREG32(GRBM_STATUS2));
- dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
- RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
- RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
- RREG32(SRBM_STATUS));
+ if (tmp & GRBM_EE_BUSY)
+ reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
- /* Disable CP parsing/prefetching */
- WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
+ /* GRBM_STATUS2 */
+ tmp = RREG32(GRBM_STATUS2);
+ if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
+ reset_mask |= RADEON_RESET_RLC;
- /* reset all the gfx blocks */
- grbm_reset = (SOFT_RESET_CP |
- SOFT_RESET_CB |
- SOFT_RESET_DB |
- SOFT_RESET_GDS |
- SOFT_RESET_PA |
- SOFT_RESET_SC |
- SOFT_RESET_BCI |
- SOFT_RESET_SPI |
- SOFT_RESET_SX |
- SOFT_RESET_TC |
- SOFT_RESET_TA |
- SOFT_RESET_VGT |
- SOFT_RESET_IA);
-
- dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
- WREG32(GRBM_SOFT_RESET, grbm_reset);
- (void)RREG32(GRBM_SOFT_RESET);
- udelay(50);
- WREG32(GRBM_SOFT_RESET, 0);
- (void)RREG32(GRBM_SOFT_RESET);
-
- dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
- RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
- RREG32(GRBM_STATUS2));
- dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
- RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
- RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
- RREG32(SRBM_STATUS));
-}
+ /* DMA_STATUS_REG 0 */
+ tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA;
-static void si_gpu_soft_reset_dma(struct radeon_device *rdev)
-{
- u32 tmp;
+ /* DMA_STATUS_REG 1 */
+ tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA1;
- if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
- return;
+ /* SRBM_STATUS2 */
+ tmp = RREG32(SRBM_STATUS2);
+ if (tmp & DMA_BUSY)
+ reset_mask |= RADEON_RESET_DMA;
- dev_info(rdev->dev, " DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
+ if (tmp & DMA1_BUSY)
+ reset_mask |= RADEON_RESET_DMA1;
- /* dma0 */
- tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
- tmp &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+ /* SRBM_STATUS */
+ tmp = RREG32(SRBM_STATUS);
- /* dma1 */
- tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
- tmp &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+ if (tmp & IH_BUSY)
+ reset_mask |= RADEON_RESET_IH;
- /* Reset dma */
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
+ if (tmp & SEM_BUSY)
+ reset_mask |= RADEON_RESET_SEM;
+
+ if (tmp & GRBM_RQ_PENDING)
+ reset_mask |= RADEON_RESET_GRBM;
+
+ if (tmp & VMC_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
- dev_info(rdev->dev, " DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
+ if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
+ MCC_BUSY | MCD_BUSY))
+ reset_mask |= RADEON_RESET_MC;
+
+ if (evergreen_is_display_hung(rdev))
+ reset_mask |= RADEON_RESET_DISPLAY;
+
+ /* VM_L2_STATUS */
+ tmp = RREG32(VM_L2_STATUS);
+ if (tmp & L2_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
+
+ return reset_mask;
}
-static int si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
struct evergreen_mc_save save;
-
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
-
- if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
- reset_mask &= ~RADEON_RESET_DMA;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 tmp;
if (reset_mask == 0)
- return 0;
+ return;
dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+ evergreen_print_gpu_status_regs(rdev);
dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+ /* Disable CP parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
+
+ if (reset_mask & RADEON_RESET_DMA) {
+ /* dma0 */
+ tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+ }
+ if (reset_mask & RADEON_RESET_DMA1) {
+ /* dma1 */
+ tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+ }
+
+ udelay(50);
+
evergreen_mc_stop(rdev, &save);
- if (radeon_mc_wait_for_idle(rdev)) {
+ if (evergreen_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
- if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
- si_gpu_soft_reset_gfx(rdev);
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP)) {
+ grbm_soft_reset = SOFT_RESET_CB |
+ SOFT_RESET_DB |
+ SOFT_RESET_GDS |
+ SOFT_RESET_PA |
+ SOFT_RESET_SC |
+ SOFT_RESET_BCI |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SX |
+ SOFT_RESET_TC |
+ SOFT_RESET_TA |
+ SOFT_RESET_VGT |
+ SOFT_RESET_IA;
+ }
+
+ if (reset_mask & RADEON_RESET_CP) {
+ grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
+
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+ }
if (reset_mask & RADEON_RESET_DMA)
- si_gpu_soft_reset_dma(rdev);
+ srbm_soft_reset |= SOFT_RESET_DMA;
+
+ if (reset_mask & RADEON_RESET_DMA1)
+ srbm_soft_reset |= SOFT_RESET_DMA1;
+
+ if (reset_mask & RADEON_RESET_DISPLAY)
+ srbm_soft_reset |= SOFT_RESET_DC;
+
+ if (reset_mask & RADEON_RESET_RLC)
+ grbm_soft_reset |= SOFT_RESET_RLC;
+
+ if (reset_mask & RADEON_RESET_SEM)
+ srbm_soft_reset |= SOFT_RESET_SEM;
+
+ if (reset_mask & RADEON_RESET_IH)
+ srbm_soft_reset |= SOFT_RESET_IH;
+
+ if (reset_mask & RADEON_RESET_GRBM)
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+
+ if (reset_mask & RADEON_RESET_VMC)
+ srbm_soft_reset |= SOFT_RESET_VMC;
+
+ if (reset_mask & RADEON_RESET_MC)
+ srbm_soft_reset |= SOFT_RESET_MC;
+
+ if (grbm_soft_reset) {
+ tmp = RREG32(GRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+ }
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(SRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+ }
/* Wait a little for things to settle down */
udelay(50);
evergreen_mc_resume(rdev, &save);
- return 0;
+ udelay(50);
+
+ evergreen_print_gpu_status_regs(rdev);
}
int si_asic_reset(struct radeon_device *rdev)
{
- return si_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
- RADEON_RESET_COMPUTE |
- RADEON_RESET_DMA));
+ u32 reset_mask;
+
+ reset_mask = si_gpu_check_soft_reset(rdev);
+
+ if (reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, true);
+
+ si_gpu_soft_reset(rdev, reset_mask);
+
+ reset_mask = si_gpu_check_soft_reset(rdev);
+
+ if (!reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, false);
+
+ return 0;
+}
+
+/**
+ * si_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = si_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_CP))) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force CP activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * si_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = si_gpu_check_soft_reset(rdev);
+ u32 mask;
+
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ mask = RADEON_RESET_DMA;
+ else
+ mask = RADEON_RESET_DMA1;
+
+ if (!(reset_mask & mask)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
/* MC */
@@ -2855,19 +3082,19 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
do {
pkt.idx = idx;
- pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
- pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
+ pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
+ pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
pkt.one_reg_wr = 0;
switch (pkt.type) {
- case PACKET_TYPE0:
+ case RADEON_PACKET_TYPE0:
dev_err(rdev->dev, "Packet0 not allowed!\n");
ret = -EINVAL;
break;
- case PACKET_TYPE2:
+ case RADEON_PACKET_TYPE2:
idx += 1;
break;
- case PACKET_TYPE3:
- pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
+ case RADEON_PACKET_TYPE3:
+ pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
if (ib->is_const_ib)
ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
else {
@@ -2920,19 +3147,21 @@ void si_vm_fini(struct radeon_device *rdev)
* si_vm_set_page - update the page tables using the CP
*
* @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
* @flags: access flags
*
- * Update the page tables using the CP (cayman-si).
+ * Update the page tables using the CP (SI).
*/
-void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+void si_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
- struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
uint64_t value;
unsigned ndw;
@@ -2943,11 +3172,11 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
if (ndw > 0x3FFE)
ndw = 0x3FFE;
- radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(1)));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe));
+ ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
+ ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(1));
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
for (; ndw > 2; ndw -= 2, --count, pe += 8) {
if (flags & RADEON_VM_PAGE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
@@ -2959,8 +3188,8 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
}
addr += incr;
value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
}
} else {
@@ -2972,9 +3201,9 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
ndw = 0xFFFFE;
/* for non-physically contiguous pages (system) */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & RADEON_VM_PAGE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
@@ -2986,8 +3215,8 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
}
addr += incr;
value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
}
} else {
@@ -3001,20 +3230,22 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
else
value = 0;
/* for physically contiguous pages (vram) */
- radeon_ring_write(ring, DMA_PTE_PDE_PACKET(ndw));
- radeon_ring_write(ring, pe); /* dst addr */
- radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
- radeon_ring_write(ring, r600_flags); /* mask */
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, value); /* value */
- radeon_ring_write(ring, upper_32_bits(value));
- radeon_ring_write(ring, incr); /* increment size */
- radeon_ring_write(ring, 0);
+ ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
pe += ndw * 4;
addr += (ndw / 2) * incr;
count -= ndw / 2;
}
}
+ while (ib->length_dw & 0x7)
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
}
}
@@ -4378,14 +4609,14 @@ void si_fini(struct radeon_device *rdev)
}
/**
- * si_get_gpu_clock - return GPU clock counter snapshot
+ * si_get_gpu_clock_counter - return GPU clock counter snapshot
*
* @rdev: radeon_device pointer
*
* Fetches a GPU clock counter snapshot (SI).
* Returns the 64 bit clock counter snapshot.
*/
-uint64_t si_get_gpu_clock(struct radeon_device *rdev)
+uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
{
uint64_t clock;
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index c056aae814f..23fc08fc8e7 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -58,9 +58,22 @@
#define VGA_HDP_CONTROL 0x328
#define VGA_MEMORY_DISABLE (1 << 4)
+#define CG_CLKPIN_CNTL 0x660
+# define XTALIN_DIVIDE (1 << 1)
+#define CG_CLKPIN_CNTL_2 0x664
+# define MUX_TCLK_TO_XCLK (1 << 8)
+
#define DMIF_ADDR_CONFIG 0xBD4
#define SRBM_STATUS 0xE50
+#define GRBM_RQ_PENDING (1 << 5)
+#define VMC_BUSY (1 << 8)
+#define MCB_BUSY (1 << 9)
+#define MCB_NON_DISPLAY_BUSY (1 << 10)
+#define MCC_BUSY (1 << 11)
+#define MCD_BUSY (1 << 12)
+#define SEM_BUSY (1 << 14)
+#define IH_BUSY (1 << 17)
#define SRBM_SOFT_RESET 0x0E60
#define SOFT_RESET_BIF (1 << 1)
@@ -81,6 +94,10 @@
#define CC_SYS_RB_BACKEND_DISABLE 0xe80
#define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84
+#define SRBM_STATUS2 0x0EC4
+#define DMA_BUSY (1 << 5)
+#define DMA1_BUSY (1 << 6)
+
#define VM_L2_CNTL 0x1400
#define ENABLE_L2_CACHE (1 << 0)
#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
@@ -783,16 +800,7 @@
/*
* PM4
*/
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
+#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
(((reg) >> 2) & 0xFFFF) | \
((n) & 0x3FFF) << 16)
#define CP_PACKET2 0x80000000
@@ -801,7 +809,7 @@
#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
-#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
(((op) & 0xFF) << 8) | \
((n) & 0x3FFF) << 16)
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index d1d5306ebf2..f6e0b539505 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -313,9 +313,9 @@ static int shmob_drm_pm_resume(struct device *dev)
{
struct shmob_drm_device *sdev = dev_get_drvdata(dev);
- mutex_lock(&sdev->ddev->mode_config.mutex);
+ drm_modeset_lock_all(sdev->ddev);
shmob_drm_crtc_resume(&sdev->crtc);
- mutex_unlock(&sdev->ddev->mode_config.mutex);
+ drm_modeset_unlock_all(sdev->ddev);
drm_kms_helper_poll_enable(sdev->ddev);
return 0;
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index be1daf7344d..c92955df065 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -4,6 +4,7 @@ config DRM_TEGRA
select DRM_KMS_HELPER
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
+ select DRM_HDMI
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index b6679b36700..de94707b9db 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -17,26 +17,257 @@
#include "drm.h"
#include "dc.h"
-struct tegra_dc_window {
- fixed20_12 x;
- fixed20_12 y;
- fixed20_12 w;
- fixed20_12 h;
- unsigned int outx;
- unsigned int outy;
- unsigned int outw;
- unsigned int outh;
- unsigned int stride;
- unsigned int fmt;
+struct tegra_plane {
+ struct drm_plane base;
+ unsigned int index;
};
+static inline struct tegra_plane *to_tegra_plane(struct drm_plane *plane)
+{
+ return container_of(plane, struct tegra_plane, base);
+}
+
+static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x,
+ int crtc_y, unsigned int crtc_w,
+ unsigned int crtc_h, uint32_t src_x,
+ uint32_t src_y, uint32_t src_w, uint32_t src_h)
+{
+ struct tegra_plane *p = to_tegra_plane(plane);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ struct tegra_dc_window window;
+ unsigned int i;
+
+ memset(&window, 0, sizeof(window));
+ window.src.x = src_x >> 16;
+ window.src.y = src_y >> 16;
+ window.src.w = src_w >> 16;
+ window.src.h = src_h >> 16;
+ window.dst.x = crtc_x;
+ window.dst.y = crtc_y;
+ window.dst.w = crtc_w;
+ window.dst.h = crtc_h;
+ window.format = tegra_dc_format(fb->pixel_format);
+ window.bits_per_pixel = fb->bits_per_pixel;
+
+ for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
+ struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, i);
+
+ window.base[i] = gem->paddr + fb->offsets[i];
+
+ /*
+ * Tegra doesn't support different strides for U and V planes
+ * so we display a warning if the user tries to display a
+ * framebuffer with such a configuration.
+ */
+ if (i >= 2) {
+ if (fb->pitches[i] != window.stride[1])
+ DRM_ERROR("unsupported UV-plane configuration\n");
+ } else {
+ window.stride[i] = fb->pitches[i];
+ }
+ }
+
+ return tegra_dc_setup_window(dc, p->index, &window);
+}
+
+static int tegra_plane_disable(struct drm_plane *plane)
+{
+ struct tegra_dc *dc = to_tegra_dc(plane->crtc);
+ struct tegra_plane *p = to_tegra_plane(plane);
+ unsigned long value;
+
+ value = WINDOW_A_SELECT << p->index;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
+
+ value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
+ value &= ~WIN_ENABLE;
+ tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+
+ tegra_dc_writel(dc, WIN_A_UPDATE << p->index, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, WIN_A_ACT_REQ << p->index, DC_CMD_STATE_CONTROL);
+
+ return 0;
+}
+
+static void tegra_plane_destroy(struct drm_plane *plane)
+{
+ tegra_plane_disable(plane);
+ drm_plane_cleanup(plane);
+}
+
+static const struct drm_plane_funcs tegra_plane_funcs = {
+ .update_plane = tegra_plane_update,
+ .disable_plane = tegra_plane_disable,
+ .destroy = tegra_plane_destroy,
+};
+
+static const uint32_t plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YUV422,
+};
+
+static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
+{
+ unsigned int i;
+ int err = 0;
+
+ for (i = 0; i < 2; i++) {
+ struct tegra_plane *plane;
+
+ plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return -ENOMEM;
+
+ plane->index = 1 + i;
+
+ err = drm_plane_init(drm, &plane->base, 1 << dc->pipe,
+ &tegra_plane_funcs, plane_formats,
+ ARRAY_SIZE(plane_formats), false);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
+ struct drm_framebuffer *fb)
+{
+ struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, 0);
+ unsigned long value;
+
+ tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
+
+ value = fb->offsets[0] + y * fb->pitches[0] +
+ x * fb->bits_per_pixel / 8;
+
+ tegra_dc_writel(dc, gem->paddr + value, DC_WINBUF_START_ADDR);
+ tegra_dc_writel(dc, fb->pitches[0], DC_WIN_LINE_STRIDE);
+
+ value = GENERAL_UPDATE | WIN_A_UPDATE;
+ tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+
+ value = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
+ tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+
+ return 0;
+}
+
+void tegra_dc_enable_vblank(struct tegra_dc *dc)
+{
+ unsigned long value, flags;
+
+ spin_lock_irqsave(&dc->lock, flags);
+
+ value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+ value |= VBLANK_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+ spin_unlock_irqrestore(&dc->lock, flags);
+}
+
+void tegra_dc_disable_vblank(struct tegra_dc *dc)
+{
+ unsigned long value, flags;
+
+ spin_lock_irqsave(&dc->lock, flags);
+
+ value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+ value &= ~VBLANK_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+ spin_unlock_irqrestore(&dc->lock, flags);
+}
+
+static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
+{
+ struct drm_device *drm = dc->base.dev;
+ struct drm_crtc *crtc = &dc->base;
+ struct drm_gem_cma_object *gem;
+ unsigned long flags, base;
+
+ if (!dc->event)
+ return;
+
+ gem = drm_fb_cma_get_gem_obj(crtc->fb, 0);
+
+ /* check if new start address has been latched */
+ tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
+ base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR);
+ tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
+
+ if (base == gem->paddr + crtc->fb->offsets[0]) {
+ spin_lock_irqsave(&drm->event_lock, flags);
+ drm_send_vblank_event(drm, dc->pipe, dc->event);
+ drm_vblank_put(drm, dc->pipe);
+ dc->event = NULL;
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+ }
+}
+
+void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ struct drm_device *drm = crtc->dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drm->event_lock, flags);
+
+ if (dc->event && dc->event->base.file_priv == file) {
+ dc->event->base.destroy(&dc->event->base);
+ drm_vblank_put(drm, dc->pipe);
+ dc->event = NULL;
+ }
+
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+}
+
+static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ struct drm_device *drm = crtc->dev;
+
+ if (dc->event)
+ return -EBUSY;
+
+ if (event) {
+ event->pipe = dc->pipe;
+ dc->event = event;
+ drm_vblank_get(drm, dc->pipe);
+ }
+
+ tegra_dc_set_base(dc, 0, 0, fb);
+ crtc->fb = fb;
+
+ return 0;
+}
+
static const struct drm_crtc_funcs tegra_crtc_funcs = {
+ .page_flip = tegra_dc_page_flip,
.set_config = drm_crtc_helper_set_config,
.destroy = drm_crtc_cleanup,
};
-static void tegra_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void tegra_crtc_disable(struct drm_crtc *crtc)
{
+ struct drm_device *drm = crtc->dev;
+ struct drm_plane *plane;
+
+ list_for_each_entry(plane, &drm->mode_config.plane_list, head) {
+ if (plane->crtc == crtc) {
+ tegra_plane_disable(plane);
+ plane->crtc = NULL;
+
+ if (plane->fb) {
+ drm_framebuffer_unreference(plane->fb);
+ plane->fb = NULL;
+ }
+ }
+ }
}
static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -46,10 +277,11 @@ static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
-static inline u32 compute_dda_inc(fixed20_12 inf, unsigned int out, bool v,
+static inline u32 compute_dda_inc(unsigned int in, unsigned int out, bool v,
unsigned int bpp)
{
fixed20_12 outf = dfixed_init(out);
+ fixed20_12 inf = dfixed_init(in);
u32 dda_inc;
int max;
@@ -79,9 +311,10 @@ static inline u32 compute_dda_inc(fixed20_12 inf, unsigned int out, bool v,
return dda_inc;
}
-static inline u32 compute_initial_dda(fixed20_12 in)
+static inline u32 compute_initial_dda(unsigned int in)
{
- return dfixed_frac(in);
+ fixed20_12 inf = dfixed_init(in);
+ return dfixed_frac(inf);
}
static int tegra_dc_set_timings(struct tegra_dc *dc,
@@ -152,18 +385,198 @@ static int tegra_crtc_setup_clk(struct drm_crtc *crtc,
return 0;
}
+static bool tegra_dc_format_is_yuv(unsigned int format, bool *planar)
+{
+ switch (format) {
+ case WIN_COLOR_DEPTH_YCbCr422:
+ case WIN_COLOR_DEPTH_YUV422:
+ if (planar)
+ *planar = false;
+
+ return true;
+
+ case WIN_COLOR_DEPTH_YCbCr420P:
+ case WIN_COLOR_DEPTH_YUV420P:
+ case WIN_COLOR_DEPTH_YCbCr422P:
+ case WIN_COLOR_DEPTH_YUV422P:
+ case WIN_COLOR_DEPTH_YCbCr422R:
+ case WIN_COLOR_DEPTH_YUV422R:
+ case WIN_COLOR_DEPTH_YCbCr422RA:
+ case WIN_COLOR_DEPTH_YUV422RA:
+ if (planar)
+ *planar = true;
+
+ return true;
+ }
+
+ return false;
+}
+
+int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
+ const struct tegra_dc_window *window)
+{
+ unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp;
+ unsigned long value;
+ bool yuv, planar;
+
+ /*
+ * For YUV planar modes, the number of bytes per pixel takes into
+ * account only the luma component and therefore is 1.
+ */
+ yuv = tegra_dc_format_is_yuv(window->format, &planar);
+ if (!yuv)
+ bpp = window->bits_per_pixel / 8;
+ else
+ bpp = planar ? 1 : 2;
+
+ value = WINDOW_A_SELECT << index;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
+
+ tegra_dc_writel(dc, window->format, DC_WIN_COLOR_DEPTH);
+ tegra_dc_writel(dc, 0, DC_WIN_BYTE_SWAP);
+
+ value = V_POSITION(window->dst.y) | H_POSITION(window->dst.x);
+ tegra_dc_writel(dc, value, DC_WIN_POSITION);
+
+ value = V_SIZE(window->dst.h) | H_SIZE(window->dst.w);
+ tegra_dc_writel(dc, value, DC_WIN_SIZE);
+
+ h_offset = window->src.x * bpp;
+ v_offset = window->src.y;
+ h_size = window->src.w * bpp;
+ v_size = window->src.h;
+
+ value = V_PRESCALED_SIZE(v_size) | H_PRESCALED_SIZE(h_size);
+ tegra_dc_writel(dc, value, DC_WIN_PRESCALED_SIZE);
+
+ /*
+ * For DDA computations the number of bytes per pixel for YUV planar
+ * modes needs to take into account all Y, U and V components.
+ */
+ if (yuv && planar)
+ bpp = 2;
+
+ h_dda = compute_dda_inc(window->src.w, window->dst.w, false, bpp);
+ v_dda = compute_dda_inc(window->src.h, window->dst.h, true, bpp);
+
+ value = V_DDA_INC(v_dda) | H_DDA_INC(h_dda);
+ tegra_dc_writel(dc, value, DC_WIN_DDA_INC);
+
+ h_dda = compute_initial_dda(window->src.x);
+ v_dda = compute_initial_dda(window->src.y);
+
+ tegra_dc_writel(dc, h_dda, DC_WIN_H_INITIAL_DDA);
+ tegra_dc_writel(dc, v_dda, DC_WIN_V_INITIAL_DDA);
+
+ tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE);
+ tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE);
+
+ tegra_dc_writel(dc, window->base[0], DC_WINBUF_START_ADDR);
+
+ if (yuv && planar) {
+ tegra_dc_writel(dc, window->base[1], DC_WINBUF_START_ADDR_U);
+ tegra_dc_writel(dc, window->base[2], DC_WINBUF_START_ADDR_V);
+ value = window->stride[1] << 16 | window->stride[0];
+ tegra_dc_writel(dc, value, DC_WIN_LINE_STRIDE);
+ } else {
+ tegra_dc_writel(dc, window->stride[0], DC_WIN_LINE_STRIDE);
+ }
+
+ tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
+ tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
+
+ value = WIN_ENABLE;
+
+ if (yuv) {
+ /* setup default colorspace conversion coefficients */
+ tegra_dc_writel(dc, 0x00f0, DC_WIN_CSC_YOF);
+ tegra_dc_writel(dc, 0x012a, DC_WIN_CSC_KYRGB);
+ tegra_dc_writel(dc, 0x0000, DC_WIN_CSC_KUR);
+ tegra_dc_writel(dc, 0x0198, DC_WIN_CSC_KVR);
+ tegra_dc_writel(dc, 0x039b, DC_WIN_CSC_KUG);
+ tegra_dc_writel(dc, 0x032f, DC_WIN_CSC_KVG);
+ tegra_dc_writel(dc, 0x0204, DC_WIN_CSC_KUB);
+ tegra_dc_writel(dc, 0x0000, DC_WIN_CSC_KVB);
+
+ value |= CSC_ENABLE;
+ } else if (window->bits_per_pixel < 24) {
+ value |= COLOR_EXPAND;
+ }
+
+ tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+
+ /*
+ * Disable blending and assume Window A is the bottom-most window,
+ * Window C is the top-most window and Window B is in the middle.
+ */
+ tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_NOKEY);
+ tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_1WIN);
+
+ switch (index) {
+ case 0:
+ tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_X);
+ tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_Y);
+ tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_3WIN_XY);
+ break;
+
+ case 1:
+ tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_X);
+ tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_Y);
+ tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_3WIN_XY);
+ break;
+
+ case 2:
+ tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_X);
+ tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_Y);
+ tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_3WIN_XY);
+ break;
+ }
+
+ tegra_dc_writel(dc, WIN_A_UPDATE << index, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, WIN_A_ACT_REQ << index, DC_CMD_STATE_CONTROL);
+
+ return 0;
+}
+
+unsigned int tegra_dc_format(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ return WIN_COLOR_DEPTH_B8G8R8A8;
+
+ case DRM_FORMAT_RGB565:
+ return WIN_COLOR_DEPTH_B5G6R5;
+
+ case DRM_FORMAT_UYVY:
+ return WIN_COLOR_DEPTH_YCbCr422;
+
+ case DRM_FORMAT_YUV420:
+ return WIN_COLOR_DEPTH_YCbCr420P;
+
+ case DRM_FORMAT_YUV422:
+ return WIN_COLOR_DEPTH_YCbCr422P;
+
+ default:
+ break;
+ }
+
+ WARN(1, "unsupported pixel format %u, using default\n", format);
+ return WIN_COLOR_DEPTH_B8G8R8A8;
+}
+
static int tegra_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted,
int x, int y, struct drm_framebuffer *old_fb)
{
- struct tegra_framebuffer *fb = to_tegra_fb(crtc->fb);
+ struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(crtc->fb, 0);
struct tegra_dc *dc = to_tegra_dc(crtc);
- unsigned int h_dda, v_dda, bpp;
- struct tegra_dc_window win;
+ struct tegra_dc_window window;
unsigned long div, value;
int err;
+ drm_vblank_pre_modeset(crtc->dev, dc->pipe);
+
err = tegra_crtc_setup_clk(crtc, mode, &div);
if (err) {
dev_err(dc->dev, "failed to setup clock for CRTC: %d\n", err);
@@ -191,83 +604,33 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
/* setup window parameters */
- memset(&win, 0, sizeof(win));
- win.x.full = dfixed_const(0);
- win.y.full = dfixed_const(0);
- win.w.full = dfixed_const(mode->hdisplay);
- win.h.full = dfixed_const(mode->vdisplay);
- win.outx = 0;
- win.outy = 0;
- win.outw = mode->hdisplay;
- win.outh = mode->vdisplay;
-
- switch (crtc->fb->pixel_format) {
- case DRM_FORMAT_XRGB8888:
- win.fmt = WIN_COLOR_DEPTH_B8G8R8A8;
- break;
-
- case DRM_FORMAT_RGB565:
- win.fmt = WIN_COLOR_DEPTH_B5G6R5;
- break;
-
- default:
- win.fmt = WIN_COLOR_DEPTH_B8G8R8A8;
- WARN_ON(1);
- break;
- }
-
- bpp = crtc->fb->bits_per_pixel / 8;
- win.stride = crtc->fb->pitches[0];
-
- /* program window registers */
- value = WINDOW_A_SELECT;
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
-
- tegra_dc_writel(dc, win.fmt, DC_WIN_COLOR_DEPTH);
- tegra_dc_writel(dc, 0, DC_WIN_BYTE_SWAP);
-
- value = V_POSITION(win.outy) | H_POSITION(win.outx);
- tegra_dc_writel(dc, value, DC_WIN_POSITION);
-
- value = V_SIZE(win.outh) | H_SIZE(win.outw);
- tegra_dc_writel(dc, value, DC_WIN_SIZE);
-
- value = V_PRESCALED_SIZE(dfixed_trunc(win.h)) |
- H_PRESCALED_SIZE(dfixed_trunc(win.w) * bpp);
- tegra_dc_writel(dc, value, DC_WIN_PRESCALED_SIZE);
-
- h_dda = compute_dda_inc(win.w, win.outw, false, bpp);
- v_dda = compute_dda_inc(win.h, win.outh, true, bpp);
-
- value = V_DDA_INC(v_dda) | H_DDA_INC(h_dda);
- tegra_dc_writel(dc, value, DC_WIN_DDA_INC);
-
- h_dda = compute_initial_dda(win.x);
- v_dda = compute_initial_dda(win.y);
-
- tegra_dc_writel(dc, h_dda, DC_WIN_H_INITIAL_DDA);
- tegra_dc_writel(dc, v_dda, DC_WIN_V_INITIAL_DDA);
-
- tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE);
- tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE);
-
- tegra_dc_writel(dc, fb->obj->paddr, DC_WINBUF_START_ADDR);
- tegra_dc_writel(dc, win.stride, DC_WIN_LINE_STRIDE);
- tegra_dc_writel(dc, dfixed_trunc(win.x) * bpp,
- DC_WINBUF_ADDR_H_OFFSET);
- tegra_dc_writel(dc, dfixed_trunc(win.y), DC_WINBUF_ADDR_V_OFFSET);
-
- value = WIN_ENABLE;
-
- if (bpp < 24)
- value |= COLOR_EXPAND;
+ memset(&window, 0, sizeof(window));
+ window.src.x = 0;
+ window.src.y = 0;
+ window.src.w = mode->hdisplay;
+ window.src.h = mode->vdisplay;
+ window.dst.x = 0;
+ window.dst.y = 0;
+ window.dst.w = mode->hdisplay;
+ window.dst.h = mode->vdisplay;
+ window.format = tegra_dc_format(crtc->fb->pixel_format);
+ window.bits_per_pixel = crtc->fb->bits_per_pixel;
+ window.stride[0] = crtc->fb->pitches[0];
+ window.base[0] = gem->paddr;
+
+ err = tegra_dc_setup_window(dc, 0, &window);
+ if (err < 0)
+ dev_err(dc->dev, "failed to enable root plane\n");
- tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+ return 0;
+}
- tegra_dc_writel(dc, 0xff00, DC_WIN_BLEND_NOKEY);
- tegra_dc_writel(dc, 0xff00, DC_WIN_BLEND_1WIN);
+static int tegra_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
- return 0;
+ return tegra_dc_set_base(dc, x, y, crtc->fb);
}
static void tegra_crtc_prepare(struct drm_crtc *crtc)
@@ -314,31 +677,24 @@ static void tegra_crtc_prepare(struct drm_crtc *crtc)
tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
-
- value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
}
static void tegra_crtc_commit(struct drm_crtc *crtc)
{
struct tegra_dc *dc = to_tegra_dc(crtc);
- unsigned long update_mask;
unsigned long value;
- update_mask = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
-
- tegra_dc_writel(dc, update_mask << 8, DC_CMD_STATE_CONTROL);
+ value = GENERAL_UPDATE | WIN_A_UPDATE;
+ tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
- value = tegra_dc_readl(dc, DC_CMD_INT_ENABLE);
- value |= FRAME_END_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
-
- value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
- value |= FRAME_END_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+ value = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
+ tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, update_mask, DC_CMD_STATE_CONTROL);
+ drm_vblank_post_modeset(crtc->dev, dc->pipe);
}
static void tegra_crtc_load_lut(struct drm_crtc *crtc)
@@ -346,15 +702,16 @@ static void tegra_crtc_load_lut(struct drm_crtc *crtc)
}
static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
- .dpms = tegra_crtc_dpms,
+ .disable = tegra_crtc_disable,
.mode_fixup = tegra_crtc_mode_fixup,
.mode_set = tegra_crtc_mode_set,
+ .mode_set_base = tegra_crtc_mode_set_base,
.prepare = tegra_crtc_prepare,
.commit = tegra_crtc_commit,
.load_lut = tegra_crtc_load_lut,
};
-static irqreturn_t tegra_drm_irq(int irq, void *data)
+static irqreturn_t tegra_dc_irq(int irq, void *data)
{
struct tegra_dc *dc = data;
unsigned long status;
@@ -373,6 +730,7 @@ static irqreturn_t tegra_drm_irq(int irq, void *data)
dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
*/
drm_handle_vblank(dc->base.dev, dc->pipe);
+ tegra_dc_finish_page_flip(dc);
}
if (status & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)) {
@@ -587,7 +945,7 @@ static int tegra_dc_show_regs(struct seq_file *s, void *data)
DUMP_REG(DC_WIN_BLEND_1WIN);
DUMP_REG(DC_WIN_BLEND_2WIN_X);
DUMP_REG(DC_WIN_BLEND_2WIN_Y);
- DUMP_REG(DC_WIN_BLEND32WIN_XY);
+ DUMP_REG(DC_WIN_BLEND_3WIN_XY);
DUMP_REG(DC_WIN_HP_FETCH_CONTROL);
DUMP_REG(DC_WINBUF_START_ADDR);
DUMP_REG(DC_WINBUF_START_ADDR_NS);
@@ -689,13 +1047,17 @@ static int tegra_dc_drm_init(struct host1x_client *client,
return err;
}
+ err = tegra_dc_add_planes(drm, dc);
+ if (err < 0)
+ return err;
+
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
err = tegra_dc_debugfs_init(dc, drm->primary);
if (err < 0)
dev_err(dc->dev, "debugfs setup failed: %d\n", err);
}
- err = devm_request_irq(dc->dev, dc->irq, tegra_drm_irq, 0,
+ err = devm_request_irq(dc->dev, dc->irq, tegra_dc_irq, 0,
dev_name(dc->dev), dc);
if (err < 0) {
dev_err(dc->dev, "failed to request IRQ#%u: %d\n", dc->irq,
@@ -744,6 +1106,7 @@ static int tegra_dc_probe(struct platform_device *pdev)
if (!dc)
return -ENOMEM;
+ spin_lock_init(&dc->lock);
INIT_LIST_HEAD(&dc->list);
dc->dev = &pdev->dev;
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 99977b5d5c3..79eaec9aac7 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -58,6 +58,8 @@
#define DC_CMD_SIGNAL_RAISE3 0x03e
#define DC_CMD_STATE_ACCESS 0x040
+#define READ_MUX (1 << 0)
+#define WRITE_MUX (1 << 2)
#define DC_CMD_STATE_CONTROL 0x041
#define GENERAL_ACT_REQ (1 << 0)
@@ -290,8 +292,18 @@
#define DC_DISP_SD_HW_K_VALUES 0x4dd
#define DC_DISP_SD_MAN_K_VALUES 0x4de
+#define DC_WIN_CSC_YOF 0x611
+#define DC_WIN_CSC_KYRGB 0x612
+#define DC_WIN_CSC_KUR 0x613
+#define DC_WIN_CSC_KVR 0x614
+#define DC_WIN_CSC_KUG 0x615
+#define DC_WIN_CSC_KVG 0x616
+#define DC_WIN_CSC_KUB 0x617
+#define DC_WIN_CSC_KVB 0x618
+
#define DC_WIN_WIN_OPTIONS 0x700
#define COLOR_EXPAND (1 << 6)
+#define CSC_ENABLE (1 << 18)
#define WIN_ENABLE (1 << 30)
#define DC_WIN_BYTE_SWAP 0x701
@@ -359,7 +371,7 @@
#define DC_WIN_BLEND_1WIN 0x710
#define DC_WIN_BLEND_2WIN_X 0x711
#define DC_WIN_BLEND_2WIN_Y 0x712
-#define DC_WIN_BLEND32WIN_XY 0x713
+#define DC_WIN_BLEND_3WIN_XY 0x713
#define DC_WIN_HP_FETCH_CONTROL 0x714
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index d980dc75788..9d452df5bca 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -39,6 +39,10 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
if (err < 0)
return err;
+ err = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (err < 0)
+ return err;
+
err = tegra_drm_fb_init(drm);
if (err < 0)
return err;
@@ -88,13 +92,112 @@ static const struct file_operations tegra_drm_fops = {
.llseek = noop_llseek,
};
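+/* Look up the CRTC whose display controller drives the given pipe. */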
+static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+ if (dc->pipe == pipe)
+ return crtc;
+ }
+
+ return NULL;
+}
+
+static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+ /* TODO: implement real hardware counter using syncpoints */
+ return drm_vblank_count(dev, crtc);
+}
+
+static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
+{
+ struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+ if (!crtc)
+ return -ENODEV;
+
+ tegra_dc_enable_vblank(dc);
+
+ return 0;
+}
+
+static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
+{
+ struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+ if (crtc)
+ tegra_dc_disable_vblank(dc);
+}
+
+static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
+ tegra_dc_cancel_page_flip(crtc, file);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)s->private;
+ struct drm_device *drm = node->minor->dev;
+ struct drm_framebuffer *fb;
+
+ mutex_lock(&drm->mode_config.fb_lock);
+
+ list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
+ seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
+ fb->base.id, fb->width, fb->height, fb->depth,
+ fb->bits_per_pixel,
+ atomic_read(&fb->refcount.refcount));
+ }
+
+ mutex_unlock(&drm->mode_config.fb_lock);
+
+ return 0;
+}
+
+static struct drm_info_list tegra_debugfs_list[] = {
+ { "framebuffers", tegra_debugfs_framebuffers, 0 },
+};
+
+static int tegra_debugfs_init(struct drm_minor *minor)
+{
+ return drm_debugfs_create_files(tegra_debugfs_list,
+ ARRAY_SIZE(tegra_debugfs_list),
+ minor->debugfs_root, minor);
+}
+
+static void tegra_debugfs_cleanup(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(tegra_debugfs_list,
+ ARRAY_SIZE(tegra_debugfs_list), minor);
+}
+#endif
+
struct drm_driver tegra_drm_driver = {
.driver_features = DRIVER_BUS_PLATFORM | DRIVER_MODESET | DRIVER_GEM,
.load = tegra_drm_load,
.unload = tegra_drm_unload,
.open = tegra_drm_open,
+ .preclose = tegra_drm_preclose,
.lastclose = tegra_drm_lastclose,
+ .get_vblank_counter = tegra_drm_get_vblank_counter,
+ .enable_vblank = tegra_drm_enable_vblank,
+ .disable_vblank = tegra_drm_disable_vblank,
+
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = tegra_debugfs_init,
+ .debugfs_cleanup = tegra_debugfs_cleanup,
+#endif
+
.gem_free_object = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 741b5dc2742..6dd75a2600e 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -18,16 +18,6 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fixed.h>
-struct tegra_framebuffer {
- struct drm_framebuffer base;
- struct drm_gem_cma_object *obj;
-};
-
-static inline struct tegra_framebuffer *to_tegra_fb(struct drm_framebuffer *fb)
-{
- return container_of(fb, struct tegra_framebuffer, base);
-}
-
struct host1x {
struct drm_device *drm;
struct device *dev;
@@ -44,7 +34,6 @@ struct host1x {
struct list_head clients;
struct drm_fbdev_cma *fbdev;
- struct tegra_framebuffer fb;
};
struct host1x_client;
@@ -75,6 +64,7 @@ struct tegra_output;
struct tegra_dc {
struct host1x_client client;
+ spinlock_t lock;
struct host1x *host1x;
struct device *dev;
@@ -94,6 +84,9 @@ struct tegra_dc {
struct drm_info_list *debugfs_files;
struct drm_minor *minor;
struct dentry *debugfs;
+
+ /* page-flip handling */
+ struct drm_pending_vblank_event *event;
};
static inline struct tegra_dc *host1x_client_to_dc(struct host1x_client *client)
@@ -118,6 +111,34 @@ static inline unsigned long tegra_dc_readl(struct tegra_dc *dc,
return readl(dc->regs + (reg << 2));
}
+struct tegra_dc_window {
+ struct {
+ unsigned int x;
+ unsigned int y;
+ unsigned int w;
+ unsigned int h;
+ } src;
+ struct {
+ unsigned int x;
+ unsigned int y;
+ unsigned int w;
+ unsigned int h;
+ } dst;
+ unsigned int bits_per_pixel;
+ unsigned int format;
+ unsigned int stride[2];
+ unsigned long base[3];
+};
+
+/* from dc.c */
+extern unsigned int tegra_dc_format(uint32_t format);
+extern int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
+ const struct tegra_dc_window *window);
+extern void tegra_dc_enable_vblank(struct tegra_dc *dc);
+extern void tegra_dc_disable_vblank(struct tegra_dc *dc);
+extern void tegra_dc_cancel_page_flip(struct drm_crtc *crtc,
+ struct drm_file *file);
+
struct tegra_output_ops {
int (*enable)(struct tegra_output *output);
int (*disable)(struct tegra_output *output);
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 97993c6835f..03914953cb1 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -39,10 +39,6 @@ int tegra_drm_fb_init(struct drm_device *drm)
if (IS_ERR(fbdev))
return PTR_ERR(fbdev);
-#ifndef CONFIG_FRAMEBUFFER_CONSOLE
- drm_fbdev_cma_restore_mode(fbdev);
-#endif
-
host1x->fbdev = fbdev;
return 0;
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index d4f3fb9f0c2..bb747f6cd1a 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -10,12 +10,15 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/gpio.h>
+#include <linux/hdmi.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/clk/tegra.h>
+#include <drm/drm_edid.h>
+
#include "hdmi.h"
#include "drm.h"
#include "dc.h"
@@ -400,54 +403,65 @@ static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
return 0;
}
-static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi,
- unsigned int offset, u8 type,
- u8 version, void *data, size_t size)
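+/* Pack @size bytes into a register value, ptr[0] in the least significant byte. */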
+static inline unsigned long tegra_hdmi_subpack(const u8 *ptr, size_t size)
{
- unsigned long value;
- u8 *ptr = data;
- u32 subpack[2];
+ unsigned long value = 0;
size_t i;
- u8 csum;
- /* first byte of data is the checksum */
- csum = type + version + size - 1;
+ for (i = size; i > 0; i--)
+ value = (value << 8) | ptr[i - 1];
- for (i = 1; i < size; i++)
- csum += ptr[i];
+ return value;
+}
- ptr[0] = 0x100 - csum;
+static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi, const void *data,
+ size_t size)
+{
+ const u8 *ptr = data;
+ unsigned long offset;
+ unsigned long value;
+ size_t i, j;
- value = INFOFRAME_HEADER_TYPE(type) |
- INFOFRAME_HEADER_VERSION(version) |
- INFOFRAME_HEADER_LEN(size - 1);
- tegra_hdmi_writel(hdmi, value, offset);
+ switch (ptr[0]) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ offset = HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER;
+ break;
- /* The audio inforame only has one set of subpack registers. The hdmi
- * block pads the rest of the data as per the spec so we have to fixup
- * the length before filling in the subpacks.
- */
- if (offset == HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER)
- size = 6;
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ offset = HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER;
+ break;
- /* each subpack 7 bytes devided into:
- * subpack_low - bytes 0 - 3
- * subpack_high - bytes 4 - 6 (with byte 7 padded to 0x00)
- */
- for (i = 0; i < size; i++) {
- size_t index = i % 7;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ offset = HDMI_NV_PDISP_HDMI_GENERIC_HEADER;
+ break;
+
+ default:
+ dev_err(hdmi->dev, "unsupported infoframe type: %02x\n",
+ ptr[0]);
+ return;
+ }
+
+ value = INFOFRAME_HEADER_TYPE(ptr[0]) |
+ INFOFRAME_HEADER_VERSION(ptr[1]) |
+ INFOFRAME_HEADER_LEN(ptr[2]);
+ tegra_hdmi_writel(hdmi, value, offset);
+ offset++;
- if (index == 0)
- memset(subpack, 0x0, sizeof(subpack));
+ /*
+ * Each subpack contains 7 bytes, divided into:
+ * - subpack_low: bytes 0 - 3
+ * - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00)
+ */
+ for (i = 3, j = 0; i < size; i += 7, j += 8) {
+ size_t rem = size - i, num = min_t(size_t, rem, 4);
- ((u8 *)subpack)[index] = ptr[i];
+ value = tegra_hdmi_subpack(&ptr[i], num);
+ tegra_hdmi_writel(hdmi, value, offset++);
- if (index == 6 || (i + 1 == size)) {
- unsigned int reg = offset + 1 + (i / 7) * 2;
+ num = min_t(size_t, rem - num, 3);
- tegra_hdmi_writel(hdmi, subpack[0], reg);
- tegra_hdmi_writel(hdmi, subpack[1], reg + 1);
- }
+ value = tegra_hdmi_subpack(&ptr[i + 4], num);
+ tegra_hdmi_writel(hdmi, value, offset++);
}
}
@@ -455,9 +469,8 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
struct drm_display_mode *mode)
{
struct hdmi_avi_infoframe frame;
- unsigned int h_front_porch;
- unsigned int hsize = 16;
- unsigned int vsize = 9;
+ u8 buffer[17];
+ ssize_t err;
if (hdmi->dvi) {
tegra_hdmi_writel(hdmi, 0,
@@ -465,69 +478,19 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
return;
}
- h_front_porch = mode->hsync_start - mode->hdisplay;
- memset(&frame, 0, sizeof(frame));
- frame.r = HDMI_AVI_R_SAME;
-
- switch (mode->vdisplay) {
- case 480:
- if (mode->hdisplay == 640) {
- frame.m = HDMI_AVI_M_4_3;
- frame.vic = 1;
- } else {
- frame.m = HDMI_AVI_M_16_9;
- frame.vic = 3;
- }
- break;
-
- case 576:
- if (((hsize * 10) / vsize) > 14) {
- frame.m = HDMI_AVI_M_16_9;
- frame.vic = 18;
- } else {
- frame.m = HDMI_AVI_M_4_3;
- frame.vic = 17;
- }
- break;
-
- case 720:
- case 1470: /* stereo mode */
- frame.m = HDMI_AVI_M_16_9;
-
- if (h_front_porch == 110)
- frame.vic = 4;
- else
- frame.vic = 19;
- break;
-
- case 1080:
- case 2205: /* stereo mode */
- frame.m = HDMI_AVI_M_16_9;
-
- switch (h_front_porch) {
- case 88:
- frame.vic = 16;
- break;
-
- case 528:
- frame.vic = 31;
- break;
-
- default:
- frame.vic = 32;
- break;
- }
- break;
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to setup AVI infoframe: %zd\n", err);
+ return;
+ }
- default:
- frame.m = HDMI_AVI_M_16_9;
- frame.vic = 0;
- break;
+ err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to pack AVI infoframe: %zd\n", err);
+ return;
}
- tegra_hdmi_write_infopack(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER,
- HDMI_INFOFRAME_TYPE_AVI, HDMI_AVI_VERSION,
- &frame, sizeof(frame));
+ tegra_hdmi_write_infopack(hdmi, buffer, err);
tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
@@ -536,6 +499,8 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
{
struct hdmi_audio_infoframe frame;
+ u8 buffer[14];
+ ssize_t err;
if (hdmi->dvi) {
tegra_hdmi_writel(hdmi, 0,
@@ -543,14 +508,29 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
return;
}
- memset(&frame, 0, sizeof(frame));
- frame.cc = HDMI_AUDIO_CC_2;
+ err = hdmi_audio_infoframe_init(&frame);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to initialize audio infoframe: %d\n",
+ err);
+ return;
+ }
+
+ frame.channels = 2;
+
+ err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to pack audio infoframe: %zd\n",
+ err);
+ return;
+ }
- tegra_hdmi_write_infopack(hdmi,
- HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER,
- HDMI_INFOFRAME_TYPE_AUDIO,
- HDMI_AUDIO_VERSION,
- &frame, sizeof(frame));
+ /*
+ * The audio infoframe has only one set of subpack registers, so the
+ * infoframe needs to be truncated. One set of subpack registers can
+ * contain 7 bytes. Including the 3 byte header only the first 10
+ * bytes can be programmed.
+ */
+ tegra_hdmi_write_infopack(hdmi, buffer, min(10, err));
tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
@@ -558,8 +538,10 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
{
- struct hdmi_stereo_infoframe frame;
+ struct hdmi_vendor_infoframe frame;
unsigned long value;
+ u8 buffer[10];
+ ssize_t err;
if (!hdmi->stereo) {
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
@@ -569,22 +551,32 @@ static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
}
memset(&frame, 0, sizeof(frame));
- frame.regid0 = 0x03;
- frame.regid1 = 0x0c;
- frame.regid2 = 0x00;
- frame.hdmi_video_format = 2;
+
+ frame.type = HDMI_INFOFRAME_TYPE_VENDOR;
+ frame.version = 0x01;
+ frame.length = 6;
+
+ frame.data[0] = 0x03; /* regid0 */
+ frame.data[1] = 0x0c; /* regid1 */
+ frame.data[2] = 0x00; /* regid2 */
+ frame.data[3] = 0x02 << 5; /* video format */
/* TODO: 74 MHz limit? */
if (1) {
- frame._3d_structure = 0;
+ frame.data[4] = 0x00 << 4; /* 3D structure */
} else {
- frame._3d_structure = 8;
- frame._3d_ext_data = 0;
+ frame.data[4] = 0x08 << 4; /* 3D structure */
+ frame.data[5] = 0x00 << 4; /* 3D ext. data */
+ }
+
+ err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to pack vendor infoframe: %zd\n",
+ err);
+ return;
}
- tegra_hdmi_write_infopack(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_HEADER,
- HDMI_INFOFRAME_TYPE_VENDOR,
- HDMI_VENDOR_VERSION, &frame, 6);
+ tegra_hdmi_write_infopack(hdmi, buffer, err);
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
value |= GENERIC_CTRL_ENABLE;
diff --git a/drivers/gpu/drm/tegra/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h
index 1477f36eb45..52ac36e08cc 100644
--- a/drivers/gpu/drm/tegra/hdmi.h
+++ b/drivers/gpu/drm/tegra/hdmi.h
@@ -10,195 +10,6 @@
#ifndef TEGRA_HDMI_H
#define TEGRA_HDMI_H 1
-#define HDMI_INFOFRAME_TYPE_VENDOR 0x81
-#define HDMI_INFOFRAME_TYPE_AVI 0x82
-#define HDMI_INFOFRAME_TYPE_SPD 0x83
-#define HDMI_INFOFRAME_TYPE_AUDIO 0x84
-#define HDMI_INFOFRAME_TYPE_MPEG_SRC 0x85
-#define HDMI_INFOFRAME_TYPE_NTSC_VBI 0x86
-
-/* all fields little endian */
-struct hdmi_avi_infoframe {
- /* PB0 */
- u8 csum;
-
- /* PB1 */
- unsigned s:2; /* scan information */
- unsigned b:2; /* bar info data valid */
- unsigned a:1; /* active info present */
- unsigned y:2; /* RGB or YCbCr */
- unsigned res1:1;
-
- /* PB2 */
- unsigned r:4; /* active format aspect ratio */
- unsigned m:2; /* picture aspect ratio */
- unsigned c:2; /* colorimetry */
-
- /* PB3 */
- unsigned sc:2; /* scan information */
- unsigned q:2; /* quantization range */
- unsigned ec:3; /* extended colorimetry */
- unsigned itc:1; /* it content */
-
- /* PB4 */
- unsigned vic:7; /* video format id code */
- unsigned res4:1;
-
- /* PB5 */
- unsigned pr:4; /* pixel repetition factor */
- unsigned cn:2; /* it content type*/
- unsigned yq:2; /* ycc quantization range */
-
- /* PB6-7 */
- u16 top_bar_end_line;
-
- /* PB8-9 */
- u16 bot_bar_start_line;
-
- /* PB10-11 */
- u16 left_bar_end_pixel;
-
- /* PB12-13 */
- u16 right_bar_start_pixel;
-} __packed;
-
-#define HDMI_AVI_VERSION 0x02
-
-#define HDMI_AVI_Y_RGB 0x0
-#define HDMI_AVI_Y_YCBCR_422 0x1
-#define HDMI_AVI_Y_YCBCR_444 0x2
-
-#define HDMI_AVI_B_VERT 0x1
-#define HDMI_AVI_B_HORIZ 0x2
-
-#define HDMI_AVI_S_NONE 0x0
-#define HDMI_AVI_S_OVERSCAN 0x1
-#define HDMI_AVI_S_UNDERSCAN 0x2
-
-#define HDMI_AVI_C_NONE 0x0
-#define HDMI_AVI_C_SMPTE 0x1
-#define HDMI_AVI_C_ITU_R 0x2
-#define HDMI_AVI_C_EXTENDED 0x4
-
-#define HDMI_AVI_M_4_3 0x1
-#define HDMI_AVI_M_16_9 0x2
-
-#define HDMI_AVI_R_SAME 0x8
-#define HDMI_AVI_R_4_3_CENTER 0x9
-#define HDMI_AVI_R_16_9_CENTER 0xa
-#define HDMI_AVI_R_14_9_CENTER 0xb
-
-/* all fields little endian */
-struct hdmi_audio_infoframe {
- /* PB0 */
- u8 csum;
-
- /* PB1 */
- unsigned cc:3; /* channel count */
- unsigned res1:1;
- unsigned ct:4; /* coding type */
-
- /* PB2 */
- unsigned ss:2; /* sample size */
- unsigned sf:3; /* sample frequency */
- unsigned res2:3;
-
- /* PB3 */
- unsigned cxt:5; /* coding extention type */
- unsigned res3:3;
-
- /* PB4 */
- u8 ca; /* channel/speaker allocation */
-
- /* PB5 */
- unsigned res5:3;
- unsigned lsv:4; /* level shift value */
- unsigned dm_inh:1; /* downmix inhibit */
-
- /* PB6-10 reserved */
- u8 res6;
- u8 res7;
- u8 res8;
- u8 res9;
- u8 res10;
-} __packed;
-
-#define HDMI_AUDIO_VERSION 0x01
-
-#define HDMI_AUDIO_CC_STREAM 0x0 /* specified by audio stream */
-#define HDMI_AUDIO_CC_2 0x1
-#define HDMI_AUDIO_CC_3 0x2
-#define HDMI_AUDIO_CC_4 0x3
-#define HDMI_AUDIO_CC_5 0x4
-#define HDMI_AUDIO_CC_6 0x5
-#define HDMI_AUDIO_CC_7 0x6
-#define HDMI_AUDIO_CC_8 0x7
-
-#define HDMI_AUDIO_CT_STREAM 0x0 /* specified by audio stream */
-#define HDMI_AUDIO_CT_PCM 0x1
-#define HDMI_AUDIO_CT_AC3 0x2
-#define HDMI_AUDIO_CT_MPEG1 0x3
-#define HDMI_AUDIO_CT_MP3 0x4
-#define HDMI_AUDIO_CT_MPEG2 0x5
-#define HDMI_AUDIO_CT_AAC_LC 0x6
-#define HDMI_AUDIO_CT_DTS 0x7
-#define HDMI_AUDIO_CT_ATRAC 0x8
-#define HDMI_AUDIO_CT_DSD 0x9
-#define HDMI_AUDIO_CT_E_AC3 0xa
-#define HDMI_AUDIO_CT_DTS_HD 0xb
-#define HDMI_AUDIO_CT_MLP 0xc
-#define HDMI_AUDIO_CT_DST 0xd
-#define HDMI_AUDIO_CT_WMA_PRO 0xe
-#define HDMI_AUDIO_CT_CXT 0xf
-
-#define HDMI_AUDIO_SF_STREAM 0x0 /* specified by audio stream */
-#define HDMI_AUIDO_SF_32K 0x1
-#define HDMI_AUDIO_SF_44_1K 0x2
-#define HDMI_AUDIO_SF_48K 0x3
-#define HDMI_AUDIO_SF_88_2K 0x4
-#define HDMI_AUDIO_SF_96K 0x5
-#define HDMI_AUDIO_SF_176_4K 0x6
-#define HDMI_AUDIO_SF_192K 0x7
-
-#define HDMI_AUDIO_SS_STREAM 0x0 /* specified by audio stream */
-#define HDMI_AUDIO_SS_16BIT 0x1
-#define HDMI_AUDIO_SS_20BIT 0x2
-#define HDMI_AUDIO_SS_24BIT 0x3
-
-#define HDMI_AUDIO_CXT_CT 0x0 /* refer to coding in CT */
-#define HDMI_AUDIO_CXT_HE_AAC 0x1
-#define HDMI_AUDIO_CXT_HE_AAC_V2 0x2
-#define HDMI_AUDIO_CXT_MPEG_SURROUND 0x3
-
-/* all fields little endian */
-struct hdmi_stereo_infoframe {
- /* PB0 */
- u8 csum;
-
- /* PB1 */
- u8 regid0;
-
- /* PB2 */
- u8 regid1;
-
- /* PB3 */
- u8 regid2;
-
- /* PB4 */
- unsigned res1:5;
- unsigned hdmi_video_format:3;
-
- /* PB5 */
- unsigned res2:4;
- unsigned _3d_structure:4;
-
- /* PB6*/
- unsigned res3:4;
- unsigned _3d_ext_data:4;
-} __packed;
-
-#define HDMI_VENDOR_VERSION 0x01
-
/* register definitions */
#define HDMI_CTXSW 0x00
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
new file mode 100644
index 00000000000..d24d0401347
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -0,0 +1,13 @@
+config DRM_TILCDC
+ tristate "DRM Support for TI LCDC Display Controller"
+ depends on DRM && OF && ARM
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+ select OF_VIDEOMODE
+ select OF_DISPLAY_TIMING
+ select BACKLIGHT_CLASS_DEVICE
+ help
+ Choose this option if you have a TI SoC with an LCDC display
+ controller, for example the AM33xx (as found on the BeagleBone),
+ DA8xx, or OMAP-L1xx. This driver replaces the FB_DA8XX fbdev driver.
diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile
new file mode 100644
index 00000000000..deda656b10e
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/Makefile
@@ -0,0 +1,10 @@
+ccflags-y := -Iinclude/drm -Werror
+
+tilcdc-y := \
+ tilcdc_crtc.o \
+ tilcdc_tfp410.o \
+ tilcdc_slave.o \
+ tilcdc_panel.o \
+ tilcdc_drv.o
+
+obj-$(CONFIG_DRM_TILCDC) += tilcdc.o
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
new file mode 100644
index 00000000000..5dd3c7d031d
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -0,0 +1,602 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kfifo.h>
+
+#include "tilcdc_drv.h"
+#include "tilcdc_regs.h"
+
+struct tilcdc_crtc {
+ struct drm_crtc base;
+
+ const struct tilcdc_panel_info *info;
+ uint32_t dirty;
+ dma_addr_t start, end;
+ struct drm_pending_vblank_event *event;
+ int dpms;
+ wait_queue_head_t frame_done_wq;
+ bool frame_done;
+
+ /* fb currently set to scanout 0/1: */
+ struct drm_framebuffer *scanout[2];
+
+ /* for deferred fb unref's: */
+ DECLARE_KFIFO_PTR(unref_fifo, struct drm_framebuffer *);
+ struct work_struct work;
+};
+#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)
+
+static void unref_worker(struct work_struct *work)
+{
+ struct tilcdc_crtc *tilcdc_crtc = container_of(work, struct tilcdc_crtc, work);
+ struct drm_device *dev = tilcdc_crtc->base.dev;
+ struct drm_framebuffer *fb;
+
+ mutex_lock(&dev->mode_config.mutex);
+ while (kfifo_get(&tilcdc_crtc->unref_fifo, &fb))
+ drm_framebuffer_unreference(fb);
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
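+/*
+ * Program the DMA base/ceiling registers for scanout buffer @n (the LCDC
+ * ping-pongs between two buffer descriptors) and hand any previously
+ * scanned-out framebuffer to the unref worker.
+ */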
+static void set_scanout(struct drm_crtc *crtc, int n)
+{
+ static const uint32_t base_reg[] = {
+ LCDC_DMA_FB_BASE_ADDR_0_REG, LCDC_DMA_FB_BASE_ADDR_1_REG,
+ };
+ static const uint32_t ceil_reg[] = {
+ LCDC_DMA_FB_CEILING_ADDR_0_REG, LCDC_DMA_FB_CEILING_ADDR_1_REG,
+ };
+ static const uint32_t stat[] = {
+ LCDC_END_OF_FRAME0, LCDC_END_OF_FRAME1,
+ };
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+
+ pm_runtime_get_sync(dev->dev);
+ tilcdc_write(dev, base_reg[n], tilcdc_crtc->start);
+ tilcdc_write(dev, ceil_reg[n], tilcdc_crtc->end);
+ if (tilcdc_crtc->scanout[n]) {
+ if (kfifo_put(&tilcdc_crtc->unref_fifo,
+ (const struct drm_framebuffer **)&tilcdc_crtc->scanout[n])) {
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ queue_work(priv->wq, &tilcdc_crtc->work);
+ } else {
+ dev_err(dev->dev, "unref fifo full!\n");
+ drm_framebuffer_unreference(tilcdc_crtc->scanout[n]);
+ }
+ }
+ tilcdc_crtc->scanout[n] = crtc->fb;
+ drm_framebuffer_reference(tilcdc_crtc->scanout[n]);
+ tilcdc_crtc->dirty &= ~stat[n];
+ pm_runtime_put_sync(dev->dev);
+}
+
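+/*
+ * Recompute the scanout start/end addresses from the current framebuffer
+ * and x/y offsets. If the CRTC is already enabled, only mark both frames
+ * dirty so the flip happens from the end-of-frame interrupt; otherwise
+ * program the registers immediately.
+ */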
+static void update_scanout(struct drm_crtc *crtc)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct drm_gem_cma_object *gem;
+ unsigned int depth, bpp;
+
+ drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+
+ tilcdc_crtc->start = gem->paddr + fb->offsets[0] +
+ (crtc->y * fb->pitches[0]) + (crtc->x * bpp/8);
+
+ tilcdc_crtc->end = tilcdc_crtc->start +
+ (crtc->mode.vdisplay * fb->pitches[0]);
+
+ if (tilcdc_crtc->dpms == DRM_MODE_DPMS_ON) {
+ /* already enabled, so just mark the frames that need
+ * updating and they will be updated on vblank:
+ */
+ tilcdc_crtc->dirty |= LCDC_END_OF_FRAME0 | LCDC_END_OF_FRAME1;
+ drm_vblank_get(dev, 0);
+ } else {
+ /* not enabled yet, so update registers immediately: */
+ set_scanout(crtc, 0);
+ set_scanout(crtc, 1);
+ }
+}
+
+static void start(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+
+ if (priv->rev == 2) {
+ tilcdc_set(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
+ msleep(1);
+ tilcdc_clear(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
+ msleep(1);
+ }
+
+ tilcdc_set(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
+ tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_PALETTE_LOAD_MODE(DATA_ONLY));
+ tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+}
+
+static void stop(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+}
+
+static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+
+ WARN_ON(tilcdc_crtc->dpms == DRM_MODE_DPMS_ON);
+
+ drm_crtc_cleanup(crtc);
+ WARN_ON(!kfifo_is_empty(&tilcdc_crtc->unref_fifo));
+ kfifo_free(&tilcdc_crtc->unref_fifo);
+ kfree(tilcdc_crtc);
+}
+
+static int tilcdc_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+
+ if (tilcdc_crtc->event) {
+ dev_err(dev->dev, "already pending page flip!\n");
+ return -EBUSY;
+ }
+
+ crtc->fb = fb;
+ tilcdc_crtc->event = event;
+ update_scanout(crtc);
+
+ return 0;
+}
+
+static void tilcdc_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+
+ /* we really only care about on or off: */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (tilcdc_crtc->dpms == mode)
+ return;
+
+ tilcdc_crtc->dpms = mode;
+
+ pm_runtime_get_sync(dev->dev);
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ pm_runtime_forbid(dev->dev);
+ start(crtc);
+ } else {
+ tilcdc_crtc->frame_done = false;
+ stop(crtc);
+
+ /* if necessary wait for framedone irq which will still come
+ * before putting things to sleep..
+ */
+ if (priv->rev == 2) {
+ int ret = wait_event_timeout(
+ tilcdc_crtc->frame_done_wq,
+ tilcdc_crtc->frame_done,
+ msecs_to_jiffies(50));
+ if (ret == 0)
+ dev_err(dev->dev, "timeout waiting for framedone\n");
+ }
+ pm_runtime_allow(dev->dev);
+ }
+
+ pm_runtime_put_sync(dev->dev);
+}
+
+static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void tilcdc_crtc_prepare(struct drm_crtc *crtc)
+{
+ tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void tilcdc_crtc_commit(struct drm_crtc *crtc)
+{
+ tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ const struct tilcdc_panel_info *info = tilcdc_crtc->info;
+ uint32_t reg, hbp, hfp, hsw, vbp, vfp, vsw;
+ int ret;
+
+ ret = tilcdc_crtc_mode_valid(crtc, mode);
+ if (WARN_ON(ret))
+ return ret;
+
+ if (WARN_ON(!info))
+ return -EINVAL;
+
+ pm_runtime_get_sync(dev->dev);
+
+ /* Configure the Burst Size and fifo threshold of DMA: */
+ reg = tilcdc_read(dev, LCDC_DMA_CTRL_REG) & ~0x00000770;
+ switch (info->dma_burst_sz) {
+ case 1:
+ reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_1);
+ break;
+ case 2:
+ reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_2);
+ break;
+ case 4:
+ reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_4);
+ break;
+ case 8:
+ reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_8);
+ break;
+ case 16:
+ reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_16);
+ break;
+ default:
+ return -EINVAL;
+ }
+ reg |= (info->fifo_th << 8);
+ tilcdc_write(dev, LCDC_DMA_CTRL_REG, reg);
+
+ /* Configure timings: */
+ hbp = mode->htotal - mode->hsync_end;
+ hfp = mode->hsync_start - mode->hdisplay;
+ hsw = mode->hsync_end - mode->hsync_start;
+ vbp = mode->vtotal - mode->vsync_end;
+ vfp = mode->vsync_start - mode->vdisplay;
+ vsw = mode->vsync_end - mode->vsync_start;
+
+ DBG("%dx%d, hbp=%u, hfp=%u, hsw=%u, vbp=%u, vfp=%u, vsw=%u",
+ mode->hdisplay, mode->vdisplay, hbp, hfp, hsw, vbp, vfp, vsw);
+
+ /* Configure the AC Bias Period and Number of Transitions per Interrupt: */
+ reg = tilcdc_read(dev, LCDC_RASTER_TIMING_2_REG) & ~0x000fff00;
+ reg |= LCDC_AC_BIAS_FREQUENCY(info->ac_bias) |
+ LCDC_AC_BIAS_TRANSITIONS_PER_INT(info->ac_bias_intrpt);
+ if (priv->rev == 2) {
+ reg |= (hfp & 0x300) >> 8;
+ reg |= (hbp & 0x300) >> 4;
+ reg |= (hsw & 0x3c0) << 21;
+ }
+ tilcdc_write(dev, LCDC_RASTER_TIMING_2_REG, reg);
+
+ reg = (((mode->hdisplay >> 4) - 1) << 4) |
+ ((hbp & 0xff) << 24) |
+ ((hfp & 0xff) << 16) |
+ ((hsw & 0x3f) << 10);
+ if (priv->rev == 2)
+ reg |= (((mode->hdisplay >> 4) - 1) & 0x40) >> 3;
+ tilcdc_write(dev, LCDC_RASTER_TIMING_0_REG, reg);
+
+ reg = ((mode->vdisplay - 1) & 0x3ff) |
+ ((vbp & 0xff) << 24) |
+ ((vfp & 0xff) << 16) |
+ ((vsw & 0x3f) << 10);
+ tilcdc_write(dev, LCDC_RASTER_TIMING_1_REG, reg);
+
+ /* Configure display type: */
+ reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG) &
+ ~(LCDC_TFT_MODE | LCDC_MONO_8BIT_MODE | LCDC_MONOCHROME_MODE |
+ LCDC_V2_TFT_24BPP_MODE | LCDC_V2_TFT_24BPP_UNPACK | 0x000ff000);
+ reg |= LCDC_TFT_MODE; /* no monochrome/passive support */
+ if (info->tft_alt_mode)
+ reg |= LCDC_TFT_ALT_ENABLE;
+ if (priv->rev == 2) {
+ unsigned int depth, bpp;
+
+ drm_fb_get_bpp_depth(crtc->fb->pixel_format, &depth, &bpp);
+ switch (bpp) {
+ case 16:
+ break;
+ case 32:
+ reg |= LCDC_V2_TFT_24BPP_UNPACK;
+ /* fallthrough */
+ case 24:
+ reg |= LCDC_V2_TFT_24BPP_MODE;
+ break;
+ default:
+ dev_err(dev->dev, "invalid pixel format\n");
+ return -EINVAL;
+ }
+ }
+ reg |= info->fdd << 12;
+ tilcdc_write(dev, LCDC_RASTER_CTRL_REG, reg);
+
+ if (info->invert_pxl_clk)
+ tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);
+ else
+ tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);
+
+ if (info->sync_ctrl)
+ tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);
+ else
+ tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);
+
+ if (info->sync_edge)
+ tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
+ else
+ tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
+ else
+ tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);
+ else
+ tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);
+
+ if (info->raster_order)
+ tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
+ else
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
+
+
+ update_scanout(crtc);
+ tilcdc_crtc_update_clk(crtc);
+
+ pm_runtime_put_sync(dev->dev);
+
+ return 0;
+}
+
+static int tilcdc_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ update_scanout(crtc);
+ return 0;
+}
+
+static void tilcdc_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static const struct drm_crtc_funcs tilcdc_crtc_funcs = {
+ .destroy = tilcdc_crtc_destroy,
+ .set_config = drm_crtc_helper_set_config,
+ .page_flip = tilcdc_crtc_page_flip,
+};
+
+static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = {
+ .dpms = tilcdc_crtc_dpms,
+ .mode_fixup = tilcdc_crtc_mode_fixup,
+ .prepare = tilcdc_crtc_prepare,
+ .commit = tilcdc_crtc_commit,
+ .mode_set = tilcdc_crtc_mode_set,
+ .mode_set_base = tilcdc_crtc_mode_set_base,
+ .load_lut = tilcdc_crtc_load_lut,
+};
+
+int tilcdc_crtc_max_width(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ int max_width = 0;
+
+ if (priv->rev == 1)
+ max_width = 1024;
+ else if (priv->rev == 2)
+ max_width = 2048;
+
+ return max_width;
+}
+
+int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+ struct tilcdc_drm_private *priv = crtc->dev->dev_private;
+ unsigned int bandwidth;
+
+ if (mode->hdisplay > tilcdc_crtc_max_width(crtc))
+ return MODE_VIRTUAL_X;
+
+ /* width must be multiple of 16 */
+ if (mode->hdisplay & 0xf)
+ return MODE_VIRTUAL_X;
+
+ if (mode->vdisplay > 2048)
+ return MODE_VIRTUAL_Y;
+
+ /* filter out modes that would require too much memory bandwidth: */
+ bandwidth = mode->hdisplay * mode->vdisplay * drm_mode_vrefresh(mode);
+ if (bandwidth > priv->max_bandwidth)
+ return MODE_BAD;
+
+ return MODE_OK;
+}
+
+void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
+ const struct tilcdc_panel_info *info)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ tilcdc_crtc->info = info;
+}
+
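+/*
+ * Reconfigure the pixel clock divisor for the current mode; the raster is
+ * briefly turned off while the clocks are changed.
+ */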
+void tilcdc_crtc_update_clk(struct drm_crtc *crtc)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ int dpms = tilcdc_crtc->dpms;
+ unsigned int lcd_clk, div;
+ int ret;
+
+ pm_runtime_get_sync(dev->dev);
+
+ if (dpms == DRM_MODE_DPMS_ON)
+ tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ /* in raster mode, minimum divisor is 2: */
+ ret = clk_set_rate(priv->disp_clk, crtc->mode.clock * 1000 * 2);
+ if (ret) {
+ dev_err(dev->dev, "failed to set display clock rate to: %d\n",
+ crtc->mode.clock);
+ goto out;
+ }
+
+ lcd_clk = clk_get_rate(priv->clk);
+ div = lcd_clk / (crtc->mode.clock * 1000);
+
+ DBG("lcd_clk=%u, mode clock=%d, div=%u", lcd_clk, crtc->mode.clock, div);
+ DBG("fck=%lu, dpll_disp_ck=%lu", clk_get_rate(priv->clk), clk_get_rate(priv->disp_clk));
+
+ /* Configure the LCD clock divisor. */
+ tilcdc_write(dev, LCDC_CTRL_REG, LCDC_CLK_DIVISOR(div) |
+ LCDC_RASTER_MODE);
+
+ if (priv->rev == 2)
+ tilcdc_set(dev, LCDC_CLK_ENABLE_REG,
+ LCDC_V2_DMA_CLK_EN | LCDC_V2_LIDD_CLK_EN |
+ LCDC_V2_CORE_CLK_EN);
+
+ if (dpms == DRM_MODE_DPMS_ON)
+ tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+
+out:
+ pm_runtime_put_sync(dev->dev);
+}
+
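+/*
+ * CRTC interrupt handler: restart the raster on sync lost/FIFO underflow
+ * errors, flip to the dirty scanout buffers on end-of-frame, complete any
+ * pending page-flip event and, on rev 2, signal frame-done completion.
+ */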
+irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ uint32_t stat = tilcdc_read_irqstatus(dev);
+
+ if ((stat & LCDC_SYNC_LOST) && (stat & LCDC_FIFO_UNDERFLOW)) {
+ stop(crtc);
+ dev_err(dev->dev, "error: %08x\n", stat);
+ tilcdc_clear_irqstatus(dev, stat);
+ start(crtc);
+ } else if (stat & LCDC_PL_LOAD_DONE) {
+ tilcdc_clear_irqstatus(dev, stat);
+ } else {
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
+ uint32_t dirty = tilcdc_crtc->dirty & stat;
+
+ tilcdc_clear_irqstatus(dev, stat);
+
+ if (dirty & LCDC_END_OF_FRAME0)
+ set_scanout(crtc, 0);
+
+ if (dirty & LCDC_END_OF_FRAME1)
+ set_scanout(crtc, 1);
+
+ drm_handle_vblank(dev, 0);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = tilcdc_crtc->event;
+ tilcdc_crtc->event = NULL;
+ if (event)
+ drm_send_vblank_event(dev, 0, event);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (dirty && !tilcdc_crtc->dirty)
+ drm_vblank_put(dev, 0);
+ }
+
+ if (priv->rev == 2) {
+ if (stat & LCDC_FRAME_DONE) {
+ tilcdc_crtc->frame_done = true;
+ wake_up(&tilcdc_crtc->frame_done_wq);
+ }
+ tilcdc_write(dev, LCDC_END_OF_INT_IND_REG, 0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+void tilcdc_crtc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_pending_vblank_event *event;
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ /* Destroy the pending vertical blanking event associated with the
+ * pending page flip, if any, and disable vertical blanking interrupts.
+ */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = tilcdc_crtc->event;
+ if (event && event->base.file_priv == file) {
+ tilcdc_crtc->event = NULL;
+ event->base.destroy(&event->base);
+ drm_vblank_put(dev, 0);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
+{
+ struct tilcdc_crtc *tilcdc_crtc;
+ struct drm_crtc *crtc;
+ int ret;
+
+ tilcdc_crtc = kzalloc(sizeof(*tilcdc_crtc), GFP_KERNEL);
+ if (!tilcdc_crtc) {
+ dev_err(dev->dev, "allocation failed\n");
+ return NULL;
+ }
+
+ crtc = &tilcdc_crtc->base;
+
+ tilcdc_crtc->dpms = DRM_MODE_DPMS_OFF;
+ init_waitqueue_head(&tilcdc_crtc->frame_done_wq);
+
+ ret = kfifo_alloc(&tilcdc_crtc->unref_fifo, 16, GFP_KERNEL);
+ if (ret) {
+ dev_err(dev->dev, "could not allocate unref FIFO\n");
+ goto fail;
+ }
+
+ INIT_WORK(&tilcdc_crtc->work, unref_worker);
+
+ ret = drm_crtc_init(dev, crtc, &tilcdc_crtc_funcs);
+ if (ret < 0)
+ goto fail;
+
+ drm_crtc_helper_add(crtc, &tilcdc_crtc_helper_funcs);
+
+ return crtc;
+
+fail:
+ tilcdc_crtc_destroy(crtc);
+ return NULL;
+}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
new file mode 100644
index 00000000000..c5b592dc197
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -0,0 +1,611 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* LCDC DRM driver, based on da8xx-fb */
+
+#include "tilcdc_drv.h"
+#include "tilcdc_regs.h"
+#include "tilcdc_tfp410.h"
+#include "tilcdc_slave.h"
+#include "tilcdc_panel.h"
+
+#include "drm_fb_helper.h"
+
+static LIST_HEAD(module_list);
+
+void tilcdc_module_init(struct tilcdc_module *mod, const char *name,
+ const struct tilcdc_module_ops *funcs)
+{
+ mod->name = name;
+ mod->funcs = funcs;
+ INIT_LIST_HEAD(&mod->list);
+ list_add(&mod->list, &module_list);
+}
+
+void tilcdc_module_cleanup(struct tilcdc_module *mod)
+{
+ list_del(&mod->list);
+}
+
+static struct of_device_id tilcdc_of_match[];
+
+static struct drm_framebuffer *tilcdc_fb_create(struct drm_device *dev,
+ struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ return drm_fb_cma_create(dev, file_priv, mode_cmd);
+}
+
+static void tilcdc_fb_output_poll_changed(struct drm_device *dev)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ if (priv->fbdev)
+ drm_fbdev_cma_hotplug_event(priv->fbdev);
+}
+
+static const struct drm_mode_config_funcs mode_config_funcs = {
+ .fb_create = tilcdc_fb_create,
+ .output_poll_changed = tilcdc_fb_output_poll_changed,
+};
+
+static int modeset_init(struct drm_device *dev)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ struct tilcdc_module *mod;
+
+ drm_mode_config_init(dev);
+
+ priv->crtc = tilcdc_crtc_create(dev);
+
+ list_for_each_entry(mod, &module_list, list) {
+ DBG("loading module: %s", mod->name);
+ mod->funcs->modeset_init(mod, dev);
+ }
+
+ if ((priv->num_encoders == 0) || (priv->num_connectors == 0)) {
+ /* oh nos! */
+ dev_err(dev->dev, "no encoders/connectors found\n");
+ return -ENXIO;
+ }
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = tilcdc_crtc_max_width(priv->crtc);
+ dev->mode_config.max_height = 2048;
+ dev->mode_config.funcs = &mode_config_funcs;
+
+ return 0;
+}
+
+#ifdef CONFIG_CPU_FREQ
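+/*
+ * If a cpufreq transition changed the functional clock rate, recompute
+ * the LCD clock divisor so the pixel clock stays at the requested rate.
+ */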
+static int cpufreq_transition(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct tilcdc_drm_private *priv = container_of(nb,
+ struct tilcdc_drm_private, freq_transition);
+ if (val == CPUFREQ_POSTCHANGE) {
+ if (priv->lcd_fck_rate != clk_get_rate(priv->clk)) {
+ priv->lcd_fck_rate = clk_get_rate(priv->clk);
+ tilcdc_crtc_update_clk(priv->crtc);
+ }
+ }
+
+ return 0;
+}
+#endif
+
+/*
+ * DRM operations:
+ */
+
+static int tilcdc_unload(struct drm_device *dev)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ struct tilcdc_module *mod, *cur;
+
+ drm_kms_helper_poll_fini(dev);
+ drm_mode_config_cleanup(dev);
+ drm_vblank_cleanup(dev);
+
+ pm_runtime_get_sync(dev->dev);
+ drm_irq_uninstall(dev);
+ pm_runtime_put_sync(dev->dev);
+
+#ifdef CONFIG_CPU_FREQ
+ cpufreq_unregister_notifier(&priv->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+
+ if (priv->clk)
+ clk_put(priv->clk);
+
+ if (priv->mmio)
+ iounmap(priv->mmio);
+
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
+
+ dev->dev_private = NULL;
+
+ pm_runtime_disable(dev->dev);
+
+ list_for_each_entry_safe(mod, cur, &module_list, list) {
+ DBG("destroying module: %s", mod->name);
+ mod->funcs->destroy(mod);
+ }
+
+ kfree(priv);
+
+ return 0;
+}
+
+static int tilcdc_load(struct drm_device *dev, unsigned long flags)
+{
+ struct platform_device *pdev = dev->platformdev;
+ struct device_node *node = pdev->dev.of_node;
+ struct tilcdc_drm_private *priv;
+ struct resource *res;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(dev->dev, "failed to allocate private data\n");
+ return -ENOMEM;
+ }
+
+ dev->dev_private = priv;
+
+ priv->wq = alloc_ordered_workqueue("tilcdc", 0);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev->dev, "failed to get memory resource\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ priv->mmio = ioremap_nocache(res->start, resource_size(res));
+ if (!priv->mmio) {
+ dev_err(dev->dev, "failed to ioremap\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ priv->clk = clk_get(dev->dev, "fck");
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev->dev, "failed to get functional clock\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ priv->disp_clk = clk_get(dev->dev, "dpll_disp_ck");
+ if (IS_ERR(priv->disp_clk)) {
+ dev_err(dev->dev, "failed to get display clock\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+#ifdef CONFIG_CPU_FREQ
+ priv->lcd_fck_rate = clk_get_rate(priv->clk);
+ priv->freq_transition.notifier_call = cpufreq_transition;
+ ret = cpufreq_register_notifier(&priv->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ if (ret) {
+ dev_err(dev->dev, "failed to register cpufreq notifier\n");
+ goto fail;
+ }
+#endif
+
+ if (of_property_read_u32(node, "max-bandwidth", &priv->max_bandwidth))
+ priv->max_bandwidth = 1280 * 1024 * 60;
+
+ pm_runtime_enable(dev->dev);
+
+ /* Determine LCD IP Version */
+ pm_runtime_get_sync(dev->dev);
+ switch (tilcdc_read(dev, LCDC_PID_REG)) {
+ case 0x4c100102:
+ priv->rev = 1;
+ break;
+ case 0x4f200800:
+ case 0x4f201000:
+ priv->rev = 2;
+ break;
+ default:
+ dev_warn(dev->dev, "Unknown PID Reg value 0x%08x, "
+ "defaulting to LCD revision 1\n",
+ tilcdc_read(dev, LCDC_PID_REG));
+ priv->rev = 1;
+ break;
+ }
+
+ pm_runtime_put_sync(dev->dev);
+
+ ret = modeset_init(dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to initialize mode setting\n");
+ goto fail;
+ }
+
+ ret = drm_vblank_init(dev, 1);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to initialize vblank\n");
+ goto fail;
+ }
+
+ pm_runtime_get_sync(dev->dev);
+ ret = drm_irq_install(dev);
+ pm_runtime_put_sync(dev->dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to install IRQ handler\n");
+ goto fail;
+ }
+
+ platform_set_drvdata(pdev, dev);
+
+ priv->fbdev = drm_fbdev_cma_init(dev, 16,
+ dev->mode_config.num_crtc,
+ dev->mode_config.num_connector);
+
+ drm_kms_helper_poll_init(dev);
+
+ return 0;
+
+fail:
+ tilcdc_unload(dev);
+ return ret;
+}
+
+static void tilcdc_preclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+
+ tilcdc_crtc_cancel_page_flip(priv->crtc, file);
+}
+
+static void tilcdc_lastclose(struct drm_device *dev)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ drm_fbdev_cma_restore_mode(priv->fbdev);
+}
+
+static irqreturn_t tilcdc_irq(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = arg;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ return tilcdc_crtc_irq(priv->crtc);
+}
+
+static void tilcdc_irq_preinstall(struct drm_device *dev)
+{
+ tilcdc_clear_irqstatus(dev, 0xffffffff);
+}
+
+static int tilcdc_irq_postinstall(struct drm_device *dev)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+
+ /* enable FIFO underflow irq: */
+ if (priv->rev == 1) {
+ tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_UNDERFLOW_INT_ENA);
+ } else {
+ tilcdc_set(dev, LCDC_INT_ENABLE_SET_REG, LCDC_V2_UNDERFLOW_INT_ENA);
+ }
+
+ return 0;
+}
+
+static void tilcdc_irq_uninstall(struct drm_device *dev)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+
+ /* disable irqs that we might have enabled: */
+ if (priv->rev == 1) {
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA);
+ tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_V1_END_OF_FRAME_INT_ENA);
+ } else {
+ tilcdc_clear(dev, LCDC_INT_ENABLE_SET_REG,
+ LCDC_V2_UNDERFLOW_INT_ENA | LCDC_V2_PL_INT_ENA |
+ LCDC_V2_END_OF_FRAME0_INT_ENA | LCDC_V2_END_OF_FRAME1_INT_ENA |
+ LCDC_FRAME_DONE);
+ }
+
+}
+
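+/*
+ * End-of-frame interrupts serve as the vblank source: rev 1 enables them
+ * through the DMA control register, rev 2 through the interrupt enable
+ * set register.
+ */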
+static void enable_vblank(struct drm_device *dev, bool enable)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ u32 reg, mask;
+
+ if (priv->rev == 1) {
+ reg = LCDC_DMA_CTRL_REG;
+ mask = LCDC_V1_END_OF_FRAME_INT_ENA;
+ } else {
+ reg = LCDC_INT_ENABLE_SET_REG;
+ mask = LCDC_V2_END_OF_FRAME0_INT_ENA |
+ LCDC_V2_END_OF_FRAME1_INT_ENA | LCDC_FRAME_DONE;
+ }
+
+ if (enable)
+ tilcdc_set(dev, reg, mask);
+ else
+ tilcdc_clear(dev, reg, mask);
+}
+
+static int tilcdc_enable_vblank(struct drm_device *dev, int crtc)
+{
+ enable_vblank(dev, true);
+ return 0;
+}
+
+static void tilcdc_disable_vblank(struct drm_device *dev, int crtc)
+{
+ enable_vblank(dev, false);
+}
+
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_PM_SLEEP)
+static const struct {
+ const char *name;
+ uint8_t rev;
+ uint8_t save;
+ uint32_t reg;
+} registers[] = {
+#define REG(rev, save, reg) { #reg, rev, save, reg }
+ /* exists in revision 1: */
+ REG(1, false, LCDC_PID_REG),
+ REG(1, true, LCDC_CTRL_REG),
+ REG(1, false, LCDC_STAT_REG),
+ REG(1, true, LCDC_RASTER_CTRL_REG),
+ REG(1, true, LCDC_RASTER_TIMING_0_REG),
+ REG(1, true, LCDC_RASTER_TIMING_1_REG),
+ REG(1, true, LCDC_RASTER_TIMING_2_REG),
+ REG(1, true, LCDC_DMA_CTRL_REG),
+ REG(1, true, LCDC_DMA_FB_BASE_ADDR_0_REG),
+ REG(1, true, LCDC_DMA_FB_CEILING_ADDR_0_REG),
+ REG(1, true, LCDC_DMA_FB_BASE_ADDR_1_REG),
+ REG(1, true, LCDC_DMA_FB_CEILING_ADDR_1_REG),
+ /* new in revision 2: */
+ REG(2, false, LCDC_RAW_STAT_REG),
+ REG(2, false, LCDC_MASKED_STAT_REG),
+ REG(2, false, LCDC_INT_ENABLE_SET_REG),
+ REG(2, false, LCDC_INT_ENABLE_CLR_REG),
+ REG(2, false, LCDC_END_OF_INT_IND_REG),
+ REG(2, true, LCDC_CLK_ENABLE_REG),
+ REG(2, true, LCDC_INT_ENABLE_SET_REG),
+#undef REG
+};
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+static int tilcdc_regs_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ unsigned i;
+
+ pm_runtime_get_sync(dev->dev);
+
+ seq_printf(m, "revision: %d\n", priv->rev);
+
+ for (i = 0; i < ARRAY_SIZE(registers); i++)
+ if (priv->rev >= registers[i].rev)
+ seq_printf(m, "%s:\t %08x\n", registers[i].name,
+ tilcdc_read(dev, registers[i].reg));
+
+ pm_runtime_put_sync(dev->dev);
+
+ return 0;
+}
+
+static int tilcdc_mm_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ return drm_mm_dump_table(m, dev->mm_private);
+}
+
+static struct drm_info_list tilcdc_debugfs_list[] = {
+ { "regs", tilcdc_regs_show, 0 },
+ { "mm", tilcdc_mm_show, 0 },
+ { "fb", drm_fb_cma_debugfs_show, 0 },
+};
+
+static int tilcdc_debugfs_init(struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ struct tilcdc_module *mod;
+ int ret;
+
+ ret = drm_debugfs_create_files(tilcdc_debugfs_list,
+ ARRAY_SIZE(tilcdc_debugfs_list),
+ minor->debugfs_root, minor);
+
+ list_for_each_entry(mod, &module_list, list)
+ if (mod->funcs->debugfs_init)
+ mod->funcs->debugfs_init(mod, minor);
+
+ if (ret) {
+ dev_err(dev->dev, "could not install tilcdc_debugfs_list\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static void tilcdc_debugfs_cleanup(struct drm_minor *minor)
+{
+ struct tilcdc_module *mod;
+ drm_debugfs_remove_files(tilcdc_debugfs_list,
+ ARRAY_SIZE(tilcdc_debugfs_list), minor);
+
+ list_for_each_entry(mod, &module_list, list)
+ if (mod->funcs->debugfs_cleanup)
+ mod->funcs->debugfs_cleanup(mod, minor);
+}
+#endif
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .fasync = drm_fasync,
+ .llseek = no_llseek,
+ .mmap = drm_gem_cma_mmap,
+};
+
+static struct drm_driver tilcdc_driver = {
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
+ .load = tilcdc_load,
+ .unload = tilcdc_unload,
+ .preclose = tilcdc_preclose,
+ .lastclose = tilcdc_lastclose,
+ .irq_handler = tilcdc_irq,
+ .irq_preinstall = tilcdc_irq_preinstall,
+ .irq_postinstall = tilcdc_irq_postinstall,
+ .irq_uninstall = tilcdc_irq_uninstall,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = tilcdc_enable_vblank,
+ .disable_vblank = tilcdc_disable_vblank,
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_cma_dumb_destroy,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = tilcdc_debugfs_init,
+ .debugfs_cleanup = tilcdc_debugfs_cleanup,
+#endif
+ .fops = &fops,
+ .name = "tilcdc",
+ .desc = "TI LCD Controller DRM",
+ .date = "20121205",
+ .major = 1,
+ .minor = 0,
+};
+
+/*
+ * Power management:
+ */
+
+#ifdef CONFIG_PM_SLEEP
+static int tilcdc_pm_suspend(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct tilcdc_drm_private *priv = ddev->dev_private;
+ unsigned i, n = 0;
+
+ drm_kms_helper_poll_disable(ddev);
+
+ /* Save register state: */
+ for (i = 0; i < ARRAY_SIZE(registers); i++)
+ if (registers[i].save && (priv->rev >= registers[i].rev))
+ priv->saved_register[n++] = tilcdc_read(ddev, registers[i].reg);
+
+ return 0;
+}
+
+static int tilcdc_pm_resume(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct tilcdc_drm_private *priv = ddev->dev_private;
+ unsigned i, n = 0;
+
+ /* Restore register state: */
+ for (i = 0; i < ARRAY_SIZE(registers); i++)
+ if (registers[i].save && (priv->rev >= registers[i].rev))
+ tilcdc_write(ddev, registers[i].reg, priv->saved_register[n++]);
+
+ drm_kms_helper_poll_enable(ddev);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops tilcdc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tilcdc_pm_suspend, tilcdc_pm_resume)
+};
+
+/*
+ * Platform driver:
+ */
+
+static int tilcdc_pdev_probe(struct platform_device *pdev)
+{
+ /* bail out early if no DT data: */
+ if (!pdev->dev.of_node) {
+ dev_err(&pdev->dev, "device-tree data is missing\n");
+ return -ENXIO;
+ }
+
+ return drm_platform_init(&tilcdc_driver, pdev);
+}
+
+static int tilcdc_pdev_remove(struct platform_device *pdev)
+{
+ drm_platform_exit(&tilcdc_driver, pdev);
+
+ return 0;
+}
+
+static struct of_device_id tilcdc_of_match[] = {
+ { .compatible = "ti,am33xx-tilcdc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tilcdc_of_match);
+
+static struct platform_driver tilcdc_platform_driver = {
+ .probe = tilcdc_pdev_probe,
+ .remove = tilcdc_pdev_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tilcdc",
+ .pm = &tilcdc_pm_ops,
+ .of_match_table = tilcdc_of_match,
+ },
+};
+
+static int __init tilcdc_drm_init(void)
+{
+ DBG("init");
+ tilcdc_tfp410_init();
+ tilcdc_slave_init();
+ tilcdc_panel_init();
+ return platform_driver_register(&tilcdc_platform_driver);
+}
+
+static void __exit tilcdc_drm_fini(void)
+{
+ DBG("fini");
+ tilcdc_tfp410_fini();
+ tilcdc_slave_fini();
+ tilcdc_panel_fini();
+ platform_driver_unregister(&tilcdc_platform_driver);
+}
+
+late_initcall(tilcdc_drm_init);
+module_exit(tilcdc_drm_fini);
+
+MODULE_AUTHOR("Rob Clark <robdclark@gmail.com");
+MODULE_DESCRIPTION("TI LCD Controller DRM Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
new file mode 100644
index 00000000000..8242b5a4307
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __TILCDC_DRV_H__
+#define __TILCDC_DRV_H__
+
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/list.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+
+struct tilcdc_drm_private {
+ void __iomem *mmio;
+
+ struct clk *disp_clk; /* display dpll */
+ struct clk *clk; /* functional clock */
+ int rev; /* IP revision */
+
+ /* don't attempt modes whose width * height * refresh-rate exceeds this: */
+ uint32_t max_bandwidth;
+
+ /* register contents saved across suspend/resume: */
+ u32 saved_register[12];
+
+#ifdef CONFIG_CPU_FREQ
+ struct notifier_block freq_transition;
+ unsigned int lcd_fck_rate;
+#endif
+
+ struct workqueue_struct *wq;
+
+ struct drm_fbdev_cma *fbdev;
+
+ struct drm_crtc *crtc;
+
+ unsigned int num_encoders;
+ struct drm_encoder *encoders[8];
+
+ unsigned int num_connectors;
+ struct drm_connector *connectors[8];
+};
+
+/* Sub-module for display.  Since we don't know at compile time which panels
+ * or display adapters might be present (e.g. an off-chip DVI encoder such as
+ * the tfp410, an HDMI encoder, or various LCD panels), the connectors and
+ * encoders are split out into separate per-output drivers.  If they are
+ * probed and found to be present, they register themselves with
+ * tilcdc_module_init() (see the illustrative sketch below).
+ */
+struct tilcdc_module;
+
+struct tilcdc_module_ops {
+ /* create appropriate encoders/connectors: */
+ int (*modeset_init)(struct tilcdc_module *mod, struct drm_device *dev);
+ void (*destroy)(struct tilcdc_module *mod);
+#ifdef CONFIG_DEBUG_FS
+ /* create debugfs nodes (can be NULL): */
+ int (*debugfs_init)(struct tilcdc_module *mod, struct drm_minor *minor);
+ /* cleanup debugfs nodes (can be NULL): */
+ void (*debugfs_cleanup)(struct tilcdc_module *mod, struct drm_minor *minor);
+#endif
+};
+
+struct tilcdc_module {
+ const char *name;
+ struct list_head list;
+ const struct tilcdc_module_ops *funcs;
+};
+
+void tilcdc_module_init(struct tilcdc_module *mod, const char *name,
+ const struct tilcdc_module_ops *funcs);
+void tilcdc_module_cleanup(struct tilcdc_module *mod);
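+
+/*
+ * Illustrative sketch only (not part of the driver): a hypothetical
+ * sub-module "foo" would embed a struct tilcdc_module, implement the ops
+ * above, and register itself from its probe path, roughly like:
+ *
+ *	static const struct tilcdc_module_ops foo_module_ops = {
+ *		.modeset_init = foo_modeset_init,
+ *		.destroy = foo_destroy,
+ *	};
+ *
+ *	struct foo_module {
+ *		struct tilcdc_module base;
+ *	};
+ *
+ *	static int foo_probe(struct platform_device *pdev)
+ *	{
+ *		struct foo_module *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
+ *
+ *		if (!foo)
+ *			return -ENOMEM;
+ *		tilcdc_module_init(&foo->base, "foo", &foo_module_ops);
+ *		return 0;
+ *	}
+ *
+ * The foo_* names are made up for illustration; see tilcdc_panel.c,
+ * tilcdc_slave.c and tilcdc_tfp410.c for the real sub-modules.
+ */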
+
+
+/* Panel configuration that must be programmed into the CRTC but does not
+ * come from the mode timings.  The display module is expected to call
+ * tilcdc_crtc_set_panel_info() to set this during modeset (see the sketch
+ * following the struct).
+ */
+struct tilcdc_panel_info {
+
+ /* AC Bias Pin Frequency */
+ uint32_t ac_bias;
+
+ /* AC Bias Pin Transitions per Interrupt */
+ uint32_t ac_bias_intrpt;
+
+ /* DMA burst size */
+ uint32_t dma_burst_sz;
+
+ /* Bits per pixel */
+ uint32_t bpp;
+
+ /* FIFO DMA Request Delay */
+ uint32_t fdd;
+
+ /* TFT Alternative Signal Mapping (Only for active) */
+ bool tft_alt_mode;
+
+ /* Invert pixel clock */
+ bool invert_pxl_clk;
+
+ /* Horizontal and Vertical Sync Edge: 0=rising 1=falling */
+ uint32_t sync_edge;
+
+ /* Horizontal and Vertical Sync: Control: 0=ignore */
+ uint32_t sync_ctrl;
+
+ /* Raster Data Order Select: 1=Most-to-least 0=Least-to-most */
+ uint32_t raster_order;
+
+ /* DMA FIFO threshold */
+ uint32_t fifo_th;
+};
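+
+/*
+ * Minimal usage sketch (illustrative; the names and values are made up):
+ * an output sub-module typically keeps a static tilcdc_panel_info and
+ * hands it to the CRTC from its encoder .prepare hook:
+ *
+ *	static const struct tilcdc_panel_info example_info = {
+ *		.ac_bias = 255,
+ *		.dma_burst_sz = 16,
+ *		.bpp = 16,
+ *		.fdd = 0x80,
+ *		.sync_ctrl = 1,
+ *	};
+ *
+ *	static void example_encoder_prepare(struct drm_encoder *encoder)
+ *	{
+ *		tilcdc_crtc_set_panel_info(encoder->crtc, &example_info);
+ *	}
+ *
+ * The real sub-modules (panel, slave, tfp410) follow exactly this pattern.
+ */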
+
+#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+
+struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev);
+void tilcdc_crtc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file);
+irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc);
+void tilcdc_crtc_update_clk(struct drm_crtc *crtc);
+void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
+ const struct tilcdc_panel_info *info);
+int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode);
+int tilcdc_crtc_max_width(struct drm_crtc *crtc);
+
+#endif /* __TILCDC_DRV_H__ */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
new file mode 100644
index 00000000000..580b74e2022
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -0,0 +1,436 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/backlight.h>
+#include <video/display_timing.h>
+#include <video/of_display_timing.h>
+#include <video/videomode.h>
+
+#include "tilcdc_drv.h"
+
+struct panel_module {
+ struct tilcdc_module base;
+ struct tilcdc_panel_info *info;
+ struct display_timings *timings;
+ struct backlight_device *backlight;
+};
+#define to_panel_module(x) container_of(x, struct panel_module, base)
+
+
+/*
+ * Encoder:
+ */
+
+struct panel_encoder {
+ struct drm_encoder base;
+ struct panel_module *mod;
+};
+#define to_panel_encoder(x) container_of(x, struct panel_encoder, base)
+
+
+static void panel_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(panel_encoder);
+}
+
+static void panel_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
+ struct backlight_device *backlight = panel_encoder->mod->backlight;
+
+ if (!backlight)
+ return;
+
+ backlight->props.power = mode == DRM_MODE_DPMS_ON
+ ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
+ backlight_update_status(backlight);
+}
+
+static bool panel_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* nothing needed */
+ return true;
+}
+
+static void panel_encoder_prepare(struct drm_encoder *encoder)
+{
+ struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
+ panel_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+ tilcdc_crtc_set_panel_info(encoder->crtc, panel_encoder->mod->info);
+}
+
+static void panel_encoder_commit(struct drm_encoder *encoder)
+{
+ panel_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static void panel_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* nothing needed */
+}
+
+static const struct drm_encoder_funcs panel_encoder_funcs = {
+ .destroy = panel_encoder_destroy,
+};
+
+static const struct drm_encoder_helper_funcs panel_encoder_helper_funcs = {
+ .dpms = panel_encoder_dpms,
+ .mode_fixup = panel_encoder_mode_fixup,
+ .prepare = panel_encoder_prepare,
+ .commit = panel_encoder_commit,
+ .mode_set = panel_encoder_mode_set,
+};
+
+static struct drm_encoder *panel_encoder_create(struct drm_device *dev,
+ struct panel_module *mod)
+{
+ struct panel_encoder *panel_encoder;
+ struct drm_encoder *encoder;
+ int ret;
+
+ panel_encoder = kzalloc(sizeof(*panel_encoder), GFP_KERNEL);
+ if (!panel_encoder) {
+ dev_err(dev->dev, "allocation failed\n");
+ return NULL;
+ }
+
+ panel_encoder->mod = mod;
+
+ encoder = &panel_encoder->base;
+ encoder->possible_crtcs = 1;
+
+ ret = drm_encoder_init(dev, encoder, &panel_encoder_funcs,
+ DRM_MODE_ENCODER_LVDS);
+ if (ret < 0)
+ goto fail;
+
+ drm_encoder_helper_add(encoder, &panel_encoder_helper_funcs);
+
+ return encoder;
+
+fail:
+ panel_encoder_destroy(encoder);
+ return NULL;
+}
+
+/*
+ * Connector:
+ */
+
+struct panel_connector {
+ struct drm_connector base;
+
+ struct drm_encoder *encoder; /* our connected encoder */
+ struct panel_module *mod;
+};
+#define to_panel_connector(x) container_of(x, struct panel_connector, base)
+
+
+static void panel_connector_destroy(struct drm_connector *connector)
+{
+ struct panel_connector *panel_connector = to_panel_connector(connector);
+ drm_connector_cleanup(connector);
+ kfree(panel_connector);
+}
+
+static enum drm_connector_status panel_connector_detect(
+ struct drm_connector *connector,
+ bool force)
+{
+ return connector_status_connected;
+}
+
+static int panel_connector_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct panel_connector *panel_connector = to_panel_connector(connector);
+ struct display_timings *timings = panel_connector->mod->timings;
+ int i;
+
+ for (i = 0; i < timings->num_timings; i++) {
+ struct drm_display_mode *mode = drm_mode_create(dev);
+ struct videomode vm;
+
+ if (!mode)
+ break;
+
+ if (videomode_from_timing(timings, &vm, i))
+ break;
+
+ drm_display_mode_from_videomode(&vm, mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
+
+ if (timings->native_mode == i)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+ }
+
+ return i;
+}
+
+static int panel_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct tilcdc_drm_private *priv = connector->dev->dev_private;
+ /* our only constraints are what the crtc can generate: */
+ return tilcdc_crtc_mode_valid(priv->crtc, mode);
+}
+
+static struct drm_encoder *panel_connector_best_encoder(
+ struct drm_connector *connector)
+{
+ struct panel_connector *panel_connector = to_panel_connector(connector);
+ return panel_connector->encoder;
+}
+
+static const struct drm_connector_funcs panel_connector_funcs = {
+ .destroy = panel_connector_destroy,
+ .dpms = drm_helper_connector_dpms,
+ .detect = panel_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+};
+
+static const struct drm_connector_helper_funcs panel_connector_helper_funcs = {
+ .get_modes = panel_connector_get_modes,
+ .mode_valid = panel_connector_mode_valid,
+ .best_encoder = panel_connector_best_encoder,
+};
+
+static struct drm_connector *panel_connector_create(struct drm_device *dev,
+ struct panel_module *mod, struct drm_encoder *encoder)
+{
+ struct panel_connector *panel_connector;
+ struct drm_connector *connector;
+ int ret;
+
+ panel_connector = kzalloc(sizeof(*panel_connector), GFP_KERNEL);
+ if (!panel_connector) {
+ dev_err(dev->dev, "allocation failed\n");
+ return NULL;
+ }
+
+ panel_connector->encoder = encoder;
+ panel_connector->mod = mod;
+
+ connector = &panel_connector->base;
+
+ drm_connector_init(dev, connector, &panel_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ drm_connector_helper_add(connector, &panel_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ ret = drm_mode_connector_attach_encoder(connector, encoder);
+ if (ret)
+ goto fail;
+
+ drm_sysfs_connector_add(connector);
+
+ return connector;
+
+fail:
+ panel_connector_destroy(connector);
+ return NULL;
+}
+
+/*
+ * Module:
+ */
+
+static int panel_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
+{
+ struct panel_module *panel_mod = to_panel_module(mod);
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+
+ encoder = panel_encoder_create(dev, panel_mod);
+ if (!encoder)
+ return -ENOMEM;
+
+ connector = panel_connector_create(dev, panel_mod, encoder);
+ if (!connector)
+ return -ENOMEM;
+
+ priv->encoders[priv->num_encoders++] = encoder;
+ priv->connectors[priv->num_connectors++] = connector;
+
+ return 0;
+}
+
+static void panel_destroy(struct tilcdc_module *mod)
+{
+ struct panel_module *panel_mod = to_panel_module(mod);
+
+ if (panel_mod->timings) {
+ display_timings_release(panel_mod->timings);
+ kfree(panel_mod->timings);
+ }
+
+ tilcdc_module_cleanup(mod);
+ kfree(panel_mod->info);
+ kfree(panel_mod);
+}
+
+static const struct tilcdc_module_ops panel_module_ops = {
+ .modeset_init = panel_modeset_init,
+ .destroy = panel_destroy,
+};
+
+/*
+ * Device:
+ */
+
+/* maybe move this somewhere common if it is needed by other outputs? */
+static struct tilcdc_panel_info * of_get_panel_info(struct device_node *np)
+{
+ struct device_node *info_np;
+ struct tilcdc_panel_info *info;
+ int ret = 0;
+
+ if (!np) {
+ pr_err("%s: no devicenode given\n", __func__);
+ return NULL;
+ }
+
+ info_np = of_get_child_by_name(np, "panel-info");
+ if (!info_np) {
+ pr_err("%s: could not find panel-info node\n", __func__);
+ return NULL;
+ }
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ pr_err("%s: allocation failed\n", __func__);
+ return NULL;
+ }
+
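+ /* all of these properties are required; any read failure below is fatal: */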
+ ret |= of_property_read_u32(info_np, "ac-bias", &info->ac_bias);
+ ret |= of_property_read_u32(info_np, "ac-bias-intrpt", &info->ac_bias_intrpt);
+ ret |= of_property_read_u32(info_np, "dma-burst-sz", &info->dma_burst_sz);
+ ret |= of_property_read_u32(info_np, "bpp", &info->bpp);
+ ret |= of_property_read_u32(info_np, "fdd", &info->fdd);
+ ret |= of_property_read_u32(info_np, "sync-edge", &info->sync_edge);
+ ret |= of_property_read_u32(info_np, "sync-ctrl", &info->sync_ctrl);
+ ret |= of_property_read_u32(info_np, "raster-order", &info->raster_order);
+ ret |= of_property_read_u32(info_np, "fifo-th", &info->fifo_th);
+
+ /* optional: */
+ info->tft_alt_mode = of_property_read_bool(info_np, "tft-alt-mode");
+ info->invert_pxl_clk = of_property_read_bool(info_np, "invert-pxl-clk");
+
+ if (ret) {
+ pr_err("%s: error reading panel-info properties\n", __func__);
+ kfree(info);
+ return NULL;
+ }
+
+ return info;
+}
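+
+/*
+ * For reference, a "panel-info" device-tree node as parsed above might look
+ * roughly like the following (the values here are only an example; the
+ * binding documentation is authoritative):
+ *
+ *	panel-info {
+ *		ac-bias = <255>;
+ *		ac-bias-intrpt = <0>;
+ *		dma-burst-sz = <16>;
+ *		bpp = <16>;
+ *		fdd = <0x80>;
+ *		sync-edge = <0>;
+ *		sync-ctrl = <1>;
+ *		raster-order = <0>;
+ *		fifo-th = <0>;
+ *	};
+ */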
+
+static struct of_device_id panel_of_match[];
+
+static int panel_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct panel_module *panel_mod;
+ struct tilcdc_module *mod;
+ struct pinctrl *pinctrl;
+ int ret = -EINVAL;
+
+
+ /* bail out early if no DT data: */
+ if (!node) {
+ dev_err(&pdev->dev, "device-tree data is missing\n");
+ return -ENXIO;
+ }
+
+ panel_mod = kzalloc(sizeof(*panel_mod), GFP_KERNEL);
+ if (!panel_mod)
+ return -ENOMEM;
+
+ mod = &panel_mod->base;
+
+ tilcdc_module_init(mod, "panel", &panel_module_ops);
+
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl))
+ dev_warn(&pdev->dev, "pins are not configured\n");
+
+
+ panel_mod->timings = of_get_display_timings(node);
+ if (!panel_mod->timings) {
+ dev_err(&pdev->dev, "could not get panel timings\n");
+ goto fail;
+ }
+
+ panel_mod->info = of_get_panel_info(node);
+ if (!panel_mod->info) {
+ dev_err(&pdev->dev, "could not get panel info\n");
+ goto fail;
+ }
+
+ panel_mod->backlight = of_find_backlight_by_node(node);
+ if (panel_mod->backlight)
+ dev_info(&pdev->dev, "found backlight\n");
+
+ return 0;
+
+fail:
+ panel_destroy(mod);
+ return ret;
+}
+
+static int panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct of_device_id panel_of_match[] = {
+ { .compatible = "ti,tilcdc,panel", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, panel_of_match);
+
+struct platform_driver panel_driver = {
+ .probe = panel_probe,
+ .remove = panel_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "panel",
+ .of_match_table = panel_of_match,
+ },
+};
+
+int __init tilcdc_panel_init(void)
+{
+ return platform_driver_register(&panel_driver);
+}
+
+void __exit tilcdc_panel_fini(void)
+{
+ platform_driver_unregister(&panel_driver);
+}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.h b/drivers/gpu/drm/tilcdc/tilcdc_panel.h
new file mode 100644
index 00000000000..7db40aacc74
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __TILCDC_PANEL_H__
+#define __TILCDC_PANEL_H__
+
+/* sub-module for generic lcd panel output */
+
+int tilcdc_panel_init(void);
+void tilcdc_panel_fini(void);
+
+#endif /* __TILCDC_PANEL_H__ */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_regs.h b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
new file mode 100644
index 00000000000..17fd1b45428
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __TILCDC_REGS_H__
+#define __TILCDC_REGS_H__
+
+/* LCDC register definitions, based on da8xx-fb */
+
+#include <linux/bitops.h>
+
+#include "tilcdc_drv.h"
+
+/* LCDC Status Register */
+#define LCDC_END_OF_FRAME1 BIT(9)
+#define LCDC_END_OF_FRAME0 BIT(8)
+#define LCDC_PL_LOAD_DONE BIT(6)
+#define LCDC_FIFO_UNDERFLOW BIT(5)
+#define LCDC_SYNC_LOST BIT(2)
+#define LCDC_FRAME_DONE BIT(0)
+
+/* LCDC DMA Control Register */
+#define LCDC_DMA_BURST_SIZE(x) ((x) << 4)
+#define LCDC_DMA_BURST_1 0x0
+#define LCDC_DMA_BURST_2 0x1
+#define LCDC_DMA_BURST_4 0x2
+#define LCDC_DMA_BURST_8 0x3
+#define LCDC_DMA_BURST_16 0x4
+#define LCDC_V1_END_OF_FRAME_INT_ENA BIT(2)
+#define LCDC_V2_END_OF_FRAME0_INT_ENA BIT(8)
+#define LCDC_V2_END_OF_FRAME1_INT_ENA BIT(9)
+#define LCDC_DUAL_FRAME_BUFFER_ENABLE BIT(0)
+
+/* LCDC Control Register */
+#define LCDC_CLK_DIVISOR(x) ((x) << 8)
+#define LCDC_RASTER_MODE 0x01
+
+/* LCDC Raster Control Register */
+#define LCDC_PALETTE_LOAD_MODE(x) ((x) << 20)
+#define PALETTE_AND_DATA 0x00
+#define PALETTE_ONLY 0x01
+#define DATA_ONLY 0x02
+
+#define LCDC_MONO_8BIT_MODE BIT(9)
+#define LCDC_RASTER_ORDER BIT(8)
+#define LCDC_TFT_MODE BIT(7)
+#define LCDC_V1_UNDERFLOW_INT_ENA BIT(6)
+#define LCDC_V2_UNDERFLOW_INT_ENA BIT(5)
+#define LCDC_V1_PL_INT_ENA BIT(4)
+#define LCDC_V2_PL_INT_ENA BIT(6)
+#define LCDC_MONOCHROME_MODE BIT(1)
+#define LCDC_RASTER_ENABLE BIT(0)
+#define LCDC_TFT_ALT_ENABLE BIT(23)
+#define LCDC_STN_565_ENABLE BIT(24)
+#define LCDC_V2_DMA_CLK_EN BIT(2)
+#define LCDC_V2_LIDD_CLK_EN BIT(1)
+#define LCDC_V2_CORE_CLK_EN BIT(0)
+#define LCDC_V2_LPP_B10 26
+#define LCDC_V2_TFT_24BPP_MODE BIT(25)
+#define LCDC_V2_TFT_24BPP_UNPACK BIT(26)
+
+/* LCDC Raster Timing 2 Register */
+#define LCDC_AC_BIAS_TRANSITIONS_PER_INT(x) ((x) << 16)
+#define LCDC_AC_BIAS_FREQUENCY(x) ((x) << 8)
+#define LCDC_SYNC_CTRL BIT(25)
+#define LCDC_SYNC_EDGE BIT(24)
+#define LCDC_INVERT_PIXEL_CLOCK BIT(22)
+#define LCDC_INVERT_HSYNC BIT(21)
+#define LCDC_INVERT_VSYNC BIT(20)
+
+/* LCDC Block */
+#define LCDC_PID_REG 0x0
+#define LCDC_CTRL_REG 0x4
+#define LCDC_STAT_REG 0x8
+#define LCDC_RASTER_CTRL_REG 0x28
+#define LCDC_RASTER_TIMING_0_REG 0x2c
+#define LCDC_RASTER_TIMING_1_REG 0x30
+#define LCDC_RASTER_TIMING_2_REG 0x34
+#define LCDC_DMA_CTRL_REG 0x40
+#define LCDC_DMA_FB_BASE_ADDR_0_REG 0x44
+#define LCDC_DMA_FB_CEILING_ADDR_0_REG 0x48
+#define LCDC_DMA_FB_BASE_ADDR_1_REG 0x4c
+#define LCDC_DMA_FB_CEILING_ADDR_1_REG 0x50
+
+/* Interrupt Registers available only in Version 2 */
+#define LCDC_RAW_STAT_REG 0x58
+#define LCDC_MASKED_STAT_REG 0x5c
+#define LCDC_INT_ENABLE_SET_REG 0x60
+#define LCDC_INT_ENABLE_CLR_REG 0x64
+#define LCDC_END_OF_INT_IND_REG 0x68
+
+/* Clock registers available only in Version 2 */
+#define LCDC_CLK_ENABLE_REG 0x6c
+#define LCDC_CLK_RESET_REG 0x70
+#define LCDC_CLK_MAIN_RESET BIT(3)
+
+
+/*
+ * Helpers:
+ */
+
+static inline void tilcdc_write(struct drm_device *dev, u32 reg, u32 data)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ iowrite32(data, priv->mmio + reg);
+}
+
+static inline u32 tilcdc_read(struct drm_device *dev, u32 reg)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ return ioread32(priv->mmio + reg);
+}
+
+static inline void tilcdc_set(struct drm_device *dev, u32 reg, u32 mask)
+{
+ tilcdc_write(dev, reg, tilcdc_read(dev, reg) | mask);
+}
+
+static inline void tilcdc_clear(struct drm_device *dev, u32 reg, u32 mask)
+{
+ tilcdc_write(dev, reg, tilcdc_read(dev, reg) & ~mask);
+}
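+
+/*
+ * tilcdc_set()/tilcdc_clear() are plain read-modify-write helpers with no
+ * locking; for example, the raster engine could be enabled or disabled with:
+ *
+ *	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+ *	tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+ *
+ * Callers are expected to serialize concurrent access to a given register.
+ */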
+
+/* the register to read/clear irqstatus differs between v1 and v2 of the IP */
+static inline u32 tilcdc_irqstatus_reg(struct drm_device *dev)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ return (priv->rev == 2) ? LCDC_MASKED_STAT_REG : LCDC_STAT_REG;
+}
+
+static inline u32 tilcdc_read_irqstatus(struct drm_device *dev)
+{
+ return tilcdc_read(dev, tilcdc_irqstatus_reg(dev));
+}
+
+static inline void tilcdc_clear_irqstatus(struct drm_device *dev, u32 mask)
+{
+ tilcdc_write(dev, tilcdc_irqstatus_reg(dev), mask);
+}
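+
+/*
+ * A typical interrupt handler might use these helpers as (sketch only):
+ *
+ *	u32 stat = tilcdc_read_irqstatus(dev);
+ *	tilcdc_clear_irqstatus(dev, stat);
+ *	... then act on the bits set in stat ...
+ */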
+
+#endif /* __TILCDC_REGS_H__ */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
new file mode 100644
index 00000000000..568dc1c08e6
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/i2c.h>
+#include <linux/of_i2c.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/consumer.h>
+#include <drm/drm_encoder_slave.h>
+
+#include "tilcdc_drv.h"
+
+struct slave_module {
+ struct tilcdc_module base;
+ struct i2c_adapter *i2c;
+};
+#define to_slave_module(x) container_of(x, struct slave_module, base)
+
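+/* raster/DMA settings handed to the CRTC (via tilcdc_crtc_set_panel_info()
+ * from slave_encoder_prepare()) when driving an external i2c slave encoder: */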
+static const struct tilcdc_panel_info slave_info = {
+ .bpp = 16,
+ .ac_bias = 255,
+ .ac_bias_intrpt = 0,
+ .dma_burst_sz = 16,
+ .fdd = 0x80,
+ .tft_alt_mode = 0,
+ .sync_edge = 0,
+ .sync_ctrl = 1,
+ .raster_order = 0,
+};
+
+
+/*
+ * Encoder:
+ */
+
+struct slave_encoder {
+ struct drm_encoder_slave base;
+ struct slave_module *mod;
+};
+#define to_slave_encoder(x) container_of(to_encoder_slave(x), struct slave_encoder, base)
+
+static inline struct drm_encoder_slave_funcs *
+get_slave_funcs(struct drm_encoder *enc)
+{
+ return to_encoder_slave(enc)->slave_funcs;
+}
+
+static void slave_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct slave_encoder *slave_encoder = to_slave_encoder(encoder);
+ if (get_slave_funcs(encoder))
+ get_slave_funcs(encoder)->destroy(encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(slave_encoder);
+}
+
+static void slave_encoder_prepare(struct drm_encoder *encoder)
+{
+ drm_i2c_encoder_prepare(encoder);
+ tilcdc_crtc_set_panel_info(encoder->crtc, &slave_info);
+}
+
+static const struct drm_encoder_funcs slave_encoder_funcs = {
+ .destroy = slave_encoder_destroy,
+};
+
+static const struct drm_encoder_helper_funcs slave_encoder_helper_funcs = {
+ .dpms = drm_i2c_encoder_dpms,
+ .mode_fixup = drm_i2c_encoder_mode_fixup,
+ .prepare = slave_encoder_prepare,
+ .commit = drm_i2c_encoder_commit,
+ .mode_set = drm_i2c_encoder_mode_set,
+ .save = drm_i2c_encoder_save,
+ .restore = drm_i2c_encoder_restore,
+};
+
+static const struct i2c_board_info info = {
+ I2C_BOARD_INFO("tda998x", 0x70)
+};
+
+static struct drm_encoder *slave_encoder_create(struct drm_device *dev,
+ struct slave_module *mod)
+{
+ struct slave_encoder *slave_encoder;
+ struct drm_encoder *encoder;
+ int ret;
+
+ slave_encoder = kzalloc(sizeof(*slave_encoder), GFP_KERNEL);
+ if (!slave_encoder) {
+ dev_err(dev->dev, "allocation failed\n");
+ return NULL;
+ }
+
+ slave_encoder->mod = mod;
+
+ encoder = &slave_encoder->base.base;
+ encoder->possible_crtcs = 1;
+
+ ret = drm_encoder_init(dev, encoder, &slave_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ if (ret)
+ goto fail;
+
+ drm_encoder_helper_add(encoder, &slave_encoder_helper_funcs);
+
+ ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder), mod->i2c, &info);
+ if (ret)
+ goto fail;
+
+ return encoder;
+
+fail:
+ slave_encoder_destroy(encoder);
+ return NULL;
+}
+
+/*
+ * Connector:
+ */
+
+struct slave_connector {
+ struct drm_connector base;
+
+ struct drm_encoder *encoder; /* our connected encoder */
+ struct slave_module *mod;
+};
+#define to_slave_connector(x) container_of(x, struct slave_connector, base)
+
+static void slave_connector_destroy(struct drm_connector *connector)
+{
+ struct slave_connector *slave_connector = to_slave_connector(connector);
+ drm_connector_cleanup(connector);
+ kfree(slave_connector);
+}
+
+static enum drm_connector_status slave_connector_detect(
+ struct drm_connector *connector,
+ bool force)
+{
+ struct drm_encoder *encoder = to_slave_connector(connector)->encoder;
+ return get_slave_funcs(encoder)->detect(encoder, connector);
+}
+
+static int slave_connector_get_modes(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder = to_slave_connector(connector)->encoder;
+ return get_slave_funcs(encoder)->get_modes(encoder, connector);
+}
+
+static int slave_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_encoder *encoder = to_slave_connector(connector)->encoder;
+ struct tilcdc_drm_private *priv = connector->dev->dev_private;
+ int ret;
+
+ ret = tilcdc_crtc_mode_valid(priv->crtc, mode);
+ if (ret != MODE_OK)
+ return ret;
+
+ return get_slave_funcs(encoder)->mode_valid(encoder, mode);
+}
+
+static struct drm_encoder *slave_connector_best_encoder(
+ struct drm_connector *connector)
+{
+ struct slave_connector *slave_connector = to_slave_connector(connector);
+ return slave_connector->encoder;
+}
+
+static int slave_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property, uint64_t value)
+{
+ struct drm_encoder *encoder = to_slave_connector(connector)->encoder;
+ return get_slave_funcs(encoder)->set_property(encoder,
+ connector, property, value);
+}
+
+static const struct drm_connector_funcs slave_connector_funcs = {
+ .destroy = slave_connector_destroy,
+ .dpms = drm_helper_connector_dpms,
+ .detect = slave_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = slave_connector_set_property,
+};
+
+static const struct drm_connector_helper_funcs slave_connector_helper_funcs = {
+ .get_modes = slave_connector_get_modes,
+ .mode_valid = slave_connector_mode_valid,
+ .best_encoder = slave_connector_best_encoder,
+};
+
+static struct drm_connector *slave_connector_create(struct drm_device *dev,
+ struct slave_module *mod, struct drm_encoder *encoder)
+{
+ struct slave_connector *slave_connector;
+ struct drm_connector *connector;
+ int ret;
+
+ slave_connector = kzalloc(sizeof(*slave_connector), GFP_KERNEL);
+ if (!slave_connector) {
+ dev_err(dev->dev, "allocation failed\n");
+ return NULL;
+ }
+
+ slave_connector->encoder = encoder;
+ slave_connector->mod = mod;
+
+ connector = &slave_connector->base;
+
+ drm_connector_init(dev, connector, &slave_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_helper_add(connector, &slave_connector_helper_funcs);
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ get_slave_funcs(encoder)->create_resources(encoder, connector);
+
+ ret = drm_mode_connector_attach_encoder(connector, encoder);
+ if (ret)
+ goto fail;
+
+ drm_sysfs_connector_add(connector);
+
+ return connector;
+
+fail:
+ slave_connector_destroy(connector);
+ return NULL;
+}
+
+/*
+ * Module:
+ */
+
+static int slave_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
+{
+ struct slave_module *slave_mod = to_slave_module(mod);
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+
+ encoder = slave_encoder_create(dev, slave_mod);
+ if (!encoder)
+ return -ENOMEM;
+
+ connector = slave_connector_create(dev, slave_mod, encoder);
+ if (!connector)
+ return -ENOMEM;
+
+ priv->encoders[priv->num_encoders++] = encoder;
+ priv->connectors[priv->num_connectors++] = connector;
+
+ return 0;
+}
+
+static void slave_destroy(struct tilcdc_module *mod)
+{
+ struct slave_module *slave_mod = to_slave_module(mod);
+
+ tilcdc_module_cleanup(mod);
+ kfree(slave_mod);
+}
+
+static const struct tilcdc_module_ops slave_module_ops = {
+ .modeset_init = slave_modeset_init,
+ .destroy = slave_destroy,
+};
+
+/*
+ * Device:
+ */
+
+static struct of_device_id slave_of_match[];
+
+static int slave_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *i2c_node;
+ struct slave_module *slave_mod;
+ struct tilcdc_module *mod;
+ struct pinctrl *pinctrl;
+ uint32_t i2c_phandle;
+ int ret = -EINVAL;
+
+ /* bail out early if no DT data: */
+ if (!node) {
+ dev_err(&pdev->dev, "device-tree data is missing\n");
+ return -ENXIO;
+ }
+
+ slave_mod = kzalloc(sizeof(*slave_mod), GFP_KERNEL);
+ if (!slave_mod)
+ return -ENOMEM;
+
+ mod = &slave_mod->base;
+
+ tilcdc_module_init(mod, "slave", &slave_module_ops);
+
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl))
+ dev_warn(&pdev->dev, "pins are not configured\n");
+
+ if (of_property_read_u32(node, "i2c", &i2c_phandle)) {
+ dev_err(&pdev->dev, "could not get i2c bus phandle\n");
+ goto fail;
+ }
+
+ i2c_node = of_find_node_by_phandle(i2c_phandle);
+ if (!i2c_node) {
+ dev_err(&pdev->dev, "could not get i2c bus node\n");
+ goto fail;
+ }
+
+ slave_mod->i2c = of_find_i2c_adapter_by_node(i2c_node);
+ if (!slave_mod->i2c) {
+ dev_err(&pdev->dev, "could not get i2c\n");
+ goto fail;
+ }
+
+ of_node_put(i2c_node);
+
+ return 0;
+
+fail:
+ slave_destroy(mod);
+ return ret;
+}
+
+static int slave_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct of_device_id slave_of_match[] = {
+ { .compatible = "ti,tilcdc,slave", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, slave_of_match);
+
+struct platform_driver slave_driver = {
+ .probe = slave_probe,
+ .remove = slave_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "slave",
+ .of_match_table = slave_of_match,
+ },
+};
+
+int __init tilcdc_slave_init(void)
+{
+ return platform_driver_register(&slave_driver);
+}
+
+void __exit tilcdc_slave_fini(void)
+{
+ platform_driver_unregister(&slave_driver);
+}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.h b/drivers/gpu/drm/tilcdc/tilcdc_slave.h
new file mode 100644
index 00000000000..2f850484832
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __TILCDC_SLAVE_H__
+#define __TILCDC_SLAVE_H__
+
+/* sub-module for i2c slave encoder output */
+
+int tilcdc_slave_init(void);
+void tilcdc_slave_fini(void);
+
+#endif /* __TILCDC_SLAVE_H__ */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
new file mode 100644
index 00000000000..58d487ba241
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -0,0 +1,419 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/i2c.h>
+#include <linux/of_i2c.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/consumer.h>
+
+#include "tilcdc_drv.h"
+
+struct tfp410_module {
+ struct tilcdc_module base;
+ struct i2c_adapter *i2c;
+ int gpio;
+};
+#define to_tfp410_module(x) container_of(x, struct tfp410_module, base)
+
+
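+/* raster/DMA settings handed to the CRTC (via tilcdc_crtc_set_panel_info()
+ * from tfp410_encoder_prepare()) when driving the TFP410 DVI encoder: */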
+static const struct tilcdc_panel_info dvi_info = {
+ .ac_bias = 255,
+ .ac_bias_intrpt = 0,
+ .dma_burst_sz = 16,
+ .bpp = 16,
+ .fdd = 0x80,
+ .tft_alt_mode = 0,
+ .sync_edge = 0,
+ .sync_ctrl = 1,
+ .raster_order = 0,
+};
+
+/*
+ * Encoder:
+ */
+
+struct tfp410_encoder {
+ struct drm_encoder base;
+ struct tfp410_module *mod;
+ int dpms;
+};
+#define to_tfp410_encoder(x) container_of(x, struct tfp410_encoder, base)
+
+
+static void tfp410_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct tfp410_encoder *tfp410_encoder = to_tfp410_encoder(encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(tfp410_encoder);
+}
+
+static void tfp410_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct tfp410_encoder *tfp410_encoder = to_tfp410_encoder(encoder);
+
+ if (tfp410_encoder->dpms == mode)
+ return;
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ DBG("Power on");
+ gpio_direction_output(tfp410_encoder->mod->gpio, 1);
+ } else {
+ DBG("Power off");
+ gpio_direction_output(tfp410_encoder->mod->gpio, 0);
+ }
+
+ tfp410_encoder->dpms = mode;
+}
+
+static bool tfp410_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* nothing needed */
+ return true;
+}
+
+static void tfp410_encoder_prepare(struct drm_encoder *encoder)
+{
+ tfp410_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+ tilcdc_crtc_set_panel_info(encoder->crtc, &dvi_info);
+}
+
+static void tfp410_encoder_commit(struct drm_encoder *encoder)
+{
+ tfp410_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static void tfp410_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* nothing needed */
+}
+
+static const struct drm_encoder_funcs tfp410_encoder_funcs = {
+ .destroy = tfp410_encoder_destroy,
+};
+
+static const struct drm_encoder_helper_funcs tfp410_encoder_helper_funcs = {
+ .dpms = tfp410_encoder_dpms,
+ .mode_fixup = tfp410_encoder_mode_fixup,
+ .prepare = tfp410_encoder_prepare,
+ .commit = tfp410_encoder_commit,
+ .mode_set = tfp410_encoder_mode_set,
+};
+
+static struct drm_encoder *tfp410_encoder_create(struct drm_device *dev,
+ struct tfp410_module *mod)
+{
+ struct tfp410_encoder *tfp410_encoder;
+ struct drm_encoder *encoder;
+ int ret;
+
+ tfp410_encoder = kzalloc(sizeof(*tfp410_encoder), GFP_KERNEL);
+ if (!tfp410_encoder) {
+ dev_err(dev->dev, "allocation failed\n");
+ return NULL;
+ }
+
+ tfp410_encoder->dpms = DRM_MODE_DPMS_OFF;
+ tfp410_encoder->mod = mod;
+
+ encoder = &tfp410_encoder->base;
+ encoder->possible_crtcs = 1;
+
+ ret = drm_encoder_init(dev, encoder, &tfp410_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ if (ret < 0)
+ goto fail;
+
+ drm_encoder_helper_add(encoder, &tfp410_encoder_helper_funcs);
+
+ return encoder;
+
+fail:
+ tfp410_encoder_destroy(encoder);
+ return NULL;
+}
+
+/*
+ * Connector:
+ */
+
+struct tfp410_connector {
+ struct drm_connector base;
+
+ struct drm_encoder *encoder; /* our connected encoder */
+ struct tfp410_module *mod;
+};
+#define to_tfp410_connector(x) container_of(x, struct tfp410_connector, base)
+
+
+static void tfp410_connector_destroy(struct drm_connector *connector)
+{
+ struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
+ drm_connector_cleanup(connector);
+ kfree(tfp410_connector);
+}
+
+static enum drm_connector_status tfp410_connector_detect(
+ struct drm_connector *connector,
+ bool force)
+{
+ struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
+
+ if (drm_probe_ddc(tfp410_connector->mod->i2c))
+ return connector_status_connected;
+
+ return connector_status_unknown;
+}
+
+static int tfp410_connector_get_modes(struct drm_connector *connector)
+{
+ struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
+ struct edid *edid;
+ int ret = 0;
+
+ edid = drm_get_edid(connector, tfp410_connector->mod->i2c);
+
+ drm_mode_connector_update_edid_property(connector, edid);
+
+ if (edid) {
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+
+ return ret;
+}
+
+static int tfp410_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct tilcdc_drm_private *priv = connector->dev->dev_private;
+ /* our only constraints are what the crtc can generate: */
+ return tilcdc_crtc_mode_valid(priv->crtc, mode);
+}
+
+static struct drm_encoder *tfp410_connector_best_encoder(
+ struct drm_connector *connector)
+{
+ struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
+ return tfp410_connector->encoder;
+}
+
+static const struct drm_connector_funcs tfp410_connector_funcs = {
+ .destroy = tfp410_connector_destroy,
+ .dpms = drm_helper_connector_dpms,
+ .detect = tfp410_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+};
+
+static const struct drm_connector_helper_funcs tfp410_connector_helper_funcs = {
+ .get_modes = tfp410_connector_get_modes,
+ .mode_valid = tfp410_connector_mode_valid,
+ .best_encoder = tfp410_connector_best_encoder,
+};
+
+static struct drm_connector *tfp410_connector_create(struct drm_device *dev,
+ struct tfp410_module *mod, struct drm_encoder *encoder)
+{
+ struct tfp410_connector *tfp410_connector;
+ struct drm_connector *connector;
+ int ret;
+
+ tfp410_connector = kzalloc(sizeof(*tfp410_connector), GFP_KERNEL);
+ if (!tfp410_connector) {
+ dev_err(dev->dev, "allocation failed\n");
+ return NULL;
+ }
+
+ tfp410_connector->encoder = encoder;
+ tfp410_connector->mod = mod;
+
+ connector = &tfp410_connector->base;
+
+ drm_connector_init(dev, connector, &tfp410_connector_funcs,
+ DRM_MODE_CONNECTOR_DVID);
+ drm_connector_helper_add(connector, &tfp410_connector_helper_funcs);
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ ret = drm_mode_connector_attach_encoder(connector, encoder);
+ if (ret)
+ goto fail;
+
+ drm_sysfs_connector_add(connector);
+
+ return connector;
+
+fail:
+ tfp410_connector_destroy(connector);
+ return NULL;
+}
+
+/*
+ * Module:
+ */
+
+static int tfp410_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
+{
+ struct tfp410_module *tfp410_mod = to_tfp410_module(mod);
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+
+ encoder = tfp410_encoder_create(dev, tfp410_mod);
+ if (!encoder)
+ return -ENOMEM;
+
+ connector = tfp410_connector_create(dev, tfp410_mod, encoder);
+ if (!connector)
+ return -ENOMEM;
+
+ priv->encoders[priv->num_encoders++] = encoder;
+ priv->connectors[priv->num_connectors++] = connector;
+
+ return 0;
+}
+
+static void tfp410_destroy(struct tilcdc_module *mod)
+{
+ struct tfp410_module *tfp410_mod = to_tfp410_module(mod);
+
+ if (tfp410_mod->i2c)
+ i2c_put_adapter(tfp410_mod->i2c);
+
+ if (!IS_ERR_VALUE(tfp410_mod->gpio))
+ gpio_free(tfp410_mod->gpio);
+
+ tilcdc_module_cleanup(mod);
+ kfree(tfp410_mod);
+}
+
+static const struct tilcdc_module_ops tfp410_module_ops = {
+ .modeset_init = tfp410_modeset_init,
+ .destroy = tfp410_destroy,
+};
+
+/*
+ * Device:
+ */
+
+static struct of_device_id tfp410_of_match[];
+
+static int tfp410_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *i2c_node;
+ struct tfp410_module *tfp410_mod;
+ struct tilcdc_module *mod;
+ struct pinctrl *pinctrl;
+ uint32_t i2c_phandle;
+ int ret = -EINVAL;
+
+ /* bail out early if no DT data: */
+ if (!node) {
+ dev_err(&pdev->dev, "device-tree data is missing\n");
+ return -ENXIO;
+ }
+
+ tfp410_mod = kzalloc(sizeof(*tfp410_mod), GFP_KERNEL);
+ if (!tfp410_mod)
+ return -ENOMEM;
+
+ mod = &tfp410_mod->base;
+
+ tilcdc_module_init(mod, "tfp410", &tfp410_module_ops);
+
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl))
+ dev_warn(&pdev->dev, "pins are not configured\n");
+
+ if (of_property_read_u32(node, "i2c", &i2c_phandle)) {
+ dev_err(&pdev->dev, "could not get i2c bus phandle\n");
+ goto fail;
+ }
+
+ i2c_node = of_find_node_by_phandle(i2c_phandle);
+ if (!i2c_node) {
+ dev_err(&pdev->dev, "could not get i2c bus node\n");
+ goto fail;
+ }
+
+ tfp410_mod->i2c = of_find_i2c_adapter_by_node(i2c_node);
+ if (!tfp410_mod->i2c) {
+ dev_err(&pdev->dev, "could not get i2c\n");
+ goto fail;
+ }
+
+ of_node_put(i2c_node);
+
+ tfp410_mod->gpio = of_get_named_gpio_flags(node, "powerdn-gpio",
+ 0, NULL);
+ if (IS_ERR_VALUE(tfp410_mod->gpio)) {
+ dev_warn(&pdev->dev, "No power down GPIO\n");
+ } else {
+ ret = gpio_request(tfp410_mod->gpio, "DVI_PDn");
+ if (ret) {
+ dev_err(&pdev->dev, "could not get DVI_PDn gpio\n");
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ tfp410_destroy(mod);
+ return ret;
+}
+
+static int tfp410_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct of_device_id tfp410_of_match[] = {
+ { .compatible = "ti,tilcdc,tfp410", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tfp410_of_match);
+
+struct platform_driver tfp410_driver = {
+ .probe = tfp410_probe,
+ .remove = tfp410_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tfp410",
+ .of_match_table = tfp410_of_match,
+ },
+};
+
+int __init tilcdc_tfp410_init(void)
+{
+ return platform_driver_register(&tfp410_driver);
+}
+
+void __exit tilcdc_tfp410_fini(void)
+{
+ platform_driver_unregister(&tfp410_driver);
+}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h
new file mode 100644
index 00000000000..5b800f1f6aa
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __TILCDC_TFP410_H__
+#define __TILCDC_TFP410_H__
+
+/* sub-module for tfp410 dvi adaptor */
+
+int tilcdc_tfp410_init(void);
+void tilcdc_tfp410_fini(void);
+
+#endif /* __TILCDC_TFP410_H__ */
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 52b20b12c83..9b07b7d44a5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -158,7 +158,8 @@ static void ttm_bo_release_list(struct kref *list_kref)
ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}
-int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
+static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
+ bool interruptible)
{
if (interruptible) {
return wait_event_interruptible(bo->event_queue,
@@ -168,7 +169,6 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
return 0;
}
}
-EXPORT_SYMBOL(ttm_bo_wait_unreserved);
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
@@ -213,14 +213,13 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
return put_count;
}
-int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
bool interruptible,
bool no_wait, bool use_sequence, uint32_t sequence)
{
- struct ttm_bo_global *glob = bo->glob;
int ret;
- while (unlikely(atomic_read(&bo->reserved) != 0)) {
+ while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
/**
* Deadlock avoidance for multi-bo reserving.
*/
@@ -241,26 +240,36 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
if (no_wait)
return -EBUSY;
- spin_unlock(&glob->lru_lock);
ret = ttm_bo_wait_unreserved(bo, interruptible);
- spin_lock(&glob->lru_lock);
if (unlikely(ret))
return ret;
}
- atomic_set(&bo->reserved, 1);
if (use_sequence) {
+ bool wake_up = false;
/**
* Wake up waiters that may need to recheck for deadlock,
* if we decreased the sequence number.
*/
if (unlikely((bo->val_seq - sequence < (1 << 31))
|| !bo->seq_valid))
- wake_up_all(&bo->event_queue);
+ wake_up = true;
+ /*
+ * With weak memory ordering these two stores can be observed in the
+ * wrong order.  Since we call wake_up_all() in that case, this will
+ * hopefully not pose a problem; the worst outcome is that a waiter
+ * sees the old val_seq, spuriously hits -EAGAIN in ttm_bo_reserve()
+ * and retries.  That can only happen if seq_valid is observed as
+ * written before val_seq, and merely costs a little extra CPU time.
+ */
bo->val_seq = sequence;
bo->seq_valid = true;
+ if (wake_up)
+ wake_up_all(&bo->event_queue);
} else {
bo->seq_valid = false;
}
@@ -289,17 +298,64 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
int put_count = 0;
int ret;
- spin_lock(&glob->lru_lock);
- ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
- sequence);
- if (likely(ret == 0))
+ ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
+ sequence);
+ if (likely(ret == 0)) {
+ spin_lock(&glob->lru_lock);
put_count = ttm_bo_del_from_lru(bo);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
+ ttm_bo_list_ref_sub(bo, put_count, true);
+ }
- ttm_bo_list_ref_sub(bo, put_count, true);
+ return ret;
+}
+
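+/*
+ * Blocking slow path, typically used after a reservation attempt returned
+ * -EAGAIN and the caller has backed off its other reservations: wait until
+ * the buffer becomes unreserved, then reserve it with the caller's (newly
+ * bumped) sequence number.
+ */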
+int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
+ bool interruptible, uint32_t sequence)
+{
+ bool wake_up = false;
+ int ret;
+
+ while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
+ WARN_ON(bo->seq_valid && sequence == bo->val_seq);
+
+ ret = ttm_bo_wait_unreserved(bo, interruptible);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)
+ wake_up = true;
+
+ /**
+ * Wake up waiters that may need to recheck for deadlock,
+ * if we decreased the sequence number.
+ */
+ bo->val_seq = sequence;
+ bo->seq_valid = true;
+ if (wake_up)
+ wake_up_all(&bo->event_queue);
+
+ return 0;
+}
+
+int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+ bool interruptible, uint32_t sequence)
+{
+ struct ttm_bo_global *glob = bo->glob;
+ int put_count, ret;
+
+ ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
+ if (likely(!ret)) {
+ spin_lock(&glob->lru_lock);
+ put_count = ttm_bo_del_from_lru(bo);
+ spin_unlock(&glob->lru_lock);
+ ttm_bo_list_ref_sub(bo, put_count, true);
+ }
return ret;
}
+EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
{
@@ -511,7 +567,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
int ret;
spin_lock(&glob->lru_lock);
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
spin_lock(&bdev->fence_lock);
(void) ttm_bo_wait(bo, false, false, true);
@@ -604,7 +660,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
return ret;
spin_lock(&glob->lru_lock);
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
/*
* We raced, and lost, someone else holds the reservation now,
@@ -668,7 +724,14 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
kref_get(&nentry->list_kref);
}
- ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
+ ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
+ if (remove_all && ret) {
+ spin_unlock(&glob->lru_lock);
+ ret = ttm_bo_reserve_nolru(entry, false, false,
+ false, 0);
+ spin_lock(&glob->lru_lock);
+ }
+
if (!ret)
ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
!remove_all);
@@ -816,7 +879,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &man->lru, lru) {
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
if (!ret)
break;
}
@@ -1797,7 +1860,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &glob->swap_lru, swap) {
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
if (!ret)
break;
}
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index cd9e4523dc5..7b90def1567 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -82,22 +82,6 @@ static void ttm_eu_list_ref_sub(struct list_head *list)
}
}
-static int ttm_eu_wait_unreserved_locked(struct list_head *list,
- struct ttm_buffer_object *bo)
-{
- struct ttm_bo_global *glob = bo->glob;
- int ret;
-
- ttm_eu_del_from_lru_locked(list);
- spin_unlock(&glob->lru_lock);
- ret = ttm_bo_wait_unreserved(bo, true);
- spin_lock(&glob->lru_lock);
- if (unlikely(ret != 0))
- ttm_eu_backoff_reservation_locked(list);
- return ret;
-}
-
-
void ttm_eu_backoff_reservation(struct list_head *list)
{
struct ttm_validate_buffer *entry;
@@ -145,47 +129,65 @@ int ttm_eu_reserve_buffers(struct list_head *list)
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
-retry:
spin_lock(&glob->lru_lock);
val_seq = entry->bo->bdev->val_seq++;
+retry:
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
-retry_this_bo:
- ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
+ /* already slowpath reserved? */
+ if (entry->reserved)
+ continue;
+
+ ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
switch (ret) {
case 0:
break;
case -EBUSY:
- ret = ttm_eu_wait_unreserved_locked(list, bo);
- if (unlikely(ret != 0)) {
- spin_unlock(&glob->lru_lock);
- ttm_eu_list_ref_sub(list);
- return ret;
- }
- goto retry_this_bo;
+ ttm_eu_del_from_lru_locked(list);
+ spin_unlock(&glob->lru_lock);
+ ret = ttm_bo_reserve_nolru(bo, true, false,
+ true, val_seq);
+ spin_lock(&glob->lru_lock);
+ if (!ret)
+ break;
+
+ if (unlikely(ret != -EAGAIN))
+ goto err;
+
+ /* fallthrough */
case -EAGAIN:
ttm_eu_backoff_reservation_locked(list);
+
+ /*
+ * temporarily increase sequence number every retry,
+ * to prevent us from seeing our old reservation
+ * sequence when someone else reserved the buffer,
+ * but hasn't updated the seq_valid/seqno members yet.
+ */
+ val_seq = entry->bo->bdev->val_seq++;
+
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
- ret = ttm_bo_wait_unreserved(bo, true);
+ ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
if (unlikely(ret != 0))
return ret;
+ spin_lock(&glob->lru_lock);
+ entry->reserved = true;
+ if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+ ret = -EBUSY;
+ goto err;
+ }
goto retry;
default:
- ttm_eu_backoff_reservation_locked(list);
- spin_unlock(&glob->lru_lock);
- ttm_eu_list_ref_sub(list);
- return ret;
+ goto err;
}
entry->reserved = true;
if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
- ttm_eu_backoff_reservation_locked(list);
- spin_unlock(&glob->lru_lock);
- ttm_eu_list_ref_sub(list);
- return -EBUSY;
+ ret = -EBUSY;
+ goto err;
}
}
@@ -194,6 +196,12 @@ retry_this_bo:
ttm_eu_list_ref_sub(list);
return 0;
+
+err:
+ ttm_eu_backoff_reservation_locked(list);
+ spin_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
+ return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
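
The reworked ttm_eu_reserve_buffers() above folds the old wait-and-retry helper into one loop: a non-blocking reserve first, a blocking reserve on -EBUSY, a full backoff plus a fresh sequence number on -EAGAIN, and a single err label for every failure path. What follows is a minimal user-space sketch of that control flow only; try_reserve(), reserve_blocking() and backoff_all() are hypothetical stand-ins, not TTM functions, and the sketch skips the slowpath detail where the kernel also reserves the contended buffer and marks it reserved before retrying.

#include <errno.h>
#include <stdio.h>

struct buf { int id; int reserved; };

/* Hypothetical stand-ins for the TTM reservation primitives. */
static int try_reserve(struct buf *b, unsigned int seq) { (void)seq; b->reserved = 1; return 0; }
static int reserve_blocking(struct buf *b, unsigned int seq) { (void)seq; b->reserved = 1; return 0; }
static void backoff_all(struct buf *bufs, int n)
{
	int i;

	for (i = 0; i < n; i++)
		bufs[i].reserved = 0;
}

static int reserve_all(struct buf *bufs, int n, unsigned int *val_seq)
{
	int i, ret;

retry:
	for (i = 0; i < n; i++) {
		if (bufs[i].reserved)	/* already reserved on an earlier pass */
			continue;

		ret = try_reserve(&bufs[i], *val_seq);
		switch (ret) {
		case 0:
			continue;
		case -EBUSY:
			/* contended: block until the buffer is free */
			ret = reserve_blocking(&bufs[i], *val_seq);
			if (ret == 0)
				continue;
			if (ret != -EAGAIN)
				goto err;
			/* fall through */
		case -EAGAIN:
			/* an older reservation wins: drop everything,
			 * take a fresh sequence number and start over */
			backoff_all(bufs, n);
			(*val_seq)++;
			goto retry;
		default:
			goto err;
		}
	}
	return 0;

err:
	backoff_all(bufs, n);
	return ret;
}

int main(void)
{
	struct buf bufs[3] = { { 1, 0 }, { 2, 0 }, { 3, 0 } };
	unsigned int seq = 0;

	printf("reserve_all -> %d\n", reserve_all(bufs, 3, &seq));
	return 0;
}
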
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 87aa5f5d3c8..cc6d90f28c7 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -75,6 +75,8 @@ struct udl_framebuffer {
struct drm_framebuffer base;
struct udl_gem_object *obj;
bool active_16; /* active on the 16-bit channel */
+ int x1, y1, x2, y2; /* dirty rect */
+ spinlock_t dirty_lock;
};
#define to_udl_fb(x) container_of(x, struct udl_framebuffer, base)
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index d4ab3beaada..9f4be3d4a02 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -22,9 +22,9 @@
#include <drm/drm_fb_helper.h>
-#define DL_DEFIO_WRITE_DELAY 5 /* fb_deferred_io.delay in jiffies */
+#define DL_DEFIO_WRITE_DELAY (HZ/20) /* fb_deferred_io.delay in jiffies */
-static int fb_defio = 1; /* Optionally enable experimental fb_defio mmap support */
+static int fb_defio = 0; /* Optionally enable experimental fb_defio mmap support */
static int fb_bpp = 16;
module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
@@ -153,6 +153,9 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
struct urb *urb;
int aligned_x;
int bpp = (fb->base.bits_per_pixel / 8);
+ int x2, y2;
+ bool store_for_later = false;
+ unsigned long flags;
if (!fb->active_16)
return 0;
@@ -169,8 +172,6 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
}
}
- start_cycles = get_cycles();
-
aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
x = aligned_x;
@@ -180,19 +181,53 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
(y + height > fb->base.height))
return -EINVAL;
+ /* if we are in atomic context just store the info;
+ we can't test in_atomic() once inside the spin lock */
+ if (in_atomic())
+ store_for_later = true;
+
+ x2 = x + width - 1;
+ y2 = y + height - 1;
+
+ spin_lock_irqsave(&fb->dirty_lock, flags);
+
+ if (fb->y1 < y)
+ y = fb->y1;
+ if (fb->y2 > y2)
+ y2 = fb->y2;
+ if (fb->x1 < x)
+ x = fb->x1;
+ if (fb->x2 > x2)
+ x2 = fb->x2;
+
+ if (store_for_later) {
+ fb->x1 = x;
+ fb->x2 = x2;
+ fb->y1 = y;
+ fb->y2 = y2;
+ spin_unlock_irqrestore(&fb->dirty_lock, flags);
+ return 0;
+ }
+
+ fb->x1 = fb->y1 = INT_MAX;
+ fb->x2 = fb->y2 = 0;
+
+ spin_unlock_irqrestore(&fb->dirty_lock, flags);
+ start_cycles = get_cycles();
+
urb = udl_get_urb(dev);
if (!urb)
return 0;
cmd = urb->transfer_buffer;
- for (i = y; i < y + height ; i++) {
+ for (i = y; i <= y2 ; i++) {
const int line_offset = fb->base.pitches[0] * i;
const int byte_offset = line_offset + (x * bpp);
const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
if (udl_render_hline(dev, bpp, &urb,
(char *) fb->obj->vmapping,
&cmd, byte_offset, dev_byte_offset,
- width * bpp,
+ (x2 - x + 1) * bpp,
&bytes_identical, &bytes_sent))
goto error;
}
@@ -422,7 +457,6 @@ static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
static const struct drm_framebuffer_funcs udlfb_funcs = {
.destroy = udl_user_framebuffer_destroy,
.dirty = udl_user_framebuffer_dirty,
- .create_handle = NULL,
};
@@ -434,16 +468,18 @@ udl_framebuffer_init(struct drm_device *dev,
{
int ret;
+ spin_lock_init(&ufb->dirty_lock);
ufb->obj = obj;
- ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd);
+ ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
return ret;
}
-static int udlfb_create(struct udl_fbdev *ufbdev,
+static int udlfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct udl_fbdev *ufbdev = (struct udl_fbdev *)helper;
struct drm_device *dev = ufbdev->helper.dev;
struct fb_info *info;
struct device *device = &dev->usbdev->dev;
@@ -521,27 +557,10 @@ out:
return ret;
}
-static int udl_fb_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct udl_fbdev *ufbdev = (struct udl_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = udlfb_create(ufbdev, sizes);
- if (ret)
- return ret;
-
- new_fb = 1;
- }
- return new_fb;
-}
-
static struct drm_fb_helper_funcs udl_fb_helper_funcs = {
.gamma_set = udl_crtc_fb_gamma_set,
.gamma_get = udl_crtc_fb_gamma_get,
- .fb_probe = udl_fb_find_or_create_single,
+ .fb_probe = udlfb_create,
};
static void udl_fbdev_destroy(struct drm_device *dev,
@@ -556,6 +575,7 @@ static void udl_fbdev_destroy(struct drm_device *dev,
framebuffer_release(info);
}
drm_fb_helper_fini(&ufbdev->helper);
+ drm_framebuffer_unregister_private(&ufbdev->ufb.base);
drm_framebuffer_cleanup(&ufbdev->ufb.base);
drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
}
@@ -583,6 +603,10 @@ int udl_fbdev_init(struct drm_device *dev)
}
drm_fb_helper_single_add_all_connectors(&ufbdev->helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
drm_fb_helper_initial_config(&ufbdev->helper, bpp_sel);
return 0;
}
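
udl_handle_damage() above now coalesces damage into a per-framebuffer dirty rectangle under dirty_lock: calls made from atomic context only widen the stored rectangle and return, while other calls take the accumulated union, reset the stored rectangle to the empty INT_MAX/0 state, and render it. Below is a self-contained sketch of that accumulate-or-flush bookkeeping without any locking; struct dirty_rect and the helper names are illustrative, not driver code.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct dirty_rect {
	int x1, y1;	/* top-left corner, INT_MAX when empty */
	int x2, y2;	/* bottom-right corner, 0 when empty   */
};

static void dirty_init(struct dirty_rect *r)
{
	r->x1 = r->y1 = INT_MAX;
	r->x2 = r->y2 = 0;
}

/* Widen the stored rectangle so it also covers [x1,x2] x [y1,y2]. */
static void dirty_accumulate(struct dirty_rect *r, int x1, int y1, int x2, int y2)
{
	if (x1 < r->x1) r->x1 = x1;
	if (y1 < r->y1) r->y1 = y1;
	if (x2 > r->x2) r->x2 = x2;
	if (y2 > r->y2) r->y2 = y2;
}

/* Hand out the accumulated union and reset the stored rectangle. */
static bool dirty_take(struct dirty_rect *r, struct dirty_rect *out)
{
	if (r->x1 > r->x2 || r->y1 > r->y2)
		return false;	/* nothing accumulated */
	*out = *r;
	dirty_init(r);
	return true;
}

int main(void)
{
	struct dirty_rect fb_dirty, flush;

	dirty_init(&fb_dirty);
	dirty_accumulate(&fb_dirty, 10, 10, 20, 20);
	dirty_accumulate(&fb_dirty, 5, 15, 12, 40);

	if (dirty_take(&fb_dirty, &flush))
		printf("flush %d,%d - %d,%d\n",
		       flush.x1, flush.y1, flush.x2, flush.y2);
	return 0;
}
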
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 142fee5f983..f343db73e09 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -75,15 +75,19 @@ static int udl_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
}
#endif
-static inline u16 pixel32_to_be16p(const uint8_t *pixel)
+static inline u16 pixel32_to_be16(const uint32_t pixel)
{
- uint32_t pix = *(uint32_t *)pixel;
- u16 retval;
+ return (((pixel >> 3) & 0x001f) |
+ ((pixel >> 5) & 0x07e0) |
+ ((pixel >> 8) & 0xf800));
+}
- retval = (((pix >> 3) & 0x001f) |
- ((pix >> 5) & 0x07e0) |
- ((pix >> 8) & 0xf800));
- return retval;
+static bool pixel_repeats(const void *pixel, const uint32_t repeat, int bpp)
+{
+ if (bpp == 2)
+ return *(const uint16_t *)pixel == repeat;
+ else
+ return *(const uint32_t *)pixel == repeat;
}
/*
@@ -152,29 +156,33 @@ static void udl_compress_hline16(
prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp);
while (pixel < cmd_pixel_end) {
- const u8 * const repeating_pixel = pixel;
-
- if (bpp == 2)
- *(uint16_t *)cmd = cpu_to_be16p((uint16_t *)pixel);
- else if (bpp == 4)
- *(uint16_t *)cmd = cpu_to_be16(pixel32_to_be16p(pixel));
+ const u8 *const start = pixel;
+ u32 repeating_pixel;
+
+ if (bpp == 2) {
+ repeating_pixel = *(uint16_t *)pixel;
+ *(uint16_t *)cmd = cpu_to_be16(repeating_pixel);
+ } else {
+ repeating_pixel = *(uint32_t *)pixel;
+ *(uint16_t *)cmd = cpu_to_be16(pixel32_to_be16(repeating_pixel));
+ }
cmd += 2;
pixel += bpp;
if (unlikely((pixel < cmd_pixel_end) &&
- (!memcmp(pixel, repeating_pixel, bpp)))) {
+ (pixel_repeats(pixel, repeating_pixel, bpp)))) {
/* go back and fill in raw pixel count */
- *raw_pixels_count_byte = (((repeating_pixel -
+ *raw_pixels_count_byte = (((start -
raw_pixel_start) / bpp) + 1) & 0xFF;
- while ((pixel < cmd_pixel_end)
- && (!memcmp(pixel, repeating_pixel, bpp))) {
+ while ((pixel < cmd_pixel_end) &&
+ (pixel_repeats(pixel, repeating_pixel, bpp))) {
pixel += bpp;
}
/* immediately after raw data is repeat byte */
- *cmd++ = (((pixel - repeating_pixel) / bpp) - 1) & 0xFF;
+ *cmd++ = (((pixel - start) / bpp) - 1) & 0xFF;
/* Then start another raw pixel span */
raw_pixel_start = pixel;
@@ -223,6 +231,8 @@ int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
u8 *cmd = *urb_buf_ptr;
u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
+ BUG_ON(!(bpp == 2 || bpp == 4));
+
line_start = (u8 *) (front + byte_offset);
next_pixel = line_start;
line_end = next_pixel + byte_width;
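
The compression changes above replace the memcmp() against a pointer into the scanline with a plain value comparison (pixel_repeats()) and convert XRGB8888 to RGB565 from a pixel value instead of through a pointer. Here is a stand-alone sketch of those two helpers in plain C; the names mirror the driver, but this is not the driver code and it omits the big-endian conversion the driver applies afterwards.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* XRGB8888 -> RGB565: keep the top 5/6/5 bits of each component. */
static uint16_t pixel32_to_be16(uint32_t pixel)
{
	return ((pixel >> 3) & 0x001f) |	/* blue  */
	       ((pixel >> 5) & 0x07e0) |	/* green */
	       ((pixel >> 8) & 0xf800);		/* red   */
}

/* Does the pixel at 'pixel' repeat the current value? bpp is 2 or 4. */
static bool pixel_repeats(const void *pixel, uint32_t repeat, int bpp)
{
	if (bpp == 2)
		return *(const uint16_t *)pixel == (uint16_t)repeat;
	return *(const uint32_t *)pixel == repeat;
}

int main(void)
{
	uint32_t line[] = { 0x00ff0000, 0x00ff0000, 0x000000ff };

	printf("0x%04x\n", pixel32_to_be16(line[0]));	/* 0xf800: pure red */
	printf("%d %d\n",
	       pixel_repeats(&line[1], line[0], 4),	/* 1: repeats */
	       pixel_repeats(&line[2], line[0], 4));	/* 0: differs */
	return 0;
}
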
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 161f8b2549a..07dfd823cc3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -829,7 +829,7 @@ static void vmw_lastclose(struct drm_device *dev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
set.crtc = crtc;
- ret = crtc->funcs->set_config(&set);
+ ret = drm_mode_set_config_internal(&set);
WARN_ON(ret != 0);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index d9fbbe19107..c509d40c489 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -131,7 +131,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
struct vmw_master *vmaster = vmw_master(file_priv->master);
struct drm_vmw_rect __user *clips_ptr;
struct drm_vmw_rect *clips = NULL;
- struct drm_mode_object *obj;
+ struct drm_framebuffer *fb;
struct vmw_framebuffer *vfb;
struct vmw_resource *res;
uint32_t num_clips;
@@ -163,19 +163,15 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
goto out_no_copy;
}
- ret = mutex_lock_interruptible(&dev->mode_config.mutex);
- if (unlikely(ret != 0)) {
- ret = -ERESTARTSYS;
- goto out_no_mode_mutex;
- }
+ drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, arg->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj) {
+ fb = drm_framebuffer_lookup(dev, arg->fb_id);
+ if (!fb) {
DRM_ERROR("Invalid framebuffer id.\n");
ret = -EINVAL;
goto out_no_fb;
}
- vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
+ vfb = vmw_framebuffer_to_vfb(fb);
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
@@ -199,9 +195,9 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
out_no_surface:
ttm_read_unlock(&vmaster->lock);
out_no_ttm_lock:
+ drm_framebuffer_unreference(fb);
out_no_fb:
- mutex_unlock(&dev->mode_config.mutex);
-out_no_mode_mutex:
+ drm_modeset_unlock_all(dev);
out_no_copy:
kfree(clips);
out_clips:
@@ -220,7 +216,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
struct vmw_master *vmaster = vmw_master(file_priv->master);
struct drm_vmw_rect __user *clips_ptr;
struct drm_vmw_rect *clips = NULL;
- struct drm_mode_object *obj;
+ struct drm_framebuffer *fb;
struct vmw_framebuffer *vfb;
uint32_t num_clips;
int ret;
@@ -251,24 +247,20 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
goto out_no_copy;
}
- ret = mutex_lock_interruptible(&dev->mode_config.mutex);
- if (unlikely(ret != 0)) {
- ret = -ERESTARTSYS;
- goto out_no_mode_mutex;
- }
+ drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, arg->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj) {
+ fb = drm_framebuffer_lookup(dev, arg->fb_id);
+ if (!fb) {
DRM_ERROR("Invalid framebuffer id.\n");
ret = -EINVAL;
goto out_no_fb;
}
- vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
+ vfb = vmw_framebuffer_to_vfb(fb);
if (!vfb->dmabuf) {
DRM_ERROR("Framebuffer not dmabuf backed.\n");
ret = -EINVAL;
- goto out_no_fb;
+ goto out_no_ttm_lock;
}
ret = ttm_read_lock(&vmaster->lock, true);
@@ -281,9 +273,9 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
ttm_read_unlock(&vmaster->lock);
out_no_ttm_lock:
+ drm_framebuffer_unreference(fb);
out_no_fb:
- mutex_unlock(&dev->mode_config.mutex);
-out_no_mode_mutex:
+ drm_modeset_unlock_all(dev);
out_no_copy:
kfree(clips);
out_clips:
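
Both present ioctls above switch from drm_mode_object_find() under the single mode_config mutex to drm_framebuffer_lookup() under drm_modeset_lock_all(), and the lookup now hands back a reference that has to be dropped on every exit path (hence the new drm_framebuffer_unreference() before out_no_fb, and the retargeted goto in the dmabuf check). A generic sketch of that lookup-takes-a-reference pattern with goto-based unwinding follows; lookup_fb() and put_fb() are hypothetical stand-ins, not DRM API.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct fb { int id; int refcount; };

/* Hypothetical lookup: returns an object with an extra reference held. */
static struct fb *lookup_fb(int id)
{
	struct fb *fb = malloc(sizeof(*fb));

	if (!fb)
		return NULL;
	fb->id = id;
	fb->refcount = 1;
	return fb;
}

static void put_fb(struct fb *fb)
{
	if (--fb->refcount == 0)
		free(fb);
}

static int present(int fb_id, int dmabuf_backed)
{
	struct fb *fb;
	int ret = 0;

	fb = lookup_fb(fb_id);
	if (!fb)
		return -EINVAL;

	if (!dmabuf_backed) {
		ret = -EINVAL;
		goto out_put;	/* still drop the lookup reference */
	}

	printf("presenting fb %d\n", fb->id);

out_put:
	put_fb(fb);
	return ret;
}

int main(void)
{
	printf("%d\n", present(1, 1));
	printf("%d\n", present(2, 0));
	return 0;
}
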
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 54743943d8b..3e3c7ab33ca 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -180,16 +180,29 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
struct vmw_dma_buffer *dmabuf = NULL;
int ret;
+ /*
+ * FIXME: Unclear whether there's any global state touched by the
+ * cursor_set function, especially vmw_cursor_update_position looks
+ * suspicious. For now take the easy route and reacquire all locks. We
+ * can do this since the caller in the drm core doesn't check anything
+ * which is protected by any locks.
+ */
+ mutex_unlock(&crtc->mutex);
+ drm_modeset_lock_all(dev_priv->dev);
+
/* A lot of the code assumes this */
- if (handle && (width != 64 || height != 64))
- return -EINVAL;
+ if (handle && (width != 64 || height != 64)) {
+ ret = -EINVAL;
+ goto out;
+ }
if (handle) {
ret = vmw_user_lookup_handle(dev_priv, tfile,
handle, &surface, &dmabuf);
if (ret) {
DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
}
@@ -197,7 +210,8 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
if (surface && !surface->snooper.image) {
DRM_ERROR("surface not suitable for cursor\n");
vmw_surface_unreference(&surface);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
/* takedown old cursor */
@@ -225,14 +239,20 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
du->hotspot_x, du->hotspot_y);
} else {
vmw_cursor_update_position(dev_priv, false, 0, 0);
- return 0;
+ ret = 0;
+ goto out;
}
vmw_cursor_update_position(dev_priv, true,
du->cursor_x + du->hotspot_x,
du->cursor_y + du->hotspot_y);
- return 0;
+ ret = 0;
+out:
+ drm_modeset_unlock_all(dev_priv->dev);
+ mutex_lock(&crtc->mutex);
+
+ return ret;
}
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
@@ -244,10 +264,23 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
du->cursor_x = x + crtc->x;
du->cursor_y = y + crtc->y;
+ /*
+ * FIXME: Unclear whether there's any global state touched by the
+ * cursor_move function, especially vmw_cursor_update_position looks
+ * suspicious. For now take the easy route and reacquire all locks. We
+ * can do this since the caller in the drm core doesn't check anything
+ * which is protected by any locks.
+ */
+ mutex_unlock(&crtc->mutex);
+ drm_modeset_lock_all(dev_priv->dev);
+
vmw_cursor_update_position(dev_priv, shown,
du->cursor_x + du->hotspot_x,
du->cursor_y + du->hotspot_y);
+ drm_modeset_unlock_all(dev_priv->dev);
+ mutex_lock(&crtc->mutex);
+
return 0;
}
@@ -373,16 +406,6 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
* Generic framebuffer code
*/
-int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- if (handle)
- *handle = 0;
-
- return 0;
-}
-
/*
* Surface framebuffer code
*/
@@ -610,7 +633,6 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
.destroy = vmw_framebuffer_surface_destroy,
.dirty = vmw_framebuffer_surface_dirty,
- .create_handle = vmw_framebuffer_create_handle,
};
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
@@ -681,14 +703,10 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
goto out_err1;
}
- ret = drm_framebuffer_init(dev, &vfbs->base.base,
- &vmw_framebuffer_surface_funcs);
- if (ret)
- goto out_err2;
-
if (!vmw_surface_reference(surface)) {
DRM_ERROR("failed to reference surface %p\n", surface);
- goto out_err3;
+ ret = -EINVAL;
+ goto out_err2;
}
/* XXX get the first 3 from the surface info */
@@ -707,10 +725,15 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
*out = &vfbs->base;
+ ret = drm_framebuffer_init(dev, &vfbs->base.base,
+ &vmw_framebuffer_surface_funcs);
+ if (ret)
+ goto out_err3;
+
return 0;
out_err3:
- drm_framebuffer_cleanup(&vfbs->base.base);
+ vmw_surface_unreference(&surface);
out_err2:
kfree(vfbs);
out_err1:
@@ -960,7 +983,6 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
.destroy = vmw_framebuffer_dmabuf_destroy,
.dirty = vmw_framebuffer_dmabuf_dirty,
- .create_handle = vmw_framebuffer_create_handle,
};
/**
@@ -1053,14 +1075,10 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
goto out_err1;
}
- ret = drm_framebuffer_init(dev, &vfbd->base.base,
- &vmw_framebuffer_dmabuf_funcs);
- if (ret)
- goto out_err2;
-
if (!vmw_dmabuf_reference(dmabuf)) {
DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
- goto out_err3;
+ ret = -EINVAL;
+ goto out_err2;
}
vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
@@ -1077,10 +1095,15 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
vfbd->base.user_handle = mode_cmd->handle;
*out = &vfbd->base;
+ ret = drm_framebuffer_init(dev, &vfbd->base.base,
+ &vmw_framebuffer_dmabuf_funcs);
+ if (ret)
+ goto out_err3;
+
return 0;
out_err3:
- drm_framebuffer_cleanup(&vfbd->base.base);
+ vmw_dmabuf_unreference(&dmabuf);
out_err2:
kfree(vfbd);
out_err1:
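
Both framebuffer constructors above are reordered so that drm_framebuffer_init() — the step that makes the object visible to the rest of DRM — runs last, after the surface/dmabuf reference is taken and all fields are filled in; on failure the new out_err3 path drops that reference instead of tearing down a half-registered framebuffer. A small sketch of the publish-last construction pattern, with register_object() and the struct as illustrative placeholders rather than DRM API:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct object { char name[32]; int published; };

/* Hypothetical registration step; may fail. */
static int register_object(struct object *obj)
{
	obj->published = 1;
	return 0;
}

static int object_create(const char *name, struct object **out)
{
	struct object *obj;
	int ret;

	obj = calloc(1, sizeof(*obj));
	if (!obj)
		return -ENOMEM;

	/* Fully initialize first: once registered, others may see it. */
	strncpy(obj->name, name, sizeof(obj->name) - 1);

	ret = register_object(obj);	/* publish last */
	if (ret) {
		free(obj);		/* nothing else to unwind yet */
		return ret;
	}

	*out = obj;
	return 0;
}

int main(void)
{
	struct object *obj;

	if (object_create("fb0", &obj) == 0) {
		printf("%s published=%d\n", obj->name, obj->published);
		free(obj);
	}
	return 0;
}
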
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index e01a17b407b..16556170fb3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -959,13 +959,13 @@ void vmw_resource_unreserve(struct vmw_resource *res,
if (new_backup && new_backup != res->backup) {
if (res->backup) {
- BUG_ON(atomic_read(&res->backup->base.reserved) == 0);
+ BUG_ON(!ttm_bo_is_reserved(&res->backup->base));
list_del_init(&res->mob_head);
vmw_dmabuf_unreference(&res->backup);
}
res->backup = vmw_dmabuf_reference(new_backup);
- BUG_ON(atomic_read(&new_backup->base.reserved) == 0);
+ BUG_ON(!ttm_bo_is_reserved(&new_backup->base));
list_add_tail(&res->mob_head, &new_backup->res_list);
}
if (new_backup)
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig
deleted file mode 100644
index 419917955bf..00000000000
--- a/drivers/gpu/stub/Kconfig
+++ /dev/null
@@ -1,18 +0,0 @@
-config STUB_POULSBO
- tristate "Intel GMA500 Stub Driver"
- depends on PCI
- depends on NET # for THERMAL
- # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
- # but for select to work, need to select ACPI_VIDEO's dependencies, ick
- select BACKLIGHT_CLASS_DEVICE if ACPI
- select VIDEO_OUTPUT_CONTROL if ACPI
- select INPUT if ACPI
- select ACPI_VIDEO if ACPI
- select THERMAL if ACPI
- help
- Choose this option if you have a system that has Intel GMA500
- (Poulsbo) integrated graphics. If M is selected, the module will
- be called Poulsbo. This driver is a stub driver for Poulsbo that
- will call poulsbo.ko to enable the acpi backlight control sysfs
- entry file because there have no poulsbo native driver can support
- intel opregion.
diff --git a/drivers/gpu/stub/Makefile b/drivers/gpu/stub/Makefile
deleted file mode 100644
index cd940cc9d36..00000000000
--- a/drivers/gpu/stub/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_STUB_POULSBO) += poulsbo.o
diff --git a/drivers/gpu/stub/poulsbo.c b/drivers/gpu/stub/poulsbo.c
deleted file mode 100644
index 7edfd27b8de..00000000000
--- a/drivers/gpu/stub/poulsbo.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Intel Poulsbo Stub driver
- *
- * Copyright (C) 2010 Novell <jlee@novell.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/acpi.h>
-#include <acpi/video.h>
-
-#define DRIVER_NAME "poulsbo"
-
-enum {
- CHIP_PSB_8108 = 0,
- CHIP_PSB_8109 = 1,
-};
-
-static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
- {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \
- {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \
- {0, 0, 0}
-};
-
-static int poulsbo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- return acpi_video_register();
-}
-
-static void poulsbo_remove(struct pci_dev *pdev)
-{
- acpi_video_unregister();
-}
-
-static struct pci_driver poulsbo_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- .probe = poulsbo_probe,
- .remove = poulsbo_remove,
-};
-
-static int __init poulsbo_init(void)
-{
- return pci_register_driver(&poulsbo_driver);
-}
-
-static void __exit poulsbo_exit(void)
-{
- pci_unregister_driver(&poulsbo_driver);
-}
-
-module_init(poulsbo_init);
-module_exit(poulsbo_exit);
-
-MODULE_AUTHOR("Lee, Chun-Yi <jlee@novell.com>");
-MODULE_DESCRIPTION("Poulsbo Stub Driver");
-MODULE_LICENSE("GPL");
-
-MODULE_DEVICE_TABLE(pci, pciidlist);
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index fa60add0ff6..cf787e1d932 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -25,6 +25,7 @@
#include <linux/fb.h>
#include <linux/pci.h>
+#include <linux/console.h>
#include <linux/vga_switcheroo.h>
#include <linux/vgaarb.h>
@@ -337,8 +338,10 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
if (new_client->fb_info) {
struct fb_event event;
+ console_lock();
event.info = new_client->fb_info;
fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event);
+ console_unlock();
}
ret = vgasr_priv.handler->switchto(new_client->id);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 43d5c8b8e7a..0099667a397 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4255,13 +4255,19 @@ static void quirk_iommu_rwbf(struct pci_dev *dev)
{
/*
* Mobile 4 Series Chipset neglects to set RWBF capability,
- * but needs it:
+ * but needs it. The same seems to hold for the desktop versions.
*/
printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
rwbf_quirk = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK (0xf << 8)
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 3a7965d6ac2..093f10c88cc 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -112,8 +112,6 @@ source "drivers/staging/media/Kconfig"
source "drivers/staging/net/Kconfig"
-source "drivers/staging/omapdrm/Kconfig"
-
source "drivers/staging/android/Kconfig"
source "drivers/staging/ozwpan/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 5971865d0c6..fa41b04cf4c 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -48,7 +48,6 @@ obj-$(CONFIG_SPEAKUP) += speakup/
obj-$(CONFIG_TOUCHSCREEN_CLEARPAD_TM1217) += cptm1217/
obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += ste_rmi4/
obj-$(CONFIG_MFD_NVEC) += nvec/
-obj-$(CONFIG_DRM_OMAP) += omapdrm/
obj-$(CONFIG_ANDROID) += android/
obj-$(CONFIG_USB_WPAN_HCD) += ozwpan/
obj-$(CONFIG_USB_G_CCG) += ccg/
diff --git a/drivers/staging/omapdrm/TODO b/drivers/staging/omapdrm/TODO
deleted file mode 100644
index abeeb00aaa1..00000000000
--- a/drivers/staging/omapdrm/TODO
+++ /dev/null
@@ -1,32 +0,0 @@
-TODO
-. add video decode/encode support (via syslink3 + codec-engine)
- . NOTE: with dmabuf this probably could be split into different driver
- so perhaps this TODO doesn't belong here
-. where should we do eviction (detatch_pages())? We aren't necessarily
- accessing the pages via a GART, so maybe we need some other threshold
- to put a cap on the # of pages that can be pin'd. (It is mostly only
- of interest in case you have a swap partition/file.. which a lot of
- these devices do not.. but it doesn't hurt for the driver to do the
- right thing anyways.)
- . Use mm_shrinker to trigger unpinning pages. Need to figure out how
- to handle next issue first (I think?)
- . Note TTM already has some mm_shrinker stuff.. maybe an argument to
- move to TTM? Or maybe something that could be factored out in common?
-. GEM/shmem backed pages can have existing mappings (kernel linear map,
- etc..), which isn't really ideal.
-. Revisit GEM sync object infrastructure.. TTM has some framework for this
- already. Possibly this could be refactored out and made more common?
- There should be some way to do this with less wheel-reinvention.
-. Solve PM sequencing on resume. DMM/TILER must be reloaded before any
- access is made from any component in the system. Which means on suspend
- CRTC's should be disabled, and on resume the LUT should be reprogrammed
- before CRTC's are re-enabled, to prevent DSS from trying to DMA from a
- buffer mapped in DMM/TILER before LUT is reloaded.
-
-Userspace:
-. git://github.com/robclark/xf86-video-omap.git
-
-Currently tested on
-. OMAP3530 beagleboard
-. OMAP4430 pandaboard
-. OMAP4460 pandaboard
diff --git a/drivers/staging/omapdrm/omap_drm.h b/drivers/staging/omapdrm/omap_drm.h
deleted file mode 100644
index f0ac34a8973..00000000000
--- a/drivers/staging/omapdrm/omap_drm.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * include/drm/omap_drm.h
- *
- * Copyright (C) 2011 Texas Instruments
- * Author: Rob Clark <rob@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __OMAP_DRM_H__
-#define __OMAP_DRM_H__
-
-#include <drm/drm.h>
-
-/* Please note that modifications to all structs defined here are
- * subject to backwards-compatibility constraints.
- */
-
-#define OMAP_PARAM_CHIPSET_ID 1 /* ie. 0x3430, 0x4430, etc */
-
-struct drm_omap_param {
- uint64_t param; /* in */
- uint64_t value; /* in (set_param), out (get_param) */
-};
-
-#define OMAP_BO_SCANOUT 0x00000001 /* scanout capable (phys contiguous) */
-#define OMAP_BO_CACHE_MASK 0x00000006 /* cache type mask, see cache modes */
-#define OMAP_BO_TILED_MASK 0x00000f00 /* tiled mapping mask, see tiled modes */
-
-/* cache modes */
-#define OMAP_BO_CACHED 0x00000000 /* default */
-#define OMAP_BO_WC 0x00000002 /* write-combine */
-#define OMAP_BO_UNCACHED 0x00000004 /* strongly-ordered (uncached) */
-
-/* tiled modes */
-#define OMAP_BO_TILED_8 0x00000100
-#define OMAP_BO_TILED_16 0x00000200
-#define OMAP_BO_TILED_32 0x00000300
-#define OMAP_BO_TILED (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32)
-
-union omap_gem_size {
- uint32_t bytes; /* (for non-tiled formats) */
- struct {
- uint16_t width;
- uint16_t height;
- } tiled; /* (for tiled formats) */
-};
-
-struct drm_omap_gem_new {
- union omap_gem_size size; /* in */
- uint32_t flags; /* in */
- uint32_t handle; /* out */
- uint32_t __pad;
-};
-
-/* mask of operations: */
-enum omap_gem_op {
- OMAP_GEM_READ = 0x01,
- OMAP_GEM_WRITE = 0x02,
-};
-
-struct drm_omap_gem_cpu_prep {
- uint32_t handle; /* buffer handle (in) */
- uint32_t op; /* mask of omap_gem_op (in) */
-};
-
-struct drm_omap_gem_cpu_fini {
- uint32_t handle; /* buffer handle (in) */
- uint32_t op; /* mask of omap_gem_op (in) */
- /* TODO maybe here we pass down info about what regions are touched
- * by sw so we can be clever about cache ops? For now a placeholder,
- * set to zero and we just do full buffer flush..
- */
- uint32_t nregions;
- uint32_t __pad;
-};
-
-struct drm_omap_gem_info {
- uint32_t handle; /* buffer handle (in) */
- uint32_t pad;
- uint64_t offset; /* mmap offset (out) */
- /* note: in case of tiled buffers, the user virtual size can be
- * different from the physical size (ie. how many pages are needed
- * to back the object) which is returned in DRM_IOCTL_GEM_OPEN..
- * This size here is the one that should be used if you want to
- * mmap() the buffer:
- */
- uint32_t size; /* virtual size for mmap'ing (out) */
- uint32_t __pad;
-};
-
-#define DRM_OMAP_GET_PARAM 0x00
-#define DRM_OMAP_SET_PARAM 0x01
-/* placeholder for plugin-api
-#define DRM_OMAP_GET_BASE 0x02
-*/
-#define DRM_OMAP_GEM_NEW 0x03
-#define DRM_OMAP_GEM_CPU_PREP 0x04
-#define DRM_OMAP_GEM_CPU_FINI 0x05
-#define DRM_OMAP_GEM_INFO 0x06
-#define DRM_OMAP_NUM_IOCTLS 0x07
-
-#define DRM_IOCTL_OMAP_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_PARAM, struct drm_omap_param)
-#define DRM_IOCTL_OMAP_SET_PARAM DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_SET_PARAM, struct drm_omap_param)
-/* placeholder for plugin-api
-#define DRM_IOCTL_OMAP_GET_BASE DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_BASE, struct drm_omap_get_base)
-*/
-#define DRM_IOCTL_OMAP_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_NEW, struct drm_omap_gem_new)
-#define DRM_IOCTL_OMAP_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_PREP, struct drm_omap_gem_cpu_prep)
-#define DRM_IOCTL_OMAP_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_FINI, struct drm_omap_gem_cpu_fini)
-#define DRM_IOCTL_OMAP_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_INFO, struct drm_omap_gem_info)
-
-#endif /* __OMAP_DRM_H__ */
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 6c4abeaf690..fbd447b390f 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -638,7 +638,7 @@ static inline void save_screen(struct vc_data *vc)
* Redrawing of screen
*/
-static void clear_buffer_attributes(struct vc_data *vc)
+void clear_buffer_attributes(struct vc_data *vc)
{
unsigned short *p = (unsigned short *)vc->vc_origin;
int count = vc->vc_screenbuf_size / 2;
@@ -2987,7 +2987,7 @@ int __init vty_init(const struct file_operations *console_fops)
static struct class *vtconsole_class;
-static int bind_con_driver(const struct consw *csw, int first, int last,
+static int do_bind_con_driver(const struct consw *csw, int first, int last,
int deflt)
{
struct module *owner = csw->owner;
@@ -2998,7 +2998,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
if (!try_module_get(owner))
return -ENODEV;
- console_lock();
+ WARN_CONSOLE_UNLOCKED();
/* check if driver is registered */
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
@@ -3083,11 +3083,22 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
retval = 0;
err:
- console_unlock();
module_put(owner);
return retval;
};
+
+static int bind_con_driver(const struct consw *csw, int first, int last,
+ int deflt)
+{
+ int ret;
+
+ console_lock();
+ ret = do_bind_con_driver(csw, first, last, deflt);
+ console_unlock();
+ return ret;
+}
+
#ifdef CONFIG_VT_HW_CONSOLE_BINDING
static int con_is_graphics(const struct consw *csw, int first, int last)
{
@@ -3124,6 +3135,18 @@ static int con_is_graphics(const struct consw *csw, int first, int last)
*/
int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
{
+ int retval;
+
+ console_lock();
+ retval = do_unbind_con_driver(csw, first, last, deflt);
+ console_unlock();
+ return retval;
+}
+EXPORT_SYMBOL(unbind_con_driver);
+
+/* unlocked version of unbind_con_driver() */
+int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+{
struct module *owner = csw->owner;
const struct consw *defcsw = NULL;
struct con_driver *con_driver = NULL, *con_back = NULL;
@@ -3132,7 +3155,7 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
if (!try_module_get(owner))
return -ENODEV;
- console_lock();
+ WARN_CONSOLE_UNLOCKED();
/* check if driver is registered and if it is unbindable */
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
@@ -3145,10 +3168,8 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
}
}
- if (retval) {
- console_unlock();
+ if (retval)
goto err;
- }
retval = -ENODEV;
@@ -3164,15 +3185,11 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
}
}
- if (retval) {
- console_unlock();
+ if (retval)
goto err;
- }
- if (!con_is_bound(csw)) {
- console_unlock();
+ if (!con_is_bound(csw))
goto err;
- }
first = max(first, con_driver->first);
last = min(last, con_driver->last);
@@ -3199,15 +3216,14 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
if (!con_is_bound(csw))
con_driver->flag &= ~CON_DRIVER_FLAG_INIT;
- console_unlock();
/* ignore return value, binding should not fail */
- bind_con_driver(defcsw, first, last, deflt);
+ do_bind_con_driver(defcsw, first, last, deflt);
err:
module_put(owner);
return retval;
}
-EXPORT_SYMBOL(unbind_con_driver);
+EXPORT_SYMBOL_GPL(do_unbind_con_driver);
static int vt_bind(struct con_driver *con)
{
@@ -3492,28 +3508,18 @@ int con_debug_leave(void)
}
EXPORT_SYMBOL_GPL(con_debug_leave);
-/**
- * register_con_driver - register console driver to console layer
- * @csw: console driver
- * @first: the first console to take over, minimum value is 0
- * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1
- *
- * DESCRIPTION: This function registers a console driver which can later
- * bind to a range of consoles specified by @first and @last. It will
- * also initialize the console driver by calling con_startup().
- */
-int register_con_driver(const struct consw *csw, int first, int last)
+static int do_register_con_driver(const struct consw *csw, int first, int last)
{
struct module *owner = csw->owner;
struct con_driver *con_driver;
const char *desc;
int i, retval = 0;
+ WARN_CONSOLE_UNLOCKED();
+
if (!try_module_get(owner))
return -ENODEV;
- console_lock();
-
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
con_driver = &registered_con_driver[i];
@@ -3566,10 +3572,29 @@ int register_con_driver(const struct consw *csw, int first, int last)
}
err:
- console_unlock();
module_put(owner);
return retval;
}
+
+/**
+ * register_con_driver - register console driver to console layer
+ * @csw: console driver
+ * @first: the first console to take over, minimum value is 0
+ * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1
+ *
+ * DESCRIPTION: This function registers a console driver which can later
+ * bind to a range of consoles specified by @first and @last. It will
+ * also initialize the console driver by calling con_startup().
+ */
+int register_con_driver(const struct consw *csw, int first, int last)
+{
+ int retval;
+
+ console_lock();
+ retval = do_register_con_driver(csw, first, last);
+ console_unlock();
+ return retval;
+}
EXPORT_SYMBOL(register_con_driver);
/**
@@ -3585,9 +3610,18 @@ EXPORT_SYMBOL(register_con_driver);
*/
int unregister_con_driver(const struct consw *csw)
{
- int i, retval = -ENODEV;
+ int retval;
console_lock();
+ retval = do_unregister_con_driver(csw);
+ console_unlock();
+ return retval;
+}
+EXPORT_SYMBOL(unregister_con_driver);
+
+int do_unregister_con_driver(const struct consw *csw)
+{
+ int i, retval = -ENODEV;
/* cannot unregister a bound driver */
if (con_is_bound(csw))
@@ -3613,27 +3647,53 @@ int unregister_con_driver(const struct consw *csw)
}
}
err:
- console_unlock();
return retval;
}
-EXPORT_SYMBOL(unregister_con_driver);
+EXPORT_SYMBOL_GPL(do_unregister_con_driver);
/*
* If we support more console drivers, this function is used
* when a driver wants to take over some existing consoles
* and become default driver for newly opened ones.
*
- * take_over_console is basically a register followed by unbind
+ * take_over_console is basically a register followed by unbind
+ */
+int do_take_over_console(const struct consw *csw, int first, int last, int deflt)
+{
+ int err;
+
+ err = do_register_con_driver(csw, first, last);
+ /*
+ * If we get a busy error we still want to bind the console driver
+ * and return success, as we may have unbound the console driver
+ * but not unregistered it.
+ */
+ if (err == -EBUSY)
+ err = 0;
+ if (!err)
+ do_bind_con_driver(csw, first, last, deflt);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(do_take_over_console);
+
+/*
+ * If we support more console drivers, this function is used
+ * when a driver wants to take over some existing consoles
+ * and become default driver for newly opened ones.
+ *
+ * take_over_console is basically a register followed by unbind
*/
int take_over_console(const struct consw *csw, int first, int last, int deflt)
{
int err;
err = register_con_driver(csw, first, last);
- /* if we get an busy error we still want to bind the console driver
+ /*
+ * If we get a busy error we still want to bind the console driver
* and return success, as we may have unbound the console driver
-  * but not unregistered it.
- */
+ * but not unregistered it.
+ */
if (err == -EBUSY)
err = 0;
if (!err)
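
The console rework above splits each entry point into a locked wrapper (bind_con_driver(), unbind_con_driver(), register_con_driver(), …) and an unlocked do_*() variant that only asserts the console lock via WARN_CONSOLE_UNLOCKED(), so callers that already hold console_lock — such as fbcon — can use the do_* form without deadlocking. A minimal user-space sketch of the same wrapper pattern with a pthread mutex; do_bind()/bind() and the crude assertion are illustrative, not kernel code.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t console_lock = PTHREAD_MUTEX_INITIALIZER;

/* Crude stand-in for WARN_CONSOLE_UNLOCKED(): trylock must fail. */
static void assert_console_locked(void)
{
	int ret = pthread_mutex_trylock(&console_lock);

	assert(ret != 0);
	(void)ret;
}

/* Unlocked worker: the caller must hold console_lock. */
static int do_bind(int first, int last)
{
	assert_console_locked();
	printf("binding consoles %d..%d\n", first, last);
	return 0;
}

/* Locked wrapper for callers that do not hold the lock yet. */
static int bind(int first, int last)
{
	int ret;

	pthread_mutex_lock(&console_lock);
	ret = do_bind(first, last);
	pthread_mutex_unlock(&console_lock);
	return ret;
}

int main(void)
{
	bind(0, 5);				/* takes the lock itself   */

	pthread_mutex_lock(&console_lock);	/* caller already holds it */
	do_bind(6, 11);
	pthread_mutex_unlock(&console_lock);
	return 0;
}
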
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 80cbd21b483..4c1546f71d5 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -21,8 +21,6 @@ source "drivers/gpu/vga/Kconfig"
source "drivers/gpu/drm/Kconfig"
-source "drivers/gpu/stub/Kconfig"
-
config VGASTATE
tristate
default n
@@ -33,6 +31,30 @@ config VIDEO_OUTPUT_CONTROL
This framework adds support for low-level control of the video
output switch.
+config DISPLAY_TIMING
+ bool
+
+config VIDEOMODE
+ bool
+
+config OF_DISPLAY_TIMING
+ bool "Enable device tree display timing support"
+ depends on OF
+ select DISPLAY_TIMING
+ help
+ Helper to parse display timings from the devicetree.
+
+config OF_VIDEOMODE
+ bool "Enable device tree videomode support"
+ depends on OF
+ select VIDEOMODE
+ select OF_DISPLAY_TIMING
+ help
+ Helper to get videomodes from the devicetree.
+
+config HDMI
+ bool
+
menuconfig FB
tristate "Support for frame buffer devices"
---help---
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 0577f834fdc..9df387334cb 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -5,6 +5,7 @@
# Each configuration option enables a list of files.
obj-$(CONFIG_VGASTATE) += vgastate.o
+obj-$(CONFIG_HDMI) += hdmi.o
obj-y += fb_notify.o
obj-$(CONFIG_FB) += fb.o
fb-y := fbmem.o fbmon.o fbcmap.o fbsysfs.o \
@@ -170,3 +171,7 @@ obj-$(CONFIG_FB_VIRTUAL) += vfb.o
#video output switch sysfs driver
obj-$(CONFIG_VIDEO_OUTPUT_CONTROL) += output.o
+obj-$(CONFIG_DISPLAY_TIMING) += display_timing.o
+obj-$(CONFIG_OF_DISPLAY_TIMING) += of_display_timing.o
+obj-$(CONFIG_VIDEOMODE) += videomode.o
+obj-$(CONFIG_OF_VIDEOMODE) += of_videomode.o
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index f8a61e210d2..3cd67592782 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -529,6 +529,33 @@ static int search_for_mapped_con(void)
return retval;
}
+static int do_fbcon_takeover(int show_logo)
+{
+ int err, i;
+
+ if (!num_registered_fb)
+ return -ENODEV;
+
+ if (!show_logo)
+ logo_shown = FBCON_LOGO_DONTSHOW;
+
+ for (i = first_fb_vc; i <= last_fb_vc; i++)
+ con2fb_map[i] = info_idx;
+
+ err = do_take_over_console(&fb_con, first_fb_vc, last_fb_vc,
+ fbcon_is_default);
+
+ if (err) {
+ for (i = first_fb_vc; i <= last_fb_vc; i++)
+ con2fb_map[i] = -1;
+ info_idx = -1;
+ } else {
+ fbcon_has_console_bind = 1;
+ }
+
+ return err;
+}
+
static int fbcon_takeover(int show_logo)
{
int err, i;
@@ -815,6 +842,8 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info,
*
* Maps a virtual console @unit to a frame buffer device
* @newidx.
+ *
+ * This should be called with the console lock held.
*/
static int set_con2fb_map(int unit, int newidx, int user)
{
@@ -832,7 +861,7 @@ static int set_con2fb_map(int unit, int newidx, int user)
if (!search_for_mapped_con() || !con_is_bound(&fb_con)) {
info_idx = newidx;
- return fbcon_takeover(0);
+ return do_fbcon_takeover(0);
}
if (oldidx != -1)
@@ -840,7 +869,6 @@ static int set_con2fb_map(int unit, int newidx, int user)
found = search_fb_in_map(newidx);
- console_lock();
con2fb_map[unit] = newidx;
if (!err && !found)
err = con2fb_acquire_newinfo(vc, info, unit, oldidx);
@@ -867,7 +895,6 @@ static int set_con2fb_map(int unit, int newidx, int user)
if (!search_fb_in_map(info_idx))
info_idx = newidx;
- console_unlock();
return err;
}
@@ -990,7 +1017,7 @@ static const char *fbcon_startup(void)
}
/* Setup default font */
- if (!p->fontdata) {
+ if (!p->fontdata && !vc->vc_font.data) {
if (!fontname[0] || !(font = find_font(fontname)))
font = get_default_font(info->var.xres,
info->var.yres,
@@ -1000,6 +1027,8 @@ static const char *fbcon_startup(void)
vc->vc_font.height = font->height;
vc->vc_font.data = (void *)(p->fontdata = font->data);
vc->vc_font.charcount = 256; /* FIXME Need to support more fonts */
+ } else {
+ p->fontdata = vc->vc_font.data;
}
cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
@@ -1159,9 +1188,9 @@ static void fbcon_init(struct vc_data *vc, int init)
ops->p = &fb_display[fg_console];
}
-static void fbcon_free_font(struct display *p)
+static void fbcon_free_font(struct display *p, bool freefont)
{
- if (p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
+ if (freefont && p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
kfree(p->fontdata - FONT_EXTRA_WORDS * sizeof(int));
p->fontdata = NULL;
p->userfont = 0;
@@ -1173,8 +1202,8 @@ static void fbcon_deinit(struct vc_data *vc)
struct fb_info *info;
struct fbcon_ops *ops;
int idx;
+ bool free_font = true;
- fbcon_free_font(p);
idx = con2fb_map[vc->vc_num];
if (idx == -1)
@@ -1185,6 +1214,8 @@ static void fbcon_deinit(struct vc_data *vc)
if (!info)
goto finished;
+ if (info->flags & FBINFO_MISC_FIRMWARE)
+ free_font = false;
ops = info->fbcon_par;
if (!ops)
@@ -1196,6 +1227,8 @@ static void fbcon_deinit(struct vc_data *vc)
ops->flags &= ~FBCON_FLAGS_INIT;
finished:
+ fbcon_free_font(p, free_font);
+
if (!con_is_bound(&fb_con))
fbcon_exit();
@@ -2985,7 +3018,7 @@ static int fbcon_unbind(void)
{
int ret;
- ret = unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc,
+ ret = do_unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc,
fbcon_is_default);
if (!ret)
@@ -3000,6 +3033,7 @@ static inline int fbcon_unbind(void)
}
#endif /* CONFIG_VT_HW_CONSOLE_BINDING */
+/* called with console_lock held */
static int fbcon_fb_unbind(int idx)
{
int i, new_idx = -1, ret = 0;
@@ -3026,6 +3060,7 @@ static int fbcon_fb_unbind(int idx)
return ret;
}
+/* called with console_lock held */
static int fbcon_fb_unregistered(struct fb_info *info)
{
int i, idx;
@@ -3058,11 +3093,12 @@ static int fbcon_fb_unregistered(struct fb_info *info)
primary_device = -1;
if (!num_registered_fb)
- unregister_con_driver(&fb_con);
+ do_unregister_con_driver(&fb_con);
return 0;
}
+/* called with console_lock held */
static void fbcon_remap_all(int idx)
{
int i;
@@ -3107,6 +3143,7 @@ static inline void fbcon_select_primary(struct fb_info *info)
}
#endif /* CONFIG_FRAMEBUFFER_DETECT_PRIMARY */
+/* called with console_lock held */
static int fbcon_fb_registered(struct fb_info *info)
{
int ret = 0, i, idx;
@@ -3123,7 +3160,7 @@ static int fbcon_fb_registered(struct fb_info *info)
}
if (info_idx != -1)
- ret = fbcon_takeover(1);
+ ret = do_fbcon_takeover(1);
} else {
for (i = first_fb_vc; i <= last_fb_vc; i++) {
if (con2fb_map_boot[i] == idx)
@@ -3259,6 +3296,7 @@ static int fbcon_event_notify(struct notifier_block *self,
ret = fbcon_fb_unregistered(info);
break;
case FB_EVENT_SET_CONSOLE_MAP:
+ /* called with console lock held */
con2fb = event->data;
ret = set_con2fb_map(con2fb->console - 1,
con2fb->framebuffer, 1);
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index d449a74d4a3..5855d17d19a 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -1064,7 +1064,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
unsigned short video_port_status = vga_video_port_reg + 6;
int font_select = 0x00, beg, i;
char *charmap;
-
+ bool clear_attribs = false;
if (vga_video_type != VIDEO_TYPE_EGAM) {
charmap = (char *) VGA_MAP_MEM(colourmap, 0);
beg = 0x0e;
@@ -1169,12 +1169,6 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
/* if 512 char mode is already enabled don't re-enable it. */
if ((set) && (ch512 != vga_512_chars)) {
- /* attribute controller */
- for (i = 0; i < MAX_NR_CONSOLES; i++) {
- struct vc_data *c = vc_cons[i].d;
- if (c && c->vc_sw == &vga_con)
- c->vc_hi_font_mask = ch512 ? 0x0800 : 0;
- }
vga_512_chars = ch512;
/* 256-char: enable intensity bit
512-char: disable intensity bit */
@@ -1185,8 +1179,22 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
it means, but it works, and it appears necessary */
inb_p(video_port_status);
vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0);
+ clear_attribs = true;
}
raw_spin_unlock_irq(&vga_lock);
+
+ if (clear_attribs) {
+ for (i = 0; i < MAX_NR_CONSOLES; i++) {
+ struct vc_data *c = vc_cons[i].d;
+ if (c && c->vc_sw == &vga_con) {
+ /* force hi font mask to 0, so we always clear
+ the bit on either transition */
+ c->vc_hi_font_mask = 0x00;
+ clear_buffer_attributes(c);
+ c->vc_hi_font_mask = ch512 ? 0x0800 : 0;
+ }
+ }
+ }
return 0;
}
diff --git a/drivers/video/display_timing.c b/drivers/video/display_timing.c
new file mode 100644
index 00000000000..5e1822cef57
--- /dev/null
+++ b/drivers/video/display_timing.c
@@ -0,0 +1,24 @@
+/*
+ * generic display timing functions
+ *
+ * Copyright (c) 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>, Pengutronix
+ *
+ * This file is released under the GPLv2
+ */
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <video/display_timing.h>
+
+void display_timings_release(struct display_timings *disp)
+{
+ if (disp->timings) {
+ unsigned int i;
+
+ for (i = 0; i < disp->num_timings; i++)
+ kfree(disp->timings[i]);
+ kfree(disp->timings);
+ }
+ kfree(disp);
+}
+EXPORT_SYMBOL_GPL(display_timings_release);
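
display_timings_release() above tears the structure down inside out: each individual timing first, then the pointer array, then the container itself. The same pattern in plain C, with illustrative struct names:

#include <stdlib.h>

struct timing { int pixelclock; };

struct timings {
	unsigned int num;
	struct timing **entries;
};

static void timings_release(struct timings *t)
{
	if (t->entries) {
		unsigned int i;

		/* free the individual timings first ... */
		for (i = 0; i < t->num; i++)
			free(t->entries[i]);
		/* ... then the pointer array itself */
		free(t->entries);
	}
	/* ... and finally the container */
	free(t);
}

int main(void)
{
	struct timings *t = calloc(1, sizeof(*t));

	t->num = 2;
	t->entries = calloc(t->num, sizeof(*t->entries));
	t->entries[0] = calloc(1, sizeof(struct timing));
	t->entries[1] = calloc(1, sizeof(struct timing));

	timings_release(t);
	return 0;
}
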
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 3ff0105a496..dc61c12ecf8 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1177,8 +1177,10 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
event.data = &con2fb;
if (!lock_fb_info(info))
return -ENODEV;
+ console_lock();
event.info = info;
ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event);
+ console_unlock();
unlock_fb_info(info);
break;
case FBIOBLANK:
@@ -1650,7 +1652,9 @@ static int do_register_framebuffer(struct fb_info *fb_info)
event.info = fb_info;
if (!lock_fb_info(fb_info))
return -ENODEV;
+ console_lock();
fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event);
+ console_unlock();
unlock_fb_info(fb_info);
return 0;
}
@@ -1666,8 +1670,10 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
if (!lock_fb_info(fb_info))
return -ENODEV;
+ console_lock();
event.info = fb_info;
ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
+ console_unlock();
unlock_fb_info(fb_info);
if (ret)
@@ -1682,7 +1688,9 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
num_registered_fb--;
fb_cleanup_device(fb_info);
event.info = fb_info;
+ console_lock();
fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
+ console_unlock();
/* this may free fb info */
put_fb_info(fb_info);
@@ -1853,11 +1861,8 @@ int fb_new_modelist(struct fb_info *info)
err = 1;
if (!list_empty(&info->modelist)) {
- if (!lock_fb_info(info))
- return -ENODEV;
event.info = info;
err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event);
- unlock_fb_info(info);
}
return err;
diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c
index cef65574db6..94ad0f71383 100644
--- a/drivers/video/fbmon.c
+++ b/drivers/video/fbmon.c
@@ -31,6 +31,8 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <video/edid.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
#ifdef CONFIG_PPC_OF
#include <asm/prom.h>
#include <asm/pci-bridge.h>
@@ -1373,6 +1375,98 @@ int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, struct fb_inf
kfree(timings);
return err;
}
+
+#if IS_ENABLED(CONFIG_VIDEOMODE)
+int fb_videomode_from_videomode(const struct videomode *vm,
+ struct fb_videomode *fbmode)
+{
+ unsigned int htotal, vtotal;
+
+ fbmode->xres = vm->hactive;
+ fbmode->left_margin = vm->hback_porch;
+ fbmode->right_margin = vm->hfront_porch;
+ fbmode->hsync_len = vm->hsync_len;
+
+ fbmode->yres = vm->vactive;
+ fbmode->upper_margin = vm->vback_porch;
+ fbmode->lower_margin = vm->vfront_porch;
+ fbmode->vsync_len = vm->vsync_len;
+
+ /* prevent division by zero in KHZ2PICOS macro */
+ fbmode->pixclock = vm->pixelclock ?
+ KHZ2PICOS(vm->pixelclock / 1000) : 0;
+
+ fbmode->sync = 0;
+ fbmode->vmode = 0;
+ if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH)
+ fbmode->sync |= FB_SYNC_HOR_HIGH_ACT;
+ if (vm->dmt_flags & VESA_DMT_VSYNC_HIGH)
+ fbmode->sync |= FB_SYNC_VERT_HIGH_ACT;
+ if (vm->data_flags & DISPLAY_FLAGS_INTERLACED)
+ fbmode->vmode |= FB_VMODE_INTERLACED;
+ if (vm->data_flags & DISPLAY_FLAGS_DOUBLESCAN)
+ fbmode->vmode |= FB_VMODE_DOUBLE;
+ fbmode->flag = 0;
+
+ htotal = vm->hactive + vm->hfront_porch + vm->hback_porch +
+ vm->hsync_len;
+ vtotal = vm->vactive + vm->vfront_porch + vm->vback_porch +
+ vm->vsync_len;
+ /* prevent division by zero */
+ if (htotal && vtotal) {
+ fbmode->refresh = vm->pixelclock / (htotal * vtotal);
+ /* a mode must have htotal and vtotal != 0 or it is invalid */
+ } else {
+ fbmode->refresh = 0;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fb_videomode_from_videomode);
+#endif
+
+#if IS_ENABLED(CONFIG_OF_VIDEOMODE)
+static inline void dump_fb_videomode(const struct fb_videomode *m)
+{
+ pr_debug("fb_videomode = %ux%u@%uHz (%ukHz) %u %u %u %u %u %u %u %u %u\n",
+ m->xres, m->yres, m->refresh, m->pixclock, m->left_margin,
+ m->right_margin, m->upper_margin, m->lower_margin,
+ m->hsync_len, m->vsync_len, m->sync, m->vmode, m->flag);
+}
+
+/**
+ * of_get_fb_videomode - get a fb_videomode from devicetree
+ * @np: device_node with the timing specification
+ * @fb: will be set to the return value
+ * @index: index into the list of display timings in devicetree
+ *
+ * DESCRIPTION:
+ * This function is expensive and should only be used if only one mode is to be
+ * read from DT. To get multiple modes, start with of_get_display_timings and
+ * work with that instead.
+ */
+int of_get_fb_videomode(struct device_node *np, struct fb_videomode *fb,
+ int index)
+{
+ struct videomode vm;
+ int ret;
+
+ ret = of_get_videomode(np, &vm, index);
+ if (ret)
+ return ret;
+
+ fb_videomode_from_videomode(&vm, fb);
+
+ pr_debug("%s: got %dx%d display mode from %s\n",
+ of_node_full_name(np), vm.hactive, vm.vactive, np->name);
+ dump_fb_videomode(fb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_get_fb_videomode);
+#endif
+
#else
int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var)
{
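
fb_videomode_from_videomode() above derives the fbdev timing fields from a generic struct videomode: pixclock becomes picoseconds per pixel via KHZ2PICOS of the pixel clock in kHz, and the refresh rate is the pixel clock divided by the total pixel slots per frame, with both divisions guarded against zero. A stand-alone version of that arithmetic in plain C; the field names mirror struct videomode, but nothing here is the kernel structure or macro.

#include <stdio.h>

struct vm {
	unsigned long pixelclock;	/* Hz */
	unsigned int hactive, hfront_porch, hback_porch, hsync_len;
	unsigned int vactive, vfront_porch, vback_porch, vsync_len;
};

/* picoseconds per pixel, the fbdev pixclock convention */
static unsigned long khz_to_picos(unsigned long khz)
{
	return khz ? 1000000000UL / khz : 0;
}

static unsigned int refresh_hz(const struct vm *vm)
{
	unsigned int htotal = vm->hactive + vm->hfront_porch +
			      vm->hback_porch + vm->hsync_len;
	unsigned int vtotal = vm->vactive + vm->vfront_porch +
			      vm->vback_porch + vm->vsync_len;

	if (!htotal || !vtotal)
		return 0;	/* such a mode is invalid anyway */

	return vm->pixelclock / (htotal * vtotal);
}

int main(void)
{
	/* a 1080p60-style timing */
	struct vm vm = {
		.pixelclock = 148500000,
		.hactive = 1920, .hfront_porch = 88,
		.hback_porch = 148, .hsync_len = 44,
		.vactive = 1080, .vfront_porch = 4,
		.vback_porch = 36, .vsync_len = 5,
	};

	printf("pixclock = %lu ps, refresh = %u Hz\n",
	       khz_to_picos(vm.pixelclock / 1000), refresh_hz(&vm));
	return 0;
}
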
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index a55e3669d13..ef476b02fbe 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -177,6 +177,8 @@ static ssize_t store_modes(struct device *device,
if (i * sizeof(struct fb_videomode) != count)
return -EINVAL;
+ if (!lock_fb_info(fb_info))
+ return -ENODEV;
console_lock();
list_splice(&fb_info->modelist, &old_list);
fb_videomode_to_modelist((const struct fb_videomode *)buf, i,
@@ -188,6 +190,7 @@ static ssize_t store_modes(struct device *device,
fb_destroy_modelist(&old_list);
console_unlock();
+ unlock_fb_info(fb_info);
return 0;
}
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
new file mode 100644
index 00000000000..ab23c9b7914
--- /dev/null
+++ b/drivers/video/hdmi.c
@@ -0,0 +1,308 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/hdmi.h>
+#include <linux/string.h>
+
+static void hdmi_infoframe_checksum(void *buffer, size_t size)
+{
+ u8 *ptr = buffer;
+ u8 csum = 0;
+ size_t i;
+
+ /* compute checksum */
+ for (i = 0; i < size; i++)
+ csum += ptr[i];
+
+ ptr[3] = 256 - csum;
+}
+
+/**
+ * hdmi_avi_infoframe_init() - initialize an HDMI AVI infoframe
+ * @frame: HDMI AVI infoframe
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
+{
+ memset(frame, 0, sizeof(*frame));
+
+ frame->type = HDMI_INFOFRAME_TYPE_AVI;
+ frame->version = 2;
+ frame->length = 13;
+
+ return 0;
+}
+EXPORT_SYMBOL(hdmi_avi_infoframe_init);
+
+/**
+ * hdmi_avi_infoframe_pack() - write HDMI AVI infoframe to binary buffer
+ * @frame: HDMI AVI infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
+ size_t size)
+{
+ u8 *ptr = buffer;
+ size_t length;
+
+ length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
+
+ if (size < length)
+ return -ENOSPC;
+
+ memset(buffer, 0, length);
+
+ ptr[0] = frame->type;
+ ptr[1] = frame->version;
+ ptr[2] = frame->length;
+ ptr[3] = 0; /* checksum */
+
+ /* start infoframe payload */
+ ptr += HDMI_INFOFRAME_HEADER_SIZE;
+
+ ptr[0] = ((frame->colorspace & 0x3) << 5) | (frame->scan_mode & 0x3);
+
+ if (frame->active_info_valid)
+ ptr[0] |= BIT(4);
+
+ if (frame->horizontal_bar_valid)
+ ptr[0] |= BIT(3);
+
+ if (frame->vertical_bar_valid)
+ ptr[0] |= BIT(2);
+
+ ptr[1] = ((frame->colorimetry & 0x3) << 6) |
+ ((frame->picture_aspect & 0x3) << 4) |
+ (frame->active_aspect & 0xf);
+
+ ptr[2] = ((frame->extended_colorimetry & 0x7) << 4) |
+ ((frame->quantization_range & 0x3) << 2) |
+ (frame->nups & 0x3);
+
+ if (frame->itc)
+ ptr[2] |= BIT(7);
+
+ ptr[3] = frame->video_code & 0x7f;
+
+ ptr[4] = ((frame->ycc_quantization_range & 0x3) << 6) |
+ ((frame->content_type & 0x3) << 4) |
+ (frame->pixel_repeat & 0xf);
+
+ ptr[5] = frame->top_bar & 0xff;
+ ptr[6] = (frame->top_bar >> 8) & 0xff;
+ ptr[7] = frame->bottom_bar & 0xff;
+ ptr[8] = (frame->bottom_bar >> 8) & 0xff;
+ ptr[9] = frame->left_bar & 0xff;
+ ptr[10] = (frame->left_bar >> 8) & 0xff;
+ ptr[11] = frame->right_bar & 0xff;
+ ptr[12] = (frame->right_bar >> 8) & 0xff;
+
+ hdmi_infoframe_checksum(buffer, length);
+
+ return length;
+}
+EXPORT_SYMBOL(hdmi_avi_infoframe_pack);
+
+/**
+ * hdmi_spd_infoframe_init() - initialize an HDMI SPD infoframe
+ * @frame: HDMI SPD infoframe
+ * @vendor: vendor string
+ * @product: product string
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
+ const char *vendor, const char *product)
+{
+ memset(frame, 0, sizeof(*frame));
+
+ frame->type = HDMI_INFOFRAME_TYPE_SPD;
+ frame->version = 1;
+ frame->length = 25;
+
+ strncpy(frame->vendor, vendor, sizeof(frame->vendor));
+ strncpy(frame->product, product, sizeof(frame->product));
+
+ return 0;
+}
+EXPORT_SYMBOL(hdmi_spd_infoframe_init);
+
+/**
+ * hdmi_spd_infoframe_pack() - write HDMI SPD infoframe to binary buffer
+ * @frame: HDMI SPD infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
+ size_t size)
+{
+ u8 *ptr = buffer;
+ size_t length;
+
+ length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
+
+ if (size < length)
+ return -ENOSPC;
+
+ memset(buffer, 0, length);
+
+ ptr[0] = frame->type;
+ ptr[1] = frame->version;
+ ptr[2] = frame->length;
+ ptr[3] = 0; /* checksum */
+
+ /* start infoframe payload */
+ ptr += HDMI_INFOFRAME_HEADER_SIZE;
+
+ memcpy(ptr, frame->vendor, sizeof(frame->vendor));
+ memcpy(ptr + 8, frame->product, sizeof(frame->product));
+
+ ptr[24] = frame->sdi;
+
+ hdmi_infoframe_checksum(buffer, length);
+
+ return length;
+}
+EXPORT_SYMBOL(hdmi_spd_infoframe_pack);
+
+/**
+ * hdmi_audio_infoframe_init() - initialize an HDMI audio infoframe
+ * @frame: HDMI audio infoframe
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame)
+{
+ memset(frame, 0, sizeof(*frame));
+
+ frame->type = HDMI_INFOFRAME_TYPE_AUDIO;
+ frame->version = 1;
+ frame->length = 10;
+
+ return 0;
+}
+EXPORT_SYMBOL(hdmi_audio_infoframe_init);
+
+/**
+ * hdmi_audio_infoframe_pack() - write HDMI audio infoframe to binary buffer
+ * @frame: HDMI audio infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
+ void *buffer, size_t size)
+{
+ unsigned char channels;
+ u8 *ptr = buffer;
+ size_t length;
+
+ length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
+
+ if (size < length)
+ return -ENOSPC;
+
+ memset(buffer, 0, length);
+
+ if (frame->channels >= 2)
+ channels = frame->channels - 1;
+ else
+ channels = 0;
+
+ ptr[0] = frame->type;
+ ptr[1] = frame->version;
+ ptr[2] = frame->length;
+ ptr[3] = 0; /* checksum */
+
+ /* start infoframe payload */
+ ptr += HDMI_INFOFRAME_HEADER_SIZE;
+
+ ptr[0] = ((frame->coding_type & 0xf) << 4) | (channels & 0x7);
+ ptr[1] = ((frame->sample_frequency & 0x7) << 2) |
+ (frame->sample_size & 0x3);
+ ptr[2] = frame->coding_type_ext & 0x1f;
+ ptr[3] = frame->channel_allocation;
+ ptr[4] = (frame->level_shift_value & 0xf) << 3;
+
+ if (frame->downmix_inhibit)
+ ptr[4] |= BIT(7);
+
+ hdmi_infoframe_checksum(buffer, length);
+
+ return length;
+}
+EXPORT_SYMBOL(hdmi_audio_infoframe_pack);
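And one for the audio helpers (again not part of this patch, just an illustration of the call sequence). Fields left at zero by hdmi_audio_infoframe_init() mean "refer to stream header":

    #include <linux/hdmi.h>

    static ssize_t example_pack_audio(void *dst, size_t dst_size)
    {
            struct hdmi_audio_infoframe frame;
            int err;

            err = hdmi_audio_infoframe_init(&frame);
            if (err < 0)
                    return err;

            /* zero-initialized fields mean "refer to stream header" */
            frame.channels = 2;

            return hdmi_audio_infoframe_pack(&frame, dst, dst_size);
    }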
+
+/**
+ * hdmi_vendor_infoframe_pack() - write an HDMI vendor infoframe to a binary
+ * buffer
+ * @frame: HDMI vendor infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
+ void *buffer, size_t size)
+{
+ u8 *ptr = buffer;
+ size_t length;
+
+ length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
+
+ if (size < length)
+ return -ENOSPC;
+
+ memset(buffer, 0, length);
+
+ ptr[0] = frame->type;
+ ptr[1] = frame->version;
+ ptr[2] = frame->length;
+ ptr[3] = 0; /* checksum */
+
+ memcpy(&ptr[HDMI_INFOFRAME_HEADER_SIZE], frame->data, frame->length);
+
+ hdmi_infoframe_checksum(buffer, length);
+
+ return length;
+}
+EXPORT_SYMBOL(hdmi_vendor_infoframe_pack);
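A hypothetical sketch for the vendor-specific infoframe (not part of this patch). There is no init helper for it in this series, so type, version, length and the payload are filled by hand; the HDMI_INFOFRAME_TYPE_VENDOR constant and the data[] field are assumed from the new <linux/hdmi.h> header, and the payload bytes are only illustrative (HDMI LLC OUI plus a zero format byte):

    #include <linux/hdmi.h>
    #include <linux/string.h>
    #include <linux/types.h>

    static ssize_t example_pack_vendor(void *dst, size_t dst_size)
    {
            /* illustrative payload: HDMI LLC OUI plus one format byte */
            static const u8 payload[] = { 0x03, 0x0c, 0x00, 0x00 };
            struct hdmi_vendor_infoframe frame = {
                    .type = HDMI_INFOFRAME_TYPE_VENDOR,
                    .version = 1,
                    .length = sizeof(payload),
            };

            memcpy(frame.data, payload, sizeof(payload));

            return hdmi_vendor_infoframe_pack(&frame, dst, dst_size);
    }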
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c
new file mode 100644
index 00000000000..13ecd989701
--- /dev/null
+++ b/drivers/video/of_display_timing.c
@@ -0,0 +1,239 @@
+/*
+ * OF helpers for parsing display timings
+ *
+ * Copyright (c) 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>, Pengutronix
+ *
+ * based on of_videomode.c by Sascha Hauer <s.hauer@pengutronix.de>
+ *
+ * This file is released under the GPLv2
+ */
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <video/display_timing.h>
+#include <video/of_display_timing.h>
+
+/**
+ * parse_timing_property - parse timing_entry from device_node
+ * @np: device_node with the property
+ * @name: name of the property
+ * @result: timing_entry that is filled with the parsed values
+ *
+ * DESCRIPTION:
+ * Every display_timing property can be given either as a single typical
+ * value or as a range of three cells (min/typ/max). This function handles
+ * both forms.
+ **/
+static int parse_timing_property(struct device_node *np, const char *name,
+ struct timing_entry *result)
+{
+ struct property *prop;
+ int length, cells, ret;
+
+ prop = of_find_property(np, name, &length);
+ if (!prop) {
+ pr_err("%s: could not find property %s\n",
+ of_node_full_name(np), name);
+ return -EINVAL;
+ }
+
+ cells = length / sizeof(u32);
+ if (cells == 1) {
+ ret = of_property_read_u32(np, name, &result->typ);
+ result->min = result->typ;
+ result->max = result->typ;
+ } else if (cells == 3) {
+ ret = of_property_read_u32_array(np, name, &result->min, cells);
+ } else {
+ pr_err("%s: illegal timing specification in %s\n",
+ of_node_full_name(np), name);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * of_get_display_timing - parse display_timing entry from device_node
+ * @np: device_node with the properties
+ **/
+static struct display_timing *of_get_display_timing(struct device_node *np)
+{
+ struct display_timing *dt;
+ u32 val = 0;
+ int ret = 0;
+
+ dt = kzalloc(sizeof(*dt), GFP_KERNEL);
+ if (!dt) {
+ pr_err("%s: could not allocate display_timing struct\n",
+ of_node_full_name(np));
+ return NULL;
+ }
+
+ ret |= parse_timing_property(np, "hback-porch", &dt->hback_porch);
+ ret |= parse_timing_property(np, "hfront-porch", &dt->hfront_porch);
+ ret |= parse_timing_property(np, "hactive", &dt->hactive);
+ ret |= parse_timing_property(np, "hsync-len", &dt->hsync_len);
+ ret |= parse_timing_property(np, "vback-porch", &dt->vback_porch);
+ ret |= parse_timing_property(np, "vfront-porch", &dt->vfront_porch);
+ ret |= parse_timing_property(np, "vactive", &dt->vactive);
+ ret |= parse_timing_property(np, "vsync-len", &dt->vsync_len);
+ ret |= parse_timing_property(np, "clock-frequency", &dt->pixelclock);
+
+ dt->dmt_flags = 0;
+ dt->data_flags = 0;
+ if (!of_property_read_u32(np, "vsync-active", &val))
+ dt->dmt_flags |= val ? VESA_DMT_VSYNC_HIGH :
+ VESA_DMT_VSYNC_LOW;
+ if (!of_property_read_u32(np, "hsync-active", &val))
+ dt->dmt_flags |= val ? VESA_DMT_HSYNC_HIGH :
+ VESA_DMT_HSYNC_LOW;
+ if (!of_property_read_u32(np, "de-active", &val))
+ dt->data_flags |= val ? DISPLAY_FLAGS_DE_HIGH :
+ DISPLAY_FLAGS_DE_LOW;
+ if (!of_property_read_u32(np, "pixelclk-active", &val))
+ dt->data_flags |= val ? DISPLAY_FLAGS_PIXDATA_POSEDGE :
+ DISPLAY_FLAGS_PIXDATA_NEGEDGE;
+
+ if (of_property_read_bool(np, "interlaced"))
+ dt->data_flags |= DISPLAY_FLAGS_INTERLACED;
+ if (of_property_read_bool(np, "doublescan"))
+ dt->data_flags |= DISPLAY_FLAGS_DOUBLESCAN;
+
+ if (ret) {
+ pr_err("%s: error reading timing properties\n",
+ of_node_full_name(np));
+ kfree(dt);
+ return NULL;
+ }
+
+ return dt;
+}
+
+/**
+ * of_get_display_timings - parse all display_timing entries from a device_node
+ * @np: device_node with the subnodes
+ **/
+struct display_timings *of_get_display_timings(struct device_node *np)
+{
+ struct device_node *timings_np;
+ struct device_node *entry;
+ struct device_node *native_mode;
+ struct display_timings *disp;
+
+ if (!np) {
+ pr_err("%s: no devicenode given\n", of_node_full_name(np));
+ return NULL;
+ }
+
+ timings_np = of_find_node_by_name(np, "display-timings");
+ if (!timings_np) {
+ pr_err("%s: could not find display-timings node\n",
+ of_node_full_name(np));
+ return NULL;
+ }
+
+ disp = kzalloc(sizeof(*disp), GFP_KERNEL);
+ if (!disp) {
+ pr_err("%s: could not allocate struct disp'\n",
+ of_node_full_name(np));
+ goto dispfail;
+ }
+
+ entry = of_parse_phandle(timings_np, "native-mode", 0);
+ /* assume the first timing subnode as native mode if none is provided */
+ if (!entry)
+ entry = of_get_next_child(timings_np, NULL);
+ /* if there is no child, it is useless to go on */
+ if (!entry) {
+ pr_err("%s: no timing specifications given\n",
+ of_node_full_name(np));
+ goto entryfail;
+ }
+
+ pr_debug("%s: using %s as default timing\n",
+ of_node_full_name(np), entry->name);
+
+ native_mode = entry;
+
+ disp->num_timings = of_get_child_count(timings_np);
+ if (disp->num_timings == 0) {
+ /* should never happen, as entry was already found above */
+ pr_err("%s: no timings specified\n", of_node_full_name(np));
+ goto entryfail;
+ }
+
+ disp->timings = kzalloc(sizeof(struct display_timing *) *
+ disp->num_timings, GFP_KERNEL);
+ if (!disp->timings) {
+ pr_err("%s: could not allocate timings array\n",
+ of_node_full_name(np));
+ goto entryfail;
+ }
+
+ disp->num_timings = 0;
+ disp->native_mode = 0;
+
+ for_each_child_of_node(timings_np, entry) {
+ struct display_timing *dt;
+
+ dt = of_get_display_timing(entry);
+ if (!dt) {
+ /*
+ * to not encourage wrong devicetrees, fail in case of
+ * an error
+ */
+ pr_err("%s: error in timing %d\n",
+ of_node_full_name(np), disp->num_timings + 1);
+ goto timingfail;
+ }
+
+ if (native_mode == entry)
+ disp->native_mode = disp->num_timings;
+
+ disp->timings[disp->num_timings] = dt;
+ disp->num_timings++;
+ }
+ of_node_put(timings_np);
+ /*
+ * native_mode points to the device_node returned by of_parse_phandle
+ * therefore call of_node_put on it
+ */
+ of_node_put(native_mode);
+
+ pr_debug("%s: got %d timings. Using timing #%d as default\n",
+ of_node_full_name(np), disp->num_timings,
+ disp->native_mode + 1);
+
+ return disp;
+
+timingfail:
+ if (native_mode)
+ of_node_put(native_mode);
+ display_timings_release(disp);
+entryfail:
+ kfree(disp);
+dispfail:
+ of_node_put(timings_np);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(of_get_display_timings);
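As a usage illustration (not part of this patch), a driver would typically parse the timings once at probe time, pick the native entry and release the list when done; display_timings_get() and display_timings_release() come from the new <video/display_timing.h> header used elsewhere in this series:

    #include <linux/of.h>
    #include <video/display_timing.h>
    #include <video/of_display_timing.h>

    static int example_parse_panel(struct device_node *np)
    {
            struct display_timings *disp;
            struct display_timing *native;

            disp = of_get_display_timings(np);
            if (!disp)
                    return -EINVAL;

            /* native_mode is an index into the timings array */
            native = display_timings_get(disp, disp->native_mode);
            if (!native) {
                    display_timings_release(disp);
                    return -EINVAL;
            }

            /* ... program the controller from *native ... */

            display_timings_release(disp);
            return 0;
    }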
+
+/**
+ * of_display_timings_exist - check if a display-timings node is provided
+ * @np: device node that may contain a display-timings subnode
+ *
+ * Returns 1 if a display-timings node is present, a negative error code
+ * otherwise.
+ **/
+int of_display_timings_exist(struct device_node *np)
+{
+ struct device_node *timings_np;
+
+ if (!np)
+ return -EINVAL;
+
+ timings_np = of_parse_phandle(np, "display-timings", 0);
+ if (!timings_np)
+ return -EINVAL;
+
+ of_node_put(timings_np);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(of_display_timings_exist);
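of_display_timings_exist() lets a driver bail out before allocating anything; a hypothetical probe-time check (not part of this patch) could look like this:

    #include <linux/of.h>
    #include <video/of_display_timing.h>

    static int example_probe_check(struct device_node *np)
    {
            if (of_display_timings_exist(np) <= 0)
                    return -ENODEV; /* no display-timings node to parse */

            return 0;
    }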
diff --git a/drivers/video/of_videomode.c b/drivers/video/of_videomode.c
new file mode 100644
index 00000000000..5b8066cd397
--- /dev/null
+++ b/drivers/video/of_videomode.c
@@ -0,0 +1,54 @@
+/*
+ * generic videomode helper
+ *
+ * Copyright (c) 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>, Pengutronix
+ *
+ * This file is released under the GPLv2
+ */
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/of.h>
+#include <video/display_timing.h>
+#include <video/of_display_timing.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+
+/**
+ * of_get_videomode - get the videomode #<index> from devicetree
+ * @np: device node with the display_timings
+ * @vm: videomode that is filled with the result
+ * @index: index into the list of display_timings
+ * (set this to OF_USE_NATIVE_MODE to use the mode specified
+ * as native mode in the DT)
+ *
+ * DESCRIPTION:
+ * Get the list of all display timings and put the one selected by @index
+ * into *vm. This function should only be used if a single videomode is
+ * needed. A driver that needs to work with multiple or all videomodes
+ * should use of_get_display_timings instead.
+ **/
+int of_get_videomode(struct device_node *np, struct videomode *vm,
+ int index)
+{
+ struct display_timings *disp;
+ int ret;
+
+ disp = of_get_display_timings(np);
+ if (!disp) {
+ pr_err("%s: no timings specified\n", of_node_full_name(np));
+ return -EINVAL;
+ }
+
+ if (index == OF_USE_NATIVE_MODE)
+ index = disp->native_mode;
+
+ ret = videomode_from_timing(disp, vm, index);
+
+ display_timings_release(disp);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_get_videomode);
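A short caller sketch (not part of this patch). OF_USE_NATIVE_MODE is assumed to come from the new <video/of_display_timing.h> header in this series, and the pixel clock is cast for printing since its exact type lives in <video/videomode.h>:

    #include <linux/kernel.h>
    #include <linux/of.h>
    #include <video/of_display_timing.h>
    #include <video/of_videomode.h>
    #include <video/videomode.h>

    static int example_get_native_mode(struct device_node *np)
    {
            struct videomode vm;
            int ret;

            /* OF_USE_NATIVE_MODE selects the DT's native-mode entry */
            ret = of_get_videomode(np, &vm, OF_USE_NATIVE_MODE);
            if (ret < 0)
                    return ret;

            pr_info("panel: %ux%u, pixel clock %lu Hz\n",
                    vm.hactive, vm.vactive, (unsigned long)vm.pixelclock);
            return 0;
    }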
diff --git a/drivers/video/via/hw.c b/drivers/video/via/hw.c
index 80233dae358..22450908306 100644
--- a/drivers/video/via/hw.c
+++ b/drivers/video/via/hw.c
@@ -1467,10 +1467,10 @@ void viafb_set_vclock(u32 clk, int set_iga)
via_write_misc_reg_mask(0x0C, 0x0C); /* select external clock */
}
-struct display_timing var_to_timing(const struct fb_var_screeninfo *var,
+struct via_display_timing var_to_timing(const struct fb_var_screeninfo *var,
u16 cxres, u16 cyres)
{
- struct display_timing timing;
+ struct via_display_timing timing;
u16 dx = (var->xres - cxres) / 2, dy = (var->yres - cyres) / 2;
timing.hor_addr = cxres;
@@ -1491,7 +1491,7 @@ struct display_timing var_to_timing(const struct fb_var_screeninfo *var,
void viafb_fill_crtc_timing(const struct fb_var_screeninfo *var,
u16 cxres, u16 cyres, int iga)
{
- struct display_timing crt_reg = var_to_timing(var,
+ struct via_display_timing crt_reg = var_to_timing(var,
cxres ? cxres : var->xres, cyres ? cyres : var->yres);
if (iga == IGA1)
diff --git a/drivers/video/via/hw.h b/drivers/video/via/hw.h
index a8205754c73..3be073c58b0 100644
--- a/drivers/video/via/hw.h
+++ b/drivers/video/via/hw.h
@@ -637,7 +637,7 @@ extern int viafb_LCD_ON;
extern int viafb_DVI_ON;
extern int viafb_hotplug;
-struct display_timing var_to_timing(const struct fb_var_screeninfo *var,
+struct via_display_timing var_to_timing(const struct fb_var_screeninfo *var,
u16 cxres, u16 cyres);
void viafb_fill_crtc_timing(const struct fb_var_screeninfo *var,
u16 cxres, u16 cyres, int iga);
diff --git a/drivers/video/via/lcd.c b/drivers/video/via/lcd.c
index 980ee1b1dcf..5d21ff436ec 100644
--- a/drivers/video/via/lcd.c
+++ b/drivers/video/via/lcd.c
@@ -549,7 +549,7 @@ void viafb_lcd_set_mode(const struct fb_var_screeninfo *var, u16 cxres,
int panel_hres = plvds_setting_info->lcd_panel_hres;
int panel_vres = plvds_setting_info->lcd_panel_vres;
u32 clock;
- struct display_timing timing;
+ struct via_display_timing timing;
struct fb_var_screeninfo panel_var;
const struct fb_videomode *mode_crt_table, *panel_crt_table;
diff --git a/drivers/video/via/share.h b/drivers/video/via/share.h
index 3158dfc90be..65c65c611e0 100644
--- a/drivers/video/via/share.h
+++ b/drivers/video/via/share.h
@@ -319,7 +319,7 @@ struct crt_mode_table {
int refresh_rate;
int h_sync_polarity;
int v_sync_polarity;
- struct display_timing crtc;
+ struct via_display_timing crtc;
};
struct io_reg {
diff --git a/drivers/video/via/via_modesetting.c b/drivers/video/via/via_modesetting.c
index 0e431aee17b..0b414b09b9b 100644
--- a/drivers/video/via/via_modesetting.c
+++ b/drivers/video/via/via_modesetting.c
@@ -30,9 +30,9 @@
#include "debug.h"
-void via_set_primary_timing(const struct display_timing *timing)
+void via_set_primary_timing(const struct via_display_timing *timing)
{
- struct display_timing raw;
+ struct via_display_timing raw;
raw.hor_total = timing->hor_total / 8 - 5;
raw.hor_addr = timing->hor_addr / 8 - 1;
@@ -88,9 +88,9 @@ void via_set_primary_timing(const struct display_timing *timing)
via_write_reg_mask(VIACR, 0x17, 0x80, 0x80);
}
-void via_set_secondary_timing(const struct display_timing *timing)
+void via_set_secondary_timing(const struct via_display_timing *timing)
{
- struct display_timing raw;
+ struct via_display_timing raw;
raw.hor_total = timing->hor_total - 1;
raw.hor_addr = timing->hor_addr - 1;
diff --git a/drivers/video/via/via_modesetting.h b/drivers/video/via/via_modesetting.h
index 06e09fe351a..f6a6503da3b 100644
--- a/drivers/video/via/via_modesetting.h
+++ b/drivers/video/via/via_modesetting.h
@@ -33,7 +33,7 @@
#define VIA_PITCH_MAX 0x3FF8
-struct display_timing {
+struct via_display_timing {
u16 hor_total;
u16 hor_addr;
u16 hor_blank_start;
@@ -49,8 +49,8 @@ struct display_timing {
};
-void via_set_primary_timing(const struct display_timing *timing);
-void via_set_secondary_timing(const struct display_timing *timing);
+void via_set_primary_timing(const struct via_display_timing *timing);
+void via_set_secondary_timing(const struct via_display_timing *timing);
void via_set_primary_address(u32 addr);
void via_set_secondary_address(u32 addr);
void via_set_primary_pitch(u32 pitch);
diff --git a/drivers/video/videomode.c b/drivers/video/videomode.c
new file mode 100644
index 00000000000..21c47a202af
--- /dev/null
+++ b/drivers/video/videomode.c
@@ -0,0 +1,39 @@
+/*
+ * generic display timing functions
+ *
+ * Copyright (c) 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>, Pengutronix
+ *
+ * This file is released under the GPLv2
+ */
+
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <video/display_timing.h>
+#include <video/videomode.h>
+
+int videomode_from_timing(const struct display_timings *disp,
+ struct videomode *vm, unsigned int index)
+{
+ struct display_timing *dt;
+
+ dt = display_timings_get(disp, index);
+ if (!dt)
+ return -EINVAL;
+
+ vm->pixelclock = display_timing_get_value(&dt->pixelclock, TE_TYP);
+ vm->hactive = display_timing_get_value(&dt->hactive, TE_TYP);
+ vm->hfront_porch = display_timing_get_value(&dt->hfront_porch, TE_TYP);
+ vm->hback_porch = display_timing_get_value(&dt->hback_porch, TE_TYP);
+ vm->hsync_len = display_timing_get_value(&dt->hsync_len, TE_TYP);
+
+ vm->vactive = display_timing_get_value(&dt->vactive, TE_TYP);
+ vm->vfront_porch = display_timing_get_value(&dt->vfront_porch, TE_TYP);
+ vm->vback_porch = display_timing_get_value(&dt->vback_porch, TE_TYP);
+ vm->vsync_len = display_timing_get_value(&dt->vsync_len, TE_TYP);
+
+ vm->dmt_flags = dt->dmt_flags;
+ vm->data_flags = dt->data_flags;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(videomode_from_timing);
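videomode_from_timing() collapses each min/typ/max range to its typical value, so a caller that already holds a display_timings list can convert its native entry directly (illustrative sketch, not part of this patch):

    #include <video/display_timing.h>
    #include <video/videomode.h>

    static int example_native_videomode(const struct display_timings *disp,
                                        struct videomode *vm)
    {
            /* use the native index recorded while parsing the device tree */
            return videomode_from_timing(disp, vm, disp->native_mode);
    }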