path: root/drivers/gpu
author     Dave Airlie <airlied@redhat.com>   2015-02-05 10:32:44 +1000
committer  Dave Airlie <airlied@redhat.com>   2015-02-05 10:32:44 +1000
commit     225963dd3eb71357b086bc091a5594d0fa2ac783 (patch)
tree       89ae69cbeaa77d1e640628b68b93f42cfdbdadef /drivers/gpu
parent     e4bf44b3b558742fb7c58b4d34e206c8942f07e6 (diff)
parent     b838cbee0d6f0234406e435032b2304f3d05515d (diff)
Merge branch 'drm-intel-next' of git://anongit.freedesktop.org/drm-intel into drm-next
drm-intel-next-2015-01-30:
- chv rps improvements from Ville
- atomic state handling prep work from Ander
- execlist request tracking refactoring from Nick Hoath
- forcewake code consolidation from Chris&Mika
- fastboot plane config refactoring and skl support from Damien
- some more skl pm patches all over (Damien)
- refactor dsi code to use drm dsi helpers and drm_panel infrastructure (Jani)
- first cut at experimental atomic plane updates (Matt Roper)
- piles of smaller things all over, as usual

* 'drm-intel-next' of git://anongit.freedesktop.org/drm-intel: (102 commits)
  drm/i915: Remove bogus locking check in the hangcheck code
  drm/i915: Update DRIVER_DATE to 20150130
  drm/i915: Use pipe_config's cpu_transcoder for reading encoder hw state
  drm/i915: Fix a use-after-free in intel_execlists_retire_requests
  drm/i915: Split shared dpll setup out of __intel_set_mode()
  drm/i915: Don't do posting reads on getting forcewake
  drm/i915: Do uncore early sanitize after domain init
  drm/i915: Handle CHV in vlv_set_rps_idle()
  drm/i915: Remove nested work in gpu error handling
  drm/i915/documentation: Add intel_uncore.c to drm.tmpl
  drm/i915/dsi: remove intel_dsi_cmd.c and the unused functions therein
  drm/i915/dsi: move dpi_send_cmd() to intel_dsi.c and make it static
  drm/i915/dsi: remove old read/write functions in favor of new stuff
  drm/i915/dsi: make the vbt panel driver use mipi_dsi_device for transfers
  drm/i915/dsi: add drm mipi dsi host support
  drm/i915/dsi: switch to drm_panel interface
  drm/i915/skl: Enabling PSR on Skylake
  Revert "drm/i915: Fix mutex->owner inspection race under DEBUG_MUTEXES"
  drm/i915: Be consistent on printing seqnos
  drm/i915: Display current hangcheck status in debugfs
  ...
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i915/Kconfig |    2
-rw-r--r--  drivers/gpu/drm/i915/Makefile |    2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c |  195
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c |   28
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c |   15
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h |  176
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c |   72
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c |   26
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c |    2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c |   77
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c |    5
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h |   28
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c |  133
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c |  237
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c |  116
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c |    2
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c |   19
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c |  105
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1071
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c |  232
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c |   19
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h |  115
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c |  359
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.h |   69
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_cmd.c |  432
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_cmd.h |   78
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_panel_vbt.c |  290
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c |   23
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c |   12
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c |   15
-rw-r--r--  drivers/gpu/drm/i915/intel_fifo_underrun.c |    2
-rw-r--r--  drivers/gpu/drm/i915/intel_frontbuffer.c |    2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c |   51
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c |  187
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h |   41
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c |   19
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c |    2
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c |   16
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c |  262
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c |   34
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c |   22
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h |   14
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c |   15
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c |   31
-rw-r--r--  drivers/gpu/drm/i915/intel_sideband.c |    4
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c |   62
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c |   13
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 1126
48 files changed, 3177 insertions, 2681 deletions
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 4e39ab34eb1c..74acca9bcd9d 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -11,6 +11,8 @@ config DRM_I915
select SHMEM
select TMPFS
select DRM_KMS_HELPER
+ select DRM_PANEL
+ select DRM_MIPI_DSI
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_LCD_SUPPORT if ACPI
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 16e3dc350274..f01922591679 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -66,12 +66,12 @@ i915-y += dvo_ch7017.o \
dvo_ns2501.o \
dvo_sil164.o \
dvo_tfp410.o \
+ intel_atomic.o \
intel_atomic_plane.o \
intel_crt.o \
intel_ddi.o \
intel_dp.o \
intel_dp_mst.o \
- intel_dsi_cmd.o \
intel_dsi.o \
intel_dsi_pll.o \
intel_dsi_panel_vbt.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0d11cbe9f80c..211d4949a675 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -123,7 +123,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
struct i915_vma *vma;
int pin_count = 0;
- seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
+ seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %x %x %x%s%s%s",
&obj->base,
get_pin_flag(obj),
get_tiling_flag(obj),
@@ -569,7 +569,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
struct intel_engine_cs *ring =
i915_gem_request_get_ring(work->flip_queued_req);
- seq_printf(m, "Flip queued on %s at seqno %u, next seqno %u [current breadcrumb %u], completed? %d\n",
+ seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
ring->name,
i915_gem_request_get_seqno(work->flip_queued_req),
dev_priv->next_seqno,
@@ -658,7 +658,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
list_for_each_entry(gem_request,
&ring->request_list,
list) {
- seq_printf(m, " %d @ %d\n",
+ seq_printf(m, " %x @ %d\n",
gem_request->seqno,
(int) (jiffies - gem_request->emitted_jiffies));
}
@@ -676,7 +676,7 @@ static void i915_ring_seqno_info(struct seq_file *m,
struct intel_engine_cs *ring)
{
if (ring->get_seqno) {
- seq_printf(m, "Current sequence (%s): %u\n",
+ seq_printf(m, "Current sequence (%s): %x\n",
ring->name, ring->get_seqno(ring, false));
}
}
@@ -1105,7 +1105,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
if (ret)
goto out;
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
reqf = I915_READ(GEN6_RPNSWREQ);
reqf &= ~GEN6_TURBO_DISABLE;
@@ -1113,7 +1113,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
reqf >>= 24;
else
reqf >>= 25;
- reqf *= GT_FREQUENCY_MULTIPLIER;
+ reqf = intel_gpu_freq(dev_priv, reqf);
rpmodectl = I915_READ(GEN6_RP_CONTROL);
rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
@@ -1130,9 +1130,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
else
cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
- cagf *= GT_FREQUENCY_MULTIPLIER;
+ cagf = intel_gpu_freq(dev_priv, cagf);
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev->struct_mutex);
if (IS_GEN6(dev) || IS_GEN7(dev)) {
@@ -1178,18 +1178,18 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
max_freq = (rp_state_cap & 0xff0000) >> 16;
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
- max_freq * GT_FREQUENCY_MULTIPLIER);
+ intel_gpu_freq(dev_priv, max_freq));
max_freq = (rp_state_cap & 0xff00) >> 8;
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
- max_freq * GT_FREQUENCY_MULTIPLIER);
+ intel_gpu_freq(dev_priv, max_freq));
max_freq = rp_state_cap & 0xff;
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
- max_freq * GT_FREQUENCY_MULTIPLIER);
+ intel_gpu_freq(dev_priv, max_freq));
seq_printf(m, "Max overclocked frequency: %dMHz\n",
- dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
+ intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
} else if (IS_VALLEYVIEW(dev)) {
u32 freq_sts;
@@ -1199,16 +1199,17 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
seq_printf(m, "max GPU freq: %d MHz\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+ intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
seq_printf(m, "min GPU freq: %d MHz\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+ intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
- seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
+ seq_printf(m,
+ "efficient (RPe) frequency: %d MHz\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
seq_printf(m, "current GPU freq: %d MHz\n",
- vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
+ intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
mutex_unlock(&dev_priv->rps.hw_lock);
} else {
seq_puts(m, "no P-state info available\n");
@@ -1219,6 +1220,41 @@ out:
return ret;
}
+static int i915_hangcheck_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_i915_private *dev_priv = to_i915(node->minor->dev);
+ struct intel_engine_cs *ring;
+ int i;
+
+ if (!i915.enable_hangcheck) {
+ seq_printf(m, "Hangcheck disabled\n");
+ return 0;
+ }
+
+ if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
+ seq_printf(m, "Hangcheck active, fires in %dms\n",
+ jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
+ jiffies));
+ } else
+ seq_printf(m, "Hangcheck inactive\n");
+
+ for_each_ring(ring, dev_priv, i) {
+ seq_printf(m, "%s:\n", ring->name);
+ seq_printf(m, "\tseqno = %x [current %x]\n",
+ ring->hangcheck.seqno, ring->get_seqno(ring, false));
+ seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
+ seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
+ seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
+ (long long)ring->hangcheck.acthd,
+ (long long)intel_ring_get_active_head(ring));
+ seq_printf(m, "\tmax ACTHD = 0x%08llx\n",
+ (long long)ring->hangcheck.max_acthd);
+ }
+
+ return 0;
+}
+
static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_info_node *node = m->private;
@@ -1288,14 +1324,31 @@ static int ironlake_drpc_info(struct seq_file *m)
return 0;
}
-static int vlv_drpc_info(struct seq_file *m)
+static int i915_forcewake_domains(struct seq_file *m, void *data)
{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_uncore_forcewake_domain *fw_domain;
+ int i;
+
+ spin_lock_irq(&dev_priv->uncore.lock);
+ for_each_fw_domain(fw_domain, dev_priv, i) {
+ seq_printf(m, "%s.wake_count = %u\n",
+ intel_uncore_forcewake_domain_to_str(i),
+ fw_domain->wake_count);
+ }
+ spin_unlock_irq(&dev_priv->uncore.lock);
+
+ return 0;
+}
+static int vlv_drpc_info(struct seq_file *m)
+{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rpmodectl1, rcctl1, pw_status;
- unsigned fw_rendercount = 0, fw_mediacount = 0;
intel_runtime_pm_get(dev_priv);
@@ -1327,22 +1380,11 @@ static int vlv_drpc_info(struct seq_file *m)
seq_printf(m, "Media RC6 residency since boot: %u\n",
I915_READ(VLV_GT_MEDIA_RC6));
- spin_lock_irq(&dev_priv->uncore.lock);
- fw_rendercount = dev_priv->uncore.fw_rendercount;
- fw_mediacount = dev_priv->uncore.fw_mediacount;
- spin_unlock_irq(&dev_priv->uncore.lock);
-
- seq_printf(m, "Forcewake Render Count = %u\n", fw_rendercount);
- seq_printf(m, "Forcewake Media Count = %u\n", fw_mediacount);
-
-
- return 0;
+ return i915_forcewake_domains(m, NULL);
}
-
static int gen6_drpc_info(struct seq_file *m)
{
-
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1356,7 +1398,7 @@ static int gen6_drpc_info(struct seq_file *m)
intel_runtime_pm_get(dev_priv);
spin_lock_irq(&dev_priv->uncore.lock);
- forcewake_count = dev_priv->uncore.forcewake_count;
+ forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count;
spin_unlock_irq(&dev_priv->uncore.lock);
if (forcewake_count) {
@@ -1671,7 +1713,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
GEN6_PCODE_READ_MIN_FREQ_TABLE,
&ia_freq);
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
- gpu_freq * GT_FREQUENCY_MULTIPLIER,
+ intel_gpu_freq(dev_priv, gpu_freq),
((ia_freq >> 0) & 0xff) * 100,
((ia_freq >> 8) & 0xff) * 100);
}
@@ -1928,7 +1970,7 @@ static int i915_execlists(struct seq_file *m, void *data)
intel_runtime_pm_get(dev_priv);
for_each_ring(ring, dev_priv, ring_id) {
- struct intel_ctx_submit_request *head_req = NULL;
+ struct drm_i915_gem_request *head_req = NULL;
int count = 0;
unsigned long flags;
@@ -1961,7 +2003,7 @@ static int i915_execlists(struct seq_file *m, void *data)
list_for_each(cursor, &ring->execlist_queue)
count++;
head_req = list_first_entry_or_null(&ring->execlist_queue,
- struct intel_ctx_submit_request, execlist_link);
+ struct drm_i915_gem_request, execlist_link);
spin_unlock_irqrestore(&ring->execlist_lock, flags);
seq_printf(m, "\t%d requests in queue\n", count);
@@ -1984,30 +2026,6 @@ static int i915_execlists(struct seq_file *m, void *data)
return 0;
}
-static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0;
-
- spin_lock_irq(&dev_priv->uncore.lock);
- if (IS_VALLEYVIEW(dev)) {
- fw_rendercount = dev_priv->uncore.fw_rendercount;
- fw_mediacount = dev_priv->uncore.fw_mediacount;
- } else
- forcewake_count = dev_priv->uncore.forcewake_count;
- spin_unlock_irq(&dev_priv->uncore.lock);
-
- if (IS_VALLEYVIEW(dev)) {
- seq_printf(m, "fw_rendercount = %u\n", fw_rendercount);
- seq_printf(m, "fw_mediacount = %u\n", fw_mediacount);
- } else
- seq_printf(m, "forcewake count = %u\n", forcewake_count);
-
- return 0;
-}
-
static const char *swizzle_string(unsigned swizzle)
{
switch (swizzle) {
@@ -2400,6 +2418,14 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
return "AUDIO";
case POWER_DOMAIN_PLLS:
return "PLLS";
+ case POWER_DOMAIN_AUX_A:
+ return "AUX_A";
+ case POWER_DOMAIN_AUX_B:
+ return "AUX_B";
+ case POWER_DOMAIN_AUX_C:
+ return "AUX_C";
+ case POWER_DOMAIN_AUX_D:
+ return "AUX_D";
case POWER_DOMAIN_INIT:
return "INIT";
default:
@@ -2628,7 +2654,8 @@ static int i915_display_info(struct seq_file *m, void *unused)
seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
crtc->base.base.id, pipe_name(crtc->pipe),
- yesno(crtc->active), crtc->config.pipe_src_w, crtc->config.pipe_src_h);
+ yesno(crtc->active), crtc->config->pipe_src_w,
+ crtc->config->pipe_src_h);
if (crtc->active) {
intel_crtc_info(m, crtc);
@@ -3362,9 +3389,9 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
* relevant on hsw with pipe A when using the always-on power well
* routing.
*/
- if (crtc->config.cpu_transcoder == TRANSCODER_EDP &&
- !crtc->config.pch_pfit.enabled) {
- crtc->config.pch_pfit.force_thru = true;
+ if (crtc->config->cpu_transcoder == TRANSCODER_EDP &&
+ !crtc->config->pch_pfit.enabled) {
+ crtc->config->pch_pfit.force_thru = true;
intel_display_power_get(dev_priv,
POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
@@ -3388,8 +3415,8 @@ static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
* relevant on hsw with pipe A when using the always-on power well
* routing.
*/
- if (crtc->config.pch_pfit.force_thru) {
- crtc->config.pch_pfit.force_thru = false;
+ if (crtc->config->pch_pfit.force_thru) {
+ crtc->config->pch_pfit.force_thru = false;
dev_priv->display.crtc_disable(&crtc->base);
dev_priv->display.crtc_enable(&crtc->base);
@@ -3942,6 +3969,17 @@ i915_wedged_set(void *data, u64 val)
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
+ /*
+ * There is no safeguard against this debugfs entry colliding
+ * with the hangcheck calling same i915_handle_error() in
+ * parallel, causing an explosion. For now we assume that the
+ * test harness is responsible enough not to inject gpu hangs
+ * while it is writing to 'i915_wedged'
+ */
+
+ if (i915_reset_in_progress(&dev_priv->gpu_error))
+ return -EAGAIN;
+
intel_runtime_pm_get(dev_priv);
i915_handle_error(dev, val,
@@ -4128,10 +4166,7 @@ i915_max_freq_get(void *data, u64 *val)
if (ret)
return ret;
- if (IS_VALLEYVIEW(dev))
- *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
- else
- *val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
+ *val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
mutex_unlock(&dev_priv->rps.hw_lock);
return 0;
@@ -4160,12 +4195,12 @@ i915_max_freq_set(void *data, u64 val)
* Turbo will still be enabled, but won't go above the set value.
*/
if (IS_VALLEYVIEW(dev)) {
- val = vlv_freq_opcode(dev_priv, val);
+ val = intel_freq_opcode(dev_priv, val);
hw_max = dev_priv->rps.max_freq;
hw_min = dev_priv->rps.min_freq;
} else {
- do_div(val, GT_FREQUENCY_MULTIPLIER);
+ val = intel_freq_opcode(dev_priv, val);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = dev_priv->rps.max_freq;
@@ -4209,10 +4244,7 @@ i915_min_freq_get(void *data, u64 *val)
if (ret)
return ret;
- if (IS_VALLEYVIEW(dev))
- *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
- else
- *val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
+ *val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
mutex_unlock(&dev_priv->rps.hw_lock);
return 0;
@@ -4241,12 +4273,12 @@ i915_min_freq_set(void *data, u64 val)
* Turbo will still be enabled, but won't go below the set value.
*/
if (IS_VALLEYVIEW(dev)) {
- val = vlv_freq_opcode(dev_priv, val);
+ val = intel_freq_opcode(dev_priv, val);
hw_max = dev_priv->rps.max_freq;
hw_min = dev_priv->rps.min_freq;
} else {
- do_div(val, GT_FREQUENCY_MULTIPLIER);
+ val = intel_freq_opcode(dev_priv, val);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = dev_priv->rps.max_freq;
@@ -4338,7 +4370,8 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
if (INTEL_INFO(dev)->gen < 6)
return 0;
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ intel_runtime_pm_get(dev_priv);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
return 0;
}
@@ -4351,7 +4384,8 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
if (INTEL_INFO(dev)->gen < 6)
return 0;
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_runtime_pm_put(dev_priv);
return 0;
}
@@ -4414,6 +4448,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
+ {"i915_hangcheck_info", i915_hangcheck_info, 0},
{"i915_drpc_info", i915_drpc_info, 0},
{"i915_emon_status", i915_emon_status, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
@@ -4425,7 +4460,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_context_status", i915_context_status, 0},
{"i915_dump_lrc", i915_dump_lrc, 0},
{"i915_execlists", i915_execlists, 0},
- {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
+ {"i915_forcewake_domains", i915_forcewake_domains, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_ppgtt_info", i915_ppgtt_info, 0},
{"i915_llc", i915_llc, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2447de36de44..1a46787129e7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -92,6 +92,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_VEBOX:
value = intel_ring_initialized(&dev_priv->ring[VECS]);
break;
+ case I915_PARAM_HAS_BSD2:
+ value = intel_ring_initialized(&dev_priv->ring[VCS2]);
+ break;
case I915_PARAM_HAS_RELAXED_FENCING:
value = 1;
break;
@@ -601,6 +604,17 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
info->num_pipes = 0;
}
}
+
+ if (IS_CHERRYVIEW(dev)) {
+ u32 fuse, mask_eu;
+
+ fuse = I915_READ(CHV_FUSE_GT);
+ mask_eu = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
+ CHV_FGT_EU_DIS_SS0_R1_MASK |
+ CHV_FGT_EU_DIS_SS1_R0_MASK |
+ CHV_FGT_EU_DIS_SS1_R1_MASK);
+ info->eu_total = 16 - hweight32(mask_eu);
+ }
}
/**
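
For reference on the EU fuse arithmetic in the hunk above: each bit set in the four CHV_FGT_EU_DIS_* fields fuses off one execution unit, so the driver simply counts set bits. A worked example with a hypothetical fuse value:

	/*
	 * Hypothetical readout, for illustration only: if CHV_FUSE_GT reads
	 * 0xf0000000, all four EU-disable bits of SS1/R1 are set, so
	 * hweight32(mask_eu) == 4 and info->eu_total = 16 - 4 = 12.
	 */
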
@@ -776,6 +790,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_freewq;
}
+ dev_priv->gpu_error.hangcheck_wq =
+ alloc_ordered_workqueue("i915-hangcheck", 0);
+ if (dev_priv->gpu_error.hangcheck_wq == NULL) {
+ DRM_ERROR("Failed to create our hangcheck workqueue.\n");
+ ret = -ENOMEM;
+ goto out_freedpwq;
+ }
+
intel_irq_init(dev_priv);
intel_uncore_sanitize(dev);
@@ -850,6 +872,8 @@ out_gem_unload:
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
pm_qos_remove_request(&dev_priv->pm_qos);
+ destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
+out_freedpwq:
destroy_workqueue(dev_priv->dp_wq);
out_freewq:
destroy_workqueue(dev_priv->wq);
@@ -920,8 +944,7 @@ int i915_driver_unload(struct drm_device *dev)
}
/* Free error state after interrupts are fully disabled. */
- del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
- cancel_work_sync(&dev_priv->gpu_error.work);
+ cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
i915_destroy_error_state(dev);
if (dev->pdev->msi_enabled)
@@ -946,6 +969,7 @@ int i915_driver_unload(struct drm_device *dev)
destroy_workqueue(dev_priv->dp_wq);
destroy_workqueue(dev_priv->wq);
+ destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
pm_qos_remove_request(&dev_priv->pm_qos);
i915_global_gtt_cleanup(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 308774f42079..5f50e7033ef7 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1365,8 +1365,6 @@ static int intel_runtime_suspend(struct device *device)
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
return -ENODEV;
- assert_force_wake_inactive(dev_priv);
-
DRM_DEBUG_KMS("Suspending device\n");
/*
@@ -1404,7 +1402,8 @@ static int intel_runtime_suspend(struct device *device)
return ret;
}
- del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
+ cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
+ intel_uncore_forcewake_reset(dev, false);
dev_priv->pm.suspended = true;
/*
@@ -1432,6 +1431,8 @@ static int intel_runtime_suspend(struct device *device)
intel_opregion_notify_adapter(dev, PCI_D3hot);
}
+ assert_forcewakes_inactive(dev_priv);
+
DRM_DEBUG_KMS("Device suspended\n");
return 0;
}
@@ -1642,6 +1643,14 @@ static int __init i915_init(void)
#endif
}
+ /*
+ * FIXME: Note that we're lying to the DRM core here so that we can get access
+ * to the atomic ioctl and the atomic properties. Only plane operations on
+ * a single CRTC will actually work.
+ */
+ if (i915.nuclear_pageflip)
+ driver.driver_features |= DRIVER_ATOMIC;
+
return drm_pci_init(&driver, &i915_pci_driver);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ede48628bf85..d8b4d0a887f8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -55,7 +55,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20150117"
+#define DRIVER_DATE "20150130"
#undef WARN_ON
/* Many gcc seem to not see through this and fall over :( */
@@ -184,6 +184,10 @@ enum intel_display_power_domain {
POWER_DOMAIN_VGA,
POWER_DOMAIN_AUDIO,
POWER_DOMAIN_PLLS,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_AUX_D,
POWER_DOMAIN_INIT,
POWER_DOMAIN_NUM,
@@ -499,8 +503,8 @@ struct drm_i915_error_state {
struct intel_connector;
struct intel_encoder;
-struct intel_crtc_config;
-struct intel_plane_config;
+struct intel_crtc_state;
+struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;
@@ -538,10 +542,11 @@ struct drm_i915_display_funcs {
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
- struct intel_crtc_config *);
- void (*get_plane_config)(struct intel_crtc *,
- struct intel_plane_config *);
- int (*crtc_compute_clock)(struct intel_crtc *crtc);
+ struct intel_crtc_state *);
+ void (*get_initial_plane_config)(struct intel_crtc *,
+ struct intel_initial_plane_config *);
+ int (*crtc_compute_clock)(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state);
void (*crtc_enable)(struct drm_crtc *crtc);
void (*crtc_disable)(struct drm_crtc *crtc);
void (*off)(struct drm_crtc *crtc);
@@ -574,11 +579,28 @@ struct drm_i915_display_funcs {
void (*enable_backlight)(struct intel_connector *connector);
};
+enum forcewake_domain_id {
+ FW_DOMAIN_ID_RENDER = 0,
+ FW_DOMAIN_ID_BLITTER,
+ FW_DOMAIN_ID_MEDIA,
+
+ FW_DOMAIN_ID_COUNT
+};
+
+enum forcewake_domains {
+ FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER),
+ FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER),
+ FORCEWAKE_MEDIA = (1 << FW_DOMAIN_ID_MEDIA),
+ FORCEWAKE_ALL = (FORCEWAKE_RENDER |
+ FORCEWAKE_BLITTER |
+ FORCEWAKE_MEDIA)
+};
+
struct intel_uncore_funcs {
void (*force_wake_get)(struct drm_i915_private *dev_priv,
- int fw_engine);
+ enum forcewake_domains domains);
void (*force_wake_put)(struct drm_i915_private *dev_priv,
- int fw_engine);
+ enum forcewake_domains domains);
uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
@@ -601,14 +623,31 @@ struct intel_uncore {
struct intel_uncore_funcs funcs;
unsigned fifo_count;
- unsigned forcewake_count;
-
- unsigned fw_rendercount;
- unsigned fw_mediacount;
- unsigned fw_blittercount;
-
- struct timer_list force_wake_timer;
-};
+ enum forcewake_domains fw_domains;
+
+ struct intel_uncore_forcewake_domain {
+ struct drm_i915_private *i915;
+ enum forcewake_domain_id id;
+ unsigned wake_count;
+ struct timer_list timer;
+ u32 reg_set;
+ u32 val_set;
+ u32 val_clear;
+ u32 reg_ack;
+ u32 reg_post;
+ u32 val_reset;
+ } fw_domain[FW_DOMAIN_ID_COUNT];
+};
+
+/* Iterate over initialised fw domains */
+#define for_each_fw_domain_mask(domain__, mask__, dev_priv__, i__) \
+ for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
+ (i__) < FW_DOMAIN_ID_COUNT; \
+ (i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \
+ if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__)))
+
+#define for_each_fw_domain(domain__, dev_priv__, i__) \
+ for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
func(is_mobile) sep \
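
A minimal sketch of how the new iterator is intended to be used, mirroring the i915_forcewake_domains() debugfs hunk earlier in this patch (real callers hold dev_priv->uncore.lock around the walk):

	struct intel_uncore_forcewake_domain *domain;
	int id;

	/* Visit only the domains initialised for this platform. */
	for_each_fw_domain(domain, dev_priv, id)
		DRM_DEBUG_DRIVER("%s: wake_count=%u\n",
				 intel_uncore_forcewake_domain_to_str(id),
				 domain->wake_count);
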
@@ -653,6 +692,7 @@ struct intel_device_info {
int trans_offsets[I915_MAX_TRANSCODERS];
int palette_offsets[I915_MAX_PIPES];
int cursor_offsets[I915_MAX_PIPES];
+ unsigned int eu_total;
};
#undef DEFINE_FLAG
@@ -725,7 +765,7 @@ struct intel_context {
struct {
struct drm_i915_gem_object *state;
struct intel_ringbuffer *ringbuf;
- int unpin_count;
+ int pin_count;
} engine[I915_NUM_RINGS];
struct list_head link;
@@ -776,11 +816,33 @@ struct i915_fbc {
} no_fbc_reason;
};
-struct i915_drrs {
- struct intel_connector *connector;
+/**
+ * HIGH_RR is the highest eDP panel refresh rate read from EDID
+ * LOW_RR is the lowest eDP panel refresh rate found from EDID
+ * parsing for same resolution.
+ */
+enum drrs_refresh_rate_type {
+ DRRS_HIGH_RR,
+ DRRS_LOW_RR,
+ DRRS_MAX_RR, /* RR count */
+};
+
+enum drrs_support_type {
+ DRRS_NOT_SUPPORTED = 0,
+ STATIC_DRRS_SUPPORT = 1,
+ SEAMLESS_DRRS_SUPPORT = 2
};
struct intel_dp;
+struct i915_drrs {
+ struct mutex mutex;
+ struct delayed_work work;
+ struct intel_dp *dp;
+ unsigned busy_frontbuffer_bits;
+ enum drrs_refresh_rate_type refresh_rate_type;
+ enum drrs_support_type type;
+};
+
struct i915_psr {
struct mutex lock;
bool sink_support;
@@ -1283,14 +1345,13 @@ struct i915_gpu_error {
/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
- struct timer_list hangcheck_timer;
+ struct workqueue_struct *hangcheck_wq;
+ struct delayed_work hangcheck_work;
/* For reset and error_state handling. */
spinlock_t lock;
/* Protected by the above dev->gpu_error.lock. */
struct drm_i915_error_state *first_error;
- struct work_struct work;
-
unsigned long missed_irq_rings;
@@ -1360,12 +1421,6 @@ struct ddi_vbt_port_info {
uint8_t supports_dp:1;
};
-enum drrs_support_type {
- DRRS_NOT_SUPPORTED = 0,
- STATIC_DRRS_SUPPORT = 1,
- SEAMLESS_DRRS_SUPPORT = 2
-};
-
enum psr_lines_to_wait {
PSR_0_LINES_TO_WAIT = 0,
PSR_1_LINE_TO_WAIT,
@@ -1999,6 +2054,7 @@ struct drm_i915_gem_object {
*/
unsigned long gt_ro:1;
unsigned int cache_level:3;
+ unsigned int cache_dirty:1;
unsigned int has_dma_mapping:1;
@@ -2071,7 +2127,14 @@ struct drm_i915_gem_request {
/** Position in the ringbuffer of the start of the request */
u32 head;
- /** Position in the ringbuffer of the end of the request */
+ /**
+ * Position in the ringbuffer of the start of the postfix.
+ * This is required to calculate the maximum available ringbuffer
+ * space without overwriting the postfix.
+ */
+ u32 postfix;
+
+ /** Position in the ringbuffer of the end of the whole request */
u32 tail;
/** Context related to this request */
@@ -2091,6 +2154,26 @@ struct drm_i915_gem_request {
struct list_head client_list;
uint32_t uniq;
+
+ /**
+ * The ELSP only accepts two elements at a time, so we queue
+ * context/tail pairs on a given queue (ring->execlist_queue) until the
+ * hardware is available. The queue serves a double purpose: we also use
+ * it to keep track of the up to 2 contexts currently in the hardware
+ * (usually one in execution and the other queued up by the GPU): We
+ * only remove elements from the head of the queue when the hardware
+ * informs us that an element has been completed.
+ *
+ * All accesses to the queue are mediated by a spinlock
+ * (ring->execlist_lock).
+ */
+
+ /** Execlist link in the submission queue.*/
+ struct list_head execlist_link;
+
+ /** Execlists no. of times this request has been sent to the ELSP */
+ int elsp_submitted;
+
};
void i915_gem_request_free(struct kref *req_ref);
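
A minimal sketch of walking the per-ring execlist queue now that it holds plain requests, following the debugfs and reset-cleanup hunks in this patch (locking as described in the comment above):

	struct drm_i915_gem_request *req;
	unsigned long flags;

	spin_lock_irqsave(&ring->execlist_lock, flags);
	list_for_each_entry(req, &ring->execlist_queue, execlist_link)
		DRM_DEBUG_DRIVER("ctx %p submitted to ELSP %d time(s)\n",
				 req->ctx, req->elsp_submitted);
	spin_unlock_irqrestore(&ring->execlist_lock, flags);
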
@@ -2372,7 +2455,8 @@ struct drm_i915_cmd_table {
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \
- IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
+ IS_SKYLAKE(dev))
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
@@ -2443,6 +2527,7 @@ struct i915_params {
int use_mmio_flip;
bool mmio_debug;
bool verbose_state_checks;
+ bool nuclear_pageflip;
};
extern struct i915_params i915 __read_mostly;
@@ -2487,6 +2572,12 @@ extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev);
extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
+const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
+void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+ enum forcewake_domains domains);
+void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+ enum forcewake_domains domains);
+void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
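
As used in the debugfs and i915_frequency_info() hunks of this patch, the consolidated entry points bracket register accesses that must not race with GT power-down; a minimal sketch (variables local to the caller):

	u32 rpstat, rpmodectl;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	rpstat = I915_READ(GEN6_RPSTAT1);
	rpmodectl = I915_READ(GEN6_RP_CONTROL);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
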
@@ -3118,20 +3209,12 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
struct drm_device *dev,
struct intel_display_error_state *error);
-/* On SNB platform, before reading ring registers forcewake bit
- * must be set to prevent GT core from power down and stale values being
- * returned.
- */
-void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
-void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
-void assert_force_wake_inactive(struct drm_i915_private *dev_priv);
-
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
/* intel_sideband.c */
-u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
-void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
+u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
+void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
@@ -3152,15 +3235,8 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
-int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
-int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
-
-#define FORCEWAKE_RENDER (1 << 0)
-#define FORCEWAKE_MEDIA (1 << 1)
-#define FORCEWAKE_BLITTER (1 << 2)
-#define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA | \
- FORCEWAKE_BLITTER)
-
+int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
+int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
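
intel_gpu_freq() and intel_freq_opcode() replace the GT_FREQUENCY_MULTIPLIER / vlv_gpu_freq() split and are inverses of each other; a minimal sketch of the round trip as performed in the sysfs and debugfs hunks:

	int mhz, opcode;

	mhz = intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq);	/* opcode -> MHz */
	opcode = intel_freq_opcode(dev_priv, mhz);		/* MHz -> opcode */
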
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6c403654e33a..36f1093e3c63 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -39,8 +39,7 @@
#include <linux/dma-buf.h>
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
-static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
- bool force);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly);
@@ -1516,7 +1515,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
/* Pinned buffers may be scanout, so flush the cache */
if (obj->pin_display)
- i915_gem_object_flush_cpu_write_domain(obj, true);
+ i915_gem_object_flush_cpu_write_domain(obj);
drm_gem_object_unreference(&obj->base);
unlock:
@@ -2414,7 +2413,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_request *request;
struct intel_ringbuffer *ringbuf;
- u32 request_ring_position, request_start;
+ u32 request_start;
int ret;
request = ring->outstanding_lazy_request;
@@ -2422,8 +2421,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
return -ENOMEM;
if (i915.enable_execlists) {
- struct intel_context *ctx = request->ctx;
- ringbuf = ctx->engine[ring->id].ringbuf;
+ ringbuf = request->ctx->engine[ring->id].ringbuf;
} else
ringbuf = ring->buffer;
@@ -2436,7 +2434,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
* what.
*/
if (i915.enable_execlists) {
- ret = logical_ring_flush_all_caches(ringbuf);
+ ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
if (ret)
return ret;
} else {
@@ -2450,10 +2448,10 @@ int __i915_add_request(struct intel_engine_cs *ring,
* GPU processing the request, we never over-estimate the
* position of the head.
*/
- request_ring_position = intel_ring_get_tail(ringbuf);
+ request->postfix = intel_ring_get_tail(ringbuf);
if (i915.enable_execlists) {
- ret = ring->emit_request(ringbuf);
+ ret = ring->emit_request(ringbuf, request);
if (ret)
return ret;
} else {
@@ -2463,7 +2461,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
}
request->head = request_start;
- request->tail = request_ring_position;
+ request->tail = intel_ring_get_tail(ringbuf);
/* Whilst this request exists, batch_obj will be on the
* active_list, and so will hold the active reference. Only when this
@@ -2650,13 +2648,17 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
* pinned in place.
*/
while (!list_empty(&ring->execlist_queue)) {
- struct intel_ctx_submit_request *submit_req;
+ struct drm_i915_gem_request *submit_req;
submit_req = list_first_entry(&ring->execlist_queue,
- struct intel_ctx_submit_request,
+ struct drm_i915_gem_request,
execlist_link);
list_del(&submit_req->execlist_link);
intel_runtime_pm_put(dev_priv);
+
+ if (submit_req->ctx != ring->default_context)
+ intel_lr_context_unpin(ring, submit_req->ctx);
+
i915_gem_context_unreference(submit_req->ctx);
kfree(submit_req);
}
@@ -2783,7 +2785,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
* of tail of the request to update the last known position
* of the GPU head.
*/
- ringbuf->last_retired_head = request->tail;
+ ringbuf->last_retired_head = request->postfix;
i915_gem_free_request(request);
}
@@ -3634,11 +3636,14 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
* snooping behaviour occurs naturally as the result of our domain
* tracking.
*/
- if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+ if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
+ obj->cache_dirty = true;
return false;
+ }
trace_i915_gem_object_clflush(obj);
drm_clflush_sg(obj->pages);
+ obj->cache_dirty = false;
return true;
}
@@ -3674,15 +3679,14 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
/** Flushes the CPU write domain for the object if it's dirty. */
static void
-i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
- bool force)
+i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
{
uint32_t old_write_domain;
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
return;
- if (i915_gem_clflush_object(obj, force))
+ if (i915_gem_clflush_object(obj, obj->pin_display))
i915_gem_chipset_flush(obj->base.dev);
old_write_domain = obj->base.write_domain;
@@ -3729,7 +3733,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- i915_gem_object_flush_cpu_write_domain(obj, false);
+ i915_gem_object_flush_cpu_write_domain(obj);
/* Serialise direct access to this object with the barriers for
* coherent writes from the GPU, by effectively invalidating the
@@ -3821,27 +3825,11 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
vma->node.color = cache_level;
obj->cache_level = cache_level;
- if (cpu_write_needs_clflush(obj)) {
- u32 old_read_domains, old_write_domain;
-
- /* If we're coming from LLC cached, then we haven't
- * actually been tracking whether the data is in the
- * CPU cache or not, since we only allow one bit set
- * in obj->write_domain and have been skipping the clflushes.
- * Just set it to the CPU cache for now.
- */
- i915_gem_object_retire(obj);
- WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
-
- old_read_domains = obj->base.read_domains;
- old_write_domain = obj->base.write_domain;
-
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-
- trace_i915_gem_object_change_domain(obj,
- old_read_domains,
- old_write_domain);
+ if (obj->cache_dirty &&
+ obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
+ cpu_write_needs_clflush(obj)) {
+ if (i915_gem_clflush_object(obj, true))
+ i915_gem_chipset_flush(obj->base.dev);
}
return 0;
@@ -3991,7 +3979,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
if (ret)
goto err_unpin_display;
- i915_gem_object_flush_cpu_write_domain(obj, true);
+ i915_gem_object_flush_cpu_write_domain(obj);
old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
@@ -4620,7 +4608,7 @@ i915_gem_suspend(struct drm_device *dev)
i915_gem_stop_ringbuffers(dev);
mutex_unlock(&dev->struct_mutex);
- del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
+ cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
flush_delayed_work(&dev_priv->mm.idle_work);
@@ -5111,7 +5099,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
if (!mutex_is_locked(mutex))
return false;
-#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
return mutex->owner == task;
#else
/* Since UP may be pre-empted, we cannot assume that we own the lock */
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index e3ef17783765..b773368fc62c 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1380,13 +1380,35 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
+ if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) &&
+ ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
+ DRM_DEBUG("execbuf with non bsd ring but with invalid "
+ "bsd dispatch flags: %d\n", (int)(args->flags));
+ return -EINVAL;
+ }
+
if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
ring = &dev_priv->ring[RCS];
else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
if (HAS_BSD2(dev)) {
int ring_id;
- ring_id = gen8_dispatch_bsd_ring(dev, file);
- ring = &dev_priv->ring[ring_id];
+
+ switch (args->flags & I915_EXEC_BSD_MASK) {
+ case I915_EXEC_BSD_DEFAULT:
+ ring_id = gen8_dispatch_bsd_ring(dev, file);
+ ring = &dev_priv->ring[ring_id];
+ break;
+ case I915_EXEC_BSD_RING1:
+ ring = &dev_priv->ring[VCS];
+ break;
+ case I915_EXEC_BSD_RING2:
+ ring = &dev_priv->ring[VCS2];
+ break;
+ default:
+ DRM_DEBUG("execbuf with unknown bsd ring: %d\n",
+ (int)(args->flags & I915_EXEC_BSD_MASK));
+ return -EINVAL;
+ }
} else
ring = &dev_priv->ring[VCS];
} else
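
From userspace, the new selector is or'ed into the execbuffer flags next to I915_EXEC_BSD. A hedged fragment assuming the uapi names from include/uapi/drm/i915_drm.h and libdrm's drmIoctl(), with fd an open DRM file descriptor and buffer setup omitted:

	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&execbuf, 0, sizeof(execbuf));
	/* buffers_ptr, buffer_count, batch_len etc. set up as usual */
	execbuf.flags = I915_EXEC_BSD | I915_EXEC_BSD_RING2;	/* pin to the second VCS */
	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
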
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index be5c9908659b..48ddbf44c862 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1052,7 +1052,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
erq = &error->ring[i].requests[count++];
erq->seqno = request->seqno;
erq->jiffies = request->emitted_jiffies;
- erq->tail = request->tail;
+ erq->tail = request->postfix;
}
}
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8fe5a87705f7..4145d95902f5 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -593,7 +593,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
const struct drm_display_mode *mode =
- &intel_crtc->config.adjusted_mode;
+ &intel_crtc->config->base.adjusted_mode;
htotal = mode->crtc_htotal;
hsync_start = mode->crtc_hsync_start;
@@ -664,7 +664,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
+ const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
enum pipe pipe = crtc->pipe;
int position, vtotal;
@@ -691,7 +691,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
+ const struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
bool in_vbl = true;
@@ -849,7 +849,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
vblank_time, flags,
crtc,
- &to_intel_crtc(crtc)->config.adjusted_mode);
+ &to_intel_crtc(crtc)->config->base.adjusted_mode);
}
static bool intel_hpd_irq_event(struct drm_device *dev,
@@ -879,7 +879,7 @@ static void i915_digport_work_func(struct work_struct *work)
container_of(work, struct drm_i915_private, dig_port_work);
u32 long_port_mask, short_port_mask;
struct intel_digital_port *intel_dig_port;
- int i, ret;
+ int i;
u32 old_bits = 0;
spin_lock_irq(&dev_priv->irq_lock);
@@ -903,9 +903,11 @@ static void i915_digport_work_func(struct work_struct *work)
valid = true;
if (valid) {
+ enum irqreturn ret;
+
ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
- if (ret == true) {
- /* if we get true fallback to old school hpd */
+ if (ret == IRQ_NONE) {
+ /* fall back to old school hpd */
old_bits |= (1 << intel_dig_port->base.hpd_pin);
}
}
@@ -2419,19 +2421,15 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
}
/**
- * i915_error_work_func - do process context error handling work
- * @work: work struct
+ * i915_reset_and_wakeup - do process context error handling work
*
* Fire an error uevent so userspace can see that a hang or error
* was detected.
*/
-static void i915_error_work_func(struct work_struct *work)
+static void i915_reset_and_wakeup(struct drm_device *dev)
{
- struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
- work);
- struct drm_i915_private *dev_priv =
- container_of(error, struct drm_i915_private, gpu_error);
- struct drm_device *dev = dev_priv->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_gpu_error *error = &dev_priv->gpu_error;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
@@ -2598,10 +2596,10 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
}
/**
- * i915_handle_error - handle an error interrupt
+ * i915_handle_error - handle a gpu error
* @dev: drm device
*
- * Do some basic checking of register state at error interrupt time and
+ * Do some basic checking of register state at error time and
* dump it to the syslog. Also call i915_capture_error_state() to make
* sure we get a record and make it available in debugfs. Fire a uevent
* so userspace knows something bad happened (should trigger collection
@@ -2626,9 +2624,9 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
&dev_priv->gpu_error.reset_counter);
/*
- * Wakeup waiting processes so that the reset work function
- * i915_error_work_func doesn't deadlock trying to grab various
- * locks. By bumping the reset counter first, the woken
+ * Wakeup waiting processes so that the reset function
+ * i915_reset_and_wakeup doesn't deadlock trying to grab
+ * various locks. By bumping the reset counter first, the woken
* processes will see a reset in progress and back off,
* releasing their locks and then wait for the reset completion.
* We must do this for _all_ gpu waiters that might hold locks
@@ -2641,13 +2639,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
i915_error_wake_up(dev_priv, false);
}
- /*
- * Our reset work can grab modeset locks (since it needs to reset the
- * state of outstanding pagelips). Hence it must not be run on our own
- * dev-priv->wq work queue for otherwise the flush_work in the pageflip
- * code will deadlock.
- */
- schedule_work(&dev_priv->gpu_error.work);
+ i915_reset_and_wakeup(dev);
}
/* Called from drm generic code, passed 'crtc' which
@@ -2972,7 +2964,7 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
return HANGCHECK_HUNG;
}
-/**
+/*
* This is called when the chip hasn't reported back with completed
* batchbuffers in a long time. We keep track per ring seqno progress and
* if there are no progress, hangcheck score for that ring is increased.
@@ -2980,10 +2972,12 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
* we kick the ring. If we see no progress on three subsequent calls
* we assume chip is wedged and try to fix it by resetting the chip.
*/
-static void i915_hangcheck_elapsed(unsigned long data)
+static void i915_hangcheck_elapsed(struct work_struct *work)
{
- struct drm_device *dev = (struct drm_device *)data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv),
+ gpu_error.hangcheck_work.work);
+ struct drm_device *dev = dev_priv->dev;
struct intel_engine_cs *ring;
int i;
int busy_count = 0, rings_hung = 0;
@@ -3097,17 +3091,18 @@ static void i915_hangcheck_elapsed(unsigned long data)
void i915_queue_hangcheck(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct timer_list *timer = &dev_priv->gpu_error.hangcheck_timer;
+ struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
if (!i915.enable_hangcheck)
return;
- /* Don't continually defer the hangcheck, but make sure it is active */
- if (timer_pending(timer))
- return;
- mod_timer(timer,
- round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
+ /* Don't continually defer the hangcheck so that it is always run at
+ * least once after work has been scheduled on any ring. Otherwise,
+ * we will ignore a hung ring if a second ring is kept busy.
+ */
+
+ queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
+ round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
}
static void ibx_irq_reset(struct drm_device *dev)
@@ -4340,7 +4335,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
- INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
@@ -4351,9 +4345,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
- setup_timer(&dev_priv->gpu_error.hangcheck_timer,
- i915_hangcheck_elapsed,
- (unsigned long) dev);
+ INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
+ i915_hangcheck_elapsed);
INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
intel_hpd_irq_reenable_work);
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 07252d8dc726..44f2262a5553 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -52,6 +52,7 @@ struct i915_params i915 __read_mostly = {
.use_mmio_flip = 0,
.mmio_debug = 0,
.verbose_state_checks = 1,
+ .nuclear_pageflip = 0,
};
module_param_named(modeset, i915.modeset, int, 0400);
@@ -178,3 +179,7 @@ MODULE_PARM_DESC(mmio_debug,
module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600);
MODULE_PARM_DESC(verbose_state_checks,
"Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
+
+module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0600);
+MODULE_PARM_DESC(nuclear_pageflip,
+ "Force atomic modeset functionality; only planes work for now (default: false).");
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a39bb0385bcb..33b3d0a24071 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -605,6 +605,15 @@ enum punit_power_well {
#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
#define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */
+#define FB_GFX_FMAX_AT_VMAX_FUSE 0x136
+#define FB_GFX_FREQ_FUSE_MASK 0xff
+#define FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT 24
+#define FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT 16
+#define FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT 8
+
+#define FB_GFX_FMIN_AT_VMIN_FUSE 0x137
+#define FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT 8
+
#define PUNIT_GPU_STATUS_REG 0xdb
#define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT 16
#define PUNIT_GPU_STATUS_MAX_FREQ_MASK 0xff
@@ -1471,6 +1480,17 @@ enum punit_power_well {
#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
+/* Fuse readout registers for GT */
+#define CHV_FUSE_GT (VLV_DISPLAY_BASE + 0x2168)
+#define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16
+#define CHV_FGT_EU_DIS_SS0_R0_MASK (0xf << CHV_FGT_EU_DIS_SS0_R0_SHIFT)
+#define CHV_FGT_EU_DIS_SS0_R1_SHIFT 20
+#define CHV_FGT_EU_DIS_SS0_R1_MASK (0xf << CHV_FGT_EU_DIS_SS0_R1_SHIFT)
+#define CHV_FGT_EU_DIS_SS1_R0_SHIFT 24
+#define CHV_FGT_EU_DIS_SS1_R0_MASK (0xf << CHV_FGT_EU_DIS_SS1_R0_SHIFT)
+#define CHV_FGT_EU_DIS_SS1_R1_SHIFT 28
+#define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT)
+
#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
@@ -3748,6 +3768,11 @@ enum punit_power_well {
#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11)
#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff)
#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0
+#define DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL (1 << 14)
+#define DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL (1 << 13)
+#define DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL (1 << 12)
+#define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL_MASK (0x1f << 5)
+#define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(c) (((c) - 1) << 5)
#define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1)
/*
@@ -6052,6 +6077,9 @@ enum punit_power_well {
#define GEN6_PMINTRMSK 0xA168
#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31)
#define VLV_PWRDWNUPCTL 0xA294
+#define GEN9_MEDIA_PG_IDLE_HYSTERESIS 0xA0C4
+#define GEN9_RENDER_PG_IDLE_HYSTERESIS 0xA0C8
+#define GEN9_PG_ENABLE 0xA210
#define VLV_CHICKEN_3 (VLV_DISPLAY_BASE + 0x7040C)
#define PIXEL_OVERLAP_CNT_MASK (3 << 30)
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 4a5af695307e..49f5ade0edb7 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -49,14 +49,14 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
if (IS_VALLEYVIEW(dev)) {
- u32 reg, czcount_30ns;
+ u32 clk_reg, czcount_30ns;
if (IS_CHERRYVIEW(dev))
- reg = CHV_CLK_CTL1;
+ clk_reg = CHV_CLK_CTL1;
else
- reg = VLV_CLK_CTL2;
+ clk_reg = VLV_CLK_CTL2;
- czcount_30ns = I915_READ(reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT;
+ czcount_30ns = I915_READ(clk_reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT;
if (!czcount_30ns) {
WARN(!czcount_30ns, "bogus CZ count value");
@@ -116,8 +116,6 @@ show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *dminor = dev_to_drm_minor(kdev);
u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
- if (IS_VALLEYVIEW(dminor->dev))
- rc6p_residency = 0;
return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}
@@ -126,8 +124,6 @@ show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *dminor = dev_to_drm_minor(kdev);
u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
- if (IS_VALLEYVIEW(dminor->dev))
- rc6pp_residency = 0;
return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}
@@ -285,7 +281,7 @@ static struct bin_attribute dpf_attrs_1 = {
.private = (void *)1
};
-static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
+static ssize_t gt_act_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
struct drm_minor *minor = dev_to_drm_minor(kdev);
@@ -301,9 +297,14 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
if (IS_VALLEYVIEW(dev_priv->dev)) {
u32 freq;
freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff);
+ ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
} else {
- ret = dev_priv->rps.cur_freq * GT_FREQUENCY_MULTIPLIER;
+ u32 rpstat = I915_READ(GEN6_RPSTAT1);
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
+ else
+ ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
+ ret = intel_gpu_freq(dev_priv, ret);
}
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -312,6 +313,27 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
+static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ intel_runtime_pm_get(dev_priv);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ ret = intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ intel_runtime_pm_put(dev_priv);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
@@ -319,8 +341,9 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- return snprintf(buf, PAGE_SIZE, "%d\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
+ return snprintf(buf, PAGE_SIZE,
+ "%d\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
}
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -333,10 +356,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
mutex_lock(&dev_priv->rps.hw_lock);
- if (IS_VALLEYVIEW(dev_priv->dev))
- ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
- else
- ret = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
+ ret = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
@@ -360,10 +380,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
mutex_lock(&dev_priv->rps.hw_lock);
- if (IS_VALLEYVIEW(dev_priv->dev))
- val = vlv_freq_opcode(dev_priv, val);
- else
- val /= GT_FREQUENCY_MULTIPLIER;
+ val = intel_freq_opcode(dev_priv, val);
if (val < dev_priv->rps.min_freq ||
val > dev_priv->rps.max_freq ||
@@ -374,21 +391,21 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
if (val > dev_priv->rps.rp0_freq)
DRM_DEBUG("User requested overclocking to %d\n",
- val * GT_FREQUENCY_MULTIPLIER);
+ intel_gpu_freq(dev_priv, val));
dev_priv->rps.max_freq_softlimit = val;
- if (dev_priv->rps.cur_freq > val) {
- if (IS_VALLEYVIEW(dev))
- valleyview_set_rps(dev, val);
- else
- gen6_set_rps(dev, val);
- } else if (!IS_VALLEYVIEW(dev)) {
- /* We still need gen6_set_rps to process the new max_delay and
- * update the interrupt limits even though frequency request is
- * unchanged. */
- gen6_set_rps(dev, dev_priv->rps.cur_freq);
- }
+ val = clamp_t(int, dev_priv->rps.cur_freq,
+ dev_priv->rps.min_freq_softlimit,
+ dev_priv->rps.max_freq_softlimit);
+
+ /* We still need *_set_rps to process the new max_delay and
+ * update the interrupt limits and PMINTRMSK even though
+ * frequency request may be unchanged. */
+ if (IS_VALLEYVIEW(dev))
+ valleyview_set_rps(dev, val);
+ else
+ gen6_set_rps(dev, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -405,10 +422,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
mutex_lock(&dev_priv->rps.hw_lock);
- if (IS_VALLEYVIEW(dev_priv->dev))
- ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
- else
- ret = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
+ ret = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
@@ -432,10 +446,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
mutex_lock(&dev_priv->rps.hw_lock);
- if (IS_VALLEYVIEW(dev))
- val = vlv_freq_opcode(dev_priv, val);
- else
- val /= GT_FREQUENCY_MULTIPLIER;
+ val = intel_freq_opcode(dev_priv, val);
if (val < dev_priv->rps.min_freq ||
val > dev_priv->rps.max_freq ||
@@ -446,17 +457,17 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
dev_priv->rps.min_freq_softlimit = val;
- if (dev_priv->rps.cur_freq < val) {
- if (IS_VALLEYVIEW(dev))
- valleyview_set_rps(dev, val);
- else
- gen6_set_rps(dev, val);
- } else if (!IS_VALLEYVIEW(dev)) {
- /* We still need gen6_set_rps to process the new min_delay and
- * update the interrupt limits even though frequency request is
- * unchanged. */
- gen6_set_rps(dev, dev_priv->rps.cur_freq);
- }
+ val = clamp_t(int, dev_priv->rps.cur_freq,
+ dev_priv->rps.min_freq_softlimit,
+ dev_priv->rps.max_freq_softlimit);
+
+ /* We still need *_set_rps to process the new min_delay and
+ * update the interrupt limits and PMINTRMSK even though
+ * frequency request may be unchanged. */
+ if (IS_VALLEYVIEW(dev))
+ valleyview_set_rps(dev, val);
+ else
+ gen6_set_rps(dev, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -464,6 +475,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
}
+static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
@@ -494,19 +506,22 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
if (attr == &dev_attr_gt_RP0_freq_mhz) {
if (IS_VALLEYVIEW(dev))
- val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
+ val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
else
- val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
+ val = intel_gpu_freq(dev_priv,
+ ((rp_state_cap & 0x0000ff) >> 0));
} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
if (IS_VALLEYVIEW(dev))
- val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
+ val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
else
- val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
+ val = intel_gpu_freq(dev_priv,
+ ((rp_state_cap & 0x00ff00) >> 8));
} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
if (IS_VALLEYVIEW(dev))
- val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq);
+ val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
else
- val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
+ val = intel_gpu_freq(dev_priv,
+ ((rp_state_cap & 0xff0000) >> 16));
} else {
BUG();
}
@@ -514,6 +529,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
}
static const struct attribute *gen6_attrs[] = {
+ &dev_attr_gt_act_freq_mhz.attr,
&dev_attr_gt_cur_freq_mhz.attr,
&dev_attr_gt_max_freq_mhz.attr,
&dev_attr_gt_min_freq_mhz.attr,
@@ -524,6 +540,7 @@ static const struct attribute *gen6_attrs[] = {
};
static const struct attribute *vlv_attrs[] = {
+ &dev_attr_gt_act_freq_mhz.attr,
&dev_attr_gt_cur_freq_mhz.attr,
&dev_attr_gt_max_freq_mhz.attr,
&dev_attr_gt_min_freq_mhz.attr,
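With this split, gt_cur_freq_mhz now reports the last software frequency request while the new gt_act_freq_mhz reads back what the hardware is actually running (CAGF on HSW/BDW, the PUNIT status register on VLV/CHV). A minimal userspace sketch for sampling the new attribute; the card0 path is an assumption for a single-GPU system:

#include <stdio.h>

int main(void)
{
	/* Hypothetical path; substitute the correct cardN for your system. */
	FILE *f = fopen("/sys/class/drm/card0/gt_act_freq_mhz", "r");
	int mhz;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &mhz) == 1)
		printf("GT actual frequency: %d MHz\n", mhz);
	fclose(f);
	return 0;
}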
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
new file mode 100644
index 000000000000..19a9dd5408f3
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: atomic modeset support
+ *
+ * The functions here implement the state management and hardware programming
+ * dispatch required by the atomic modeset infrastructure.
+ * See intel_atomic_plane.c for the plane-specific atomic functionality.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include "intel_drv.h"
+
+
+/**
+ * intel_atomic_check - validate state object
+ * @dev: drm device
+ * @state: state to validate
+ */
+int intel_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int nplanes = dev->mode_config.num_total_plane;
+ int ncrtcs = dev->mode_config.num_crtc;
+ int nconnectors = dev->mode_config.num_connector;
+ enum pipe nuclear_pipe = INVALID_PIPE;
+ int ret;
+ int i;
+ bool not_nuclear = false;
+
+ /*
+ * FIXME: At the moment, we only support "nuclear pageflip" on a
+ * single CRTC. Cross-crtc updates will be added later.
+ */
+ for (i = 0; i < nplanes; i++) {
+ struct intel_plane *plane = to_intel_plane(state->planes[i]);
+ if (!plane)
+ continue;
+
+ if (nuclear_pipe == INVALID_PIPE) {
+ nuclear_pipe = plane->pipe;
+ } else if (nuclear_pipe != plane->pipe) {
+			DRM_DEBUG_KMS("i915 only supports atomic plane operations on a single CRTC at the moment\n");
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * FIXME: We only handle planes for now; make sure there are no CRTC's
+ * or connectors involved.
+ */
+ state->allow_modeset = false;
+ for (i = 0; i < ncrtcs; i++) {
+ struct intel_crtc *crtc = to_intel_crtc(state->crtcs[i]);
+ if (crtc && crtc->pipe != nuclear_pipe)
+ not_nuclear = true;
+ }
+ for (i = 0; i < nconnectors; i++)
+ if (state->connectors[i] != NULL)
+ not_nuclear = true;
+
+ if (not_nuclear) {
+ DRM_DEBUG_KMS("i915 only supports atomic plane operations at the moment\n");
+ return -EINVAL;
+ }
+
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+
+/**
+ * intel_atomic_commit - commit validated state object
+ * @dev: DRM device
+ * @state: the top-level driver state object
+ * @async: asynchronous commit
+ *
+ * This function commits a top-level state object that has been validated
+ * with drm_atomic_helper_check().
+ *
+ * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
+ * we can only handle plane-related operations and do not yet support
+ * asynchronous commit.
+ *
+ * RETURNS
+ * Zero for success or -errno.
+ */
+int intel_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool async)
+{
+ int ret;
+ int i;
+
+ if (async) {
+ DRM_DEBUG_KMS("i915 does not yet support async commit\n");
+ return -EINVAL;
+ }
+
+ ret = drm_atomic_helper_prepare_planes(dev, state);
+ if (ret)
+ return ret;
+
+ /* Point of no return */
+
+ /*
+ * FIXME: The proper sequence here will eventually be:
+ *
+ * drm_atomic_helper_swap_state(dev, state)
+ * drm_atomic_helper_commit_pre_planes(dev, state);
+ * drm_atomic_helper_commit_planes(dev, state);
+ * drm_atomic_helper_commit_post_planes(dev, state);
+ * drm_atomic_helper_wait_for_vblanks(dev, state);
+ * drm_atomic_helper_cleanup_planes(dev, state);
+ * drm_atomic_state_free(state);
+ *
+ * once we have full atomic modeset. For now, just manually update
+ * plane states to avoid clobbering good states with dummy states
+ * while nuclear pageflipping.
+ */
+ for (i = 0; i < dev->mode_config.num_total_plane; i++) {
+ struct drm_plane *plane = state->planes[i];
+
+ if (!plane)
+ continue;
+
+ plane->state->state = state;
+ swap(state->plane_states[i], plane->state);
+ plane->state->state = NULL;
+ }
+ drm_atomic_helper_commit_planes(dev, state);
+ drm_atomic_helper_wait_for_vblanks(dev, state);
+ drm_atomic_helper_cleanup_planes(dev, state);
+ drm_atomic_state_free(state);
+
+ return 0;
+}
+
+/**
+ * intel_connector_atomic_get_property - fetch connector property value
+ * @connector: connector to fetch property for
+ * @state: state containing the property value
+ * @property: property to look up
+ * @val: pointer to write property value into
+ *
+ * The DRM core does not store shadow copies of properties for
+ * atomic-capable drivers. This entrypoint is used to fetch
+ * the current value of a driver-specific connector property.
+ */
+int
+intel_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ int i;
+
+ /*
+ * TODO: We only have atomic modeset for planes at the moment, so the
+ * crtc/connector code isn't quite ready yet. Until it's ready,
+ * continue to look up all property values in the DRM's shadow copy
+ * in obj->properties->values[].
+ *
+ * When the crtc/connector state work matures, this function should
+ * be updated to read the values out of the state structure instead.
+ */
+ for (i = 0; i < connector->base.properties->count; i++) {
+ if (connector->base.properties->properties[i] == property) {
+ *val = connector->base.properties->values[i];
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * intel_crtc_duplicate_state - duplicate crtc state
+ * @crtc: drm crtc
+ *
+ * Allocates and returns a copy of the crtc state (both common and
+ * Intel-specific) for the specified crtc.
+ *
+ * Returns: The newly allocated crtc state, or NULL on failure.
+ */
+struct drm_crtc_state *
+intel_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ if (WARN_ON(!intel_crtc->config))
+ return kzalloc(sizeof(*intel_crtc->config), GFP_KERNEL);
+
+ return kmemdup(intel_crtc->config, sizeof(*intel_crtc->config),
+ GFP_KERNEL);
+}
+
+/**
+ * intel_crtc_destroy_state - destroy crtc state
+ * @crtc: drm crtc
+ *
+ * Destroys the crtc state (both common and Intel-specific) for the
+ * specified crtc.
+ */
+void
+intel_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ drm_atomic_helper_crtc_destroy_state(crtc, state);
+}
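These entry points only take effect once they are referenced from the DRM function tables in intel_display.c, which is outside this excerpt (the connector-side hook shows up in the intel_crt.c hunk further down). A hedged sketch of that wiring using the 3.19-era field names; everything other than the four functions defined above is elided or assumed:

/* Illustrative only: expected hookup of the new atomic entry points. */
static const struct drm_mode_config_funcs example_mode_funcs = {
	/* ... .fb_create and other existing hooks elided ... */
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
};

static const struct drm_crtc_funcs example_crtc_funcs = {
	/* ... legacy .set_config/.page_flip/.destroy hooks elided ... */
	.atomic_duplicate_state = intel_crtc_duplicate_state,
	.atomic_destroy_state = intel_crtc_destroy_state,
};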
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 5488efef1837..9e6f727dfd19 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -37,31 +37,58 @@
#include "intel_drv.h"
/**
+ * intel_create_plane_state - create plane state object
+ * @plane: drm plane
+ *
+ * Allocates a fresh plane state for the given plane and sets some of
+ * the state values to sensible initial values.
+ *
+ * Returns: A newly allocated plane state, or NULL on failure
+ */
+struct intel_plane_state *
+intel_create_plane_state(struct drm_plane *plane)
+{
+ struct intel_plane_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ state->base.plane = plane;
+ state->base.rotation = BIT(DRM_ROTATE_0);
+
+ return state;
+}
+
+/**
* intel_plane_duplicate_state - duplicate plane state
* @plane: drm plane
*
* Allocates and returns a copy of the plane state (both common and
* Intel-specific) for the specified plane.
*
- * Returns: The newly allocated plane state, or NULL or failure.
+ * Returns: The newly allocated plane state, or NULL on failure.
*/
struct drm_plane_state *
intel_plane_duplicate_state(struct drm_plane *plane)
{
- struct intel_plane_state *state;
+ struct drm_plane_state *state;
+ struct intel_plane_state *intel_state;
- if (plane->state)
- state = kmemdup(plane->state, sizeof(*state), GFP_KERNEL);
+ if (WARN_ON(!plane->state))
+ intel_state = intel_create_plane_state(plane);
else
- state = kzalloc(sizeof(*state), GFP_KERNEL);
+ intel_state = kmemdup(plane->state, sizeof(*intel_state),
+ GFP_KERNEL);
- if (!state)
+ if (!intel_state)
return NULL;
- if (state->base.fb)
- drm_framebuffer_reference(state->base.fb);
+ state = &intel_state->base;
+ if (state->fb)
+ drm_framebuffer_reference(state->fb);
- return &state->base;
+ return state;
}
/**
@@ -91,6 +118,15 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
intel_crtc = to_intel_crtc(crtc);
/*
+ * Both crtc and plane->crtc could be NULL if we're updating a
+ * property while the plane is disabled. We don't actually have
+ * anything driver-specific we need to test in that case, so
+ * just return success.
+ */
+ if (!crtc)
+ return 0;
+
+ /*
* The original src/dest coordinates are stored in state->base, but
* we want to keep another copy internal to our driver that we can
* clip/modify ourselves.
@@ -108,9 +144,9 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
intel_state->clip.x1 = 0;
intel_state->clip.y1 = 0;
intel_state->clip.x2 =
- intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
+ intel_crtc->active ? intel_crtc->config->pipe_src_w : 0;
intel_state->clip.y2 =
- intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
+ intel_crtc->active ? intel_crtc->config->pipe_src_h : 0;
/*
* Disabling a plane is always okay; we just need to update
@@ -150,3 +186,61 @@ const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
.atomic_update = intel_plane_atomic_update,
};
+/**
+ * intel_plane_atomic_get_property - fetch plane property value
+ * @plane: plane to fetch property for
+ * @state: state containing the property value
+ * @property: property to look up
+ * @val: pointer to write property value into
+ *
+ * The DRM core does not store shadow copies of properties for
+ * atomic-capable drivers. This entrypoint is used to fetch
+ * the current value of a driver-specific plane property.
+ */
+int
+intel_plane_atomic_get_property(struct drm_plane *plane,
+ const struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct drm_mode_config *config = &plane->dev->mode_config;
+
+ if (property == config->rotation_property) {
+ *val = state->rotation;
+ } else {
+ DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * intel_plane_atomic_set_property - set plane property value
+ * @plane: plane to set property for
+ * @state: state to update property value in
+ * @property: property to set
+ * @val: value to set property to
+ *
+ * Writes the specified property value for a plane into the provided atomic
+ * state object.
+ *
+ * Returns 0 on success, -EINVAL on unrecognized properties
+ */
+int
+intel_plane_atomic_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_mode_config *config = &plane->dev->mode_config;
+
+ if (property == config->rotation_property) {
+ state->rotation = val;
+ } else {
+ DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
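The plane-side helpers are consumed the same way, through struct drm_plane_funcs. A sketch under the same caveat (entries other than the helpers defined in this file are elided; intel_plane_destroy_state is defined earlier in the file and not shown in the hunk):

/* Illustrative only: expected plane function-table hookup. */
static const struct drm_plane_funcs example_plane_funcs = {
	/* ... .update_plane, .disable_plane, .destroy, .set_property elided ... */
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
};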
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index ee41b882e71a..2396cc702d18 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -400,7 +400,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
- struct drm_display_mode *mode = &crtc->config.adjusted_mode;
+ struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index a9af9a4866db..e66e17af0a56 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -28,6 +28,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
@@ -110,31 +111,31 @@ static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
}
static void intel_crt_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
int dotclock;
- pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
+ pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
dotclock = pipe_config->port_clock;
if (HAS_PCH_SPLIT(dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
- pipe_config->adjusted_mode.crtc_clock = dotclock;
+ pipe_config->base.adjusted_mode.crtc_clock = dotclock;
}
static void hsw_crt_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
intel_ddi_get_config(encoder, pipe_config);
- pipe_config->adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
+ pipe_config->base.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
DRM_MODE_FLAG_NHSYNC |
DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_NVSYNC);
- pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
+ pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
}
static void hsw_crt_pre_enable(struct intel_encoder *encoder)
@@ -157,7 +158,7 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
u32 adpa;
if (INTEL_INFO(dev)->gen >= 5)
@@ -303,7 +304,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
}
static bool intel_crt_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
@@ -792,6 +793,8 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_crt_destroy,
.set_property = intel_crt_set_property,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_get_property = intel_connector_atomic_get_property,
};
static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
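Most of the remaining churn in the encoder files follows from struct intel_crtc_config being renamed to struct intel_crtc_state, which embeds the core struct drm_crtc_state, while intel_crtc->config becomes a pointer. A trimmed sketch of the resulting layout (the real structure carries many more members):

/* Sketch only; field list heavily trimmed. */
struct intel_crtc_state {
	struct drm_crtc_state base;	/* core state: adjusted_mode, enable, ... */

	/* driver-specific members keep their previous names */
	enum transcoder cpu_transcoder;
	int pipe_bpp;
	int port_clock;
	struct intel_link_m_n fdi_m_n;
};

/*
 * Hence the mechanical conversions seen throughout this merge:
 *   crtc->config.adjusted_mode  becomes  crtc->config->base.adjusted_mode
 *   crtc->config.pipe_bpp       becomes  crtc->config->pipe_bpp
 */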
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 1c92ad47502b..f14e8a2a022d 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -328,7 +328,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
/* Enable the PCH Receiver FDI PLL */
rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
FDI_RX_PLL_ENABLE |
- FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+ FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
POSTING_READ(_FDI_RXA_CTL);
udelay(220);
@@ -338,8 +338,8 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
/* Configure Port Clock Select */
- I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config.ddi_pll_sel);
- WARN_ON(intel_crtc->config.ddi_pll_sel != PORT_CLK_SEL_SPLL);
+ I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config->ddi_pll_sel);
+ WARN_ON(intel_crtc->config->ddi_pll_sel != PORT_CLK_SEL_SPLL);
/* Start the training iterating through available voltages and emphasis,
* testing each value twice. */
@@ -357,7 +357,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
* port reversal bit */
I915_WRITE(DDI_BUF_CTL(PORT_E),
DDI_BUF_CTL_ENABLE |
- ((intel_crtc->config.fdi_lanes - 1) << 1) |
+ ((intel_crtc->config->fdi_lanes - 1) << 1) |
DDI_BUF_TRANS_SELECT(i / 2));
POSTING_READ(DDI_BUF_CTL(PORT_E));
@@ -732,7 +732,7 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
static void skl_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
int link_clock = 0;
@@ -768,15 +768,15 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
pipe_config->port_clock = link_clock;
if (pipe_config->has_dp_encoder)
- pipe_config->adjusted_mode.crtc_clock =
+ pipe_config->base.adjusted_mode.crtc_clock =
intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
else
- pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+ pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
static void hsw_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
int link_clock = 0;
@@ -820,19 +820,19 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
pipe_config->port_clock = link_clock * 2;
if (pipe_config->has_pch_encoder)
- pipe_config->adjusted_mode.crtc_clock =
+ pipe_config->base.adjusted_mode.crtc_clock =
intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->fdi_m_n);
else if (pipe_config->has_dp_encoder)
- pipe_config->adjusted_mode.crtc_clock =
+ pipe_config->base.adjusted_mode.crtc_clock =
intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
else
- pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+ pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
void intel_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
@@ -909,6 +909,7 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
static bool
hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state,
struct intel_encoder *intel_encoder,
int clock)
{
@@ -923,16 +924,16 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p);
- intel_crtc->new_config->dpll_hw_state.wrpll = val;
+ crtc_state->dpll_hw_state.wrpll = val;
- pll = intel_get_shared_dpll(intel_crtc);
+ pll = intel_get_shared_dpll(intel_crtc, crtc_state);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(intel_crtc->pipe));
return false;
}
- intel_crtc->new_config->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
+ crtc_state->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
}
return true;
@@ -1095,6 +1096,7 @@ found:
static bool
skl_ddi_pll_select(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state,
struct intel_encoder *intel_encoder,
int clock)
{
@@ -1144,11 +1146,11 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
} else /* eDP */
return true;
- intel_crtc->new_config->dpll_hw_state.ctrl1 = ctrl1;
- intel_crtc->new_config->dpll_hw_state.cfgcr1 = cfgcr1;
- intel_crtc->new_config->dpll_hw_state.cfgcr2 = cfgcr2;
+ crtc_state->dpll_hw_state.ctrl1 = ctrl1;
+ crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
+ crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
- pll = intel_get_shared_dpll(intel_crtc);
+ pll = intel_get_shared_dpll(intel_crtc, crtc_state);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(intel_crtc->pipe));
@@ -1156,7 +1158,7 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
}
/* shared DPLL id 0 is DPLL 1 */
- intel_crtc->new_config->ddi_pll_sel = pll->id + 1;
+ crtc_state->ddi_pll_sel = pll->id + 1;
return true;
}
@@ -1168,17 +1170,20 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
* For private DPLLs, compute_config() should do the selection for us. This
* function should be folded into compute_config() eventually.
*/
-bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
+bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = intel_crtc->base.dev;
struct intel_encoder *intel_encoder =
intel_ddi_get_crtc_new_encoder(intel_crtc);
- int clock = intel_crtc->new_config->port_clock;
+ int clock = crtc_state->port_clock;
if (IS_SKYLAKE(dev))
- return skl_ddi_pll_select(intel_crtc, intel_encoder, clock);
+ return skl_ddi_pll_select(intel_crtc, crtc_state,
+ intel_encoder, clock);
else
- return hsw_ddi_pll_select(intel_crtc, intel_encoder, clock);
+ return hsw_ddi_pll_select(intel_crtc, crtc_state,
+ intel_encoder, clock);
}
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
@@ -1186,13 +1191,13 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
int type = intel_encoder->type;
uint32_t temp;
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
temp = TRANS_MSA_SYNC_CLK;
- switch (intel_crtc->config.pipe_bpp) {
+ switch (intel_crtc->config->pipe_bpp) {
case 18:
temp |= TRANS_MSA_6_BPC;
break;
@@ -1217,7 +1222,7 @@ void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
uint32_t temp;
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (state == true)
@@ -1235,7 +1240,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = intel_crtc->pipe;
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
uint32_t temp;
@@ -1244,7 +1249,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
temp = TRANS_DDI_FUNC_ENABLE;
temp |= TRANS_DDI_SELECT_PORT(port);
- switch (intel_crtc->config.pipe_bpp) {
+ switch (intel_crtc->config->pipe_bpp) {
case 18:
temp |= TRANS_DDI_BPC_6;
break;
@@ -1261,9 +1266,9 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
BUG();
}
- if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
+ if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
temp |= TRANS_DDI_PVSYNC;
- if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
+ if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DDI_PHSYNC;
if (cpu_transcoder == TRANSCODER_EDP) {
@@ -1274,8 +1279,8 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
* using motion blur mitigation (which we don't
* support). */
if (IS_HASWELL(dev) &&
- (intel_crtc->config.pch_pfit.enabled ||
- intel_crtc->config.pch_pfit.force_thru))
+ (intel_crtc->config->pch_pfit.enabled ||
+ intel_crtc->config->pch_pfit.force_thru))
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
else
temp |= TRANS_DDI_EDP_INPUT_A_ON;
@@ -1293,14 +1298,14 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
}
if (type == INTEL_OUTPUT_HDMI) {
- if (intel_crtc->config.has_hdmi_sink)
+ if (intel_crtc->config->has_hdmi_sink)
temp |= TRANS_DDI_MODE_SELECT_HDMI;
else
temp |= TRANS_DDI_MODE_SELECT_DVI;
} else if (type == INTEL_OUTPUT_ANALOG) {
temp |= TRANS_DDI_MODE_SELECT_FDI;
- temp |= (intel_crtc->config.fdi_lanes - 1) << 1;
+ temp |= (intel_crtc->config->fdi_lanes - 1) << 1;
} else if (type == INTEL_OUTPUT_DISPLAYPORT ||
type == INTEL_OUTPUT_EDP) {
@@ -1450,7 +1455,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP)
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
@@ -1460,7 +1465,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
{
struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP)
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
@@ -1482,7 +1487,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
}
if (IS_SKYLAKE(dev)) {
- uint32_t dpll = crtc->config.ddi_pll_sel;
+ uint32_t dpll = crtc->config->ddi_pll_sel;
uint32_t val;
/*
@@ -1497,7 +1502,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
DPLL_CTRL1_SSC(dpll) |
DPLL_CRTL1_LINK_RATE_MASK(dpll));
- val |= crtc->config.dpll_hw_state.ctrl1 << (dpll * 6);
+ val |= crtc->config->dpll_hw_state.ctrl1 << (dpll * 6);
I915_WRITE(DPLL_CTRL1, val);
POSTING_READ(DPLL_CTRL1);
@@ -1514,8 +1519,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
I915_WRITE(DPLL_CTRL2, val);
} else {
- WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
- I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
+ WARN_ON(crtc->config->ddi_pll_sel == PORT_CLK_SEL_NONE);
+ I915_WRITE(PORT_CLK_SEL(port), crtc->config->ddi_pll_sel);
}
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
@@ -1532,8 +1537,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
intel_hdmi->set_infoframes(encoder,
- crtc->config.has_hdmi_sink,
- &crtc->config.adjusted_mode);
+ crtc->config->has_hdmi_sink,
+ &crtc->config->base.adjusted_mode);
}
}
@@ -1605,9 +1610,10 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
intel_edp_backlight_on(intel_dp);
intel_psr_enable(intel_dp);
+ intel_edp_drrs_enable(intel_dp);
}
- if (intel_crtc->config.has_audio) {
+ if (intel_crtc->config->has_audio) {
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
intel_audio_codec_enable(intel_encoder);
}
@@ -1622,7 +1628,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (intel_crtc->config.has_audio) {
+ if (intel_crtc->config->has_audio) {
intel_audio_codec_disable(intel_encoder);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
}
@@ -1630,6 +1636,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ intel_edp_drrs_disable(intel_dp);
intel_psr_disable(intel_dp);
intel_edp_backlight_off(intel_dp);
}
@@ -2027,11 +2034,11 @@ static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
}
void intel_ddi_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
struct intel_hdmi *intel_hdmi;
u32 temp, flags = 0;
@@ -2045,7 +2052,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
else
flags |= DRM_MODE_FLAG_NVSYNC;
- pipe_config->adjusted_mode.flags |= flags;
+ pipe_config->base.adjusted_mode.flags |= flags;
switch (temp & TRANS_DDI_BPC_MASK) {
case TRANS_DDI_BPC_6:
@@ -2120,7 +2127,7 @@ static void intel_ddi_destroy(struct drm_encoder *encoder)
}
static bool intel_ddi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
int type = encoder->type;
int port = intel_ddi_get_encoder_port(encoder);
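The DDI PLL-selection changes are part of the same state-threading effort: callers now pass the candidate intel_crtc_state explicitly instead of having the selection code reach into intel_crtc->new_config. A hedged sketch of the call shape this produces on the compute-config path (the real caller lives in intel_display.c and is not shown here):

/* Illustrative only: the selection writes its results (ddi_pll_sel,
 * dpll_hw_state) into the state being computed, not into the crtc. */
static bool example_pick_ddi_pll(struct intel_crtc *crtc,
				 struct intel_crtc_state *crtc_state)
{
	if (!intel_ddi_pll_select(crtc, crtc_state))
		return false;

	/* crtc_state->ddi_pll_sel and ->dpll_hw_state are now populated. */
	return true;
}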
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 91d8ada8fe6d..3d220a67f865 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -37,6 +37,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
@@ -76,9 +77,9 @@ static const uint32_t intel_cursor_formats[] = {
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config);
+ struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config);
+ struct intel_crtc_state *pipe_config);
static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *old_fb);
@@ -95,9 +96,9 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void intel_set_pipe_csc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_config *pipe_config);
+ const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_config *pipe_config);
+ const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *crtc);
static void intel_finish_crtc_commit(struct drm_crtc *crtc);
@@ -897,7 +898,7 @@ bool intel_crtc_active(struct drm_crtc *crtc)
* properly reconstruct framebuffers.
*/
return intel_crtc->active && crtc->primary->fb &&
- intel_crtc->config.adjusted_mode.crtc_clock;
+ intel_crtc->config->base.adjusted_mode.crtc_clock;
}
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
@@ -906,7 +907,7 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- return intel_crtc->config.cpu_transcoder;
+ return intel_crtc->config->cpu_transcoder;
}
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
@@ -948,7 +949,7 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
enum pipe pipe = crtc->pipe;
if (INTEL_INFO(dev)->gen >= 4) {
@@ -1054,10 +1055,10 @@ intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- if (crtc->config.shared_dpll < 0)
+ if (crtc->config->shared_dpll < 0)
return NULL;
- return &dev_priv->shared_dplls[crtc->config.shared_dpll];
+ return &dev_priv->shared_dplls[crtc->config->shared_dpll];
}
/* For ILK+ */
@@ -1507,7 +1508,7 @@ static void intel_init_dpio(struct drm_device *dev)
}
static void vlv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_config *pipe_config)
+ const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1546,7 +1547,7 @@ static void vlv_enable_pll(struct intel_crtc *crtc,
}
static void chv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_config *pipe_config)
+ const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1601,7 +1602,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int reg = DPLL(crtc->pipe);
- u32 dpll = crtc->config.dpll_hw_state.dpll;
+ u32 dpll = crtc->config->dpll_hw_state.dpll;
assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -1631,7 +1632,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(DPLL_MD(crtc->pipe),
- crtc->config.dpll_hw_state.dpll_md);
+ crtc->config->dpll_hw_state.dpll_md);
} else {
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
@@ -2036,7 +2037,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
else
assert_pll_enabled(dev_priv, pipe);
else {
- if (crtc->config.has_pch_encoder) {
+ if (crtc->config->has_pch_encoder) {
/* if driving the PCH, we need FDI enabled */
assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
assert_fdi_tx_pll_enabled(dev_priv,
@@ -2070,7 +2071,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
static void intel_disable_pipe(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
enum pipe pipe = crtc->pipe;
int reg;
u32 val;
@@ -2092,7 +2093,7 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
* Double wide has implications for planes
* so best keep it disabled when not needed.
*/
- if (crtc->config.double_wide)
+ if (crtc->config->double_wide)
val &= ~PIPECONF_DOUBLE_WIDE;
/* Don't disable pipe or pipe PLLs if needed */
@@ -2188,11 +2189,12 @@ static bool need_vtd_wa(struct drm_device *dev)
return false;
}
-static int intel_align_height(struct drm_device *dev, int height, bool tiled)
+int
+intel_fb_align_height(struct drm_device *dev, int height, unsigned int tiling)
{
int tile_height;
- tile_height = tiled ? (IS_GEN2(dev) ? 16 : 8) : 1;
+ tile_height = tiling ? (IS_GEN2(dev) ? 16 : 8) : 1;
return ALIGN(height, tile_height);
}
@@ -2315,7 +2317,7 @@ unsigned long intel_gen4_compute_page_offset(int *x, int *y,
}
}
-int intel_format_to_fourcc(int format)
+static int i9xx_format_to_fourcc(int format)
{
switch (format) {
case DISPPLANE_8BPP:
@@ -2336,8 +2338,35 @@ int intel_format_to_fourcc(int format)
}
}
-static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
- struct intel_plane_config *plane_config)
+static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
+{
+ switch (format) {
+ case PLANE_CTL_FORMAT_RGB_565:
+ return DRM_FORMAT_RGB565;
+ default:
+ case PLANE_CTL_FORMAT_XRGB_8888:
+ if (rgb_order) {
+ if (alpha)
+ return DRM_FORMAT_ABGR8888;
+ else
+ return DRM_FORMAT_XBGR8888;
+ } else {
+ if (alpha)
+ return DRM_FORMAT_ARGB8888;
+ else
+ return DRM_FORMAT_XRGB8888;
+ }
+ case PLANE_CTL_FORMAT_XRGB_2101010:
+ if (rgb_order)
+ return DRM_FORMAT_XBGR2101010;
+ else
+ return DRM_FORMAT_XRGB2101010;
+ }
+}
+
+static bool
+intel_alloc_plane_obj(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_gem_object *obj = NULL;
@@ -2352,10 +2381,9 @@ static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
if (!obj)
return false;
- if (plane_config->tiled) {
- obj->tiling_mode = I915_TILING_X;
+ obj->tiling_mode = plane_config->tiling;
+ if (obj->tiling_mode == I915_TILING_X)
obj->stride = crtc->base.primary->fb->pitches[0];
- }
mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
mode_cmd.width = crtc->base.primary->fb->width;
@@ -2382,8 +2410,9 @@ out_unref_obj:
return false;
}
-static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
- struct intel_plane_config *plane_config)
+static void
+intel_find_plane_obj(struct intel_crtc *intel_crtc,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2471,13 +2500,13 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
* which should always be the user's requested size.
*/
I915_WRITE(DSPSIZE(plane),
- ((intel_crtc->config.pipe_src_h - 1) << 16) |
- (intel_crtc->config.pipe_src_w - 1));
+ ((intel_crtc->config->pipe_src_h - 1) << 16) |
+ (intel_crtc->config->pipe_src_w - 1));
I915_WRITE(DSPPOS(plane), 0);
} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
I915_WRITE(PRIMSIZE(plane),
- ((intel_crtc->config.pipe_src_h - 1) << 16) |
- (intel_crtc->config.pipe_src_w - 1));
+ ((intel_crtc->config->pipe_src_h - 1) << 16) |
+ (intel_crtc->config->pipe_src_w - 1));
I915_WRITE(PRIMPOS(plane), 0);
I915_WRITE(PRIMCNSTALPHA(plane), 0);
}
@@ -2532,17 +2561,17 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
intel_crtc->dspaddr_offset = linear_offset;
}
- if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
+ if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
dspcntr |= DISPPLANE_ROTATE_180;
- x += (intel_crtc->config.pipe_src_w - 1);
- y += (intel_crtc->config.pipe_src_h - 1);
+ x += (intel_crtc->config->pipe_src_w - 1);
+ y += (intel_crtc->config->pipe_src_h - 1);
/* Finding the last pixel of the last line of the display
data and adding to linear_offset*/
linear_offset +=
- (intel_crtc->config.pipe_src_h - 1) * fb->pitches[0] +
- (intel_crtc->config.pipe_src_w - 1) * pixel_size;
+ (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
+ (intel_crtc->config->pipe_src_w - 1) * pixel_size;
}
I915_WRITE(reg, dspcntr);
@@ -2634,18 +2663,18 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
pixel_size,
fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
- if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
+ if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
dspcntr |= DISPPLANE_ROTATE_180;
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
- x += (intel_crtc->config.pipe_src_w - 1);
- y += (intel_crtc->config.pipe_src_h - 1);
+ x += (intel_crtc->config->pipe_src_w - 1);
+ y += (intel_crtc->config->pipe_src_h - 1);
/* Finding the last pixel of the last line of the display
data and adding to linear_offset*/
linear_offset +=
- (intel_crtc->config.pipe_src_h - 1) * fb->pitches[0] +
- (intel_crtc->config.pipe_src_w - 1) * pixel_size;
+ (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
+ (intel_crtc->config->pipe_src_w - 1) * pixel_size;
}
}
@@ -2731,7 +2760,7 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
}
plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
- if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180))
+ if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180))
plane_ctl |= PLANE_CTL_ROTATE_180;
I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
@@ -2744,8 +2773,8 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
I915_WRITE(PLANE_POS(pipe, 0), 0);
I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
I915_WRITE(PLANE_SIZE(pipe, 0),
- (intel_crtc->config.pipe_src_h - 1) << 16 |
- (intel_crtc->config.pipe_src_w - 1));
+ (intel_crtc->config->pipe_src_h - 1) << 16 |
+ (intel_crtc->config->pipe_src_w - 1));
I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
@@ -2941,20 +2970,20 @@ static void intel_update_pipe_size(struct intel_crtc *crtc)
* then update the pipesrc and pfit state, even on the flip path.
*/
- adjusted_mode = &crtc->config.adjusted_mode;
+ adjusted_mode = &crtc->config->base.adjusted_mode;
I915_WRITE(PIPESRC(crtc->pipe),
((adjusted_mode->crtc_hdisplay - 1) << 16) |
(adjusted_mode->crtc_vdisplay - 1));
- if (!crtc->config.pch_pfit.enabled &&
+ if (!crtc->config->pch_pfit.enabled &&
(intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
I915_WRITE(PF_CTL(crtc->pipe), 0);
I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
I915_WRITE(PF_WIN_SZ(crtc->pipe), 0);
}
- crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
- crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
+ crtc->config->pipe_src_w = adjusted_mode->crtc_hdisplay;
+ crtc->config->pipe_src_h = adjusted_mode->crtc_vdisplay;
}
static void intel_fdi_normal_train(struct drm_crtc *crtc)
@@ -3001,7 +3030,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
{
return crtc->base.enabled && crtc->active &&
- crtc->config.has_pch_encoder;
+ crtc->config->has_pch_encoder;
}
static void ivb_modeset_global_resources(struct drm_device *dev)
@@ -3056,7 +3085,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
- temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+ temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
I915_WRITE(reg, temp | FDI_TX_ENABLE);
@@ -3154,7 +3183,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
- temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+ temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@ -3305,7 +3334,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
- temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+ temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[j/2];
@@ -3393,7 +3422,7 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
- temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+ temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
@@ -3577,7 +3606,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
+ int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
u32 divsel, phaseinc, auxdiv, phasedir = 0;
u32 temp;
@@ -3666,7 +3695,7 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
I915_READ(HTOTAL(cpu_transcoder)));
@@ -3712,7 +3741,7 @@ static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
case PIPE_A:
break;
case PIPE_B:
- if (intel_crtc->config.fdi_lanes > 2)
+ if (intel_crtc->config->fdi_lanes > 2)
WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
else
cpt_enable_fdi_bc_bifurcation(dev);
@@ -3764,7 +3793,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
temp = I915_READ(PCH_DPLL_SEL);
temp |= TRANS_DPLL_ENABLE(pipe);
sel = TRANS_DPLLB_SEL(pipe);
- if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
+ if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B)
temp |= sel;
else
temp &= ~sel;
@@ -3787,7 +3816,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
- if (HAS_PCH_CPT(dev) && intel_crtc->config.has_dp_encoder) {
+ if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
@@ -3828,7 +3857,7 @@ static void lpt_pch_enable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
@@ -3858,10 +3887,11 @@ void intel_put_shared_dpll(struct intel_crtc *crtc)
WARN_ON(pll->active);
}
- crtc->config.shared_dpll = DPLL_ID_PRIVATE;
+ crtc->config->shared_dpll = DPLL_ID_PRIVATE;
}
-struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_shared_dpll *pll;
@@ -3887,7 +3917,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
if (pll->new_config->crtc_mask == 0)
continue;
- if (memcmp(&crtc->new_config->dpll_hw_state,
+ if (memcmp(&crtc_state->dpll_hw_state,
&pll->new_config->hw_state,
sizeof(pll->new_config->hw_state)) == 0) {
DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
@@ -3912,9 +3942,9 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
found:
if (pll->new_config->crtc_mask == 0)
- pll->new_config->hw_state = crtc->new_config->dpll_hw_state;
+ pll->new_config->hw_state = crtc_state->dpll_hw_state;
- crtc->new_config->shared_dpll = i;
+ crtc_state->shared_dpll = i;
DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
pipe_name(crtc->pipe));
@@ -4011,10 +4041,10 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
- if (crtc->config.pch_pfit.enabled) {
+ if (crtc->config->pch_pfit.enabled) {
I915_WRITE(PS_CTL(pipe), PS_ENABLE);
- I915_WRITE(PS_WIN_POS(pipe), crtc->config.pch_pfit.pos);
- I915_WRITE(PS_WIN_SZ(pipe), crtc->config.pch_pfit.size);
+ I915_WRITE(PS_WIN_POS(pipe), crtc->config->pch_pfit.pos);
+ I915_WRITE(PS_WIN_SZ(pipe), crtc->config->pch_pfit.size);
}
}
@@ -4024,7 +4054,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
- if (crtc->config.pch_pfit.enabled) {
+ if (crtc->config->pch_pfit.enabled) {
/* Force use of hard-coded filter coefficients
* as some pre-programmed values are broken,
* e.g. x201.
@@ -4034,8 +4064,8 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
PF_PIPE_SEL_IVB(pipe));
else
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
- I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
- I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
+ I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
+ I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
}
}
@@ -4072,7 +4102,7 @@ void hsw_enable_ips(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!crtc->config.ips_enabled)
+ if (!crtc->config->ips_enabled)
return;
/* We can only enable IPS after we enable a plane and wait for a vblank */
@@ -4105,7 +4135,7 @@ void hsw_disable_ips(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!crtc->config.ips_enabled)
+ if (!crtc->config->ips_enabled)
return;
assert_plane_enabled(dev_priv, crtc->plane);
@@ -4154,7 +4184,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
/* Workaround : Do not read or write the pipe palette/gamma data while
* GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
*/
- if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
+ if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
GAMMA_MODE_MODE_SPLIT)) {
hsw_disable_ips(intel_crtc);
@@ -4256,17 +4286,17 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->active)
return;
- if (intel_crtc->config.has_pch_encoder)
+ if (intel_crtc->config->has_pch_encoder)
intel_prepare_shared_dpll(intel_crtc);
- if (intel_crtc->config.has_dp_encoder)
+ if (intel_crtc->config->has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
intel_set_pipe_timings(intel_crtc);
- if (intel_crtc->config.has_pch_encoder) {
+ if (intel_crtc->config->has_pch_encoder) {
intel_cpu_transcoder_set_m_n(intel_crtc,
- &intel_crtc->config.fdi_m_n, NULL);
+ &intel_crtc->config->fdi_m_n, NULL);
}
ironlake_set_pipeconf(crtc);
@@ -4280,7 +4310,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
- if (intel_crtc->config.has_pch_encoder) {
+ if (intel_crtc->config->has_pch_encoder) {
/* Note: FDI PLL enabling _must_ be done before we enable the
* cpu pipes, hence this is separate from all the other fdi/pch
* enabling. */
@@ -4301,7 +4331,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
- if (intel_crtc->config.has_pch_encoder)
+ if (intel_crtc->config->has_pch_encoder)
ironlake_pch_enable(crtc);
assert_vblank_disabled(crtc);
@@ -4367,19 +4397,19 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc_to_shared_dpll(intel_crtc))
intel_enable_shared_dpll(intel_crtc);
- if (intel_crtc->config.has_dp_encoder)
+ if (intel_crtc->config->has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
intel_set_pipe_timings(intel_crtc);
- if (intel_crtc->config.cpu_transcoder != TRANSCODER_EDP) {
- I915_WRITE(PIPE_MULT(intel_crtc->config.cpu_transcoder),
- intel_crtc->config.pixel_multiplier - 1);
+ if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) {
+ I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder),
+ intel_crtc->config->pixel_multiplier - 1);
}
- if (intel_crtc->config.has_pch_encoder) {
+ if (intel_crtc->config->has_pch_encoder) {
intel_cpu_transcoder_set_m_n(intel_crtc,
- &intel_crtc->config.fdi_m_n, NULL);
+ &intel_crtc->config->fdi_m_n, NULL);
}
haswell_set_pipeconf(crtc);
@@ -4393,7 +4423,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
- if (intel_crtc->config.has_pch_encoder) {
+ if (intel_crtc->config->has_pch_encoder) {
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
true);
dev_priv->display.fdi_link_train(crtc);
@@ -4418,10 +4448,10 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
- if (intel_crtc->config.has_pch_encoder)
+ if (intel_crtc->config->has_pch_encoder)
lpt_pch_enable(crtc);
- if (intel_crtc->config.dp_encoder_is_mst)
+ if (intel_crtc->config->dp_encoder_is_mst)
intel_ddi_set_vc_payload_alloc(crtc, true);
assert_vblank_disabled(crtc);
@@ -4446,7 +4476,7 @@ static void skylake_pfit_disable(struct intel_crtc *crtc)
/* To avoid upsetting the power well on haswell only disable the pfit if
* it's in use. The hw state code will make sure we get this right. */
- if (crtc->config.pch_pfit.enabled) {
+ if (crtc->config->pch_pfit.enabled) {
I915_WRITE(PS_CTL(pipe), 0);
I915_WRITE(PS_WIN_POS(pipe), 0);
I915_WRITE(PS_WIN_SZ(pipe), 0);
@@ -4461,7 +4491,7 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc)
/* To avoid upsetting the power well on haswell only disable the pfit if
* it's in use. The hw state code will make sure we get this right. */
- if (crtc->config.pch_pfit.enabled) {
+ if (crtc->config->pch_pfit.enabled) {
I915_WRITE(PF_CTL(pipe), 0);
I915_WRITE(PF_WIN_POS(pipe), 0);
I915_WRITE(PF_WIN_SZ(pipe), 0);
@@ -4488,7 +4518,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
- if (intel_crtc->config.has_pch_encoder)
+ if (intel_crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
intel_disable_pipe(intel_crtc);
@@ -4499,7 +4529,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
if (encoder->post_disable)
encoder->post_disable(encoder);
- if (intel_crtc->config.has_pch_encoder) {
+ if (intel_crtc->config->has_pch_encoder) {
ironlake_fdi_disable(crtc);
ironlake_disable_pch_transcoder(dev_priv, pipe);
@@ -4539,7 +4569,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
if (!intel_crtc->active)
return;
@@ -4554,12 +4584,12 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
- if (intel_crtc->config.has_pch_encoder)
+ if (intel_crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
false);
intel_disable_pipe(intel_crtc);
- if (intel_crtc->config.dp_encoder_is_mst)
+ if (intel_crtc->config->dp_encoder_is_mst)
intel_ddi_set_vc_payload_alloc(crtc, false);
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
@@ -4571,7 +4601,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
intel_ddi_disable_pipe_clock(intel_crtc);
- if (intel_crtc->config.has_pch_encoder) {
+ if (intel_crtc->config->has_pch_encoder) {
lpt_disable_pch_transcoder(dev_priv);
intel_ddi_fdi_disable(crtc);
}
@@ -4602,9 +4632,9 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc_config *pipe_config = &crtc->config;
+ struct intel_crtc_state *pipe_config = crtc->config;
- if (!crtc->config.gmch_pfit.control)
+ if (!pipe_config->gmch_pfit.control)
return;
/*
@@ -4683,8 +4713,8 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
mask = BIT(POWER_DOMAIN_PIPE(pipe));
mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
- if (intel_crtc->config.pch_pfit.enabled ||
- intel_crtc->config.pch_pfit.force_thru)
+ if (intel_crtc->config->pch_pfit.enabled ||
+ intel_crtc->config->pch_pfit.force_thru)
mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
for_each_encoder_on_crtc(dev, crtc, intel_encoder)
@@ -4908,7 +4938,7 @@ static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
for_each_intel_crtc(dev, intel_crtc) {
if (intel_crtc->new_enabled)
max_pixclk = max(max_pixclk,
- intel_crtc->new_config->adjusted_mode.crtc_clock);
+ intel_crtc->new_config->base.adjusted_mode.crtc_clock);
}
return max_pixclk;
@@ -4976,12 +5006,12 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
if (!is_dsi) {
if (IS_CHERRYVIEW(dev))
- chv_prepare_pll(intel_crtc, &intel_crtc->config);
+ chv_prepare_pll(intel_crtc, intel_crtc->config);
else
- vlv_prepare_pll(intel_crtc, &intel_crtc->config);
+ vlv_prepare_pll(intel_crtc, intel_crtc->config);
}
- if (intel_crtc->config.has_dp_encoder)
+ if (intel_crtc->config->has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
intel_set_pipe_timings(intel_crtc);
@@ -5005,9 +5035,9 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
if (!is_dsi) {
if (IS_CHERRYVIEW(dev))
- chv_enable_pll(intel_crtc, &intel_crtc->config);
+ chv_enable_pll(intel_crtc, intel_crtc->config);
else
- vlv_enable_pll(intel_crtc, &intel_crtc->config);
+ vlv_enable_pll(intel_crtc, intel_crtc->config);
}
for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -5038,8 +5068,8 @@ static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- I915_WRITE(FP0(crtc->pipe), crtc->config.dpll_hw_state.fp0);
- I915_WRITE(FP1(crtc->pipe), crtc->config.dpll_hw_state.fp1);
+ I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
+ I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
}
static void i9xx_crtc_enable(struct drm_crtc *crtc)
@@ -5057,7 +5087,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
i9xx_set_pll_dividers(intel_crtc);
- if (intel_crtc->config.has_dp_encoder)
+ if (intel_crtc->config->has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
intel_set_pipe_timings(intel_crtc);
@@ -5109,7 +5139,7 @@ static void i9xx_pfit_disable(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!crtc->config.gmch_pfit.control)
+ if (!crtc->config->gmch_pfit.control)
return;
assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -5367,7 +5397,7 @@ bool intel_connector_get_hw_state(struct intel_connector *connector)
}
static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *pipe_B_crtc =
@@ -5408,7 +5438,7 @@ static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
return true;
case PIPE_C:
if (!pipe_has_enabled_pch(pipe_B_crtc) ||
- pipe_B_crtc->config.fdi_lanes <= 2) {
+ pipe_B_crtc->config->fdi_lanes <= 2) {
if (pipe_config->fdi_lanes > 2) {
DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
pipe_name(pipe), pipe_config->fdi_lanes);
@@ -5426,10 +5456,10 @@ static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
#define RETRY 1
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int lane, link_bw, fdi_dotclock;
bool setup_ok, needs_recompute = false;
@@ -5472,7 +5502,7 @@ retry:
}
static void hsw_compute_ips_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
pipe_config->ips_enabled = i915.enable_ips &&
hsw_crtc_supports_ips(crtc) &&
@@ -5480,11 +5510,11 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc,
}
static int intel_crtc_compute_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
/* FIXME should check pixel clock limits on all platforms */
if (INTEL_INFO(dev)->gen < 4) {
@@ -5729,30 +5759,31 @@ static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
}
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
intel_clock_t *reduced_clock)
{
struct drm_device *dev = crtc->base.dev;
u32 fp, fp2 = 0;
if (IS_PINEVIEW(dev)) {
- fp = pnv_dpll_compute_fp(&crtc->new_config->dpll);
+ fp = pnv_dpll_compute_fp(&crtc_state->dpll);
if (reduced_clock)
fp2 = pnv_dpll_compute_fp(reduced_clock);
} else {
- fp = i9xx_dpll_compute_fp(&crtc->new_config->dpll);
+ fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
if (reduced_clock)
fp2 = i9xx_dpll_compute_fp(reduced_clock);
}
- crtc->new_config->dpll_hw_state.fp0 = fp;
+ crtc_state->dpll_hw_state.fp0 = fp;
crtc->lowfreq_avail = false;
if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
reduced_clock && i915.powersave) {
- crtc->new_config->dpll_hw_state.fp1 = fp2;
+ crtc_state->dpll_hw_state.fp1 = fp2;
crtc->lowfreq_avail = true;
} else {
- crtc->new_config->dpll_hw_state.fp1 = fp;
+ crtc_state->dpll_hw_state.fp1 = fp;
}
}
@@ -5805,7 +5836,7 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
- enum transcoder transcoder = crtc->config.cpu_transcoder;
+ enum transcoder transcoder = crtc->config->cpu_transcoder;
if (INTEL_INFO(dev)->gen >= 5) {
I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
@@ -5817,7 +5848,7 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
* registers are not unnecessarily accessed).
*/
if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
- crtc->config.has_drrs) {
+ crtc->config->has_drrs) {
I915_WRITE(PIPE_DATA_M2(transcoder),
TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
@@ -5834,15 +5865,15 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
void intel_dp_set_m_n(struct intel_crtc *crtc)
{
- if (crtc->config.has_pch_encoder)
- intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
+ if (crtc->config->has_pch_encoder)
+ intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
else
- intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n,
- &crtc->config.dp_m2_n2);
+ intel_cpu_transcoder_set_m_n(crtc, &crtc->config->dp_m_n,
+ &crtc->config->dp_m2_n2);
}
static void vlv_update_pll(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
u32 dpll, dpll_md;
@@ -5865,7 +5896,7 @@ static void vlv_update_pll(struct intel_crtc *crtc,
}
static void vlv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_config *pipe_config)
+ const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5926,7 +5957,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x00d0000f);
- if (crtc->config.has_dp_encoder) {
+ if (pipe_config->has_dp_encoder) {
/* Use SSC source */
if (pipe == PIPE_A)
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
@@ -5956,7 +5987,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
}
static void chv_update_pll(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
@@ -5969,7 +6000,7 @@ static void chv_update_pll(struct intel_crtc *crtc,
}
static void chv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_config *pipe_config)
+ const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6054,7 +6085,7 @@ void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
{
struct intel_crtc *crtc =
to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
- struct intel_crtc_config pipe_config = {
+ struct intel_crtc_state pipe_config = {
.pixel_multiplier = 1,
.dpll = *dpll,
};
@@ -6087,6 +6118,7 @@ void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
}
static void i9xx_update_pll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
intel_clock_t *reduced_clock,
int num_connectors)
{
@@ -6094,9 +6126,9 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll;
bool is_sdvo;
- struct dpll *clock = &crtc->new_config->dpll;
+ struct dpll *clock = &crtc_state->dpll;
- i9xx_update_pll_dividers(crtc, reduced_clock);
+ i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
is_sdvo = intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO) ||
intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI);
@@ -6109,14 +6141,14 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
dpll |= DPLLB_MODE_DAC_SERIAL;
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
- dpll |= (crtc->new_config->pixel_multiplier - 1)
+ dpll |= (crtc_state->pixel_multiplier - 1)
<< SDVO_MULTIPLIER_SHIFT_HIRES;
}
if (is_sdvo)
dpll |= DPLL_SDVO_HIGH_SPEED;
- if (crtc->new_config->has_dp_encoder)
+ if (crtc_state->has_dp_encoder)
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
@@ -6144,7 +6176,7 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
if (INTEL_INFO(dev)->gen >= 4)
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
- if (crtc->new_config->sdvo_tv_clock)
+ if (crtc_state->sdvo_tv_clock)
dpll |= PLL_REF_INPUT_TVCLKINBC;
else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
@@ -6153,25 +6185,26 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
dpll |= PLL_REF_INPUT_DREFCLK;
dpll |= DPLL_VCO_ENABLE;
- crtc->new_config->dpll_hw_state.dpll = dpll;
+ crtc_state->dpll_hw_state.dpll = dpll;
if (INTEL_INFO(dev)->gen >= 4) {
- u32 dpll_md = (crtc->new_config->pixel_multiplier - 1)
+ u32 dpll_md = (crtc_state->pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
- crtc->new_config->dpll_hw_state.dpll_md = dpll_md;
+ crtc_state->dpll_hw_state.dpll_md = dpll_md;
}
}
static void i8xx_update_pll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
intel_clock_t *reduced_clock,
int num_connectors)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll;
- struct dpll *clock = &crtc->new_config->dpll;
+ struct dpll *clock = &crtc_state->dpll;
- i9xx_update_pll_dividers(crtc, reduced_clock);
+ i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
dpll = DPLL_VGA_MODE_DIS;
@@ -6196,7 +6229,7 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
dpll |= PLL_REF_INPUT_DREFCLK;
dpll |= DPLL_VCO_ENABLE;
- crtc->new_config->dpll_hw_state.dpll = dpll;
+ crtc_state->dpll_hw_state.dpll = dpll;
}
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
@@ -6204,9 +6237,9 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = intel_crtc->pipe;
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
struct drm_display_mode *adjusted_mode =
- &intel_crtc->config.adjusted_mode;
+ &intel_crtc->config->base.adjusted_mode;
uint32_t crtc_vtotal, crtc_vblank_end;
int vsyncshift = 0;
@@ -6264,12 +6297,12 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
* always be the user's requested size.
*/
I915_WRITE(PIPESRC(pipe),
- ((intel_crtc->config.pipe_src_w - 1) << 16) |
- (intel_crtc->config.pipe_src_h - 1));
+ ((intel_crtc->config->pipe_src_w - 1) << 16) |
+ (intel_crtc->config->pipe_src_h - 1));
}
static void intel_get_pipe_timings(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6277,56 +6310,56 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
uint32_t tmp;
tmp = I915_READ(HTOTAL(cpu_transcoder));
- pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
- pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(HBLANK(cpu_transcoder));
- pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
- pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(HSYNC(cpu_transcoder));
- pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
- pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(VTOTAL(cpu_transcoder));
- pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
- pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(VBLANK(cpu_transcoder));
- pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
- pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(VSYNC(cpu_transcoder));
- pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
- pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
- pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
- pipe_config->adjusted_mode.crtc_vtotal += 1;
- pipe_config->adjusted_mode.crtc_vblank_end += 1;
+ pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
+ pipe_config->base.adjusted_mode.crtc_vtotal += 1;
+ pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
}
tmp = I915_READ(PIPESRC(crtc->pipe));
pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
- pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
- pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
+ pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
+ pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
}
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
- mode->hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
- mode->htotal = pipe_config->adjusted_mode.crtc_htotal;
- mode->hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
- mode->hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
+ mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
+ mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
+ mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
+ mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
- mode->vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
- mode->vtotal = pipe_config->adjusted_mode.crtc_vtotal;
- mode->vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
- mode->vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
+ mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
+ mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
+ mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
+ mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
- mode->flags = pipe_config->adjusted_mode.flags;
+ mode->flags = pipe_config->base.adjusted_mode.flags;
- mode->clock = pipe_config->adjusted_mode.crtc_clock;
- mode->flags |= pipe_config->adjusted_mode.flags;
+ mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
+ mode->flags |= pipe_config->base.adjusted_mode.flags;
}
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
@@ -6341,17 +6374,17 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
(intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
- if (intel_crtc->config.double_wide)
+ if (intel_crtc->config->double_wide)
pipeconf |= PIPECONF_DOUBLE_WIDE;
/* only g4x and later have fancy bpc/dither controls */
if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
/* Bspec claims that we can't use dithering for 30bpp pipes. */
- if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30)
+ if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
pipeconf |= PIPECONF_DITHER_EN |
PIPECONF_DITHER_TYPE_SP;
- switch (intel_crtc->config.pipe_bpp) {
+ switch (intel_crtc->config->pipe_bpp) {
case 18:
pipeconf |= PIPECONF_6BPC;
break;
@@ -6376,7 +6409,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
}
}
- if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (INTEL_INFO(dev)->gen < 4 ||
intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
@@ -6385,14 +6418,15 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
} else
pipeconf |= PIPECONF_PROGRESSIVE;
- if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
+ if (IS_VALLEYVIEW(dev) && intel_crtc->config->limited_color_range)
pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
POSTING_READ(PIPECONF(intel_crtc->pipe));
}
-static int i9xx_crtc_compute_clock(struct intel_crtc *crtc)
+static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6424,7 +6458,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc)
if (is_dsi)
return 0;
- if (!crtc->new_config->clock_set) {
+ if (!crtc_state->clock_set) {
refclk = i9xx_get_refclk(crtc, num_connectors);
/*
@@ -6435,7 +6469,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc)
*/
limit = intel_limit(crtc, refclk);
ok = dev_priv->display.find_dpll(limit, crtc,
- crtc->new_config->port_clock,
+ crtc_state->port_clock,
refclk, NULL, &clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
@@ -6456,23 +6490,23 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc)
&reduced_clock);
}
/* Compat-code for transition, will disappear. */
- crtc->new_config->dpll.n = clock.n;
- crtc->new_config->dpll.m1 = clock.m1;
- crtc->new_config->dpll.m2 = clock.m2;
- crtc->new_config->dpll.p1 = clock.p1;
- crtc->new_config->dpll.p2 = clock.p2;
+ crtc_state->dpll.n = clock.n;
+ crtc_state->dpll.m1 = clock.m1;
+ crtc_state->dpll.m2 = clock.m2;
+ crtc_state->dpll.p1 = clock.p1;
+ crtc_state->dpll.p2 = clock.p2;
}
if (IS_GEN2(dev)) {
- i8xx_update_pll(crtc,
+ i8xx_update_pll(crtc, crtc_state,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
} else if (IS_CHERRYVIEW(dev)) {
- chv_update_pll(crtc, crtc->new_config);
+ chv_update_pll(crtc, crtc_state);
} else if (IS_VALLEYVIEW(dev)) {
- vlv_update_pll(crtc, crtc->new_config);
+ vlv_update_pll(crtc, crtc_state);
} else {
- i9xx_update_pll(crtc,
+ i9xx_update_pll(crtc, crtc_state,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
}
@@ -6481,7 +6515,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc)
}
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6511,7 +6545,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
}
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6540,8 +6574,9 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
pipe_config->port_clock = clock.dot / 5;
}
-static void i9xx_get_plane_config(struct intel_crtc *crtc,
- struct intel_plane_config *plane_config)
+static void
+i9xx_get_initial_plane_config(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6549,27 +6584,30 @@ static void i9xx_get_plane_config(struct intel_crtc *crtc,
int pipe = crtc->pipe, plane = crtc->plane;
int fourcc, pixel_format;
int aligned_height;
+ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
- crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
- if (!crtc->base.primary->fb) {
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb) {
DRM_DEBUG_KMS("failed to alloc fb\n");
return;
}
+ fb = &intel_fb->base;
+
val = I915_READ(DSPCNTR(plane));
if (INTEL_INFO(dev)->gen >= 4)
if (val & DISPPLANE_TILED)
- plane_config->tiled = true;
+ plane_config->tiling = I915_TILING_X;
pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
- fourcc = intel_format_to_fourcc(pixel_format);
- crtc->base.primary->fb->pixel_format = fourcc;
- crtc->base.primary->fb->bits_per_pixel =
- drm_format_plane_cpp(fourcc, 0) * 8;
+ fourcc = i9xx_format_to_fourcc(pixel_format);
+ fb->pixel_format = fourcc;
+ fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
if (INTEL_INFO(dev)->gen >= 4) {
- if (plane_config->tiled)
+ if (plane_config->tiling)
offset = I915_READ(DSPTILEOFF(plane));
else
offset = I915_READ(DSPLINOFF(plane));
@@ -6580,29 +6618,27 @@ static void i9xx_get_plane_config(struct intel_crtc *crtc,
plane_config->base = base;
val = I915_READ(PIPESRC(pipe));
- crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
- crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
+ fb->width = ((val >> 16) & 0xfff) + 1;
+ fb->height = ((val >> 0) & 0xfff) + 1;
val = I915_READ(DSPSTRIDE(pipe));
- crtc->base.primary->fb->pitches[0] = val & 0xffffffc0;
+ fb->pitches[0] = val & 0xffffffc0;
- aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
- plane_config->tiled);
+ aligned_height = intel_fb_align_height(dev, fb->height,
+ plane_config->tiling);
- plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
- aligned_height);
+ plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);
- DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
- pipe, plane, crtc->base.primary->fb->width,
- crtc->base.primary->fb->height,
- crtc->base.primary->fb->bits_per_pixel, base,
- crtc->base.primary->fb->pitches[0],
+ DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ pipe_name(pipe), plane, fb->width, fb->height,
+ fb->bits_per_pixel, base, fb->pitches[0],
plane_config->size);
+ crtc->base.primary->fb = fb;
}
static void chv_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6632,7 +6668,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
}
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7112,7 +7148,7 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc)
val = 0;
- switch (intel_crtc->config.pipe_bpp) {
+ switch (intel_crtc->config->pipe_bpp) {
case 18:
val |= PIPECONF_6BPC;
break;
@@ -7130,15 +7166,15 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc)
BUG();
}
- if (intel_crtc->config.dither)
+ if (intel_crtc->config->dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
- if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
val |= PIPECONF_INTERLACED_ILK;
else
val |= PIPECONF_PROGRESSIVE;
- if (intel_crtc->config.limited_color_range)
+ if (intel_crtc->config->limited_color_range)
val |= PIPECONF_COLOR_RANGE_SELECT;
I915_WRITE(PIPECONF(pipe), val);
@@ -7167,7 +7203,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
* consideration.
*/
- if (intel_crtc->config.limited_color_range)
+ if (intel_crtc->config->limited_color_range)
coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
/*
@@ -7191,7 +7227,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
if (INTEL_INFO(dev)->gen > 6) {
uint16_t postoff = 0;
- if (intel_crtc->config.limited_color_range)
+ if (intel_crtc->config->limited_color_range)
postoff = (16 * (1 << 12) / 255) & 0x1fff;
I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
@@ -7202,7 +7238,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
} else {
uint32_t mode = CSC_MODE_YUV_TO_RGB;
- if (intel_crtc->config.limited_color_range)
+ if (intel_crtc->config->limited_color_range)
mode |= CSC_BLACK_SCREEN_OFFSET;
I915_WRITE(PIPE_CSC_MODE(pipe), mode);
@@ -7215,15 +7251,15 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
uint32_t val;
val = 0;
- if (IS_HASWELL(dev) && intel_crtc->config.dither)
+ if (IS_HASWELL(dev) && intel_crtc->config->dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
- if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
val |= PIPECONF_INTERLACED_ILK;
else
val |= PIPECONF_PROGRESSIVE;
@@ -7237,7 +7273,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
val = 0;
- switch (intel_crtc->config.pipe_bpp) {
+ switch (intel_crtc->config->pipe_bpp) {
case 18:
val |= PIPEMISC_DITHER_6_BPC;
break;
@@ -7255,7 +7291,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
BUG();
}
- if (intel_crtc->config.dither)
+ if (intel_crtc->config->dither)
val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
I915_WRITE(PIPEMISC(pipe), val);
@@ -7263,6 +7299,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
}
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
intel_clock_t *clock,
bool *has_reduced_clock,
intel_clock_t *reduced_clock)
@@ -7285,7 +7322,7 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
*/
limit = intel_limit(intel_crtc, refclk);
ret = dev_priv->display.find_dpll(limit, intel_crtc,
- intel_crtc->new_config->port_clock,
+ crtc_state->port_clock,
refclk, NULL, clock);
if (!ret)
return false;
@@ -7324,6 +7361,7 @@ static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
}
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state,
u32 *fp,
intel_clock_t *reduced_clock, u32 *fp2)
{
@@ -7361,10 +7399,10 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
dev_priv->vbt.lvds_ssc_freq == 100000) ||
(HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
factor = 25;
- } else if (intel_crtc->new_config->sdvo_tv_clock)
+ } else if (crtc_state->sdvo_tv_clock)
factor = 20;
- if (ironlake_needs_fb_cb_tune(&intel_crtc->new_config->dpll, factor))
+ if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
*fp |= FP_CB_TUNE;
if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
@@ -7377,20 +7415,20 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
else
dpll |= DPLLB_MODE_DAC_SERIAL;
- dpll |= (intel_crtc->new_config->pixel_multiplier - 1)
+ dpll |= (crtc_state->pixel_multiplier - 1)
<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
if (is_sdvo)
dpll |= DPLL_SDVO_HIGH_SPEED;
- if (intel_crtc->new_config->has_dp_encoder)
+ if (crtc_state->has_dp_encoder)
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
- dpll |= (1 << (intel_crtc->new_config->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
/* also FPA1 */
- dpll |= (1 << (intel_crtc->new_config->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- switch (intel_crtc->new_config->dpll.p2) {
+ switch (crtc_state->dpll.p2) {
case 5:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
break;
@@ -7413,7 +7451,8 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
return dpll | DPLL_VCO_ENABLE;
}
-static int ironlake_crtc_compute_clock(struct intel_crtc *crtc)
+static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock, reduced_clock;
@@ -7427,39 +7466,39 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc)
WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
"Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
- ok = ironlake_compute_clocks(&crtc->base, &clock,
+ ok = ironlake_compute_clocks(&crtc->base, crtc_state, &clock,
&has_reduced_clock, &reduced_clock);
- if (!ok && !crtc->new_config->clock_set) {
+ if (!ok && !crtc_state->clock_set) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
/* Compat-code for transition, will disappear. */
- if (!crtc->new_config->clock_set) {
- crtc->new_config->dpll.n = clock.n;
- crtc->new_config->dpll.m1 = clock.m1;
- crtc->new_config->dpll.m2 = clock.m2;
- crtc->new_config->dpll.p1 = clock.p1;
- crtc->new_config->dpll.p2 = clock.p2;
+ if (!crtc_state->clock_set) {
+ crtc_state->dpll.n = clock.n;
+ crtc_state->dpll.m1 = clock.m1;
+ crtc_state->dpll.m2 = clock.m2;
+ crtc_state->dpll.p1 = clock.p1;
+ crtc_state->dpll.p2 = clock.p2;
}
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
- if (crtc->new_config->has_pch_encoder) {
- fp = i9xx_dpll_compute_fp(&crtc->new_config->dpll);
+ if (crtc_state->has_pch_encoder) {
+ fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
if (has_reduced_clock)
fp2 = i9xx_dpll_compute_fp(&reduced_clock);
- dpll = ironlake_compute_dpll(crtc,
+ dpll = ironlake_compute_dpll(crtc, crtc_state,
&fp, &reduced_clock,
has_reduced_clock ? &fp2 : NULL);
- crtc->new_config->dpll_hw_state.dpll = dpll;
- crtc->new_config->dpll_hw_state.fp0 = fp;
+ crtc_state->dpll_hw_state.dpll = dpll;
+ crtc_state->dpll_hw_state.fp0 = fp;
if (has_reduced_clock)
- crtc->new_config->dpll_hw_state.fp1 = fp2;
+ crtc_state->dpll_hw_state.fp1 = fp2;
else
- crtc->new_config->dpll_hw_state.fp1 = fp;
+ crtc_state->dpll_hw_state.fp1 = fp;
- pll = intel_get_shared_dpll(crtc);
+ pll = intel_get_shared_dpll(crtc, crtc_state);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(crtc->pipe));
@@ -7513,7 +7552,7 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
* registers are not unnecessarily read).
*/
if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
- crtc->config.has_drrs) {
+ crtc->config->has_drrs) {
m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
@@ -7534,9 +7573,9 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
}
void intel_dp_get_m_n(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
- if (crtc->config.has_pch_encoder)
+ if (pipe_config->has_pch_encoder)
intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
else
intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
@@ -7545,14 +7584,14 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
}
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
&pipe_config->fdi_m_n, NULL);
}
static void skylake_get_pfit_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7567,8 +7606,80 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc,
}
}
+static void
+skylake_get_initial_plane_config(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, base, offset, stride_mult;
+ int pipe = crtc->pipe;
+ int fourcc, pixel_format;
+ int aligned_height;
+ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
+
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb) {
+ DRM_DEBUG_KMS("failed to alloc fb\n");
+ return;
+ }
+
+ fb = &intel_fb->base;
+
+ val = I915_READ(PLANE_CTL(pipe, 0));
+ if (val & PLANE_CTL_TILED_MASK)
+ plane_config->tiling = I915_TILING_X;
+
+ pixel_format = val & PLANE_CTL_FORMAT_MASK;
+ fourcc = skl_format_to_fourcc(pixel_format,
+ val & PLANE_CTL_ORDER_RGBX,
+ val & PLANE_CTL_ALPHA_MASK);
+ fb->pixel_format = fourcc;
+ fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
+
+ base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
+ plane_config->base = base;
+
+ offset = I915_READ(PLANE_OFFSET(pipe, 0));
+
+ val = I915_READ(PLANE_SIZE(pipe, 0));
+ fb->height = ((val >> 16) & 0xfff) + 1;
+ fb->width = ((val >> 0) & 0x1fff) + 1;
+
+ val = I915_READ(PLANE_STRIDE(pipe, 0));
+ switch (plane_config->tiling) {
+ case I915_TILING_NONE:
+ stride_mult = 64;
+ break;
+ case I915_TILING_X:
+ stride_mult = 512;
+ break;
+ default:
+ MISSING_CASE(plane_config->tiling);
+ goto error;
+ }
+ fb->pitches[0] = (val & 0x3ff) * stride_mult;
+
+ aligned_height = intel_fb_align_height(dev, fb->height,
+ plane_config->tiling);
+
+ plane_config->size = ALIGN(fb->pitches[0] * aligned_height, PAGE_SIZE);
+
+ DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ pipe_name(pipe), fb->width, fb->height,
+ fb->bits_per_pixel, base, fb->pitches[0],
+ plane_config->size);
+
+ crtc->base.primary->fb = fb;
+ return;
+
+error:
+ kfree(fb);
+}
+
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7591,68 +7702,71 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
}
}
-static void ironlake_get_plane_config(struct intel_crtc *crtc,
- struct intel_plane_config *plane_config)
+static void
+ironlake_get_initial_plane_config(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, base, offset;
- int pipe = crtc->pipe, plane = crtc->plane;
+ int pipe = crtc->pipe;
int fourcc, pixel_format;
int aligned_height;
+ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
- crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
- if (!crtc->base.primary->fb) {
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb) {
DRM_DEBUG_KMS("failed to alloc fb\n");
return;
}
- val = I915_READ(DSPCNTR(plane));
+ fb = &intel_fb->base;
+
+ val = I915_READ(DSPCNTR(pipe));
if (INTEL_INFO(dev)->gen >= 4)
if (val & DISPPLANE_TILED)
- plane_config->tiled = true;
+ plane_config->tiling = I915_TILING_X;
pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
- fourcc = intel_format_to_fourcc(pixel_format);
- crtc->base.primary->fb->pixel_format = fourcc;
- crtc->base.primary->fb->bits_per_pixel =
- drm_format_plane_cpp(fourcc, 0) * 8;
+ fourcc = i9xx_format_to_fourcc(pixel_format);
+ fb->pixel_format = fourcc;
+ fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
- base = I915_READ(DSPSURF(plane)) & 0xfffff000;
+ base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
- offset = I915_READ(DSPOFFSET(plane));
+ offset = I915_READ(DSPOFFSET(pipe));
} else {
- if (plane_config->tiled)
- offset = I915_READ(DSPTILEOFF(plane));
+ if (plane_config->tiling)
+ offset = I915_READ(DSPTILEOFF(pipe));
else
- offset = I915_READ(DSPLINOFF(plane));
+ offset = I915_READ(DSPLINOFF(pipe));
}
plane_config->base = base;
val = I915_READ(PIPESRC(pipe));
- crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
- crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
+ fb->width = ((val >> 16) & 0xfff) + 1;
+ fb->height = ((val >> 0) & 0xfff) + 1;
val = I915_READ(DSPSTRIDE(pipe));
- crtc->base.primary->fb->pitches[0] = val & 0xffffffc0;
+ fb->pitches[0] = val & 0xffffffc0;
- aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
- plane_config->tiled);
+ aligned_height = intel_fb_align_height(dev, fb->height,
+ plane_config->tiling);
- plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
- aligned_height);
+ plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);
- DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
- pipe, plane, crtc->base.primary->fb->width,
- crtc->base.primary->fb->height,
- crtc->base.primary->fb->bits_per_pixel, base,
- crtc->base.primary->fb->pitches[0],
+ DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ pipe_name(pipe), fb->width, fb->height,
+ fb->bits_per_pixel, base, fb->pitches[0],
plane_config->size);
+
+ crtc->base.primary->fb = fb;
}
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7862,19 +7976,8 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
/*
* Make sure we're not on PC8 state before disabling PC8, otherwise
* we'll hang the machine. To prevent PC8 state, just enable force_wake.
- *
- * The other problem is that hsw_restore_lcpll() is called as part of
- * the runtime PM resume sequence, so we can't just call
- * gen6_gt_force_wake_get() because that function calls
- * intel_runtime_pm_get(), and we can't change the runtime PM refcount
- * while we are on the resume sequence. So to solve this problem we have
- * to call special forcewake code that doesn't touch runtime PM and
- * doesn't enable the forcewake delayed work.
*/
- spin_lock_irq(&dev_priv->uncore.lock);
- if (dev_priv->uncore.forcewake_count++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
- spin_unlock_irq(&dev_priv->uncore.lock);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -7904,11 +8007,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
DRM_ERROR("Switching back to LCPLL failed\n");
}
- /* See the big comment above. */
- spin_lock_irq(&dev_priv->uncore.lock);
- if (--dev_priv->uncore.forcewake_count == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
- spin_unlock_irq(&dev_priv->uncore.lock);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
/*
@@ -7970,9 +8069,10 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
intel_prepare_ddi(dev);
}
-static int haswell_crtc_compute_clock(struct intel_crtc *crtc)
+static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
- if (!intel_ddi_pll_select(crtc))
+ if (!intel_ddi_pll_select(crtc, crtc_state))
return -EINVAL;
crtc->lowfreq_avail = false;
@@ -7982,7 +8082,7 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc)
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
enum port port,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
u32 temp, dpll_ctl1;
@@ -8013,7 +8113,7 @@ static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
enum port port,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
@@ -8028,7 +8128,7 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
}
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -8070,7 +8170,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
}
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -8233,7 +8333,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
cntl |= CURSOR_PIPE_CSC_ENABLE;
}
- if (to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180))
+ if (crtc->cursor->state->rotation == BIT(DRM_ROTATE_180))
cntl |= CURSOR_ROTATE_180;
if (intel_crtc->cursor_cntl != cntl) {
@@ -8264,10 +8364,10 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
if (on)
base = intel_crtc->cursor_addr;
- if (x >= intel_crtc->config.pipe_src_w)
+ if (x >= intel_crtc->config->pipe_src_w)
base = 0;
- if (y >= intel_crtc->config.pipe_src_h)
+ if (y >= intel_crtc->config->pipe_src_h)
base = 0;
if (x < 0) {
@@ -8295,7 +8395,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
/* ILK+ do this automagically */
if (HAS_GMCH_DISPLAY(dev) &&
- to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180)) {
+ crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
base += (intel_crtc->cursor_height *
intel_crtc->cursor_width - 1) * 4;
}
@@ -8565,7 +8665,7 @@ retry:
intel_crtc = to_intel_crtc(crtc);
intel_crtc->new_enabled = true;
- intel_crtc->new_config = &intel_crtc->config;
+ intel_crtc->new_config = intel_crtc->config;
old->dpms_mode = connector->dpms;
old->load_detect_temp = true;
old->release_fb = NULL;
@@ -8606,7 +8706,7 @@ retry:
fail:
intel_crtc->new_enabled = crtc->enabled;
if (intel_crtc->new_enabled)
- intel_crtc->new_config = &intel_crtc->config;
+ intel_crtc->new_config = intel_crtc->config;
else
intel_crtc->new_config = NULL;
fail_unlock:
@@ -8652,7 +8752,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
}
static int i9xx_pll_refclk(struct drm_device *dev,
- const struct intel_crtc_config *pipe_config)
+ const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll = pipe_config->dpll_hw_state.dpll;
@@ -8669,7 +8769,7 @@ static int i9xx_pll_refclk(struct drm_device *dev,
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -8776,7 +8876,7 @@ int intel_dotclock_calculate(int link_freq,
}
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
@@ -8789,7 +8889,7 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
* agree once we know their relationship in the encoder's
* get_config() function.
*/
- pipe_config->adjusted_mode.crtc_clock =
+ pipe_config->base.adjusted_mode.crtc_clock =
intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
&pipe_config->fdi_m_n);
}
@@ -8800,9 +8900,9 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
struct drm_display_mode *mode;
- struct intel_crtc_config pipe_config;
+ struct intel_crtc_state pipe_config;
int htot = I915_READ(HTOTAL(cpu_transcoder));
int hsync = I915_READ(HSYNC(cpu_transcoder));
int vtot = I915_READ(VTOTAL(cpu_transcoder));
@@ -8917,6 +9017,14 @@ out:
intel_runtime_pm_put(dev_priv);
}
+static void intel_crtc_set_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ kfree(crtc->config);
+ crtc->config = crtc_state;
+ crtc->base.state = &crtc_state->base;
+}
+
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -8933,6 +9041,7 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
kfree(work);
}
+ intel_crtc_set_state(intel_crtc, NULL);
drm_crtc_cleanup(crtc);
kfree(intel_crtc);
@@ -9818,7 +9927,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
crtc->new_enabled = crtc->base.enabled;
if (crtc->new_enabled)
- crtc->new_config = &crtc->config;
+ crtc->new_config = crtc->config;
else
crtc->new_config = NULL;
}
@@ -9851,7 +9960,7 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
static void
connected_sink_compute_bpp(struct intel_connector *connector,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
int bpp = pipe_config->pipe_bpp;
@@ -9878,7 +9987,7 @@ connected_sink_compute_bpp(struct intel_connector *connector,
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
struct drm_framebuffer *fb,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct intel_connector *connector;
@@ -9947,7 +10056,7 @@ static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
}
static void intel_dump_pipe_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config,
+ struct intel_crtc_state *pipe_config,
const char *context)
{
DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
@@ -9981,10 +10090,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->has_infoframe);
DRM_DEBUG_KMS("requested mode:\n");
- drm_mode_debug_printmodeline(&pipe_config->requested_mode);
+ drm_mode_debug_printmodeline(&pipe_config->base.mode);
DRM_DEBUG_KMS("adjusted mode:\n");
- drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
- intel_dump_crtc_timings(&pipe_config->adjusted_mode);
+ drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
+ intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
DRM_DEBUG_KMS("pipe src size: %dx%d\n",
pipe_config->pipe_src_w, pipe_config->pipe_src_h);
@@ -10083,14 +10192,14 @@ static bool check_digital_port_conflicts(struct drm_device *dev)
return true;
}
-static struct intel_crtc_config *
+static struct intel_crtc_state *
intel_modeset_pipe_config(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct intel_encoder *encoder;
- struct intel_crtc_config *pipe_config;
+ struct intel_crtc_state *pipe_config;
int plane_bpp, ret = -EINVAL;
bool retry = true;
@@ -10108,8 +10217,8 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
if (!pipe_config)
return ERR_PTR(-ENOMEM);
- drm_mode_copy(&pipe_config->adjusted_mode, mode);
- drm_mode_copy(&pipe_config->requested_mode, mode);
+ drm_mode_copy(&pipe_config->base.adjusted_mode, mode);
+ drm_mode_copy(&pipe_config->base.mode, mode);
pipe_config->cpu_transcoder =
(enum transcoder) to_intel_crtc(crtc)->pipe;
@@ -10120,13 +10229,13 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
* positive or negative polarity is requested, treat this as meaning
* negative polarity.
*/
- if (!(pipe_config->adjusted_mode.flags &
+ if (!(pipe_config->base.adjusted_mode.flags &
(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
- pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+ pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
- if (!(pipe_config->adjusted_mode.flags &
+ if (!(pipe_config->base.adjusted_mode.flags &
(DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
- pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+ pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
/* Compute a starting value for pipe_config->pipe_bpp taking the source
* plane pixel format and any sink constraints into account. Returns the
@@ -10145,7 +10254,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
* computation to clearly distinguish it from the adjusted mode, which
* can be changed by the connectors in the below retry loop.
*/
- drm_crtc_get_hv_timing(&pipe_config->requested_mode,
+ drm_crtc_get_hv_timing(&pipe_config->base.mode,
&pipe_config->pipe_src_w,
&pipe_config->pipe_src_h);
@@ -10155,7 +10264,8 @@ encoder_retry:
pipe_config->pixel_multiplier = 1;
/* Fill in default crtc timings, allow encoders to overwrite them. */
- drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
+ drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
+ CRTC_STEREO_DOUBLE);
/* Pass our mode to the connectors and the CRTC to give them a chance to
* adjust it according to limitations or connector properties, and also
@@ -10175,7 +10285,7 @@ encoder_retry:
/* Set default port clock if not overwritten by the encoder. Needs to be
* done afterwards in case the encoder adjusts the mode. */
if (!pipe_config->port_clock)
- pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
+ pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
* pipe_config->pixel_multiplier;
ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
@@ -10332,7 +10442,7 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
for_each_intel_crtc(dev, intel_crtc) {
WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
WARN_ON(intel_crtc->new_config &&
- intel_crtc->new_config != &intel_crtc->config);
+ intel_crtc->new_config != intel_crtc->config);
WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
}
@@ -10384,8 +10494,8 @@ static bool intel_fuzzy_clock_check(int clock1, int clock2)
static bool
intel_pipe_config_compare(struct drm_device *dev,
- struct intel_crtc_config *current_config,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *current_config,
+ struct intel_crtc_state *pipe_config)
{
#define PIPE_CONF_CHECK_X(name) \
if (current_config->name != pipe_config->name) { \
@@ -10476,19 +10586,19 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I_ALT(dp_m_n.tu, dp_m2_n2.tu);
}
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
- PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
+ PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
PIPE_CONF_CHECK_I(pixel_multiplier);
PIPE_CONF_CHECK_I(has_hdmi_sink);
@@ -10499,17 +10609,17 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(has_audio);
- PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
DRM_MODE_FLAG_INTERLACE);
if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
- PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
DRM_MODE_FLAG_PHSYNC);
- PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
DRM_MODE_FLAG_NHSYNC);
- PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
DRM_MODE_FLAG_PVSYNC);
- PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
DRM_MODE_FLAG_NVSYNC);
}
@@ -10559,7 +10669,7 @@ intel_pipe_config_compare(struct drm_device *dev,
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
- PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
+ PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
#undef PIPE_CONF_CHECK_X
@@ -10709,7 +10819,7 @@ check_crtc_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
- struct intel_crtc_config pipe_config;
+ struct intel_crtc_state pipe_config;
for_each_intel_crtc(dev, crtc) {
bool enabled = false;
@@ -10759,11 +10869,11 @@ check_crtc_state(struct drm_device *dev)
"(expected %i, found %i)\n", crtc->active, active);
if (active &&
- !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
+ !intel_pipe_config_compare(dev, crtc->config, &pipe_config)) {
I915_STATE_WARN(1, "pipe state doesn't match!\n");
intel_dump_pipe_config(crtc, &pipe_config,
"[hw state]");
- intel_dump_pipe_config(crtc, &crtc->config,
+ intel_dump_pipe_config(crtc, crtc->config,
"[sw state]");
}
}
@@ -10828,16 +10938,16 @@ intel_modeset_check_state(struct drm_device *dev)
check_shared_dpll_state(dev);
}
-void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
+void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
int dotclock)
{
/*
* FDI already provided one idea for the dotclock.
* Yell if the encoder disagrees.
*/
- WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
+ WARN(!intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock),
"FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
- pipe_config->adjusted_mode.crtc_clock, dotclock);
+ pipe_config->base.adjusted_mode.crtc_clock, dotclock);
}
static void update_scanline_offset(struct intel_crtc *crtc)
@@ -10863,7 +10973,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
* one to the value.
*/
if (IS_GEN2(dev)) {
- const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
+ const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
int vtotal;
vtotal = mode->crtc_vtotal;
@@ -10878,7 +10988,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
crtc->scanline_offset = 1;
}
-static struct intel_crtc_config *
+static struct intel_crtc_state *
intel_modeset_compute_config(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_framebuffer *fb,
@@ -10886,7 +10996,7 @@ intel_modeset_compute_config(struct drm_crtc *crtc,
unsigned *prepare_pipes,
unsigned *disable_pipes)
{
- struct intel_crtc_config *pipe_config = NULL;
+ struct intel_crtc_state *pipe_config = NULL;
intel_modeset_affected_pipes(crtc, modeset_pipes,
prepare_pipes, disable_pipes);
@@ -10911,10 +11021,40 @@ out:
return pipe_config;
}
+static int __intel_set_mode_setup_plls(struct drm_device *dev,
+ unsigned modeset_pipes,
+ unsigned disable_pipes)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ unsigned clear_pipes = modeset_pipes | disable_pipes;
+ struct intel_crtc *intel_crtc;
+ int ret = 0;
+
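+ /* Nothing to do on platforms without a per-CRTC clock computation hook. */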
+ if (!dev_priv->display.crtc_compute_clock)
+ return 0;
+
+ ret = intel_shared_dpll_start_config(dev_priv, clear_pipes);
+ if (ret)
+ goto done;
+
+ for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
+ struct intel_crtc_state *state = intel_crtc->new_config;
+ ret = dev_priv->display.crtc_compute_clock(intel_crtc,
+ state);
+ if (ret) {
+ intel_shared_dpll_abort_config(dev_priv);
+ goto done;
+ }
+ }
+
+done:
+ return ret;
+}
+
static int __intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb,
- struct intel_crtc_config *pipe_config,
+ struct intel_crtc_state *pipe_config,
unsigned modeset_pipes,
unsigned prepare_pipes,
unsigned disable_pipes)
@@ -10948,21 +11088,9 @@ static int __intel_set_mode(struct drm_crtc *crtc,
prepare_pipes &= ~disable_pipes;
}
- if (dev_priv->display.crtc_compute_clock) {
- unsigned clear_pipes = modeset_pipes | disable_pipes;
-
- ret = intel_shared_dpll_start_config(dev_priv, clear_pipes);
- if (ret)
- goto done;
-
- for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
- ret = dev_priv->display.crtc_compute_clock(intel_crtc);
- if (ret) {
- intel_shared_dpll_abort_config(dev_priv);
- goto done;
- }
- }
- }
+ ret = __intel_set_mode_setup_plls(dev, modeset_pipes, disable_pipes);
+ if (ret)
+ goto done;
for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
intel_crtc_disable(&intel_crtc->base);
@@ -10983,8 +11111,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
crtc->mode = *mode;
/* mode_set/enable/disable functions rely on a correct pipe
* config. */
- to_intel_crtc(crtc)->config = *pipe_config;
- to_intel_crtc(crtc)->new_config = &to_intel_crtc(crtc)->config;
+ intel_crtc_set_state(to_intel_crtc(crtc), pipe_config);
/*
* Calculate and store various constants which
@@ -10992,7 +11119,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
* timestamping. They are derived from true hwmode.
*/
drm_calc_timestamping_constants(crtc,
- &pipe_config->adjusted_mode);
+ &pipe_config->base.adjusted_mode);
}
/* Only after disabling all output pipelines that will be changed can we
@@ -11028,7 +11155,6 @@ done:
if (ret && crtc->enabled)
crtc->mode = *saved_mode;
- kfree(pipe_config);
kfree(saved_mode);
return ret;
}
@@ -11036,7 +11162,7 @@ done:
static int intel_set_mode_pipes(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb,
- struct intel_crtc_config *pipe_config,
+ struct intel_crtc_state *pipe_config,
unsigned modeset_pipes,
unsigned prepare_pipes,
unsigned disable_pipes)
@@ -11056,7 +11182,7 @@ static int intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb)
{
- struct intel_crtc_config *pipe_config;
+ struct intel_crtc_state *pipe_config;
unsigned modeset_pipes, prepare_pipes, disable_pipes;
pipe_config = intel_modeset_compute_config(crtc, mode, fb,
@@ -11151,7 +11277,7 @@ static void intel_set_config_restore_state(struct drm_device *dev,
crtc->new_enabled = config->save_crtc_enabled[count++];
if (crtc->new_enabled)
- crtc->new_config = &crtc->config;
+ crtc->new_config = crtc->config;
else
crtc->new_config = NULL;
}
@@ -11363,7 +11489,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
}
if (crtc->new_enabled)
- crtc->new_config = &crtc->config;
+ crtc->new_config = crtc->config;
else
crtc->new_config = NULL;
}
@@ -11400,7 +11526,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
struct drm_device *dev;
struct drm_mode_set save_set;
struct intel_set_config *config;
- struct intel_crtc_config *pipe_config;
+ struct intel_crtc_state *pipe_config;
unsigned modeset_pipes, prepare_pipes, disable_pipes;
int ret;
@@ -11457,7 +11583,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
goto fail;
} else if (pipe_config) {
if (pipe_config->has_audio !=
- to_intel_crtc(set->crtc)->config.has_audio)
+ to_intel_crtc(set->crtc)->config->has_audio)
config->mode_changed = true;
/*
@@ -11543,6 +11669,8 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.set_config = intel_crtc_set_config,
.destroy = intel_crtc_destroy,
.page_flip = intel_crtc_page_flip,
+ .atomic_duplicate_state = intel_crtc_duplicate_state,
+ .atomic_destroy_state = intel_crtc_destroy_state,
};
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
@@ -11737,7 +11865,6 @@ intel_check_primary_plane(struct drm_plane *plane,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = state->base.crtc;
struct intel_crtc *intel_crtc;
- struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = state->base.fb;
struct drm_rect *dest = &state->dst;
struct drm_rect *src = &state->src;
@@ -11771,7 +11898,7 @@ intel_check_primary_plane(struct drm_plane *plane,
if (intel_crtc->primary_enabled &&
INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
dev_priv->fbc.plane == intel_crtc->plane &&
- intel_plane->rotation != BIT(DRM_ROTATE_0)) {
+ state->base.rotation != BIT(DRM_ROTATE_0)) {
intel_crtc->atomic.disable_fbc = true;
}
@@ -11937,16 +12064,17 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc)
void intel_plane_destroy(struct drm_plane *plane)
{
struct intel_plane *intel_plane = to_intel_plane(plane);
- intel_plane_destroy_state(plane, plane->state);
drm_plane_cleanup(plane);
kfree(intel_plane);
}
-static const struct drm_plane_funcs intel_primary_plane_funcs = {
+const struct drm_plane_funcs intel_plane_funcs = {
.update_plane = drm_plane_helper_update,
.disable_plane = drm_plane_helper_disable,
.destroy = intel_plane_destroy,
- .set_property = intel_plane_set_property,
+ .set_property = drm_atomic_helper_plane_set_property,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
@@ -11956,6 +12084,7 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
int pipe)
{
struct intel_plane *primary;
+ struct intel_plane_state *state;
const uint32_t *intel_primary_formats;
int num_formats;
@@ -11963,17 +12092,17 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
if (primary == NULL)
return NULL;
- primary->base.state = intel_plane_duplicate_state(&primary->base);
- if (primary->base.state == NULL) {
+ state = intel_create_plane_state(&primary->base);
+ if (!state) {
kfree(primary);
return NULL;
}
+ primary->base.state = &state->base;
primary->can_scale = false;
primary->max_downscale = 1;
primary->pipe = pipe;
primary->plane = pipe;
- primary->rotation = BIT(DRM_ROTATE_0);
primary->check_plane = intel_check_primary_plane;
primary->commit_plane = intel_commit_primary_plane;
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
@@ -11988,7 +12117,7 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
}
drm_universal_plane_init(dev, &primary->base, 0,
- &intel_primary_plane_funcs,
+ &intel_plane_funcs,
intel_primary_formats, num_formats,
DRM_PLANE_TYPE_PRIMARY);
@@ -12001,7 +12130,7 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
if (dev->mode_config.rotation_property)
drm_object_attach_property(&primary->base.base,
dev->mode_config.rotation_property,
- primary->rotation);
+ state->base.rotation);
}
drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
@@ -12116,40 +12245,32 @@ update:
intel_crtc_update_cursor(crtc, state->visible);
}
-static const struct drm_plane_funcs intel_cursor_plane_funcs = {
- .update_plane = drm_plane_helper_update,
- .disable_plane = drm_plane_helper_disable,
- .destroy = intel_plane_destroy,
- .set_property = intel_plane_set_property,
- .atomic_duplicate_state = intel_plane_duplicate_state,
- .atomic_destroy_state = intel_plane_destroy_state,
-};
-
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
int pipe)
{
struct intel_plane *cursor;
+ struct intel_plane_state *state;
cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
if (cursor == NULL)
return NULL;
- cursor->base.state = intel_plane_duplicate_state(&cursor->base);
- if (cursor->base.state == NULL) {
+ state = intel_create_plane_state(&cursor->base);
+ if (!state) {
kfree(cursor);
return NULL;
}
+ cursor->base.state = &state->base;
cursor->can_scale = false;
cursor->max_downscale = 1;
cursor->pipe = pipe;
cursor->plane = pipe;
- cursor->rotation = BIT(DRM_ROTATE_0);
cursor->check_plane = intel_check_cursor_plane;
cursor->commit_plane = intel_commit_cursor_plane;
drm_universal_plane_init(dev, &cursor->base, 0,
- &intel_cursor_plane_funcs,
+ &intel_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
DRM_PLANE_TYPE_CURSOR);
@@ -12163,7 +12284,7 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
if (dev->mode_config.rotation_property)
drm_object_attach_property(&cursor->base.base,
dev->mode_config.rotation_property,
- cursor->rotation);
+ state->base.rotation);
}
drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
@@ -12175,6 +12296,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
+ struct intel_crtc_state *crtc_state = NULL;
struct drm_plane *primary = NULL;
struct drm_plane *cursor = NULL;
int i, ret;
@@ -12183,6 +12305,11 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
if (intel_crtc == NULL)
return;
+ crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
+ if (!crtc_state)
+ goto fail;
+ intel_crtc_set_state(intel_crtc, crtc_state);
+
primary = intel_primary_plane_create(dev, pipe);
if (!primary)
goto fail;
@@ -12235,6 +12362,7 @@ fail:
drm_plane_cleanup(primary);
if (cursor)
drm_plane_cleanup(cursor);
+ kfree(crtc_state);
kfree(intel_crtc);
}
@@ -12330,6 +12458,7 @@ static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
+ struct drm_connector *connector;
bool dpd_is_edp = false;
intel_lvds_init(dev);
@@ -12460,6 +12589,37 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_TV(dev))
intel_tv_init(dev);
+ /*
+ * FIXME: We don't have full atomic support yet, but we want to be
+ * able to enable/test plane updates via the atomic interface in the
+ * meantime. However, as soon as we flip DRIVER_ATOMIC on, the DRM core
+ * will take some atomic codepaths to look up properties during
+ * drmModeGetConnector() that unconditionally dereference
+ * connector->state.
+ *
+ * We create a dummy connector state here for each connector to ensure
+ * the DRM core doesn't try to dereference a NULL connector->state.
+ * The actual connector properties will never be updated or contain
+ * useful information, but since we're doing this specifically for
+ * testing/debug of the plane operations (and only when a specific
+ * kernel module option is given), that shouldn't really matter.
+ *
+ * Once atomic support for CRTCs + connectors lands, this loop should
+ * be removed since we'll be setting up real connector state, which
+ * will contain Intel-specific properties.
+ */
+ if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ head) {
+ if (!WARN_ON(connector->state)) {
+ connector->state =
+ kzalloc(sizeof(*connector->state),
+ GFP_KERNEL);
+ }
+ }
+ }
+
intel_psr_init(dev);
for_each_intel_encoder(dev, encoder) {
@@ -12600,8 +12760,8 @@ static int intel_framebuffer_init(struct drm_device *dev,
if (mode_cmd->offsets[0] != 0)
return -EINVAL;
- aligned_height = intel_align_height(dev, mode_cmd->height,
- obj->tiling_mode);
+ aligned_height = intel_fb_align_height(dev, mode_cmd->height,
+ obj->tiling_mode);
/* FIXME drm helper for size checks (especially planar formats)? */
if (obj->base.size < aligned_height * mode_cmd->pitches[0])
return -EINVAL;
@@ -12643,6 +12803,8 @@ static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = intel_user_framebuffer_create,
.output_poll_changed = intel_fbdev_output_poll_changed,
+ .atomic_check = intel_atomic_check,
+ .atomic_commit = intel_atomic_commit,
};
/* Set up chip specific display functions */
@@ -12661,23 +12823,32 @@ static void intel_init_display(struct drm_device *dev)
else
dev_priv->display.find_dpll = i9xx_find_best_dpll;
- if (HAS_DDI(dev)) {
+ if (INTEL_INFO(dev)->gen >= 9) {
dev_priv->display.get_pipe_config = haswell_get_pipe_config;
- dev_priv->display.get_plane_config = ironlake_get_plane_config;
+ dev_priv->display.get_initial_plane_config =
+ skylake_get_initial_plane_config;
dev_priv->display.crtc_compute_clock =
haswell_crtc_compute_clock;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
dev_priv->display.off = ironlake_crtc_off;
- if (INTEL_INFO(dev)->gen >= 9)
- dev_priv->display.update_primary_plane =
- skylake_update_primary_plane;
- else
- dev_priv->display.update_primary_plane =
- ironlake_update_primary_plane;
+ dev_priv->display.update_primary_plane =
+ skylake_update_primary_plane;
+ } else if (HAS_DDI(dev)) {
+ dev_priv->display.get_pipe_config = haswell_get_pipe_config;
+ dev_priv->display.get_initial_plane_config =
+ ironlake_get_initial_plane_config;
+ dev_priv->display.crtc_compute_clock =
+ haswell_crtc_compute_clock;
+ dev_priv->display.crtc_enable = haswell_crtc_enable;
+ dev_priv->display.crtc_disable = haswell_crtc_disable;
+ dev_priv->display.off = ironlake_crtc_off;
+ dev_priv->display.update_primary_plane =
+ ironlake_update_primary_plane;
} else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
- dev_priv->display.get_plane_config = ironlake_get_plane_config;
+ dev_priv->display.get_initial_plane_config =
+ ironlake_get_initial_plane_config;
dev_priv->display.crtc_compute_clock =
ironlake_crtc_compute_clock;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
@@ -12687,7 +12858,8 @@ static void intel_init_display(struct drm_device *dev)
ironlake_update_primary_plane;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_plane_config = i9xx_get_plane_config;
+ dev_priv->display.get_initial_plane_config =
+ i9xx_get_initial_plane_config;
dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -12696,7 +12868,8 @@ static void intel_init_display(struct drm_device *dev)
i9xx_update_primary_plane;
} else {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_plane_config = i9xx_get_plane_config;
+ dev_priv->display.get_initial_plane_config =
+ i9xx_get_initial_plane_config;
dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -13068,8 +13241,8 @@ void intel_modeset_init(struct drm_device *dev)
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
- if (dev_priv->display.get_plane_config) {
- dev_priv->display.get_plane_config(crtc,
+ if (dev_priv->display.get_initial_plane_config) {
+ dev_priv->display.get_initial_plane_config(crtc,
&crtc->plane_config);
/*
* If the fb is shared between multiple heads, we'll
@@ -13133,7 +13306,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
u32 reg;
/* Clear any frame start delays used for debugging left by the BIOS */
- reg = PIPECONF(crtc->config.cpu_transcoder);
+ reg = PIPECONF(crtc->config->cpu_transcoder);
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
/* restore vblank interrupts to correct state */
@@ -13337,12 +13510,12 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
int i;
for_each_intel_crtc(dev, crtc) {
- memset(&crtc->config, 0, sizeof(crtc->config));
+ memset(crtc->config, 0, sizeof(*crtc->config));
- crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
+ crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
crtc->active = dev_priv->display.get_pipe_config(crtc,
- &crtc->config);
+ crtc->config);
crtc->base.enabled = crtc->active;
crtc->primary_enabled = primary_get_hw_state(crtc);
@@ -13379,7 +13552,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
if (encoder->get_hw_state(encoder, &pipe)) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
encoder->base.crtc = &crtc->base;
- encoder->get_config(encoder, &crtc->config);
+ encoder->get_config(encoder, crtc->config);
} else {
encoder->base.crtc = NULL;
}
@@ -13429,7 +13602,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
*/
for_each_intel_crtc(dev, crtc) {
if (crtc->active && i915.fastboot) {
- intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config);
+ intel_mode_from_pipe_config(&crtc->base.mode,
+ crtc->config);
DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
crtc->base.base.id);
drm_mode_debug_printmodeline(&crtc->base.mode);
@@ -13444,7 +13618,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
for_each_pipe(dev_priv, pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
intel_sanitize_crtc(crtc);
- intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
+ intel_dump_pipe_config(crtc, crtc->config,
+ "[setup_hw_state]");
}
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index df7b558f3222..eea9e366a109 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -31,6 +31,7 @@
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
@@ -1074,7 +1075,7 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
}
static void
-skl_edp_set_pll_config(struct intel_crtc_config *pipe_config, int link_bw)
+skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_bw)
{
u32 ctrl1;
@@ -1101,7 +1102,7 @@ skl_edp_set_pll_config(struct intel_crtc_config *pipe_config, int link_bw)
}
static void
-hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
+hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
{
switch (link_bw) {
case DP_LINK_BW_1_62:
@@ -1118,7 +1119,7 @@ hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
static void
intel_dp_set_clock(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config, int link_bw)
+ struct intel_crtc_state *pipe_config, int link_bw)
{
struct drm_device *dev = encoder->base.dev;
const struct dp_link_dpll *divisor = NULL;
@@ -1151,11 +1152,11 @@ intel_dp_set_clock(struct intel_encoder *encoder,
bool
intel_dp_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
struct intel_crtc *intel_crtc = encoder->new_crtc;
@@ -1269,7 +1270,7 @@ found:
&pipe_config->dp_m_n);
if (intel_connector->panel.downclock_mode != NULL &&
- intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) {
+ dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
pipe_config->has_drrs = true;
intel_link_compute_m_n(bpp, lane_count,
intel_connector->panel.downclock_mode->clock,
@@ -1295,11 +1296,12 @@ static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
- DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
+ DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
+ crtc->config->port_clock);
dpa_ctl = I915_READ(DP_A);
dpa_ctl &= ~DP_PLL_FREQ_MASK;
- if (crtc->config.port_clock == 162000) {
+ if (crtc->config->port_clock == 162000) {
/* For a long time we've carried around a ILK-DevA w/a for the
* 160MHz clock. If we're really unlucky, it's still required.
*/
@@ -1324,7 +1326,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
/*
* There are four kinds of DP registers:
@@ -1352,7 +1354,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
- if (crtc->config.has_audio)
+ if (crtc->config->has_audio)
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
/* Split out the IBX/CPU vs CPT settings */
@@ -2013,7 +2015,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
}
static void intel_dp_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
u32 tmp, flags = 0;
@@ -2050,7 +2052,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
flags |= DRM_MODE_FLAG_NVSYNC;
}
- pipe_config->adjusted_mode.flags |= flags;
+ pipe_config->base.adjusted_mode.flags |= flags;
if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
tmp & DP_COLOR_RANGE_16_235)
@@ -2073,7 +2075,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
ironlake_check_encoder_dotclock(pipe_config, dotclock);
- pipe_config->adjusted_mode.crtc_clock = dotclock;
+ pipe_config->base.adjusted_mode.crtc_clock = dotclock;
if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
@@ -2102,7 +2104,7 @@ static void intel_disable_dp(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- if (crtc->config.has_audio)
+ if (crtc->config->has_audio)
intel_audio_codec_disable(encoder);
if (HAS_PSR(dev) && !HAS_DDI(dev))
@@ -2312,7 +2314,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
- if (crtc->config.has_audio) {
+ if (crtc->config->has_audio) {
DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
pipe_name(crtc->pipe));
intel_audio_codec_enable(encoder);
@@ -4400,7 +4402,9 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
.force = intel_dp_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_set_property,
+ .atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_dp_connector_destroy,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
@@ -4420,7 +4424,7 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder)
return;
}
-bool
+enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
struct intel_dp *intel_dp = &intel_dig_port->dp;
@@ -4428,7 +4432,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
- bool ret = true;
+ enum irqreturn ret = IRQ_NONE;
if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
@@ -4487,7 +4491,9 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
}
- ret = false;
+
+ ret = IRQ_HANDLED;
+
goto put_power;
mst_fail:
/* if we were in MST mode, and device is not there get out of MST mode */
@@ -4745,24 +4751,24 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
I915_READ(pp_div_reg));
}
-void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
+static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
- struct intel_dp *intel_dp = NULL;
- struct intel_crtc_config *config = NULL;
+ struct intel_digital_port *dig_port = NULL;
+ struct intel_dp *intel_dp = dev_priv->drrs.dp;
+ struct intel_crtc_state *config = NULL;
struct intel_crtc *intel_crtc = NULL;
- struct intel_connector *intel_connector = dev_priv->drrs.connector;
u32 reg, val;
- enum edp_drrs_refresh_rate_type index = DRRS_HIGH_RR;
+ enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
if (refresh_rate <= 0) {
DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
return;
}
- if (intel_connector == NULL) {
- DRM_DEBUG_KMS("DRRS supported for eDP only.\n");
+ if (intel_dp == NULL) {
+ DRM_DEBUG_KMS("DRRS not supported.\n");
return;
}
@@ -4771,8 +4777,8 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
* platforms that cannot have PSR and DRRS enabled at the same time.
*/
- encoder = intel_attached_encoder(&intel_connector->base);
- intel_dp = enc_to_intel_dp(&encoder->base);
+ dig_port = dp_to_dig_port(intel_dp);
+ encoder = &dig_port->base;
intel_crtc = encoder->new_crtc;
if (!intel_crtc) {
@@ -4780,17 +4786,18 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
return;
}
- config = &intel_crtc->config;
+ config = intel_crtc->config;
- if (intel_dp->drrs_state.type < SEAMLESS_DRRS_SUPPORT) {
+ if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
return;
}
- if (intel_connector->panel.downclock_mode->vrefresh == refresh_rate)
+ if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
+ refresh_rate)
index = DRRS_LOW_RR;
- if (index == intel_dp->drrs_state.refresh_rate_type) {
+ if (index == dev_priv->drrs.refresh_rate_type) {
DRM_DEBUG_KMS(
"DRRS requested for previously set RR...ignoring\n");
return;
@@ -4802,7 +4809,7 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
}
if (INTEL_INFO(dev)->gen > 6 && INTEL_INFO(dev)->gen < 8) {
- reg = PIPECONF(intel_crtc->config.cpu_transcoder);
+ reg = PIPECONF(intel_crtc->config->cpu_transcoder);
val = I915_READ(reg);
if (index > DRRS_HIGH_RR) {
val |= PIPECONF_EDP_RR_MODE_SWITCH;
@@ -4813,30 +4820,154 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
I915_WRITE(reg, val);
}
+ dev_priv->drrs.refresh_rate_type = index;
+
+ DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
+}
+
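+/*
+ * Make this eDP panel the single DRRS-capable output tracked in dev_priv so
+ * that frontbuffer activity can drive refresh rate switching.
+ */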
+void intel_edp_drrs_enable(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_crtc *crtc = dig_port->base.base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ if (!intel_crtc->config->has_drrs) {
+ DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
+ return;
+ }
+
+ mutex_lock(&dev_priv->drrs.mutex);
+ if (WARN_ON(dev_priv->drrs.dp)) {
+ DRM_ERROR("DRRS already enabled\n");
+ goto unlock;
+ }
+
+ dev_priv->drrs.busy_frontbuffer_bits = 0;
+
+ dev_priv->drrs.dp = intel_dp;
+
+unlock:
+ mutex_unlock(&dev_priv->drrs.mutex);
+}
+
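+/*
+ * Stop DRRS for this panel: restore the fixed-mode (high) refresh rate if we
+ * had downclocked, drop the tracking state and cancel any pending work.
+ */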
+void intel_edp_drrs_disable(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_crtc *crtc = dig_port->base.base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ if (!intel_crtc->config->has_drrs)
+ return;
+
+ mutex_lock(&dev_priv->drrs.mutex);
+ if (!dev_priv->drrs.dp) {
+ mutex_unlock(&dev_priv->drrs.mutex);
+ return;
+ }
+
+ if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
+ intel_dp_set_drrs_state(dev_priv->dev,
+ intel_dp->attached_connector->panel.
+ fixed_mode->vrefresh);
+
+ dev_priv->drrs.dp = NULL;
+ mutex_unlock(&dev_priv->drrs.mutex);
+
+ cancel_delayed_work_sync(&dev_priv->drrs.work);
+}
+
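+/*
+ * Delayed work that drops to the panel's downclocked refresh rate once the
+ * frontbuffer has been left idle.
+ */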
+static void intel_edp_drrs_downclock_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), drrs.work.work);
+ struct intel_dp *intel_dp;
+
+ mutex_lock(&dev_priv->drrs.mutex);
+
+ intel_dp = dev_priv->drrs.dp;
+
+ if (!intel_dp)
+ goto unlock;
+
/*
- * mutex taken to ensure that there is no race between differnt
- * drrs calls trying to update refresh rate. This scenario may occur
- * in future when idleness detection based DRRS in kernel and
- * possible calls from user space to set differnt RR are made.
+ * The delayed work can race with an invalidate, hence we need to
+ * recheck.
*/
- mutex_lock(&intel_dp->drrs_state.mutex);
+ if (dev_priv->drrs.busy_frontbuffer_bits)
+ goto unlock;
- intel_dp->drrs_state.refresh_rate_type = index;
+ if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
+ intel_dp_set_drrs_state(dev_priv->dev,
+ intel_dp->attached_connector->panel.
+ downclock_mode->vrefresh);
- mutex_unlock(&intel_dp->drrs_state.mutex);
+unlock:
- DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
+ mutex_unlock(&dev_priv->drrs.mutex);
+}
+
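+/*
+ * Frontbuffer rendering is starting: switch back to the high refresh rate if
+ * we had downclocked, and record the busy frontbuffer bits for the DRRS pipe.
+ */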
+void intel_edp_drrs_invalidate(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ enum pipe pipe;
+
+ if (!dev_priv->drrs.dp)
+ return;
+
+ mutex_lock(&dev_priv->drrs.mutex);
+ crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
+ pipe = to_intel_crtc(crtc)->pipe;
+
+ if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
+ cancel_delayed_work_sync(&dev_priv->drrs.work);
+ intel_dp_set_drrs_state(dev_priv->dev,
+ dev_priv->drrs.dp->attached_connector->panel.
+ fixed_mode->vrefresh);
+ }
+
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+
+ dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
+ mutex_unlock(&dev_priv->drrs.mutex);
+}
+
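+/*
+ * Frontbuffer rendering has completed: clear the busy bits and, once nothing
+ * is pending, re-arm the delayed work that downclocks the panel.
+ */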
+void intel_edp_drrs_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ enum pipe pipe;
+
+ if (!dev_priv->drrs.dp)
+ return;
+
+ mutex_lock(&dev_priv->drrs.mutex);
+ crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
+ pipe = to_intel_crtc(crtc)->pipe;
+ dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
+
+ cancel_delayed_work_sync(&dev_priv->drrs.work);
+
+ if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
+ !dev_priv->drrs.busy_frontbuffer_bits)
+ schedule_delayed_work(&dev_priv->drrs.work,
+ msecs_to_jiffies(1000));
+ mutex_unlock(&dev_priv->drrs.mutex);
}
static struct drm_display_mode *
-intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
- struct intel_connector *intel_connector,
- struct drm_display_mode *fixed_mode)
+intel_dp_drrs_init(struct intel_connector *intel_connector,
+ struct drm_display_mode *fixed_mode)
{
struct drm_connector *connector = &intel_connector->base;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *downclock_mode = NULL;
@@ -4858,13 +4989,13 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
return NULL;
}
- dev_priv->drrs.connector = intel_connector;
+ INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
- mutex_init(&intel_dp->drrs_state.mutex);
+ mutex_init(&dev_priv->drrs.mutex);
- intel_dp->drrs_state.type = dev_priv->vbt.drrs_type;
+ dev_priv->drrs.type = dev_priv->vbt.drrs_type;
- intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR;
+ dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
return downclock_mode;
}
@@ -4884,7 +5015,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct edid *edid;
enum pipe pipe = INVALID_PIPE;
- intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;
+ dev_priv->drrs.type = DRRS_NOT_SUPPORTED;
if (!is_edp(intel_dp))
return true;
@@ -4933,7 +5064,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
fixed_mode = drm_mode_duplicate(dev, scan);
downclock_mode = intel_dp_drrs_init(
- intel_dig_port,
intel_connector, fixed_mode);
break;
}
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 7f8c6a66680a..9f67a379a9a5 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -26,11 +26,12 @@
#include <drm/drmP.h>
#include "i915_drv.h"
#include "intel_drv.h"
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
@@ -38,7 +39,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
int bpp;
int lane_count, slots;
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_connector *found = NULL, *intel_connector;
int mst_pbn;
@@ -157,7 +158,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
if (intel_dp->active_mst_links == 0) {
enum port port = intel_ddi_get_encoder_port(encoder);
- I915_WRITE(PORT_CLK_SEL(port), intel_crtc->config.ddi_pll_sel);
+ I915_WRITE(PORT_CLK_SEL(port),
+ intel_crtc->config->ddi_pll_sel);
intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
@@ -170,7 +172,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
}
ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
- intel_mst->port, intel_crtc->config.pbn, &slots);
+ intel_mst->port,
+ intel_crtc->config->pbn, &slots);
if (ret == false) {
DRM_ERROR("failed to allocate vcpi\n");
return;
@@ -216,14 +219,14 @@ static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
}
static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
+ enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
u32 temp, flags = 0;
pipe_config->has_dp_encoder = true;
@@ -254,7 +257,7 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
default:
break;
}
- pipe_config->adjusted_mode.flags |= flags;
+ pipe_config->base.adjusted_mode.flags |= flags;
intel_dp_get_m_n(crtc, pipe_config);
intel_ddi_clock_get(&intel_dig_port->base, pipe_config);
@@ -311,7 +314,9 @@ static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.detect = intel_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_mst_set_property,
+ .atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_dp_mst_connector_destroy,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int intel_dp_mst_get_modes(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 30e968f8c55e..eef79ccd0b7c 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -143,7 +143,7 @@ struct intel_encoder {
bool connectors_active;
void (*hot_plug)(struct intel_encoder *);
bool (*compute_config)(struct intel_encoder *,
- struct intel_crtc_config *);
+ struct intel_crtc_state *);
void (*pre_pll_enable)(struct intel_encoder *);
void (*pre_enable)(struct intel_encoder *);
void (*enable)(struct intel_encoder *);
@@ -159,7 +159,7 @@ struct intel_encoder {
* pre-filled the pipe config. Note that intel_encoder->base.crtc must
* be set correctly before calling this function. */
void (*get_config)(struct intel_encoder *,
- struct intel_crtc_config *pipe_config);
+ struct intel_crtc_state *pipe_config);
/*
* Called during system suspend after all pending requests for the
* encoder are flushed (for example for DP AUX transactions) and
@@ -257,13 +257,15 @@ struct intel_plane_state {
bool hides_primary;
};
-struct intel_plane_config {
- bool tiled;
+struct intel_initial_plane_config {
+ unsigned int tiling;
int size;
u32 base;
};
-struct intel_crtc_config {
+struct intel_crtc_state {
+ struct drm_crtc_state base;
+
/**
* quirks - bitfield with hw state readout quirks
*
@@ -276,16 +278,6 @@ struct intel_crtc_config {
#define PIPE_CONFIG_QUIRK_INHERITED_MODE (1<<1) /* mode inherited from firmware */
unsigned long quirks;
- /* User requested mode, only valid as a starting point to
- * compute adjusted_mode, except in the case of (S)DVO where
- * it's also for the output timings of the (S)DVO chip.
- * adjusted_mode will then correspond to the S(DVO) chip's
- * preferred input timings. */
- struct drm_display_mode requested_mode;
- /* Actual pipe timings ie. what we program into the pipe timing
- * registers. adjusted_mode.crtc_clock is the pipe pixel clock. */
- struct drm_display_mode adjusted_mode;
-
/* Pipe source size (ie. panel fitter input size)
* All planes will be positioned inside this space,
* and get clipped at the edges. */
@@ -476,9 +468,9 @@ struct intel_crtc {
uint32_t cursor_size;
uint32_t cursor_base;
- struct intel_plane_config plane_config;
- struct intel_crtc_config config;
- struct intel_crtc_config *new_config;
+ struct intel_initial_plane_config plane_config;
+ struct intel_crtc_state *config;
+ struct intel_crtc_state *new_config;
bool new_enabled;
/* reset counter value when the last flip was submitted */
@@ -517,7 +509,6 @@ struct intel_plane {
struct drm_i915_gem_object *obj;
bool can_scale;
int max_downscale;
- unsigned int rotation;
/* Since we need to change the watermarks before/after
* enabling/disabling the planes, we need to store the parameters here
@@ -526,6 +517,12 @@ struct intel_plane {
*/
struct intel_plane_wm_parameters wm;
+ /*
+ * NOTE: Do not place new plane state fields here (e.g., when adding
+ * new plane properties). New runtime state should now be placed in
+ * the intel_plane_state structure and accessed via drm_plane->state.
+ */
+
void (*update_plane)(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -595,17 +592,6 @@ struct intel_hdmi {
struct intel_dp_mst_encoder;
#define DP_MAX_DOWNSTREAM_PORTS 0x10
-/**
- * HIGH_RR is the highest eDP panel refresh rate read from EDID
- * LOW_RR is the lowest eDP panel refresh rate found from EDID
- * parsing for same resolution.
- */
-enum edp_drrs_refresh_rate_type {
- DRRS_HIGH_RR,
- DRRS_LOW_RR,
- DRRS_MAX_RR, /* RR count */
-};
-
struct intel_dp {
uint32_t output_reg;
uint32_t aux_ch_ctl_reg;
@@ -661,12 +647,6 @@ struct intel_dp {
bool has_aux_irq,
int send_bytes,
uint32_t aux_clock_divider);
- struct {
- enum drrs_support_type type;
- enum edp_drrs_refresh_rate_type refresh_rate_type;
- struct mutex mutex;
- } drrs_state;
-
};
struct intel_digital_port {
@@ -675,7 +655,7 @@ struct intel_digital_port {
u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
- bool (*hpd_pulse)(struct intel_digital_port *, bool);
+ enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
};
struct intel_dp_mst_encoder {
@@ -856,17 +836,18 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder);
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
-bool intel_ddi_pll_select(struct intel_crtc *crtc);
+bool intel_ddi_pll_select(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state);
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
void intel_ddi_fdi_disable(struct drm_crtc *crtc);
void intel_ddi_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config);
+ struct intel_crtc_state *pipe_config);
void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
void intel_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config);
+ struct intel_crtc_state *pipe_config);
void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
/* intel_frontbuffer.c */
@@ -896,6 +877,8 @@ void intel_frontbuffer_flip(struct drm_device *dev,
intel_frontbuffer_flush(dev, frontbuffer_bits);
}
+int intel_fb_align_height(struct drm_device *dev, int height,
+ unsigned int tiling);
void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
@@ -907,6 +890,7 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv);
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
/* intel_display.c */
+extern const struct drm_plane_funcs intel_plane_funcs;
bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev);
void intel_mark_busy(struct drm_device *dev);
@@ -961,6 +945,14 @@ int intel_prepare_plane_fb(struct drm_plane *plane,
struct drm_framebuffer *fb);
void intel_cleanup_plane_fb(struct drm_plane *plane,
struct drm_framebuffer *fb);
+int intel_plane_atomic_get_property(struct drm_plane *plane,
+ const struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t *val);
+int intel_plane_atomic_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t val);
/* shared dpll functions */
struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
@@ -969,7 +961,8 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
-struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc);
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *state);
void intel_put_shared_dpll(struct intel_crtc *crtc);
void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
@@ -999,11 +992,11 @@ void intel_finish_reset(struct drm_device *dev);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config);
+ struct intel_crtc_state *pipe_config);
void intel_dp_set_m_n(struct intel_crtc *crtc);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
void
-ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
+ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
int dotclock);
bool intel_crtc_active(struct drm_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
@@ -1011,8 +1004,7 @@ void hsw_disable_ips(struct intel_crtc *crtc);
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
- struct intel_crtc_config *pipe_config);
-int intel_format_to_fourcc(int format);
+ struct intel_crtc_state *pipe_config);
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
@@ -1028,16 +1020,15 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder);
void intel_dp_check_link_status(struct intel_dp *intel_dp);
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config);
+ struct intel_crtc_state *pipe_config);
bool intel_dp_is_edp(struct drm_device *dev, enum port port);
-bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
- bool long_hpd);
+enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
+ bool long_hpd);
void intel_edp_backlight_on(struct intel_dp *intel_dp);
void intel_edp_backlight_off(struct intel_dp *intel_dp);
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
void intel_edp_panel_on(struct intel_dp *intel_dp);
void intel_edp_panel_off(struct intel_dp *intel_dp);
-void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
void intel_dp_mst_suspend(struct drm_device *dev);
void intel_dp_mst_resume(struct drm_device *dev);
@@ -1053,6 +1044,11 @@ int intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
uint32_t src_w, uint32_t src_h);
int intel_disable_plane(struct drm_plane *plane);
void intel_plane_destroy(struct drm_plane *plane);
+void intel_edp_drrs_enable(struct intel_dp *intel_dp);
+void intel_edp_drrs_disable(struct intel_dp *intel_dp);
+void intel_edp_drrs_invalidate(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
/* intel_dp_mst.c */
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
@@ -1109,7 +1105,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config);
+ struct intel_crtc_state *pipe_config);
/* intel_lvds.c */
@@ -1144,10 +1140,10 @@ void intel_panel_fini(struct intel_panel *panel);
void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
void intel_pch_panel_fitting(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config,
+ struct intel_crtc_state *pipe_config,
int fitting_mode);
void intel_gmch_panel_fitting(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config,
+ struct intel_crtc_state *pipe_config,
int fitting_mode);
void intel_panel_set_backlight_acpi(struct intel_connector *connector,
u32 level, u32 max);
@@ -1253,6 +1249,21 @@ void intel_pre_disable_primary(struct drm_crtc *crtc);
void intel_tv_init(struct drm_device *dev);
/* intel_atomic.c */
+int intel_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state);
+int intel_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool async);
+int intel_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val);
+struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
+void intel_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+
+/* intel_atomic_plane.c */
+struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane);
struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
void intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 42b6d6f5cecc..ef3df5e3d819 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -24,24 +24,219 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/i915_drm.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_mipi_dsi.h>
#include <linux/slab.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
-#include "intel_dsi_cmd.h"
-/* the sub-encoders aka panel drivers */
-static const struct intel_dsi_device intel_dsi_devices[] = {
+static const struct {
+ u16 panel_id;
+ struct drm_panel * (*init)(struct intel_dsi *intel_dsi, u16 panel_id);
+} intel_dsi_drivers[] = {
{
.panel_id = MIPI_DSI_GENERIC_PANEL_ID,
- .name = "vbt-generic-dsi-vid-mode-display",
- .dev_ops = &vbt_generic_dsi_display_ops,
+ .init = vbt_panel_init,
},
};
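+/* Wait for the LP and HS control/data FIFOs to drain before continuing. */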
+static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 mask;
+
+ mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
+ LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
+
+ if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == mask, 100))
+ DRM_ERROR("DPI FIFOs are not empty\n");
+}
+
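+/* Pack the payload into 32-bit little-endian writes to the given data FIFO register. */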
+static void write_data(struct drm_i915_private *dev_priv, u32 reg,
+ const u8 *data, u32 len)
+{
+ u32 i, j;
+
+ for (i = 0; i < len; i += 4) {
+ u32 val = 0;
+
+ for (j = 0; j < min_t(u32, len - i, 4); j++)
+ val |= *data++ << 8 * j;
+
+ I915_WRITE(reg, val);
+ }
+}
+
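+/* Unpack 32-bit reads of the given data FIFO register into the receive buffer, LSB first. */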
+static void read_data(struct drm_i915_private *dev_priv, u32 reg,
+ u8 *data, u32 len)
+{
+ u32 i, j;
+
+ for (i = 0; i < len; i += 4) {
+ u32 val = I915_READ(reg);
+
+ for (j = 0; j < min_t(u32, len - i, 4); j++)
+ *data++ = val >> 8 * j;
+ }
+}
+
+static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host);
+ struct drm_device *dev = intel_dsi_host->intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_dsi_host->port;
+ struct mipi_dsi_packet packet;
+ ssize_t ret;
+ const u8 *header, *data;
+ u32 data_reg, data_mask, ctrl_reg, ctrl_mask;
+
+ ret = mipi_dsi_create_packet(&packet, msg);
+ if (ret < 0)
+ return ret;
+
+ header = packet.header;
+ data = packet.payload;
+
+ if (msg->flags & MIPI_DSI_MSG_USE_LPM) {
+ data_reg = MIPI_LP_GEN_DATA(port);
+ data_mask = LP_DATA_FIFO_FULL;
+ ctrl_reg = MIPI_LP_GEN_CTRL(port);
+ ctrl_mask = LP_CTRL_FIFO_FULL;
+ } else {
+ data_reg = MIPI_HS_GEN_DATA(port);
+ data_mask = HS_DATA_FIFO_FULL;
+ ctrl_reg = MIPI_HS_GEN_CTRL(port);
+ ctrl_mask = HS_CTRL_FIFO_FULL;
+ }
+
+ /* note: this is never true for reads */
+ if (packet.payload_length) {
+
+ if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & data_mask) == 0, 50))
+ DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
+
+ write_data(dev_priv, data_reg, packet.payload,
+ packet.payload_length);
+ }
+
+ if (msg->rx_len) {
+ I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
+ }
+
+ if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & ctrl_mask) == 0, 50)) {
+ DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
+ }
+
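+ /*
+ * Kick off the transfer by writing the packet header (data type plus
+ * word count or parameters) to the control FIFO.
+ */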
+ I915_WRITE(ctrl_reg, header[2] << 16 | header[1] << 8 | header[0]);
+
+ /* ->rx_len is set only for reads */
+ if (msg->rx_len) {
+ data_mask = GEN_READ_DATA_AVAIL;
+ if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & data_mask) == data_mask, 50))
+ DRM_ERROR("Timeout waiting for read data.\n");
+
+ read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len);
+ }
+
+ /* XXX: fix for reads and writes */
+ return 4 + packet.payload_length;
+}
+
+static int intel_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ return 0;
+}
+
+static int intel_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops intel_dsi_host_ops = {
+ .attach = intel_dsi_host_attach,
+ .detach = intel_dsi_host_detach,
+ .transfer = intel_dsi_host_transfer,
+};
+
+static struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
+ enum port port)
+{
+ struct intel_dsi_host *host;
+ struct mipi_dsi_device *device;
+
+ host = kzalloc(sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return NULL;
+
+ host->base.ops = &intel_dsi_host_ops;
+ host->intel_dsi = intel_dsi;
+ host->port = port;
+
+ /*
+ * We should call mipi_dsi_host_register(&host->base) here, but we don't
+ * have a host->dev, and we don't have OF stuff either. So just use the
+ * dsi framework as a library and hope for the best. Create the dsi
+ * devices by ourselves here too. Need to be careful though, because we
+ * don't initialize any of the driver model devices here.
+ */
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device) {
+ kfree(host);
+ return NULL;
+ }
+
+ device->host = &host->base;
+ host->device = device;
+
+ return host;
+}
+
+/*
+ * send a video mode command
+ *
+ * XXX: commands with data in MIPI_DPI_DATA?
+ */
+static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
+ enum port port)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 mask;
+
+ /* XXX: pipe, hs */
+ if (hs)
+ cmd &= ~DPI_LP_MODE;
+ else
+ cmd |= DPI_LP_MODE;
+
+ /* clear bit */
+ I915_WRITE(MIPI_INTR_STAT(port), SPL_PKT_SENT_INTERRUPT);
+
+ /* XXX: old code skips write if control unchanged */
+ if (cmd == I915_READ(MIPI_DPI_CONTROL(port)))
+ DRM_ERROR("Same special packet %02x twice in a row.\n", cmd);
+
+ I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
+
+ mask = SPL_PKT_SENT_INTERRUPT;
+ if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & mask) == mask, 100))
+ DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
+
+ return 0;
+}
+
static void band_gap_reset(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->dpio_lock);
@@ -56,12 +251,6 @@ static void band_gap_reset(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->dpio_lock);
}
-static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector)
-{
- return container_of(intel_attached_encoder(connector),
- struct intel_dsi, base);
-}
-
static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
{
return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
@@ -78,14 +267,13 @@ static void intel_dsi_hot_plug(struct intel_encoder *encoder)
}
static bool intel_dsi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *config)
+ struct intel_crtc_state *config)
{
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
- struct drm_display_mode *adjusted_mode = &config->adjusted_mode;
- struct drm_display_mode *mode = &config->requested_mode;
+ struct drm_display_mode *adjusted_mode = &config->base.adjusted_mode;
DRM_DEBUG_KMS("\n");
@@ -95,10 +283,6 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
/* DSI uses short packets for sync events, so clear mode flags for DSI */
adjusted_mode->flags = 0;
- if (intel_dsi->dev.dev_ops->mode_fixup)
- return intel_dsi->dev.dev_ops->mode_fixup(&intel_dsi->dev,
- mode, adjusted_mode);
-
return true;
}
@@ -197,23 +381,24 @@ static void intel_dsi_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- enum port port = intel_dsi_pipe_to_port(intel_crtc->pipe);
+ enum port port;
DRM_DEBUG_KMS("\n");
- if (is_cmd_mode(intel_dsi))
- I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4);
- else {
+ if (is_cmd_mode(intel_dsi)) {
+ for_each_dsi_port(port, intel_dsi->ports)
+ I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4);
+ } else {
msleep(20); /* XXX */
- dpi_send_cmd(intel_dsi, TURN_ON, DPI_LP_MODE_EN);
+ for_each_dsi_port(port, intel_dsi->ports)
+ dpi_send_cmd(intel_dsi, TURN_ON, false, port);
msleep(100);
- if (intel_dsi->dev.dev_ops->enable)
- intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
+ drm_panel_enable(intel_dsi->panel);
- wait_for_dsi_fifo_empty(intel_dsi);
+ for_each_dsi_port(port, intel_dsi->ports)
+ wait_for_dsi_fifo_empty(intel_dsi, port);
intel_dsi_port_enable(encoder);
}
@@ -226,6 +411,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
+ enum port port;
u32 tmp;
DRM_DEBUG_KMS("\n");
@@ -237,7 +423,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
I915_WRITE(DPLL(pipe), tmp);
/* update the hw state for DPLL */
- intel_crtc->config.dpll_hw_state.dpll = DPLL_INTEGRATED_CLOCK_VLV |
+ intel_crtc->config->dpll_hw_state.dpll = DPLL_INTEGRATED_CLOCK_VLV |
DPLL_REFA_CLK_ENABLE_VLV;
tmp = I915_READ(DSPCLK_GATE_D);
@@ -249,13 +435,10 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
msleep(intel_dsi->panel_on_delay);
- if (intel_dsi->dev.dev_ops->panel_reset)
- intel_dsi->dev.dev_ops->panel_reset(&intel_dsi->dev);
+ drm_panel_prepare(intel_dsi->panel);
- if (intel_dsi->dev.dev_ops->send_otp_cmds)
- intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev);
-
- wait_for_dsi_fifo_empty(intel_dsi);
+ for_each_dsi_port(port, intel_dsi->ports)
+ wait_for_dsi_fifo_empty(intel_dsi, port);
/* Enable port in pre-enable phase itself because as per hw team
* recommendation, port should be enabled befor plane & pipe */
@@ -275,12 +458,14 @@ static void intel_dsi_enable_nop(struct intel_encoder *encoder)
static void intel_dsi_pre_disable(struct intel_encoder *encoder)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
DRM_DEBUG_KMS("\n");
if (is_vid_mode(intel_dsi)) {
/* Send Shutdown command to the panel in LP mode */
- dpi_send_cmd(intel_dsi, SHUTDOWN, DPI_LP_MODE_EN);
+ for_each_dsi_port(port, intel_dsi->ports)
+ dpi_send_cmd(intel_dsi, SHUTDOWN, false, port);
msleep(10);
}
}
@@ -296,7 +481,8 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
if (is_vid_mode(intel_dsi)) {
- wait_for_dsi_fifo_empty(intel_dsi);
+ for_each_dsi_port(port, intel_dsi->ports)
+ wait_for_dsi_fifo_empty(intel_dsi, port);
intel_dsi_port_disable(encoder);
msleep(2);
@@ -322,10 +508,10 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
}
/* if disable packets are sent before the shutdown packet, then in
* some subsequent enable sequence a turn on packet send error is observed */
- if (intel_dsi->dev.dev_ops->disable)
- intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
+ drm_panel_disable(intel_dsi->panel);
- wait_for_dsi_fifo_empty(intel_dsi);
+ for_each_dsi_port(port, intel_dsi->ports)
+ wait_for_dsi_fifo_empty(intel_dsi, port);
}
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
@@ -387,8 +573,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, val);
- if (intel_dsi->dev.dev_ops->disable_panel_power)
- intel_dsi->dev.dev_ops->disable_panel_power(&intel_dsi->dev);
+ drm_panel_unprepare(intel_dsi->panel);
msleep(intel_dsi->panel_off_delay);
msleep(intel_dsi->panel_pwr_cycle_delay);
@@ -437,7 +622,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
}
static void intel_dsi_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
u32 pclk;
DRM_DEBUG_KMS("\n");
@@ -452,7 +637,7 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
if (!pclk)
return;
- pipe_config->adjusted_mode.crtc_clock = pclk;
+ pipe_config->base.adjusted_mode.crtc_clock = pclk;
pipe_config->port_clock = pclk;
}
@@ -462,7 +647,6 @@ intel_dsi_mode_valid(struct drm_connector *connector,
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
- struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
DRM_DEBUG_KMS("\n");
@@ -478,7 +662,7 @@ intel_dsi_mode_valid(struct drm_connector *connector,
return MODE_PANEL;
}
- return intel_dsi->dev.dev_ops->mode_valid(&intel_dsi->dev, mode);
+ return MODE_OK;
}
/* return txclkesc cycles in terms of divider and duration in us */
@@ -511,7 +695,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- unsigned int bpp = intel_crtc->config.pipe_bpp;
+ unsigned int bpp = intel_crtc->config->pipe_bpp;
unsigned int lane_count = intel_dsi->lane_count;
u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
@@ -566,9 +750,9 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct drm_display_mode *adjusted_mode =
- &intel_crtc->config.adjusted_mode;
+ &intel_crtc->config->base.adjusted_mode;
enum port port;
- unsigned int bpp = intel_crtc->config.pipe_bpp;
+ unsigned int bpp = intel_crtc->config->pipe_bpp;
u32 val, tmp;
u16 mode_hdisplay;
@@ -727,20 +911,7 @@ static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
static enum drm_connector_status
intel_dsi_detect(struct drm_connector *connector, bool force)
{
- struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
- struct intel_encoder *intel_encoder = &intel_dsi->base;
- enum intel_display_power_domain power_domain;
- enum drm_connector_status connector_status;
- struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
-
- DRM_DEBUG_KMS("\n");
- power_domain = intel_display_port_power_domain(intel_encoder);
-
- intel_display_power_get(dev_priv, power_domain);
- connector_status = intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
- intel_display_power_put(dev_priv, power_domain);
-
- return connector_status;
+ return connector_status_connected;
}
static int intel_dsi_get_modes(struct drm_connector *connector)
@@ -766,7 +937,7 @@ static int intel_dsi_get_modes(struct drm_connector *connector)
return 1;
}
-static void intel_dsi_destroy(struct drm_connector *connector)
+static void intel_dsi_connector_destroy(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -776,8 +947,20 @@ static void intel_dsi_destroy(struct drm_connector *connector)
kfree(connector);
}
+static void intel_dsi_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+
+ if (intel_dsi->panel) {
+ drm_panel_detach(intel_dsi->panel);
+ /* XXX: Logically this call belongs in the panel driver. */
+ drm_panel_remove(intel_dsi->panel);
+ }
+ intel_encoder_destroy(encoder);
+}
+
static const struct drm_encoder_funcs intel_dsi_funcs = {
- .destroy = intel_encoder_destroy,
+ .destroy = intel_dsi_encoder_destroy,
};
static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
@@ -789,8 +972,10 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs
static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.dpms = intel_connector_dpms,
.detect = intel_dsi_detect,
- .destroy = intel_dsi_destroy,
+ .destroy = intel_dsi_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_get_property = intel_connector_atomic_get_property,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
void intel_dsi_init(struct drm_device *dev)
@@ -800,9 +985,9 @@ void intel_dsi_init(struct drm_device *dev)
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
struct drm_connector *connector;
- struct drm_display_mode *fixed_mode = NULL;
+ struct drm_display_mode *scan, *fixed_mode = NULL;
struct drm_i915_private *dev_priv = dev->dev_private;
- const struct intel_dsi_device *dsi;
+ enum port port;
unsigned int i;
DRM_DEBUG_KMS("\n");
@@ -851,7 +1036,11 @@ void intel_dsi_init(struct drm_device *dev)
intel_connector->unregister = intel_connector_unregister;
/* Pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI port C */
- if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) {
+ if (dev_priv->vbt.dsi.config->dual_link) {
+ /* XXX: does dual link work on either pipe? */
+ intel_encoder->crtc_mask = (1 << PIPE_A);
+ intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C));
+ } else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) {
intel_encoder->crtc_mask = (1 << PIPE_A);
intel_dsi->ports = (1 << PORT_A);
} else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIC) {
@@ -859,15 +1048,25 @@ void intel_dsi_init(struct drm_device *dev)
intel_dsi->ports = (1 << PORT_C);
}
- for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) {
- dsi = &intel_dsi_devices[i];
- intel_dsi->dev = *dsi;
+ /* Create a DSI host (and a device) for each port. */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ struct intel_dsi_host *host;
+
+ host = intel_dsi_host_init(intel_dsi, port);
+ if (!host)
+ goto err;
- if (dsi->dev_ops->init(&intel_dsi->dev))
+ intel_dsi->dsi_hosts[port] = host;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(intel_dsi_drivers); i++) {
+ intel_dsi->panel = intel_dsi_drivers[i].init(intel_dsi,
+ intel_dsi_drivers[i].panel_id);
+ if (intel_dsi->panel)
break;
}
- if (i == ARRAY_SIZE(intel_dsi_devices)) {
+ if (!intel_dsi->panel) {
DRM_DEBUG_KMS("no device found\n");
goto err;
}
@@ -887,13 +1086,23 @@ void intel_dsi_init(struct drm_device *dev)
drm_connector_register(connector);
- fixed_mode = dsi->dev_ops->get_modes(&intel_dsi->dev);
+ drm_panel_attach(intel_dsi->panel, connector);
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_panel_get_modes(intel_dsi->panel);
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
+ fixed_mode = drm_mode_duplicate(dev, scan);
+ break;
+ }
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+
if (!fixed_mode) {
DRM_DEBUG_KMS("no fixed mode\n");
goto err;
}
- fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
return;
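
For readers following the new command path: a minimal sketch, not part of the patch, of how a caller would submit a command through the drm_mipi_dsi glue added above. The helper name and command bytes are invented; mipi_dsi_generic_write() is the same API the reworked VBT panel code uses, and it lands in intel_dsi_host_transfer() via intel_dsi_host_ops.transfer.

static void example_send_vendor_cmd(struct intel_dsi *intel_dsi)
{
	enum port port;
	static const u8 payload[] = { 0xb0, 0x04 };	/* made-up command bytes */

	for_each_dsi_port(port, intel_dsi->ports) {
		struct mipi_dsi_device *dsi =
			intel_dsi->dsi_hosts[port]->device;

		/* MIPI_DSI_MODE_LPM requests low-power transmission; the
		 * framework turns it into MIPI_DSI_MSG_USE_LPM, which
		 * intel_dsi_host_transfer() checks to pick the LP registers. */
		dsi->mode_flags |= MIPI_DSI_MODE_LPM;
		mipi_dsi_generic_write(dsi, payload, sizeof(payload));
	}
}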
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 8fe2064dd804..2784ac442368 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -26,6 +26,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
#include "intel_drv.h"
/* Dual Link support */
@@ -33,53 +34,13 @@
#define DSI_DUAL_LINK_FRONT_BACK 1
#define DSI_DUAL_LINK_PIXEL_ALT 2
-struct intel_dsi_device {
- unsigned int panel_id;
- const char *name;
- const struct intel_dsi_dev_ops *dev_ops;
- void *dev_priv;
-};
-
-struct intel_dsi_dev_ops {
- bool (*init)(struct intel_dsi_device *dsi);
-
- void (*panel_reset)(struct intel_dsi_device *dsi);
-
- void (*disable_panel_power)(struct intel_dsi_device *dsi);
-
- /* one time programmable commands if needed */
- void (*send_otp_cmds)(struct intel_dsi_device *dsi);
-
- /* This callback must be able to assume DSI commands can be sent */
- void (*enable)(struct intel_dsi_device *dsi);
-
- /* This callback must be able to assume DSI commands can be sent */
- void (*disable)(struct intel_dsi_device *dsi);
-
- int (*mode_valid)(struct intel_dsi_device *dsi,
- struct drm_display_mode *mode);
-
- bool (*mode_fixup)(struct intel_dsi_device *dsi,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-
- void (*mode_set)(struct intel_dsi_device *dsi,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-
- enum drm_connector_status (*detect)(struct intel_dsi_device *dsi);
-
- bool (*get_hw_state)(struct intel_dsi_device *dev);
-
- struct drm_display_mode *(*get_modes)(struct intel_dsi_device *dsi);
-
- void (*destroy) (struct intel_dsi_device *dsi);
-};
+struct intel_dsi_host;
struct intel_dsi {
struct intel_encoder base;
- struct intel_dsi_device dev;
+ struct drm_panel *panel;
+ struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS];
struct intel_connector *attached_connector;
@@ -137,16 +98,18 @@ struct intel_dsi {
u16 panel_pwr_cycle_delay;
};
-/* XXX: Transitional before dual port configuration */
-static inline enum port intel_dsi_pipe_to_port(enum pipe pipe)
-{
- if (pipe == PIPE_A)
- return PORT_A;
- else if (pipe == PIPE_B)
- return PORT_C;
+struct intel_dsi_host {
+ struct mipi_dsi_host base;
+ struct intel_dsi *intel_dsi;
+ enum port port;
+
+ /* our little hack */
+ struct mipi_dsi_device *device;
+};
- WARN(1, "DSI on pipe %c, assuming port C\n", pipe_name(pipe));
- return PORT_C;
+static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h)
+{
+ return container_of(h, struct intel_dsi_host, base);
}
#define for_each_dsi_port(__port, __ports_mask) \
@@ -162,6 +125,6 @@ extern void vlv_enable_dsi_pll(struct intel_encoder *encoder);
extern void vlv_disable_dsi_pll(struct intel_encoder *encoder);
extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
-extern struct intel_dsi_dev_ops vbt_generic_dsi_display_ops;
+struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id);
#endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c
deleted file mode 100644
index 562811c1a9d2..000000000000
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.c
+++ /dev/null
@@ -1,432 +0,0 @@
-/*
- * Copyright © 2013 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Author: Jani Nikula <jani.nikula@intel.com>
- */
-
-#include <linux/export.h>
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <video/mipi_display.h>
-#include "i915_drv.h"
-#include "intel_drv.h"
-#include "intel_dsi.h"
-#include "intel_dsi_cmd.h"
-
-/*
- * XXX: MIPI_DATA_ADDRESS, MIPI_DATA_LENGTH, MIPI_COMMAND_LENGTH, and
- * MIPI_COMMAND_ADDRESS registers.
- *
- * Apparently these registers provide a MIPI adapter level way to send (lots of)
- * commands and data to the receiver, without having to write the commands and
- * data to MIPI_{HS,LP}_GEN_{CTRL,DATA} registers word by word.
- *
- * Presumably for anything other than MIPI_DCS_WRITE_MEMORY_START and
- * MIPI_DCS_WRITE_MEMORY_CONTINUE (which are used to update the external
- * framebuffer in command mode displays) these are just an optimization that can
- * come later.
- *
- * For memory writes, these should probably be used for performance.
- */
-
-static void print_stat(struct intel_dsi *intel_dsi, enum port port)
-{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 val;
-
- val = I915_READ(MIPI_INTR_STAT(port));
-
-#define STAT_BIT(val, bit) (val) & (bit) ? " " #bit : ""
- DRM_DEBUG_KMS("MIPI_INTR_STAT(%c) = %08x"
- "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
- "\n", port_name(port), val,
- STAT_BIT(val, TEARING_EFFECT),
- STAT_BIT(val, SPL_PKT_SENT_INTERRUPT),
- STAT_BIT(val, GEN_READ_DATA_AVAIL),
- STAT_BIT(val, LP_GENERIC_WR_FIFO_FULL),
- STAT_BIT(val, HS_GENERIC_WR_FIFO_FULL),
- STAT_BIT(val, RX_PROT_VIOLATION),
- STAT_BIT(val, RX_INVALID_TX_LENGTH),
- STAT_BIT(val, ACK_WITH_NO_ERROR),
- STAT_BIT(val, TURN_AROUND_ACK_TIMEOUT),
- STAT_BIT(val, LP_RX_TIMEOUT),
- STAT_BIT(val, HS_TX_TIMEOUT),
- STAT_BIT(val, DPI_FIFO_UNDERRUN),
- STAT_BIT(val, LOW_CONTENTION),
- STAT_BIT(val, HIGH_CONTENTION),
- STAT_BIT(val, TXDSI_VC_ID_INVALID),
- STAT_BIT(val, TXDSI_DATA_TYPE_NOT_RECOGNISED),
- STAT_BIT(val, TXCHECKSUM_ERROR),
- STAT_BIT(val, TXECC_MULTIBIT_ERROR),
- STAT_BIT(val, TXECC_SINGLE_BIT_ERROR),
- STAT_BIT(val, TXFALSE_CONTROL_ERROR),
- STAT_BIT(val, RXDSI_VC_ID_INVALID),
- STAT_BIT(val, RXDSI_DATA_TYPE_NOT_REGOGNISED),
- STAT_BIT(val, RXCHECKSUM_ERROR),
- STAT_BIT(val, RXECC_MULTIBIT_ERROR),
- STAT_BIT(val, RXECC_SINGLE_BIT_ERROR),
- STAT_BIT(val, RXFALSE_CONTROL_ERROR),
- STAT_BIT(val, RXHS_RECEIVE_TIMEOUT_ERROR),
- STAT_BIT(val, RX_LP_TX_SYNC_ERROR),
- STAT_BIT(val, RXEXCAPE_MODE_ENTRY_ERROR),
- STAT_BIT(val, RXEOT_SYNC_ERROR),
- STAT_BIT(val, RXSOT_SYNC_ERROR),
- STAT_BIT(val, RXSOT_ERROR));
-#undef STAT_BIT
-}
-
-enum dsi_type {
- DSI_DCS,
- DSI_GENERIC,
-};
-
-/* enable or disable command mode hs transmissions */
-void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable,
- enum port port)
-{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 temp;
- u32 mask = DBI_FIFO_EMPTY;
-
- if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == mask, 50))
- DRM_ERROR("Timeout waiting for DBI FIFO empty\n");
-
- temp = I915_READ(MIPI_HS_LP_DBI_ENABLE(port));
- temp &= DBI_HS_LP_MODE_MASK;
- I915_WRITE(MIPI_HS_LP_DBI_ENABLE(port), enable ? DBI_HS_MODE : DBI_LP_MODE);
-
- intel_dsi->hs = enable;
-}
-
-static int dsi_vc_send_short(struct intel_dsi *intel_dsi, int channel,
- u8 data_type, u16 data, enum port port)
-{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 ctrl_reg;
- u32 ctrl;
- u32 mask;
-
- DRM_DEBUG_KMS("channel %d, data_type %d, data %04x\n",
- channel, data_type, data);
-
- if (intel_dsi->hs) {
- ctrl_reg = MIPI_HS_GEN_CTRL(port);
- mask = HS_CTRL_FIFO_FULL;
- } else {
- ctrl_reg = MIPI_LP_GEN_CTRL(port);
- mask = LP_CTRL_FIFO_FULL;
- }
-
- if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == 0, 50)) {
- DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
- print_stat(intel_dsi, port);
- }
-
- /*
- * Note: This function is also used for long packets, with length passed
- * as data, since SHORT_PACKET_PARAM_SHIFT ==
- * LONG_PACKET_WORD_COUNT_SHIFT.
- */
- ctrl = data << SHORT_PACKET_PARAM_SHIFT |
- channel << VIRTUAL_CHANNEL_SHIFT |
- data_type << DATA_TYPE_SHIFT;
-
- I915_WRITE(ctrl_reg, ctrl);
-
- return 0;
-}
-
-static int dsi_vc_send_long(struct intel_dsi *intel_dsi, int channel,
- u8 data_type, const u8 *data, int len, enum port port)
-{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 data_reg;
- int i, j, n;
- u32 mask;
-
- DRM_DEBUG_KMS("channel %d, data_type %d, len %04x\n",
- channel, data_type, len);
-
- if (intel_dsi->hs) {
- data_reg = MIPI_HS_GEN_DATA(port);
- mask = HS_DATA_FIFO_FULL;
- } else {
- data_reg = MIPI_LP_GEN_DATA(port);
- mask = LP_DATA_FIFO_FULL;
- }
-
- if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == 0, 50))
- DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
-
- for (i = 0; i < len; i += n) {
- u32 val = 0;
- n = min_t(int, len - i, 4);
-
- for (j = 0; j < n; j++)
- val |= *data++ << 8 * j;
-
- I915_WRITE(data_reg, val);
- /* XXX: check for data fifo full, once that is set, write 4
- * dwords, then wait for not set, then continue. */
- }
-
- return dsi_vc_send_short(intel_dsi, channel, data_type, len, port);
-}
-
-static int dsi_vc_write_common(struct intel_dsi *intel_dsi,
- int channel, const u8 *data, int len,
- enum dsi_type type, enum port port)
-{
- int ret;
-
- if (len == 0) {
- BUG_ON(type == DSI_GENERIC);
- ret = dsi_vc_send_short(intel_dsi, channel,
- MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM,
- 0, port);
- } else if (len == 1) {
- ret = dsi_vc_send_short(intel_dsi, channel,
- type == DSI_GENERIC ?
- MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
- MIPI_DSI_DCS_SHORT_WRITE, data[0],
- port);
- } else if (len == 2) {
- ret = dsi_vc_send_short(intel_dsi, channel,
- type == DSI_GENERIC ?
- MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
- MIPI_DSI_DCS_SHORT_WRITE_PARAM,
- (data[1] << 8) | data[0], port);
- } else {
- ret = dsi_vc_send_long(intel_dsi, channel,
- type == DSI_GENERIC ?
- MIPI_DSI_GENERIC_LONG_WRITE :
- MIPI_DSI_DCS_LONG_WRITE, data, len,
- port);
- }
-
- return ret;
-}
-
-int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
- const u8 *data, int len, enum port port)
-{
- return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_DCS,
- port);
-}
-
-int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
- const u8 *data, int len, enum port port)
-{
- return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_GENERIC,
- port);
-}
-
-static int dsi_vc_dcs_send_read_request(struct intel_dsi *intel_dsi,
- int channel, u8 dcs_cmd, enum port port)
-{
- return dsi_vc_send_short(intel_dsi, channel, MIPI_DSI_DCS_READ,
- dcs_cmd, port);
-}
-
-static int dsi_vc_generic_send_read_request(struct intel_dsi *intel_dsi,
- int channel, u8 *reqdata,
- int reqlen, enum port port)
-{
- u16 data;
- u8 data_type;
-
- switch (reqlen) {
- case 0:
- data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
- data = 0;
- break;
- case 1:
- data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
- data = reqdata[0];
- break;
- case 2:
- data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
- data = (reqdata[1] << 8) | reqdata[0];
- break;
- default:
- BUG();
- }
-
- return dsi_vc_send_short(intel_dsi, channel, data_type, data, port);
-}
-
-static int dsi_read_data_return(struct intel_dsi *intel_dsi,
- u8 *buf, int buflen, enum port port)
-{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i, len = 0;
- u32 data_reg, val;
-
- if (intel_dsi->hs) {
- data_reg = MIPI_HS_GEN_DATA(port);
- } else {
- data_reg = MIPI_LP_GEN_DATA(port);
- }
-
- while (len < buflen) {
- val = I915_READ(data_reg);
- for (i = 0; i < 4 && len < buflen; i++, len++)
- buf[len] = val >> 8 * i;
- }
-
- return len;
-}
-
-int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
- u8 *buf, int buflen, enum port port)
-{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 mask;
- int ret;
-
- /*
- * XXX: should issue multiple read requests and reads if request is
- * longer than MIPI_MAX_RETURN_PKT_SIZE
- */
-
- I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
-
- ret = dsi_vc_dcs_send_read_request(intel_dsi, channel, dcs_cmd, port);
- if (ret)
- return ret;
-
- mask = GEN_READ_DATA_AVAIL;
- if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & mask) == mask, 50))
- DRM_ERROR("Timeout waiting for read data.\n");
-
- ret = dsi_read_data_return(intel_dsi, buf, buflen, port);
- if (ret < 0)
- return ret;
-
- if (ret != buflen)
- return -EIO;
-
- return 0;
-}
-
-int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
- u8 *reqdata, int reqlen, u8 *buf, int buflen, enum port port)
-{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 mask;
- int ret;
-
- /*
- * XXX: should issue multiple read requests and reads if request is
- * longer than MIPI_MAX_RETURN_PKT_SIZE
- */
-
- I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
-
- ret = dsi_vc_generic_send_read_request(intel_dsi, channel, reqdata,
- reqlen, port);
- if (ret)
- return ret;
-
- mask = GEN_READ_DATA_AVAIL;
- if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & mask) == mask, 50))
- DRM_ERROR("Timeout waiting for read data.\n");
-
- ret = dsi_read_data_return(intel_dsi, buf, buflen, port);
- if (ret < 0)
- return ret;
-
- if (ret != buflen)
- return -EIO;
-
- return 0;
-}
-
-/*
- * send a video mode command
- *
- * XXX: commands with data in MIPI_DPI_DATA?
- */
-int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs)
-{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- enum port port;
- u32 mask;
-
- /* XXX: pipe, hs */
- if (hs)
- cmd &= ~DPI_LP_MODE;
- else
- cmd |= DPI_LP_MODE;
-
- for_each_dsi_port(port, intel_dsi->ports) {
- /* clear bit */
- I915_WRITE(MIPI_INTR_STAT(port), SPL_PKT_SENT_INTERRUPT);
-
- /* XXX: old code skips write if control unchanged */
- if (cmd == I915_READ(MIPI_DPI_CONTROL(port)))
- DRM_ERROR("Same special packet %02x twice in a row.\n",
- cmd);
-
- I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
-
- mask = SPL_PKT_SENT_INTERRUPT;
- if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & mask) == mask,
- 100))
- DRM_ERROR("Video mode command 0x%08x send failed.\n",
- cmd);
- }
-
- return 0;
-}
-
-void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi)
-{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum port port = intel_dsi_pipe_to_port(intel_crtc->pipe);
- u32 mask;
-
- mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
- LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
-
- if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == mask, 100))
- DRM_ERROR("DPI FIFOs are not empty\n");
-}
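
The removed dsi_vc_* helpers have direct equivalents in the standard drm_mipi_dsi API that the VBT panel code switches to. Below is a hedged sketch of how one of the old convenience wrappers (dsi_vc_dcs_write_1) would look on top of the new API; the helper name is hypothetical, and the virtual channel now comes from the mipi_dsi_device itself.

static int example_dcs_write_1(struct mipi_dsi_device *dsi, u8 cmd, u8 param)
{
	u8 buf[2] = { cmd, param };
	ssize_t ret;

	/* DCS vs. generic and LP vs. HS are chosen by the call used and by
	 * dsi->mode_flags; the virtual channel is taken from dsi->channel. */
	ret = mipi_dsi_dcs_write_buffer(dsi, buf, sizeof(buf));

	return ret < 0 ? ret : 0;
}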
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.h b/drivers/gpu/drm/i915/intel_dsi_cmd.h
index 326a5ac55561..886779030f1a 100644
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.h
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.h
@@ -33,85 +33,7 @@
#include "intel_drv.h"
#include "intel_dsi.h"
-#define DPI_LP_MODE_EN false
-#define DPI_HS_MODE_EN true
-
void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable,
enum port port);
-int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
- const u8 *data, int len, enum port port);
-
-int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
- const u8 *data, int len, enum port port);
-
-int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
- u8 *buf, int buflen, enum port port);
-
-int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
- u8 *reqdata, int reqlen, u8 *buf, int buflen, enum port port);
-
-int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs);
-void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi);
-
-/* XXX: questionable write helpers */
-static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
- int channel, u8 dcs_cmd, enum port port)
-{
- return dsi_vc_dcs_write(intel_dsi, channel, &dcs_cmd, 1, port);
-}
-
-static inline int dsi_vc_dcs_write_1(struct intel_dsi *intel_dsi,
- int channel, u8 dcs_cmd, u8 param, enum port port)
-{
- u8 buf[2] = { dcs_cmd, param };
- return dsi_vc_dcs_write(intel_dsi, channel, buf, 2, port);
-}
-
-static inline int dsi_vc_generic_write_0(struct intel_dsi *intel_dsi,
- int channel, enum port port)
-{
- return dsi_vc_generic_write(intel_dsi, channel, NULL, 0, port);
-}
-
-static inline int dsi_vc_generic_write_1(struct intel_dsi *intel_dsi,
- int channel, u8 param, enum port port)
-{
- return dsi_vc_generic_write(intel_dsi, channel, &param, 1, port);
-}
-
-static inline int dsi_vc_generic_write_2(struct intel_dsi *intel_dsi,
- int channel, u8 param1, u8 param2, enum port port)
-{
- u8 buf[2] = { param1, param2 };
- return dsi_vc_generic_write(intel_dsi, channel, buf, 2, port);
-}
-
-/* XXX: questionable read helpers */
-static inline int dsi_vc_generic_read_0(struct intel_dsi *intel_dsi,
- int channel, u8 *buf, int buflen, enum port port)
-{
- return dsi_vc_generic_read(intel_dsi, channel, NULL, 0, buf, buflen,
- port);
-}
-
-static inline int dsi_vc_generic_read_1(struct intel_dsi *intel_dsi,
- int channel, u8 param, u8 *buf,
- int buflen, enum port port)
-{
- return dsi_vc_generic_read(intel_dsi, channel, &param, 1, buf, buflen,
- port);
-}
-
-static inline int dsi_vc_generic_read_2(struct intel_dsi *intel_dsi,
- int channel, u8 param1, u8 param2,
- u8 *buf, int buflen, enum port port)
-{
- u8 req[2] = { param1, param2 };
-
- return dsi_vc_generic_read(intel_dsi, channel, req, 2, buf, buflen,
- port);
-}
-
-
#endif /* _INTEL_DSI_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index 5493aef5a6a3..d2cd8d5b27a1 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -28,6 +28,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/i915_drm.h>
+#include <drm/drm_panel.h>
#include <linux/slab.h>
#include <video/mipi_display.h>
#include <asm/intel-mid.h>
@@ -35,7 +36,16 @@
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
-#include "intel_dsi_cmd.h"
+
+struct vbt_panel {
+ struct drm_panel panel;
+ struct intel_dsi *intel_dsi;
+};
+
+static inline struct vbt_panel *to_vbt_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct vbt_panel, panel);
+}
#define MIPI_TRANSFER_MODE_SHIFT 0
#define MIPI_VIRTUAL_CHANNEL_SHIFT 1
@@ -99,16 +109,21 @@ static inline enum port intel_dsi_seq_port_to_port(u8 port)
return port ? PORT_C : PORT_A;
}
-static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data)
+static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
+ const u8 *data)
{
- u8 type, byte, mode, vc, seq_port;
+ struct mipi_dsi_device *dsi_device;
+ u8 type, flags, seq_port;
u16 len;
enum port port;
- byte = *data++;
- mode = (byte >> MIPI_TRANSFER_MODE_SHIFT) & 0x1;
- vc = (byte >> MIPI_VIRTUAL_CHANNEL_SHIFT) & 0x3;
- seq_port = (byte >> MIPI_PORT_SHIFT) & 0x3;
+ flags = *data++;
+ type = *data++;
+
+ len = *((u16 *) data);
+ data += 2;
+
+ seq_port = (flags >> MIPI_PORT_SHIFT) & 3;
/* For DSI single link on Port A & C, the seq_port value which is
* parsed from Sequence Block#53 of VBT has been set to 0
@@ -119,24 +134,29 @@ static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data)
port = PORT_C;
else
port = intel_dsi_seq_port_to_port(seq_port);
- /* LP or HS mode */
- intel_dsi->hs = mode;
- /* get packet type and increment the pointer */
- type = *data++;
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+ if (!dsi_device) {
+ DRM_DEBUG_KMS("no dsi device for port %c\n", port_name(port));
+ goto out;
+ }
- len = *((u16 *) data);
- data += 2;
+ if ((flags >> MIPI_TRANSFER_MODE_SHIFT) & 1)
+ dsi_device->mode_flags &= ~MIPI_DSI_MODE_LPM;
+ else
+ dsi_device->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ dsi_device->channel = (flags >> MIPI_VIRTUAL_CHANNEL_SHIFT) & 3;
switch (type) {
case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
- dsi_vc_generic_write_0(intel_dsi, vc, port);
+ mipi_dsi_generic_write(dsi_device, NULL, 0);
break;
case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
- dsi_vc_generic_write_1(intel_dsi, vc, *data, port);
+ mipi_dsi_generic_write(dsi_device, data, 1);
break;
case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
- dsi_vc_generic_write_2(intel_dsi, vc, *data, *(data + 1), port);
+ mipi_dsi_generic_write(dsi_device, data, 2);
break;
case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
@@ -144,30 +164,31 @@ static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data)
DRM_DEBUG_DRIVER("Generic Read not yet implemented or used\n");
break;
case MIPI_DSI_GENERIC_LONG_WRITE:
- dsi_vc_generic_write(intel_dsi, vc, data, len, port);
+ mipi_dsi_generic_write(dsi_device, data, len);
break;
case MIPI_DSI_DCS_SHORT_WRITE:
- dsi_vc_dcs_write_0(intel_dsi, vc, *data, port);
+ mipi_dsi_dcs_write_buffer(dsi_device, data, 1);
break;
case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
- dsi_vc_dcs_write_1(intel_dsi, vc, *data, *(data + 1), port);
+ mipi_dsi_dcs_write_buffer(dsi_device, data, 2);
break;
case MIPI_DSI_DCS_READ:
DRM_DEBUG_DRIVER("DCS Read not yet implemented or used\n");
break;
case MIPI_DSI_DCS_LONG_WRITE:
- dsi_vc_dcs_write(intel_dsi, vc, data, len, port);
+ mipi_dsi_dcs_write_buffer(dsi_device, data, len);
break;
}
+out:
data += len;
return data;
}
-static u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, u8 *data)
+static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
{
- u32 delay = *((u32 *) data);
+ u32 delay = *((const u32 *) data);
usleep_range(delay, delay + 10);
data += 4;
@@ -175,7 +196,7 @@ static u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, u8 *data)
return data;
}
-static u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, u8 *data)
+static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
u8 gpio, action;
u16 function, pad;
@@ -208,7 +229,8 @@ static u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, u8 *data)
return data;
}
-typedef u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi, u8 *data);
+typedef const u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi,
+ const u8 *data);
static const fn_mipi_elem_exec exec_elem[] = {
NULL, /* reserved */
mipi_exec_send_packet,
@@ -232,13 +254,12 @@ static const char * const seq_name[] = {
"MIPI_SEQ_DEASSERT_RESET"
};
-static void generic_exec_sequence(struct intel_dsi *intel_dsi, char *sequence)
+static void generic_exec_sequence(struct intel_dsi *intel_dsi, const u8 *data)
{
- u8 *data = sequence;
fn_mipi_elem_exec mipi_elem_exec;
int index;
- if (!sequence)
+ if (!data)
return;
DRM_DEBUG_DRIVER("Starting MIPI sequence - %s\n", seq_name[*data]);
@@ -271,14 +292,103 @@ static void generic_exec_sequence(struct intel_dsi *intel_dsi, char *sequence)
}
}
-static bool generic_init(struct intel_dsi_device *dsi)
+static int vbt_panel_prepare(struct drm_panel *panel)
+{
+ struct vbt_panel *vbt_panel = to_vbt_panel(panel);
+ struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const u8 *sequence;
+
+ sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET];
+ generic_exec_sequence(intel_dsi, sequence);
+
+ sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+ generic_exec_sequence(intel_dsi, sequence);
+
+ return 0;
+}
+
+static int vbt_panel_unprepare(struct drm_panel *panel)
+{
+ struct vbt_panel *vbt_panel = to_vbt_panel(panel);
+ struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const u8 *sequence;
+
+ sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET];
+ generic_exec_sequence(intel_dsi, sequence);
+
+ return 0;
+}
+
+static int vbt_panel_enable(struct drm_panel *panel)
+{
+ struct vbt_panel *vbt_panel = to_vbt_panel(panel);
+ struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const u8 *sequence;
+
+ sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON];
+ generic_exec_sequence(intel_dsi, sequence);
+
+ return 0;
+}
+
+static int vbt_panel_disable(struct drm_panel *panel)
+{
+ struct vbt_panel *vbt_panel = to_vbt_panel(panel);
+ struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const u8 *sequence;
+
+ sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_OFF];
+ generic_exec_sequence(intel_dsi, sequence);
+
+ return 0;
+}
+
+static int vbt_panel_get_modes(struct drm_panel *panel)
+{
+ struct vbt_panel *vbt_panel = to_vbt_panel(panel);
+ struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_display_mode *mode;
+
+ if (!panel->connector)
+ return 0;
+
+ mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
+ if (!mode)
+ return 0;
+
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(panel->connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs vbt_panel_funcs = {
+ .disable = vbt_panel_disable,
+ .unprepare = vbt_panel_unprepare,
+ .prepare = vbt_panel_prepare,
+ .enable = vbt_panel_enable,
+ .get_modes = vbt_panel_get_modes,
+};
+
+struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
{
- struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
+ struct vbt_panel *vbt_panel;
u32 bits_per_pixel = 24;
u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui;
u32 ui_num, ui_den;
@@ -288,6 +398,7 @@ static bool generic_init(struct intel_dsi_device *dsi)
u32 lp_to_hs_switch, hs_to_lp_switch;
u32 pclk, computed_ddr;
u16 burst_mode_ratio;
+ enum port port;
DRM_DEBUG_KMS("\n");
@@ -298,9 +409,6 @@ static bool generic_init(struct intel_dsi_device *dsi)
intel_dsi->dual_link = mipi_config->dual_link;
intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
- if (intel_dsi->dual_link)
- intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C));
-
if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB666)
bits_per_pixel = 18;
else if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB565)
@@ -345,7 +453,7 @@ static bool generic_init(struct intel_dsi_device *dsi)
if (mipi_config->target_burst_mode_freq <
computed_ddr) {
DRM_ERROR("Burst mode freq is less than computed\n");
- return false;
+ return NULL;
}
burst_mode_ratio = DIV_ROUND_UP(
@@ -355,7 +463,7 @@ static bool generic_init(struct intel_dsi_device *dsi)
pclk = DIV_ROUND_UP(pclk * burst_mode_ratio, 100);
} else {
DRM_ERROR("Burst mode target is not set\n");
- return false;
+ return NULL;
}
} else
burst_mode_ratio = 100;
@@ -556,110 +664,18 @@ static bool generic_init(struct intel_dsi_device *dsi)
intel_dsi->panel_off_delay = pps->panel_off_delay / 10;
intel_dsi->panel_pwr_cycle_delay = pps->panel_power_cycle_delay / 10;
- return true;
-}
-
-static int generic_mode_valid(struct intel_dsi_device *dsi,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
-static bool generic_mode_fixup(struct intel_dsi_device *dsi,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode) {
- return true;
-}
-
-static void generic_panel_reset(struct intel_dsi_device *dsi)
-{
- struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET];
-
- generic_exec_sequence(intel_dsi, sequence);
-}
-
-static void generic_disable_panel_power(struct intel_dsi_device *dsi)
-{
- struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET];
-
- generic_exec_sequence(intel_dsi, sequence);
-}
-
-static void generic_send_otp_cmds(struct intel_dsi_device *dsi)
-{
- struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+ /* This is cheating a bit with the cleanup. */
+ vbt_panel = devm_kzalloc(dev->dev, sizeof(*vbt_panel), GFP_KERNEL);
- generic_exec_sequence(intel_dsi, sequence);
-}
-
-static void generic_enable(struct intel_dsi_device *dsi)
-{
- struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON];
-
- generic_exec_sequence(intel_dsi, sequence);
-}
-
-static void generic_disable(struct intel_dsi_device *dsi)
-{
- struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_OFF];
+ vbt_panel->intel_dsi = intel_dsi;
+ drm_panel_init(&vbt_panel->panel);
+ vbt_panel->panel.funcs = &vbt_panel_funcs;
+ drm_panel_add(&vbt_panel->panel);
- generic_exec_sequence(intel_dsi, sequence);
-}
-
-static enum drm_connector_status generic_detect(struct intel_dsi_device *dsi)
-{
- return connector_status_connected;
-}
-
-static bool generic_get_hw_state(struct intel_dsi_device *dev)
-{
- return true;
-}
-
-static struct drm_display_mode *generic_get_modes(struct intel_dsi_device *dsi)
-{
- struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ /* a regular driver would get the device in probe */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ mipi_dsi_attach(intel_dsi->dsi_hosts[port]->device);
+ }
- dev_priv->vbt.lfp_lvds_vbt_mode->type |= DRM_MODE_TYPE_PREFERRED;
- return dev_priv->vbt.lfp_lvds_vbt_mode;
+ return &vbt_panel->panel;
}
-
-static void generic_destroy(struct intel_dsi_device *dsi) { }
-
-/* Callbacks. We might not need them all. */
-struct intel_dsi_dev_ops vbt_generic_dsi_display_ops = {
- .init = generic_init,
- .mode_valid = generic_mode_valid,
- .mode_fixup = generic_mode_fixup,
- .panel_reset = generic_panel_reset,
- .disable_panel_power = generic_disable_panel_power,
- .send_otp_cmds = generic_send_otp_cmds,
- .enable = generic_enable,
- .disable = generic_disable,
- .detect = generic_detect,
- .get_hw_state = generic_get_hw_state,
- .get_modes = generic_get_modes,
- .destroy = generic_destroy,
-};
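
To make the new split of responsibilities concrete, here is a minimal sketch (not taken from the patch) of the power-on ordering as seen from the encoder; each drm_panel call dispatches into the vbt_panel_funcs above and replays the matching VBT sequence.

static void example_panel_power_on(struct intel_dsi *intel_dsi)
{
	/* vbt_panel_prepare(): MIPI_SEQ_ASSERT_RESET + MIPI_SEQ_INIT_OTP */
	drm_panel_prepare(intel_dsi->panel);

	/* ... the DSI port and pipe are enabled in between ... */

	/* vbt_panel_enable(): MIPI_SEQ_DISPLAY_ON */
	drm_panel_enable(intel_dsi->panel);
}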
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index e40e3df33517..d8579510beb0 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -27,6 +27,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
@@ -144,7 +145,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
}
static void intel_dvo_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
@@ -160,9 +161,9 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
else
flags |= DRM_MODE_FLAG_NVSYNC;
- pipe_config->adjusted_mode.flags |= flags;
+ pipe_config->base.adjusted_mode.flags |= flags;
- pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+ pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
static void intel_disable_dvo(struct intel_encoder *encoder)
@@ -186,8 +187,8 @@ static void intel_enable_dvo(struct intel_encoder *encoder)
u32 temp = I915_READ(dvo_reg);
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
- &crtc->config.requested_mode,
- &crtc->config.adjusted_mode);
+ &crtc->config->base.mode,
+ &crtc->config->base.adjusted_mode);
I915_WRITE(dvo_reg, temp | DVO_ENABLE);
I915_READ(dvo_reg);
@@ -200,7 +201,7 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
{
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
struct drm_crtc *crtc;
- struct intel_crtc_config *config;
+ struct intel_crtc_state *config;
/* dvo supports only 2 dpms states. */
if (mode != DRM_MODE_DPMS_ON)
@@ -221,7 +222,7 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
/* We call connector dpms manually below in case pipe dpms doesn't
* change due to cloning. */
if (mode == DRM_MODE_DPMS_ON) {
- config = &to_intel_crtc(crtc)->config;
+ config = to_intel_crtc(crtc)->config;
intel_dvo->base.connectors_active = true;
@@ -261,10 +262,10 @@ intel_dvo_mode_valid(struct drm_connector *connector,
}
static bool intel_dvo_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
/* If we have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
@@ -295,7 +296,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
int pipe = crtc->pipe;
u32 dvo_val;
@@ -390,6 +391,8 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.detect = intel_dvo_detect,
.destroy = intel_dvo_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_get_property = intel_connector_atomic_get_property,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
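
Much of the churn in this and the following files (intel_fbc.c, intel_fbdev.c, intel_hdmi.c) is the intel_crtc_config to intel_crtc_state conversion. A small hedged sketch of the resulting access pattern (the function name is invented): crtc->config is now a pointer to intel_crtc_state, and the core DRM fields live in its embedded base state.

static int example_crtc_clock(struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *config = intel_crtc->config;

	/* drm_crtc_state fields (mode, adjusted_mode, ...) sit in ->base */
	return config->base.adjusted_mode.crtc_clock;
}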
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 4daceaeeb30d..624d1d92d284 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -182,7 +182,7 @@ static void snb_fbc_blit_update(struct drm_device *dev)
/* Blitter is part of Media powerwell on VLV. No impact of
* this param on other platforms for now */
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
@@ -195,7 +195,7 @@ static void snb_fbc_blit_update(struct drm_device *dev)
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
POSTING_READ(GEN6_BLITTER_ECOSKPD);
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
}
static void ilk_fbc_enable(struct drm_crtc *crtc)
@@ -542,7 +542,7 @@ void intel_fbc_update(struct drm_device *dev)
intel_crtc = to_intel_crtc(crtc);
fb = crtc->primary->fb;
obj = intel_fb_obj(fb);
- adjusted_mode = &intel_crtc->config.adjusted_mode;
+ adjusted_mode = &intel_crtc->config->base.adjusted_mode;
if (i915.enable_fbc < 0) {
if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
@@ -572,8 +572,8 @@ void intel_fbc_update(struct drm_device *dev)
max_width = 2048;
max_height = 1536;
}
- if (intel_crtc->config.pipe_src_w > max_width ||
- intel_crtc->config.pipe_src_h > max_height) {
+ if (intel_crtc->config->pipe_src_w > max_width ||
+ intel_crtc->config->pipe_src_h > max_height) {
if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
DRM_DEBUG_KMS("mode too large for compression, disabling\n");
goto out_disable;
@@ -595,7 +595,7 @@ void intel_fbc_update(struct drm_device *dev)
goto out_disable;
}
if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
- to_intel_plane(crtc->primary)->rotation != BIT(DRM_ROTATE_0)) {
+ crtc->primary->state->rotation != BIT(DRM_ROTATE_0)) {
if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
goto out_disable;
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 850cf7d6578c..3001a8674611 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -443,7 +443,7 @@ retry:
DRM_DEBUG_KMS("looking for current mode on connector %s\n",
connector->name);
intel_mode_from_pipe_config(&encoder->crtc->hwmode,
- &to_intel_crtc(encoder->crtc)->config);
+ to_intel_crtc(encoder->crtc)->config);
modes[i] = &encoder->crtc->hwmode;
}
crtcs[i] = new_crtc;
@@ -531,7 +531,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
struct intel_framebuffer *fb = NULL;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
- struct intel_plane_config *plane_config = NULL;
+ struct intel_initial_plane_config *plane_config = NULL;
unsigned int max_size = 0;
if (!i915.fastboot)
@@ -581,7 +581,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
* pipe. Note we need to use the selected fb's pitch and bpp
* rather than the current pipe's, since they differ.
*/
- cur_size = intel_crtc->config.adjusted_mode.crtc_hdisplay;
+ cur_size = intel_crtc->config->base.adjusted_mode.crtc_hdisplay;
cur_size = cur_size * fb->base.bits_per_pixel / 8;
if (fb->base.pitches[0] < cur_size) {
DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
@@ -592,13 +592,14 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
break;
}
- cur_size = intel_crtc->config.adjusted_mode.crtc_vdisplay;
- cur_size = ALIGN(cur_size, plane_config->tiled ? (IS_GEN2(dev) ? 16 : 8) : 1);
+ cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay;
+ cur_size = intel_fb_align_height(dev, cur_size,
+ plane_config->tiling);
cur_size *= fb->base.pitches[0];
DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
pipe_name(intel_crtc->pipe),
- intel_crtc->config.adjusted_mode.crtc_hdisplay,
- intel_crtc->config.adjusted_mode.crtc_vdisplay,
+ intel_crtc->config->base.adjusted_mode.crtc_hdisplay,
+ intel_crtc->config->base.adjusted_mode.crtc_vdisplay,
fb->base.bits_per_pixel,
cur_size);
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 77af512d2d35..04e248dd2259 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -341,7 +341,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
}
/**
- * intel_pch_fifo_underrun_irq_handler - handle PCH fifo underrun interrupt
+ * intel_cpu_fifo_underrun_irq_handler - handle CPU fifo underrun interrupt
* @dev_priv: i915 device instance
* @pipe: (CPU) pipe to set state for
*
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 79f6d72179c5..73cb6e036445 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -157,6 +157,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
intel_psr_invalidate(dev, obj->frontbuffer_bits);
+ intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
}
/**
@@ -182,6 +183,7 @@ void intel_frontbuffer_flush(struct drm_device *dev,
intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
+ intel_edp_drrs_flush(dev, frontbuffer_bits);
intel_psr_flush(dev, frontbuffer_bits);
/*
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 3abc2000fce9..995c5b261f4f 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -31,6 +31,7 @@
#include <linux/delay.h>
#include <linux/hdmi.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
@@ -337,13 +338,13 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
+ u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
u32 data_reg;
int i;
u32 val = I915_READ(ctl_reg);
data_reg = hsw_infoframe_data_reg(type,
- intel_crtc->config.cpu_transcoder,
+ intel_crtc->config->cpu_transcoder,
dev_priv);
if (data_reg == 0)
return;
@@ -371,7 +372,7 @@ static bool hsw_infoframe_enabled(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
+ u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
u32 val = I915_READ(ctl_reg);
return val & (VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_SPD_HSW |
@@ -436,7 +437,7 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
}
if (intel_hdmi->rgb_quant_range_selectable) {
- if (intel_crtc->config.limited_color_range)
+ if (intel_crtc->config->limited_color_range)
frame.avi.quantization_range =
HDMI_QUANTIZATION_RANGE_LIMITED;
else
@@ -672,7 +673,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
+ u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
u32 val = I915_READ(reg);
assert_hdmi_port_disabled(intel_hdmi);
@@ -700,7 +701,7 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
u32 hdmi_val;
hdmi_val = SDVO_ENCODING_HDMI;
@@ -711,12 +712,12 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;
- if (crtc->config.pipe_bpp > 24)
+ if (crtc->config->pipe_bpp > 24)
hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
else
hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
- if (crtc->config.has_hdmi_sink)
+ if (crtc->config->has_hdmi_sink)
hdmi_val |= HDMI_MODE_SELECT_HDMI;
if (HAS_PCH_CPT(dev))
@@ -759,7 +760,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
}
static void intel_hdmi_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
@@ -792,7 +793,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
tmp & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
- pipe_config->adjusted_mode.flags |= flags;
+ pipe_config->base.adjusted_mode.flags |= flags;
if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
dotclock = pipe_config->port_clock * 2 / 3;
@@ -802,7 +803,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(dev_priv->dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
- pipe_config->adjusted_mode.crtc_clock = dotclock;
+ pipe_config->base.adjusted_mode.crtc_clock = dotclock;
}
static void intel_enable_hdmi(struct intel_encoder *encoder)
@@ -814,7 +815,7 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
u32 temp;
u32 enable_bits = SDVO_ENABLE;
- if (intel_crtc->config.has_audio)
+ if (intel_crtc->config->has_audio)
enable_bits |= SDVO_AUDIO_ENABLE;
temp = I915_READ(intel_hdmi->hdmi_reg);
@@ -845,8 +846,8 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
POSTING_READ(intel_hdmi->hdmi_reg);
}
- if (intel_crtc->config.has_audio) {
- WARN_ON(!intel_crtc->config.has_hdmi_sink);
+ if (intel_crtc->config->has_audio) {
+ WARN_ON(!intel_crtc->config->has_hdmi_sink);
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(intel_crtc->pipe));
intel_audio_codec_enable(encoder);
@@ -866,7 +867,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
u32 temp;
u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
- if (crtc->config.has_audio)
+ if (crtc->config->has_audio)
intel_audio_codec_disable(encoder);
temp = I915_READ(intel_hdmi->hdmi_reg);
@@ -975,12 +976,12 @@ static bool hdmi_12bpc_possible(struct intel_crtc *crtc)
}
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
- int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ int clock_12bpc = pipe_config->base.adjusted_mode.crtc_clock * 3 / 2;
int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
int desired_bpp;
@@ -1252,12 +1253,12 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode =
- &intel_crtc->config.adjusted_mode;
+ &intel_crtc->config->base.adjusted_mode;
intel_hdmi_prepare(encoder);
intel_hdmi->set_infoframes(&encoder->base,
- intel_crtc->config.has_hdmi_sink,
+ intel_crtc->config->has_hdmi_sink,
adjusted_mode);
}
@@ -1270,7 +1271,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode =
- &intel_crtc->config.adjusted_mode;
+ &intel_crtc->config->base.adjusted_mode;
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
u32 val;
@@ -1302,7 +1303,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
intel_hdmi->set_infoframes(&encoder->base,
- intel_crtc->config.has_hdmi_sink,
+ intel_crtc->config->has_hdmi_sink,
adjusted_mode);
intel_enable_hdmi(encoder);
@@ -1467,7 +1468,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode =
- &intel_crtc->config.adjusted_mode;
+ &intel_crtc->config->base.adjusted_mode;
enum dpio_channel ch = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
int data, i;
@@ -1593,7 +1594,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
intel_hdmi->set_infoframes(&encoder->base,
- intel_crtc->config.has_hdmi_sink,
+ intel_crtc->config->has_hdmi_sink,
adjusted_mode);
intel_enable_hdmi(encoder);
@@ -1614,7 +1615,9 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.force = intel_hdmi_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_hdmi_set_property,
+ .atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_hdmi_destroy,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e405b61cdac5..a94346fee160 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -283,7 +283,6 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
struct drm_i915_private *dev_priv = dev->dev_private;
uint64_t temp = 0;
uint32_t desc[4];
- unsigned long flags;
/* XXX: You must always write both descriptors in the order below. */
if (ctx_obj1)
@@ -297,63 +296,17 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
desc[3] = (u32)(temp >> 32);
desc[2] = (u32)temp;
- /* Set Force Wakeup bit to prevent GT from entering C6 while ELSP writes
- * are in progress.
- *
- * The other problem is that we can't just call gen6_gt_force_wake_get()
- * because that function calls intel_runtime_pm_get(), which might sleep.
- * Instead, we do the runtime_pm_get/put when creating/destroying requests.
- */
- spin_lock_irqsave(&dev_priv->uncore.lock, flags);
- if (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen >= 9) {
- if (dev_priv->uncore.fw_rendercount++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- FORCEWAKE_RENDER);
- if (dev_priv->uncore.fw_mediacount++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- FORCEWAKE_MEDIA);
- if (INTEL_INFO(dev)->gen >= 9) {
- if (dev_priv->uncore.fw_blittercount++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- FORCEWAKE_BLITTER);
- }
- } else {
- if (dev_priv->uncore.forcewake_count++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- FORCEWAKE_ALL);
- }
- spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
-
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
I915_WRITE(RING_ELSP(ring), desc[1]);
I915_WRITE(RING_ELSP(ring), desc[0]);
I915_WRITE(RING_ELSP(ring), desc[3]);
+
/* The context is automatically loaded after the following */
I915_WRITE(RING_ELSP(ring), desc[2]);
/* ELSP is a wo register, so use another nearby reg for posting instead */
POSTING_READ(RING_EXECLIST_STATUS(ring));
-
- /* Release Force Wakeup (see the big comment above). */
- spin_lock_irqsave(&dev_priv->uncore.lock, flags);
- if (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen >= 9) {
- if (--dev_priv->uncore.fw_rendercount == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv,
- FORCEWAKE_RENDER);
- if (--dev_priv->uncore.fw_mediacount == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv,
- FORCEWAKE_MEDIA);
- if (INTEL_INFO(dev)->gen >= 9) {
- if (--dev_priv->uncore.fw_blittercount == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv,
- FORCEWAKE_BLITTER);
- }
- } else {
- if (--dev_priv->uncore.forcewake_count == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv,
- FORCEWAKE_ALL);
- }
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
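
The hunk above replaces the open-coded, per-domain force-wake reference counting (render/media/blitter counters taken under the uncore spinlock around the four ELSP writes) with the consolidated intel_uncore_forcewake_get()/put() helpers. Below is a standalone sketch of the counting rule those helpers encapsulate: hardware is only woken on the 0->1 transition and released on the 1->0 transition. The domain names and the hw_force_wake() stub are illustrative, not driver API.

#include <stdio.h>

enum fw_domain { FW_RENDER, FW_MEDIA, FW_BLITTER, FW_NUM };

static int fw_count[FW_NUM];

static void hw_force_wake(enum fw_domain d, int on)
{
	printf("domain %d: force wake %s\n", d, on ? "asserted" : "released");
}

static void forcewake_get(unsigned domain_mask)
{
	for (int d = 0; d < FW_NUM; d++)
		if ((domain_mask & (1u << d)) && fw_count[d]++ == 0)
			hw_force_wake(d, 1);	/* first user wakes the domain */
}

static void forcewake_put(unsigned domain_mask)
{
	for (int d = 0; d < FW_NUM; d++)
		if ((domain_mask & (1u << d)) && --fw_count[d] == 0)
			hw_force_wake(d, 0);	/* last user lets it sleep again */
}

#define FW_ALL ((1u << FW_NUM) - 1)

int main(void)
{
	forcewake_get(FW_ALL);	/* e.g. held around the ELSP descriptor writes */
	/* ... register writes would go here ... */
	forcewake_put(FW_ALL);
	return 0;
}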
@@ -404,8 +357,8 @@ static void execlists_submit_contexts(struct intel_engine_cs *ring,
static void execlists_context_unqueue(struct intel_engine_cs *ring)
{
- struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL;
- struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL;
+ struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
+ struct drm_i915_gem_request *cursor = NULL, *tmp = NULL;
assert_spin_locked(&ring->execlist_lock);
@@ -445,12 +398,12 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
static bool execlists_check_remove_request(struct intel_engine_cs *ring,
u32 request_id)
{
- struct intel_ctx_submit_request *head_req;
+ struct drm_i915_gem_request *head_req;
assert_spin_locked(&ring->execlist_lock);
head_req = list_first_entry_or_null(&ring->execlist_queue,
- struct intel_ctx_submit_request,
+ struct drm_i915_gem_request,
execlist_link);
if (head_req != NULL) {
@@ -534,24 +487,34 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
static int execlists_context_queue(struct intel_engine_cs *ring,
struct intel_context *to,
- u32 tail)
+ u32 tail,
+ struct drm_i915_gem_request *request)
{
- struct intel_ctx_submit_request *req = NULL, *cursor;
+ struct drm_i915_gem_request *cursor;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
unsigned long flags;
int num_elements = 0;
- req = kzalloc(sizeof(*req), GFP_KERNEL);
- if (req == NULL)
- return -ENOMEM;
- req->ctx = to;
- i915_gem_context_reference(req->ctx);
-
if (to != ring->default_context)
intel_lr_context_pin(ring, to);
- req->ring = ring;
- req->tail = tail;
+ if (!request) {
+ /*
+ * If there isn't a request associated with this submission,
+ * create one as a temporary holder.
+ */
+ WARN(1, "execlist context submission without request");
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
+ request->ring = ring;
+ request->ctx = to;
+ } else {
+ WARN_ON(to != request->ctx);
+ }
+ request->tail = tail;
+ i915_gem_request_reference(request);
+ i915_gem_context_reference(request->ctx);
intel_runtime_pm_get(dev_priv);
@@ -562,10 +525,10 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
break;
if (num_elements > 2) {
- struct intel_ctx_submit_request *tail_req;
+ struct drm_i915_gem_request *tail_req;
tail_req = list_last_entry(&ring->execlist_queue,
- struct intel_ctx_submit_request,
+ struct drm_i915_gem_request,
execlist_link);
if (to == tail_req->ctx) {
@@ -577,7 +540,7 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
}
}
- list_add_tail(&req->execlist_link, &ring->execlist_queue);
+ list_add_tail(&request->execlist_link, &ring->execlist_queue);
if (num_elements == 0)
execlists_context_unqueue(ring);
@@ -586,7 +549,8 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
return 0;
}
-static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
+static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx)
{
struct intel_engine_cs *ring = ringbuf->ring;
uint32_t flush_domains;
@@ -596,7 +560,8 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
if (ring->gpu_caches_dirty)
flush_domains = I915_GEM_GPU_DOMAINS;
- ret = ring->emit_flush(ringbuf, I915_GEM_GPU_DOMAINS, flush_domains);
+ ret = ring->emit_flush(ringbuf, ctx,
+ I915_GEM_GPU_DOMAINS, flush_domains);
if (ret)
return ret;
@@ -605,6 +570,7 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
}
static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
struct list_head *vmas)
{
struct intel_engine_cs *ring = ringbuf->ring;
@@ -632,7 +598,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
- return logical_ring_invalidate_all_caches(ringbuf);
+ return logical_ring_invalidate_all_caches(ringbuf, ctx);
}
/**
@@ -712,13 +678,13 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
return -EINVAL;
}
- ret = execlists_move_to_gpu(ringbuf, vmas);
+ ret = execlists_move_to_gpu(ringbuf, ctx, vmas);
if (ret)
return ret;
if (ring == &dev_priv->ring[RCS] &&
instp_mode != dev_priv->relative_constants_mode) {
- ret = intel_logical_ring_begin(ringbuf, 4);
+ ret = intel_logical_ring_begin(ringbuf, ctx, 4);
if (ret)
return ret;
@@ -731,7 +697,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
dev_priv->relative_constants_mode = instp_mode;
}
- ret = ring->emit_bb_start(ringbuf, exec_start, flags);
+ ret = ring->emit_bb_start(ringbuf, ctx, exec_start, flags);
if (ret)
return ret;
@@ -743,7 +709,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
void intel_execlists_retire_requests(struct intel_engine_cs *ring)
{
- struct intel_ctx_submit_request *req, *tmp;
+ struct drm_i915_gem_request *req, *tmp;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
unsigned long flags;
struct list_head retired_list;
@@ -765,9 +731,9 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
if (ctx_obj && (ctx != ring->default_context))
intel_lr_context_unpin(ring, ctx);
intel_runtime_pm_put(dev_priv);
- i915_gem_context_unreference(req->ctx);
+ i915_gem_context_unreference(ctx);
list_del(&req->execlist_link);
- kfree(req);
+ i915_gem_request_unreference(req);
}
}
@@ -793,7 +759,8 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring)
I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
}
-int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
+int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx)
{
struct intel_engine_cs *ring = ringbuf->ring;
int ret;
@@ -801,7 +768,7 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
if (!ring->gpu_caches_dirty)
return 0;
- ret = ring->emit_flush(ringbuf, 0, I915_GEM_GPU_DOMAINS);
+ ret = ring->emit_flush(ringbuf, ctx, 0, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
@@ -818,17 +785,18 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
* on a queue waiting for the ELSP to be ready to accept a new context submission. At that
* point, the tail *inside* the context is updated and the ELSP written to.
*/
-void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
+void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
+ struct drm_i915_gem_request *request)
{
struct intel_engine_cs *ring = ringbuf->ring;
- struct intel_context *ctx = ringbuf->FIXME_lrc_ctx;
intel_logical_ring_advance(ringbuf);
if (intel_ring_stopped(ring))
return;
- execlists_context_queue(ring, ctx, ringbuf->tail);
+ execlists_context_queue(ring, ctx, ringbuf->tail, request);
}
static int intel_lr_context_pin(struct intel_engine_cs *ring,
@@ -839,11 +807,11 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
int ret = 0;
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
- if (ctx->engine[ring->id].unpin_count++ == 0) {
+ if (ctx->engine[ring->id].pin_count++ == 0) {
ret = i915_gem_obj_ggtt_pin(ctx_obj,
GEN8_LR_CONTEXT_ALIGN, 0);
if (ret)
- goto reset_unpin_count;
+ goto reset_pin_count;
ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
if (ret)
@@ -854,8 +822,8 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
unpin_ctx_obj:
i915_gem_object_ggtt_unpin(ctx_obj);
-reset_unpin_count:
- ctx->engine[ring->id].unpin_count = 0;
+reset_pin_count:
+ ctx->engine[ring->id].pin_count = 0;
return ret;
}
@@ -868,7 +836,7 @@ void intel_lr_context_unpin(struct intel_engine_cs *ring,
if (ctx_obj) {
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
- if (--ctx->engine[ring->id].unpin_count == 0) {
+ if (--ctx->engine[ring->id].pin_count == 0) {
intel_unpin_ringbuffer_obj(ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
}
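
The two hunks above rename unpin_count to pin_count, which better matches how the counter is used: the context backing objects are pinned only by the first reference and released only by the last, and the counter is reset if that first pin fails. A minimal illustrative model of the pattern (the pin_backing()/unpin_backing() stubs stand in for the GGTT pin and ringbuffer map calls and are not real driver functions):

#include <stdio.h>

struct engine_ctx {
	int pin_count;
	int pinned;	/* models "backing objects currently mapped" */
};

static int pin_backing(struct engine_ctx *ce)    { ce->pinned = 1; return 0; }
static void unpin_backing(struct engine_ctx *ce) { ce->pinned = 0; }

static int lr_context_pin(struct engine_ctx *ce)
{
	int ret = 0;

	if (ce->pin_count++ == 0) {
		ret = pin_backing(ce);		/* only the first user pins */
		if (ret)
			ce->pin_count = 0;	/* mirrors the reset_pin_count unwind */
	}
	return ret;
}

static void lr_context_unpin(struct engine_ctx *ce)
{
	if (--ce->pin_count == 0)
		unpin_backing(ce);		/* last user drops the mapping */
}

int main(void)
{
	struct engine_ctx ce = { 0 };

	lr_context_pin(&ce);	/* pins the backing objects */
	lr_context_pin(&ce);	/* nested use, no extra work */
	lr_context_unpin(&ce);
	lr_context_unpin(&ce);	/* actually unpins here */
	printf("pin_count=%d pinned=%d\n", ce.pin_count, ce.pinned);
	return 0;
}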
@@ -959,6 +927,7 @@ static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
}
static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
int bytes)
{
struct intel_engine_cs *ring = ringbuf->ring;
@@ -972,7 +941,7 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
return ret;
/* Force the context submission in case we have been skipping it */
- intel_logical_ring_advance_and_submit(ringbuf);
+ intel_logical_ring_advance_and_submit(ringbuf, ctx, NULL);
/* With GEM the hangcheck timer should kick us out of the loop,
* leaving it early runs the risk of corrupting GEM state (due
@@ -1007,13 +976,14 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
return ret;
}
-static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
+static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx)
{
uint32_t __iomem *virt;
int rem = ringbuf->size - ringbuf->tail;
if (ringbuf->space < rem) {
- int ret = logical_ring_wait_for_space(ringbuf, rem);
+ int ret = logical_ring_wait_for_space(ringbuf, ctx, rem);
if (ret)
return ret;
@@ -1030,18 +1000,19 @@ static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
return 0;
}
-static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
+static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx, int bytes)
{
int ret;
if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
- ret = logical_ring_wrap_buffer(ringbuf);
+ ret = logical_ring_wrap_buffer(ringbuf, ctx);
if (unlikely(ret))
return ret;
}
if (unlikely(ringbuf->space < bytes)) {
- ret = logical_ring_wait_for_space(ringbuf, bytes);
+ ret = logical_ring_wait_for_space(ringbuf, ctx, bytes);
if (unlikely(ret))
return ret;
}
@@ -1062,7 +1033,8 @@ static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
*
* Return: non-zero if the ringbuffer is not ready to be written to.
*/
-int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
+int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx, int num_dwords)
{
struct intel_engine_cs *ring = ringbuf->ring;
struct drm_device *dev = ring->dev;
@@ -1074,12 +1046,12 @@ int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
if (ret)
return ret;
- ret = logical_ring_prepare(ringbuf, num_dwords * sizeof(uint32_t));
+ ret = logical_ring_prepare(ringbuf, ctx, num_dwords * sizeof(uint32_t));
if (ret)
return ret;
/* Preallocate the olr before touching the ring */
- ret = logical_ring_alloc_request(ring, ringbuf->FIXME_lrc_ctx);
+ ret = logical_ring_alloc_request(ring, ctx);
if (ret)
return ret;
@@ -1100,11 +1072,11 @@ static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
return 0;
ring->gpu_caches_dirty = true;
- ret = logical_ring_flush_all_caches(ringbuf);
+ ret = logical_ring_flush_all_caches(ringbuf, ctx);
if (ret)
return ret;
- ret = intel_logical_ring_begin(ringbuf, w->count * 2 + 2);
+ ret = intel_logical_ring_begin(ringbuf, ctx, w->count * 2 + 2);
if (ret)
return ret;
@@ -1118,7 +1090,7 @@ static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
intel_logical_ring_advance(ringbuf);
ring->gpu_caches_dirty = true;
- ret = logical_ring_flush_all_caches(ringbuf);
+ ret = logical_ring_flush_all_caches(ringbuf, ctx);
if (ret)
return ret;
@@ -1169,12 +1141,13 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring)
}
static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
u64 offset, unsigned flags)
{
bool ppgtt = !(flags & I915_DISPATCH_SECURE);
int ret;
- ret = intel_logical_ring_begin(ringbuf, 4);
+ ret = intel_logical_ring_begin(ringbuf, ctx, 4);
if (ret)
return ret;
@@ -1222,6 +1195,7 @@ static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
}
static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
u32 invalidate_domains,
u32 unused)
{
@@ -1231,7 +1205,7 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
uint32_t cmd;
int ret;
- ret = intel_logical_ring_begin(ringbuf, 4);
+ ret = intel_logical_ring_begin(ringbuf, ctx, 4);
if (ret)
return ret;
@@ -1260,6 +1234,7 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
}
static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
u32 invalidate_domains,
u32 flush_domains)
{
@@ -1286,7 +1261,7 @@ static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
}
- ret = intel_logical_ring_begin(ringbuf, 6);
+ ret = intel_logical_ring_begin(ringbuf, ctx, 6);
if (ret)
return ret;
@@ -1311,13 +1286,14 @@ static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}
-static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
+static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
+ struct drm_i915_gem_request *request)
{
struct intel_engine_cs *ring = ringbuf->ring;
u32 cmd;
int ret;
- ret = intel_logical_ring_begin(ringbuf, 6);
+ ret = intel_logical_ring_begin(ringbuf, request->ctx, 6);
if (ret)
return ret;
@@ -1333,7 +1309,7 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
intel_logical_ring_emit(ringbuf, MI_NOOP);
- intel_logical_ring_advance_and_submit(ringbuf);
+ intel_logical_ring_advance_and_submit(ringbuf, request->ctx, request);
return 0;
}
@@ -1620,6 +1596,7 @@ int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
return 0;
ret = ring->emit_bb_start(ringbuf,
+ ctx,
so.ggtt_offset,
I915_DISPATCH_SECURE);
if (ret)
@@ -1774,6 +1751,7 @@ void intel_lr_context_free(struct intel_context *ctx)
intel_unpin_ringbuffer_obj(ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
}
+ WARN_ON(ctx->engine[ring->id].pin_count);
intel_destroy_ringbuffer_obj(ringbuf);
kfree(ringbuf);
drm_gem_object_unreference(&ctx_obj->base);
@@ -1876,7 +1854,6 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
}
ringbuf->ring = ring;
- ringbuf->FIXME_lrc_ctx = ctx;
ringbuf->size = 32 * PAGE_SIZE;
ringbuf->effective_size = ringbuf->size;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 960fcbd2c98a..6f2d7da594f6 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -38,8 +38,12 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring);
void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
int intel_logical_rings_init(struct drm_device *dev);
-int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf);
-void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf);
+int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx);
+void intel_logical_ring_advance_and_submit(
+ struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
+ struct drm_i915_gem_request *request);
/**
* intel_logical_ring_advance() - advance the ringbuffer tail
* @ringbuf: Ringbuffer to advance.
@@ -61,7 +65,9 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
ringbuf->tail += 4;
}
-int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords);
+int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
+ int num_dwords);
/* Logical Ring Contexts */
int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
@@ -83,35 +89,6 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
u64 exec_start, u32 flags);
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
-/**
- * struct intel_ctx_submit_request - queued context submission request
- * @ctx: Context to submit to the ELSP.
- * @ring: Engine to submit it to.
- * @tail: how far in the context's ringbuffer this request goes to.
- * @execlist_link: link in the submission queue.
- * @work: workqueue for processing this request in a bottom half.
- * @elsp_submitted: no. of times this request has been sent to the ELSP.
- *
- * The ELSP only accepts two elements at a time, so we queue context/tail
- * pairs on a given queue (ring->execlist_queue) until the hardware is
- * available. The queue serves a double purpose: we also use it to keep track
- * of the up to 2 contexts currently in the hardware (usually one in execution
- * and the other queued up by the GPU): We only remove elements from the head
- * of the queue when the hardware informs us that an element has been
- * completed.
- *
- * All accesses to the queue are mediated by a spinlock (ring->execlist_lock).
- */
-struct intel_ctx_submit_request {
- struct intel_context *ctx;
- struct intel_engine_cs *ring;
- u32 tail;
-
- struct list_head execlist_link;
-
- int elsp_submitted;
-};
-
void intel_lrc_irq_handler(struct intel_engine_cs *ring);
void intel_execlists_retire_requests(struct intel_engine_cs *ring);
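
With struct intel_ctx_submit_request gone, the queueing behaviour its kerneldoc described (at most two elements in the ELSP at once, context/tail pairs waiting on ring->execlist_queue, same-context submissions coalesced) is now carried directly by drm_i915_gem_request via ctx, tail, execlist_link and elsp_submitted. The toy model below illustrates that behaviour in simplified form; the queue layout, coalescing rule placement and all names are illustrative, not the driver's actual code.

#include <stdio.h>

#define QLEN 8

struct req { int ctx_id; unsigned tail; int elsp_submitted; };

static struct req queue[QLEN];
static int q_head, q_count;

static void context_queue(int ctx_id, unsigned tail)
{
	/* simplified coalescing: fold into the previous entry if it
	 * targets the same context, keeping only the newest tail */
	if (q_count && queue[(q_head + q_count - 1) % QLEN].ctx_id == ctx_id) {
		queue[(q_head + q_count - 1) % QLEN].tail = tail;
		return;
	}
	queue[(q_head + q_count++) % QLEN] = (struct req){ ctx_id, tail, 0 };
}

static void context_unqueue(void)
{
	/* hand at most two pending contexts to the "ELSP"; the rest wait */
	for (int i = 0; i < 2 && i < q_count; i++) {
		struct req *r = &queue[(q_head + i) % QLEN];
		r->elsp_submitted++;
		printf("ELSP slot %d: ctx %d tail 0x%x\n", i, r->ctx_id, r->tail);
	}
}

int main(void)
{
	context_queue(1, 0x100);
	context_queue(1, 0x140);	/* coalesced into the previous entry */
	context_queue(2, 0x080);
	context_queue(3, 0x200);	/* stays queued until a slot frees up */
	context_unqueue();
	return 0;
}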
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 14654d628ca4..071b96d6e146 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -32,6 +32,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
@@ -93,7 +94,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
}
static void intel_lvds_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -115,7 +116,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
else
flags |= DRM_MODE_FLAG_PVSYNC;
- pipe_config->adjusted_mode.flags |= flags;
+ pipe_config->base.adjusted_mode.flags |= flags;
/* gen2/3 store dither state in pfit control, needs to match */
if (INTEL_INFO(dev)->gen < 4) {
@@ -129,7 +130,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(dev_priv->dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
- pipe_config->adjusted_mode.crtc_clock = dotclock;
+ pipe_config->base.adjusted_mode.crtc_clock = dotclock;
}
static void intel_pre_enable_lvds(struct intel_encoder *encoder)
@@ -139,7 +140,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
const struct drm_display_mode *adjusted_mode =
- &crtc->config.adjusted_mode;
+ &crtc->config->base.adjusted_mode;
int pipe = crtc->pipe;
u32 temp;
@@ -167,7 +168,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
/* set the corresponsding LVDS_BORDER bit */
temp &= ~LVDS_BORDER_ENABLE;
- temp |= crtc->config.gmch_pfit.lvds_border_bits;
+ temp |= crtc->config->gmch_pfit.lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
* set the DPLLs for dual-channel mode or not.
*/
@@ -190,7 +191,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
if (INTEL_INFO(dev)->gen == 4) {
/* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels. */
- if (crtc->config.dither && crtc->config.pipe_bpp == 18)
+ if (crtc->config->dither && crtc->config->pipe_bpp == 18)
temp |= LVDS_ENABLE_DITHER;
else
temp &= ~LVDS_ENABLE_DITHER;
@@ -277,14 +278,14 @@ intel_lvds_mode_valid(struct drm_connector *connector,
}
static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = intel_encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder =
to_lvds_encoder(&intel_encoder->base);
struct intel_connector *intel_connector =
&lvds_encoder->attached_connector->base;
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
unsigned int lvds_bpp;
@@ -531,7 +532,9 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.detect = intel_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_lvds_set_property,
+ .atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_lvds_destroy,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 973c9de3b87d..f93dfc174495 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -856,7 +856,7 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
return -EINVAL;
/* can't use the overlay with double wide pipe */
- if (crtc->config.double_wide)
+ if (crtc->config->double_wide)
return -EINVAL;
return 0;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 4d63839bd9b4..d7be68a7bbda 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -98,13 +98,13 @@ intel_find_panel_downclock(struct drm_device *dev,
/* adjusted_mode has been preset to be the panel's fixed mode */
void
intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
- struct intel_crtc_config *pipe_config,
+ struct intel_crtc_state *pipe_config,
int fitting_mode)
{
struct drm_display_mode *adjusted_mode;
int x, y, width, height;
- adjusted_mode = &pipe_config->adjusted_mode;
+ adjusted_mode = &pipe_config->base.adjusted_mode;
x = y = width = height = 0;
@@ -223,10 +223,10 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
return (FACTOR * ratio + FACTOR/2) / FACTOR;
}
-static void i965_scale_aspect(struct intel_crtc_config *pipe_config,
+static void i965_scale_aspect(struct intel_crtc_state *pipe_config,
u32 *pfit_control)
{
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
u32 scaled_width = adjusted_mode->hdisplay *
pipe_config->pipe_src_h;
u32 scaled_height = pipe_config->pipe_src_w *
@@ -243,11 +243,11 @@ static void i965_scale_aspect(struct intel_crtc_config *pipe_config,
*pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
}
-static void i9xx_scale_aspect(struct intel_crtc_config *pipe_config,
+static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
u32 *pfit_control, u32 *pfit_pgm_ratios,
u32 *border)
{
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
u32 scaled_width = adjusted_mode->hdisplay *
pipe_config->pipe_src_h;
u32 scaled_height = pipe_config->pipe_src_w *
@@ -301,14 +301,14 @@ static void i9xx_scale_aspect(struct intel_crtc_config *pipe_config,
}
void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
- struct intel_crtc_config *pipe_config,
+ struct intel_crtc_state *pipe_config,
int fitting_mode)
{
struct drm_device *dev = intel_crtc->base.dev;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
struct drm_display_mode *adjusted_mode;
- adjusted_mode = &pipe_config->adjusted_mode;
+ adjusted_mode = &pipe_config->base.adjusted_mode;
/* Native modes don't need fitting */
if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 03fc7f2ee9d1..6ece663f3394 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -76,7 +76,6 @@ static void gen9_init_clock_gating(struct drm_device *dev)
_MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
}
-
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -539,7 +538,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
int clock;
- adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
clock = adjusted_mode->crtc_clock;
/* Display SR */
@@ -608,10 +607,10 @@ static bool g4x_compute_wm0(struct drm_device *dev,
return false;
}
- adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
- hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
+ hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
pixel_size = crtc->primary->fb->bits_per_pixel / 8;
/* Use the small buffer method to calculate plane watermark */
@@ -695,10 +694,10 @@ static bool g4x_compute_srwm(struct drm_device *dev,
}
crtc = intel_get_crtc_for_plane(dev, plane);
- adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
- hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
+ hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
pixel_size = crtc->primary->fb->bits_per_pixel / 8;
line_time_us = max(htotal * 1000 / clock, 1);
@@ -729,7 +728,7 @@ static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
int entries;
- int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
+ int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
if (WARN(clock == 0, "Pixel clock is zero!\n"))
return false;
@@ -1059,10 +1058,10 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
/* self-refresh has much higher latency */
static const int sr_latency_ns = 12000;
const struct drm_display_mode *adjusted_mode =
- &to_intel_crtc(crtc)->config.adjusted_mode;
+ &to_intel_crtc(crtc)->config->base.adjusted_mode;
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
- int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
+ int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
@@ -1144,7 +1143,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
if (IS_GEN2(dev))
cpp = 4;
- adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
@@ -1166,7 +1165,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
if (IS_GEN2(dev))
cpp = 4;
- adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
@@ -1205,10 +1204,10 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
/* self-refresh has much higher latency */
static const int sr_latency_ns = 6000;
const struct drm_display_mode *adjusted_mode =
- &to_intel_crtc(enabled)->config.adjusted_mode;
+ &to_intel_crtc(enabled)->config->base.adjusted_mode;
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
- int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
+ int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
@@ -1261,7 +1260,7 @@ static void i845_update_wm(struct drm_crtc *unused_crtc)
if (crtc == NULL)
return;
- adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
&i845_wm_info,
dev_priv->display.get_fifo_size(dev, 0),
@@ -1280,17 +1279,17 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pixel_rate;
- pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;
+ pixel_rate = intel_crtc->config->base.adjusted_mode.crtc_clock;
/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
* adjust the pixel_rate here. */
- if (intel_crtc->config.pch_pfit.enabled) {
+ if (intel_crtc->config->pch_pfit.enabled) {
uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
- uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
+ uint32_t pfit_size = intel_crtc->config->pch_pfit.size;
- pipe_w = intel_crtc->config.pipe_src_w;
- pipe_h = intel_crtc->config.pipe_src_h;
+ pipe_w = intel_crtc->config->pipe_src_w;
+ pipe_h = intel_crtc->config->pipe_src_h;
pfit_w = (pfit_size >> 16) & 0xFFFF;
pfit_h = pfit_size & 0xFFFF;
if (pipe_w < pfit_w)
@@ -1643,7 +1642,7 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
+ struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
u32 linetime, ips_linetime;
if (!intel_crtc_active(crtc))
@@ -1903,11 +1902,11 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
return;
p->active = true;
- p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
+ p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
p->cur.bytes_per_pixel = 4;
- p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
+ p->pri.horiz_pixels = intel_crtc->config->pipe_src_w;
p->cur.horiz_pixels = intel_crtc->cursor_width;
/* TODO: for now, assume primary and cursor planes are always enabled. */
p->pri.enabled = true;
@@ -2556,10 +2555,10 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
}
-static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_config *config)
+static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
{
/* TODO: Take into account the scalers once we support them */
- return config->adjusted_mode.crtc_clock;
+ return config->base.adjusted_mode.crtc_clock;
}
/*
@@ -2647,8 +2646,8 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
p->active = intel_crtc_active(crtc);
if (p->active) {
- p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
- p->pixel_rate = skl_pipe_pixel_rate(&intel_crtc->config);
+ p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
+ p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config);
/*
* For now, assume primary and cursor planes are always enabled.
@@ -2656,8 +2655,8 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
p->plane[0].enabled = true;
p->plane[0].bytes_per_pixel =
crtc->primary->fb->bits_per_pixel / 8;
- p->plane[0].horiz_pixels = intel_crtc->config.pipe_src_w;
- p->plane[0].vert_pixels = intel_crtc->config.pipe_src_h;
+ p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
+ p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h;
p->cursor.enabled = true;
p->cursor.bytes_per_pixel = 4;
@@ -3800,8 +3799,8 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- /* Latest VLV doesn't need to force the gfx clock */
- if (dev->pdev->revision >= 0xd) {
+ /* CHV and latest VLV don't need to force the gfx clock */
+ if (IS_CHERRYVIEW(dev) || dev->pdev->revision >= 0xd) {
valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
return;
}
@@ -3840,9 +3839,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
- if (IS_CHERRYVIEW(dev))
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
- else if (IS_VALLEYVIEW(dev))
+ if (IS_VALLEYVIEW(dev))
vlv_set_rps_idle(dev_priv);
else
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
@@ -3884,7 +3881,7 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
dev_priv->rps.cur_freq = val;
- trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
+ trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
}
static void gen9_disable_rps(struct drm_device *dev)
@@ -3892,6 +3889,7 @@ static void gen9_disable_rps(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(GEN6_RC_CONTROL, 0);
+ I915_WRITE(GEN9_PG_ENABLE, 0);
}
static void gen6_disable_rps(struct drm_device *dev)
@@ -3915,11 +3913,11 @@ static void valleyview_disable_rps(struct drm_device *dev)
/* we're doing forcewake before Disabling RC6,
* This what the BIOS expects when going into suspend */
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
I915_WRITE(GEN6_RC_CONTROL, 0);
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
@@ -4025,9 +4023,39 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
}
}
+/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static void gen9_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+ gen6_init_rps_frequencies(dev);
+
+ I915_WRITE(GEN6_RPNSWREQ, 0xc800000);
+ I915_WRITE(GEN6_RC_VIDEO_FREQ, 0xc800000);
+
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 0x12060000);
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 0xe808);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 0x3bd08);
+ I915_WRITE(GEN6_RP_UP_EI, 0x101d0);
+ I915_WRITE(GEN6_RP_DOWN_EI, 0x55730);
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
+ I915_WRITE(GEN6_PMINTRMSK, 0x6);
+ I915_WRITE(GEN6_RP_CONTROL, GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_MODE | GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE | GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+
+ gen6_enable_rps_interrupts(dev);
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+}
+
+static void gen9_enable_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
uint32_t rc6_mask = 0;
int unused;
@@ -4037,7 +4065,7 @@ static void gen9_enable_rps(struct drm_device *dev)
/* 1b: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have.*/
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -4051,6 +4079,10 @@ static void gen9_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
+ /* 2c: Program Coarse Power Gating Policies. */
+ I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
+ I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
+
/* 3a: Enable RC6 */
if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
@@ -4060,7 +4092,10 @@ static void gen9_enable_rps(struct drm_device *dev)
GEN6_RC_CTL_EI_MODE(1) |
rc6_mask);
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ /* 3b: Enable Coarse Power Gating only when RC6 is enabled */
+ I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? 3 : 0);
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -4076,7 +4111,7 @@ static void gen8_enable_rps(struct drm_device *dev)
/* 1c & 1d: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have.*/
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -4143,7 +4178,7 @@ static void gen8_enable_rps(struct drm_device *dev)
dev_priv->rps.power = HIGH_POWER; /* force a reset */
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void gen6_enable_rps(struct drm_device *dev)
@@ -4171,7 +4206,7 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GTFIFODBG, gtfifodbg);
}
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* Initialize rps frequencies */
gen6_init_rps_frequencies(dev);
@@ -4251,7 +4286,7 @@ static void gen6_enable_rps(struct drm_device *dev)
DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
}
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void __gen6_update_ring_freq(struct drm_device *dev)
@@ -4338,11 +4373,35 @@ void gen6_update_ring_freq(struct drm_device *dev)
static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
u32 val, rp0;
- val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
- rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
+ if (dev->pdev->revision >= 0x20) {
+ val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
+ switch (INTEL_INFO(dev)->eu_total) {
+ case 8:
+ /* (2 * 4) config */
+ rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
+ break;
+ case 12:
+ /* (2 * 6) config */
+ rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
+ break;
+ case 16:
+ /* (2 * 8) config */
+ default:
+ /* Setting (2 * 8) Min RP0 for any other combination */
+ rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
+ break;
+ }
+ rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
+ } else {
+ /* For pre-production hardware */
+ val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
+ rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) &
+ PUNIT_GPU_STATUS_MAX_FREQ_MASK;
+ }
return rp0;
}
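
On production Cherryview the RP0 limit is now read from a fuse word whose field depends on the EU configuration, as the hunk above shows. A small sketch of that selection follows; the shift and mask values are placeholders, not the real FB_GFX_* register definitions.

#include <stdio.h>

#define FREQ_FUSE_MASK 0xff	/* placeholder width of one fuse field */

static unsigned chv_rp0_from_fuse(unsigned fuse, int eu_total,
				  int shift_2ss4eu, int shift_2ss6eu,
				  int shift_2ss8eu)
{
	int shift;

	switch (eu_total) {
	case 8:  shift = shift_2ss4eu; break;	/* (2 * 4) config */
	case 12: shift = shift_2ss6eu; break;	/* (2 * 6) config */
	case 16:				/* (2 * 8) config */
	default: shift = shift_2ss8eu; break;	/* fall back to the 2x8 field */
	}
	return (fuse >> shift) & FREQ_FUSE_MASK;
}

int main(void)
{
	/* hypothetical fuse layout: 2x8 field at bit 0, 2x6 at 8, 2x4 at 16 */
	unsigned fuse = (0x2au << 16) | (0x30u << 8) | 0x36u;

	printf("RP0 opcode: 0x%x\n", chv_rp0_from_fuse(fuse, 12, 16, 8, 0));
	return 0;
}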
@@ -4358,20 +4417,36 @@ static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
u32 val, rp1;
- val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
-
+ if (dev->pdev->revision >= 0x20) {
+ val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
+ rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
+ } else {
+ /* For pre-production hardware */
+ val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+ rp1 = ((val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) &
+ PUNIT_GPU_STATUS_MAX_FREQ_MASK);
+ }
return rp1;
}
static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
u32 val, rpn;
- val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
- rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;
+ if (dev->pdev->revision >= 0x20) {
+ val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE);
+ rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) &
+ FB_GFX_FREQ_FUSE_MASK);
+ } else { /* For pre-production hardware */
+ val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
+ rpn = ((val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) &
+ PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK);
+ }
+
return rpn;
}
@@ -4542,22 +4617,22 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
dev_priv->rps.max_freq);
dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
dev_priv->rps.efficient_freq);
dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
dev_priv->rps.rp1_freq);
dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
dev_priv->rps.min_freq);
/* Preserve min/max settings in case of re-init */
@@ -4611,22 +4686,22 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
dev_priv->rps.max_freq);
dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
dev_priv->rps.efficient_freq);
dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
dev_priv->rps.rp1_freq);
dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
dev_priv->rps.min_freq);
WARN_ONCE((dev_priv->rps.max_freq |
@@ -4670,7 +4745,10 @@ static void cherryview_enable_rps(struct drm_device *dev)
/* 1a & 1b: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have.*/
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+ /* Disable RC states. */
+ I915_WRITE(GEN6_RC_CONTROL, 0);
/* 2a: Program RC6 thresholds.*/
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
@@ -4681,7 +4759,8 @@ static void cherryview_enable_rps(struct drm_device *dev)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
- I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+ /* TO threshold set to 1750 us ( 0x557 * 1.28 us) */
+ I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
/* allows RC6 residency counter to work */
I915_WRITE(VLV_COUNTER_CONTROL,
@@ -4695,11 +4774,12 @@ static void cherryview_enable_rps(struct drm_device *dev)
/* 3: Enable RC6 */
if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
(pcbr >> VLV_PCBR_ADDR_SHIFT))
- rc6_mode = GEN6_RC_CTL_EI_MODE(1);
+ rc6_mode = GEN7_RC_CTL_TO_MODE;
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
/* 4 Program defaults and thresholds for RPS*/
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
I915_WRITE(GEN6_RP_UP_EI, 66000);
@@ -4707,14 +4787,10 @@ static void cherryview_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
- /* WaDisablePwrmtrEvent:chv (pre-production hw) */
- I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
- I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
-
/* 5: Enable RPS */
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
+ GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
@@ -4729,16 +4805,16 @@ static void cherryview_enable_rps(struct drm_device *dev)
dev_priv->rps.cur_freq = (val >> 8) & 0xff;
DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
dev_priv->rps.cur_freq);
DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
dev_priv->rps.efficient_freq);
valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void valleyview_enable_rps(struct drm_device *dev)
@@ -4759,15 +4835,18 @@ static void valleyview_enable_rps(struct drm_device *dev)
}
/* If VLV, Forcewake all wells, else re-direct to regular path */
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ /* Disable RC states. */
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
I915_WRITE(GEN6_RP_UP_EI, 66000);
I915_WRITE(GEN6_RP_DOWN_EI, 350000);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
@@ -4810,16 +4889,16 @@ static void valleyview_enable_rps(struct drm_device *dev)
dev_priv->rps.cur_freq = (val >> 8) & 0xff;
DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
dev_priv->rps.cur_freq);
DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
dev_priv->rps.efficient_freq);
valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
void ironlake_teardown_rc6(struct drm_device *dev)
@@ -5527,7 +5606,9 @@ static void intel_gen6_powersave_work(struct work_struct *work)
} else if (IS_VALLEYVIEW(dev)) {
valleyview_enable_rps(dev);
} else if (INTEL_INFO(dev)->gen >= 9) {
+ gen9_enable_rc6(dev);
gen9_enable_rps(dev);
+ __gen6_update_ring_freq(dev);
} else if (IS_BROADWELL(dev)) {
gen8_enable_rps(dev);
__gen6_update_ring_freq(dev);
@@ -6147,6 +6228,17 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
/*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ I915_WRITE(GEN7_GT_MODE,
+ _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
+
+ /*
* WaIncreaseL3CreditsForVLVB0:vlv
* This is the hardware default actually.
*/
@@ -6521,28 +6613,24 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
}
-int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
+int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
- int ret = -1;
-
if (IS_CHERRYVIEW(dev_priv->dev))
- ret = chv_gpu_freq(dev_priv, val);
+ return chv_gpu_freq(dev_priv, val);
else if (IS_VALLEYVIEW(dev_priv->dev))
- ret = byt_gpu_freq(dev_priv, val);
-
- return ret;
+ return byt_gpu_freq(dev_priv, val);
+ else
+ return val * GT_FREQUENCY_MULTIPLIER;
}
-int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
+int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
- int ret = -1;
-
if (IS_CHERRYVIEW(dev_priv->dev))
- ret = chv_freq_opcode(dev_priv, val);
+ return chv_freq_opcode(dev_priv, val);
else if (IS_VALLEYVIEW(dev_priv->dev))
- ret = byt_freq_opcode(dev_priv, val);
-
- return ret;
+ return byt_freq_opcode(dev_priv, val);
+ else
+ return val / GT_FREQUENCY_MULTIPLIER;
}
void intel_pm_setup(struct drm_device *dev)
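/*
 * Illustrative aside, not part of the patch: the rename from
 * vlv_gpu_freq()/vlv_freq_opcode() to intel_gpu_freq()/intel_freq_opcode()
 * above turns the VLV/CHV-only converters into the one place that maps
 * between RPS ratio values and MHz on all platforms. A minimal sketch of the
 * non-VLV/CHV fallback path, assuming GT_FREQUENCY_MULTIPLIER is 50 (ratio
 * units of 50 MHz); names are hypothetical, not i915 API.
 */
#include <stdio.h>

#define TOY_GT_FREQUENCY_MULTIPLIER 50	/* assumption; mirrors i915's define */

static int toy_gpu_freq(int ratio)
{
	return ratio * TOY_GT_FREQUENCY_MULTIPLIER;	/* ratio -> MHz */
}

static int toy_freq_opcode(int mhz)
{
	return mhz / TOY_GT_FREQUENCY_MULTIPLIER;	/* MHz -> ratio */
}

int main(void)
{
	int ratio = 18;	/* hypothetical RPS ratio */

	printf("%d -> %d MHz -> %d\n",
	       ratio, toy_gpu_freq(ratio), toy_freq_opcode(toy_gpu_freq(ratio)));
	return 0;
}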
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 5ae193ec464a..b9f40c2e0af7 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -79,8 +79,8 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp,
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
- u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
- u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
+ u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config->cpu_transcoder);
+ u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config->cpu_transcoder);
uint32_t *data = (uint32_t *) vsc_psr;
unsigned int i;
@@ -142,6 +142,7 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t aux_clock_divider;
+ uint32_t aux_data_reg, aux_ctl_reg;
int precharge = 0x3;
static const uint8_t aux_msg[] = {
[0] = DP_AUX_NATIVE_WRITE << 4,
@@ -164,16 +165,34 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
+ aux_data_reg = (INTEL_INFO(dev)->gen >= 9) ?
+ DPA_AUX_CH_DATA1 : EDP_PSR_AUX_DATA1(dev);
+ aux_ctl_reg = (INTEL_INFO(dev)->gen >= 9) ?
+ DPA_AUX_CH_CTL : EDP_PSR_AUX_CTL(dev);
+
/* Setup AUX registers */
for (i = 0; i < sizeof(aux_msg); i += 4)
- I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i,
+ I915_WRITE(aux_data_reg + i,
intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
- I915_WRITE(EDP_PSR_AUX_CTL(dev),
+ if (INTEL_INFO(dev)->gen >= 9) {
+ uint32_t val;
+
+ val = I915_READ(aux_ctl_reg);
+ val &= ~DP_AUX_CH_CTL_TIME_OUT_MASK;
+ val |= DP_AUX_CH_CTL_TIME_OUT_1600us;
+ val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK;
+ val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
+ /* Use hardcoded data values for PSR */
+ val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL;
+ I915_WRITE(aux_ctl_reg, val);
+ } else {
+ I915_WRITE(aux_ctl_reg,
DP_AUX_CH_CTL_TIME_OUT_400us |
(sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
+ }
}
static void vlv_psr_enable_source(struct intel_dp *intel_dp)
@@ -263,14 +282,14 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
}
if (IS_HASWELL(dev) &&
- I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
+ I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
S3D_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
return false;
}
if (IS_HASWELL(dev) &&
- intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
return false;
}
@@ -351,6 +370,9 @@ void intel_psr_enable(struct intel_dp *intel_dp)
/* Enable PSR on the panel */
hsw_psr_enable_sink(intel_dp);
+
+ if (INTEL_INFO(dev)->gen >= 9)
+ intel_psr_activate(intel_dp);
} else {
vlv_psr_setup_vsc(intel_dp);
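/*
 * Illustrative aside, not part of the patch: the gen9 branch added to
 * hsw_psr_enable_sink() above updates the AUX control register with a
 * read-modify-write of individual fields instead of writing one fixed value.
 * A minimal, generic sketch of that pattern (names are hypothetical, not
 * i915 API).
 */
#include <stdint.h>

static inline uint32_t toy_update_field(uint32_t reg_val, uint32_t mask,
					uint32_t field_val)
{
	/* clear the masked bits, then OR in the new (already shifted) value */
	return (reg_val & ~mask) | (field_val & mask);
}

int main(void)
{
	uint32_t reg = 0x0000ffffu;

	/* set bits 7:4 to 0xa while leaving every other bit untouched */
	reg = toy_update_field(reg, 0xf0u, 0xa0u);
	return reg == 0x0000ffafu ? 0 : 1;
}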
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 23020d67329b..0bd3976d88e1 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -539,7 +539,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
struct drm_i915_gem_object *obj = ringbuf->obj;
int ret = 0;
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
if (!stop_ring(ring)) {
/* G45 ring initialization often fails to reset head to zero */
@@ -611,7 +611,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
out:
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return ret;
}
@@ -851,9 +851,25 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
*/
WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
+ /* Wa4x4STCOptimizationDisable:chv */
+ WA_SET_BIT_MASKED(CACHE_MODE_1,
+ GEN8_4x4_STC_OPTIMIZATION_DISABLE);
+
/* Improve HiZ throughput on CHV. */
WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+ GEN6_WIZ_HASHING_MASK,
+ GEN6_WIZ_HASHING_16x4);
+
return 0;
}
@@ -1949,7 +1965,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
return 0;
list_for_each_entry(request, &ring->request_list, list) {
- if (__intel_ring_space(request->tail, ringbuf->tail,
+ if (__intel_ring_space(request->postfix, ringbuf->tail,
ringbuf->size) >= n) {
break;
}
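/*
 * Illustrative aside, not part of the patch: intel_ring_wait_request() above
 * now measures the space that would become free up to a request's postfix
 * rather than its tail. The underlying arithmetic is ordinary circular-buffer
 * free-space computation; a generic sketch follows (the real
 * __intel_ring_space() additionally reserves some slack, omitted here).
 */
static int toy_ring_space(int head, int tail, int size)
{
	int space = head - tail;	/* bytes from tail up to head */

	if (space <= 0)			/* wrapped: head is behind tail */
		space += size;
	return space;
}

int main(void)
{
	/* 256-byte ring, tail at 200, consumption point (head) at 40 */
	return toy_ring_space(40, 200, 256) == 96 ? 0 : 1;
}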
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6dbb6f462007..714f3fdd57d2 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -99,13 +99,6 @@ struct intel_ringbuffer {
struct intel_engine_cs *ring;
- /*
- * FIXME: This backpointer is an artifact of the history of how the
- * execlist patches came into being. It will get removed once the basic
- * code has landed.
- */
- struct intel_context *FIXME_lrc_ctx;
-
u32 head;
u32 tail;
int space;
@@ -123,6 +116,8 @@ struct intel_ringbuffer {
u32 last_retired_head;
};
+struct intel_context;
+
struct intel_engine_cs {
const char *name;
enum intel_ring_id {
@@ -239,11 +234,14 @@ struct intel_engine_cs {
struct list_head execlist_retired_req_list;
u8 next_context_status_buffer;
u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
- int (*emit_request)(struct intel_ringbuffer *ringbuf);
+ int (*emit_request)(struct intel_ringbuffer *ringbuf,
+ struct drm_i915_gem_request *request);
int (*emit_flush)(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
u32 invalidate_domains,
u32 flush_domains);
int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
u64 offset, unsigned flags);
/**
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 8bf7bb4a12bc..49695d7d51e3 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -703,6 +703,10 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
BIT(POWER_DOMAIN_PORT_CRT) | \
BIT(POWER_DOMAIN_PLLS) | \
+ BIT(POWER_DOMAIN_AUX_A) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
+ BIT(POWER_DOMAIN_AUX_D) | \
BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS ( \
(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
@@ -724,24 +728,30 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_PORT_CRT) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_INIT))
#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_INIT))
#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
#define CHV_PIPE_A_POWER_DOMAINS ( \
@@ -761,20 +771,25 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_D) | \
BIT(POWER_DOMAIN_INIT))
#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_D) | \
BIT(POWER_DOMAIN_INIT))
#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_D) | \
BIT(POWER_DOMAIN_INIT))
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 4e3d362931e9..64ad2b40179f 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -30,6 +30,7 @@
#include <linux/delay.h>
#include <linux/export.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
@@ -1007,7 +1008,7 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
}
if (intel_sdvo->rgb_quant_range_selectable) {
- if (intel_crtc->config.limited_color_range)
+ if (intel_crtc->config->limited_color_range)
frame.avi.quantization_range =
HDMI_QUANTIZATION_RANGE_LIMITED;
else
@@ -1085,7 +1086,7 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
return true;
}
-static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
+static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
{
unsigned dotclock = pipe_config->port_clock;
struct dpll *clock = &pipe_config->dpll;
@@ -1112,11 +1113,11 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
}
static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
- struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
- struct drm_display_mode *mode = &pipe_config->requested_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *mode = &pipe_config->base.mode;
DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
pipe_config->pipe_bpp = 8*3;
@@ -1181,8 +1182,8 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
struct drm_display_mode *adjusted_mode =
- &crtc->config.adjusted_mode;
- struct drm_display_mode *mode = &crtc->config.requested_mode;
+ &crtc->config->base.adjusted_mode;
+ struct drm_display_mode *mode = &crtc->config->base.mode;
struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
u32 sdvox;
struct intel_sdvo_in_out_map in_out;
@@ -1224,7 +1225,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
if (!intel_sdvo_set_target_input(intel_sdvo))
return;
- if (crtc->config.has_hdmi_sink) {
+ if (crtc->config->has_hdmi_sink) {
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
@@ -1244,7 +1245,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
DRM_INFO("Setting input timings on %s failed\n",
SDVO_NAME(intel_sdvo));
- switch (crtc->config.pixel_multiplier) {
+ switch (crtc->config->pixel_multiplier) {
default:
WARN(1, "unknown pixel multiplier specified\n");
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
@@ -1259,7 +1260,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
/* The real mode polarity is set by the SDVO commands, using
* struct intel_sdvo_dtd. */
sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
- if (!HAS_PCH_SPLIT(dev) && crtc->config.limited_color_range)
+ if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
sdvox |= HDMI_COLOR_RANGE_16_235;
if (INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_BORDER_ENABLE;
@@ -1289,7 +1290,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
/* done in crtc_mode_set as it lives inside the dpll register */
} else {
- sdvox |= (crtc->config.pixel_multiplier - 1)
+ sdvox |= (crtc->config->pixel_multiplier - 1)
<< SDVO_PORT_MULTIPLY_SHIFT;
}
@@ -1338,7 +1339,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
}
static void intel_sdvo_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1370,7 +1371,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
flags |= DRM_MODE_FLAG_NVSYNC;
}
- pipe_config->adjusted_mode.flags |= flags;
+ pipe_config->base.adjusted_mode.flags |= flags;
/*
* pixel multiplier readout is tricky: Only on i915g/gm it is stored in
@@ -1392,7 +1393,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
- pipe_config->adjusted_mode.crtc_clock = dotclock;
+ pipe_config->base.adjusted_mode.crtc_clock = dotclock;
/* Cross check the port pixel multiplier with the sdvo encoder state. */
if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
@@ -2190,7 +2191,9 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
.detect = intel_sdvo_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_sdvo_set_property,
+ .atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_sdvo_destroy,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 01d841ea3140..3c42eeffa3cb 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -75,7 +75,7 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
return 0;
}
-u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr)
+u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr)
{
u32 val = 0;
@@ -89,7 +89,7 @@ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr)
return val;
}
-void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
+void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index dca3f70ef1ba..0a52c44ad03d 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -80,7 +80,7 @@ static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
{
struct drm_device *dev = crtc->base.dev;
- const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
+ const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
enum pipe pipe = crtc->pipe;
long timeout = msecs_to_jiffies_timeout(1);
int scanline, min, max, vblank_start;
@@ -256,7 +256,7 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
default:
BUG();
}
- if (intel_plane->rotation == BIT(DRM_ROTATE_180))
+ if (drm_plane->state->rotation == BIT(DRM_ROTATE_180))
plane_ctl |= PLANE_CTL_ROTATE_180;
plane_ctl |= PLANE_CTL_ENABLE;
@@ -493,7 +493,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
fb->pitches[0]);
linear_offset -= sprsurf_offset;
- if (intel_plane->rotation == BIT(DRM_ROTATE_180)) {
+ if (dplane->state->rotation == BIT(DRM_ROTATE_180)) {
sprctl |= SP_ROTATE_180;
x += src_w;
@@ -684,7 +684,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
pixel_size, fb->pitches[0]);
linear_offset -= sprsurf_offset;
- if (intel_plane->rotation == BIT(DRM_ROTATE_180)) {
+ if (plane->state->rotation == BIT(DRM_ROTATE_180)) {
sprctl |= SPRITE_ROTATE_180;
/* HSW and BDW does this automagically in hardware */
@@ -884,7 +884,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
pixel_size, fb->pitches[0]);
linear_offset -= dvssurf_offset;
- if (intel_plane->rotation == BIT(DRM_ROTATE_180)) {
+ if (plane->state->rotation == BIT(DRM_ROTATE_180)) {
dvscntr |= DVS_ROTATE_180;
x += src_w;
@@ -1125,7 +1125,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
min_scale = intel_plane->can_scale ? 1 : (1 << 16);
drm_rect_rotate(src, fb->width << 16, fb->height << 16,
- intel_plane->rotation);
+ state->base.rotation);
hscale = drm_rect_calc_hscale_relaxed(src, dst, min_scale, max_scale);
BUG_ON(hscale < 0);
@@ -1166,7 +1166,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
drm_rect_height(dst) * vscale - drm_rect_height(src));
drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16,
- intel_plane->rotation);
+ state->base.rotation);
/* sanity check to make sure the src viewport wasn't enlarged */
WARN_ON(src->x1 < (int) state->base.src_x ||
@@ -1362,33 +1362,6 @@ out_unlock:
return ret;
}
-int intel_plane_set_property(struct drm_plane *plane,
- struct drm_property *prop,
- uint64_t val)
-{
- struct drm_device *dev = plane->dev;
- struct intel_plane *intel_plane = to_intel_plane(plane);
- uint64_t old_val;
- int ret = -ENOENT;
-
- if (prop == dev->mode_config.rotation_property) {
- /* exactly one rotation angle please */
- if (hweight32(val & 0xf) != 1)
- return -EINVAL;
-
- if (intel_plane->rotation == val)
- return 0;
-
- old_val = intel_plane->rotation;
- intel_plane->rotation = val;
- ret = intel_plane_restore(plane);
- if (ret)
- intel_plane->rotation = old_val;
- }
-
- return ret;
-}
-
int intel_plane_restore(struct drm_plane *plane)
{
if (!plane->crtc || !plane->fb)
@@ -1401,15 +1374,6 @@ int intel_plane_restore(struct drm_plane *plane)
plane->state->src_w, plane->state->src_h);
}
-static const struct drm_plane_funcs intel_sprite_plane_funcs = {
- .update_plane = drm_plane_helper_update,
- .disable_plane = drm_plane_helper_disable,
- .destroy = intel_plane_destroy,
- .set_property = intel_plane_set_property,
- .atomic_duplicate_state = intel_plane_duplicate_state,
- .atomic_destroy_state = intel_plane_destroy_state,
-};
-
static uint32_t ilk_plane_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUYV,
@@ -1457,6 +1421,7 @@ int
intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
{
struct intel_plane *intel_plane;
+ struct intel_plane_state *state;
unsigned long possible_crtcs;
const uint32_t *plane_formats;
int num_plane_formats;
@@ -1469,12 +1434,12 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
if (!intel_plane)
return -ENOMEM;
- intel_plane->base.state =
- intel_plane_duplicate_state(&intel_plane->base);
- if (intel_plane->base.state == NULL) {
+ state = intel_create_plane_state(&intel_plane->base);
+ if (!state) {
kfree(intel_plane);
return -ENOMEM;
}
+ intel_plane->base.state = &state->base;
switch (INTEL_INFO(dev)->gen) {
case 5:
@@ -1545,12 +1510,11 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
intel_plane->pipe = pipe;
intel_plane->plane = plane;
- intel_plane->rotation = BIT(DRM_ROTATE_0);
intel_plane->check_plane = intel_check_sprite_plane;
intel_plane->commit_plane = intel_commit_sprite_plane;
possible_crtcs = (1 << pipe);
ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
- &intel_sprite_plane_funcs,
+ &intel_plane_funcs,
plane_formats, num_plane_formats,
DRM_PLANE_TYPE_OVERLAY);
if (ret) {
@@ -1567,7 +1531,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
if (dev->mode_config.rotation_property)
drm_object_attach_property(&intel_plane->base.base,
dev->mode_config.rotation_property,
- intel_plane->rotation);
+ state->base.rotation);
drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
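/*
 * Illustrative aside, not part of the patch: the sprite changes above stop
 * caching rotation in struct intel_plane and read it from the per-plane
 * state object instead, following the atomic-modeset pattern where every
 * mutable plane property lives in a state struct that is duplicated, edited
 * and then swapped in. A toy model of that pattern (names are hypothetical,
 * not DRM API).
 */
#include <stdlib.h>
#include <string.h>

struct toy_plane_state {
	unsigned int rotation;		/* e.g. BIT(DRM_ROTATE_180) in real code */
};

struct toy_plane {
	struct toy_plane_state *state;	/* currently committed state */
};

/* duplicate the committed state so a pending update is built on a copy */
static struct toy_plane_state *toy_duplicate_state(const struct toy_plane *plane)
{
	struct toy_plane_state *s = malloc(sizeof(*s));

	if (s)
		memcpy(s, plane->state, sizeof(*s));
	return s;
}

/* commit: free the old state and make the new one current */
static void toy_swap_state(struct toy_plane *plane, struct toy_plane_state *s)
{
	free(plane->state);
	plane->state = s;
}

int main(void)
{
	struct toy_plane plane = { calloc(1, sizeof(struct toy_plane_state)) };
	struct toy_plane_state *pending;

	if (!plane.state)
		return 1;
	pending = toy_duplicate_state(&plane);
	if (!pending)
		return 1;
	pending->rotation = 1u << 1;	/* pretend this is the 180 degree bit */
	toy_swap_state(&plane, pending);
	return plane.state->rotation == (1u << 1) ? 0 : 1;
}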
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 6f5f59b880f5..892d23c8479d 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -31,6 +31,7 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
@@ -908,14 +909,14 @@ intel_tv_mode_valid(struct drm_connector *connector,
static void
intel_tv_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
- pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+ pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
static bool
intel_tv_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct intel_tv *intel_tv = enc_to_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
@@ -923,12 +924,12 @@ intel_tv_compute_config(struct intel_encoder *encoder,
if (!tv_mode)
return false;
- pipe_config->adjusted_mode.crtc_clock = tv_mode->clock;
+ pipe_config->base.adjusted_mode.crtc_clock = tv_mode->clock;
DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
pipe_config->pipe_bpp = 8*3;
/* TV has its own notion of sync and other mode flags, so clear them. */
- pipe_config->adjusted_mode.flags = 0;
+ pipe_config->base.adjusted_mode.flags = 0;
/*
* FIXME: We don't check whether the input mode is actually what we want
@@ -1512,7 +1513,9 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
.detect = intel_tv_detect,
.destroy = intel_tv_destroy,
.set_property = intel_tv_set_property,
+ .atomic_get_property = intel_connector_atomic_get_property,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index e9561de382aa..76b60a3538b2 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -24,6 +24,8 @@
#include "i915_drv.h"
#include "intel_drv.h"
+#include <linux/pm_runtime.h>
+
#define FORCEWAKE_ACK_TIMEOUT_MS 2
#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
@@ -40,6 +42,26 @@
#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
+static const char * const forcewake_domain_names[] = {
+ "render",
+ "blitter",
+ "media",
+};
+
+const char *
+intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
+{
+ BUILD_BUG_ON((sizeof(forcewake_domain_names)/sizeof(const char *)) !=
+ FW_DOMAIN_ID_COUNT);
+
+ if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
+ return forcewake_domain_names[id];
+
+ WARN_ON(id);
+
+ return "unknown";
+}
+
static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
@@ -47,73 +69,127 @@ assert_device_not_suspended(struct drm_i915_private *dev_priv)
"Device suspended\n");
}
-static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
+static inline void
+fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
- /* w/a for a sporadic read returning 0 by waiting for the GT
- * thread to wake up.
- */
- if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
- GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
- DRM_ERROR("GT thread status wait timed out\n");
+ WARN_ON(d->reg_set == 0);
+ __raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}
-static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
+static inline void
+fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
- __raw_i915_write32(dev_priv, FORCEWAKE, 0);
- /* something from same cacheline, but !FORCEWAKE */
- __raw_posting_read(dev_priv, ECOBUS);
+ mod_timer_pinned(&d->timer, jiffies + 1);
}
-static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
- int fw_engine)
+static inline void
+fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
- if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
+ if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
+ FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+ DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
+ intel_uncore_forcewake_domain_to_str(d->id));
+}
- __raw_i915_write32(dev_priv, FORCEWAKE, 1);
- /* something from same cacheline, but !FORCEWAKE */
- __raw_posting_read(dev_priv, ECOBUS);
+static inline void
+fw_domain_get(const struct intel_uncore_forcewake_domain *d)
+{
+ __raw_i915_write32(d->i915, d->reg_set, d->val_set);
+}
- if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
+static inline void
+fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
+{
+ if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
+ FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
+ DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
+ intel_uncore_forcewake_domain_to_str(d->id));
+}
- /* WaRsForcewakeWaitTC0:snb */
- __gen6_gt_wait_for_thread_c0(dev_priv);
+static inline void
+fw_domain_put(const struct intel_uncore_forcewake_domain *d)
+{
+ __raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}
-static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
+static inline void
+fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
- __raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
- /* something from same cacheline, but !FORCEWAKE_MT */
- __raw_posting_read(dev_priv, ECOBUS);
+ /* something from same cacheline, but not from the set register */
+ if (d->reg_post)
+ __raw_posting_read(d->i915, d->reg_post);
}
-static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
- int fw_engine)
+static void
+fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
- u32 forcewake_ack;
+ struct intel_uncore_forcewake_domain *d;
+ enum forcewake_domain_id id;
- if (IS_HASWELL(dev_priv->dev) || IS_BROADWELL(dev_priv->dev))
- forcewake_ack = FORCEWAKE_ACK_HSW;
- else
- forcewake_ack = FORCEWAKE_MT_ACK;
+ for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
+ fw_domain_wait_ack_clear(d);
+ fw_domain_get(d);
+ fw_domain_wait_ack(d);
+ }
+}
- if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+static void
+fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
+{
+ struct intel_uncore_forcewake_domain *d;
+ enum forcewake_domain_id id;
- __raw_i915_write32(dev_priv, FORCEWAKE_MT,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
- /* something from same cacheline, but !FORCEWAKE_MT */
- __raw_posting_read(dev_priv, ECOBUS);
+ for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
+ fw_domain_put(d);
+ fw_domain_posting_read(d);
+ }
+}
- if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
+static void
+fw_domains_posting_read(struct drm_i915_private *dev_priv)
+{
+ struct intel_uncore_forcewake_domain *d;
+ enum forcewake_domain_id id;
+
+ /* No need to do this for all domains, just the first one found */
+ for_each_fw_domain(d, dev_priv, id) {
+ fw_domain_posting_read(d);
+ break;
+ }
+}
- /* WaRsForcewakeWaitTC0:ivb,hsw */
+static void
+fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
+{
+ struct intel_uncore_forcewake_domain *d;
+ enum forcewake_domain_id id;
+
+ WARN_ON(dev_priv->uncore.fw_domains == 0);
+
+ for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
+ fw_domain_reset(d);
+
+ fw_domains_posting_read(dev_priv);
+}
+
+static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
+{
+ /* w/a for a sporadic read returning 0 by waiting for the GT
+ * thread to wake up.
+ */
+ if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
+ GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
+ DRM_ERROR("GT thread status wait timed out\n");
+}
+
+static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
+{
+ fw_domains_get(dev_priv, fw_domains);
+
+ /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
__gen6_gt_wait_for_thread_c0(dev_priv);
}
@@ -126,27 +202,13 @@ static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}
-static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
- int fw_engine)
+static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
{
- __raw_i915_write32(dev_priv, FORCEWAKE, 0);
- /* something from same cacheline, but !FORCEWAKE */
- __raw_posting_read(dev_priv, ECOBUS);
+ fw_domains_put(dev_priv, fw_domains);
gen6_gt_check_fifodbg(dev_priv);
}
-static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
- int fw_engine)
-{
- __raw_i915_write32(dev_priv, FORCEWAKE_MT,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
- /* something from same cacheline, but !FORCEWAKE_MT */
- __raw_posting_read(dev_priv, ECOBUS);
-
- if (IS_GEN7(dev_priv->dev))
- gen6_gt_check_fifodbg(dev_priv);
-}
-
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
int ret = 0;
@@ -174,332 +236,78 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
return ret;
}
-static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
-{
- __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
- _MASKED_BIT_DISABLE(0xffff));
- __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
- _MASKED_BIT_DISABLE(0xffff));
- /* something from same cacheline, but !FORCEWAKE_VLV */
- __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
-}
-
-static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
- int fw_engine)
-{
- /* Check for Render Engine */
- if (FORCEWAKE_RENDER & fw_engine) {
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_VLV) &
- FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
-
- __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_VLV) &
- FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: waiting for Render to ack.\n");
- }
-
- /* Check for Media Engine */
- if (FORCEWAKE_MEDIA & fw_engine) {
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_MEDIA_VLV) &
- FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
-
- __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_MEDIA_VLV) &
- FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: waiting for media to ack.\n");
- }
-}
-
-static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
- int fw_engine)
-{
-
- /* Check for Render Engine */
- if (FORCEWAKE_RENDER & fw_engine)
- __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-
-
- /* Check for Media Engine */
- if (FORCEWAKE_MEDIA & fw_engine)
- __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-
- /* something from same cacheline, but !FORCEWAKE_VLV */
- __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
- if (!IS_CHERRYVIEW(dev_priv->dev))
- gen6_gt_check_fifodbg(dev_priv);
-}
-
-static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
-{
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- if (fw_engine & FORCEWAKE_RENDER &&
- dev_priv->uncore.fw_rendercount++ != 0)
- fw_engine &= ~FORCEWAKE_RENDER;
- if (fw_engine & FORCEWAKE_MEDIA &&
- dev_priv->uncore.fw_mediacount++ != 0)
- fw_engine &= ~FORCEWAKE_MEDIA;
-
- if (fw_engine)
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
+static void intel_uncore_fw_release_timer(unsigned long arg)
{
+ struct intel_uncore_forcewake_domain *domain = (void *)arg;
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ assert_device_not_suspended(domain->i915);
- if (fw_engine & FORCEWAKE_RENDER) {
- WARN_ON(!dev_priv->uncore.fw_rendercount);
- if (--dev_priv->uncore.fw_rendercount != 0)
- fw_engine &= ~FORCEWAKE_RENDER;
- }
-
- if (fw_engine & FORCEWAKE_MEDIA) {
- WARN_ON(!dev_priv->uncore.fw_mediacount);
- if (--dev_priv->uncore.fw_mediacount != 0)
- fw_engine &= ~FORCEWAKE_MEDIA;
- }
+ spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
+ if (WARN_ON(domain->wake_count == 0))
+ domain->wake_count++;
- if (fw_engine)
- dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);
+ if (--domain->wake_count == 0)
+ domain->i915->uncore.funcs.force_wake_put(domain->i915,
+ 1 << domain->id);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
}
-static void __gen9_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
-{
- __raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
- _MASKED_BIT_DISABLE(0xffff));
-
- __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
- _MASKED_BIT_DISABLE(0xffff));
-
- __raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
- _MASKED_BIT_DISABLE(0xffff));
-}
-
-static void
-__gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
-{
- /* Check for Render Engine */
- if (FORCEWAKE_RENDER & fw_engine) {
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_RENDER_GEN9) &
- FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
-
- __raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_RENDER_GEN9) &
- FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: waiting for Render to ack.\n");
- }
-
- /* Check for Media Engine */
- if (FORCEWAKE_MEDIA & fw_engine) {
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_MEDIA_GEN9) &
- FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
-
- __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_MEDIA_GEN9) &
- FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: waiting for Media to ack.\n");
- }
-
- /* Check for Blitter Engine */
- if (FORCEWAKE_BLITTER & fw_engine) {
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_BLITTER_GEN9) &
- FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: Blitter forcewake old ack to clear.\n");
-
- __raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-
- if (wait_for_atomic((__raw_i915_read32(dev_priv,
- FORCEWAKE_ACK_BLITTER_GEN9) &
- FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out: waiting for Blitter to ack.\n");
- }
-}
-
-static void
-__gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
-{
- /* Check for Render Engine */
- if (FORCEWAKE_RENDER & fw_engine)
- __raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-
- /* Check for Media Engine */
- if (FORCEWAKE_MEDIA & fw_engine)
- __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-
- /* Check for Blitter Engine */
- if (FORCEWAKE_BLITTER & fw_engine)
- __raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-}
-
-static void
-gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
+void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
+ struct intel_uncore_forcewake_domain *domain;
+ int retry_count = 100;
+ enum forcewake_domain_id id;
+ enum forcewake_domains fw = 0, active_domains;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- if (FORCEWAKE_RENDER & fw_engine) {
- if (dev_priv->uncore.fw_rendercount++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- FORCEWAKE_RENDER);
- }
+ /* Hold uncore.lock across reset to prevent any register access
+ * with forcewake not set correctly. Wait until all pending
+ * timers have run before taking the lock.
+ */
+ while (1) {
+ active_domains = 0;
- if (FORCEWAKE_MEDIA & fw_engine) {
- if (dev_priv->uncore.fw_mediacount++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- FORCEWAKE_MEDIA);
- }
+ for_each_fw_domain(domain, dev_priv, id) {
+ if (del_timer_sync(&domain->timer) == 0)
+ continue;
- if (FORCEWAKE_BLITTER & fw_engine) {
- if (dev_priv->uncore.fw_blittercount++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- FORCEWAKE_BLITTER);
- }
+ intel_uncore_fw_release_timer((unsigned long)domain);
+ }
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-static void
-gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
-{
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ for_each_fw_domain(domain, dev_priv, id) {
+ if (timer_pending(&domain->timer))
+ active_domains |= (1 << id);
+ }
- if (FORCEWAKE_RENDER & fw_engine) {
- WARN_ON(dev_priv->uncore.fw_rendercount == 0);
- if (--dev_priv->uncore.fw_rendercount == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv,
- FORCEWAKE_RENDER);
- }
+ if (active_domains == 0)
+ break;
- if (FORCEWAKE_MEDIA & fw_engine) {
- WARN_ON(dev_priv->uncore.fw_mediacount == 0);
- if (--dev_priv->uncore.fw_mediacount == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv,
- FORCEWAKE_MEDIA);
- }
+ if (--retry_count == 0) {
+ DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
+ break;
+ }
- if (FORCEWAKE_BLITTER & fw_engine) {
- WARN_ON(dev_priv->uncore.fw_blittercount == 0);
- if (--dev_priv->uncore.fw_blittercount == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv,
- FORCEWAKE_BLITTER);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ cond_resched();
}
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void gen6_force_wake_timer(unsigned long arg)
-{
- struct drm_i915_private *dev_priv = (void *)arg;
- unsigned long irqflags;
-
- assert_device_not_suspended(dev_priv);
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- WARN_ON(!dev_priv->uncore.forcewake_count);
-
- if (--dev_priv->uncore.forcewake_count == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-
- intel_runtime_pm_put(dev_priv);
-}
-
-void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long irqflags;
-
- if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
- gen6_force_wake_timer((unsigned long)dev_priv);
-
- /* Hold uncore.lock across reset to prevent any register access
- * with forcewake not set correctly
- */
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ WARN_ON(active_domains);
- if (IS_VALLEYVIEW(dev))
- vlv_force_wake_reset(dev_priv);
- else if (IS_GEN6(dev) || IS_GEN7(dev))
- __gen6_gt_force_wake_reset(dev_priv);
+ for_each_fw_domain(domain, dev_priv, id)
+ if (domain->wake_count)
+ fw |= 1 << id;
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
- __gen7_gt_force_wake_mt_reset(dev_priv);
+ if (fw)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
- if (IS_GEN9(dev))
- __gen9_gt_force_wake_mt_reset(dev_priv);
+ fw_domains_reset(dev_priv, FORCEWAKE_ALL);
if (restore) { /* If reset with a user forcewake, try to restore */
- unsigned fw = 0;
-
- if (IS_VALLEYVIEW(dev)) {
- if (dev_priv->uncore.fw_rendercount)
- fw |= FORCEWAKE_RENDER;
-
- if (dev_priv->uncore.fw_mediacount)
- fw |= FORCEWAKE_MEDIA;
- } else if (IS_GEN9(dev)) {
- if (dev_priv->uncore.fw_rendercount)
- fw |= FORCEWAKE_RENDER;
-
- if (dev_priv->uncore.fw_mediacount)
- fw |= FORCEWAKE_MEDIA;
-
- if (dev_priv->uncore.fw_blittercount)
- fw |= FORCEWAKE_BLITTER;
- } else {
- if (dev_priv->uncore.forcewake_count)
- fw = FORCEWAKE_ALL;
- }
-
if (fw)
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
@@ -509,17 +317,16 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
GT_FIFO_FREE_ENTRIES_MASK;
}
+ if (!restore)
+ assert_forcewakes_inactive(dev_priv);
+
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static void __intel_uncore_early_sanitize(struct drm_device *dev,
- bool restore_forcewake)
+static void intel_uncore_ellc_detect(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (HAS_FPGA_DBG_UNCLAIMED(dev))
- __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-
if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
(__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
/* The docs do not explain exactly how the calculation can be
@@ -530,6 +337,15 @@ static void __intel_uncore_early_sanitize(struct drm_device *dev,
dev_priv->ellc_size = 128;
DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
}
+}
+
+static void __intel_uncore_early_sanitize(struct drm_device *dev,
+ bool restore_forcewake)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (HAS_FPGA_DBG_UNCLAIMED(dev))
+ __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
/* clear out old GT FIFO errors */
if (IS_GEN6(dev) || IS_GEN7(dev))
@@ -551,81 +367,92 @@ void intel_uncore_sanitize(struct drm_device *dev)
intel_disable_gt_powersave(dev);
}
-/*
- * Generally this is called implicitly by the register read function. However,
- * if some sequence requires the GT to not power down then this function should
- * be called at the beginning of the sequence followed by a call to
- * gen6_gt_force_wake_put() at the end of the sequence.
+/**
+ * intel_uncore_forcewake_get - grab forcewake domain references
+ * @dev_priv: i915 device instance
+ * @fw_domains: forcewake domains to get reference on
+ *
+ * This function can be used to get GT's forcewake domain references.
+ * Normal register access will handle the forcewake domains automatically.
+ * However, if some sequence requires the GT to not power down particular
+ * forcewake domains, this function should be called at the beginning of the
+ * sequence, and the reference should subsequently be dropped by a symmetric
+ * call to intel_uncore_forcewake_put(). Usually the caller wants all domains
+ * kept awake, in which case @fw_domains would be FORCEWAKE_ALL.
*/
-void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
+void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
{
unsigned long irqflags;
+ struct intel_uncore_forcewake_domain *domain;
+ enum forcewake_domain_id id;
if (!dev_priv->uncore.funcs.force_wake_get)
return;
- intel_runtime_pm_get(dev_priv);
-
- /* Redirect to Gen9 specific routine */
- if (IS_GEN9(dev_priv->dev))
- return gen9_force_wake_get(dev_priv, fw_engine);
+ WARN_ON(dev_priv->pm.suspended);
- /* Redirect to VLV specific routine */
- if (IS_VALLEYVIEW(dev_priv->dev))
- return vlv_force_wake_get(dev_priv, fw_engine);
+ fw_domains &= dev_priv->uncore.fw_domains;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- if (dev_priv->uncore.forcewake_count++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+ for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+ if (domain->wake_count++)
+ fw_domains &= ~(1 << id);
+ }
+
+ if (fw_domains)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-/*
- * see gen6_gt_force_wake_get()
+/**
+ * intel_uncore_forcewake_put - release a forcewake domain reference
+ * @dev_priv: i915 device instance
+ * @fw_domains: forcewake domains to put references
+ *
+ * This function drops the device-level forcewake references for the
+ * specified domains obtained by intel_uncore_forcewake_get().
*/
-void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
+void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
{
unsigned long irqflags;
- bool delayed = false;
+ struct intel_uncore_forcewake_domain *domain;
+ enum forcewake_domain_id id;
if (!dev_priv->uncore.funcs.force_wake_put)
return;
- /* Redirect to Gen9 specific routine */
- if (IS_GEN9(dev_priv->dev)) {
- gen9_force_wake_put(dev_priv, fw_engine);
- goto out;
- }
+ fw_domains &= dev_priv->uncore.fw_domains;
- /* Redirect to VLV specific routine */
- if (IS_VALLEYVIEW(dev_priv->dev)) {
- vlv_force_wake_put(dev_priv, fw_engine);
- goto out;
- }
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+ if (WARN_ON(domain->wake_count == 0))
+ continue;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- WARN_ON(!dev_priv->uncore.forcewake_count);
+ if (--domain->wake_count)
+ continue;
- if (--dev_priv->uncore.forcewake_count == 0) {
- dev_priv->uncore.forcewake_count++;
- delayed = true;
- mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
- jiffies + 1);
+ domain->wake_count++;
+ fw_domain_arm_timer(domain);
}
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-out:
- if (!delayed)
- intel_runtime_pm_put(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
+void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore_forcewake_domain *domain;
+ enum forcewake_domain_id id;
+
if (!dev_priv->uncore.funcs.force_wake_get)
return;
- WARN_ON(dev_priv->uncore.forcewake_count > 0);
+ for_each_fw_domain(domain, dev_priv, id)
+ WARN_ON(domain->wake_count);
}
/* We give fast paths for the really cool registers */
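/*
 * Illustrative aside, not part of the patch: the consolidation above replaces
 * the per-platform render/media/blitter reference counters with generic
 * forcewake domains, but the contract callers see through
 * intel_uncore_forcewake_get()/intel_uncore_forcewake_put() is still plain
 * reference counting: the first get powers the well up, the last put lets it
 * drop (in the real code via a deferred timer). A toy model of that contract
 * follows; names are hypothetical, not i915 API.
 */
#include <assert.h>

struct toy_fw_domain {
	unsigned int wake_count;
	int hw_awake;			/* stands in for the FORCEWAKE ack bit */
};

static void toy_fw_get(struct toy_fw_domain *d)
{
	if (d->wake_count++ == 0)
		d->hw_awake = 1;	/* real code writes reg_set and waits for the ack */
}

static void toy_fw_put(struct toy_fw_domain *d)
{
	assert(d->wake_count > 0);
	if (--d->wake_count == 0)
		d->hw_awake = 0;	/* real code arms a timer and releases a jiffy later */
}

int main(void)
{
	struct toy_fw_domain render = { 0, 0 };

	toy_fw_get(&render);		/* first reference powers the well */
	toy_fw_get(&render);		/* nested reference only bumps the count */
	toy_fw_put(&render);
	assert(render.hw_awake);	/* still referenced, stays awake */
	toy_fw_put(&render);
	assert(!render.hw_awake);	/* last reference dropped */
	return 0;
}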
@@ -737,96 +564,118 @@ hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
}
}
-#define REG_READ_HEADER(x) \
- unsigned long irqflags; \
+#define GEN2_READ_HEADER(x) \
u##x val = 0; \
- assert_device_not_suspended(dev_priv); \
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+ assert_device_not_suspended(dev_priv);
-#define REG_READ_FOOTER \
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+#define GEN2_READ_FOOTER \
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
return val
-#define __gen4_read(x) \
+#define __gen2_read(x) \
static u##x \
-gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
- REG_READ_HEADER(x); \
+gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+ GEN2_READ_HEADER(x); \
val = __raw_i915_read##x(dev_priv, reg); \
- REG_READ_FOOTER; \
+ GEN2_READ_FOOTER; \
}
#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
- REG_READ_HEADER(x); \
+ GEN2_READ_HEADER(x); \
ilk_dummy_write(dev_priv); \
val = __raw_i915_read##x(dev_priv, reg); \
- REG_READ_FOOTER; \
+ GEN2_READ_FOOTER; \
+}
+
+__gen5_read(8)
+__gen5_read(16)
+__gen5_read(32)
+__gen5_read(64)
+__gen2_read(8)
+__gen2_read(16)
+__gen2_read(32)
+__gen2_read(64)
+
+#undef __gen5_read
+#undef __gen2_read
+
+#undef GEN2_READ_FOOTER
+#undef GEN2_READ_HEADER
+
+#define GEN6_READ_HEADER(x) \
+ unsigned long irqflags; \
+ u##x val = 0; \
+ assert_device_not_suspended(dev_priv); \
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+
+#define GEN6_READ_FOOTER \
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+ trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
+ return val
+
+static inline void __force_wake_get(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
+{
+ struct intel_uncore_forcewake_domain *domain;
+ enum forcewake_domain_id id;
+
+ if (WARN_ON(!fw_domains))
+ return;
+
+ /* Ideally GCC would constant-fold and eliminate this loop */
+ for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+ if (domain->wake_count) {
+ fw_domains &= ~(1 << id);
+ continue;
+ }
+
+ domain->wake_count++;
+ fw_domain_arm_timer(domain);
+ }
+
+ if (fw_domains)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}
#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
- REG_READ_HEADER(x); \
+ GEN6_READ_HEADER(x); \
hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
- if (dev_priv->uncore.forcewake_count == 0 && \
- NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
- dev_priv->uncore.funcs.force_wake_get(dev_priv, \
- FORCEWAKE_ALL); \
- val = __raw_i915_read##x(dev_priv, reg); \
- dev_priv->uncore.funcs.force_wake_put(dev_priv, \
- FORCEWAKE_ALL); \
- } else { \
- val = __raw_i915_read##x(dev_priv, reg); \
- } \
+ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) \
+ __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+ val = __raw_i915_read##x(dev_priv, reg); \
hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
- REG_READ_FOOTER; \
+ GEN6_READ_FOOTER; \
}
#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
- unsigned fwengine = 0; \
- REG_READ_HEADER(x); \
- if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_rendercount == 0) \
- fwengine = FORCEWAKE_RENDER; \
- } else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_mediacount == 0) \
- fwengine = FORCEWAKE_MEDIA; \
- } \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+ GEN6_READ_HEADER(x); \
+ if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \
+ __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+ else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \
+ __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
val = __raw_i915_read##x(dev_priv, reg); \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
- REG_READ_FOOTER; \
+ GEN6_READ_FOOTER; \
}
#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
- unsigned fwengine = 0; \
- REG_READ_HEADER(x); \
- if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_rendercount == 0) \
- fwengine = FORCEWAKE_RENDER; \
- } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_mediacount == 0) \
- fwengine = FORCEWAKE_MEDIA; \
- } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_rendercount == 0) \
- fwengine |= FORCEWAKE_RENDER; \
- if (dev_priv->uncore.fw_mediacount == 0) \
- fwengine |= FORCEWAKE_MEDIA; \
- } \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+ GEN6_READ_HEADER(x); \
+ if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
+ __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+ else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
+ __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
+ else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
+ __force_wake_get(dev_priv, \
+ FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
val = __raw_i915_read##x(dev_priv, reg); \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
- REG_READ_FOOTER; \
+ GEN6_READ_FOOTER; \
}
#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \
@@ -835,33 +684,22 @@ chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
- REG_READ_HEADER(x); \
- if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
- val = __raw_i915_read##x(dev_priv, reg); \
- } else { \
- unsigned fwengine = 0; \
- if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_rendercount == 0) \
- fwengine = FORCEWAKE_RENDER; \
- } else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_mediacount == 0) \
- fwengine = FORCEWAKE_MEDIA; \
- } else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_rendercount == 0) \
- fwengine |= FORCEWAKE_RENDER; \
- if (dev_priv->uncore.fw_mediacount == 0) \
- fwengine |= FORCEWAKE_MEDIA; \
- } else { \
- if (dev_priv->uncore.fw_blittercount == 0) \
- fwengine = FORCEWAKE_BLITTER; \
- } \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
- val = __raw_i915_read##x(dev_priv, reg); \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
- } \
- REG_READ_FOOTER; \
+ enum forcewake_domains fw_engine; \
+ GEN6_READ_HEADER(x); \
+ if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) \
+ fw_engine = 0; \
+ else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
+ fw_engine = FORCEWAKE_RENDER; \
+ else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
+ fw_engine = FORCEWAKE_MEDIA; \
+ else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
+ fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+ else \
+ fw_engine = FORCEWAKE_BLITTER; \
+ if (fw_engine) \
+ __force_wake_get(dev_priv, fw_engine); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ GEN6_READ_FOOTER; \
}
__gen9_read(8)
@@ -880,55 +718,66 @@ __gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
-__gen5_read(8)
-__gen5_read(16)
-__gen5_read(32)
-__gen5_read(64)
-__gen4_read(8)
-__gen4_read(16)
-__gen4_read(32)
-__gen4_read(64)
#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
-#undef __gen5_read
-#undef __gen4_read
-#undef REG_READ_FOOTER
-#undef REG_READ_HEADER
+#undef GEN6_READ_FOOTER
+#undef GEN6_READ_HEADER
-#define REG_WRITE_HEADER \
- unsigned long irqflags; \
+#define GEN2_WRITE_HEADER \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
assert_device_not_suspended(dev_priv); \
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
-#define REG_WRITE_FOOTER \
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
+#define GEN2_WRITE_FOOTER
-#define __gen4_write(x) \
+#define __gen2_write(x) \
static void \
-gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
- REG_WRITE_HEADER; \
+gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+ GEN2_WRITE_HEADER; \
__raw_i915_write##x(dev_priv, reg, val); \
- REG_WRITE_FOOTER; \
+ GEN2_WRITE_FOOTER; \
}
#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
- REG_WRITE_HEADER; \
+ GEN2_WRITE_HEADER; \
ilk_dummy_write(dev_priv); \
__raw_i915_write##x(dev_priv, reg, val); \
- REG_WRITE_FOOTER; \
+ GEN2_WRITE_FOOTER; \
}
+__gen5_write(8)
+__gen5_write(16)
+__gen5_write(32)
+__gen5_write(64)
+__gen2_write(8)
+__gen2_write(16)
+__gen2_write(32)
+__gen2_write(64)
+
+#undef __gen5_write
+#undef __gen2_write
+
+#undef GEN2_WRITE_FOOTER
+#undef GEN2_WRITE_HEADER
+
+#define GEN6_WRITE_HEADER \
+ unsigned long irqflags; \
+ trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
+ assert_device_not_suspended(dev_priv); \
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+
+#define GEN6_WRITE_FOOTER \
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
+
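/*
 * Editor's illustrative sketch, not part of this patch: the split above is
 * that the GEN2_* header/footer pair does no locking, while the GEN6_* pair
 * brackets the raw MMIO access with the uncore spinlock.  The userspace
 * analogue below uses a pthread mutex in place of spin_lock_irqsave();
 * every demo_* name is invented for illustration only.
 */
#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t demo_uncore_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t demo_mmio[256];			/* stand-in register file */

#define DEMO_GEN2_WRITE_HEADER			/* nothing: no lock taken */
#define DEMO_GEN2_WRITE_FOOTER

#define DEMO_GEN6_WRITE_HEADER	pthread_mutex_lock(&demo_uncore_lock)
#define DEMO_GEN6_WRITE_FOOTER	pthread_mutex_unlock(&demo_uncore_lock)

static void demo_gen2_write32(uint32_t reg, uint32_t val)
{
	DEMO_GEN2_WRITE_HEADER;
	demo_mmio[(reg / 4) % 256] = val;	/* stands in for __raw_i915_write32() */
	DEMO_GEN2_WRITE_FOOTER;
}

static void demo_gen6_write32(uint32_t reg, uint32_t val)
{
	DEMO_GEN6_WRITE_HEADER;
	demo_mmio[(reg / 4) % 256] = val;
	DEMO_GEN6_WRITE_FOOTER;
}

int main(void)
{
	demo_gen2_write32(0x10, 0x1);
	demo_gen6_write32(0x10, 0x2);
	return 0;
}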
#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
u32 __fifo_ret = 0; \
- REG_WRITE_HEADER; \
+ GEN6_WRITE_HEADER; \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
@@ -936,14 +785,14 @@ gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
- REG_WRITE_FOOTER; \
+ GEN6_WRITE_FOOTER; \
}
#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
u32 __fifo_ret = 0; \
- REG_WRITE_HEADER; \
+ GEN6_WRITE_HEADER; \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
@@ -954,7 +803,7 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
} \
hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
hsw_unclaimed_reg_detect(dev_priv); \
- REG_WRITE_FOOTER; \
+ GEN6_WRITE_FOOTER; \
}
static const u32 gen8_shadowed_regs[] = {
@@ -981,50 +830,31 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
- REG_WRITE_HEADER; \
+ GEN6_WRITE_HEADER; \
hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
- if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
- if (dev_priv->uncore.forcewake_count == 0) \
- dev_priv->uncore.funcs.force_wake_get(dev_priv, \
- FORCEWAKE_ALL); \
- __raw_i915_write##x(dev_priv, reg, val); \
- if (dev_priv->uncore.forcewake_count == 0) \
- dev_priv->uncore.funcs.force_wake_put(dev_priv, \
- FORCEWAKE_ALL); \
- } else { \
- __raw_i915_write##x(dev_priv, reg, val); \
- } \
+ if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \
+ __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+ __raw_i915_write##x(dev_priv, reg, val); \
hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
hsw_unclaimed_reg_detect(dev_priv); \
- REG_WRITE_FOOTER; \
+ GEN6_WRITE_FOOTER; \
}
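/*
 * Editor's illustrative sketch, not part of this patch: in the gen8 write
 * path above, registers listed in gen8_shadowed_regs[] (and, further down,
 * gen9_shadowed_regs[]) skip the forcewake grab because the hardware shadows
 * them.  is_gen8_shadowed() is just a linear membership test over that small
 * table; the stand-alone version below uses placeholder offsets and demo_*
 * names for illustration only.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static const uint32_t demo_shadowed_regs[] = { 0x2030, 0x12030, 0x1a030, 0x22030 };

static bool demo_is_shadowed(uint32_t reg)
{
	size_t i;

	for (i = 0; i < sizeof(demo_shadowed_regs) / sizeof(demo_shadowed_regs[0]); i++)
		if (reg == demo_shadowed_regs[i])
			return true;
	return false;
}

int main(void)
{
	printf("0x2030 shadowed? %d\n", demo_is_shadowed(0x2030));	/* 1: skip forcewake */
	printf("0x2100 shadowed? %d\n", demo_is_shadowed(0x2100));	/* 0: may need forcewake */
	return 0;
}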
#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
- unsigned fwengine = 0; \
bool shadowed = is_gen8_shadowed(dev_priv, reg); \
- REG_WRITE_HEADER; \
+ GEN6_WRITE_HEADER; \
if (!shadowed) { \
- if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_rendercount == 0) \
- fwengine = FORCEWAKE_RENDER; \
- } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_mediacount == 0) \
- fwengine = FORCEWAKE_MEDIA; \
- } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_rendercount == 0) \
- fwengine |= FORCEWAKE_RENDER; \
- if (dev_priv->uncore.fw_mediacount == 0) \
- fwengine |= FORCEWAKE_MEDIA; \
- } \
+ if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
+ __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+ else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
+ __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
+ else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
+ __force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
} \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
__raw_i915_write##x(dev_priv, reg, val); \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
- REG_WRITE_FOOTER; \
+ GEN6_WRITE_FOOTER; \
}
static const u32 gen9_shadowed_regs[] = {
@@ -1054,36 +884,23 @@ static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
static void \
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
bool trace) { \
- REG_WRITE_HEADER; \
- if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
- is_gen9_shadowed(dev_priv, reg)) { \
- __raw_i915_write##x(dev_priv, reg, val); \
- } else { \
- unsigned fwengine = 0; \
- if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_rendercount == 0) \
- fwengine = FORCEWAKE_RENDER; \
- } else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_mediacount == 0) \
- fwengine = FORCEWAKE_MEDIA; \
- } else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
- if (dev_priv->uncore.fw_rendercount == 0) \
- fwengine |= FORCEWAKE_RENDER; \
- if (dev_priv->uncore.fw_mediacount == 0) \
- fwengine |= FORCEWAKE_MEDIA; \
- } else { \
- if (dev_priv->uncore.fw_blittercount == 0) \
- fwengine = FORCEWAKE_BLITTER; \
- } \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_get(dev_priv, \
- fwengine); \
- __raw_i915_write##x(dev_priv, reg, val); \
- if (fwengine) \
- dev_priv->uncore.funcs.force_wake_put(dev_priv, \
- fwengine); \
- } \
- REG_WRITE_FOOTER; \
+ enum forcewake_domains fw_engine; \
+ GEN6_WRITE_HEADER; \
+ if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
+ is_gen9_shadowed(dev_priv, reg)) \
+ fw_engine = 0; \
+ else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
+ fw_engine = FORCEWAKE_RENDER; \
+ else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
+ fw_engine = FORCEWAKE_MEDIA; \
+ else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
+ fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+ else \
+ fw_engine = FORCEWAKE_BLITTER; \
+ if (fw_engine) \
+ __force_wake_get(dev_priv, fw_engine); \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ GEN6_WRITE_FOOTER; \
}
__gen9_write(8)
@@ -1106,24 +923,14 @@ __gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
-__gen5_write(8)
-__gen5_write(16)
-__gen5_write(32)
-__gen5_write(64)
-__gen4_write(8)
-__gen4_write(16)
-__gen4_write(32)
-__gen4_write(64)
#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
-#undef __gen5_write
-#undef __gen4_write
-#undef REG_WRITE_FOOTER
-#undef REG_WRITE_HEADER
+#undef GEN6_WRITE_FOOTER
+#undef GEN6_WRITE_HEADER
#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
@@ -1141,24 +948,83 @@ do { \
dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
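/*
 * Editor's illustrative sketch, not part of this patch: ASSIGN_WRITE_MMIO_VFUNCS(gen2)
 * and friends rely on token pasting to select a whole gen2_write8/16/32/64
 * family in one statement.  The miniature example below reproduces the idiom
 * with invented demo_* names.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_funcs {
	void (*write8)(uint32_t reg, uint8_t val);
	void (*write32)(uint32_t reg, uint32_t val);
};

static void gen2_write8(uint32_t reg, uint8_t val)
{
	printf("gen2 w8  %#x = %#x\n", (unsigned)reg, (unsigned)val);
}

static void gen2_write32(uint32_t reg, uint32_t val)
{
	printf("gen2 w32 %#x = %#x\n", (unsigned)reg, (unsigned)val);
}

#define DEMO_ASSIGN_WRITE_VFUNCS(funcs, x) \
do { \
	(funcs)->write8  = x##_write8; \
	(funcs)->write32 = x##_write32; \
} while (0)

int main(void)
{
	struct demo_funcs funcs;

	DEMO_ASSIGN_WRITE_VFUNCS(&funcs, gen2);	/* pastes gen2_write8 / gen2_write32 */
	funcs.write32(0x2030, 0xdeadbeef);
	return 0;
}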
-void intel_uncore_init(struct drm_device *dev)
+
+static void fw_domain_init(struct drm_i915_private *dev_priv,
+ enum forcewake_domain_id domain_id,
+ u32 reg_set, u32 reg_ack)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_uncore_forcewake_domain *d;
- setup_timer(&dev_priv->uncore.force_wake_timer,
- gen6_force_wake_timer, (unsigned long)dev_priv);
+ if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
+ return;
- __intel_uncore_early_sanitize(dev, false);
+ d = &dev_priv->uncore.fw_domain[domain_id];
+
+ WARN_ON(d->wake_count);
+
+ d->wake_count = 0;
+ d->reg_set = reg_set;
+ d->reg_ack = reg_ack;
+
+ if (IS_GEN6(dev_priv)) {
+ d->val_reset = 0;
+ d->val_set = FORCEWAKE_KERNEL;
+ d->val_clear = 0;
+ } else {
+ d->val_reset = _MASKED_BIT_DISABLE(0xffff);
+ d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
+ d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
+ }
+
+ if (IS_VALLEYVIEW(dev_priv))
+ d->reg_post = FORCEWAKE_ACK_VLV;
+ else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
+ d->reg_post = ECOBUS;
+ else
+ d->reg_post = 0;
+
+ d->i915 = dev_priv;
+ d->id = domain_id;
+
+ setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);
+
+ dev_priv->uncore.fw_domains |= (1 << domain_id);
+
+ fw_domain_reset(d);
+}
+
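/*
 * Editor's illustrative sketch, not part of this patch: fw_domain_init()
 * above turns each forcewake well into a small per-domain record (set/ack
 * registers, the values to write, a wake count and a release timer) and ORs
 * the well into uncore.fw_domains so later code can walk the initialised
 * wells.  The condensed stand-alone version below uses demo_* names and
 * placeholder register offsets/values; it is illustration only.
 */
#include <stdint.h>
#include <stdio.h>

enum demo_domain_id { DEMO_RENDER, DEMO_BLITTER, DEMO_MEDIA, DEMO_DOMAIN_COUNT };

struct demo_fw_domain {
	uint32_t reg_set, reg_ack;
	uint32_t val_set, val_clear;
	unsigned wake_count;
};

struct demo_uncore {
	struct demo_fw_domain domain[DEMO_DOMAIN_COUNT];
	uint32_t fw_domains;			/* bitmask of initialised wells */
};

static void demo_fw_domain_init(struct demo_uncore *uncore, enum demo_domain_id id,
				uint32_t reg_set, uint32_t reg_ack)
{
	struct demo_fw_domain *d = &uncore->domain[id];

	d->reg_set = reg_set;
	d->reg_ack = reg_ack;
	/* placeholders for the masked-bit enable/disable values used on gen7+ */
	d->val_set = 0x00010001;
	d->val_clear = 0x00010000;
	d->wake_count = 0;

	uncore->fw_domains |= 1u << id;		/* mirrors dev_priv->uncore.fw_domains |= ... */
}

int main(void)
{
	struct demo_uncore uncore = { 0 };

	demo_fw_domain_init(&uncore, DEMO_RENDER, 0x1000, 0x1004);	/* placeholder offsets */
	demo_fw_domain_init(&uncore, DEMO_MEDIA,  0x2000, 0x2004);
	printf("initialised wells: %#x\n", (unsigned)uncore.fw_domains);
	return 0;
}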
+static void intel_uncore_fw_domains_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
if (IS_GEN9(dev)) {
- dev_priv->uncore.funcs.force_wake_get = __gen9_force_wake_get;
- dev_priv->uncore.funcs.force_wake_put = __gen9_force_wake_put;
+ dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
+ dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ FORCEWAKE_RENDER_GEN9,
+ FORCEWAKE_ACK_RENDER_GEN9);
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
+ FORCEWAKE_BLITTER_GEN9,
+ FORCEWAKE_ACK_BLITTER_GEN9);
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
+ FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
} else if (IS_VALLEYVIEW(dev)) {
- dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
- dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
+ dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
+ if (!IS_CHERRYVIEW(dev))
+ dev_priv->uncore.funcs.force_wake_put =
+ fw_domains_put_with_fifo;
+ else
+ dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
+ FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
- dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
- dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
+ dev_priv->uncore.funcs.force_wake_get =
+ fw_domains_get_with_thread_status;
+ dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
} else if (IS_IVYBRIDGE(dev)) {
u32 ecobus;
@@ -1171,31 +1037,47 @@ void intel_uncore_init(struct drm_device *dev)
* (correctly) interpreted by the test below as MT
* forcewake being disabled.
*/
+ dev_priv->uncore.funcs.force_wake_get =
+ fw_domains_get_with_thread_status;
+ dev_priv->uncore.funcs.force_wake_put =
+ fw_domains_put_with_fifo;
+
+	/* We need to init first for ECOBUS access and then
+	 * determine later whether to reinit, in case MT access is
+	 * not working.
+	 */
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ FORCEWAKE_MT, FORCEWAKE_MT_ACK);
+
mutex_lock(&dev->struct_mutex);
- __gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
+ fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
ecobus = __raw_i915_read32(dev_priv, ECOBUS);
- __gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
+ fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev->struct_mutex);
- if (ecobus & FORCEWAKE_MT_ENABLE) {
- dev_priv->uncore.funcs.force_wake_get =
- __gen7_gt_force_wake_mt_get;
- dev_priv->uncore.funcs.force_wake_put =
- __gen7_gt_force_wake_mt_put;
- } else {
+ if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
DRM_INFO("when using vblank-synced partial screen updates.\n");
- dev_priv->uncore.funcs.force_wake_get =
- __gen6_gt_force_wake_get;
- dev_priv->uncore.funcs.force_wake_put =
- __gen6_gt_force_wake_put;
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ FORCEWAKE, FORCEWAKE_ACK);
}
} else if (IS_GEN6(dev)) {
dev_priv->uncore.funcs.force_wake_get =
- __gen6_gt_force_wake_get;
+ fw_domains_get_with_thread_status;
dev_priv->uncore.funcs.force_wake_put =
- __gen6_gt_force_wake_put;
+ fw_domains_put_with_fifo;
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ FORCEWAKE, FORCEWAKE_ACK);
}
+}
+
+void intel_uncore_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_uncore_ellc_detect(dev);
+ intel_uncore_fw_domains_init(dev);
+ __intel_uncore_early_sanitize(dev, false);
switch (INTEL_INFO(dev)->gen) {
default:
@@ -1236,8 +1118,8 @@ void intel_uncore_init(struct drm_device *dev)
case 4:
case 3:
case 2:
- ASSIGN_WRITE_MMIO_VFUNCS(gen4);
- ASSIGN_READ_MMIO_VFUNCS(gen4);
+ ASSIGN_WRITE_MMIO_VFUNCS(gen2);
+ ASSIGN_READ_MMIO_VFUNCS(gen2);
break;
}