Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.c              |   4
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c                |   2
-rw-r--r--  drivers/gpu/drm/i2c/adv7511.c                        |  71
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c                    |   7
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c                  |   3
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c          |   9
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c   |  28
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_hdcp2p2.c  |  36
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h     |   3
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c                        |  10
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_core_perf.c              | 112
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_crtc.c                   |  16
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_crtc.h                   |   7
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_rm.c                     |  76
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_rm.h                     |   2
-rw-r--r--  drivers/gpu/drm/msm/sde_hdcp.h                       |   1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c       |   4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c                 |   2
18 files changed, 303 insertions(+), 90 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index b6e28dcaea1d..1fb1daa0b366 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -739,8 +739,10 @@ int kfd_wait_on_events(struct kfd_process *p,
struct kfd_event_data event_data;
if (copy_from_user(&event_data, &events[i],
- sizeof(struct kfd_event_data)))
+ sizeof(struct kfd_event_data))) {
+ ret = -EFAULT;
goto fail;
+ }
ret = init_event_waiter(p, &event_waiters[i],
event_data.event_id, i);
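
For reference, a minimal sketch of the copy_from_user() error-handling pattern this hunk adopts: a failed copy must set an explicit error code before jumping to the cleanup label, otherwise the function can return a stale (possibly zero) value. The helper and struct below are hypothetical stand-ins, not part of the patch.

#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/types.h>

struct event_data {			/* stand-in for struct kfd_event_data */
	__u64 event_id;
};

static int copy_one_event(struct event_data *dst,
			  struct event_data __user *src)
{
	/* copy_from_user() returns the number of bytes left uncopied */
	if (copy_from_user(dst, src, sizeof(*dst)))
		return -EFAULT;		/* report the fault explicitly */

	return 0;
}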
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 58bf94b69186..273e05a3c933 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1802,6 +1802,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
return -EINVAL;
}
req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
+ req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
} else {
port = NULL;
req_payload.num_slots = 0;
@@ -1817,6 +1818,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
if (req_payload.num_slots) {
drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
mgr->payloads[i].num_slots = req_payload.num_slots;
+ mgr->payloads[i].vcpi = req_payload.vcpi;
} else if (mgr->payloads[i].num_slots) {
mgr->payloads[i].num_slots = 0;
drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]);
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index 00416f23b5cb..dba5c0ea0827 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -36,7 +36,10 @@ struct adv7511 {
bool edid_read;
wait_queue_head_t wq;
+ struct work_struct hpd_work;
+
struct drm_encoder *encoder;
+ struct drm_connector connector;
bool embedded_sync;
enum adv7511_sync_polarity vsync_polarity;
@@ -48,6 +51,10 @@ struct adv7511 {
struct gpio_desc *gpio_pd;
};
+static const int edid_i2c_addr = 0x7e;
+static const int packet_i2c_addr = 0x70;
+static const int cec_i2c_addr = 0x78;
+
static struct adv7511 *encoder_to_adv7511(struct drm_encoder *encoder)
{
return to_encoder_slave(encoder)->slave_priv;
@@ -362,12 +369,19 @@ static void adv7511_power_on(struct adv7511 *adv7511)
{
adv7511->current_edid_segment = -1;
- regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
- ADV7511_INT0_EDID_READY);
- regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
- ADV7511_INT1_DDC_ERROR);
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN, 0);
+ if (adv7511->i2c_main->irq) {
+ /*
+ * Documentation says the INT_ENABLE registers are reset in
+ * POWER_DOWN mode. My 7511w preserved the bits, however.
+ * Still, let's be safe and stick to the documentation.
+ */
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+ ADV7511_INT0_EDID_READY);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+ ADV7511_INT1_DDC_ERROR);
+ }
/*
* Per spec it is allowed to pulse the HDP signal to indicate that the
@@ -422,7 +436,27 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
return false;
}
-static int adv7511_irq_process(struct adv7511 *adv7511)
+static void adv7511_hpd_work(struct work_struct *work)
+{
+ struct adv7511 *adv7511 = container_of(work, struct adv7511, hpd_work);
+ enum drm_connector_status status;
+ unsigned int val;
+ int ret;
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_STATUS, &val);
+ if (ret < 0)
+ status = connector_status_disconnected;
+ else if (val & ADV7511_STATUS_HPD)
+ status = connector_status_connected;
+ else
+ status = connector_status_disconnected;
+
+ if (adv7511->connector.status != status) {
+ adv7511->connector.status = status;
+ drm_kms_helper_hotplug_event(adv7511->connector.dev);
+ }
+}
+
+static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
{
unsigned int irq0, irq1;
int ret;
@@ -438,8 +472,8 @@ static int adv7511_irq_process(struct adv7511 *adv7511)
regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
- if (irq0 & ADV7511_INT0_HDP && adv7511->encoder)
- drm_helper_hpd_irq_event(adv7511->encoder->dev);
+ if (process_hpd && irq0 & ADV7511_INT0_HDP && adv7511->encoder)
+ schedule_work(&adv7511->hpd_work);
if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
adv7511->edid_read = true;
@@ -456,7 +490,7 @@ static irqreturn_t adv7511_irq_handler(int irq, void *devid)
struct adv7511 *adv7511 = devid;
int ret;
- ret = adv7511_irq_process(adv7511);
+ ret = adv7511_irq_process(adv7511, true);
return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
}
@@ -473,7 +507,7 @@ static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout)
adv7511->edid_read, msecs_to_jiffies(timeout));
} else {
for (; timeout > 0; timeout -= 25) {
- ret = adv7511_irq_process(adv7511);
+ ret = adv7511_irq_process(adv7511, false);
if (ret < 0)
break;
@@ -567,13 +601,18 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
/* Reading the EDID only works if the device is powered */
if (!adv7511->powered) {
- regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
- ADV7511_INT0_EDID_READY);
- regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
- ADV7511_INT1_DDC_ERROR);
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN, 0);
+ if (adv7511->i2c_main->irq) {
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+ ADV7511_INT0_EDID_READY);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+ ADV7511_INT1_DDC_ERROR);
+ }
adv7511->current_edid_segment = -1;
+ /* Reset the EDID_I2C_ADDR register as it might be cleared */
+ regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR,
+ edid_i2c_addr);
}
edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);
@@ -849,10 +888,6 @@ static int adv7511_parse_dt(struct device_node *np,
return 0;
}
-static const int edid_i2c_addr = 0x7e;
-static const int packet_i2c_addr = 0x70;
-static const int cec_i2c_addr = 0x78;
-
static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
{
struct adv7511_link_config link_config;
@@ -913,6 +948,8 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
if (!adv7511->i2c_edid)
return -ENOMEM;
+ INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work);
+
if (i2c->irq) {
init_waitqueue_head(&adv7511->wq);
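
The adv7511 change moves hotplug notification out of the interrupt handler into a work item, since the DRM hotplug helpers may sleep. A minimal sketch of that deferral pattern, with hypothetical names (my_dev, my_irq_handler) standing in for the driver's own:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>

struct my_dev {				/* hypothetical device structure */
	struct work_struct hpd_work;
};

static void my_hpd_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, hpd_work);

	/* process context: safe to sleep, read status registers, notify DRM */
	(void)dev;
}

static irqreturn_t my_irq_handler(int irq, void *devid)
{
	struct my_dev *dev = devid;

	/* hard-IRQ context: just acknowledge and defer the heavy lifting */
	schedule_work(&dev->hpd_work);

	return IRQ_HANDLED;
}

/* at probe time: INIT_WORK(&dev->hpd_work, my_hpd_work); */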
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index d14bdc537587..0a2ac3efd04e 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -957,6 +957,13 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
+ if (port == PORT_A && is_dvi) {
+ DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
+ is_hdmi ? "/HDMI" : "");
+ is_dvi = false;
+ is_hdmi = false;
+ }
+
info->supports_dvi = is_dvi;
info->supports_hdmi = is_hdmi;
info->supports_dp = is_dp;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index cc91ae832ffb..6fd7b50c5747 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -635,7 +635,8 @@ hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
"enabling oneshot unclaimed register reporting. "
"Please use i915.mmio_debug=N for more information.\n");
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
- i915.mmio_debug = mmio_debug_once--;
+ i915.mmio_debug = mmio_debug_once;
+ mmio_debug_once = false;
}
}
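
The intel_uncore hunk replaces a post-decrement side effect on a bool with an explicit assignment, so the unclaimed-register reporting is promoted exactly once. A short sketch of the intended one-shot behaviour; flag and function names are hypothetical, not the driver's:

#include <linux/types.h>

static bool debug_promoted_once = true;	/* hypothetical one-shot flag */

static void promote_mmio_debug(int *mmio_debug)
{
	if (!debug_promoted_once)
		return;

	*mmio_debug = 1;		/* arm extra checking this one time */
	debug_promoted_once = false;	/* and never re-arm automatically */
}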
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
index fa111d581529..0f77e35ef287 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
@@ -666,6 +666,15 @@ static void sde_hdmi_tx_hdcp_cb_work(struct work_struct *work)
}
break;
+ case HDCP_STATE_AUTH_FAIL_NOREAUTH:
+ if (hdmi_ctrl->hdcp1_use_sw_keys && hdmi_ctrl->hdcp14_present) {
+ if (hdmi_ctrl->auth_state && !hdmi_ctrl->hdcp22_present)
+ hdcp1_set_enc(false);
+ }
+
+ hdmi_ctrl->auth_state = false;
+
+ break;
case HDCP_STATE_AUTH_ENC_NONE:
hdmi_ctrl->enc_lvl = HDCP_STATE_AUTH_ENC_NONE;
if (sde_hdmi_tx_is_panel_on(hdmi_ctrl))
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
index 0d93edb9201f..0c143059b749 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
@@ -110,6 +110,20 @@ void _sde_hdmi_bridge_destroy(struct drm_bridge *bridge)
{
}
+static void sde_hdmi_clear_hdr_info(struct drm_bridge *bridge)
+{
+ struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+ struct drm_connector *connector = hdmi->connector;
+
+ connector->hdr_eotf = SDE_HDMI_HDR_EOTF_NONE;
+ connector->hdr_metadata_type_one = false;
+ connector->hdr_max_luminance = SDE_HDMI_HDR_LUMINANCE_NONE;
+ connector->hdr_avg_luminance = SDE_HDMI_HDR_LUMINANCE_NONE;
+ connector->hdr_min_luminance = SDE_HDMI_HDR_LUMINANCE_NONE;
+ connector->hdr_supported = false;
+}
+
static void _sde_hdmi_bridge_power_on(struct drm_bridge *bridge)
{
struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
@@ -435,6 +449,19 @@ static void _sde_hdmi_bridge_setup_deep_color(struct hdmi *hdmi)
vbi_pkt_reg = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL);
vbi_pkt_reg |= BIT(5) | BIT(4);
hdmi_write(hdmi, REG_HDMI_VBI_PKT_CTRL, vbi_pkt_reg);
+ } else {
+ hdmi_ctrl_reg = hdmi_read(hdmi, REG_HDMI_CTRL);
+
+ /* disable GC CD override */
+ hdmi_ctrl_reg &= ~BIT(27);
+ /* disable deep color for RGB888/YUV444/YUV420 30 bits */
+ hdmi_ctrl_reg &= ~BIT(24);
+ hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl_reg);
+
+ /* disable the GC packet sending */
+ vbi_pkt_reg = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL);
+ vbi_pkt_reg &= ~(BIT(5) | BIT(4));
+ hdmi_write(hdmi, REG_HDMI_VBI_PKT_CTRL, vbi_pkt_reg);
}
}
@@ -551,6 +578,7 @@ static void _sde_hdmi_bridge_disable(struct drm_bridge *bridge)
display->sink_hdcp_ver = SDE_HDMI_HDCP_NONE;
display->sink_hdcp22_support = false;
+ sde_hdmi_clear_hdr_info(bridge);
mutex_unlock(&display->display_lock);
}
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_hdcp2p2.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_hdcp2p2.c
index 1e673440f399..51f5c8d8dde6 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_hdcp2p2.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_hdcp2p2.c
@@ -338,6 +338,41 @@ static void sde_hdmi_hdcp2p2_auth_failed(struct sde_hdmi_hdcp2p2_ctrl *ctrl)
HDCP_STATE_AUTH_FAIL);
}
+static void sde_hdmi_hdcp2p2_fail_noreauth(struct sde_hdmi_hdcp2p2_ctrl *ctrl)
+{
+ if (!ctrl) {
+ SDE_ERROR("invalid input\n");
+ return;
+ }
+
+ atomic_set(&ctrl->auth_state, HDCP_STATE_AUTH_FAIL);
+
+ sde_hdmi_hdcp2p2_ddc_disable(ctrl->init_data.cb_data);
+
+ /* notify hdmi tx about HDCP failure */
+ ctrl->init_data.notify_status(ctrl->init_data.cb_data,
+ HDCP_STATE_AUTH_FAIL_NOREAUTH);
+}
+
+static void sde_hdmi_hdcp2p2_srm_cb(void *client_ctx)
+{
+ struct sde_hdmi_hdcp2p2_ctrl *ctrl =
+ (struct sde_hdmi_hdcp2p2_ctrl *)client_ctx;
+ struct hdcp_lib_wakeup_data cdata = {
+ HDCP_LIB_WKUP_CMD_INVALID};
+
+ if (!ctrl) {
+ SDE_ERROR("invalid input\n");
+ return;
+ }
+
+ cdata.context = ctrl->lib_ctx;
+ cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+ sde_hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+
+ sde_hdmi_hdcp2p2_fail_noreauth(ctrl);
+}
+
static int sde_hdmi_hdcp2p2_ddc_rd_message(struct sde_hdmi_hdcp2p2_ctrl *ctrl,
u8 *buf, int size, u32 timeout)
{
@@ -888,6 +923,7 @@ void *sde_hdmi_hdcp2p2_init(struct sde_hdcp_init_data *init_data)
static struct hdcp_client_ops client_ops = {
.wakeup = sde_hdmi_hdcp2p2_wakeup,
.notify_lvl_change = sde_hdmi_hdcp2p2_min_level_change,
+ .srm_cb = sde_hdmi_hdcp2p2_srm_cb,
};
static struct hdcp_txmtr_ops txmtr_ops;
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h
index 421bdf7643ca..3cef7e6aca39 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h
@@ -109,6 +109,9 @@
#define SDE_HDMI_HDCP_14 0x14
#define SDE_HDMI_HDCP_NONE 0x0
+#define SDE_HDMI_HDR_LUMINANCE_NONE 0x0
+#define SDE_HDMI_HDR_EOTF_NONE 0x0
+
/*
* Bits 1:0 in HDMI_HW_DDC_CTRL that dictate how the HDCP 2.2 RxStatus will be
* read by the hardware
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b245a4c7c826..6f968e93d959 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -185,12 +185,16 @@ static void vblank_ctrl_worker(struct kthread_work *work)
struct msm_kms *kms = priv->kms;
struct vblank_event *vbl_ev, *tmp;
unsigned long flags;
+ LIST_HEAD(tmp_head);
spin_lock_irqsave(&vbl_ctrl->lock, flags);
list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
list_del(&vbl_ev->node);
- spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+ list_add_tail(&vbl_ev->node, &tmp_head);
+ }
+ spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+ list_for_each_entry_safe(vbl_ev, tmp, &tmp_head, node) {
if (vbl_ev->enable)
kms->funcs->enable_vblank(kms,
priv->crtcs[vbl_ev->crtc_id]);
@@ -199,11 +203,7 @@ static void vblank_ctrl_worker(struct kthread_work *work)
priv->crtcs[vbl_ev->crtc_id]);
kfree(vbl_ev);
-
- spin_lock_irqsave(&vbl_ctrl->lock, flags);
}
-
- spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
}
static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
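
The msm_drv.c change stops dropping and re-taking the spinlock inside the loop: pending events are moved to a local list under the lock and processed afterwards, where the vblank enable/disable callbacks may block. A compact sketch of the same idea; it uses list_splice_init() as an equivalent shortcut for the per-node moves in the patch, and the types and names are hypothetical:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct vbl_event {			/* hypothetical event node */
	struct list_head node;
	int crtc_id;
};

static void drain_vblank_events(struct list_head *shared, spinlock_t *lock)
{
	struct vbl_event *ev, *tmp;
	unsigned long flags;
	LIST_HEAD(local);

	spin_lock_irqsave(lock, flags);
	list_splice_init(shared, &local);	/* grab everything at once */
	spin_unlock_irqrestore(lock, flags);

	list_for_each_entry_safe(ev, tmp, &local, node) {
		/* lock no longer held: OK to call code that may sleep */
		list_del(&ev->node);
		kfree(ev);
	}
}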
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.c b/drivers/gpu/drm/msm/sde/sde_core_perf.c
index 0ba644d5519d..29e746e1fdf5 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_perf.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -75,6 +75,31 @@ static bool _sde_core_video_mode_intf_connected(struct drm_crtc *crtc)
return false;
}
+static void _sde_core_perf_calc_crtc(struct drm_crtc *crtc,
+ struct drm_crtc_state *state,
+ struct sde_core_perf_params *perf)
+{
+ struct sde_crtc_state *sde_cstate;
+
+ if (!crtc || !state || !perf) {
+ SDE_ERROR("invalid parameters\n");
+ return;
+ }
+
+ sde_cstate = to_sde_crtc_state(state);
+ memset(perf, 0, sizeof(struct sde_core_perf_params));
+
+ perf->bw_ctl = sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
+ perf->max_per_pipe_ib =
+ sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_IB);
+ perf->core_clk_rate =
+ sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_CLK);
+
+ SDE_DEBUG("crtc=%d clk_rate=%u ib=%llu ab=%llu\n",
+ crtc->base.id, perf->core_clk_rate,
+ perf->max_per_pipe_ib, perf->bw_ctl);
+}
+
int sde_core_perf_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
@@ -102,7 +127,9 @@ int sde_core_perf_crtc_check(struct drm_crtc *crtc,
sde_cstate = to_sde_crtc_state(state);
- bw_sum_of_intfs = sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
+ _sde_core_perf_calc_crtc(crtc, state, &sde_cstate->new_perf);
+
+ bw_sum_of_intfs = sde_cstate->new_perf.bw_ctl;
drm_for_each_crtc(tmp_crtc, crtc->dev) {
if (_sde_core_perf_crtc_is_power_on(tmp_crtc) &&
@@ -110,7 +137,7 @@ int sde_core_perf_crtc_check(struct drm_crtc *crtc,
struct sde_crtc_state *tmp_cstate =
to_sde_crtc_state(tmp_crtc->state);
- bw_sum_of_intfs += tmp_cstate->cur_perf.bw_ctl;
+ bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl;
}
}
@@ -126,11 +153,9 @@ int sde_core_perf_crtc_check(struct drm_crtc *crtc,
SDE_DEBUG("final threshold bw limit = %d\n", threshold);
if (!threshold) {
- sde_cstate->cur_perf.bw_ctl = 0;
SDE_ERROR("no bandwidth limits specified\n");
return -E2BIG;
} else if (bw > threshold) {
- sde_cstate->cur_perf.bw_ctl = 0;
SDE_DEBUG("exceeds bandwidth: %ukb > %ukb\n", bw, threshold);
return -E2BIG;
}
@@ -138,26 +163,6 @@ int sde_core_perf_crtc_check(struct drm_crtc *crtc,
return 0;
}
-static void _sde_core_perf_calc_crtc(struct sde_kms *kms,
- struct drm_crtc *crtc,
- struct sde_core_perf_params *perf)
-{
- struct sde_crtc_state *sde_cstate;
-
- sde_cstate = to_sde_crtc_state(crtc->state);
- memset(perf, 0, sizeof(struct sde_core_perf_params));
-
- perf->bw_ctl = sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
- perf->max_per_pipe_ib =
- sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_IB);
- perf->core_clk_rate =
- sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_CLK);
-
- SDE_DEBUG("crtc=%d clk_rate=%u ib=%llu ab=%llu\n",
- crtc->base.id, perf->core_clk_rate,
- perf->max_per_pipe_ib, perf->bw_ctl);
-}
-
static u64 _sde_core_perf_crtc_calc_client_vote(struct sde_kms *kms,
struct drm_crtc *crtc, struct sde_core_perf_params *perf,
bool nrt_client, u32 core_clk)
@@ -175,13 +180,13 @@ static u64 _sde_core_perf_crtc_calc_client_vote(struct sde_kms *kms,
to_sde_crtc_state(tmp_crtc->state);
perf->max_per_pipe_ib = max(perf->max_per_pipe_ib,
- sde_cstate->cur_perf.max_per_pipe_ib);
+ sde_cstate->new_perf.max_per_pipe_ib);
- bw_sum_of_intfs += sde_cstate->cur_perf.bw_ctl;
+ bw_sum_of_intfs += sde_cstate->new_perf.bw_ctl;
SDE_DEBUG("crtc=%d bw=%llu\n",
tmp_crtc->base.id,
- sde_cstate->cur_perf.bw_ctl);
+ sde_cstate->new_perf.bw_ctl);
}
}
@@ -249,6 +254,7 @@ static void _sde_core_perf_crtc_update_bus(struct sde_kms *kms,
void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc)
{
struct drm_crtc *tmp_crtc;
+ struct sde_crtc *sde_crtc;
struct sde_crtc_state *sde_cstate;
struct sde_kms *kms;
@@ -263,6 +269,7 @@ void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc)
return;
}
+ sde_crtc = to_sde_crtc(crtc);
sde_cstate = to_sde_crtc_state(crtc->state);
/* only do this for command panel or writeback */
@@ -285,8 +292,7 @@ void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc)
/* Release the bandwidth */
if (kms->perf.enable_bw_release) {
trace_sde_cmd_release_bw(crtc->base.id);
- sde_cstate->cur_perf.bw_ctl = 0;
- sde_cstate->new_perf.bw_ctl = 0;
+ sde_crtc->cur_perf.bw_ctl = 0;
SDE_DEBUG("Release BW crtc=%d\n", crtc->base.id);
_sde_core_perf_crtc_update_bus(kms, crtc, 0);
}
@@ -298,18 +304,27 @@ static int _sde_core_select_clk_lvl(struct sde_kms *kms,
return clk_round_rate(kms->perf.core_clk, clk_rate);
}
-static u32 _sde_core_perf_get_core_clk_rate(struct sde_kms *kms)
+static u32 _sde_core_perf_get_core_clk_rate(struct sde_kms *kms,
+ struct sde_core_perf_params *crct_perf, struct drm_crtc *crtc)
{
u32 clk_rate = 0;
- struct drm_crtc *crtc;
+ struct drm_crtc *tmp_crtc;
struct sde_crtc_state *sde_cstate;
int ncrtc = 0;
+ u32 tmp_rate;
+
+ drm_for_each_crtc(tmp_crtc, kms->dev) {
+ if (_sde_core_perf_crtc_is_power_on(tmp_crtc)) {
- drm_for_each_crtc(crtc, kms->dev) {
- if (_sde_core_perf_crtc_is_power_on(crtc)) {
- sde_cstate = to_sde_crtc_state(crtc->state);
- clk_rate = max(sde_cstate->cur_perf.core_clk_rate,
- clk_rate);
+ if (crtc->base.id == tmp_crtc->base.id) {
+ /* for current CRTC, use the cached value */
+ tmp_rate = crct_perf->core_clk_rate;
+ } else {
+ sde_cstate = to_sde_crtc_state(tmp_crtc->state);
+ tmp_rate = sde_cstate->new_perf.core_clk_rate;
+ }
+
+ clk_rate = max(tmp_rate, clk_rate);
clk_rate = clk_round_rate(kms->perf.core_clk, clk_rate);
}
ncrtc++;
@@ -353,13 +368,20 @@ void sde_core_perf_crtc_update(struct drm_crtc *crtc,
SDE_ATRACE_BEGIN(__func__);
- old = &sde_cstate->cur_perf;
- new = &sde_cstate->new_perf;
+ /*
+ * cache the performance numbers in the crtc prior to the
+ * crtc kickoff, so the same numbers are used during the
+ * perf update that happens post kickoff.
+ */
+
+ if (params_changed)
+ memcpy(&sde_crtc->new_perf, &sde_cstate->new_perf,
+ sizeof(struct sde_core_perf_params));
- if (_sde_core_perf_crtc_is_power_on(crtc) && !stop_req) {
- if (params_changed)
- _sde_core_perf_calc_crtc(kms, crtc, new);
+ old = &sde_crtc->cur_perf;
+ new = &sde_crtc->new_perf;
+ if (_sde_core_perf_crtc_is_power_on(crtc) && !stop_req) {
/*
* cases for bus bandwidth update.
* 1. new bandwidth vote or writeback output vote
@@ -398,7 +420,7 @@ void sde_core_perf_crtc_update(struct drm_crtc *crtc,
* use the new clock for the rotator bw calculation.
*/
if (update_clk)
- clk_rate = _sde_core_perf_get_core_clk_rate(kms);
+ clk_rate = _sde_core_perf_get_core_clk_rate(kms, old, crtc);
if (update_bus)
_sde_core_perf_crtc_update_bus(kms, crtc, clk_rate);
@@ -409,7 +431,9 @@ void sde_core_perf_crtc_update(struct drm_crtc *crtc,
*/
if (update_clk) {
SDE_ATRACE_INT(kms->perf.clk_name, clk_rate);
- SDE_EVT32(kms->dev, stop_req, clk_rate);
+ SDE_EVT32(kms->dev, stop_req, clk_rate, params_changed,
+ old->core_clk_rate, new->core_clk_rate);
+
ret = sde_power_clk_set_rate(&priv->phandle,
kms->perf.clk_name, clk_rate);
if (ret) {
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index e99eba0dadb7..2a31bc7fedc7 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -473,6 +473,7 @@ static void sde_crtc_frame_event_work(struct kthread_work *work)
struct sde_crtc_frame_event *fevent;
struct drm_crtc *crtc;
struct sde_crtc *sde_crtc;
+ struct sde_crtc_state *cstate;
struct sde_kms *sde_kms;
unsigned long flags;
@@ -482,13 +483,14 @@ static void sde_crtc_frame_event_work(struct kthread_work *work)
}
fevent = container_of(work, struct sde_crtc_frame_event, work);
- if (!fevent->crtc) {
+ if (!fevent->crtc || !fevent->crtc->state) {
SDE_ERROR("invalid crtc\n");
return;
}
crtc = fevent->crtc;
sde_crtc = to_sde_crtc(crtc);
+ cstate = to_sde_crtc_state(crtc->state);
sde_kms = _sde_crtc_get_kms(crtc);
if (!sde_kms) {
@@ -522,6 +524,9 @@ static void sde_crtc_frame_event_work(struct kthread_work *work)
} else {
SDE_EVT32(DRMID(crtc), fevent->event, 2);
}
+
+ if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE)
+ sde_core_perf_crtc_update(crtc, 0, false);
} else {
SDE_ERROR("crtc%d ts:%lld unknown event %u\n", crtc->base.id,
ktime_to_ns(fevent->ts),
@@ -1883,15 +1888,18 @@ static const struct file_operations __prefix ## _fops = { \
static int sde_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
struct drm_crtc *crtc = (struct drm_crtc *) s->private;
+ struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
struct sde_crtc_state *cstate = to_sde_crtc_state(crtc->state);
seq_printf(s, "num_connectors: %d\n", cstate->num_connectors);
seq_printf(s, "is_rt: %d\n", cstate->is_rt);
seq_printf(s, "intf_mode: %d\n", sde_crtc_get_intf_mode(crtc));
- seq_printf(s, "bw_ctl: %llu\n", cstate->cur_perf.bw_ctl);
- seq_printf(s, "core_clk_rate: %u\n", cstate->cur_perf.core_clk_rate);
+
+ seq_printf(s, "bw_ctl: %llu\n", sde_crtc->cur_perf.bw_ctl);
+ seq_printf(s, "core_clk_rate: %u\n",
+ sde_crtc->cur_perf.core_clk_rate);
seq_printf(s, "max_per_pipe_ib: %llu\n",
- cstate->cur_perf.max_per_pipe_ib);
+ sde_crtc->cur_perf.max_per_pipe_ib);
return 0;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 0eed61580cd8..200073995d43 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -95,6 +95,8 @@ struct sde_crtc_frame_event {
* @frame_event_list : available frame event list
* @pending : Whether any page-flip events are pending signal
* @spin_lock : spin lock for frame event, transaction status, etc...
+ * @cur_perf : current performance committed to clock/bandwidth driver
+ * @new_perf : new performance committed to clock/bandwidth driver
*/
struct sde_crtc {
struct drm_crtc base;
@@ -134,6 +136,9 @@ struct sde_crtc {
struct sde_crtc_frame_event frame_events[SDE_CRTC_FRAME_EVENT_SIZE];
struct list_head frame_event_list;
spinlock_t spin_lock;
+
+ struct sde_core_perf_params cur_perf;
+ struct sde_core_perf_params new_perf;
};
#define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
@@ -148,6 +153,7 @@ struct sde_crtc {
* @property_values: Current crtc property values
* @input_fence_timeout_ns : Cached input fence timeout, in ns
* @property_blobs: Reference pointers for blob properties
+ * @new_perf: new performance state being requested
*/
struct sde_crtc_state {
struct drm_crtc_state base;
@@ -161,7 +167,6 @@ struct sde_crtc_state {
uint64_t input_fence_timeout_ns;
struct drm_property_blob *property_blobs[CRTC_PROP_COUNT];
- struct sde_core_perf_params cur_perf;
struct sde_core_perf_params new_perf;
};
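
The sde_crtc/sde_core_perf refactor keeps the committed numbers (cur_perf) in the long-lived CRTC object and only the requested numbers (new_perf) in the per-commit atomic state, since state objects are swapped and freed on every commit. Roughly, the split looks like the trimmed sketch below; the struct names are hypothetical stand-ins for the driver's:

#include <linux/types.h>

struct perf_params {			/* stand-in for sde_core_perf_params */
	u64 bw_ctl;
	u64 max_per_pipe_ib;
	u32 core_clk_rate;
};

struct my_crtc {			/* long-lived, one per hardware CRTC */
	struct perf_params cur_perf;	/* last values actually voted to clk/bus */
	struct perf_params new_perf;	/* cached at kickoff, applied post-commit */
};

struct my_crtc_state {			/* duplicated and freed on every commit */
	struct perf_params new_perf;	/* values requested by this commit */
};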
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index fe4b73b4ffea..de0551b22d2e 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -156,7 +156,7 @@ void sde_rm_init_hw_iter(
iter->type = type;
}
-bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *i)
+static bool _sde_rm_get_hw_locked(struct sde_rm *rm, struct sde_rm_hw_iter *i)
{
struct list_head *blk_list;
@@ -198,7 +198,21 @@ bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *i)
return false;
}
-void *sde_rm_get_hw_by_id(struct sde_rm *rm, enum sde_hw_blk_type type, int id)
+bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *i)
+{
+ bool ret;
+
+ mutex_lock(&rm->rm_lock);
+ ret = _sde_rm_get_hw_locked(rm, i);
+ mutex_unlock(&rm->rm_lock);
+
+ return ret;
+}
+
+static void *_sde_rm_get_hw_by_id_locked(
+ struct sde_rm *rm,
+ enum sde_hw_blk_type type,
+ int id)
{
struct list_head *blk_list;
struct sde_rm_hw_blk *blk;
@@ -225,6 +239,17 @@ void *sde_rm_get_hw_by_id(struct sde_rm *rm, enum sde_hw_blk_type type, int id)
return hw;
}
+void *sde_rm_get_hw_by_id(struct sde_rm *rm, enum sde_hw_blk_type type, int id)
+{
+ void *ret = NULL;
+
+ mutex_lock(&rm->rm_lock);
+ ret = _sde_rm_get_hw_by_id_locked(rm, type, id);
+ mutex_unlock(&rm->rm_lock);
+
+ return ret;
+}
+
static void _sde_rm_hw_destroy(enum sde_hw_blk_type type, void *hw)
{
switch (type) {
@@ -291,6 +316,8 @@ int sde_rm_destroy(struct sde_rm *rm)
sde_hw_mdp_destroy(rm->hw_mdp);
rm->hw_mdp = NULL;
+ mutex_destroy(&rm->rm_lock);
+
return 0;
}
@@ -387,6 +414,9 @@ int sde_rm_init(struct sde_rm *rm,
/* Clear, setup lists */
memset(rm, 0, sizeof(*rm));
+
+ mutex_init(&rm->rm_lock);
+
INIT_LIST_HEAD(&rm->rsvps);
for (type = 0; type < SDE_HW_BLK_MAX; type++)
INIT_LIST_HEAD(&rm->hw_blks[type]);
@@ -568,7 +598,7 @@ static bool _sde_rm_check_lm_and_get_connected_blks(
if (lm_cfg->dspp != DSPP_MAX) {
sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_DSPP);
- while (sde_rm_get_hw(rm, &iter)) {
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
if (iter.blk->id == lm_cfg->dspp) {
*dspp = iter.blk;
break;
@@ -589,7 +619,7 @@ static bool _sde_rm_check_lm_and_get_connected_blks(
}
sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_PINGPONG);
- while (sde_rm_get_hw(rm, &iter)) {
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
if (iter.blk->id == lm_cfg->pingpong) {
*pp = iter.blk;
break;
@@ -639,7 +669,8 @@ static int _sde_rm_reserve_lms(
/* Find a primary mixer */
sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_LM);
- while (lm_count != reqs->num_lm && sde_rm_get_hw(rm, &iter_i)) {
+ while (lm_count != reqs->num_lm &&
+ _sde_rm_get_hw_locked(rm, &iter_i)) {
memset(&lm, 0, sizeof(lm));
memset(&dspp, 0, sizeof(dspp));
memset(&pp, 0, sizeof(pp));
@@ -657,7 +688,8 @@ static int _sde_rm_reserve_lms(
/* Valid primary mixer found, find matching peers */
sde_rm_init_hw_iter(&iter_j, 0, SDE_HW_BLK_LM);
- while (lm_count != reqs->num_lm && sde_rm_get_hw(rm, &iter_j)) {
+ while (lm_count != reqs->num_lm &&
+ _sde_rm_get_hw_locked(rm, &iter_j)) {
if (iter_i.blk == iter_j.blk)
continue;
@@ -693,7 +725,7 @@ static int _sde_rm_reserve_lms(
/* reserve a free PINGPONG_SLAVE block */
rc = -ENAVAIL;
sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_PINGPONG);
- while (sde_rm_get_hw(rm, &iter_i)) {
+ while (_sde_rm_get_hw_locked(rm, &iter_i)) {
struct sde_pingpong_cfg *pp_cfg =
(struct sde_pingpong_cfg *)
(iter_i.blk->catalog);
@@ -724,7 +756,7 @@ static int _sde_rm_reserve_ctls(
memset(&ctls, 0, sizeof(ctls));
sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CTL);
- while (sde_rm_get_hw(rm, &iter)) {
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
unsigned long caps;
bool has_split_display, has_ppsplit;
@@ -771,7 +803,7 @@ static int _sde_rm_reserve_cdm(
struct sde_cdm_cfg *cdm;
sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CDM);
- while (sde_rm_get_hw(rm, &iter)) {
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
bool match = false;
if (RESERVED_BY_OTHER(iter.blk, rsvp))
@@ -816,7 +848,7 @@ static int _sde_rm_reserve_intf_or_wb(
/* Find the block entry in the rm, and note the reservation */
sde_rm_init_hw_iter(&iter, 0, type);
- while (sde_rm_get_hw(rm, &iter)) {
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
if (iter.blk->id != id)
continue;
@@ -1073,7 +1105,7 @@ static struct drm_connector *_sde_rm_get_connector(
* @rm: KMS handle
* @rsvp: RSVP pointer to release and release resources for
*/
-void _sde_rm_release_rsvp(
+static void _sde_rm_release_rsvp(
struct sde_rm *rm,
struct sde_rm_rsvp *rsvp,
struct drm_connector *conn)
@@ -1125,16 +1157,18 @@ void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc)
return;
}
+ mutex_lock(&rm->rm_lock);
+
rsvp = _sde_rm_get_rsvp(rm, enc);
if (!rsvp) {
SDE_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
- return;
+ goto end;
}
conn = _sde_rm_get_connector(enc);
if (!conn) {
SDE_ERROR("failed to get connector for enc %d\n", enc->base.id);
- return;
+ goto end;
}
top_ctrl = sde_connector_get_property(conn->state,
@@ -1154,6 +1188,9 @@ void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc)
CONNECTOR_PROP_TOPOLOGY_NAME,
SDE_RM_TOPOLOGY_UNKNOWN);
}
+
+end:
+ mutex_unlock(&rm->rm_lock);
}
static int _sde_rm_commit_rsvp(
@@ -1232,13 +1269,15 @@ int sde_rm_reserve(
crtc_state->crtc->base.id, test_only);
SDE_EVT32(enc->base.id, conn_state->connector->base.id);
+ mutex_lock(&rm->rm_lock);
+
_sde_rm_print_rsvps(rm, SDE_RM_STAGE_BEGIN);
ret = _sde_rm_populate_requirements(rm, enc, crtc_state,
conn_state, &reqs);
if (ret) {
SDE_ERROR("failed to populate hw requirements\n");
- return ret;
+ goto end;
}
/*
@@ -1253,8 +1292,10 @@ int sde_rm_reserve(
* replace the current with the next.
*/
rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
- if (!rsvp_nxt)
- return -ENOMEM;
+ if (!rsvp_nxt) {
+ ret = -ENOMEM;
+ goto end;
+ }
rsvp_cur = _sde_rm_get_rsvp(rm, enc);
@@ -1306,5 +1347,8 @@ int sde_rm_reserve(
_sde_rm_print_rsvps(rm, SDE_RM_STAGE_FINAL);
+end:
+ mutex_unlock(&rm->rm_lock);
+
return ret;
}
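
The sde_rm change follows the common locked-helper convention: the public entry points take rm_lock, while internal iteration uses _locked variants that assume the caller already holds it, avoiding recursive locking. A minimal sketch of the pattern, with hypothetical names:

#include <linux/mutex.h>
#include <linux/lockdep.h>

struct my_rm {				/* hypothetical resource manager */
	struct mutex lock;
	int free_blocks;
};

/* internal variant: caller must already hold rm->lock */
static bool _my_rm_get_hw_locked(struct my_rm *rm)
{
	lockdep_assert_held(&rm->lock);

	return rm->free_blocks > 0;	/* walk internal lists here */
}

/* public variant: takes and releases the lock around the helper */
bool my_rm_get_hw(struct my_rm *rm)
{
	bool ret;

	mutex_lock(&rm->lock);
	ret = _my_rm_get_hw_locked(rm);
	mutex_unlock(&rm->lock);

	return ret;
}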
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.h b/drivers/gpu/drm/msm/sde/sde_rm.h
index 1cc22c5fbbf4..87e95bfebe98 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.h
+++ b/drivers/gpu/drm/msm/sde/sde_rm.h
@@ -70,6 +70,7 @@ enum sde_rm_topology_control {
* @hw_mdp: hardware object for mdp_top
* @lm_max_width: cached layer mixer maximum width
* @rsvp_next_seq: sequence number for next reservation for debugging purposes
+ * @rm_lock: resource manager mutex
*/
struct sde_rm {
struct drm_device *dev;
@@ -78,6 +79,7 @@ struct sde_rm {
struct sde_hw_mdp *hw_mdp;
uint32_t lm_max_width;
uint32_t rsvp_next_seq;
+ struct mutex rm_lock;
};
/**
diff --git a/drivers/gpu/drm/msm/sde_hdcp.h b/drivers/gpu/drm/msm/sde_hdcp.h
index 49cca9399cb0..c414f68a8e0d 100644
--- a/drivers/gpu/drm/msm/sde_hdcp.h
+++ b/drivers/gpu/drm/msm/sde_hdcp.h
@@ -43,6 +43,7 @@ enum sde_hdcp_states {
HDCP_STATE_AUTHENTICATING,
HDCP_STATE_AUTHENTICATED,
HDCP_STATE_AUTH_FAIL,
+ HDCP_STATE_AUTH_FAIL_NOREAUTH,
HDCP_STATE_AUTH_ENC_NONE,
HDCP_STATE_AUTH_ENC_1X,
HDCP_STATE_AUTH_ENC_2P2
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index d671dcfaff3c..4896474da320 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -180,6 +180,10 @@ nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device,
}
}
+#ifdef __BIG_ENDIAN
+ pci->msi = false;
+#endif
+
pci->msi = nvkm_boolopt(device->cfgopt, "NvMSI", pci->msi);
if (pci->msi && func->msi_rearm) {
pci->msi = pci_enable_msi(pci->pdev) == 0;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 025c429050c0..5d8dfe027b30 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -612,7 +612,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
} else {
pr_err("Failed to fill pool (%p)\n", pool);
/* If we have any pages left put them to the pool. */
- list_for_each_entry(p, &pool->list, lru) {
+ list_for_each_entry(p, &new_pages, lru) {
++cpages;
}
list_splice(&new_pages, &pool->list);
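
The ttm fix counts the pages on the freshly allocated new_pages list rather than on pool->list, which at that point still holds only the old contents. A small sketch of the corrected ordering (count the new list, then splice it into the pool); the names here are hypothetical:

#include <linux/list.h>
#include <linux/types.h>

struct pool_page {			/* hypothetical page wrapper */
	struct list_head lru;
};

static unsigned int count_and_splice(struct list_head *new_pages,
				     struct list_head *pool_list)
{
	struct pool_page *p;
	unsigned int cpages = 0;

	list_for_each_entry(p, new_pages, lru)	/* count what was just allocated */
		++cpages;

	list_splice(new_pages, pool_list);	/* then hand it over to the pool */

	return cpages;
}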