Diffstat (limited to 'drivers/gpu')
59 files changed, 280 insertions, 451 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 053fc2f465df..bb1099c549df 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1673,7 +1673,6 @@ struct amdgpu_uvd { struct amdgpu_bo *vcpu_bo; void *cpu_addr; uint64_t gpu_addr; - unsigned fw_version; atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES]; struct delayed_work idle_work; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 5a8fbadbd27b..8ac49812a716 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -63,6 +63,10 @@ bool amdgpu_has_atpx(void) { return amdgpu_atpx_priv.atpx_detected; } +bool amdgpu_has_atpx_dgpu_power_cntl(void) { + return amdgpu_atpx_priv.atpx.functions.power_cntl; +} + /** * amdgpu_atpx_call - call an ATPX method * @@ -142,10 +146,6 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas */ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) { - /* make sure required functions are enabled */ - /* dGPU power control is required */ - atpx->functions.power_cntl = true; - if (atpx->functions.px_params) { union acpi_object *info; struct atpx_px_params output; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index c961fe093e12..9d88023df836 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -61,6 +61,12 @@ static const char *amdgpu_asic_name[] = { "LAST", }; +#if defined(CONFIG_VGA_SWITCHEROO) +bool amdgpu_has_atpx_dgpu_power_cntl(void); +#else +static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; } +#endif + bool amdgpu_device_is_px(struct drm_device *dev) { struct amdgpu_device *adev = dev->dev_private; @@ -1469,7 +1475,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (amdgpu_runtime_pm == 1) runtime = true; - if (amdgpu_device_is_px(ddev)) + if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl()) runtime = true; vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime); if (runtime) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 4488e82f87b0..e23843f4d877 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -303,7 +303,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file fw_info.feature = adev->vce.fb_version; break; case AMDGPU_INFO_FW_UVD: - fw_info.ver = adev->uvd.fw_version; + fw_info.ver = 0; fw_info.feature = 0; break; case AMDGPU_INFO_FW_GMC: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 89df7871653d..064ebb347074 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -52,7 +52,7 @@ struct amdgpu_hpd; #define AMDGPU_MAX_HPD_PINS 6 #define AMDGPU_MAX_CRTCS 6 -#define AMDGPU_MAX_AFMT_BLOCKS 9 +#define AMDGPU_MAX_AFMT_BLOCKS 7 enum amdgpu_rmx_type { RMX_OFF, @@ -308,8 +308,8 @@ struct amdgpu_mode_info { struct atom_context *atom_context; struct card_info *atom_card_info; bool mode_config_initialized; - struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS]; - struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS]; + struct amdgpu_crtc *crtcs[6]; + struct amdgpu_afmt *afmt[7]; /* DVI-I properties */ struct drm_property 
*coherent_mode_property; /* DAC enable load detect */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 73628c7599e7..b8fbbd7699e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -540,7 +540,6 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata, if (!metadata_size) { if (bo->metadata_size) { kfree(bo->metadata); - bo->metadata = NULL; bo->metadata_size = 0; } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 3b35ad83867c..53f987aeeacf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -156,9 +156,6 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n", version_major, version_minor, family_id); - adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) | - (family_id << 8)); - bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8) + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE; r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true, @@ -276,8 +273,6 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset, (adev->uvd.fw->size) - offset); - cancel_delayed_work_sync(&adev->uvd.idle_work); - size = amdgpu_bo_size(adev->uvd.vcpu_bo); size -= le32_to_cpu(hdr->ucode_size_bytes); ptr = adev->uvd.cpu_addr; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index bb0da76051a1..a745eeeb5d82 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -220,7 +220,6 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev) if (i == AMDGPU_MAX_VCE_HANDLES) return 0; - cancel_delayed_work_sync(&adev->vce.idle_work); /* TODO: suspending running encoding sessions isn't supported */ return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index 1cd6de575305..1e0bba29e167 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -298,10 +298,6 @@ bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder, && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; - /* vertical FP must be at least 1 */ - if (mode->crtc_vsync_start == mode->crtc_vdisplay) - adjusted_mode->crtc_vsync_start++; - /* get the native mode for scaling */ if (amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) amdgpu_panel_mode_fixup(encoder, adjusted_mode); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 946300764609..aa491540ba85 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -3628,7 +3628,7 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vm_id, uint64_t pd_addr) { int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); - uint32_t seq = ring->fence_drv.sync_seq[ring->idx]; + uint32_t seq = ring->fence_drv.sync_seq; uint64_t addr = ring->fence_drv.gpu_addr; amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c index d5e19b5fbbfb..c34c393e9aea 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c @@ 
-513,7 +513,7 @@ static int dbgdev_wave_control_set_registers( union SQ_CMD_BITS *in_reg_sq_cmd, union GRBM_GFX_INDEX_BITS *in_reg_gfx_index) { - int status = 0; + int status; union SQ_CMD_BITS reg_sq_cmd; union GRBM_GFX_INDEX_BITS reg_gfx_index; struct HsaDbgWaveMsgAMDGen2 *pMsg; diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index d268bf18a662..39d7e2e15c11 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -1665,19 +1665,13 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb; int len, ret, port_num; - port = drm_dp_get_validated_port_ref(mgr, port); - if (!port) - return -EINVAL; - port_num = port->port_num; mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); if (!mstb) { mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num); - if (!mstb) { - drm_dp_put_port(port); + if (!mstb) return -EINVAL; - } } txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); @@ -1703,7 +1697,6 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, kfree(txmsg); fail_put: drm_dp_put_mst_branch_device(mstb); - drm_dp_put_port(port); return ret; } @@ -1786,11 +1779,6 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) req_payload.start_slot = cur_slots; if (mgr->proposed_vcpis[i]) { port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); - port = drm_dp_get_validated_port_ref(mgr, port); - if (!port) { - mutex_unlock(&mgr->payload_lock); - return -EINVAL; - } req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots; } else { port = NULL; @@ -1816,9 +1804,6 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) mgr->payloads[i].payload_state = req_payload.payload_state; } cur_slots += req_payload.num_slots; - - if (port) - drm_dp_put_port(port); } for (i = 0; i < mgr->max_payloads; i++) { @@ -2124,8 +2109,6 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr) if (mgr->mst_primary) { int sret; - u8 guid[16]; - sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); if (sret != DP_RECEIVER_CAP_SIZE) { DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); @@ -2140,16 +2123,6 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr) ret = -1; goto out_unlock; } - - /* Some hubs forget their guids after they resume */ - sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); - if (sret != 16) { - DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); - ret = -1; - goto out_unlock; - } - drm_dp_check_mstb_guid(mgr->mst_primary, guid); - ret = 0; } else ret = -1; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 7e461dca564c..bc7b8faba84d 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2838,14 +2838,7 @@ enum skl_disp_power_wells { #define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998) #define BXT_RP_STATE_CAP 0x138170 -/* - * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS - * 8300) freezing up around GPU hangs. Looks as if even - * scheduling/timer interrupts start misbehaving if the RPS - * EI/thresholds are "bad", leading to a very sluggish or even - * frozen machine. 
- */ -#define INTERVAL_1_28_US(us) roundup(((us) * 100) >> 7, 25) +#define INTERVAL_1_28_US(us) (((us) * 100) >> 7) #define INTERVAL_1_33_US(us) (((us) * 3) >> 2) #define INTERVAL_0_833_US(us) (((us) * 6) / 5) #define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \ diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 97d1ed20418b..6a2c76e367a5 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -248,14 +248,8 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder, pipe_config->has_pch_encoder = true; /* LPT FDI RX only supports 8bpc. */ - if (HAS_PCH_LPT(dev)) { - if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) { - DRM_DEBUG_KMS("LPT only supports 24bpp\n"); - return false; - } - + if (HAS_PCH_LPT(dev)) pipe_config->pipe_bpp = 24; - } /* FDI must always be 2.7 GHz */ if (HAS_DDI(dev)) { diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index fc28c512ece3..9e530a739354 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -180,8 +180,7 @@ struct stepping_info { static const struct stepping_info skl_stepping_info[] = { {'A', '0'}, {'B', '0'}, {'C', '0'}, {'D', '0'}, {'E', '0'}, {'F', '0'}, - {'G', '0'}, {'H', '0'}, {'I', '0'}, - {'J', '0'}, {'K', '0'} + {'G', '0'}, {'H', '0'}, {'I', '0'} }; static struct stepping_info bxt_stepping_info[] = { diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 3c6b07683bd9..7e6158b889da 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -464,17 +464,9 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, } else if (IS_BROADWELL(dev)) { ddi_translations_fdi = bdw_ddi_translations_fdi; ddi_translations_dp = bdw_ddi_translations_dp; - - if (dev_priv->edp_low_vswing) { - ddi_translations_edp = bdw_ddi_translations_edp; - n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); - } else { - ddi_translations_edp = bdw_ddi_translations_dp; - n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); - } - + ddi_translations_edp = bdw_ddi_translations_edp; ddi_translations_hdmi = bdw_ddi_translations_hdmi; - + n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); hdmi_default_entry = 7; @@ -3196,6 +3188,12 @@ void intel_ddi_get_config(struct intel_encoder *encoder, intel_ddi_clock_get(encoder, pipe_config); } +static void intel_ddi_destroy(struct drm_encoder *encoder) +{ + /* HDMI has nothing special to destroy, so we can go with this. 
*/ + intel_dp_encoder_destroy(encoder); +} + static bool intel_ddi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { @@ -3214,8 +3212,7 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder, } static const struct drm_encoder_funcs intel_ddi_funcs = { - .reset = intel_dp_encoder_reset, - .destroy = intel_dp_encoder_destroy, + .destroy = intel_ddi_destroy, }; static struct intel_connector * @@ -3287,7 +3284,6 @@ void intel_ddi_init(struct drm_device *dev, enum port port) intel_encoder->post_disable = intel_ddi_post_disable; intel_encoder->get_hw_state = intel_ddi_get_hw_state; intel_encoder->get_config = intel_ddi_get_config; - intel_encoder->suspend = intel_dp_encoder_suspend; intel_dig_port->port = port; intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index afa81691163d..f859a5b87ed4 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4447,7 +4447,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state) intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX); return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, - &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0), + &state->scaler_state.scaler_id, DRM_ROTATE_0, state->pipe_src_w, state->pipe_src_h, adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay); } diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index e55a82a99e7f..78b8ec84d576 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -5035,7 +5035,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) kfree(intel_dig_port); } -void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) +static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); @@ -5077,7 +5077,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) edp_panel_vdd_schedule_off(intel_dp); } -void intel_dp_encoder_reset(struct drm_encoder *encoder) +static void intel_dp_encoder_reset(struct drm_encoder *encoder) { struct intel_dp *intel_dp; diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 06bd9257acdc..0639275fc471 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -477,8 +477,6 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct intel_connector *intel_connector = to_intel_connector(connector); struct drm_device *dev = connector->dev; - intel_connector->unregister(intel_connector); - /* need to nuke the connector */ drm_modeset_lock_all(dev); if (connector->state->crtc) { @@ -492,7 +490,11 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, WARN(ret, "Disabling mst crtc failed with %i\n", ret); } + drm_modeset_unlock_all(dev); + intel_connector->unregister(intel_connector); + + drm_modeset_lock_all(dev); intel_connector_remove_from_fbdev(intel_connector); drm_connector_cleanup(connector); drm_modeset_unlock_all(dev); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index f34a219ec5c4..0d00f07b7163 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1204,8 +1204,6 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp, void intel_dp_start_link_train(struct intel_dp *intel_dp); void 
intel_dp_stop_link_train(struct intel_dp *intel_dp); void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); -void intel_dp_encoder_reset(struct drm_encoder *encoder); -void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder); void intel_dp_encoder_destroy(struct drm_encoder *encoder); int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc); bool intel_dp_compute_config(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 4b8ed9f2dabc..e6c035b0fc1c 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -1388,16 +1388,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) hdmi_to_dig_port(intel_hdmi)); } - if (!live_status) { - DRM_DEBUG_KMS("HDMI live status down\n"); - /* - * Live status register is not reliable on all intel platforms. - * So consider live_status only for certain platforms, for - * others, read EDID to determine presence of sink. - */ - if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv)) - live_status = true; - } + if (!live_status) + DRM_DEBUG_KMS("Live status not up!"); intel_hdmi_unset_edid(connector); diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 7058f75c7b42..d69547a65dbb 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -776,11 +776,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes) if (unlikely(total_bytes > remain_usable)) { /* * The base request will fit but the reserved space - * falls off the end. So don't need an immediate wrap - * and only need to effectively wait for the reserved - * size space from the start of ringbuffer. + * falls off the end. So only need to to wait for the + * reserved size after flushing out the remainder. */ wait_bytes = remain_actual + ringbuf->reserved_size; + need_wrap = true; } else if (total_bytes > ringbuf->space) { /* No wrapping required, just waiting. */ wait_bytes = total_bytes; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 0a68d2ec89dc..f091ad12d694 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -6620,12 +6620,6 @@ static void broadwell_init_clock_gating(struct drm_device *dev) misccpctl = I915_READ(GEN7_MISCCPCTL); I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT); - /* - * Wait at least 100 clocks before re-enabling clock gating. See - * the definition of L3SQCREG1 in BSpec. 
- */ - POSTING_READ(GEN8_L3SQCREG1); - udelay(1); I915_WRITE(GEN7_MISCCPCTL, misccpctl); /* diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 9d48443bca2e..f6b2a814e629 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1922,17 +1922,6 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req, return 0; } -static void cleanup_phys_status_page(struct intel_engine_cs *ring) -{ - struct drm_i915_private *dev_priv = to_i915(ring->dev); - - if (!dev_priv->status_page_dmah) - return; - - drm_pci_free(ring->dev, dev_priv->status_page_dmah); - ring->status_page.page_addr = NULL; -} - static void cleanup_status_page(struct intel_engine_cs *ring) { struct drm_i915_gem_object *obj; @@ -1949,9 +1938,9 @@ static void cleanup_status_page(struct intel_engine_cs *ring) static int init_status_page(struct intel_engine_cs *ring) { - struct drm_i915_gem_object *obj = ring->status_page.obj; + struct drm_i915_gem_object *obj; - if (obj == NULL) { + if ((obj = ring->status_page.obj) == NULL) { unsigned flags; int ret; @@ -2145,7 +2134,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, if (ret) goto error; } else { - WARN_ON(ring->id != RCS); + BUG_ON(ring->id != RCS); ret = init_phys_status_page(ring); if (ret) goto error; @@ -2190,12 +2179,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring) if (ring->cleanup) ring->cleanup(ring); - if (I915_NEED_GFX_HWS(ring->dev)) { - cleanup_status_page(ring); - } else { - WARN_ON(ring->id != RCS); - cleanup_phys_status_page(ring); - } + cleanup_status_page(ring); i915_cmd_parser_fini_ring(ring); i915_gem_batch_pool_fini(&ring->batch_pool); @@ -2357,11 +2341,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes) if (unlikely(total_bytes > remain_usable)) { /* * The base request will fit but the reserved space - * falls off the end. So don't need an immediate wrap - * and only need to effectively wait for the reserved - * size space from the start of ringbuffer. + * falls off the end. So only need to to wait for the + * reserved size after flushing out the remainder. */ wait_bytes = remain_actual + ringbuf->reserved_size; + need_wrap = true; } else if (total_bytes > ringbuf->space) { /* No wrapping required, just waiting. 
*/ wait_bytes = total_bytes; diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index cc91ae832ffb..43cba129a0c0 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1132,11 +1132,7 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { dev_priv->uncore.funcs.force_wake_get = fw_domains_get_with_thread_status; - if (IS_HASWELL(dev)) - dev_priv->uncore.funcs.force_wake_put = - fw_domains_put_with_fifo; - else - dev_priv->uncore.funcs.force_wake_put = fw_domains_put; + dev_priv->uncore.funcs.force_wake_put = fw_domains_put; fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, FORCEWAKE_MT, FORCEWAKE_ACK_HSW); } else if (IS_IVYBRIDGE(dev)) { diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c index 89da47234016..3216e157a8a0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c @@ -131,7 +131,7 @@ nvkm_ramht_del(struct nvkm_ramht **pramht) struct nvkm_ramht *ramht = *pramht; if (ramht) { nvkm_gpuobj_del(&ramht->gpuobj); - vfree(*pramht); + kfree(*pramht); *pramht = NULL; } } @@ -143,8 +143,8 @@ nvkm_ramht_new(struct nvkm_device *device, u32 size, u32 align, struct nvkm_ramht *ramht; int ret, i; - if (!(ramht = *pramht = vzalloc(sizeof(*ramht) + - (size >> 3) * sizeof(*ramht->data)))) + if (!(ramht = *pramht = kzalloc(sizeof(*ramht) + (size >> 3) * + sizeof(*ramht->data), GFP_KERNEL))) return -ENOMEM; ramht->device = device; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index 36655a74c538..9f5dfc85147a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c @@ -1717,8 +1717,6 @@ gf100_gr_init(struct gf100_gr *gr) gf100_gr_mmio(gr, gr->func->mmio); - nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001); - memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr)); for (i = 0, gpc = -1; i < gr->tpc_total; i++) { do { diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 5edebf495c07..183aea1abebc 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -375,15 +375,10 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, qxl_bo_kunmap(user_bo); - qcrtc->cur_x += qcrtc->hot_spot_x - hot_x; - qcrtc->cur_y += qcrtc->hot_spot_y - hot_y; - qcrtc->hot_spot_x = hot_x; - qcrtc->hot_spot_y = hot_y; - cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); cmd->type = QXL_CURSOR_SET; - cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x; - cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y; + cmd->u.set.position.x = qcrtc->cur_x; + cmd->u.set.position.y = qcrtc->cur_y; cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0); @@ -446,8 +441,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc, cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); cmd->type = QXL_CURSOR_MOVE; - cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x; - cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y; + cmd->u.position.x = qcrtc->cur_x; + cmd->u.position.y = qcrtc->cur_y; qxl_release_unmap(qdev, release, &cmd->release_info); qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 3ab90179e9ab..01a86948eb8c 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ 
b/drivers/gpu/drm/qxl/qxl_drv.h @@ -135,8 +135,6 @@ struct qxl_crtc { int index; int cur_x; int cur_y; - int hot_spot_x; - int hot_spot_y; }; struct qxl_output { diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 79bab6fd76bb..dac78ad24b31 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1739,7 +1739,6 @@ static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc) static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct radeon_device *rdev = dev->dev_private; struct drm_crtc *test_crtc; struct radeon_crtc *test_radeon_crtc; @@ -1749,10 +1748,6 @@ static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc) test_radeon_crtc = to_radeon_crtc(test_crtc); if (test_radeon_crtc->encoder && ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) { - /* PPLL2 is exclusive to UNIPHYA on DCE61 */ - if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) && - test_radeon_crtc->pll_id == ATOM_PPLL2) - continue; /* for DP use the same PLL for all */ if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) return test_radeon_crtc->pll_id; @@ -1774,7 +1769,6 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; - struct radeon_device *rdev = dev->dev_private; struct drm_crtc *test_crtc; struct radeon_crtc *test_radeon_crtc; u32 adjusted_clock, test_adjusted_clock; @@ -1790,10 +1784,6 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc) test_radeon_crtc = to_radeon_crtc(test_crtc); if (test_radeon_crtc->encoder && !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) { - /* PPLL2 is exclusive to UNIPHYA on DCE61 */ - if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) && - test_radeon_crtc->pll_id == ATOM_PPLL2) - continue; /* check if we are already driving this connector with another crtc */ if (test_radeon_crtc->connector == radeon_crtc->connector) { /* if we are, return that pll */ diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 0b04b9282f56..adf74f4366bb 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -310,10 +310,6 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; - /* vertical FP must be at least 1 */ - if (mode->crtc_vsync_start == mode->crtc_vdisplay) - adjusted_mode->crtc_vsync_start++; - /* get the native mode for scaling */ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) { radeon_panel_mode_fixup(encoder, adjusted_mode); diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 32491355a1d4..2ad462896896 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2608,152 +2608,10 @@ static void evergreen_agp_enable(struct radeon_device *rdev) WREG32(VM_CONTEXT1_CNTL, 0); } -static const unsigned ni_dig_offsets[] = -{ - NI_DIG0_REGISTER_OFFSET, - NI_DIG1_REGISTER_OFFSET, - NI_DIG2_REGISTER_OFFSET, - NI_DIG3_REGISTER_OFFSET, - NI_DIG4_REGISTER_OFFSET, - NI_DIG5_REGISTER_OFFSET -}; - -static const unsigned ni_tx_offsets[] = -{ - NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1, - NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1, - NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1, - NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1, - 
NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1, - NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1 -}; - -static const unsigned evergreen_dp_offsets[] = -{ - EVERGREEN_DP0_REGISTER_OFFSET, - EVERGREEN_DP1_REGISTER_OFFSET, - EVERGREEN_DP2_REGISTER_OFFSET, - EVERGREEN_DP3_REGISTER_OFFSET, - EVERGREEN_DP4_REGISTER_OFFSET, - EVERGREEN_DP5_REGISTER_OFFSET -}; - - -/* - * Assumption is that EVERGREEN_CRTC_MASTER_EN enable for requested crtc - * We go from crtc to connector and it is not relible since it - * should be an opposite direction .If crtc is enable then - * find the dig_fe which selects this crtc and insure that it enable. - * if such dig_fe is found then find dig_be which selects found dig_be and - * insure that it enable and in DP_SST mode. - * if UNIPHY_PLL_CONTROL1.enable then we should disconnect timing - * from dp symbols clocks . - */ -static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev, - unsigned crtc_id, unsigned *ret_dig_fe) -{ - unsigned i; - unsigned dig_fe; - unsigned dig_be; - unsigned dig_en_be; - unsigned uniphy_pll; - unsigned digs_fe_selected; - unsigned dig_be_mode; - unsigned dig_fe_mask; - bool is_enabled = false; - bool found_crtc = false; - - /* loop through all running dig_fe to find selected crtc */ - for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) { - dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]); - if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON && - crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) { - /* found running pipe */ - found_crtc = true; - dig_fe_mask = 1 << i; - dig_fe = i; - break; - } - } - - if (found_crtc) { - /* loop through all running dig_be to find selected dig_fe */ - for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) { - dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]); - /* if dig_fe_selected by dig_be? */ - digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be); - dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be); - if (dig_fe_mask & digs_fe_selected && - /* if dig_be in sst mode? 
*/ - dig_be_mode == NI_DIG_BE_DPSST) { - dig_en_be = RREG32(NI_DIG_BE_EN_CNTL + - ni_dig_offsets[i]); - uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 + - ni_tx_offsets[i]); - /* dig_be enable and tx is running */ - if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE && - dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON && - uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) { - is_enabled = true; - *ret_dig_fe = dig_fe; - break; - } - } - } - } - - return is_enabled; -} - -/* - * Blank dig when in dp sst mode - * Dig ignores crtc timing - */ -static void evergreen_blank_dp_output(struct radeon_device *rdev, - unsigned dig_fe) -{ - unsigned stream_ctrl; - unsigned fifo_ctrl; - unsigned counter = 0; - - if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) { - DRM_ERROR("invalid dig_fe %d\n", dig_fe); - return; - } - - stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL + - evergreen_dp_offsets[dig_fe]); - if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) { - DRM_ERROR("dig %d , should be enable\n", dig_fe); - return; - } - - stream_ctrl &=~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE; - WREG32(EVERGREEN_DP_VID_STREAM_CNTL + - evergreen_dp_offsets[dig_fe], stream_ctrl); - - stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL + - evergreen_dp_offsets[dig_fe]); - while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) { - msleep(1); - counter++; - stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL + - evergreen_dp_offsets[dig_fe]); - } - if (counter >= 32 ) - DRM_ERROR("counter exceeds %d\n", counter); - - fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]); - fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET; - WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl); - -} - void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) { u32 crtc_enabled, tmp, frame_count, blackout; int i, j; - unsigned dig_fe; if (!ASIC_IS_NODCE(rdev)) { save->vga_render_control = RREG32(VGA_RENDER_CONTROL); @@ -2793,17 +2651,7 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav break; udelay(1); } - /*we should disable dig if it drives dp sst*/ - /*but we are in radeon_device_init and the topology is unknown*/ - /*and it is available after radeon_modeset_init*/ - /*the following method radeon_atom_encoder_dpms_dig*/ - /*does the job if we initialize it properly*/ - /*for now we do it this manually*/ - /**/ - if (ASIC_IS_DCE5(rdev) && - evergreen_is_dp_sst_stream_enabled(rdev, i ,&dig_fe)) - evergreen_blank_dp_output(rdev, dig_fe); - /*we could remove 6 lines below*/ + /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h index b436badf9efa..aa939dfed3a3 100644 --- a/drivers/gpu/drm/radeon/evergreen_reg.h +++ b/drivers/gpu/drm/radeon/evergreen_reg.h @@ -250,43 +250,8 @@ /* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */ #define EVERGREEN_HDMI_BASE 0x7030 -/*DIG block*/ -#define NI_DIG0_REGISTER_OFFSET (0x7000 - 0x7000) -#define NI_DIG1_REGISTER_OFFSET (0x7C00 - 0x7000) -#define NI_DIG2_REGISTER_OFFSET (0x10800 - 0x7000) -#define NI_DIG3_REGISTER_OFFSET (0x11400 - 0x7000) -#define NI_DIG4_REGISTER_OFFSET (0x12000 - 0x7000) -#define NI_DIG5_REGISTER_OFFSET (0x12C00 - 0x7000) - - -#define NI_DIG_FE_CNTL 0x7000 -# define NI_DIG_FE_CNTL_SOURCE_SELECT(x) ((x) & 0x3) -# define 
NI_DIG_FE_CNTL_SYMCLK_FE_ON (1<<24) - - -#define NI_DIG_BE_CNTL 0x7140 -# define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x) (((x) >> 8 ) & 0x3F) -# define NI_DIG_FE_CNTL_MODE(x) (((x) >> 16) & 0x7 ) - -#define NI_DIG_BE_EN_CNTL 0x7144 -# define NI_DIG_BE_EN_CNTL_ENABLE (1 << 0) -# define NI_DIG_BE_EN_CNTL_SYMBCLK_ON (1 << 8) -# define NI_DIG_BE_DPSST 0 /* Display Port block */ -#define EVERGREEN_DP0_REGISTER_OFFSET (0x730C - 0x730C) -#define EVERGREEN_DP1_REGISTER_OFFSET (0x7F0C - 0x730C) -#define EVERGREEN_DP2_REGISTER_OFFSET (0x10B0C - 0x730C) -#define EVERGREEN_DP3_REGISTER_OFFSET (0x1170C - 0x730C) -#define EVERGREEN_DP4_REGISTER_OFFSET (0x1230C - 0x730C) -#define EVERGREEN_DP5_REGISTER_OFFSET (0x12F0C - 0x730C) - - -#define EVERGREEN_DP_VID_STREAM_CNTL 0x730C -# define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE (1 << 0) -# define EVERGREEN_DP_VID_STREAM_STATUS (1 <<16) -#define EVERGREEN_DP_STEER_FIFO 0x7310 -# define EVERGREEN_DP_STEER_FIFO_RESET (1 << 0) #define EVERGREEN_DP_SEC_CNTL 0x7280 # define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0) # define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4) @@ -301,15 +266,4 @@ # define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24) # define EVERGREEN_DP_SEC_SS_EN (1 << 28) -/*DCIO_UNIPHY block*/ -#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1 (0x6600 -0x6600) -#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1 (0x6640 -0x6600) -#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1 (0x6680 - 0x6600) -#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1 (0x66C0 - 0x6600) -#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1 (0x6700 - 0x6600) -#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1 (0x6740 - 0x6600) - -#define NI_DCIO_UNIPHY0_PLL_CONTROL1 0x6618 -# define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE (1 << 0) - #endif diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index c4b4f298a283..9bc408c9f9f6 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -62,6 +62,10 @@ bool radeon_has_atpx(void) { return radeon_atpx_priv.atpx_detected; } +bool radeon_has_atpx_dgpu_power_cntl(void) { + return radeon_atpx_priv.atpx.functions.power_cntl; +} + /** * radeon_atpx_call - call an ATPX method * @@ -141,10 +145,6 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas */ static int radeon_atpx_validate(struct radeon_atpx *atpx) { - /* make sure required functions are enabled */ - /* dGPU power control is required */ - atpx->functions.power_cntl = true; - if (atpx->functions.px_params) { union acpi_object *info; struct atpx_px_params output; diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 9cfc1c3e1965..340f3f549f29 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -1996,12 +1996,10 @@ radeon_add_atom_connector(struct drm_device *dev, rdev->mode_info.dither_property, RADEON_FMT_DITHER_DISABLE); - if (radeon_audio != 0) { + if (radeon_audio != 0) drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.audio_property, RADEON_AUDIO_AUTO); - radeon_connector->audio = RADEON_AUDIO_AUTO; - } if (ASIC_IS_DCE5(rdev)) drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.output_csc_property, @@ -2126,7 +2124,6 @@ radeon_add_atom_connector(struct drm_device *dev, drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.audio_property, RADEON_AUDIO_AUTO); - radeon_connector->audio = RADEON_AUDIO_AUTO; } if (connector_type == 
DRM_MODE_CONNECTOR_DVII) { radeon_connector->dac_load_detect = true; @@ -2182,7 +2179,6 @@ radeon_add_atom_connector(struct drm_device *dev, drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.audio_property, RADEON_AUDIO_AUTO); - radeon_connector->audio = RADEON_AUDIO_AUTO; } if (ASIC_IS_DCE5(rdev)) drm_object_attach_property(&radeon_connector->base.base, @@ -2235,7 +2231,6 @@ radeon_add_atom_connector(struct drm_device *dev, drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.audio_property, RADEON_AUDIO_AUTO); - radeon_connector->audio = RADEON_AUDIO_AUTO; } if (ASIC_IS_DCE5(rdev)) drm_object_attach_property(&radeon_connector->base.base, diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index c566993a2ec3..f78f111e68de 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -103,6 +103,12 @@ static const char radeon_family_name[][16] = { "LAST", }; +#if defined(CONFIG_VGA_SWITCHEROO) +bool radeon_has_atpx_dgpu_power_cntl(void); +#else +static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; } +#endif + #define RADEON_PX_QUIRK_DISABLE_PX (1 << 0) #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1) @@ -1433,7 +1439,7 @@ int radeon_device_init(struct radeon_device *rdev, * ignore it */ vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); - if (rdev->flags & RADEON_IS_PX) + if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl()) runtime = true; vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime); if (runtime) diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c index db64e0062689..3b0c229d7dcd 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c +++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c @@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg tmp &= AUX_HPD_SEL(0x7); tmp |= AUX_HPD_SEL(chan->rec.hpd); - tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1); + tmp |= AUX_EN | AUX_LS_READ_EN; WREG32(AUX_CONTROL + aux_offset[instance], tmp); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index f342aad79cc6..e06ac546a90f 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -235,8 +235,6 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp) { struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo); - if (radeon_ttm_tt_has_userptr(bo->ttm)) - return -EPERM; return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp); } diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index caa73de584a5..7285adb27099 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2931,7 +2931,6 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = { { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 }, { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 }, { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 }, - { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 }, { 0, 0, 0, 0 }, }; diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 0585fd2031dd..a0e28f3a278d 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -1068,6 +1068,7 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base) goto err_register; } + pdev->dev.of_node = of_node; pdev->dev.parent = dev; 
ret = platform_device_add_data(pdev, ®->pdata, @@ -1078,12 +1079,6 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base) platform_device_put(pdev); goto err_register; } - - /* - * Set of_node only after calling platform_device_add. Otherwise - * the platform:imx-ipuv3-crtc modalias won't be used. - */ - pdev->dev.of_node = of_node; } return 0; diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index 18fdd400ac7a..362493118670 100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -1193,7 +1193,8 @@ static int adreno_init(struct kgsl_device *device) if (!adreno_is_a3xx(adreno_dev)) { int r = kgsl_allocate_global(device, - &adreno_dev->cmdbatch_profile_buffer, PAGE_SIZE, 0, 0); + &adreno_dev->cmdbatch_profile_buffer, PAGE_SIZE, + 0, 0, "alwayson"); adreno_dev->cmdbatch_profile_index = 0; @@ -1279,7 +1280,7 @@ static void _setup_throttling_counters(struct adreno_device *adreno_dev) static uint64_t _read_throttling_counters(struct adreno_device *adreno_dev) { - int i; + int i, adj; uint32_t th[ADRENO_GPMU_THROTTLE_COUNTERS]; struct adreno_busy_data *busy = &adreno_dev->busy_data; @@ -1300,8 +1301,14 @@ static uint64_t _read_throttling_counters(struct adreno_device *adreno_dev) adreno_dev->gpmu_throttle_counters[i], &busy->throttle_cycles[i]); } - i = th[CRC_MORE50PCT] - th[IDLE_10PCT]; - return th[CRC_50PCT] + th[CRC_LESS50PCT] / 3 + (i < 0 ? 0 : i) * 3; + adj = th[CRC_MORE50PCT] - th[IDLE_10PCT]; + adj = th[CRC_50PCT] + th[CRC_LESS50PCT] / 3 + (adj < 0 ? 0 : adj) * 3; + + trace_kgsl_clock_throttling( + th[IDLE_10PCT], th[CRC_50PCT], + th[CRC_MORE50PCT], th[CRC_LESS50PCT], + adj); + return adj; } static void _update_threshold_count(struct adreno_device *adreno_dev, diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c index 2accbe5c5764..97e71464c2df 100644 --- a/drivers/gpu/msm/adreno_a3xx.c +++ b/drivers/gpu/msm/adreno_a3xx.c @@ -174,7 +174,7 @@ static int _a3xx_pwron_fixup(struct adreno_device *adreno_dev) ret = kgsl_allocate_global(KGSL_DEVICE(adreno_dev), &adreno_dev->pwron_fixup, PAGE_SIZE, - KGSL_MEMFLAGS_GPUREADONLY, 0); + KGSL_MEMFLAGS_GPUREADONLY, 0, "pwron_fixup"); if (ret) return ret; diff --git a/drivers/gpu/msm/adreno_a4xx.c b/drivers/gpu/msm/adreno_a4xx.c index b15d23cfbe0a..7a691667e59f 100644 --- a/drivers/gpu/msm/adreno_a4xx.c +++ b/drivers/gpu/msm/adreno_a4xx.c @@ -1360,7 +1360,7 @@ static int _a4xx_pwron_fixup(struct adreno_device *adreno_dev) ret = kgsl_allocate_global(KGSL_DEVICE(adreno_dev), &adreno_dev->pwron_fixup, PAGE_SIZE, - KGSL_MEMFLAGS_GPUREADONLY, 0); + KGSL_MEMFLAGS_GPUREADONLY, 0, "pwron_fixup"); if (ret) return ret; diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c index 3252bfb764f2..583de85678fc 100644 --- a/drivers/gpu/msm/adreno_a5xx.c +++ b/drivers/gpu/msm/adreno_a5xx.c @@ -244,7 +244,8 @@ static int a5xx_critical_packet_construct(struct adreno_device *adreno_dev) ret = kgsl_allocate_global(&adreno_dev->dev, &crit_pkts, PAGE_SIZE, - KGSL_MEMFLAGS_GPUREADONLY, 0); + KGSL_MEMFLAGS_GPUREADONLY, + 0, "crit_pkts"); if (ret) return ret; @@ -258,19 +259,19 @@ static int a5xx_critical_packet_construct(struct adreno_device *adreno_dev) ret = kgsl_allocate_global(&adreno_dev->dev, &crit_pkts_refbuf1, - PAGE_SIZE, 0, 0); + PAGE_SIZE, 0, 0, "crit_pkts_refbuf1"); if (ret) return ret; ret = kgsl_allocate_global(&adreno_dev->dev, &crit_pkts_refbuf2, - PAGE_SIZE, 0, 0); + PAGE_SIZE, 0, 0, "crit_pkts_refbuf2"); if (ret) return ret; ret = 
kgsl_allocate_global(&adreno_dev->dev, &crit_pkts_refbuf3, - PAGE_SIZE, 0, 0); + PAGE_SIZE, 0, 0, "crit_pkts_refbuf3"); if (ret) return ret; @@ -2366,7 +2367,7 @@ static int _load_firmware(struct kgsl_device *device, const char *fwfile, } ret = kgsl_allocate_global(device, ucode, fw->size - 4, - KGSL_MEMFLAGS_GPUREADONLY, 0); + KGSL_MEMFLAGS_GPUREADONLY, 0, "ucode"); if (ret) goto done; diff --git a/drivers/gpu/msm/adreno_a5xx_preempt.c b/drivers/gpu/msm/adreno_a5xx_preempt.c index c1463b824c67..ffd7acbbe1f9 100644 --- a/drivers/gpu/msm/adreno_a5xx_preempt.c +++ b/drivers/gpu/msm/adreno_a5xx_preempt.c @@ -490,7 +490,8 @@ static int a5xx_preemption_ringbuffer_init(struct adreno_device *adreno_dev, int ret; ret = kgsl_allocate_global(device, &rb->preemption_desc, - A5XX_CP_CTXRECORD_SIZE_IN_BYTES, 0, KGSL_MEMDESC_PRIVILEGED); + A5XX_CP_CTXRECORD_SIZE_IN_BYTES, 0, KGSL_MEMDESC_PRIVILEGED, + "preemption_desc"); if (ret) return ret; @@ -525,7 +526,8 @@ static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev) /* Allocate mem for storing preemption smmu record */ return kgsl_allocate_global(device, &iommu->smmu_info, PAGE_SIZE, - KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_PRIVILEGED); + KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_PRIVILEGED, + "smmu_info"); } #else static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev) @@ -555,7 +557,8 @@ int a5xx_preemption_init(struct adreno_device *adreno_dev) /* Allocate mem for storing preemption counters */ ret = kgsl_allocate_global(device, &preempt->counters, adreno_dev->num_ringbuffers * - A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE, 0, 0); + A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE, 0, 0, + "preemption_counters"); if (ret) return ret; diff --git a/drivers/gpu/msm/adreno_a5xx_snapshot.c b/drivers/gpu/msm/adreno_a5xx_snapshot.c index 04d82844a5e9..aeffeab2f6dc 100644 --- a/drivers/gpu/msm/adreno_a5xx_snapshot.c +++ b/drivers/gpu/msm/adreno_a5xx_snapshot.c @@ -1033,11 +1033,11 @@ void a5xx_crashdump_init(struct adreno_device *adreno_dev) /* The script buffers needs 2 extra qwords on the end */ if (kgsl_allocate_global(device, &capturescript, script_size + 16, KGSL_MEMFLAGS_GPUREADONLY, - KGSL_MEMDESC_PRIVILEGED)) + KGSL_MEMDESC_PRIVILEGED, "capturescript")) return; if (kgsl_allocate_global(device, ®isters, data_size, 0, - KGSL_MEMDESC_PRIVILEGED)) { + KGSL_MEMDESC_PRIVILEGED, "capturescript_regs")) { kgsl_free_global(KGSL_DEVICE(adreno_dev), &capturescript); return; } diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c index 9cbcd06d7658..680827e5b848 100644 --- a/drivers/gpu/msm/adreno_debugfs.c +++ b/drivers/gpu/msm/adreno_debugfs.c @@ -138,7 +138,7 @@ static void sync_event_print(struct seq_file *s, break; } case KGSL_CMD_SYNCPOINT_TYPE_FENCE: - seq_printf(s, "sync: [%p] %s", sync_event->handle, + seq_printf(s, "sync: [%pK] %s", sync_event->handle, (sync_event->handle && sync_event->handle->fence) ? 
sync_event->handle->fence->name : "NULL"); break; diff --git a/drivers/gpu/msm/adreno_profile.c b/drivers/gpu/msm/adreno_profile.c index c4fab8a5528a..d8af520b2fe6 100644 --- a/drivers/gpu/msm/adreno_profile.c +++ b/drivers/gpu/msm/adreno_profile.c @@ -1071,7 +1071,8 @@ void adreno_profile_init(struct adreno_device *adreno_dev) /* allocate shared_buffer, which includes pre_ib and post_ib */ profile->shared_size = ADRENO_PROFILE_SHARED_BUF_SIZE_DWORDS; ret = kgsl_allocate_global(device, &profile->shared_buffer, - profile->shared_size * sizeof(unsigned int), 0, 0); + profile->shared_size * sizeof(unsigned int), + 0, 0, "profile"); if (ret) { profile->shared_size = 0; diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c index 5ffb0b2513f3..07ef09034d7c 100644 --- a/drivers/gpu/msm/adreno_ringbuffer.c +++ b/drivers/gpu/msm/adreno_ringbuffer.c @@ -250,12 +250,12 @@ static int _adreno_ringbuffer_probe(struct adreno_device *adreno_dev, * switch pagetable */ ret = kgsl_allocate_global(KGSL_DEVICE(adreno_dev), &rb->pagetable_desc, - PAGE_SIZE, 0, KGSL_MEMDESC_PRIVILEGED); + PAGE_SIZE, 0, KGSL_MEMDESC_PRIVILEGED, "pagetable_desc"); if (ret) return ret; - return kgsl_allocate_global(KGSL_DEVICE(adreno_dev), &rb->buffer_desc, - KGSL_RB_SIZE, KGSL_MEMFLAGS_GPUREADONLY, 0); + KGSL_RB_SIZE, KGSL_MEMFLAGS_GPUREADONLY, + 0, "ringbuffer"); } int adreno_ringbuffer_probe(struct adreno_device *adreno_dev, bool nopreempt) diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index 24005a1fda72..f9eb080d903b 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -4457,9 +4457,8 @@ int kgsl_device_platform_probe(struct kgsl_device *device) disable_irq(device->pwrctrl.interrupt_num); KGSL_DRV_INFO(device, - "dev_id %d regs phys 0x%08lx size 0x%08x virt %p\n", - device->id, device->reg_phys, device->reg_len, - device->reg_virt); + "dev_id %d regs phys 0x%08lx size 0x%08x\n", + device->id, device->reg_phys, device->reg_len); rwlock_init(&device->context_lock); @@ -4475,13 +4474,13 @@ int kgsl_device_platform_probe(struct kgsl_device *device) goto error_close_mmu; status = kgsl_allocate_global(device, &device->memstore, - KGSL_MEMSTORE_SIZE, 0, KGSL_MEMDESC_CONTIG); + KGSL_MEMSTORE_SIZE, 0, KGSL_MEMDESC_CONTIG, "memstore"); if (status != 0) goto error_close_mmu; status = kgsl_allocate_global(device, &device->scratch, - PAGE_SIZE, 0, 0); + PAGE_SIZE, 0, 0, "scratch"); if (status != 0) goto error_free_memstore; diff --git a/drivers/gpu/msm/kgsl_cffdump.c b/drivers/gpu/msm/kgsl_cffdump.c index 2e90f78a303c..8e783f8ce017 100644 --- a/drivers/gpu/msm/kgsl_cffdump.c +++ b/drivers/gpu/msm/kgsl_cffdump.c @@ -513,10 +513,6 @@ EXPORT_SYMBOL(kgsl_cffdump_waitirq); static int subbuf_start_handler(struct rchan_buf *buf, void *subbuf, void *prev_subbuf, size_t prev_padding) { - pr_debug("kgsl: cffdump: subbuf_start_handler(subbuf=%p, prev_subbuf" - "=%p, prev_padding=%08zx)\n", subbuf, prev_subbuf, - prev_padding); - if (relay_buf_full(buf)) { if (!suspended) { suspended = 1; @@ -573,9 +569,6 @@ static struct rchan *create_channel(unsigned subbuf_size, unsigned n_subbufs) { struct rchan *chan; - pr_info("kgsl: cffdump: relay: create_channel: subbuf_size %u, " - "n_subbufs %u, dir 0x%p\n", subbuf_size, n_subbufs, dir); - chan = relay_open("cpu", dir, subbuf_size, n_subbufs, &relay_callbacks, NULL); if (!chan) { diff --git a/drivers/gpu/msm/kgsl_cmdbatch.c b/drivers/gpu/msm/kgsl_cmdbatch.c index ceca8b1e1522..6272410ce544 100644 --- a/drivers/gpu/msm/kgsl_cmdbatch.c +++ 
b/drivers/gpu/msm/kgsl_cmdbatch.c
@@ -80,7 +80,7 @@ void kgsl_dump_syncpoints(struct kgsl_device *device,
         }
     case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
         if (event->handle)
-            dev_err(device->dev, "  fence: [%p] %s\n",
+            dev_err(device->dev, "  fence: [%pK] %s\n",
                 event->handle->fence, event->handle->name);
         else
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index df9eb9ebd779..2f293e4da398 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -280,6 +280,29 @@ static const struct file_operations process_sparse_mem_fops = {
     .release = process_mem_release,
 };
 
+static int globals_print(struct seq_file *s, void *unused)
+{
+    kgsl_print_global_pt_entries(s);
+    return 0;
+}
+
+static int globals_open(struct inode *inode, struct file *file)
+{
+    return single_open(file, globals_print, NULL);
+}
+
+static int globals_release(struct inode *inode, struct file *file)
+{
+    return single_release(inode, file);
+}
+
+static const struct file_operations global_fops = {
+    .open = globals_open,
+    .read = seq_read,
+    .llseek = seq_lseek,
+    .release = globals_release,
+};
+
 /**
  * kgsl_process_init_debugfs() - Initialize debugfs for a process
  * @private: Pointer to process private structure created for the process
@@ -336,6 +359,9 @@ void kgsl_core_debugfs_init(void)
 
     kgsl_debugfs_dir = debugfs_create_dir("kgsl", NULL);
 
+    debugfs_create_file("globals", 0444, kgsl_debugfs_dir, NULL,
+        &global_fops);
+
     debug_dir = debugfs_create_dir("debug", kgsl_debugfs_dir);
 
     debugfs_create_file("strict_memory", 0644, debug_dir, NULL,
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 166bb68e64a1..71b6086423d6 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -38,6 +38,10 @@
 
 #define _IOMMU_PRIV(_mmu) (&((_mmu)->priv.iommu))
 
+#define ADDR_IN_GLOBAL(_a) \
+    (((_a) >= KGSL_IOMMU_GLOBAL_MEM_BASE) && \
+     ((_a) < (KGSL_IOMMU_GLOBAL_MEM_BASE + KGSL_IOMMU_GLOBAL_MEM_SIZE)))
+
 static struct kgsl_mmu_pt_ops iommu_pt_ops;
 static bool need_iommu_sync;
 
@@ -92,19 +96,41 @@ static struct kmem_cache *addr_entry_cache;
 
 #define GLOBAL_PT_ENTRIES 32
 
-static struct kgsl_memdesc *global_pt_entries[GLOBAL_PT_ENTRIES];
+struct global_pt_entry {
+    struct kgsl_memdesc *memdesc;
+    char name[32];
+};
+
+static struct global_pt_entry global_pt_entries[GLOBAL_PT_ENTRIES];
 static struct kgsl_memdesc *kgsl_global_secure_pt_entry;
 static int global_pt_count;
 uint64_t global_pt_alloc;
 static struct kgsl_memdesc gpu_qdss_desc;
 
+void kgsl_print_global_pt_entries(struct seq_file *s)
+{
+    int i;
+
+    for (i = 0; i < global_pt_count; i++) {
+        struct kgsl_memdesc *memdesc = global_pt_entries[i].memdesc;
+
+        if (memdesc == NULL)
+            continue;
+
+        seq_printf(s, "0x%16.16llX-0x%16.16llX %16llu %s\n",
+            memdesc->gpuaddr, memdesc->gpuaddr + memdesc->size - 1,
+            memdesc->size, global_pt_entries[i].name);
+    }
+}
+
 static void kgsl_iommu_unmap_globals(struct kgsl_pagetable *pagetable)
 {
     unsigned int i;
 
     for (i = 0; i < global_pt_count; i++) {
-        if (global_pt_entries[i] != NULL)
-            kgsl_mmu_unmap(pagetable, global_pt_entries[i]);
+        if (global_pt_entries[i].memdesc != NULL)
+            kgsl_mmu_unmap(pagetable,
+                global_pt_entries[i].memdesc);
     }
 }
 
@@ -113,8 +139,9 @@ static void kgsl_iommu_map_globals(struct kgsl_pagetable *pagetable)
 {
     unsigned int i;
 
     for (i = 0; i < global_pt_count; i++) {
-        if (global_pt_entries[i] != NULL) {
-            int ret = kgsl_mmu_map(pagetable, global_pt_entries[i]);
+        if (global_pt_entries[i].memdesc != NULL) {
+            int ret = kgsl_mmu_map(pagetable,
+                global_pt_entries[i].memdesc);
 
             BUG_ON(ret);
         }
@@ -152,17 +179,17 @@ static void kgsl_iommu_remove_global(struct kgsl_mmu *mmu,
         return;
 
     for (i = 0; i < global_pt_count; i++) {
-        if (global_pt_entries[i] == memdesc) {
+        if (global_pt_entries[i].memdesc == memdesc) {
             memdesc->gpuaddr = 0;
             memdesc->priv &= ~KGSL_MEMDESC_GLOBAL;
-            global_pt_entries[i] = NULL;
+            global_pt_entries[i].memdesc = NULL;
             return;
         }
     }
 }
 
 static void kgsl_iommu_add_global(struct kgsl_mmu *mmu,
-        struct kgsl_memdesc *memdesc)
+        struct kgsl_memdesc *memdesc, const char *name)
 {
     if (memdesc->gpuaddr != 0)
         return;
@@ -174,7 +201,10 @@ static void kgsl_iommu_add_global(struct kgsl_mmu *mmu,
     memdesc->priv |= KGSL_MEMDESC_GLOBAL;
     global_pt_alloc += memdesc->size;
 
-    global_pt_entries[global_pt_count++] = memdesc;
+    global_pt_entries[global_pt_count].memdesc = memdesc;
+    strlcpy(global_pt_entries[global_pt_count].name, name,
+        sizeof(global_pt_entries[global_pt_count].name));
+    global_pt_count++;
 }
 
 void kgsl_add_global_secure_entry(struct kgsl_device *device,
@@ -220,7 +250,7 @@ static void kgsl_setup_qdss_desc(struct kgsl_device *device)
         return;
     }
 
-    kgsl_mmu_add_global(device, &gpu_qdss_desc);
+    kgsl_mmu_add_global(device, &gpu_qdss_desc, "gpu-qdss");
 }
 
 static inline void kgsl_cleanup_qdss_desc(struct kgsl_mmu *mmu)
@@ -493,8 +523,62 @@ struct _mem_entry {
     unsigned int priv;
     int pending_free;
     pid_t pid;
+    char name[32];
 };
 
+static void _get_global_entries(uint64_t faultaddr,
+        struct _mem_entry *prev,
+        struct _mem_entry *next)
+{
+    int i;
+    uint64_t prevaddr = 0;
+    struct global_pt_entry *p = NULL;
+
+    uint64_t nextaddr = (uint64_t) -1;
+    struct global_pt_entry *n = NULL;
+
+    for (i = 0; i < global_pt_count; i++) {
+        uint64_t addr;
+
+        if (global_pt_entries[i].memdesc == NULL)
+            continue;
+
+        addr = global_pt_entries[i].memdesc->gpuaddr;
+        if ((addr < faultaddr) && (addr > prevaddr)) {
+            prevaddr = addr;
+            p = &global_pt_entries[i];
+        }
+
+        if ((addr > faultaddr) && (addr < nextaddr)) {
+            nextaddr = addr;
+            n = &global_pt_entries[i];
+        }
+    }
+
+    if (p != NULL) {
+        prev->gpuaddr = p->memdesc->gpuaddr;
+        prev->size = p->memdesc->size;
+        prev->flags = p->memdesc->flags;
+        prev->priv = p->memdesc->priv;
+        prev->pid = 0;
+        strlcpy(prev->name, p->name, sizeof(prev->name));
+    }
+
+    if (n != NULL) {
+        next->gpuaddr = n->memdesc->gpuaddr;
+        next->size = n->memdesc->size;
+        next->flags = n->memdesc->flags;
+        next->priv = n->memdesc->priv;
+        next->pid = 0;
+        strlcpy(next->name, n->name, sizeof(next->name));
+    }
+}
+
+void __kgsl_get_memory_usage(struct _mem_entry *entry)
+{
+    kgsl_get_memory_usage(entry->name, sizeof(entry->name), entry->flags);
+}
+
 static void _get_entries(struct kgsl_process_private *private,
         uint64_t faultaddr, struct _mem_entry *prev,
         struct _mem_entry *next)
@@ -529,6 +613,7 @@ static void _get_entries(struct kgsl_process_private *private,
         prev->priv = p->memdesc.priv;
         prev->pending_free = p->pending_free;
         prev->pid = private->pid;
+        __kgsl_get_memory_usage(prev);
     }
 
     if (n != NULL) {
@@ -538,6 +623,7 @@ static void _get_entries(struct kgsl_process_private *private,
         next->priv = n->memdesc.priv;
         next->pending_free = n->pending_free;
         next->pid = private->pid;
+        __kgsl_get_memory_usage(next);
     }
 }
 
@@ -553,7 +639,9 @@ static void _find_mem_entries(struct kgsl_mmu *mmu, uint64_t faultaddr,
     /* Set the maximum possible size as an initial value */
     nextentry->gpuaddr = (uint64_t) -1;
 
-    if (context) {
+    if (ADDR_IN_GLOBAL(faultaddr)) {
+        _get_global_entries(faultaddr, preventry, nextentry);
+    } else if (context) {
         private = context->proc_priv;
         spin_lock(&private->mem_lock);
         _get_entries(private, faultaddr, preventry, nextentry);
@@ -563,18 +651,13 @@ static void _find_mem_entries(struct kgsl_mmu *mmu, uint64_t faultaddr,
 
 static void _print_entry(struct kgsl_device *device, struct _mem_entry *entry)
 {
-    char name[32];
-    memset(name, 0, sizeof(name));
-
-    kgsl_get_memory_usage(name, sizeof(name) - 1, entry->flags);
-
     KGSL_LOG_DUMP(device,
         "[%016llX - %016llX] %s %s (pid = %d) (%s)\n",
         entry->gpuaddr,
         entry->gpuaddr + entry->size,
         entry->priv & KGSL_MEMDESC_GUARD_PAGE ? "(+guard)" : "",
         entry->pending_free ? "(pending free)" : "",
-        entry->pid, name);
+        entry->pid, entry->name);
 }
 
 static void _check_if_freed(struct kgsl_iommu_context *ctx,
@@ -1395,7 +1478,7 @@ static int kgsl_iommu_init(struct kgsl_mmu *mmu)
         }
     }
 
-    kgsl_iommu_add_global(mmu, &iommu->setstate);
+    kgsl_iommu_add_global(mmu, &iommu->setstate, "setstate");
     kgsl_setup_qdss_desc(device);
 
 done:
@@ -2220,10 +2303,6 @@ static uint64_t kgsl_iommu_find_svm_region(struct kgsl_pagetable *pagetable,
     return addr;
 }
 
-#define ADDR_IN_GLOBAL(_a) \
-    (((_a) >= KGSL_IOMMU_GLOBAL_MEM_BASE) && \
-     ((_a) < (KGSL_IOMMU_GLOBAL_MEM_BASE + KGSL_IOMMU_GLOBAL_MEM_SIZE)))
-
 static int kgsl_iommu_set_svm_region(struct kgsl_pagetable *pagetable,
         uint64_t gpuaddr, uint64_t size)
 {
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 10f6b8049d36..4371c9a1b87e 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -543,12 +543,12 @@ void kgsl_mmu_remove_global(struct kgsl_device *device,
 EXPORT_SYMBOL(kgsl_mmu_remove_global);
 
 void kgsl_mmu_add_global(struct kgsl_device *device,
-        struct kgsl_memdesc *memdesc)
+        struct kgsl_memdesc *memdesc, const char *name)
 {
     struct kgsl_mmu *mmu = &device->mmu;
 
     if (MMU_OP_VALID(mmu, mmu_add_global))
-        mmu->mmu_ops->mmu_add_global(mmu, memdesc);
+        mmu->mmu_ops->mmu_add_global(mmu, memdesc, name);
 }
 EXPORT_SYMBOL(kgsl_mmu_add_global);
 
@@ -620,7 +620,7 @@ static struct kgsl_mmu_pt_ops nommu_pt_ops = {
 };
 
 static void nommu_add_global(struct kgsl_mmu *mmu,
-        struct kgsl_memdesc *memdesc)
+        struct kgsl_memdesc *memdesc, const char *name)
 {
     memdesc->gpuaddr = (uint64_t) sg_phys(memdesc->sgt->sgl);
 }
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 53645cc1741c..acbc0e784cf2 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -75,7 +75,7 @@ struct kgsl_mmu_ops {
             (struct kgsl_mmu *mmu);
     int (*mmu_init_pt)(struct kgsl_mmu *mmu, struct kgsl_pagetable *);
     void (*mmu_add_global)(struct kgsl_mmu *mmu,
-        struct kgsl_memdesc *memdesc);
+        struct kgsl_memdesc *memdesc, const char *name);
     void (*mmu_remove_global)(struct kgsl_mmu *mmu,
         struct kgsl_memdesc *memdesc);
     struct kgsl_pagetable * (*mmu_getpagetable)(struct kgsl_mmu *mmu,
@@ -174,6 +174,7 @@ struct kgsl_pagetable *kgsl_mmu_getpagetable_ptbase(struct kgsl_mmu *,
 
 void kgsl_add_global_secure_entry(struct kgsl_device *device,
                     struct kgsl_memdesc *memdesc);
+void kgsl_print_global_pt_entries(struct seq_file *s);
 void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable);
 
 int kgsl_mmu_get_gpuaddr(struct kgsl_pagetable *pagetable,
@@ -198,7 +199,7 @@ int kgsl_mmu_find_region(struct kgsl_pagetable *pagetable,
         uint64_t *gpuaddr, uint64_t size, unsigned int align);
 
 void kgsl_mmu_add_global(struct kgsl_device *device,
-        struct kgsl_memdesc *memdesc);
+        struct kgsl_memdesc *memdesc, const char *name);
 void kgsl_mmu_remove_global(struct kgsl_device *device,
         struct kgsl_memdesc *memdesc);
 
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index 565ae4c39fdd..9e6817c76df8 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -279,7 +279,7 @@ kgsl_memdesc_footprint(const struct kgsl_memdesc *memdesc)
  */
 static inline int kgsl_allocate_global(struct kgsl_device *device,
     struct kgsl_memdesc *memdesc, uint64_t size, uint64_t flags,
-    unsigned int priv)
+    unsigned int priv, const char *name)
 {
     int ret;
 
@@ -297,7 +297,7 @@ static inline int kgsl_allocate_global(struct kgsl_device *device,
     }
 
     if (ret == 0)
-        kgsl_mmu_add_global(device, memdesc);
+        kgsl_mmu_add_global(device, memdesc, name);
 
     return ret;
 }
diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c
index f9d3ede718ab..dd004f9588e9 100644
--- a/drivers/gpu/msm/kgsl_snapshot.c
+++ b/drivers/gpu/msm/kgsl_snapshot.c
@@ -1042,9 +1042,6 @@ void kgsl_snapshot_save_frozen_objs(struct work_struct *work)
         goto done;
 
     snapshot->mempool = vmalloc(size);
-    if (snapshot->mempool != NULL)
-        KGSL_CORE_ERR("snapshot: mempool address %p, size %zx\n",
-                snapshot->mempool, size);
 
     ptr = snapshot->mempool;
     snapshot->mempool_size = 0;
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index 1b51eb591036..4ef9f80177d6 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -1192,6 +1192,41 @@ TRACE_EVENT(sparse_unbind,
 
 );
 
+TRACE_EVENT(kgsl_clock_throttling,
+    TP_PROTO(
+        int idle_10pct,
+        int crc_50pct,
+        int crc_more50pct,
+        int crc_less50pct,
+        int adj
+    ),
+    TP_ARGS(
+        idle_10pct,
+        crc_50pct,
+        crc_more50pct,
+        crc_less50pct,
+        adj
+    ),
+    TP_STRUCT__entry(
+        __field(int, idle_10pct)
+        __field(int, crc_50pct)
+        __field(int, crc_more50pct)
+        __field(int, crc_less50pct)
+        __field(int, adj)
+    ),
+    TP_fast_assign(
+        __entry->idle_10pct = idle_10pct;
+        __entry->crc_50pct = crc_50pct;
+        __entry->crc_more50pct = crc_more50pct;
+        __entry->crc_less50pct = crc_less50pct;
+        __entry->adj = adj;
+    ),
+    TP_printk("idle_10=%d crc_50=%d crc_more50=%d crc_less50=%d adj=%d",
+        __entry->idle_10pct, __entry->crc_50pct, __entry->crc_more50pct,
+        __entry->crc_less50pct, __entry->adj
+    )
+);
+
 #endif /* _KGSL_TRACE_H */
 
 /* This part must be outside protection */
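Editor's note: the new "globals" node added in kgsl_debugfs.c uses the standard single_open()/seq_file pattern, so the read handler only has to emit text with seq_printf(). The sketch below shows that pattern in isolation, under the assumption of a stand-alone module; the example_* names and the "example" directory are illustrative and not part of this patch, while the kernel APIs (debugfs_create_file, single_open, seq_read, single_release) are the real ones used above.

/* Minimal sketch of the single_open()/seq_file debugfs pattern. */
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static struct dentry *example_dir;

static int example_print(struct seq_file *s, void *unused)
{
    /* Whatever is printed here becomes the file contents on each read. */
    seq_printf(s, "%-16s %u\n", "example-entry", 42U);
    return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
    return single_open(file, example_print, NULL);
}

static const struct file_operations example_fops = {
    .open = example_open,
    .read = seq_read,
    .llseek = seq_lseek,
    .release = single_release,
};

static int __init example_init(void)
{
    example_dir = debugfs_create_dir("example", NULL);
    debugfs_create_file("entries", 0444, example_dir, NULL, &example_fops);
    return 0;
}

static void __exit example_exit(void)
{
    debugfs_remove_recursive(example_dir);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

With the patch applied, the KGSL equivalent should be readable at /sys/kernel/debug/kgsl/globals (assuming debugfs is mounted at the usual location), one line per named global page-table entry as formatted by kgsl_print_global_pt_entries().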

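Editor's note: the TRACE_EVENT(kgsl_clock_throttling, ...) block above only defines the event; the call site that fires it is in one of the other changed files and is not shown in these hunks. As a hedged sketch, a caller would use the trace_kgsl_clock_throttling() helper that the TRACE_EVENT macro generates; the wrapper function and its argument names below are assumptions for illustration.

/* Hypothetical call site; only trace_kgsl_clock_throttling() is generated
 * by the TRACE_EVENT definition in kgsl_trace.h.
 */
#include "kgsl_trace.h"

static void example_report_throttling(int idle_10pct, int crc_50pct,
        int crc_more50pct, int crc_less50pct, int adj)
{
    trace_kgsl_clock_throttling(idle_10pct, crc_50pct,
            crc_more50pct, crc_less50pct, adj);
}

Once the event is enabled through tracefs (under the event group declared by the header's TRACE_SYSTEM, presumably kgsl), each hit is logged with the TP_printk format shown in the diff.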