-rw-r--r--  drivers/gpu/drm/msm/Makefile                    |   1
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_crtc.c              |   3
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_encoder.c           |   2
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_encoder_phys.h      |  14
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c  | 168
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_hw_catalog.h        |   7
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c   |   4
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_hw_interrupts.c     | 969
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_hw_interrupts.h     | 245
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_hw_mdss.h           |  19
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_hwio.h              |   3
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_irq.c               | 240
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_kms.c               |  55
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_kms.h               | 120
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_plane.c             |  13
15 files changed, 1784 insertions, 79 deletions
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 61163578c06c..7c73657b399e 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -89,4 +89,5 @@ obj-$(CONFIG_DRM_MSM) += sde/sde_hw_catalog.o \
sde/sde_hw_sspp.o \
sde/sde_hw_wb.o \
sde/sde_hw_pingpong.o \
+ sde/sde_hw_interrupts.o \
sde/sde_mdp_formats.o
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 97ffb01f8b70..3c30267e7283 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -57,8 +57,7 @@ struct sde_crtc {
static struct sde_kms *get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv = crtc->dev->dev_private;
-
- return to_sde_kms(to_mdp_kms(priv->kms));
+ return to_sde_kms(priv->kms);
}
static inline struct sde_hw_ctl *sde_crtc_rm_get_ctl_path(enum sde_ctl idx,
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index cb6e56be5d3d..2a3bc3004e6c 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -410,7 +410,7 @@ static struct drm_encoder *sde_encoder_virt_init(
struct drm_device *dev, struct display_info *disp_info)
{
struct msm_drm_private *priv = dev->dev_private;
- struct sde_kms *sde_kms = to_sde_kms(to_mdp_kms(priv->kms));
+ struct sde_kms *sde_kms = to_sde_kms(priv->kms);
struct drm_encoder *drm_enc = NULL;
struct sde_encoder_virt *sde_enc = NULL;
int drm_enc_mode = DRM_MODE_ENCODER_NONE;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index 427a6d94322e..27fc11175c19 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -47,15 +47,23 @@ struct sde_encoder_phys {
struct sde_encoder_phys_ops phys_ops;
struct sde_hw_intf *hw_intf;
struct sde_hw_ctl *hw_ctl;
- struct mdp_kms *mdp_kms;
+ struct sde_kms *sde_kms;
struct drm_display_mode cached_mode;
bool enabled;
spinlock_t spin_lock;
};
+/**
+ * struct sde_encoder_phys_vid - sub-class of sde_encoder_phys to handle video
+ * mode specific operations
+ * @base: Baseclass physical encoder structure
+ * @irq_idx: IRQ interface lookup index
+ * @vblank_complete: for vblank irq synchronization
+ */
struct sde_encoder_phys_vid {
struct sde_encoder_phys base;
- struct mdp_irq vblank_irq;
+ int irq_idx;
+ struct completion vblank_complete;
};
struct sde_encoder_virt {
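
The sde_encoder_phys_vid structure above swaps the old mdp_irq bookkeeping for a flat irq_idx plus a struct completion. For reference, a minimal sketch of the completion pattern it relies on (not part of the patch; the 100 ms timeout mirrors the VBLANK_TIMEOUT define added in sde_encoder_phys_vid.c below):

    #include <linux/completion.h>
    #include <linux/jiffies.h>
    #include <linux/printk.h>

    static struct completion vblank_complete;

    static void example_init(void)
    {
            init_completion(&vblank_complete);
    }

    static void example_vblank_isr(void)
    {
            /* interrupt context: wake every waiter for this vblank */
            complete_all(&vblank_complete);
    }

    static void example_wait(void)
    {
            /* block until the next vblank IRQ or a 100 ms timeout */
            if (!wait_for_completion_timeout(&vblank_complete,
                                             msecs_to_jiffies(100)))
                    pr_err("timed out waiting for vblank irq\n");
    }

The disable path in the patch also calls reinit_completion() so that a stale completion from a previous frame cannot satisfy the next wait.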
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index 0528c3d1ff8d..33d1a8eef7a5 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -20,9 +20,31 @@
#include "sde_encoder_phys.h"
#include "sde_mdp_formats.h"
+#define VBLANK_TIMEOUT msecs_to_jiffies(100)
+
#define to_sde_encoder_phys_vid(x) \
container_of(x, struct sde_encoder_phys_vid, base)
+static bool sde_encoder_phys_vid_is_master(
+ struct sde_encoder_phys *phys_enc)
+{
+ bool ret = true;
+
+ return ret;
+}
+
+static void sde_encoder_phys_vid_wait_for_vblank(
+ struct sde_encoder_phys_vid *vid_enc)
+{
+ int rc = 0;
+
+ DBG("");
+ rc = wait_for_completion_timeout(&vid_enc->vblank_complete,
+ VBLANK_TIMEOUT);
+ if (rc == 0)
+ DRM_ERROR("Timed out waiting for vblank irq\n");
+}
+
static void drm_mode_to_intf_timing_params(
const struct sde_encoder_phys *phys_enc,
const struct drm_display_mode *mode,
@@ -195,19 +217,29 @@ static bool sde_encoder_phys_vid_mode_fixup(
return true;
}
+static void sde_encoder_phys_vid_flush_intf(struct sde_encoder_phys *phys_enc)
+{
+ struct sde_hw_intf *intf = phys_enc->hw_intf;
+ struct sde_hw_ctl *ctl = phys_enc->hw_ctl;
+ u32 flush_mask = 0;
+
+ DBG("");
+
+ ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx);
+ ctl->ops.setup_flush(ctl, flush_mask);
+
+ DBG("Flushing CTL_ID %d, flush_mask %x, INTF %d",
+ ctl->idx, flush_mask, intf->idx);
+}
+
static void sde_encoder_phys_vid_mode_set(
struct sde_encoder_phys *phys_enc,
struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ struct drm_display_mode *adj_mode)
{
- mode = adjusted_mode;
- phys_enc->cached_mode = *adjusted_mode;
-
- DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
- mode->base.id, mode->name, mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start, mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start, mode->vsync_end, mode->vtotal,
- mode->type, mode->flags);
+ phys_enc->cached_mode = *adj_mode;
+ DBG("intf %d, caching mode:", phys_enc->hw_intf->idx);
+ drm_mode_debug_printmodeline(adj_mode);
}
static void sde_encoder_phys_vid_setup_timing_engine(
@@ -249,56 +281,113 @@ static void sde_encoder_phys_vid_setup_timing_engine(
programmable_fetch_config(phys_enc, &p);
}
-static void sde_encoder_phys_vid_wait_for_vblank(
- struct sde_encoder_phys_vid *vid_enc)
+static void sde_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
{
- DBG("");
- mdp_irq_wait(vid_enc->base.mdp_kms, vid_enc->vblank_irq.irqmask);
+ struct sde_encoder_phys_vid *vid_enc = arg;
+ struct sde_encoder_phys *phys_enc = &vid_enc->base;
+
+ phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent);
+
+ /* signal VBLANK completion */
+ complete_all(&vid_enc->vblank_complete);
}
-static void sde_encoder_phys_vid_vblank_irq(struct mdp_irq *irq,
- uint32_t irqstatus)
+static int sde_encoder_phys_vid_register_irq(struct sde_encoder_phys *phys_enc)
{
struct sde_encoder_phys_vid *vid_enc =
- container_of(irq, struct sde_encoder_phys_vid,
- vblank_irq);
- struct sde_encoder_phys *phys_enc = &vid_enc->base;
+ to_sde_encoder_phys_vid(phys_enc);
+ struct sde_irq_callback irq_cb;
+ int ret = 0;
- phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent);
+ vid_enc->irq_idx = sde_irq_idx_lookup(phys_enc->sde_kms,
+ SDE_IRQ_TYPE_INTF_VSYNC, phys_enc->hw_intf->idx);
+ if (vid_enc->irq_idx < 0) {
+ DRM_ERROR(
+ "Failed to lookup IRQ index for INTF_VSYNC with intf=%d\n",
+ phys_enc->hw_intf->idx);
+ return -EINVAL;
+ }
+
+ irq_cb.func = sde_encoder_phys_vid_vblank_irq;
+ irq_cb.arg = vid_enc;
+ ret = sde_register_irq_callback(phys_enc->sde_kms, vid_enc->irq_idx,
+ &irq_cb);
+ if (ret) {
+ DRM_ERROR("Failed to register IRQ callback INTF_VSYNC\n");
+ return ret;
+ }
+
+ ret = sde_enable_irq(phys_enc->sde_kms, &vid_enc->irq_idx, 1);
+ if (ret) {
+ DRM_ERROR(
+ "Failed to enable IRQ for INTF_VSYNC, intf %d, irq_idx=%d\n",
+ phys_enc->hw_intf->idx,
+ vid_enc->irq_idx);
+ vid_enc->irq_idx = -EINVAL;
+
+ /* Unregister callback on IRQ enable failure */
+ sde_register_irq_callback(phys_enc->sde_kms, vid_enc->irq_idx,
+ NULL);
+ return ret;
+ }
+
+ DBG("Registered IRQ for intf %d, irq_idx=%d\n",
+ phys_enc->hw_intf->idx,
+ vid_enc->irq_idx);
+
+ return ret;
}
-static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc)
+static int sde_encoder_phys_vid_unregister_irq(
+ struct sde_encoder_phys *phys_enc)
{
struct sde_encoder_phys_vid *vid_enc =
- to_sde_encoder_phys_vid(phys_enc);
- unsigned long lock_flags;
+ to_sde_encoder_phys_vid(phys_enc);
- DBG("");
+ sde_register_irq_callback(phys_enc->sde_kms, vid_enc->irq_idx, NULL);
+ sde_disable_irq(phys_enc->sde_kms, &vid_enc->irq_idx, 1);
- if (WARN_ON(phys_enc->enabled))
- return;
+ DBG("Un-Register IRQ for intf %d, irq_idx=%d\n",
+ phys_enc->hw_intf->idx,
+ vid_enc->irq_idx);
+
+ return 0;
+}
+
+static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc)
+{
+ int ret = 0;
+
+ DBG("");
if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing))
return;
sde_encoder_phys_vid_setup_timing_engine(phys_enc);
- spin_lock_irqsave(&phys_enc->spin_lock, lock_flags);
- phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 1);
- spin_unlock_irqrestore(&phys_enc->spin_lock, lock_flags);
+ sde_encoder_phys_vid_flush_intf(phys_enc);
+
+ /* Register for interrupt unless we're the slave encoder */
+ if (sde_encoder_phys_vid_is_master(phys_enc))
+ ret = sde_encoder_phys_vid_register_irq(phys_enc);
- phys_enc->enabled = true;
+ if (!ret && !phys_enc->enabled) {
+ unsigned long lock_flags = 0;
- mdp_irq_register(phys_enc->mdp_kms, &vid_enc->vblank_irq);
- DBG("Registered IRQ for intf %d mask 0x%X", phys_enc->hw_intf->idx,
- vid_enc->vblank_irq.irqmask);
+ /* Now enable timing engine */
+ spin_lock_irqsave(&phys_enc->spin_lock, lock_flags);
+ phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 1);
+ spin_unlock_irqrestore(&phys_enc->spin_lock, lock_flags);
+
+ phys_enc->enabled = true;
+ }
}
static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc)
{
- struct sde_encoder_phys_vid *vid_enc =
- to_sde_encoder_phys_vid(phys_enc);
unsigned long lock_flags;
+ struct sde_encoder_phys_vid *vid_enc =
+ to_sde_encoder_phys_vid(phys_enc);
DBG("");
@@ -310,6 +399,7 @@ static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc)
spin_lock_irqsave(&phys_enc->spin_lock, lock_flags);
phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 0);
+ reinit_completion(&vid_enc->vblank_complete);
spin_unlock_irqrestore(&phys_enc->spin_lock, lock_flags);
/*
@@ -321,7 +411,7 @@ static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc)
* scanout buffer) don't latch properly..
*/
sde_encoder_phys_vid_wait_for_vblank(vid_enc);
- mdp_irq_unregister(phys_enc->mdp_kms, &vid_enc->vblank_irq);
+ sde_encoder_phys_vid_unregister_irq(phys_enc);
phys_enc->enabled = false;
}
@@ -361,7 +451,6 @@ struct sde_encoder_phys *sde_encoder_phys_vid_init(
{
struct sde_encoder_phys *phys_enc = NULL;
struct sde_encoder_phys_vid *vid_enc = NULL;
- u32 irq_mask = 0x8000000;
int ret = 0;
DBG("");
@@ -371,6 +460,9 @@ struct sde_encoder_phys *sde_encoder_phys_vid_init(
ret = -ENOMEM;
goto fail;
}
+ vid_enc->irq_idx = -EINVAL;
+ init_completion(&vid_enc->vblank_complete);
+
phys_enc = &vid_enc->base;
phys_enc->hw_intf =
@@ -390,9 +482,7 @@ struct sde_encoder_phys *sde_encoder_phys_vid_init(
sde_encoder_phys_vid_init_cbs(&phys_enc->phys_ops);
phys_enc->parent = parent;
phys_enc->parent_ops = parent_ops;
- phys_enc->mdp_kms = &sde_kms->base;
- vid_enc->vblank_irq.irq = sde_encoder_phys_vid_vblank_irq;
- vid_enc->vblank_irq.irqmask = irq_mask;
+ phys_enc->sde_kms = sde_kms;
spin_lock_init(&phys_enc->spin_lock);
DBG("Created sde_encoder_phys_vid for intf %d", phys_enc->hw_intf->idx);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 5273dd69410f..46972f2d5dfd 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -282,6 +282,10 @@ struct sde_wb_sub_blocks {
u32 maxlinewidth;
};
+struct sde_mdss_base_cfg {
+ SDE_HW_BLK_INFO;
+};
+
/* struct sde_mdp_cfg : MDP TOP-BLK instance info
* @id: index identifying this block
* @base: register base offset to mdss
@@ -411,6 +415,9 @@ struct sde_ad_cfg {
struct sde_mdss_cfg {
u32 hwversion;
+ u32 mdss_count;
+ struct sde_mdss_base_cfg mdss[MAX_BLOCKS];
+
u32 mdp_count;
struct sde_mdp_cfg mdp[MAX_BLOCKS];
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c
index 68736bece06b..7fb5a0616838 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c
@@ -146,6 +146,10 @@ static inline int set_cfg_1xx_init(struct sde_mdss_cfg *cfg)
/* Setup Register maps and defaults */
*cfg = (struct sde_mdss_cfg){
+ .mdss_count = 1,
+ .mdss = {
+ {.id = MDP_TOP, .base = 0x00000000, .features = 0}
+ },
.mdp_count = 1,
.mdp = {
{.id = MDP_TOP, .base = 0x00001000, .features = 0,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
new file mode 100644
index 000000000000..99aa2e59dd85
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
@@ -0,0 +1,969 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/slab.h>
+
+#include "sde_kms.h"
+#include "sde_hw_interrupts.h"
+#include "sde_hw_mdp_util.h"
+#include "sde_hw_mdss.h"
+
+/**
+ * Register offsets in MDSS register file for the interrupt registers
+ * w.r.t. to the MDSS base
+ */
+#define HW_INTR_STATUS 0x0010
+#define MDP_SSPP_TOP0_OFF 0x1000
+#define MDP_INTF_0_OFF 0x6B000
+#define MDP_INTF_1_OFF 0x6B800
+#define MDP_INTF_2_OFF 0x6C000
+#define MDP_INTF_3_OFF 0x6C800
+#define MDP_INTF_4_OFF 0x6D000
+
+/**
+ * WB interrupt status bit definitions
+ */
+#define SDE_INTR_WB_0_DONE BIT(0)
+#define SDE_INTR_WB_1_DONE BIT(1)
+#define SDE_INTR_WB_2_DONE BIT(4)
+
+/**
+ * WDOG timer interrupt status bit definitions
+ */
+#define SDE_INTR_WD_TIMER_0_DONE BIT(2)
+#define SDE_INTR_WD_TIMER_1_DONE BIT(3)
+#define SDE_INTR_WD_TIMER_2_DONE BIT(5)
+#define SDE_INTR_WD_TIMER_3_DONE BIT(6)
+#define SDE_INTR_WD_TIMER_4_DONE BIT(7)
+
+/**
+ * Pingpong interrupt status bit definitions
+ */
+#define SDE_INTR_PING_PONG_0_DONE BIT(8)
+#define SDE_INTR_PING_PONG_1_DONE BIT(9)
+#define SDE_INTR_PING_PONG_2_DONE BIT(10)
+#define SDE_INTR_PING_PONG_3_DONE BIT(11)
+#define SDE_INTR_PING_PONG_0_RD_PTR BIT(12)
+#define SDE_INTR_PING_PONG_1_RD_PTR BIT(13)
+#define SDE_INTR_PING_PONG_2_RD_PTR BIT(14)
+#define SDE_INTR_PING_PONG_3_RD_PTR BIT(15)
+#define SDE_INTR_PING_PONG_0_WR_PTR BIT(16)
+#define SDE_INTR_PING_PONG_1_WR_PTR BIT(17)
+#define SDE_INTR_PING_PONG_2_WR_PTR BIT(18)
+#define SDE_INTR_PING_PONG_3_WR_PTR BIT(19)
+#define SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE BIT(20)
+#define SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE BIT(21)
+#define SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE BIT(22)
+#define SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE BIT(23)
+
+/**
+ * Interface interrupt status bit definitions
+ */
+#define SDE_INTR_INTF_0_UNDERRUN BIT(24)
+#define SDE_INTR_INTF_1_UNDERRUN BIT(26)
+#define SDE_INTR_INTF_2_UNDERRUN BIT(28)
+#define SDE_INTR_INTF_3_UNDERRUN BIT(30)
+#define SDE_INTR_INTF_0_VSYNC BIT(25)
+#define SDE_INTR_INTF_1_VSYNC BIT(27)
+#define SDE_INTR_INTF_2_VSYNC BIT(29)
+#define SDE_INTR_INTF_3_VSYNC BIT(31)
+
+/**
+ * Pingpong Secondary interrupt status bit definitions
+ */
+#define SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE BIT(0)
+#define SDE_INTR_PING_PONG_S0_WR_PTR BIT(4)
+#define SDE_INTR_PING_PONG_S0_RD_PTR BIT(8)
+#define SDE_INTR_PING_PONG_S0_TEAR_DETECTED BIT(22)
+#define SDE_INTR_PING_PONG_S0_TE_DETECTED BIT(28)
+
+/**
+ * Pingpong TEAR detection interrupt status bit definitions
+ */
+#define SDE_INTR_PING_PONG_0_TEAR_DETECTED BIT(16)
+#define SDE_INTR_PING_PONG_1_TEAR_DETECTED BIT(17)
+#define SDE_INTR_PING_PONG_2_TEAR_DETECTED BIT(18)
+#define SDE_INTR_PING_PONG_3_TEAR_DETECTED BIT(19)
+
+/**
+ * Pingpong TE detection interrupt status bit definitions
+ */
+#define SDE_INTR_PING_PONG_0_TE_DETECTED BIT(24)
+#define SDE_INTR_PING_PONG_1_TE_DETECTED BIT(25)
+#define SDE_INTR_PING_PONG_2_TE_DETECTED BIT(26)
+#define SDE_INTR_PING_PONG_3_TE_DETECTED BIT(27)
+
+/**
+ * Concurrent WB overflow interrupt status bit definitions
+ */
+#define SDE_INTR_CWB_2_OVERFLOW BIT(14)
+#define SDE_INTR_CWB_3_OVERFLOW BIT(15)
+
+/**
+ * Histogram VIG done interrupt status bit definitions
+ */
+#define SDE_INTR_HIST_VIG_0_DONE BIT(0)
+#define SDE_INTR_HIST_VIG_1_DONE BIT(4)
+#define SDE_INTR_HIST_VIG_2_DONE BIT(8)
+#define SDE_INTR_HIST_VIG_3_DONE BIT(10)
+
+/**
+ * Histogram VIG reset Sequence done interrupt status bit definitions
+ */
+#define SDE_INTR_HIST_VIG_0_RSTSEQ_DONE BIT(1)
+#define SDE_INTR_HIST_VIG_1_RSTSEQ_DONE BIT(5)
+#define SDE_INTR_HIST_VIG_2_RSTSEQ_DONE BIT(9)
+#define SDE_INTR_HIST_VIG_3_RSTSEQ_DONE BIT(11)
+
+/**
+ * Histogram DSPP done interrupt status bit definitions
+ */
+#define SDE_INTR_HIST_DSPP_0_DONE BIT(12)
+#define SDE_INTR_HIST_DSPP_1_DONE BIT(16)
+#define SDE_INTR_HIST_DSPP_2_DONE BIT(20)
+#define SDE_INTR_HIST_DSPP_3_DONE BIT(22)
+
+/**
+ * Histogram DSPP reset Sequence done interrupt status bit definitions
+ */
+#define SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE BIT(13)
+#define SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE BIT(17)
+#define SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE BIT(21)
+#define SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE BIT(23)
+
+/**
+ * INTF interrupt status bit definitions
+ */
+#define SDE_INTR_VIDEO_INTO_STATIC BIT(0)
+#define SDE_INTR_VIDEO_OUTOF_STATIC BIT(1)
+#define SDE_INTR_DSICMD_0_INTO_STATIC BIT(2)
+#define SDE_INTR_DSICMD_0_OUTOF_STATIC BIT(3)
+#define SDE_INTR_DSICMD_1_INTO_STATIC BIT(4)
+#define SDE_INTR_DSICMD_1_OUTOF_STATIC BIT(5)
+#define SDE_INTR_DSICMD_2_INTO_STATIC BIT(6)
+#define SDE_INTR_DSICMD_2_OUTOF_STATIC BIT(7)
+#define SDE_INTR_PROG_LINE BIT(8)
+
+/**
+ * struct sde_intr_reg - array of SDE register sets
+ * @clr_off: offset to CLEAR reg
+ * @en_off: offset to ENABLE reg
+ * @status_off: offset to STATUS reg
+ */
+struct sde_intr_reg {
+ u32 clr_off;
+ u32 en_off;
+ u32 status_off;
+};
+
+/**
+ * struct sde_irq_type - maps each irq with i/f
+ * @intr_type: type of interrupt listed in sde_intr_type
+ * @instance_idx: instance index of the associated HW block in SDE
+ * @irq_mask: corresponding bit in the interrupt status reg
+ * @reg_idx: which reg set to use
+ */
+struct sde_irq_type {
+ u32 intr_type;
+ u32 instance_idx;
+ u32 irq_mask;
+ u32 reg_idx;
+};
+
+/**
+ * List of SDE interrupt registers
+ */
+static const struct sde_intr_reg sde_intr_set[] = {
+ {
+ MDP_SSPP_TOP0_OFF+INTR_CLEAR,
+ MDP_SSPP_TOP0_OFF+INTR_EN,
+ MDP_SSPP_TOP0_OFF+INTR_STATUS
+ },
+ {
+ MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
+ MDP_SSPP_TOP0_OFF+INTR2_EN,
+ MDP_SSPP_TOP0_OFF+INTR2_STATUS
+ },
+ {
+ MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
+ MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
+ MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
+ },
+ {
+ MDP_INTF_0_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_0_OFF+INTF_INTR_EN,
+ MDP_INTF_0_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_1_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_1_OFF+INTF_INTR_EN,
+ MDP_INTF_1_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_2_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_2_OFF+INTF_INTR_EN,
+ MDP_INTF_2_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_3_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_3_OFF+INTF_INTR_EN,
+ MDP_INTF_3_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_4_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_4_OFF+INTF_INTR_EN,
+ MDP_INTF_4_OFF+INTF_INTR_STATUS
+ }
+};
+
+/**
+ * IRQ mapping table - used to look up an irq_idx in this table that has
+ * a matching interrupt type and instance index.
+ */
+static const struct sde_irq_type sde_irq_map[] = {
+ /* BEGIN MAP_RANGE: 0-31, INTR */
+ /* irq_idx: 0-3 */
+ { SDE_IRQ_TYPE_WB_ROT_COMP, WB_0, SDE_INTR_WB_0_DONE, 0},
+ { SDE_IRQ_TYPE_WB_ROT_COMP, WB_1, SDE_INTR_WB_1_DONE, 0},
+ { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_0, SDE_INTR_WD_TIMER_0_DONE, 0},
+ { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_1, SDE_INTR_WD_TIMER_1_DONE, 0},
+ /* irq_idx: 4-7 */
+ { SDE_IRQ_TYPE_WB_WFD_COMP, WB_2, SDE_INTR_WB_2_DONE, 0},
+ { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_2, SDE_INTR_WD_TIMER_2_DONE, 0},
+ { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_3, SDE_INTR_WD_TIMER_3_DONE, 0},
+ { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_4, SDE_INTR_WD_TIMER_4_DONE, 0},
+ /* irq_idx: 8-11 */
+ { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_0,
+ SDE_INTR_PING_PONG_0_DONE, 0},
+ { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_1,
+ SDE_INTR_PING_PONG_1_DONE, 0},
+ { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_2,
+ SDE_INTR_PING_PONG_2_DONE, 0},
+ { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_3,
+ SDE_INTR_PING_PONG_3_DONE, 0},
+ /* irq_idx: 12-15 */
+ { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_0,
+ SDE_INTR_PING_PONG_0_RD_PTR, 0},
+ { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_1,
+ SDE_INTR_PING_PONG_1_RD_PTR, 0},
+ { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_2,
+ SDE_INTR_PING_PONG_2_RD_PTR, 0},
+ { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_3,
+ SDE_INTR_PING_PONG_3_RD_PTR, 0},
+ /* irq_idx: 16-19 */
+ { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_0,
+ SDE_INTR_PING_PONG_0_WR_PTR, 0},
+ { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_1,
+ SDE_INTR_PING_PONG_1_WR_PTR, 0},
+ { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_2,
+ SDE_INTR_PING_PONG_2_WR_PTR, 0},
+ { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_3,
+ SDE_INTR_PING_PONG_3_WR_PTR, 0},
+ /* irq_idx: 20-23 */
+ { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_0,
+ SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE, 0},
+ { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_1,
+ SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE, 0},
+ { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_2,
+ SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE, 0},
+ { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_3,
+ SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE, 0},
+ /* irq_idx: 24-27 */
+ { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_0, SDE_INTR_INTF_0_UNDERRUN, 0},
+ { SDE_IRQ_TYPE_INTF_VSYNC, INTF_0, SDE_INTR_INTF_0_VSYNC, 0},
+ { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_1, SDE_INTR_INTF_1_UNDERRUN, 0},
+ { SDE_IRQ_TYPE_INTF_VSYNC, INTF_1, SDE_INTR_INTF_1_VSYNC, 0},
+ /* irq_idx: 28-31 */
+ { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_2, SDE_INTR_INTF_2_UNDERRUN, 0},
+ { SDE_IRQ_TYPE_INTF_VSYNC, INTF_2, SDE_INTR_INTF_2_VSYNC, 0},
+ { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, SDE_INTR_INTF_3_UNDERRUN, 0},
+ { SDE_IRQ_TYPE_INTF_VSYNC, INTF_3, SDE_INTR_INTF_3_VSYNC, 0},
+
+ /* BEGIN MAP_RANGE: 32-64, INTR2 */
+ /* irq_idx: 32-35 */
+ { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_S0,
+ SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 36-39 */
+ { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_S0,
+ SDE_INTR_PING_PONG_S0_WR_PTR, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 40-43 */
+ { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0,
+ SDE_INTR_PING_PONG_S0_RD_PTR, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 44-47 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_2, SDE_INTR_CWB_2_OVERFLOW, 1},
+ { SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_3, SDE_INTR_CWB_3_OVERFLOW, 1},
+ /* irq_idx: 48-51 */
+ { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_0,
+ SDE_INTR_PING_PONG_0_TEAR_DETECTED, 1},
+ { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_1,
+ SDE_INTR_PING_PONG_1_TEAR_DETECTED, 1},
+ { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_2,
+ SDE_INTR_PING_PONG_2_TEAR_DETECTED, 1},
+ { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_3,
+ SDE_INTR_PING_PONG_3_TEAR_DETECTED, 1},
+ /* irq_idx: 52-55 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_S0,
+ SDE_INTR_PING_PONG_S0_TEAR_DETECTED, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 56-59 */
+ { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_0,
+ SDE_INTR_PING_PONG_0_TE_DETECTED, 1},
+ { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_1,
+ SDE_INTR_PING_PONG_1_TE_DETECTED, 1},
+ { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_2,
+ SDE_INTR_PING_PONG_2_TE_DETECTED, 1},
+ { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_3,
+ SDE_INTR_PING_PONG_3_TE_DETECTED, 1},
+ /* irq_idx: 60-63 */
+ { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_S0,
+ SDE_INTR_PING_PONG_S0_TE_DETECTED, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+
+ /* BEGIN MAP_RANGE: 64-95 HIST */
+ /* irq_idx: 64-67 */
+ { SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, SDE_INTR_HIST_VIG_0_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
+ SDE_INTR_HIST_VIG_0_RSTSEQ_DONE, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 68-71 */
+ { SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, SDE_INTR_HIST_VIG_1_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
+ SDE_INTR_HIST_VIG_1_RSTSEQ_DONE, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 68-71 */
+ { SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, SDE_INTR_HIST_VIG_2_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
+ SDE_INTR_HIST_VIG_2_RSTSEQ_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, SDE_INTR_HIST_VIG_3_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
+ SDE_INTR_HIST_VIG_3_RSTSEQ_DONE, 2},
+ /* irq_idx: 72-75 */
+ { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, SDE_INTR_HIST_DSPP_0_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
+ SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 76-79 */
+ { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, SDE_INTR_HIST_DSPP_1_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
+ SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 80-83 */
+ { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, SDE_INTR_HIST_DSPP_2_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
+ SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, SDE_INTR_HIST_DSPP_3_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
+ SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE, 2},
+ /* irq_idx: 84-87 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 88-91 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 92-95 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+
+ /* BEGIN MAP_RANGE: 96-127 INTF_0_INTR */
+ /* irq_idx: 96-99 */
+ { SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_0,
+ SDE_INTR_VIDEO_INTO_STATIC, 3},
+ { SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_0,
+ SDE_INTR_VIDEO_OUTOF_STATIC, 3},
+ { SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_0,
+ SDE_INTR_DSICMD_0_INTO_STATIC, 3},
+ { SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_0,
+ SDE_INTR_DSICMD_0_OUTOF_STATIC, 3},
+ /* irq_idx: 100-103 */
+ { SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_0,
+ SDE_INTR_DSICMD_1_INTO_STATIC, 3},
+ { SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_0,
+ SDE_INTR_DSICMD_1_OUTOF_STATIC, 3},
+ { SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_0,
+ SDE_INTR_DSICMD_2_INTO_STATIC, 3},
+ { SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_0,
+ SDE_INTR_DSICMD_2_OUTOF_STATIC, 3},
+ /* irq_idx: 104-107 */
+ { SDE_IRQ_TYPE_PROG_LINE, INTF_0, SDE_INTR_PROG_LINE, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 108-111 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 112-115 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 116-119 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 120-123 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 124-127 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+
+ /* BEGIN MAP_RANGE: 128-159 INTF_1_INTR */
+ /* irq_idx: 128-131 */
+ { SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_1,
+ SDE_INTR_VIDEO_INTO_STATIC, 4},
+ { SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_1,
+ SDE_INTR_VIDEO_OUTOF_STATIC, 4},
+ { SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_1,
+ SDE_INTR_DSICMD_0_INTO_STATIC, 4},
+ { SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_1,
+ SDE_INTR_DSICMD_0_OUTOF_STATIC, 4},
+ /* irq_idx: 132-135 */
+ { SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_1,
+ SDE_INTR_DSICMD_1_INTO_STATIC, 4},
+ { SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_1,
+ SDE_INTR_DSICMD_1_OUTOF_STATIC, 4},
+ { SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_1,
+ SDE_INTR_DSICMD_2_INTO_STATIC, 4},
+ { SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_1,
+ SDE_INTR_DSICMD_2_OUTOF_STATIC, 4},
+ /* irq_idx: 136-139 */
+ { SDE_IRQ_TYPE_PROG_LINE, INTF_1, SDE_INTR_PROG_LINE, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 140-143 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 144-147 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 148-151 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 152-155 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 156-159 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+
+ /* BEGIN MAP_RANGE: 160-191 INTF_2_INTR */
+ /* irq_idx: 160-163 */
+ { SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_2,
+ SDE_INTR_VIDEO_INTO_STATIC, 5},
+ { SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_2,
+ SDE_INTR_VIDEO_OUTOF_STATIC, 5},
+ { SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_2,
+ SDE_INTR_DSICMD_0_INTO_STATIC, 5},
+ { SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_2,
+ SDE_INTR_DSICMD_0_OUTOF_STATIC, 5},
+ /* irq_idx: 164-167 */
+ { SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_2,
+ SDE_INTR_DSICMD_1_INTO_STATIC, 5},
+ { SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_2,
+ SDE_INTR_DSICMD_1_OUTOF_STATIC, 5},
+ { SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_2,
+ SDE_INTR_DSICMD_2_INTO_STATIC, 5},
+ { SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_2,
+ SDE_INTR_DSICMD_2_OUTOF_STATIC, 5},
+ /* irq_idx: 168-171 */
+ { SDE_IRQ_TYPE_PROG_LINE, INTF_2, SDE_INTR_PROG_LINE, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 172-175 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 176-179 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 180-183 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 184-187 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 188-191 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+
+ /* BEGIN MAP_RANGE: 192-223 INTF_3_INTR */
+ /* irq_idx: 192-195 */
+ { SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_3,
+ SDE_INTR_VIDEO_INTO_STATIC, 6},
+ { SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_3,
+ SDE_INTR_VIDEO_OUTOF_STATIC, 6},
+ { SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_3,
+ SDE_INTR_DSICMD_0_INTO_STATIC, 6},
+ { SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_3,
+ SDE_INTR_DSICMD_0_OUTOF_STATIC, 6},
+ /* irq_idx: 196-199 */
+ { SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_3,
+ SDE_INTR_DSICMD_1_INTO_STATIC, 6},
+ { SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_3,
+ SDE_INTR_DSICMD_1_OUTOF_STATIC, 6},
+ { SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_3,
+ SDE_INTR_DSICMD_2_INTO_STATIC, 6},
+ { SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_3,
+ SDE_INTR_DSICMD_2_OUTOF_STATIC, 6},
+ /* irq_idx: 200-203 */
+ { SDE_IRQ_TYPE_PROG_LINE, INTF_3, SDE_INTR_PROG_LINE, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 204-207 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 208-211 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 212-215 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 216-219 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 220-223 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+
+ /* BEGIN MAP_RANGE: 224-255 INTF_4_INTR */
+ /* irq_idx: 224-227 */
+ { SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_4,
+ SDE_INTR_VIDEO_INTO_STATIC, 7},
+ { SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_4,
+ SDE_INTR_VIDEO_OUTOF_STATIC, 7},
+ { SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_4,
+ SDE_INTR_DSICMD_0_INTO_STATIC, 7},
+ { SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_4,
+ SDE_INTR_DSICMD_0_OUTOF_STATIC, 7},
+ /* irq_idx: 228-231 */
+ { SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_4,
+ SDE_INTR_DSICMD_1_INTO_STATIC, 7},
+ { SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_4,
+ SDE_INTR_DSICMD_1_OUTOF_STATIC, 7},
+ { SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_4,
+ SDE_INTR_DSICMD_2_INTO_STATIC, 7},
+ { SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_4,
+ SDE_INTR_DSICMD_2_OUTOF_STATIC, 7},
+ /* irq_idx: 232-235 */
+ { SDE_IRQ_TYPE_PROG_LINE, INTF_4, SDE_INTR_PROG_LINE, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 236-239 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 240-243 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 244-247 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 248-251 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 252-255 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+};
+
+static int sde_hw_intr_irqidx_lookup(enum sde_intr_type intr_type,
+ u32 instance_idx)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sde_irq_map); i++) {
+ if (intr_type == sde_irq_map[i].intr_type &&
+ instance_idx == sde_irq_map[i].instance_idx)
+ return i;
+ }
+
+ pr_debug("IRQ lookup fail!! intr_type=%d, instance_idx=%d\n",
+ intr_type, instance_idx);
+ return -EINVAL;
+}
+
+static void sde_hw_intr_set_mask(struct sde_hw_intr *intr, uint32_t reg_off,
+ uint32_t mask)
+{
+ SDE_REG_WRITE(&intr->hw, reg_off, mask);
+}
+
+static void sde_hw_intr_dispatch_irq(struct sde_hw_intr *intr,
+ void (*cbfunc)(void *, int),
+ void *arg)
+{
+ int reg_idx;
+ int irq_idx;
+ int start_idx;
+ int end_idx;
+ u32 irq_status;
+ unsigned long irq_flags;
+
+ /*
+ * The dispatcher will save the IRQ status before calling here.
+ * Now need to go through each IRQ status and find matching
+ * irq lookup index.
+ */
+ spin_lock_irqsave(&intr->status_lock, irq_flags);
+ for (reg_idx = 0; reg_idx < ARRAY_SIZE(sde_intr_set); reg_idx++) {
+ irq_status = intr->save_irq_status[reg_idx];
+
+ /*
+ * Each Interrupt register has a range of 32 indexes, and
+ * that is static for sde_irq_map.
+ */
+ start_idx = reg_idx * 32;
+ end_idx = start_idx + 32;
+
+ /*
+ * Search through matching intr status from irq map.
+ * start_idx and end_idx define the search range in
+ * the sde_irq_map.
+ */
+ for (irq_idx = start_idx;
+ (irq_idx < end_idx) && irq_status;
+ irq_idx++)
+ if ((irq_status & sde_irq_map[irq_idx].irq_mask) &&
+ (sde_irq_map[irq_idx].reg_idx == reg_idx)) {
+ /*
+ * Once a match on the irq mask is found, perform
+ * a callback to the given cbfunc. cbfunc takes
+ * care of clearing the interrupt status. If no
+ * cbfunc is provided, the interrupt is cleared
+ * here instead.
+ */
+ if (cbfunc)
+ cbfunc(arg, irq_idx);
+ else
+ intr->ops.clear_interrupt_status(
+ intr, irq_idx);
+
+ /*
+ * When the callback finishes, clear the matching
+ * mask from irq_status. Once irq_status is fully
+ * cleared, the search can be stopped.
+ */
+ irq_status &= ~sde_irq_map[irq_idx].irq_mask;
+ }
+ }
+ spin_unlock_irqrestore(&intr->status_lock, irq_flags);
+}
+
+static int sde_hw_intr_enable_irq(struct sde_hw_intr *intr, int irq_idx)
+{
+ int reg_idx;
+ unsigned long irq_flags;
+ const struct sde_intr_reg *reg;
+ const struct sde_irq_type *irq;
+ const char *dbgstr = NULL;
+ uint32_t cache_irq_mask;
+
+ if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(sde_irq_map)) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ irq = &sde_irq_map[irq_idx];
+ reg_idx = irq->reg_idx;
+ reg = &sde_intr_set[reg_idx];
+
+ spin_lock_irqsave(&intr->mask_lock, irq_flags);
+ cache_irq_mask = intr->cache_irq_mask[reg_idx];
+ if (cache_irq_mask & irq->irq_mask) {
+ dbgstr = "SDE IRQ already set:";
+ } else {
+ dbgstr = "SDE IRQ enabled:";
+
+ cache_irq_mask |= irq->irq_mask;
+ /* Cleaning any pending interrupt */
+ SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+ /* Enabling interrupts with the new mask */
+ SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+
+ intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+ }
+ spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
+
+ pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+ irq->irq_mask, cache_irq_mask);
+
+ return 0;
+}
+
+static int sde_hw_intr_disable_irq(struct sde_hw_intr *intr, int irq_idx)
+{
+ int reg_idx;
+ unsigned long irq_flags;
+ const struct sde_intr_reg *reg;
+ const struct sde_irq_type *irq;
+ const char *dbgstr = NULL;
+ uint32_t cache_irq_mask;
+
+ if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(sde_irq_map)) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ irq = &sde_irq_map[irq_idx];
+ reg_idx = irq->reg_idx;
+ reg = &sde_intr_set[reg_idx];
+
+ spin_lock_irqsave(&intr->mask_lock, irq_flags);
+ cache_irq_mask = intr->cache_irq_mask[reg_idx];
+ if ((cache_irq_mask & irq->irq_mask) == 0) {
+ dbgstr = "SDE IRQ is already cleared:";
+ } else {
+ dbgstr = "SDE IRQ mask disable:";
+
+ cache_irq_mask &= ~irq->irq_mask;
+ /* Disable interrupts based on the new mask */
+ SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+ /* Cleaning any pending interrupt */
+ SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+
+ intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+ }
+ spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
+
+ pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+ irq->irq_mask, cache_irq_mask);
+
+ return 0;
+}
+
+static int sde_hw_intr_clear_irqs(struct sde_hw_intr *intr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++)
+ SDE_REG_WRITE(&intr->hw, sde_intr_set[i].clr_off, 0xffffffff);
+
+ return 0;
+}
+
+static int sde_hw_intr_disable_irqs(struct sde_hw_intr *intr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++)
+ SDE_REG_WRITE(&intr->hw, sde_intr_set[i].en_off, 0x00000000);
+
+ return 0;
+}
+
+static int sde_hw_intr_get_valid_interrupts(struct sde_hw_intr *intr,
+ uint32_t *mask)
+{
+ *mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1
+ | IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP;
+ return 0;
+}
+
+static int sde_hw_intr_get_interrupt_sources(struct sde_hw_intr *intr,
+ uint32_t *sources)
+{
+ *sources = SDE_REG_READ(&intr->hw, HW_INTR_STATUS);
+ return 0;
+}
+
+static void sde_hw_intr_get_interrupt_statuses(struct sde_hw_intr *intr)
+{
+ int i;
+ u32 enable_mask;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&intr->status_lock, irq_flags);
+ for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++) {
+ /* Read interrupt status */
+ intr->save_irq_status[i] = SDE_REG_READ(&intr->hw,
+ sde_intr_set[i].status_off);
+
+ /* Read enable mask */
+ enable_mask = SDE_REG_READ(&intr->hw, sde_intr_set[i].en_off);
+
+ /* and clear the interrupt */
+ if (intr->save_irq_status[i])
+ SDE_REG_WRITE(&intr->hw, sde_intr_set[i].clr_off,
+ intr->save_irq_status[i]);
+
+ /* Finally update IRQ status based on enable mask */
+ intr->save_irq_status[i] &= enable_mask;
+ }
+ spin_unlock_irqrestore(&intr->status_lock, irq_flags);
+}
+
+static void sde_hw_intr_clear_interrupt_status(struct sde_hw_intr *intr,
+ int irq_idx)
+{
+ int reg_idx;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&intr->mask_lock, irq_flags);
+
+ reg_idx = sde_irq_map[irq_idx].reg_idx;
+ SDE_REG_WRITE(&intr->hw, sde_intr_set[reg_idx].clr_off,
+ sde_irq_map[irq_idx].irq_mask);
+
+ spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
+}
+
+
+static void __setup_intr_ops(struct sde_hw_intr_ops *ops)
+{
+ ops->set_mask = sde_hw_intr_set_mask;
+ ops->irq_idx_lookup = sde_hw_intr_irqidx_lookup;
+ ops->enable_irq = sde_hw_intr_enable_irq;
+ ops->disable_irq = sde_hw_intr_disable_irq;
+ ops->dispatch_irqs = sde_hw_intr_dispatch_irq;
+ ops->clear_all_irqs = sde_hw_intr_clear_irqs;
+ ops->disable_all_irqs = sde_hw_intr_disable_irqs;
+ ops->get_valid_interrupts = sde_hw_intr_get_valid_interrupts;
+ ops->get_interrupt_sources = sde_hw_intr_get_interrupt_sources;
+ ops->get_interrupt_statuses = sde_hw_intr_get_interrupt_statuses;
+ ops->clear_interrupt_status = sde_hw_intr_clear_interrupt_status;
+}
+
+static struct sde_mdss_base_cfg *__intr_offset(struct sde_mdss_cfg *m,
+ void __iomem *addr, struct sde_hw_blk_reg_map *hw)
+{
+ if (m->mdp_count == 0)
+ return NULL;
+
+ hw->base_off = addr;
+ hw->blk_off = m->mdss[0].base;
+ hw->hwversion = m->hwversion;
+ return &m->mdss[0];
+}
+
+struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
+ struct sde_mdss_cfg *m)
+{
+ struct sde_hw_intr *intr = kzalloc(sizeof(*intr), GFP_KERNEL);
+ struct sde_mdss_base_cfg *cfg;
+
+ if (!intr)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = __intr_offset(m, addr, &intr->hw);
+ if (!cfg) {
+ kfree(intr);
+ return ERR_PTR(-EINVAL);
+ }
+ __setup_intr_ops(&intr->ops);
+
+ intr->irq_idx_tbl_size = ARRAY_SIZE(sde_irq_map);
+
+ intr->cache_irq_mask = kcalloc(ARRAY_SIZE(sde_intr_set), sizeof(u32),
+ GFP_KERNEL);
+ if (intr->cache_irq_mask == NULL) {
+ kfree(intr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ intr->save_irq_status = kcalloc(ARRAY_SIZE(sde_intr_set), sizeof(u32),
+ GFP_KERNEL);
+ if (intr->save_irq_status == NULL) {
+ kfree(intr->cache_irq_mask);
+ kfree(intr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spin_lock_init(&intr->mask_lock);
+ spin_lock_init(&intr->status_lock);
+
+ return intr;
+}
+
+void sde_hw_intr_destroy(struct sde_hw_intr *intr)
+{
+ if (intr) {
+ kfree(intr->cache_irq_mask);
+ kfree(intr->save_irq_status);
+ kfree(intr);
+ }
+}
+
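
As a usage illustration (not part of the patch), resolving and enabling the VSYNC interrupt of INTF_1 through the ops table would look roughly like this; per the map above the lookup lands on entry 27, i.e. register set 0 with mask SDE_INTR_INTF_1_VSYNC:

    /* 'intr' is assumed to come from sde_hw_intr_init() */
    int irq_idx = intr->ops.irq_idx_lookup(SDE_IRQ_TYPE_INTF_VSYNC, INTF_1);

    /* irq_idx == 27 here: sde_irq_map[27] has reg_idx 0 (MDP_SSPP_TOP0 INTR)
     * and irq_mask SDE_INTR_INTF_1_VSYNC, i.e. BIT(27)
     */
    if (irq_idx >= 0)
            intr->ops.enable_irq(intr, irq_idx);  /* clears then unmasks the bit */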
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
new file mode 100644
index 000000000000..0ddb1e78a953
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
@@ -0,0 +1,245 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_INTERRUPTS_H
+#define _SDE_HW_INTERRUPTS_H
+
+#include <linux/types.h>
+
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_mdp_util.h"
+#include "sde_hw_mdss.h"
+
+#define IRQ_SOURCE_MDP BIT(0)
+#define IRQ_SOURCE_DSI0 BIT(4)
+#define IRQ_SOURCE_DSI1 BIT(5)
+#define IRQ_SOURCE_HDMI BIT(8)
+#define IRQ_SOURCE_EDP BIT(12)
+#define IRQ_SOURCE_MHL BIT(16)
+
+/**
+ * sde_intr_type - HW Interrupt Type
+ * @SDE_IRQ_TYPE_WB_ROT_COMP: WB rotator done
+ * @SDE_IRQ_TYPE_WB_WFD_COMP: WB WFD done
+ * @SDE_IRQ_TYPE_PING_PONG_COMP: PingPong done
+ * @SDE_IRQ_TYPE_PING_PONG_RD_PTR: PingPong read pointer
+ * @SDE_IRQ_TYPE_PING_PONG_WR_PTR: PingPong write pointer
+ * @SDE_IRQ_TYPE_PING_PONG_AUTO_REF: PingPong auto refresh
+ * @SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK: PingPong Tear check
+ * @SDE_IRQ_TYPE_PING_PONG_TE_CHECK: PingPong TE detection
+ * @SDE_IRQ_TYPE_INTF_UNDER_RUN: INTF underrun
+ * @SDE_IRQ_TYPE_INTF_VSYNC: INTF VSYNC
+ * @SDE_IRQ_TYPE_CWB_OVERFLOW: Concurrent WB overflow
+ * @SDE_IRQ_TYPE_HIST_VIG_DONE: VIG Histogram done
+ * @SDE_IRQ_TYPE_HIST_VIG_RSTSEQ: VIG Histogram reset
+ * @SDE_IRQ_TYPE_HIST_DSPP_DONE: DSPP Histogram done
+ * @SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ: DSPP Histogram reset
+ * @SDE_IRQ_TYPE_WD_TIMER: Watchdog timer
+ * @SDE_IRQ_TYPE_SFI_VIDEO_IN: Video static frame INTR into static
+ * @SDE_IRQ_TYPE_SFI_VIDEO_OUT: Video static frame INTR out-of static
+ * @SDE_IRQ_TYPE_SFI_CMD_0_IN: DSI CMD0 static frame INTR into static
+ * @SDE_IRQ_TYPE_SFI_CMD_0_OUT: DSI CMD0 static frame INTR out-of static
+ * @SDE_IRQ_TYPE_SFI_CMD_1_IN: DSI CMD1 static frame INTR into static
+ * @SDE_IRQ_TYPE_SFI_CMD_1_OUT: DSI CMD1 static frame INTR out-of static
+ * @SDE_IRQ_TYPE_SFI_CMD_2_IN: DSI CMD2 static frame INTR into static
+ * @SDE_IRQ_TYPE_SFI_CMD_2_OUT: DSI CMD2 static frame INTR out-of static
+ * @SDE_IRQ_TYPE_PROG_LINE: Programmable Line interrupt
+ * @SDE_IRQ_TYPE_RESERVED: Reserved for expansion
+ */
+enum sde_intr_type {
+ SDE_IRQ_TYPE_WB_ROT_COMP,
+ SDE_IRQ_TYPE_WB_WFD_COMP,
+ SDE_IRQ_TYPE_PING_PONG_COMP,
+ SDE_IRQ_TYPE_PING_PONG_RD_PTR,
+ SDE_IRQ_TYPE_PING_PONG_WR_PTR,
+ SDE_IRQ_TYPE_PING_PONG_AUTO_REF,
+ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK,
+ SDE_IRQ_TYPE_PING_PONG_TE_CHECK,
+ SDE_IRQ_TYPE_INTF_UNDER_RUN,
+ SDE_IRQ_TYPE_INTF_VSYNC,
+ SDE_IRQ_TYPE_CWB_OVERFLOW,
+ SDE_IRQ_TYPE_HIST_VIG_DONE,
+ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ,
+ SDE_IRQ_TYPE_HIST_DSPP_DONE,
+ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ,
+ SDE_IRQ_TYPE_WD_TIMER,
+ SDE_IRQ_TYPE_SFI_VIDEO_IN,
+ SDE_IRQ_TYPE_SFI_VIDEO_OUT,
+ SDE_IRQ_TYPE_SFI_CMD_0_IN,
+ SDE_IRQ_TYPE_SFI_CMD_0_OUT,
+ SDE_IRQ_TYPE_SFI_CMD_1_IN,
+ SDE_IRQ_TYPE_SFI_CMD_1_OUT,
+ SDE_IRQ_TYPE_SFI_CMD_2_IN,
+ SDE_IRQ_TYPE_SFI_CMD_2_OUT,
+ SDE_IRQ_TYPE_PROG_LINE,
+ SDE_IRQ_TYPE_RESERVED,
+};
+
+struct sde_hw_intr;
+
+/**
+ * Interrupt operations.
+ */
+struct sde_hw_intr_ops {
+ /**
+ * set_mask - Programs the given interrupt register with the
+ * given interrupt mask. Register value will get overwritten.
+ * @intr: HW interrupt handle
+ * @reg_off: MDSS HW register offset
+ * @irqmask: IRQ mask value
+ */
+ void (*set_mask)(
+ struct sde_hw_intr *intr,
+ uint32_t reg,
+ uint32_t irqmask);
+
+ /**
+ * irq_idx_lookup - Lookup IRQ index on the HW interrupt type
+ * Used for all irq related ops
+ * @intr_type: Interrupt type defined in sde_intr_type
+ * @instance_idx: HW interrupt block instance
+ * @return: irq_idx or -EINVAL for lookup fail
+ */
+ int (*irq_idx_lookup)(
+ enum sde_intr_type intr_type,
+ u32 instance_idx);
+
+ /**
+ * enable_irq - Enable IRQ based on lookup IRQ index
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index return from irq_idx_lookup
+ * @return: 0 for success, otherwise failure
+ */
+ int (*enable_irq)(
+ struct sde_hw_intr *intr,
+ int irq_idx);
+
+ /**
+ * disable_irq - Disable IRQ based on lookup IRQ index
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index return from irq_idx_lookup
+ * @return: 0 for success, otherwise failure
+ */
+ int (*disable_irq)(
+ struct sde_hw_intr *intr,
+ int irq_idx);
+
+ /**
+ * clear_all_irqs - Clears all the interrupts (i.e. acknowledges
+ * any asserted IRQs). Useful during reset.
+ * @intr: HW interrupt handle
+ * @return: 0 for success, otherwise failure
+ */
+ int (*clear_all_irqs)(
+ struct sde_hw_intr *intr);
+
+ /**
+ * disable_all_irqs - Disables all the interrupts. Useful during reset.
+ * @intr: HW interrupt handle
+ * @return: 0 for success, otherwise failure
+ */
+ int (*disable_all_irqs)(
+ struct sde_hw_intr *intr);
+
+ /**
+ * dispatch_irqs - IRQ dispatcher will call the given callback
+ * function when a matching interrupt status bit is
+ * found in the irq mapping table.
+ * @intr: HW interrupt handle
+ * @cbfunc: Callback function pointer
+ * @arg: Argument to pass back during callback
+ */
+ void (*dispatch_irqs)(
+ struct sde_hw_intr *intr,
+ void (*cbfunc)(void *arg, int irq_idx),
+ void *arg);
+
+ /**
+ * get_interrupt_statuses - Gets and stores the values of all interrupt
+ * status registers that are currently fired.
+ * @intr: HW interrupt handle
+ */
+ void (*get_interrupt_statuses)(
+ struct sde_hw_intr *intr);
+
+ /**
+ * clear_interrupt_status - Clears HW interrupt status based on given
+ * lookup IRQ index.
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index return from irq_idx_lookup
+ */
+ void (*clear_interrupt_status)(
+ struct sde_hw_intr *intr,
+ int irq_idx);
+
+ /**
+ * get_valid_interrupts - Gets a mask of all valid interrupt sources
+ * within SDE. These are actually status bits
+ * within interrupt registers that specify the
+ * source of the interrupt in IRQs. For example,
+ * valid interrupt sources can be MDP, DSI,
+ * HDMI etc.
+ * @intr: HW interrupt handle
+ * @mask: Returning the interrupt source MASK
+ * @return: 0 for success, otherwise failure
+ */
+ int (*get_valid_interrupts)(
+ struct sde_hw_intr *intr,
+ uint32_t *mask);
+
+ /**
+ * get_interrupt_sources - Gets the bitmask of the SDE interrupt
+ * sources that are currently fired.
+ * @intr: HW interrupt handle
+ * @sources: Returning the SDE interrupt source status bit mask
+ * @return: 0 for success, otherwise failure
+ */
+ int (*get_interrupt_sources)(
+ struct sde_hw_intr *intr,
+ uint32_t *sources);
+};
+
+/**
+ * struct sde_hw_intr: hw interrupts handling data structure
+ * @hw: virtual address mapping
+ * @ops: function pointer mapping for IRQ handling
+ * @cache_irq_mask: array of IRQ enable masks reg storage created during init
+ * @save_irq_status: array of IRQ status reg storage created during init
+ * @irq_idx_tbl_size: total number of irq_idx mapped in the hw_interrupts
+ * @mask_lock: spinlock for accessing IRQ mask
+ * @status_lock: spinlock for accessing IRQ status
+ */
+struct sde_hw_intr {
+ struct sde_hw_blk_reg_map hw;
+ struct sde_hw_intr_ops ops;
+ u32 *cache_irq_mask;
+ u32 *save_irq_status;
+ u32 irq_idx_tbl_size;
+ spinlock_t mask_lock;
+ spinlock_t status_lock;
+};
+
+/**
+ * sde_hw_intr_init(): Initializes the interrupts hw object
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
+ struct sde_mdss_cfg *m);
+
+/**
+ * sde_hw_intr_destroy(): Cleanup interrupts hw object
+ * @intr: pointer to interrupts hw object
+ */
+void sde_hw_intr_destroy(struct sde_hw_intr *intr);
+#endif
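
These ops are exercised by sde_irq.c further down in this patch. A rough lifecycle sketch, assuming mmio, catalog, my_irq_callback and my_arg are provided by the caller:

    struct sde_hw_intr *intr = sde_hw_intr_init(mmio, catalog);

    if (IS_ERR(intr))
            return PTR_ERR(intr);

    /* reset path: ack anything pending, then mask everything */
    intr->ops.clear_all_irqs(intr);
    intr->ops.disable_all_irqs(intr);

    /* top-half handler: latch the enabled status bits, then fan out */
    intr->ops.get_interrupt_statuses(intr);
    intr->ops.dispatch_irqs(intr, my_irq_callback, my_arg);  /* void (*)(void *, int) */

    sde_hw_intr_destroy(intr);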
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
index ed5bdf5e5327..075e78042f17 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
@@ -108,6 +108,7 @@ enum sde_pingpong {
PINGPONG_2,
PINGPONG_3,
PINGPONG_4,
+ PINGPONG_S0,
PINGPONG_MAX
};
@@ -154,6 +155,24 @@ enum sde_ad {
AD_MAX
};
+enum sde_cwb {
+ CWB_0 = 0x1,
+ CWB_1,
+ CWB_2,
+ CWB_3,
+ CWB_MAX
+};
+
+enum sde_wd_timer {
+ WD_TIMER_0 = 0x1,
+ WD_TIMER_1,
+ WD_TIMER_2,
+ WD_TIMER_3,
+ WD_TIMER_4,
+ WD_TIMER_5,
+ WD_TIMER_MAX
+};
+
/**
* MDP HW,Component order color map
*/
diff --git a/drivers/gpu/drm/msm/sde/sde_hwio.h b/drivers/gpu/drm/msm/sde/sde_hwio.h
index df0530ecfa34..2531463b654e 100644
--- a/drivers/gpu/drm/msm/sde/sde_hwio.h
+++ b/drivers/gpu/drm/msm/sde/sde_hwio.h
@@ -28,6 +28,9 @@
#define HIST_INTR_EN 0x01c
#define HIST_INTR_STATUS 0x020
#define HIST_INTR_CLEAR 0x024
+#define INTF_INTR_EN 0x1C0
+#define INTF_INTR_STATUS 0x1C4
+#define INTF_INTR_CLEAR 0x1C8
#define SPLIT_DISPLAY_EN 0x2F4
#define SPLIT_DISPLAY_UPPER_PIPE_CTRL 0x2F8
#define DSPP_IGC_COLOR0_RAM_LUTN 0x300
diff --git a/drivers/gpu/drm/msm/sde/sde_irq.c b/drivers/gpu/drm/msm/sde/sde_irq.c
index 7f87acb86c96..722845df3d0b 100644
--- a/drivers/gpu/drm/msm/sde/sde_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_irq.c
@@ -12,36 +12,263 @@
#include <linux/irqdomain.h>
#include <linux/irq.h>
+#include <linux/kthread.h>
#include "msm_drv.h"
#include "sde_kms.h"
-void sde_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
- uint32_t old_irqmask)
+static void sde_irq_callback_handler(void *arg, int irq_idx)
{
+ struct sde_kms *sde_kms = arg;
+ struct sde_irq *irq_obj = &sde_kms->irq_obj;
+
+ /*
+ * Perform registered function callback
+ */
+ if (irq_obj->irq_cb_tbl && irq_obj->irq_cb_tbl[irq_idx].func)
+ irq_obj->irq_cb_tbl[irq_idx].func(
+ irq_obj->irq_cb_tbl[irq_idx].arg,
+ irq_idx);
+
+ /*
+ * Clear pending interrupt status in HW.
+ * NOTE: sde_irq_callback_handler is protected by top-level
+ * spinlock, so it is safe to clear any interrupt status here.
+ */
+ sde_kms->hw_intr->ops.clear_interrupt_status(
+ sde_kms->hw_intr,
+ irq_idx);
+}
+
+static void sde_irq_intf_error_handler(void *arg, int irq_idx)
+{
+ DRM_ERROR("INTF underrun detected, irq_idx=%d\n", irq_idx);
+}
+
+void sde_set_irqmask(struct sde_kms *sde_kms, uint32_t reg, uint32_t irqmask)
+{
+ if (!sde_kms || !sde_kms->hw_intr ||
+ !sde_kms->hw_intr->ops.set_mask)
+ return;
+
+ sde_kms->hw_intr->ops.set_mask(sde_kms->hw_intr, reg, irqmask);
+}
+
+int sde_irq_idx_lookup(struct sde_kms *sde_kms, enum sde_intr_type intr_type,
+ u32 instance_idx)
+{
+ if (!sde_kms || !sde_kms->hw_intr ||
+ !sde_kms->hw_intr->ops.irq_idx_lookup)
+ return -EINVAL;
+
+ return sde_kms->hw_intr->ops.irq_idx_lookup(intr_type,
+ instance_idx);
+}
+
+int sde_enable_irq(struct sde_kms *sde_kms, int *irq_idxs, u32 irq_count)
+{
+ int i;
+ int ret = 0;
+
+ if (!sde_kms || !irq_idxs || !sde_kms->hw_intr ||
+ !sde_kms->hw_intr->ops.enable_irq)
+ return -EINVAL;
+
+ for (i = 0; i < irq_count; i++) {
+ ret = sde_kms->hw_intr->ops.enable_irq(
+ sde_kms->hw_intr,
+ irq_idxs[i]);
+ if (ret) {
+ DRM_ERROR("Fail to enable IRQ for irq_idx:%d\n",
+ irq_idxs[i]);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+int sde_disable_irq(struct sde_kms *sde_kms, int *irq_idxs, u32 irq_count)
+{
+ int i;
+ int ret = 0;
+
+ if (!sde_kms || !irq_idxs || !sde_kms->hw_intr ||
+ !sde_kms->hw_intr->ops.disable_irq)
+ return -EINVAL;
+
+ for (i = 0; i < irq_count; i++) {
+ ret = sde_kms->hw_intr->ops.disable_irq(
+ sde_kms->hw_intr,
+ irq_idxs[i]);
+ if (ret) {
+ DRM_ERROR("Fail to disable IRQ for irq_idx:%d\n",
+ irq_idxs[i]);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+int sde_register_irq_callback(struct sde_kms *sde_kms, int irq_idx,
+ struct sde_irq_callback *register_irq_cb)
+{
+ struct sde_irq_callback *irq_cb_tbl;
+ unsigned long irq_flags;
+
+ /*
+ * We allow NULL register_irq_cb as input for callback registration
+ */
+ if (!sde_kms || !sde_kms->irq_obj.irq_cb_tbl)
+ return -EINVAL;
+
+ if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) {
+ DRM_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ irq_cb_tbl = sde_kms->irq_obj.irq_cb_tbl;
+ spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
+ irq_cb_tbl[irq_idx].func = register_irq_cb ?
+ register_irq_cb->func : NULL;
+ irq_cb_tbl[irq_idx].arg = register_irq_cb ?
+ register_irq_cb->arg : NULL;
+ spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
+
+ return 0;
+}
+
+void sde_clear_all_irqs(struct sde_kms *sde_kms)
+{
+ if (!sde_kms || !sde_kms->hw_intr ||
+ !sde_kms->hw_intr->ops.clear_all_irqs)
+ return;
+
+ sde_kms->hw_intr->ops.clear_all_irqs(sde_kms->hw_intr);
+}
+
+void sde_disable_all_irqs(struct sde_kms *sde_kms)
+{
+ if (!sde_kms || !sde_kms->hw_intr ||
+ !sde_kms->hw_intr->ops.disable_all_irqs)
+ return;
+
+ sde_kms->hw_intr->ops.disable_all_irqs(sde_kms->hw_intr);
}
void sde_irq_preinstall(struct msm_kms *kms)
{
+ struct sde_kms *sde_kms = to_sde_kms(kms);
+
+ sde_enable(sde_kms);
+ sde_clear_all_irqs(sde_kms);
+ sde_disable_all_irqs(sde_kms);
+ sde_disable(sde_kms);
+
+ spin_lock_init(&sde_kms->irq_obj.cb_lock);
+
+ /* Allocate the IRQ callback table covering every possible irq_idx */
+ sde_kms->irq_obj.total_irqs = sde_kms->hw_intr->irq_idx_tbl_size;
+ sde_kms->irq_obj.irq_cb_tbl = kcalloc(sde_kms->irq_obj.total_irqs,
+ sizeof(struct sde_irq_callback), GFP_KERNEL);
+ if (!sde_kms->irq_obj.irq_cb_tbl)
+ DRM_ERROR("Fail to allocate memory of IRQ callback list\n");
}
int sde_irq_postinstall(struct msm_kms *kms)
{
+ struct sde_kms *sde_kms = to_sde_kms(kms);
+ struct sde_irq_callback irq_cb;
+ int irq_idx;
+ int i;
+
+ irq_cb.func = sde_irq_intf_error_handler;
+ irq_cb.arg = sde_kms;
+
+ /* Register interface underrun callback */
+ sde_enable(sde_kms);
+ for (i = 0; i < sde_kms->catalog->intf_count; i++) {
+ irq_idx = sde_irq_idx_lookup(sde_kms,
+ SDE_IRQ_TYPE_INTF_UNDER_RUN, i+INTF_0);
+ sde_register_irq_callback(sde_kms, irq_idx, &irq_cb);
+ sde_enable_irq(sde_kms, &irq_idx, 1);
+ }
+ sde_disable(sde_kms);
+
return 0;
}
void sde_irq_uninstall(struct msm_kms *kms)
{
+ struct sde_kms *sde_kms = to_sde_kms(kms);
+
+ sde_enable(sde_kms);
+ sde_clear_all_irqs(sde_kms);
+ sde_disable_all_irqs(sde_kms);
+ sde_disable(sde_kms);
+
+ kfree(sde_kms->irq_obj.irq_cb_tbl);
+}
+
+static void _sde_irq_mdp_done(struct sde_kms *sde_kms)
+{
+ /*
+ * Read the interrupt status from all sources. The statuses are
+ * stored within hw_intr, and the hardware status is cleared as
+ * part of the read. An individual interrupt status bit is only
+ * stored if that interrupt is enabled.
+ */
+ sde_kms->hw_intr->ops.get_interrupt_statuses(sde_kms->hw_intr);
+
+ /*
+ * Dispatch to the HW driver to find out which interrupts fired.
+ * For every match, the HW driver calls sde_irq_callback_handler
+ * with the irq_idx from the lookup table; that handler invokes the
+ * registered callback and clears the interrupt status once the
+ * callback has finished.
+ */
+ sde_kms->hw_intr->ops.dispatch_irqs(
+ sde_kms->hw_intr,
+ sde_irq_callback_handler,
+ sde_kms);
}
irqreturn_t sde_irq(struct msm_kms *kms)
{
+ struct sde_kms *sde_kms = to_sde_kms(kms);
+ u32 interrupts;
+
+ sde_kms->hw_intr->ops.get_interrupt_sources(sde_kms->hw_intr,
+ &interrupts);
+
+ /*
+ * Handle the MDP interrupt
+ */
+ if (interrupts & IRQ_SOURCE_MDP) {
+ interrupts &= ~IRQ_SOURCE_MDP;
+ _sde_irq_mdp_done(sde_kms);
+ }
+
+ /*
+ * Route all other interrupt sources to their external drivers
+ */
+ while (interrupts) {
+ irq_hw_number_t hwirq = fls(interrupts) - 1;
+
+ generic_handle_irq(irq_find_mapping(
+ sde_kms->irqcontroller.domain, hwirq));
+ interrupts &= ~(1 << hwirq);
+ }
+
return IRQ_HANDLED;
}
int sde_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
- return 0;
+ return sde_crtc_vblank(crtc);
}
void sde_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
@@ -76,6 +303,13 @@ static int sde_hw_irqdomain_map(struct irq_domain *d,
unsigned int irq, irq_hw_number_t hwirq)
{
struct sde_kms *sde_kms = d->host_data;
+ uint32_t valid_irqs;
+
+ sde_kms->hw_intr->ops.get_valid_interrupts(sde_kms->hw_intr,
+ &valid_irqs);
+
+ if (!(valid_irqs & (1 << hwirq)))
+ return -EPERM;
irq_set_chip_and_handler(irq, &sde_hw_irq_chip, handle_level_irq);
irq_set_chip_data(irq, sde_kms);
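[Editor's note: the helpers added above follow a lookup/register/enable pattern, the same one used by the INTF underrun registration in sde_irq_postinstall(). The sketch below is purely illustrative and not part of this patch; the function names, the caller context, and the SDE_IRQ_TYPE_INTF_VSYNC interrupt type are assumptions for the example.]

/* Illustrative only -- a hypothetical client registering and enabling a
 * vsync interrupt with the helpers above. SDE_IRQ_TYPE_INTF_VSYNC is
 * assumed to be defined in sde_hw_interrupts.h.
 */
static void example_vsync_handler(void *arg, int irq_idx)
{
	struct sde_kms *sde_kms = arg;

	/* e.g. signal a completion or bump a vblank counter */
	(void)sde_kms;
}

static int example_enable_vsync(struct sde_kms *sde_kms, u32 intf_instance)
{
	struct sde_irq_callback irq_cb = {
		.func = example_vsync_handler,
		.arg = sde_kms,
	};
	int irq_idx;
	int ret;

	/* intf_instance is an INTF block id from sde_hw_mdss.h, e.g. INTF_1 */
	irq_idx = sde_irq_idx_lookup(sde_kms, SDE_IRQ_TYPE_INTF_VSYNC,
			intf_instance);
	if (irq_idx < 0)
		return irq_idx;

	ret = sde_register_irq_callback(sde_kms, irq_idx, &irq_cb);
	if (ret)
		return ret;

	return sde_enable_irq(sde_kms, &irq_idx, 1);
}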
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 89deca7c16ee..251003e5382c 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -52,16 +52,14 @@ int sde_enable(struct sde_kms *sde_kms)
static void sde_prepare_commit(struct msm_kms *kms,
struct drm_atomic_state *state)
{
- struct sde_kms *sde_kms = to_sde_kms(to_mdp_kms(kms));
-
+ struct sde_kms *sde_kms = to_sde_kms(kms);
sde_enable(sde_kms);
}
static void sde_complete_commit(struct msm_kms *kms,
struct drm_atomic_state *state)
{
- struct sde_kms *sde_kms = to_sde_kms(to_mdp_kms(kms));
-
+ struct sde_kms *sde_kms = to_sde_kms(kms);
sde_disable(sde_kms);
}
@@ -165,30 +163,28 @@ static void sde_preclose(struct msm_kms *kms, struct drm_file *file)
static void sde_destroy(struct msm_kms *kms)
{
- struct sde_kms *sde_kms = to_sde_kms(to_mdp_kms(kms));
+ struct sde_kms *sde_kms = to_sde_kms(kms);
sde_irq_domain_fini(sde_kms);
+ sde_hw_intr_destroy(sde_kms->hw_intr);
kfree(sde_kms);
}
-static const struct mdp_kms_funcs kms_funcs = {
- .base = {
- .hw_init = sde_hw_init,
- .irq_preinstall = sde_irq_preinstall,
- .irq_postinstall = sde_irq_postinstall,
- .irq_uninstall = sde_irq_uninstall,
- .irq = sde_irq,
- .prepare_commit = sde_prepare_commit,
- .complete_commit = sde_complete_commit,
- .wait_for_crtc_commit_done = sde_wait_for_crtc_commit_done,
- .enable_vblank = sde_enable_vblank,
- .disable_vblank = sde_disable_vblank,
- .get_format = mdp_get_format,
- .round_pixclk = sde_round_pixclk,
- .preclose = sde_preclose,
- .destroy = sde_destroy,
- },
- .set_irqmask = sde_set_irqmask,
+static const struct msm_kms_funcs kms_funcs = {
+ .hw_init = sde_hw_init,
+ .irq_preinstall = sde_irq_preinstall,
+ .irq_postinstall = sde_irq_postinstall,
+ .irq_uninstall = sde_irq_uninstall,
+ .irq = sde_irq,
+ .prepare_commit = sde_prepare_commit,
+ .complete_commit = sde_complete_commit,
+ .wait_for_crtc_commit_done = sde_wait_for_crtc_commit_done,
+ .enable_vblank = sde_enable_vblank,
+ .disable_vblank = sde_disable_vblank,
+ .get_format = mdp_get_format,
+ .round_pixclk = sde_round_pixclk,
+ .preclose = sde_preclose,
+ .destroy = sde_destroy,
};
static int get_clk(struct platform_device *pdev, struct clk **clkp,
@@ -219,9 +215,9 @@ struct sde_kms *sde_hw_setup(struct platform_device *pdev)
if (!sde_kms)
return NULL;
- mdp_kms_init(&sde_kms->base, &kms_funcs);
+ msm_kms_init(&sde_kms->base, &kms_funcs);
- kms = &sde_kms->base.base;
+ kms = &sde_kms->base;
sde_kms->mmio = msm_ioremap(pdev, "mdp_phys", "SDE");
if (IS_ERR(sde_kms->mmio)) {
@@ -440,7 +436,7 @@ struct msm_kms *sde_kms_init(struct drm_device *dev)
}
sde_kms->dev = dev;
- msm_kms = &sde_kms->base.base;
+ msm_kms = &sde_kms->base;
/*
* Currently hardcoding to MDSS version 1.7.0 (8996)
@@ -483,6 +479,13 @@ struct msm_kms *sde_kms_init(struct drm_device *dev)
dev->mode_config.max_width = catalog->mixer[0].sblk->maxwidth;
dev->mode_config.max_height = 4096;
+ sde_enable(sde_kms);
+ sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
+ sde_disable(sde_kms);
+
+ if (IS_ERR_OR_NULL(sde_kms->hw_intr))
+ goto fail;
+
return msm_kms;
fail:
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index 7be7233c2b97..e56fa16423e5 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -18,9 +18,32 @@
#include "mdp/mdp_kms.h"
#include "sde_hw_catalog.h"
#include "sde_hw_mdss.h"
+#include "sde_hw_interrupts.h"
+
+/**
+ * struct sde_irq_callback - IRQ callback handlers
+ * @func: interrupt handler callback function
+ * @arg: argument passed back to the handler
+ */
+struct sde_irq_callback {
+ void (*func)(void *arg, int irq_idx);
+ void *arg;
+};
+
+/**
+ * struct sde_irq - IRQ structure containing callback registration info
+ * @total_irqs: total number of irq_idx obtained from the HW interrupt mapping
+ * @irq_cb_tbl: array of IRQ callback settings
+ * @cb_lock: callback lock
+ */
+struct sde_irq {
+ u32 total_irqs;
+ struct sde_irq_callback *irq_cb_tbl;
+ spinlock_t cb_lock;
+};
struct sde_kms {
- struct mdp_kms base;
+ struct msm_kms base;
struct drm_device *dev;
int rev;
struct sde_mdss_cfg *catalog;
@@ -48,6 +71,14 @@ struct sde_kms {
unsigned long enabled_mask;
struct irq_domain *domain;
} irqcontroller;
+
+ struct sde_hw_intr *hw_intr;
+ struct sde_irq irq_obj;
+};
+
+struct vsync_info {
+ u32 frame_count;
+ u32 line_count;
};
#define to_sde_kms(x) container_of(x, struct sde_kms, base)
@@ -77,12 +108,95 @@ struct sde_plane_state {
int sde_disable(struct sde_kms *sde_kms);
int sde_enable(struct sde_kms *sde_kms);
-void sde_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
- uint32_t old_irqmask);
+/**
+ * IRQ functions
+ */
+int sde_irq_domain_init(struct sde_kms *sde_kms);
+int sde_irq_domain_fini(struct sde_kms *sde_kms);
void sde_irq_preinstall(struct msm_kms *kms);
int sde_irq_postinstall(struct msm_kms *kms);
void sde_irq_uninstall(struct msm_kms *kms);
irqreturn_t sde_irq(struct msm_kms *kms);
+
+/**
+ * sde_set_irqmask - IRQ helper function for writing an IRQ mask
+ * to an SDE HW interrupt register.
+ * @sde_kms: SDE handle
+ * @reg_off: SDE HW interrupt register offset
+ * @irqmask: IRQ mask
+ */
+void sde_set_irqmask(
+ struct sde_kms *sde_kms,
+ uint32_t reg_off,
+ uint32_t irqmask);
+
+/**
+ * sde_irq_idx_lookup - IRQ helper function for looking up irq_idx in the HW
+ * interrupt mapping table.
+ * @sde_kms: SDE handle
+ * @intr_type: SDE HW interrupt type for lookup
+ * @instance_idx: SDE HW block instance defined in sde_hw_mdss.h
+ * @return: irq_idx, or -EINVAL when the lookup fails
+ */
+int sde_irq_idx_lookup(
+ struct sde_kms *sde_kms,
+ enum sde_intr_type intr_type,
+ uint32_t instance_idx);
+
+/**
+ * sde_enable_irq - IRQ helper function for enabling one or more IRQs
+ * @sde_kms: SDE handle
+ * @irq_idxs: Array of irq index
+ * @irq_count: Number of irq_idx provided in the array
+ * @return: 0 on success enabling the IRQs, otherwise failure
+ */
+int sde_enable_irq(
+ struct sde_kms *sde_kms,
+ int *irq_idxs,
+ uint32_t irq_count);
+
+/**
+ * sde_disable_irq - IRQ helper function for disabling one or more IRQs
+ * @sde_kms: SDE handle
+ * @irq_idxs: Array of irq index
+ * @irq_count: Number of irq_idx provided in the array
+ * @return: 0 on success disabling the IRQs, otherwise failure
+ */
+int sde_disable_irq(
+ struct sde_kms *sde_kms,
+ int *irq_idxs,
+ uint32_t irq_count);
+
+/**
+ * sde_register_irq_callback - Register a callback function for a given
+ * IRQ index
+ * @sde_kms: SDE handle
+ * @irq_idx: irq index
+ * @irq_cb: IRQ callback structure, containing callback function
+ * and argument. Passing NULL for irq_cb will unregister
+ * the callback for the given irq_idx
+ * @return: 0 on success registering the callback, otherwise failure
+ */
+int sde_register_irq_callback(
+ struct sde_kms *sde_kms,
+ int irq_idx,
+ struct sde_irq_callback *irq_cb);
+
+/**
+ * sde_clear_all_irqs - Clear all SDE IRQ interrupt statuses
+ * @sde_kms: SDE handle
+ */
+void sde_clear_all_irqs(struct sde_kms *sde_kms);
+
+/**
+ * sde_disable_all_irqs - Disable all SDE IRQ interrupts
+ * @sde_kms: SDE handle
+ */
+void sde_disable_all_irqs(struct sde_kms *sde_kms);
+
+/**
+ * Vblank enable/disable functions
+ */
int sde_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
void sde_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
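[Editor's note: per the sde_register_irq_callback() kernel-doc above, passing a NULL irq_cb unregisters the callback, so a teardown path is roughly the reverse of registration. The following is an illustrative sketch only, not code from this patch.]

/* Illustrative only -- disable a previously enabled irq_idx and drop its
 * callback by registering NULL, as the kernel-doc above allows.
 */
static void example_disable_and_unregister(struct sde_kms *sde_kms,
		int irq_idx)
{
	sde_disable_irq(sde_kms, &irq_idx, 1);
	sde_register_irq_callback(sde_kms, irq_idx, NULL);
}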
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index d917a807cd17..cf34de2f1e3d 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -690,7 +690,16 @@ struct drm_plane *sde_plane_init(struct drm_device *dev, uint32_t pipe,
enum drm_plane_type type;
priv = dev->dev_private;
- kms = to_sde_kms(to_mdp_kms(priv->kms));
+ if (!priv) {
+ DRM_ERROR("[%u]Private data is NULL\n", pipe);
+ goto exit;
+ }
+
+ if (!priv->kms) {
+ DRM_ERROR("[%u]Invalid KMS reference\n", pipe);
+ goto exit;
+ }
+ kms = to_sde_kms(priv->kms);
psde = kzalloc(sizeof(*psde), GFP_KERNEL);
if (!psde) {
@@ -753,6 +762,6 @@ fail:
pr_err("%s: Plane creation failed\n", __func__);
if (plane)
sde_plane_destroy(plane);
-
+exit:
return ERR_PTR(ret);
}