Diffstat (limited to 'drivers/gpu/drm/msm')
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx_gpu.c | 10
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 10
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_power.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx.xml.h | 12
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 245
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.h | 3
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h | 10
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 868
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.h | 19
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c | 69
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h | 65
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_hfi.c | 88
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c | 345
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 76
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 186
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h | 343
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h | 365
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h | 426
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h | 463
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h | 220
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h | 445
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h | 199
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h | 105
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h | 204
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h | 103
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h | 112
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h | 431
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h | 258
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h | 499
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h | 455
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h | 476
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h | 8
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c | 161
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h | 35
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 162
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h | 6
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c | 70
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c | 22
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c | 56
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 254
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h | 80
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c | 262
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h | 38
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c | 72
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h | 25
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c | 41
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h | 3
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c | 37
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h | 16
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c | 57
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h | 7
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c | 21
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h | 4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c | 20
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h | 7
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 257
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 69
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 43
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h | 95
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c | 30
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h | 4
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c | 121
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 116
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c | 3
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c | 2
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_audio.c | 2
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_ctrl.c | 13
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_display.c | 71
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_display.h | 1
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_drm.c | 24
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_drm.h | 2
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_link.c | 42
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_panel.c | 130
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_panel.h | 11
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.c | 40
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.h | 5
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.xml.h | 1
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_cfg.c | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 69
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_manager.c | 30
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c | 2
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c | 30
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.h | 5
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_bridge.c | 30
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_hpd.c | 3
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy.c | 9
-rw-r--r--  drivers/gpu/drm/msm/msm_debugfs.c | 12
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 378
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 32
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 6
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 65
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h | 15
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_prime.c | 15
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_shrinker.c | 33
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 79
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_vma.c | 67
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 32
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 9
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.c | 345
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.h | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_mdss.c | 94
-rw-r--r--  drivers/gpu/drm/msm/msm_mdss.h | 27
-rw-r--r--  drivers/gpu/drm/msm/msm_rd.c | 8
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c | 20
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.h | 2
115 files changed, 6903 insertions(+), 4204 deletions(-)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index a78662bd6273..6309a857ca31 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -21,7 +21,7 @@ config DRM_MSM
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select DRM_SCHED
- select FB_SYS_HELPERS if DRM_FBDEV_EMULATION
+ select FB_SYSMEM_HELPERS if DRM_FBDEV_EMULATION
select SHMEM
select TMPFS
select QCOM_SCM
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 8d02d8c33069..49671364fdcf 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -106,6 +106,7 @@ msm-y += \
msm_gpu_devfreq.o \
msm_io_utils.o \
msm_iommu.o \
+ msm_kms.o \
msm_perf.o \
msm_rd.o \
msm_ringbuffer.o \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index c67089a7ebc1..0d8133f3174b 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -205,7 +205,7 @@ static int a2xx_hw_init(struct msm_gpu *gpu)
A2XX_MH_INTERRUPT_MASK_MMU_PAGE_FAULT);
for (i = 3; i <= 5; i++)
- if ((SZ_16K << i) == adreno_gpu->gmem)
+ if ((SZ_16K << i) == adreno_gpu->info->gmem)
break;
gpu_write(gpu, REG_A2XX_RB_EDRAM_INFO, i);
@@ -540,6 +540,10 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
gpu->perfcntrs = perfcntrs;
gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ if (ret)
+ goto fail;
+
if (adreno_is_a20x(adreno_gpu))
adreno_gpu->registers = a200_registers;
else if (adreno_is_a225(adreno_gpu))
@@ -547,10 +551,6 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
else
adreno_gpu->registers = a220_registers;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
- if (ret)
- goto fail;
-
if (!gpu->aspace) {
dev_err(dev->dev, "No memory protection without MMU\n");
if (!allow_vram_carveout) {
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 715436cb3996..8b4cdf95f445 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -145,7 +145,7 @@ static void a4xx_enable_hwcg(struct msm_gpu *gpu)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00220000);
/* Early A430's have a timing issue with SP/TP power collapse;
disabling HW clock gating prevents it. */
- if (adreno_is_a430(adreno_gpu) && adreno_gpu->rev.patchid < 2)
+ if (adreno_is_a430(adreno_gpu) && adreno_patchid(adreno_gpu) < 2)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0);
else
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index bbb1bf33f98e..e5916c106796 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -66,7 +66,7 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct msm_ringbuffer *ring = submit->ring;
- struct msm_gem_object *obj;
+ struct drm_gem_object *obj;
uint32_t *ptr, dwords;
unsigned int i;
@@ -83,7 +83,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
obj = submit->bos[submit->cmd[i].idx].obj;
dwords = submit->cmd[i].size;
- ptr = msm_gem_get_vaddr(&obj->base);
+ ptr = msm_gem_get_vaddr(obj);
/* _get_vaddr() shouldn't fail at this point,
* since we've already mapped it once in
@@ -103,7 +103,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
OUT_RING(ring, ptr[i]);
}
- msm_gem_put_vaddr(&obj->base);
+ msm_gem_put_vaddr(obj);
break;
}
@@ -749,7 +749,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00000000);
gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO,
- 0x00100000 + adreno_gpu->gmem - 1);
+ 0x00100000 + adreno_gpu->info->gmem - 1);
gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);
if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
@@ -1770,7 +1770,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
nr_rings = 4;
- if (adreno_cmp_rev(ADRENO_REV(5, 1, 0, ANY_ID), config->rev))
+ if (config->info->revn == 510)
nr_rings = 1;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, nr_rings);
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index 0e63a1429189..7705f8010484 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -179,7 +179,7 @@ static void a540_lm_setup(struct msm_gpu *gpu)
/* The battery current limiter isn't enabled for A540 */
config = AGC_LM_CONFIG_BCL_DISABLED;
- config |= adreno_gpu->rev.patchid << AGC_LM_CONFIG_GPU_VERSION_SHIFT;
+ config |= adreno_patchid(adreno_gpu) << AGC_LM_CONFIG_GPU_VERSION_SHIFT;
/* For now disable GPMU side throttling */
config |= AGC_LM_CONFIG_THROTTLE_DISABLE;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
index 4dc3be6ed45d..863b5e3b0e67 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -1114,6 +1114,12 @@ enum a6xx_tex_type {
#define REG_A6XX_CP_MISC_CNTL 0x00000840
#define REG_A6XX_CP_APRIV_CNTL 0x00000844
+#define A6XX_CP_APRIV_CNTL_CDWRITE 0x00000040
+#define A6XX_CP_APRIV_CNTL_CDREAD 0x00000020
+#define A6XX_CP_APRIV_CNTL_RBRPWB 0x00000008
+#define A6XX_CP_APRIV_CNTL_RBPRIVLEVEL 0x00000004
+#define A6XX_CP_APRIV_CNTL_RBFETCH 0x00000002
+#define A6XX_CP_APRIV_CNTL_ICACHE 0x00000001
#define REG_A6XX_CP_PREEMPT_THRESHOLD 0x000008c0
@@ -1166,6 +1172,9 @@ static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE(uint32_t val)
#define REG_A6XX_CP_DBG_ECO_CNTL 0x00000843
#define REG_A6XX_CP_PROTECT_CNTL 0x0000084f
+#define A6XX_CP_PROTECT_CNTL_LAST_SPAN_INF_RANGE 0x00000008
+#define A6XX_CP_PROTECT_CNTL_ACCESS_FAULT_ON_VIOL_EN 0x00000002
+#define A6XX_CP_PROTECT_CNTL_ACCESS_PROT_EN 0x00000001
static inline uint32_t REG_A6XX_CP_SCRATCH(uint32_t i0) { return 0x00000883 + 0x1*i0; }
@@ -1936,6 +1945,8 @@ static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM_SEL(uint32_t i0) { return 0x00
#define REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE 0x00000122
+#define REG_A7XX_RBBM_CLOCK_HYST2_VFD 0x0000012f
+
#define REG_A6XX_RBBM_LPAC_GBIF_CLIENT_QOS_CNTL 0x000005ff
#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_A 0x00000600
@@ -8249,5 +8260,6 @@ static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
#define REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1 0x00000002
+#define REG_A7XX_CX_MISC_TCM_RET_CNTL 0x00000039
#endif /* A6XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 5deb79924897..8c4900444b2c 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1,8 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/interconnect.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
@@ -200,9 +203,10 @@ int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
static int a6xx_gmu_start(struct a6xx_gmu *gmu)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ u32 mask, reset_val, val;
int ret;
- u32 val;
- u32 mask, reset_val;
val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8);
if (val <= 0x20010004) {
@@ -218,7 +222,11 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
/* Set the log wptr index
* note: downstream saves the value in poweroff and restores it here
*/
- gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);
+ if (adreno_is_a7xx(adreno_gpu))
+ gmu_write(gmu, REG_A6XX_GMU_GENERAL_9, 0);
+ else
+ gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);
+
gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
@@ -511,6 +519,7 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct platform_device *pdev = to_platform_device(gmu->dev);
void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
+ u32 seqmem0_drv0_reg = REG_A6XX_RSCC_SEQ_MEM_0_DRV0;
void __iomem *seqptr = NULL;
uint32_t pdc_address_offset;
bool pdc_in_aop = false;
@@ -518,7 +527,9 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
if (IS_ERR(pdcptr))
goto err;
- if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu))
+ if (adreno_is_a650(adreno_gpu) ||
+ adreno_is_a660_family(adreno_gpu) ||
+ adreno_is_a7xx(adreno_gpu))
pdc_in_aop = true;
else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu))
pdc_address_offset = 0x30090;
@@ -542,20 +553,26 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
- gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4,
+ adreno_is_a740_family(adreno_gpu) ? 0x80000021 : 0x80000000);
gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
+ /* The second spin of A7xx GPUs messed with some register offsets.. */
+ if (adreno_is_a740_family(adreno_gpu))
+ seqmem0_drv0_reg = REG_A7XX_RSCC_SEQ_MEM_0_DRV0_A740;
+
/* Load RSC sequencer uCode for sleep and wakeup */
- if (adreno_is_a650_family(adreno_gpu)) {
- gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0);
- gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab);
- gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581);
- gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2);
- gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad);
+ if (adreno_is_a650_family(adreno_gpu) ||
+ adreno_is_a7xx(adreno_gpu)) {
+ gmu_write_rscc(gmu, seqmem0_drv0_reg, 0xeaaae5a0);
+ gmu_write_rscc(gmu, seqmem0_drv0_reg + 1, 0xe1a1ebab);
+ gmu_write_rscc(gmu, seqmem0_drv0_reg + 2, 0xa2e0a581);
+ gmu_write_rscc(gmu, seqmem0_drv0_reg + 3, 0xecac82e2);
+ gmu_write_rscc(gmu, seqmem0_drv0_reg + 4, 0x0020edad);
} else {
gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
@@ -635,11 +652,18 @@ err:
/* Set up the idle state for the GMU */
static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+
/* Disable GMU WB/RB buffer */
gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1);
gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1);
+ /* A7xx knows better by default! */
+ if (adreno_is_a7xx(adreno_gpu))
+ return;
+
gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);
switch (gmu->idle_level) {
@@ -676,12 +700,6 @@ struct block_header {
u32 data[];
};
-/* this should be a general kernel helper */
-static int in_range(u32 addr, u32 start, u32 size)
-{
- return addr >= start && addr < start + size;
-}
-
static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
{
if (!in_range(blk->addr, bo->iova, bo->size))
@@ -702,7 +720,7 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
u32 itcm_base = 0x00000000;
u32 dtcm_base = 0x00040000;
- if (adreno_is_a650_family(adreno_gpu))
+ if (adreno_is_a650_family(adreno_gpu) || adreno_is_a7xx(adreno_gpu))
dtcm_base = 0x10004000;
if (gmu->legacy) {
@@ -751,14 +769,22 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ u32 fence_range_lower, fence_range_upper;
+ u32 chipid, chipid_min = 0;
int ret;
- u32 chipid;
- if (adreno_is_a650_family(adreno_gpu)) {
+ /* Vote veto for FAL10 */
+ if (adreno_is_a650_family(adreno_gpu) || adreno_is_a7xx(adreno_gpu)) {
gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1);
gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
}
+ /* Turn on TCM (Tightly Coupled Memory) retention */
+ if (adreno_is_a7xx(adreno_gpu))
+ a6xx_llc_write(a6xx_gpu, REG_A7XX_CX_MISC_TCM_RET_CNTL, 1);
+ else
+ gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
+
if (state == GMU_WARM_BOOT) {
ret = a6xx_rpmh_start(gmu);
if (ret)
@@ -768,9 +794,6 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
"GMU firmware is not loaded\n"))
return -ENOENT;
- /* Turn on register retention */
- gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
-
ret = a6xx_rpmh_start(gmu);
if (ret)
return ret;
@@ -780,6 +803,7 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
return ret;
}
+ /* Clear init result to make sure we are getting a fresh value */
gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);
@@ -787,18 +811,68 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
+ if (adreno_is_a7xx(adreno_gpu)) {
+ fence_range_upper = 0x32;
+ fence_range_lower = 0x8a0;
+ } else {
+ fence_range_upper = 0xa;
+ fence_range_lower = 0xa0;
+ }
+
gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
- (1 << 31) | (0xa << 18) | (0xa0));
+ BIT(31) |
+ FIELD_PREP(GENMASK(30, 18), fence_range_upper) |
+ FIELD_PREP(GENMASK(17, 0), fence_range_lower));
+
+ /*
+ * Snapshots toggle the NMI bit which will result in a jump to the NMI
+ * handler instead of __main. Set the M3 config value to avoid that.
+ */
+ gmu_write(gmu, REG_A6XX_GMU_CM3_CFG, 0x4052);
+
+ /* NOTE: A730 may also fall in this if-condition with a future GMU fw update. */
+ if (adreno_is_a7xx(adreno_gpu) && !adreno_is_a730(adreno_gpu)) {
+ /* A7xx GPUs have obfuscated chip IDs. Use constant maj = 7 */
+ chipid = FIELD_PREP(GENMASK(31, 24), 0x7);
+
+ /*
+ * The min part has a 1-1 mapping for each GPU SKU.
+ * This chipid that the GMU expects corresponds to the "GENX_Y_Z" naming,
+ * where X = major, Y = minor, Z = patchlevel, e.g. GEN7_2_1 for prod A740.
+ */
+ if (adreno_is_a740(adreno_gpu))
+ chipid_min = 2;
+ else
+ return -EINVAL;
- chipid = adreno_gpu->rev.core << 24;
- chipid |= adreno_gpu->rev.major << 16;
- chipid |= adreno_gpu->rev.minor << 12;
- chipid |= adreno_gpu->rev.patchid << 8;
+ chipid |= FIELD_PREP(GENMASK(23, 16), chipid_min);
- gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
+ /* Get the patchid (which may vary) from the device tree */
+ chipid |= FIELD_PREP(GENMASK(15, 8), adreno_patchid(adreno_gpu));
+ } else {
+ /*
+ * Note that the GMU has a slightly different layout for
+ * chip_id, for whatever reason, so a bit of massaging
+ * is needed. The upper 16b are the same, but minor and
+ * patchid are packed in four bits each with the lower
+ * 8b unused:
+ */
+ chipid = adreno_gpu->chip_id & 0xffff0000;
+ chipid |= (adreno_gpu->chip_id << 4) & 0xf000; /* minor */
+ chipid |= (adreno_gpu->chip_id << 8) & 0x0f00; /* patchid */
+ }
+
+ if (adreno_is_a7xx(adreno_gpu)) {
+ gmu_write(gmu, REG_A6XX_GMU_GENERAL_10, chipid);
+ gmu_write(gmu, REG_A6XX_GMU_GENERAL_8,
+ (gmu->log.iova & GENMASK(31, 12)) |
+ ((gmu->log.size / SZ_4K - 1) & GENMASK(7, 0)));
+ } else {
+ gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
- gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
- gmu->log.iova | (gmu->log.size / SZ_4K - 1));
+ gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
+ gmu->log.iova | (gmu->log.size / SZ_4K - 1));
+ }
/* Set up the lowest idle level on the GMU */
a6xx_gmu_power_config(gmu);
@@ -849,17 +923,23 @@ static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
{
- u32 val;
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ u32 val, seqmem_off = 0;
+
+ /* The second spin of A7xx GPUs messed with some register offsets.. */
+ if (adreno_is_a740_family(adreno_gpu))
+ seqmem_off = 4;
/* Make sure there are no outstanding RPMh votes */
- gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
- (val & 1), 100, 10000);
- gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
- (val & 1), 100, 10000);
- gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
- (val & 1), 100, 10000);
- gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
- (val & 1), 100, 1000);
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS + seqmem_off,
+ val, (val & 1), 100, 10000);
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS + seqmem_off,
+ val, (val & 1), 100, 10000);
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS + seqmem_off,
+ val, (val & 1), 100, 10000);
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS + seqmem_off,
+ val, (val & 1), 100, 1000);
}
/* Force the GMU off in case it isn't responsive */
@@ -887,6 +967,13 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
/* Make sure there are no outstanding RPMh votes */
a6xx_gmu_rpmh_off(gmu);
+ /* Clear the WRITEDROPPED fields and put fence into allow mode */
+ gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS_CLR, 0x7);
+ gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+
+ /* Make sure the above writes go through */
+ wmb();
+
/* Halt the gmu cm3 core */
gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
@@ -935,6 +1022,14 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
gmu->hung = false;
+ /* Notify AOSS about the ACD state (unimplemented for now => disable it) */
+ if (!IS_ERR(gmu->qmp)) {
+ ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}",
+ 0 /* Hardcode ACD to be disabled for now */);
+ if (ret)
+ dev_err(gmu->dev, "failed to send GPU ACD state\n");
+ }
+
/* Turn on the resources */
pm_runtime_get_sync(gmu->dev);
@@ -948,7 +1043,8 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
/* Use a known rate to bring up the GMU */
clk_set_rate(gmu->core_clk, 200000000);
- clk_set_rate(gmu->hub_clk, 150000000);
+ clk_set_rate(gmu->hub_clk, adreno_is_a740_family(adreno_gpu) ?
+ 200000000 : 150000000);
ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
if (ret) {
pm_runtime_put(gmu->gxpd);
@@ -965,15 +1061,19 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
enable_irq(gmu->gmu_irq);
/* Check to see if we are doing a cold or warm boot */
- status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
- GMU_WARM_BOOT : GMU_COLD_BOOT;
-
- /*
- * Warm boot path does not work on newer GPUs
- * Presumably this is because icache/dcache regions must be restored
- */
- if (!gmu->legacy)
+ if (adreno_is_a7xx(adreno_gpu)) {
+ status = a6xx_llc_read(a6xx_gpu, REG_A7XX_CX_MISC_TCM_RET_CNTL) == 1 ?
+ GMU_WARM_BOOT : GMU_COLD_BOOT;
+ } else if (gmu->legacy) {
+ status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
+ GMU_WARM_BOOT : GMU_COLD_BOOT;
+ } else {
+ /*
+ * Warm boot path does not work on newer A6xx GPUs
+ * Presumably this is because icache/dcache regions must be restored
+ */
status = GMU_COLD_BOOT;
+ }
ret = a6xx_gmu_fw_start(gmu, status);
if (ret)
@@ -1435,8 +1535,15 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
struct platform_device *pdev = to_platform_device(gmu->dev);
- if (!gmu->initialized)
+ mutex_lock(&gmu->lock);
+ if (!gmu->initialized) {
+ mutex_unlock(&gmu->lock);
return;
+ }
+
+ gmu->initialized = false;
+
+ mutex_unlock(&gmu->lock);
pm_runtime_force_suspend(gmu->dev);
@@ -1451,6 +1558,9 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
dev_pm_domain_detach(gmu->gxpd, false);
}
+ if (!IS_ERR_OR_NULL(gmu->qmp))
+ qmp_put(gmu->qmp);
+
iounmap(gmu->mmio);
if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
iounmap(gmu->rscc);
@@ -1466,8 +1576,6 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
/* Drop reference taken in of_find_device_by_node */
put_device(gmu->dev);
-
- gmu->initialized = false;
}
static int cxpd_notifier_cb(struct notifier_block *nb,
@@ -1549,6 +1657,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
struct platform_device *pdev = of_find_device_by_node(node);
+ struct device_link *link;
int ret;
if (!pdev)
@@ -1580,7 +1689,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
* are otherwise unused by a660.
*/
gmu->dummy.size = SZ_4K;
- if (adreno_is_a660_family(adreno_gpu)) {
+ if (adreno_is_a660_family(adreno_gpu) ||
+ adreno_is_a7xx(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7,
0x60400000, "debug");
if (ret)
@@ -1596,7 +1706,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
goto err_memory;
/* Note that a650 family also includes a660 family: */
- if (adreno_is_a650_family(adreno_gpu)) {
+ if (adreno_is_a650_family(adreno_gpu) ||
+ adreno_is_a7xx(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
SZ_16M - SZ_16K, 0x04000, "icache");
if (ret)
@@ -1617,7 +1728,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
SZ_256K - SZ_16K, 0x44000, "dcache");
if (ret)
goto err_memory;
- } else if (adreno_is_a630(adreno_gpu) || adreno_is_a615_family(adreno_gpu)) {
+ } else if (adreno_is_a630_family(adreno_gpu)) {
/* HFI v1, has sptprac */
gmu->legacy = true;
@@ -1627,13 +1738,13 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
goto err_memory;
}
- /* Allocate memory for for the HFI queues */
- ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi");
+ /* Allocate memory for the GMU log region */
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_16K, 0, "log");
if (ret)
goto err_memory;
- /* Allocate memory for the GMU log region */
- ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0, "log");
+ /* Allocate memory for for the HFI queues */
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi");
if (ret)
goto err_memory;
@@ -1644,7 +1755,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
goto err_memory;
}
- if (adreno_is_a650_family(adreno_gpu)) {
+ if (adreno_is_a650_family(adreno_gpu) ||
+ adreno_is_a7xx(adreno_gpu)) {
gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
if (IS_ERR(gmu->rscc)) {
ret = -ENODEV;
@@ -1669,12 +1781,18 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
goto err_mmio;
}
- if (!device_link_add(gmu->dev, gmu->cxpd,
- DL_FLAG_PM_RUNTIME)) {
+ link = device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME);
+ if (!link) {
ret = -ENODEV;
goto detach_cxpd;
}
+ gmu->qmp = qmp_get(gmu->dev);
+ if (IS_ERR(gmu->qmp) && adreno_is_a7xx(adreno_gpu)) {
+ ret = PTR_ERR(gmu->qmp);
+ goto remove_device_link;
+ }
+
init_completion(&gmu->pd_gate);
complete_all(&gmu->pd_gate);
gmu->pd_nb.notifier_call = cxpd_notifier_cb;
@@ -1698,6 +1816,9 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
return 0;
+remove_device_link:
+ device_link_del(link);
+
detach_cxpd:
dev_pm_domain_detach(gmu->cxpd, false);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 236f81a43caa..592b296aab22 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -8,6 +8,7 @@
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
+#include <linux/soc/qcom/qcom_aoss.h>
#include "msm_drv.h"
#include "a6xx_hfi.h"
@@ -96,6 +97,8 @@ struct a6xx_gmu {
/* For power domain callback */
struct notifier_block pd_nb;
struct completion pd_gate;
+
+ struct qmp *qmp;
};
static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
index 9ab15d91aced..5b66efafc901 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
@@ -360,6 +360,12 @@ static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
#define REG_A6XX_GMU_GENERAL_7 0x000051cc
+#define REG_A6XX_GMU_GENERAL_8 0x000051cd
+
+#define REG_A6XX_GMU_GENERAL_9 0x000051ce
+
+#define REG_A6XX_GMU_GENERAL_10 0x000051cf
+
#define REG_A6XX_GMU_ISENSE_CTRL 0x0000515d
#define REG_A6XX_GPU_CS_ENABLE_REG 0x00008920
@@ -425,6 +431,8 @@ static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
#define REG_A6XX_GMU_AHB_FENCE_STATUS 0x00009313
+#define REG_A6XX_GMU_AHB_FENCE_STATUS_CLR 0x00009314
+
#define REG_A6XX_GMU_RBBM_INT_UNMASKED_STATUS 0x00009315
#define REG_A6XX_GMU_AO_SPARE_CNTL 0x00009316
@@ -469,6 +477,8 @@ static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
#define REG_A6XX_RSCC_SEQ_BUSY_DRV0 0x00000101
+#define REG_A7XX_RSCC_SEQ_MEM_0_DRV0_A740 0x00000154
+
#define REG_A6XX_RSCC_SEQ_MEM_0_DRV0 0x00000180
#define REG_A6XX_RSCC_TCS0_DRV0_STATUS 0x00000346
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index b3ada1e7b598..7a0220d29a23 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -103,6 +103,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
struct msm_ringbuffer *ring, struct msm_file_private *ctx)
{
bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1;
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
phys_addr_t ttbr;
u32 asid;
u64 memptr = rbmemptr(ring, ttbr0);
@@ -114,9 +115,11 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
return;
if (!sysprof) {
- /* Turn off protected mode to write to special registers */
- OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
- OUT_RING(ring, 0);
+ if (!adreno_is_a7xx(adreno_gpu)) {
+ /* Turn off protected mode to write to special registers */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+ }
OUT_PKT4(ring, REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD, 1);
OUT_RING(ring, 1);
@@ -142,6 +145,16 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
OUT_RING(ring, (asid << 16) | upper_32_bits(ttbr));
/*
+ * Sync both threads after switching pagetables and enable BR only
+ * to make sure BV doesn't race ahead while BR is still switching
+ * pagetables.
+ */
+ if (adreno_is_a7xx(&a6xx_gpu->base)) {
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR);
+ }
+
+ /*
* And finally, trigger a uche flush to be sure there isn't anything
* lingering in that part of the GPU
*/
@@ -163,9 +176,11 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(0x1));
OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0));
- /* Re-enable protected mode: */
- OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
- OUT_RING(ring, 1);
+ if (!adreno_is_a7xx(adreno_gpu)) {
+ /* Re-enable protected mode: */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+ }
}
}
@@ -252,6 +267,133 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
a6xx_flush(gpu, ring);
}
+static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+ unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = submit->ring;
+ unsigned int i, ibs = 0;
+
+ /*
+ * Toggle concurrent binning for pagetable switch and set the thread to
+ * BR since only it can execute the pagetable switch packets.
+ */
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR);
+
+ a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
+
+ get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
+ rbmemptr_stats(ring, index, cpcycles_start));
+ get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
+ rbmemptr_stats(ring, index, alwayson_start));
+
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, CP_SET_THREAD_BOTH);
+
+ OUT_PKT7(ring, CP_SET_MARKER, 1);
+ OUT_RING(ring, 0x101); /* IFPC disable */
+
+ OUT_PKT7(ring, CP_SET_MARKER, 1);
+ OUT_RING(ring, 0x00d); /* IB1LIST start */
+
+ /* Submit the commands */
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ break;
+ fallthrough;
+ case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, submit->cmd[i].size);
+ ibs++;
+ break;
+ }
+
+ /*
+ * Periodically update shadow-wptr if needed, so that we
+ * can see partial progress of submits with large # of
+ * cmds.. otherwise we could needlessly stall waiting for
+ * ringbuffer state, simply due to looking at a shadow
+ * rptr value that has not been updated
+ */
+ if ((ibs % 32) == 0)
+ update_shadow_rptr(gpu, ring);
+ }
+
+ OUT_PKT7(ring, CP_SET_MARKER, 1);
+ OUT_RING(ring, 0x00e); /* IB1LIST end */
+
+ get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
+ rbmemptr_stats(ring, index, cpcycles_end));
+ get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
+ rbmemptr_stats(ring, index, alwayson_end));
+
+ /* Write the fence to the scratch register */
+ OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
+ OUT_RING(ring, submit->seqno);
+
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, CP_SET_THREAD_BR);
+
+ OUT_PKT7(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, CCU_INVALIDATE_DEPTH);
+
+ OUT_PKT7(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, CCU_INVALIDATE_COLOR);
+
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, CP_SET_THREAD_BV);
+
+ /*
+ * Make sure the timestamp is committed once BV pipe is
+ * completely done with this submission.
+ */
+ OUT_PKT7(ring, CP_EVENT_WRITE, 4);
+ OUT_RING(ring, CACHE_CLEAN | BIT(27));
+ OUT_RING(ring, lower_32_bits(rbmemptr(ring, bv_fence)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(ring, bv_fence)));
+ OUT_RING(ring, submit->seqno);
+
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, CP_SET_THREAD_BR);
+
+ /*
+ * This makes sure that BR doesn't race ahead and commit
+ * timestamp to memstore while BV is still processing
+ * this submission.
+ */
+ OUT_PKT7(ring, CP_WAIT_TIMESTAMP, 4);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, lower_32_bits(rbmemptr(ring, bv_fence)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(ring, bv_fence)));
+ OUT_RING(ring, submit->seqno);
+
+ /* write the ringbuffer timestamp */
+ OUT_PKT7(ring, CP_EVENT_WRITE, 4);
+ OUT_RING(ring, CACHE_CLEAN | CP_EVENT_WRITE_0_IRQ | BIT(27));
+ OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, submit->seqno);
+
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, CP_SET_THREAD_BOTH);
+
+ OUT_PKT7(ring, CP_SET_MARKER, 1);
+ OUT_RING(ring, 0x100); /* IFPC enable */
+
+ trace_msm_gpu_submit_flush(submit,
+ gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER));
+
+ a6xx_flush(gpu, ring);
+}
+
const struct adreno_reglist a612_hwcg[] = {
{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
@@ -695,6 +837,121 @@ const struct adreno_reglist a690_hwcg[] = {
{}
};
+const struct adreno_reglist a730_hwcg[] = {
+ { REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022222 },
+ { REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf },
+ { REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222220 },
+ { REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00222222 },
+ { REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777 },
+ { REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777 },
+ { REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777 },
+ { REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111 },
+ { REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111 },
+ { REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111 },
+ { REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222 },
+ { REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220 },
+ { REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x44000f00 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022 },
+ { REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00555555 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011 },
+ { REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00440044 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222 },
+ { REG_A7XX_RBBM_CLOCK_MODE2_GRAS, 0x00000222 },
+ { REG_A7XX_RBBM_CLOCK_MODE_BV_GRAS, 0x00222222 },
+ { REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x02222223 },
+ { REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222 },
+ { REG_A7XX_RBBM_CLOCK_MODE_BV_GPC, 0x00222222 },
+ { REG_A7XX_RBBM_CLOCK_MODE_BV_VFD, 0x00002222 },
+ { REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004 },
+ { REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222 },
+ { REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002 },
+ { REG_A7XX_RBBM_CLOCK_MODE_BV_LRZ, 0x55555552 },
+ { REG_A7XX_RBBM_CLOCK_MODE_CP, 0x00000223 },
+ { REG_A6XX_RBBM_CLOCK_CNTL, 0x8aa8aa82 },
+ { REG_A6XX_RBBM_ISDB_CNT, 0x00000182 },
+ { REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000 },
+ { REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111 },
+ { REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555 },
+ {},
+};
+
+const struct adreno_reglist a740_hwcg[] = {
+ { REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x22022222 },
+ { REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x003cf3cf },
+ { REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222220 },
+ { REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00222222 },
+ { REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777 },
+ { REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777 },
+ { REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777 },
+ { REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111 },
+ { REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111 },
+ { REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111 },
+ { REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x00222222 },
+ { REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000444 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220 },
+ { REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x44000f00 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022 },
+ { REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00555555 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011 },
+ { REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00440044 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222 },
+ { REG_A7XX_RBBM_CLOCK_MODE2_GRAS, 0x00000222 },
+ { REG_A7XX_RBBM_CLOCK_MODE_BV_GRAS, 0x00222222 },
+ { REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x02222223 },
+ { REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00222222 },
+ { REG_A7XX_RBBM_CLOCK_MODE_BV_GPC, 0x00222222 },
+ { REG_A7XX_RBBM_CLOCK_MODE_BV_VFD, 0x00002222 },
+ { REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004 },
+ { REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000 },
+ { REG_A7XX_RBBM_CLOCK_MODE_BV_LRZ, 0x55555552 },
+ { REG_A7XX_RBBM_CLOCK_HYST2_VFD, 0x00000000 },
+ { REG_A7XX_RBBM_CLOCK_MODE_CP, 0x00000222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL, 0x8aa8aa82 },
+ { REG_A6XX_RBBM_ISDB_CNT, 0x00000182 },
+ { REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000 },
+ { REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111 },
+ { REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555 },
+ {},
+};
+
static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -702,7 +959,7 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
const struct adreno_reglist *reg;
unsigned int i;
- u32 val, clock_cntl_on;
+ u32 val, clock_cntl_on, cgc_mode;
if (!adreno_gpu->info->hwcg)
return;
@@ -714,6 +971,17 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
else
clock_cntl_on = 0x8aa8aa82;
+ if (adreno_is_a7xx(adreno_gpu)) {
+ cgc_mode = adreno_is_a740_family(adreno_gpu) ? 0x20222 : 0x20000;
+
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
+ state ? cgc_mode : 0);
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
+ state ? 0x10111 : 0);
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
+ state ? 0x5555 : 0);
+ }
+
val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL);
/* Don't re-program the registers if they are already correct */
@@ -721,14 +989,14 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
return;
/* Disable SP clock before programming HWCG registers */
- if (!adreno_is_a610(adreno_gpu))
+ if (!adreno_is_a610(adreno_gpu) && !adreno_is_a7xx(adreno_gpu))
gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
for (i = 0; (reg = &adreno_gpu->info->hwcg[i], reg->offset); i++)
gpu_write(gpu, reg->offset, state ? reg->value : 0);
/* Enable SP clock */
- if (!adreno_is_a610(adreno_gpu))
+ if (!adreno_is_a610(adreno_gpu) && !adreno_is_a7xx(adreno_gpu))
gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
@@ -897,6 +1165,59 @@ static const u32 a690_protect[] = {
A6XX_PROTECT_NORDWR(0x11c00, 0x00000), /*note: infiite range */
};
+static const u32 a730_protect[] = {
+ A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+ A6XX_PROTECT_RDONLY(0x0050b, 0x0058),
+ A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+ A6XX_PROTECT_RDONLY(0x005fb, 0x009d),
+ A6XX_PROTECT_NORDWR(0x00699, 0x01e9),
+ A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+ A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+ /* 0x008d0-0x008dd are unprotected on purpose for tools like perfetto */
+ A6XX_PROTECT_RDONLY(0x008de, 0x0154),
+ A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+ A6XX_PROTECT_NORDWR(0x0098d, 0x00b2),
+ A6XX_PROTECT_NORDWR(0x00a41, 0x01be),
+ A6XX_PROTECT_NORDWR(0x00df0, 0x0001),
+ A6XX_PROTECT_NORDWR(0x00e01, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00e07, 0x0008),
+ A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+ A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+ A6XX_PROTECT_NORDWR(0x08e80, 0x0280),
+ A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+ A6XX_PROTECT_NORDWR(0x09e40, 0x0000),
+ A6XX_PROTECT_NORDWR(0x09e64, 0x000d),
+ A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+ A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0ae50, 0x000f),
+ A6XX_PROTECT_NORDWR(0x0ae66, 0x0003),
+ A6XX_PROTECT_NORDWR(0x0ae6f, 0x0003),
+ A6XX_PROTECT_NORDWR(0x0b604, 0x0003),
+ A6XX_PROTECT_NORDWR(0x0ec00, 0x0fff),
+ A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x18400, 0x0053),
+ A6XX_PROTECT_RDONLY(0x18454, 0x0004),
+ A6XX_PROTECT_NORDWR(0x18459, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1a459, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1c459, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
+ A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
+ A6XX_PROTECT_NORDWR(0x1f860, 0x0000),
+ A6XX_PROTECT_NORDWR(0x1f878, 0x002a),
+ /* CP_PROTECT_REG[44, 46] are left untouched! */
+ 0,
+ 0,
+ 0,
+ A6XX_PROTECT_NORDWR(0x1f8c0, 0x00000),
+};
+
static void a6xx_set_cp_protect(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -918,6 +1239,11 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
count = ARRAY_SIZE(a660_protect);
count_max = 48;
BUILD_BUG_ON(ARRAY_SIZE(a660_protect) > 48);
+ } else if (adreno_is_a730(adreno_gpu) || adreno_is_a740(adreno_gpu)) {
+ regs = a730_protect;
+ count = ARRAY_SIZE(a730_protect);
+ count_max = 48;
+ BUILD_BUG_ON(ARRAY_SIZE(a730_protect) > 48);
} else {
regs = a6xx_protect;
count = ARRAY_SIZE(a6xx_protect);
@@ -930,10 +1256,16 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
* protect violation and select the last span to protect from the start
* address all the way to the end of the register address space
*/
- gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, BIT(0) | BIT(1) | BIT(3));
-
- for (i = 0; i < count - 1; i++)
- gpu_write(gpu, REG_A6XX_CP_PROTECT(i), regs[i]);
+ gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL,
+ A6XX_CP_PROTECT_CNTL_ACCESS_PROT_EN |
+ A6XX_CP_PROTECT_CNTL_ACCESS_FAULT_ON_VIOL_EN |
+ A6XX_CP_PROTECT_CNTL_LAST_SPAN_INF_RANGE);
+
+ for (i = 0; i < count - 1; i++) {
+ /* Intentionally skip writing to some registers */
+ if (regs[i])
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(i), regs[i]);
+ }
/* last CP_PROTECT to have "infinite" length on the last entry */
gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]);
}
@@ -978,7 +1310,10 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
if (adreno_is_a640_family(adreno_gpu))
amsbc = 1;
- if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu)) {
+ if (adreno_is_a650(adreno_gpu) ||
+ adreno_is_a660(adreno_gpu) ||
+ adreno_is_a730(adreno_gpu) ||
+ adreno_is_a740_family(adreno_gpu)) {
/* TODO: get ddr type from bootloader and use 2 for LPDDR4 */
hbb_lo = 3;
amsbc = 1;
@@ -1011,6 +1346,10 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
uavflagprd_inv << 4 | min_acc_len << 3 |
hbb_lo << 1 | ubwc_mode);
+ if (adreno_is_a7xx(adreno_gpu))
+ gpu_write(gpu, REG_A7XX_GRAS_NC_MODE_CNTL,
+ FIELD_PREP(GENMASK(8, 5), hbb_lo));
+
gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, min_acc_len << 23 | hbb_lo << 21);
}
@@ -1043,6 +1382,55 @@ static int a6xx_cp_init(struct msm_gpu *gpu)
return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
}
+static int a7xx_cp_init(struct msm_gpu *gpu)
+{
+ struct msm_ringbuffer *ring = gpu->rb[0];
+ u32 mask;
+
+ /* Disable concurrent binning before sending CP init */
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, BIT(27));
+
+ OUT_PKT7(ring, CP_ME_INIT, 7);
+
+ /* Use multiple HW contexts */
+ mask = BIT(0);
+
+ /* Enable error detection */
+ mask |= BIT(1);
+
+ /* Set default reset state */
+ mask |= BIT(3);
+
+ /* Disable save/restore of performance counters across preemption */
+ mask |= BIT(6);
+
+ /* Enable the register init list with the spinlock */
+ mask |= BIT(8);
+
+ OUT_RING(ring, mask);
+
+ /* Enable multiple hardware contexts */
+ OUT_RING(ring, 0x00000003);
+
+ /* Enable error detection */
+ OUT_RING(ring, 0x20000000);
+
+ /* Operation mode mask */
+ OUT_RING(ring, 0x00000002);
+
+ /* *Don't* send a power up reg list for concurrent binning (TODO) */
+ /* Lo address */
+ OUT_RING(ring, 0x00000000);
+ /* Hi address */
+ OUT_RING(ring, 0x00000000);
+ /* BIT(31) set => read the regs from the list */
+ OUT_RING(ring, 0x00000000);
+
+ a6xx_flush(gpu, ring);
+ return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
/*
* Check that the microcode version is new enough to include several key
* security fixes. Return true if the ucode is safe.
@@ -1059,6 +1447,10 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
if (IS_ERR(buf))
return false;
+ /* A7xx is safe! */
+ if (adreno_is_a7xx(adreno_gpu))
+ return true;
+
/*
* Targets up to a640 (a618, a630 and a640) need to check for a
* microcode version that is patched to support the whereami opcode or
@@ -1175,27 +1567,53 @@ static int a6xx_zap_shader_init(struct msm_gpu *gpu)
}
#define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
- A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
- A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
- A6XX_RBBM_INT_0_MASK_CP_IB2 | \
- A6XX_RBBM_INT_0_MASK_CP_IB1 | \
- A6XX_RBBM_INT_0_MASK_CP_RB | \
- A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
- A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
- A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
- A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
- A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
+ A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
+ A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+ A6XX_RBBM_INT_0_MASK_CP_IB2 | \
+ A6XX_RBBM_INT_0_MASK_CP_IB1 | \
+ A6XX_RBBM_INT_0_MASK_CP_RB | \
+ A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
+ A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
+ A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
+ A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
+ A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
+
+#define A7XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
+ A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
+ A6XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR | \
+ A6XX_RBBM_INT_0_MASK_CP_SW | \
+ A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+ A6XX_RBBM_INT_0_MASK_PM4CPINTERRUPT | \
+ A6XX_RBBM_INT_0_MASK_CP_RB_DONE_TS | \
+ A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
+ A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
+ A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
+ A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
+ A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR | \
+ A6XX_RBBM_INT_0_MASK_TSBWRITEERROR)
+
+#define A7XX_APRIV_MASK (A6XX_CP_APRIV_CNTL_ICACHE | \
+ A6XX_CP_APRIV_CNTL_RBFETCH | \
+ A6XX_CP_APRIV_CNTL_RBPRIVLEVEL | \
+ A6XX_CP_APRIV_CNTL_RBRPWB)
+
+#define A7XX_BR_APRIVMASK (A7XX_APRIV_MASK | \
+ A6XX_CP_APRIV_CNTL_CDREAD | \
+ A6XX_CP_APRIV_CNTL_CDWRITE)
static int hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ u64 gmem_range_min;
int ret;
if (!adreno_has_gmu_wrapper(adreno_gpu)) {
/* Make sure the GMU keeps the GPU on while we set it up */
- a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+ ret = a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+ if (ret)
+ return ret;
}
/* Clear GBIF halt in case GX domain was not collapsed */
@@ -1211,6 +1629,10 @@ static int hw_init(struct msm_gpu *gpu)
mb();
}
+ /* Some GPUs are stubborn and take their sweet time to unhalt GBIF! */
+ if (adreno_is_a7xx(adreno_gpu) && a6xx_has_gbif(adreno_gpu))
+ spin_until(!gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK));
+
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
if (adreno_is_a619_holi(adreno_gpu))
@@ -1224,19 +1646,21 @@ static int hw_init(struct msm_gpu *gpu)
gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE, 0x00000000);
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
- /* Turn on 64 bit addressing for all blocks */
- gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1);
- gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1);
- gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
- gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1);
- gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1);
- gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
- gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1);
- gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1);
- gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
- gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1);
- gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
- gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
+ if (!adreno_is_a7xx(adreno_gpu)) {
+ /* Turn on 64 bit addressing for all blocks */
+ gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
+ }
/* enable hardware clockgating */
a6xx_set_hwcg(gpu, true);
@@ -1244,12 +1668,14 @@ static int hw_init(struct msm_gpu *gpu)
/* VBIF/GBIF start*/
if (adreno_is_a610(adreno_gpu) ||
adreno_is_a640_family(adreno_gpu) ||
- adreno_is_a650_family(adreno_gpu)) {
+ adreno_is_a650_family(adreno_gpu) ||
+ adreno_is_a7xx(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
- gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3);
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL,
+ adreno_is_a7xx(adreno_gpu) ? 0x2120212 : 0x3);
} else {
gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
}
@@ -1257,24 +1683,39 @@ static int hw_init(struct msm_gpu *gpu)
if (adreno_is_a630(adreno_gpu))
gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
+ if (adreno_is_a7xx(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_UCHE_GBIF_GX_CONFIG, 0x10240e0);
+
/* Make all blocks contribute to the GPU BUSY perf counter */
gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);
/* Disable L2 bypass in the UCHE */
- gpu_write64(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX, 0x0001ffffffffffc0llu);
- gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
- gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
+ if (adreno_is_a7xx(adreno_gpu)) {
+ gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
+ gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
+ } else {
+ gpu_write64(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX, 0x0001ffffffffffc0llu);
+ gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
+ gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
+ }
+
+ if (!(adreno_is_a650_family(adreno_gpu) ||
+ adreno_is_a730(adreno_gpu))) {
+ gmem_range_min = adreno_is_a740_family(adreno_gpu) ? SZ_16M : SZ_1M;
- if (!adreno_is_a650_family(adreno_gpu)) {
/* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
- gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN, 0x00100000);
+ gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN, gmem_range_min);
gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX,
- 0x00100000 + adreno_gpu->gmem - 1);
+ gmem_range_min + adreno_gpu->info->gmem - 1);
}
- gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
- gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
+ if (adreno_is_a7xx(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, BIT(23));
+ else {
+ gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
+ gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
+ }
if (adreno_is_a640_family(adreno_gpu) || adreno_is_a650_family(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
@@ -1282,7 +1723,7 @@ static int hw_init(struct msm_gpu *gpu)
} else if (adreno_is_a610(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x00800060);
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x40201b16);
- } else {
+ } else if (!adreno_is_a7xx(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
}
@@ -1294,7 +1735,7 @@ static int hw_init(struct msm_gpu *gpu)
if (adreno_is_a610(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 48);
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_DBG_ADDR, 47);
- } else
+ } else if (!adreno_is_a7xx(adreno_gpu))
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
/* Setting the primFifo thresholds default values,
@@ -1310,7 +1751,7 @@ static int hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00018000);
else if (adreno_is_a610(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00080000);
- else
+ else if (!adreno_is_a7xx(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00180000);
/* Set the AHB default slave response to "ERROR" */
@@ -1319,13 +1760,22 @@ static int hw_init(struct msm_gpu *gpu)
/* Turn on performance counters */
gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1);
+ if (adreno_is_a7xx(adreno_gpu)) {
+ /* Turn on the IFPC counter (countable 4 on XOCLK4) */
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1,
+ FIELD_PREP(GENMASK(7, 0), 0x4));
+ }
+
/* Select CP0 to always count cycles */
gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL(0), PERF_CP_ALWAYS_COUNT);
a6xx_set_ubwc_config(gpu);
/* Enable fault detection */
- if (adreno_is_a619(adreno_gpu))
+ if (adreno_is_a730(adreno_gpu) ||
+ adreno_is_a740_family(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0xcfffff);
+ else if (adreno_is_a619(adreno_gpu))
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3fffff);
else if (adreno_is_a610(adreno_gpu))
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3ffff);
@@ -1365,15 +1815,31 @@ static int hw_init(struct msm_gpu *gpu)
/* Set dualQ + disable afull for A660 GPU */
if (adreno_is_a660(adreno_gpu))
gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906);
+ else if (adreno_is_a7xx(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG,
+ FIELD_PREP(GENMASK(19, 16), 6) |
+ FIELD_PREP(GENMASK(15, 12), 6) |
+ FIELD_PREP(GENMASK(11, 8), 9) |
+ BIT(3) | BIT(2) |
+ FIELD_PREP(GENMASK(1, 0), 2));
/* Enable expanded apriv for targets that support it */
if (gpu->hw_apriv) {
- gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
- (1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1));
+ if (adreno_is_a7xx(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
+ A7XX_BR_APRIVMASK);
+ gpu_write(gpu, REG_A7XX_CP_BV_APRIV_CNTL,
+ A7XX_APRIV_MASK);
+ gpu_write(gpu, REG_A7XX_CP_LPAC_APRIV_CNTL,
+ A7XX_APRIV_MASK);
+ } else
+ gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
+ BIT(6) | BIT(5) | BIT(3) | BIT(2) | BIT(1));
}
/* Enable interrupts */
- gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);
+ gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK,
+ adreno_is_a7xx(adreno_gpu) ? A7XX_INT_MASK : A6XX_INT_MASK);
ret = adreno_hw_init(gpu);
if (ret)
@@ -1400,6 +1866,12 @@ static int hw_init(struct msm_gpu *gpu)
shadowptr(a6xx_gpu, gpu->rb[0]));
}
+ /* ..which means "always" on A7xx, also for BV shadow */
+ if (adreno_is_a7xx(adreno_gpu)) {
+ gpu_write64(gpu, REG_A7XX_CP_BV_RB_RPTR_ADDR,
+ rbmemptr(gpu->rb[0], bv_fence));
+ }
+
/* Always come up on rb 0 */
a6xx_gpu->cur_ring = gpu->rb[0];
@@ -1408,7 +1880,7 @@ static int hw_init(struct msm_gpu *gpu)
/* Enable the SQE to start the CP engine */
gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
- ret = a6xx_cp_init(gpu);
+ ret = adreno_is_a7xx(adreno_gpu) ? a7xx_cp_init(gpu) : a6xx_cp_init(gpu);
if (ret)
goto out;
@@ -1645,7 +2117,7 @@ static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu)
(val & 0x3ffff), val);
}
- if (status & A6XX_CP_INT_CP_AHB_ERROR)
+ if (status & A6XX_CP_INT_CP_AHB_ERROR && !adreno_is_a7xx(to_adreno_gpu(gpu)))
dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n");
if (status & A6XX_CP_INT_CP_VSD_PARITY_ERROR)
@@ -1729,16 +2201,6 @@ static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
return IRQ_HANDLED;
}
-static void a6xx_llc_rmw(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 mask, u32 or)
-{
- return msm_rmw(a6xx_gpu->llc_mmio + (reg << 2), mask, or);
-}
-
-static void a6xx_llc_write(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 value)
-{
- msm_writel(value, a6xx_gpu->llc_mmio + (reg << 2));
-}
-
static void a6xx_llc_deactivate(struct a6xx_gpu *a6xx_gpu)
{
llcc_slice_deactivate(a6xx_gpu->llc_slice);
@@ -1805,6 +2267,35 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval);
}
+static void a7xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
+{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ if (IS_ERR(a6xx_gpu->llc_mmio))
+ return;
+
+ if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
+ u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
+
+ gpu_scid &= GENMASK(4, 0);
+
+ gpu_write(gpu, REG_A6XX_GBIF_SCACHE_CNTL1,
+ FIELD_PREP(GENMASK(29, 25), gpu_scid) |
+ FIELD_PREP(GENMASK(24, 20), gpu_scid) |
+ FIELD_PREP(GENMASK(19, 15), gpu_scid) |
+ FIELD_PREP(GENMASK(14, 10), gpu_scid) |
+ FIELD_PREP(GENMASK(9, 5), gpu_scid) |
+ FIELD_PREP(GENMASK(4, 0), gpu_scid));
+
+ gpu_write(gpu, REG_A6XX_GBIF_SCACHE_CNTL0,
+ FIELD_PREP(GENMASK(14, 10), gpu_scid) |
+ BIT(8));
+ }
+
+ llcc_slice_activate(a6xx_gpu->htw_llc_slice);
+}
+
static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
{
/* No LLCC on non-RPMh (and by extension, non-GMU) SoCs */
@@ -1816,7 +2307,7 @@ static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
}
static void a6xx_llc_slices_init(struct platform_device *pdev,
- struct a6xx_gpu *a6xx_gpu)
+ struct a6xx_gpu *a6xx_gpu, bool is_a7xx)
{
struct device_node *phandle;
@@ -1825,18 +2316,18 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
return;
/*
- * There is a different programming path for targets with an mmu500
- * attached, so detect if that is the case
+ * There is a different programming path for A6xx targets with an
+ * mmu500 attached, so detect if that is the case
*/
phandle = of_parse_phandle(pdev->dev.of_node, "iommus", 0);
a6xx_gpu->have_mmu500 = (phandle &&
of_device_is_compatible(phandle, "arm,mmu-500"));
of_node_put(phandle);
- if (a6xx_gpu->have_mmu500)
- a6xx_gpu->llc_mmio = NULL;
- else
+ if (is_a7xx || !a6xx_gpu->have_mmu500)
a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem");
+ else
+ a6xx_gpu->llc_mmio = NULL;
a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
@@ -1922,7 +2413,7 @@ static int a6xx_gmu_pm_resume(struct msm_gpu *gpu)
msm_devfreq_resume(gpu);
- a6xx_llc_activate(a6xx_gpu);
+ adreno_is_a7xx(adreno_gpu) ? a7xx_llc_activate(a6xx_gpu) : a6xx_llc_activate(a6xx_gpu);
return ret;
}
@@ -2091,9 +2582,7 @@ static void a6xx_destroy(struct msm_gpu *gpu)
a6xx_llc_slices_destroy(a6xx_gpu);
- mutex_lock(&a6xx_gpu->gmu.lock);
a6xx_gmu_remove(a6xx_gpu);
- mutex_unlock(&a6xx_gpu->gmu.lock);
adreno_gpu_cleanup(adreno_gpu);
@@ -2204,159 +2693,19 @@ static bool a6xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
return progress;
}
-static u32 a610_get_speed_bin(u32 fuse)
-{
- /*
- * There are (at least) three SoCs implementing A610: SM6125 (trinket),
- * SM6115 (bengal) and SM6225 (khaje). Trinket does not have speedbinning,
- * as only a single SKU exists and we don't support khaje upstream yet.
- * Hence, this matching table is only valid for bengal and can be easily
- * expanded if need be.
- */
-
- if (fuse == 0)
- return 0;
- else if (fuse == 206)
- return 1;
- else if (fuse == 200)
- return 2;
- else if (fuse == 157)
- return 3;
- else if (fuse == 127)
- return 4;
-
- return UINT_MAX;
-}
-
-static u32 a618_get_speed_bin(u32 fuse)
-{
- if (fuse == 0)
- return 0;
- else if (fuse == 169)
- return 1;
- else if (fuse == 174)
- return 2;
-
- return UINT_MAX;
-}
-
-static u32 a619_holi_get_speed_bin(u32 fuse)
-{
- /*
- * There are (at least) two SoCs implementing A619_holi: SM4350 (holi)
- * and SM6375 (blair). Limit the fuse matching to the corresponding
- * SoC to prevent bogus frequency setting (as improbable as it may be,
- * given unexpected fuse values are.. unexpected! But still possible.)
- */
-
- if (fuse == 0)
- return 0;
-
- if (of_machine_is_compatible("qcom,sm4350")) {
- if (fuse == 138)
- return 1;
- else if (fuse == 92)
- return 2;
- } else if (of_machine_is_compatible("qcom,sm6375")) {
- if (fuse == 190)
- return 1;
- else if (fuse == 177)
- return 2;
- } else
- pr_warn("Unknown SoC implementing A619_holi!\n");
-
- return UINT_MAX;
-}
-
-static u32 a619_get_speed_bin(u32 fuse)
+static u32 fuse_to_supp_hw(const struct adreno_info *info, u32 fuse)
{
- if (fuse == 0)
- return 0;
- else if (fuse == 120)
- return 4;
- else if (fuse == 138)
- return 3;
- else if (fuse == 169)
- return 2;
- else if (fuse == 180)
- return 1;
-
- return UINT_MAX;
-}
-
-static u32 a640_get_speed_bin(u32 fuse)
-{
- if (fuse == 0)
- return 0;
- else if (fuse == 1)
- return 1;
-
- return UINT_MAX;
-}
-
-static u32 a650_get_speed_bin(u32 fuse)
-{
- if (fuse == 0)
- return 0;
- else if (fuse == 1)
- return 1;
- /* Yep, 2 and 3 are swapped! :/ */
- else if (fuse == 2)
- return 3;
- else if (fuse == 3)
- return 2;
-
- return UINT_MAX;
-}
+ if (!info->speedbins)
+ return UINT_MAX;
-static u32 adreno_7c3_get_speed_bin(u32 fuse)
-{
- if (fuse == 0)
- return 0;
- else if (fuse == 117)
- return 0;
- else if (fuse == 190)
- return 1;
+ for (int i = 0; info->speedbins[i].fuse != SHRT_MAX; i++)
+ if (info->speedbins[i].fuse == fuse)
+ return BIT(info->speedbins[i].speedbin);
return UINT_MAX;
}
-static u32 fuse_to_supp_hw(struct device *dev, struct adreno_gpu *adreno_gpu, u32 fuse)
-{
- u32 val = UINT_MAX;
-
- if (adreno_is_a610(adreno_gpu))
- val = a610_get_speed_bin(fuse);
-
- if (adreno_is_a618(adreno_gpu))
- val = a618_get_speed_bin(fuse);
-
- else if (adreno_is_a619_holi(adreno_gpu))
- val = a619_holi_get_speed_bin(fuse);
-
- else if (adreno_is_a619(adreno_gpu))
- val = a619_get_speed_bin(fuse);
-
- else if (adreno_is_7c3(adreno_gpu))
- val = adreno_7c3_get_speed_bin(fuse);
-
- else if (adreno_is_a640(adreno_gpu))
- val = a640_get_speed_bin(fuse);
-
- else if (adreno_is_a650(adreno_gpu))
- val = a650_get_speed_bin(fuse);
-
- if (val == UINT_MAX) {
- DRM_DEV_ERROR(dev,
- "missing support for speed-bin: %u. Some OPPs may not be supported by hardware\n",
- fuse);
- return UINT_MAX;
- }
-
- return (1 << val);
-}
-
-static int a6xx_set_supported_hw(struct device *dev, struct adreno_gpu *adreno_gpu)
+static int a6xx_set_supported_hw(struct device *dev, const struct adreno_info *info)
{
u32 supp_hw;
u32 speedbin;
@@ -2375,7 +2724,14 @@ static int a6xx_set_supported_hw(struct device *dev, struct adreno_gpu *adreno_g
return ret;
}
- supp_hw = fuse_to_supp_hw(dev, adreno_gpu, speedbin);
+ supp_hw = fuse_to_supp_hw(info, speedbin);
+
+ if (supp_hw == UINT_MAX) {
+ DRM_DEV_ERROR(dev,
+ "missing support for speed-bin: %u. Some OPPs may not be supported by hardware\n",
+ speedbin);
+ supp_hw = BIT(0); /* Default */
+ }
ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1);
if (ret)
@@ -2444,16 +2800,47 @@ static const struct adreno_gpu_funcs funcs_gmuwrapper = {
.get_timestamp = a6xx_get_timestamp,
};
+static const struct adreno_gpu_funcs funcs_a7xx = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a6xx_hw_init,
+ .ucode_load = a6xx_ucode_load,
+ .pm_suspend = a6xx_gmu_pm_suspend,
+ .pm_resume = a6xx_gmu_pm_resume,
+ .recover = a6xx_recover,
+ .submit = a7xx_submit,
+ .active_ring = a6xx_active_ring,
+ .irq = a6xx_irq,
+ .destroy = a6xx_destroy,
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
+ .show = a6xx_show,
+#endif
+ .gpu_busy = a6xx_gpu_busy,
+ .gpu_get_freq = a6xx_gmu_get_freq,
+ .gpu_set_freq = a6xx_gpu_set_freq,
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
+ .gpu_state_get = a6xx_gpu_state_get,
+ .gpu_state_put = a6xx_gpu_state_put,
+#endif
+ .create_address_space = a6xx_create_address_space,
+ .create_private_address_space = a6xx_create_private_address_space,
+ .get_rptr = a6xx_get_rptr,
+ .progress = a6xx_progress,
+ },
+ .get_timestamp = a6xx_gmu_get_timestamp,
+};
+
struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
struct adreno_platform_config *config = pdev->dev.platform_data;
- const struct adreno_info *info;
struct device_node *node;
struct a6xx_gpu *a6xx_gpu;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
+ bool is_a7xx;
int ret;
a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL);
@@ -2474,35 +2861,24 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
adreno_gpu->gmu_is_wrapper = of_device_is_compatible(node, "qcom,adreno-gmu-wrapper");
- /*
- * We need to know the platform type before calling into adreno_gpu_init
- * so that the hw_apriv flag can be correctly set. Snoop into the info
- * and grab the revision number
- */
- info = adreno_info(config->rev);
- if (!info)
- return ERR_PTR(-EINVAL);
+ adreno_gpu->base.hw_apriv =
+ !!(config->info->quirks & ADRENO_QUIRK_HAS_HW_APRIV);
- /* Assign these early so that we can use the is_aXYZ helpers */
- /* Numeric revision IDs (e.g. 630) */
- adreno_gpu->revn = info->revn;
- /* New-style ADRENO_REV()-only */
- adreno_gpu->rev = info->rev;
- /* Quirk data */
- adreno_gpu->info = info;
+ /* gpu->info only gets assigned in adreno_gpu_init() */
+ is_a7xx = config->info->family == ADRENO_7XX_GEN1 ||
+ config->info->family == ADRENO_7XX_GEN2;
- if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu))
- adreno_gpu->base.hw_apriv = true;
+ a6xx_llc_slices_init(pdev, a6xx_gpu, is_a7xx);
- a6xx_llc_slices_init(pdev, a6xx_gpu);
-
- ret = a6xx_set_supported_hw(&pdev->dev, adreno_gpu);
+ ret = a6xx_set_supported_hw(&pdev->dev, config->info);
if (ret) {
a6xx_destroy(&(a6xx_gpu->base.base));
return ERR_PTR(ret);
}
- if (adreno_has_gmu_wrapper(adreno_gpu))
+ if (is_a7xx)
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_a7xx, 1);
+ else if (adreno_has_gmu_wrapper(adreno_gpu))
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_gmuwrapper, 1);
else
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index c788b06e72da..34822b080759 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -39,8 +39,8 @@ struct a6xx_gpu {
/*
* Given a register and a count, return a value to program into
- * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
- * registers starting at _reg.
+ * REG_CP_PROTECT_REG(n) - this will block both reads and writes for
+ * _len + 1 registers starting at _reg.
*/
#define A6XX_PROTECT_NORDWR(_reg, _len) \
((1 << 31) | \
@@ -62,6 +62,21 @@ static inline bool a6xx_has_gbif(struct adreno_gpu *gpu)
return true;
}
+static inline void a6xx_llc_rmw(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 mask, u32 or)
+{
+ return msm_rmw(a6xx_gpu->llc_mmio + (reg << 2), mask, or);
+}
+
+static inline u32 a6xx_llc_read(struct a6xx_gpu *a6xx_gpu, u32 reg)
+{
+ return msm_readl(a6xx_gpu->llc_mmio + (reg << 2));
+}
+
+static inline void a6xx_llc_write(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 value)
+{
+ msm_writel(value, a6xx_gpu->llc_mmio + (reg << 2));
+}
+
#define shadowptr(_a6xx_gpu, _ring) ((_a6xx_gpu)->shadow_iova + \
((_ring)->id * sizeof(uint32_t)))
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 4e5d650578c6..91a564a24dbe 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -882,12 +882,13 @@ static void a6xx_snapshot_gmu_hfi_history(struct msm_gpu *gpu,
}
}
+#define A6XX_REGLIST_SIZE 1
#define A6XX_GBIF_REGLIST_SIZE 1
static void a6xx_get_registers(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
struct a6xx_crashdumper *dumper)
{
- int i, count = ARRAY_SIZE(a6xx_ahb_reglist) +
+ int i, count = A6XX_REGLIST_SIZE +
ARRAY_SIZE(a6xx_reglist) +
ARRAY_SIZE(a6xx_hlsq_reglist) + A6XX_GBIF_REGLIST_SIZE;
int index = 0;
@@ -901,12 +902,20 @@ static void a6xx_get_registers(struct msm_gpu *gpu,
a6xx_state->nr_registers = count;
- for (i = 0; i < ARRAY_SIZE(a6xx_ahb_reglist); i++)
+ if (adreno_is_a7xx(adreno_gpu))
a6xx_get_ahb_gpu_registers(gpu,
- a6xx_state, &a6xx_ahb_reglist[i],
+ a6xx_state, &a7xx_ahb_reglist,
+ &a6xx_state->registers[index++]);
+ else
+ a6xx_get_ahb_gpu_registers(gpu,
+ a6xx_state, &a6xx_ahb_reglist,
&a6xx_state->registers[index++]);
- if (a6xx_has_gbif(adreno_gpu))
+ if (adreno_is_a7xx(adreno_gpu))
+ a6xx_get_ahb_gpu_registers(gpu,
+ a6xx_state, &a7xx_gbif_reglist,
+ &a6xx_state->registers[index++]);
+ else if (a6xx_has_gbif(adreno_gpu))
a6xx_get_ahb_gpu_registers(gpu,
a6xx_state, &a6xx_gbif_reglist,
&a6xx_state->registers[index++]);
@@ -948,6 +957,18 @@ static u32 a6xx_get_cp_roq_size(struct msm_gpu *gpu)
return gpu_read(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2) >> 14;
}
+static u32 a7xx_get_cp_roq_size(struct msm_gpu *gpu)
+{
+ /*
+ * The value at CP_ROQ_THRESHOLDS_2[20:31] is in 4dword units.
+ * That register however is not directly accessible from APSS on A7xx.
+ * Program the SQE_UCODE_DBG_ADDR with offset=0x70d3 and read the value.
+ */
+ gpu_write(gpu, REG_A6XX_CP_SQE_UCODE_DBG_ADDR, 0x70d3);
+
+ return 4 * (gpu_read(gpu, REG_A6XX_CP_SQE_UCODE_DBG_DATA) >> 20);
+}
+
/* Read a block of data from an indexed register pair */
static void a6xx_get_indexed_regs(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
@@ -1019,8 +1040,40 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
/* Restore the size in the hardware */
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, mempool_size);
+}
- a6xx_state->nr_indexed_regs = count;
+static void a7xx_get_indexed_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ int i, indexed_count, mempool_count;
+
+ indexed_count = ARRAY_SIZE(a7xx_indexed_reglist);
+ mempool_count = ARRAY_SIZE(a7xx_cp_bv_mempool_indexed);
+
+ a6xx_state->indexed_regs = state_kcalloc(a6xx_state,
+ indexed_count + mempool_count,
+ sizeof(*a6xx_state->indexed_regs));
+ if (!a6xx_state->indexed_regs)
+ return;
+
+ a6xx_state->nr_indexed_regs = indexed_count + mempool_count;
+
+ /* First read the common regs */
+ for (i = 0; i < indexed_count; i++)
+ a6xx_get_indexed_regs(gpu, a6xx_state, &a7xx_indexed_reglist[i],
+ &a6xx_state->indexed_regs[i]);
+
+ gpu_rmw(gpu, REG_A6XX_CP_CHICKEN_DBG, 0, BIT(2));
+ gpu_rmw(gpu, REG_A7XX_CP_BV_CHICKEN_DBG, 0, BIT(2));
+
+ /* Get the contents of the CP_BV mempool */
+ for (i = 0; i < mempool_count; i++)
+ a6xx_get_indexed_regs(gpu, a6xx_state, &a7xx_cp_bv_mempool_indexed[i],
+ &a6xx_state->indexed_regs[indexed_count + i]);
+
+ gpu_rmw(gpu, REG_A6XX_CP_CHICKEN_DBG, BIT(2), 0);
+ gpu_rmw(gpu, REG_A7XX_CP_BV_CHICKEN_DBG, BIT(2), 0);
}
struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
@@ -1056,6 +1109,12 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
return &a6xx_state->base;
/* Get the banks of indexed registers */
+ if (adreno_is_a7xx(adreno_gpu)) {
+ a7xx_get_indexed_registers(gpu, a6xx_state);
+ /* Further codeflow is untested on A7xx. */
+ return &a6xx_state->base;
+ }
+
a6xx_get_indexed_registers(gpu, a6xx_state);
/*
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index e788ed72eb0d..9560fc1b858a 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -328,9 +328,8 @@ static const u32 a6xx_gbif_registers[] = {
0x3C00, 0X3C0B, 0X3C40, 0X3C47, 0X3CC0, 0X3CD1, 0xE3A, 0xE3A,
};
-static const struct a6xx_registers a6xx_ahb_reglist[] = {
- REGS(a6xx_ahb_registers, 0, 0),
-};
+static const struct a6xx_registers a6xx_ahb_reglist =
+ REGS(a6xx_ahb_registers, 0, 0);
static const struct a6xx_registers a6xx_vbif_reglist =
REGS(a6xx_vbif_registers, 0, 0);
@@ -338,6 +337,27 @@ static const struct a6xx_registers a6xx_vbif_reglist =
static const struct a6xx_registers a6xx_gbif_reglist =
REGS(a6xx_gbif_registers, 0, 0);
+static const u32 a7xx_ahb_registers[] = {
+ /* RBBM_STATUS */
+ 0x210, 0x210,
+ /* RBBM_STATUS2-3 */
+ 0x212, 0x213,
+};
+
+static const u32 a7xx_gbif_registers[] = {
+ 0x3c00, 0x3c0b,
+ 0x3c40, 0x3c42,
+ 0x3c45, 0x3c47,
+ 0x3c49, 0x3c4a,
+ 0x3cc0, 0x3cd1,
+};
+
+static const struct a6xx_registers a7xx_ahb_reglist =
+ REGS(a7xx_ahb_registers, 0, 0);
+
+static const struct a6xx_registers a7xx_gbif_reglist =
+ REGS(a7xx_gbif_registers, 0, 0);
+
static const u32 a6xx_gmu_gx_registers[] = {
/* GMU GX */
0x0000, 0x0000, 0x0010, 0x0013, 0x0016, 0x0016, 0x0018, 0x001b,
@@ -384,14 +404,17 @@ static const struct a6xx_registers a6xx_gmu_reglist[] = {
};
static u32 a6xx_get_cp_roq_size(struct msm_gpu *gpu);
+static u32 a7xx_get_cp_roq_size(struct msm_gpu *gpu);
-static struct a6xx_indexed_registers {
+struct a6xx_indexed_registers {
const char *name;
u32 addr;
u32 data;
u32 count;
u32 (*count_fn)(struct msm_gpu *gpu);
-} a6xx_indexed_reglist[] = {
+};
+
+static struct a6xx_indexed_registers a6xx_indexed_reglist[] = {
{ "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
REG_A6XX_CP_SQE_STAT_DATA, 0x33, NULL },
{ "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
@@ -402,11 +425,43 @@ static struct a6xx_indexed_registers {
REG_A6XX_CP_ROQ_DBG_DATA, 0, a6xx_get_cp_roq_size},
};
+static struct a6xx_indexed_registers a7xx_indexed_reglist[] = {
+ { "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
+ REG_A6XX_CP_SQE_STAT_DATA, 0x33, NULL },
+ { "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
+ REG_A6XX_CP_DRAW_STATE_DATA, 0x100, NULL },
+ { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
+ REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x8000, NULL },
+ { "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_SQE_STAT_ADDR,
+ REG_A7XX_CP_BV_SQE_STAT_DATA, 0x33, NULL },
+ { "CP_BV_DRAW_STATE_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
+ REG_A7XX_CP_BV_DRAW_STATE_DATA, 0x100, NULL },
+ { "CP_BV_SQE_UCODE_DBG_ADDR", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR,
+ REG_A7XX_CP_BV_SQE_UCODE_DBG_DATA, 0x8000, NULL },
+ { "CP_SQE_AC_STAT_ADDR", REG_A7XX_CP_SQE_AC_STAT_ADDR,
+ REG_A7XX_CP_SQE_AC_STAT_DATA, 0x33, NULL },
+ { "CP_LPAC_DRAW_STATE_ADDR", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
+ REG_A7XX_CP_LPAC_DRAW_STATE_DATA, 0x100, NULL },
+ { "CP_SQE_AC_UCODE_DBG_ADDR", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR,
+ REG_A7XX_CP_SQE_AC_UCODE_DBG_DATA, 0x8000, NULL },
+ { "CP_LPAC_FIFO_DBG_ADDR", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR,
+ REG_A7XX_CP_LPAC_FIFO_DBG_DATA, 0x40, NULL },
+ { "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
+ REG_A6XX_CP_ROQ_DBG_DATA, 0, a7xx_get_cp_roq_size },
+};
+
static struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
"CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060, NULL,
};
+static struct a6xx_indexed_registers a7xx_cp_bv_mempool_indexed[] = {
+ { "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
+ REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2100, NULL },
+ { "CP_BV_MEMPOOL", REG_A7XX_CP_BV_MEM_POOL_DBG_ADDR,
+ REG_A7XX_CP_BV_MEM_POOL_DBG_DATA, 0x2100, NULL },
+};
+
#define DEBUGBUS(_id, _count) { .id = _id, .name = #_id, .count = _count }
static const struct a6xx_debugbus_block {
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index 25b235b49ebc..cdb3f6e74d3e 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -5,6 +5,8 @@
#include <linux/circ_buf.h>
#include <linux/list.h>
+#include <soc/qcom/cmd-db.h>
+
#include "a6xx_gmu.h"
#include "a6xx_gmu.xml.h"
#include "a6xx_gpu.h"
@@ -506,6 +508,88 @@ static void adreno_7c3_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
msg->cnoc_cmds_data[0][0] = 0x40000000;
msg->cnoc_cmds_data[1][0] = 0x60000001;
}
+
+static void a730_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ msg->bw_level_num = 12;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x7;
+
+ msg->ddr_cmds_addrs[0] = cmd_db_read_addr("SH0");
+ msg->ddr_cmds_addrs[1] = cmd_db_read_addr("MC0");
+ msg->ddr_cmds_addrs[2] = cmd_db_read_addr("ACV");
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
+ msg->ddr_cmds_data[1][0] = 0x600002e8;
+ msg->ddr_cmds_data[1][1] = 0x600003d0;
+ msg->ddr_cmds_data[1][2] = 0x60000008;
+ msg->ddr_cmds_data[2][0] = 0x6000068d;
+ msg->ddr_cmds_data[2][1] = 0x6000089a;
+ msg->ddr_cmds_data[2][2] = 0x60000008;
+ msg->ddr_cmds_data[3][0] = 0x600007f2;
+ msg->ddr_cmds_data[3][1] = 0x60000a6e;
+ msg->ddr_cmds_data[3][2] = 0x60000008;
+ msg->ddr_cmds_data[4][0] = 0x600009e5;
+ msg->ddr_cmds_data[4][1] = 0x60000cfd;
+ msg->ddr_cmds_data[4][2] = 0x60000008;
+ msg->ddr_cmds_data[5][0] = 0x60000b29;
+ msg->ddr_cmds_data[5][1] = 0x60000ea6;
+ msg->ddr_cmds_data[5][2] = 0x60000008;
+ msg->ddr_cmds_data[6][0] = 0x60001698;
+ msg->ddr_cmds_data[6][1] = 0x60001da8;
+ msg->ddr_cmds_data[6][2] = 0x60000008;
+ msg->ddr_cmds_data[7][0] = 0x600018d2;
+ msg->ddr_cmds_data[7][1] = 0x60002093;
+ msg->ddr_cmds_data[7][2] = 0x60000008;
+ msg->ddr_cmds_data[8][0] = 0x60001e66;
+ msg->ddr_cmds_data[8][1] = 0x600027e6;
+ msg->ddr_cmds_data[8][2] = 0x60000008;
+ msg->ddr_cmds_data[9][0] = 0x600027c2;
+ msg->ddr_cmds_data[9][1] = 0x6000342f;
+ msg->ddr_cmds_data[9][2] = 0x60000008;
+ msg->ddr_cmds_data[10][0] = 0x60002e71;
+ msg->ddr_cmds_data[10][1] = 0x60003cf5;
+ msg->ddr_cmds_data[10][2] = 0x60000008;
+ msg->ddr_cmds_data[11][0] = 0x600030ae;
+ msg->ddr_cmds_data[11][1] = 0x60003fe5;
+ msg->ddr_cmds_data[11][2] = 0x60000008;
+
+ msg->cnoc_cmds_num = 1;
+ msg->cnoc_wait_bitmask = 0x1;
+
+ msg->cnoc_cmds_addrs[0] = cmd_db_read_addr("CN0");
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
+
+static void a740_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ msg->bw_level_num = 1;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x7;
+
+ msg->ddr_cmds_addrs[0] = cmd_db_read_addr("SH0");
+ msg->ddr_cmds_addrs[1] = cmd_db_read_addr("MC0");
+ msg->ddr_cmds_addrs[2] = cmd_db_read_addr("ACV");
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
+
+ /* TODO: add a proper dvfs table */
+
+ msg->cnoc_cmds_num = 1;
+ msg->cnoc_wait_bitmask = 0x1;
+
+ msg->cnoc_cmds_addrs[0] = cmd_db_read_addr("CN0");
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
+
static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
/* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
@@ -564,6 +648,10 @@ static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
a660_build_bw_table(&msg);
else if (adreno_is_a690(adreno_gpu))
a690_build_bw_table(&msg);
+ else if (adreno_is_a730(adreno_gpu))
+ a730_build_bw_table(&msg);
+ else if (adreno_is_a740_family(adreno_gpu))
+ a740_build_bw_table(&msg);
else
a6xx_build_bw_table(&msg);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index ce8d0b2475bf..41b13dec9bef 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -22,9 +22,9 @@ module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600);
static const struct adreno_info gpulist[] = {
{
- .rev = ADRENO_REV(2, 0, 0, 0),
+ .chip_ids = ADRENO_CHIP_IDS(0x02000000),
+ .family = ADRENO_2XX_GEN1,
.revn = 200,
- .name = "A200",
.fw = {
[ADRENO_FW_PM4] = "yamato_pm4.fw",
[ADRENO_FW_PFP] = "yamato_pfp.fw",
@@ -33,9 +33,9 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a2xx_gpu_init,
}, { /* a200 on i.mx51 has only 128kib gmem */
- .rev = ADRENO_REV(2, 0, 0, 1),
+ .chip_ids = ADRENO_CHIP_IDS(0x02000001),
+ .family = ADRENO_2XX_GEN1,
.revn = 201,
- .name = "A200",
.fw = {
[ADRENO_FW_PM4] = "yamato_pm4.fw",
[ADRENO_FW_PFP] = "yamato_pfp.fw",
@@ -44,9 +44,9 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a2xx_gpu_init,
}, {
- .rev = ADRENO_REV(2, 2, 0, ANY_ID),
+ .chip_ids = ADRENO_CHIP_IDS(0x02020000),
+ .family = ADRENO_2XX_GEN2,
.revn = 220,
- .name = "A220",
.fw = {
[ADRENO_FW_PM4] = "leia_pm4_470.fw",
[ADRENO_FW_PFP] = "leia_pfp_470.fw",
@@ -55,9 +55,12 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a2xx_gpu_init,
}, {
- .rev = ADRENO_REV(3, 0, 5, ANY_ID),
+ .chip_ids = ADRENO_CHIP_IDS(
+ 0x03000512,
+ 0x03000520
+ ),
+ .family = ADRENO_3XX,
.revn = 305,
- .name = "A305",
.fw = {
[ADRENO_FW_PM4] = "a300_pm4.fw",
[ADRENO_FW_PFP] = "a300_pfp.fw",
@@ -66,9 +69,9 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
- .rev = ADRENO_REV(3, 0, 6, 0),
+ .chip_ids = ADRENO_CHIP_IDS(0x03000600),
+ .family = ADRENO_3XX,
.revn = 307, /* because a305c is revn==306 */
- .name = "A306",
.fw = {
[ADRENO_FW_PM4] = "a300_pm4.fw",
[ADRENO_FW_PFP] = "a300_pfp.fw",
@@ -77,9 +80,13 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
- .rev = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
+ .chip_ids = ADRENO_CHIP_IDS(
+ 0x03020000,
+ 0x03020001,
+ 0x03020002
+ ),
+ .family = ADRENO_3XX,
.revn = 320,
- .name = "A320",
.fw = {
[ADRENO_FW_PM4] = "a300_pm4.fw",
[ADRENO_FW_PFP] = "a300_pfp.fw",
@@ -88,9 +95,13 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
- .rev = ADRENO_REV(3, 3, 0, ANY_ID),
+ .chip_ids = ADRENO_CHIP_IDS(
+ 0x03030000,
+ 0x03030001,
+ 0x03030002
+ ),
+ .family = ADRENO_3XX,
.revn = 330,
- .name = "A330",
.fw = {
[ADRENO_FW_PM4] = "a330_pm4.fw",
[ADRENO_FW_PFP] = "a330_pfp.fw",
@@ -99,9 +110,9 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
- .rev = ADRENO_REV(4, 0, 5, ANY_ID),
+ .chip_ids = ADRENO_CHIP_IDS(0x04000500),
+ .family = ADRENO_4XX,
.revn = 405,
- .name = "A405",
.fw = {
[ADRENO_FW_PM4] = "a420_pm4.fw",
[ADRENO_FW_PFP] = "a420_pfp.fw",
@@ -110,9 +121,9 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
- .rev = ADRENO_REV(4, 2, 0, ANY_ID),
+ .chip_ids = ADRENO_CHIP_IDS(0x04020000),
+ .family = ADRENO_4XX,
.revn = 420,
- .name = "A420",
.fw = {
[ADRENO_FW_PM4] = "a420_pm4.fw",
[ADRENO_FW_PFP] = "a420_pfp.fw",
@@ -121,9 +132,9 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
- .rev = ADRENO_REV(4, 3, 0, ANY_ID),
+ .chip_ids = ADRENO_CHIP_IDS(0x04030002),
+ .family = ADRENO_4XX,
.revn = 430,
- .name = "A430",
.fw = {
[ADRENO_FW_PM4] = "a420_pm4.fw",
[ADRENO_FW_PFP] = "a420_pfp.fw",
@@ -132,9 +143,9 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
- .rev = ADRENO_REV(5, 0, 6, ANY_ID),
+ .chip_ids = ADRENO_CHIP_IDS(0x05000600),
+ .family = ADRENO_5XX,
.revn = 506,
- .name = "A506",
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
@@ -150,9 +161,9 @@ static const struct adreno_info gpulist[] = {
.init = a5xx_gpu_init,
.zapfw = "a506_zap.mdt",
}, {
- .rev = ADRENO_REV(5, 0, 8, ANY_ID),
+ .chip_ids = ADRENO_CHIP_IDS(0x05000800),
+ .family = ADRENO_5XX,
.revn = 508,
- .name = "A508",
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
@@ -167,9 +178,9 @@ static const struct adreno_info gpulist[] = {
.init = a5xx_gpu_init,
.zapfw = "a508_zap.mdt",
}, {
- .rev = ADRENO_REV(5, 0, 9, ANY_ID),
+ .chip_ids = ADRENO_CHIP_IDS(0x05000900),
+ .family = ADRENO_5XX,
.revn = 509,
- .name = "A509",
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
@@ -185,9 +196,9 @@ static const struct adreno_info gpulist[] = {
/* Adreno 509 uses the same ZAP as 512 */
.zapfw = "a512_zap.mdt",
}, {
- .rev = ADRENO_REV(5, 1, 0, ANY_ID),
+ .chip_ids = ADRENO_CHIP_IDS(0x05010000),
+ .family = ADRENO_5XX,
.revn = 510,
- .name = "A510",
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
@@ -200,9 +211,9 @@ static const struct adreno_info gpulist[] = {
.inactive_period = 250,
.init = a5xx_gpu_init,
}, {
- .rev = ADRENO_REV(5, 1, 2, ANY_ID),
+ .chip_ids = ADRENO_CHIP_IDS(0x05010200),
+ .family = ADRENO_5XX,
.revn = 512,
- .name = "A512",
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
@@ -217,9 +228,12 @@ static const struct adreno_info gpulist[] = {
.init = a5xx_gpu_init,
.zapfw = "a512_zap.mdt",
}, {
- .rev = ADRENO_REV(5, 3, 0, 2),
+ .chip_ids = ADRENO_CHIP_IDS(
+ 0x05030002,
+ 0x05030004
+ ),
+ .family = ADRENO_5XX,
.revn = 530,
- .name = "A530",
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
@@ -236,9 +250,9 @@ static const struct adreno_info gpulist[] = {
.init = a5xx_gpu_init,
.zapfw = "a530_zap.mdt",
}, {
- .rev = ADRENO_REV(5, 4, 0, ANY_ID),
+ .chip_ids = ADRENO_CHIP_IDS(0x05040001),
+ .family = ADRENO_5XX,
.revn = 540,
- .name = "A540",
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
@@ -254,9 +268,9 @@ static const struct adreno_info gpulist[] = {
.init = a5xx_gpu_init,
.zapfw = "a540_zap.mdt",
}, {
- .rev = ADRENO_REV(6, 1, 0, ANY_ID),
+ .chip_ids = ADRENO_CHIP_IDS(0x06010000),
+ .family = ADRENO_6XX_GEN1,
.revn = 610,
- .name = "A610",
.fw = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
},
@@ -265,21 +279,42 @@ static const struct adreno_info gpulist[] = {
.init = a6xx_gpu_init,
.zapfw = "a610_zap.mdt",
.hwcg = a612_hwcg,
+ /*
+ * There are (at least) three SoCs implementing A610: SM6125
+ * (trinket), SM6115 (bengal) and SM6225 (khaje). Trinket does
+ * not have speedbinning, as only a single SKU exists and we
+ * don't support khaje upstream yet. Hence, this matching
+ * table is only valid for bengal.
+ */
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 206, 1 },
+ { 200, 2 },
+ { 157, 3 },
+ { 127, 4 },
+ ),
}, {
- .rev = ADRENO_REV(6, 1, 8, ANY_ID),
+ .chip_ids = ADRENO_CHIP_IDS(0x06010800),
+ .family = ADRENO_6XX_GEN1,
.revn = 618,
- .name = "A618",
.fw = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
[ADRENO_FW_GMU] = "a630_gmu.bin",
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 169, 1 },
+ { 174, 2 },
+ ),
}, {
- .rev = ADRENO_REV(6, 1, 9, ANY_ID),
+ .machine = "qcom,sm4350",
+ .chip_ids = ADRENO_CHIP_IDS(0x06010900),
+ .family = ADRENO_6XX_GEN1,
.revn = 619,
- .name = "A619",
.fw = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
[ADRENO_FW_GMU] = "a619_gmu.bin",
@@ -289,96 +324,204 @@ static const struct adreno_info gpulist[] = {
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mdt",
.hwcg = a615_hwcg,
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 138, 1 },
+ { 92, 2 },
+ ),
}, {
- .rev = ADRENO_REV(6, 3, 0, ANY_ID),
+ .machine = "qcom,sm6375",
+ .chip_ids = ADRENO_CHIP_IDS(0x06010901),
+ .family = ADRENO_6XX_GEN1,
+ .revn = 619,
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a619_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ .zapfw = "a615_zap.mdt",
+ .hwcg = a615_hwcg,
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 190, 1 },
+ { 177, 2 },
+ ),
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x06010900),
+ .family = ADRENO_6XX_GEN1,
+ .revn = 619,
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a619_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
+ .init = a6xx_gpu_init,
+ .zapfw = "a615_zap.mdt",
+ .hwcg = a615_hwcg,
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 120, 4 },
+ { 138, 3 },
+ { 169, 2 },
+ { 180, 1 },
+ ),
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(
+ 0x06030001,
+ 0x06030002
+ ),
+ .family = ADRENO_6XX_GEN1,
.revn = 630,
- .name = "A630",
.fw = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
[ADRENO_FW_GMU] = "a630_gmu.bin",
},
.gmem = SZ_1M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a630_zap.mdt",
.hwcg = a630_hwcg,
}, {
- .rev = ADRENO_REV(6, 4, 0, ANY_ID),
+ .chip_ids = ADRENO_CHIP_IDS(0x06040001),
+ .family = ADRENO_6XX_GEN2,
.revn = 640,
- .name = "A640",
.fw = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
[ADRENO_FW_GMU] = "a640_gmu.bin",
},
.gmem = SZ_1M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a640_zap.mdt",
.hwcg = a640_hwcg,
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 1, 1 },
+ ),
}, {
- .rev = ADRENO_REV(6, 5, 0, ANY_ID),
+ .chip_ids = ADRENO_CHIP_IDS(0x06050002),
+ .family = ADRENO_6XX_GEN3,
.revn = 650,
- .name = "A650",
.fw = {
[ADRENO_FW_SQE] = "a650_sqe.fw",
[ADRENO_FW_GMU] = "a650_gmu.bin",
},
.gmem = SZ_1M + SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV,
.init = a6xx_gpu_init,
.zapfw = "a650_zap.mdt",
.hwcg = a650_hwcg,
.address_space_size = SZ_16G,
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 1, 1 },
+ { 2, 3 }, /* Yep, 2 and 3 are swapped! :/ */
+ { 3, 2 },
+ ),
}, {
- .rev = ADRENO_REV(6, 6, 0, ANY_ID),
+ .chip_ids = ADRENO_CHIP_IDS(0x06060001),
+ .family = ADRENO_6XX_GEN4,
.revn = 660,
- .name = "A660",
.fw = {
[ADRENO_FW_SQE] = "a660_sqe.fw",
[ADRENO_FW_GMU] = "a660_gmu.bin",
},
.gmem = SZ_1M + SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV,
.init = a6xx_gpu_init,
.zapfw = "a660_zap.mdt",
.hwcg = a660_hwcg,
.address_space_size = SZ_16G,
}, {
- .rev = ADRENO_REV(6, 3, 5, ANY_ID),
+ .chip_ids = ADRENO_CHIP_IDS(0x06030500),
+ .family = ADRENO_6XX_GEN4,
.fw = {
[ADRENO_FW_SQE] = "a660_sqe.fw",
[ADRENO_FW_GMU] = "a660_gmu.bin",
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV,
.init = a6xx_gpu_init,
+ .zapfw = "a660_zap.mbn",
.hwcg = a660_hwcg,
.address_space_size = SZ_16G,
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 117, 0 },
+ { 172, 2 }, /* Called speedbin 1 downstream, but let's not break things! */
+ { 190, 1 },
+ ),
}, {
- .rev = ADRENO_REV(6, 8, 0, ANY_ID),
+ .chip_ids = ADRENO_CHIP_IDS(0x06080000),
+ .family = ADRENO_6XX_GEN2,
.revn = 680,
- .name = "A680",
.fw = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
[ADRENO_FW_GMU] = "a640_gmu.bin",
},
.gmem = SZ_2M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a640_zap.mdt",
.hwcg = a640_hwcg,
}, {
- .rev = ADRENO_REV(6, 9, 0, ANY_ID),
+ .chip_ids = ADRENO_CHIP_IDS(0x06090000),
+ .family = ADRENO_6XX_GEN4,
.fw = {
[ADRENO_FW_SQE] = "a660_sqe.fw",
- [ADRENO_FW_GMU] = "a690_gmu.bin",
+ [ADRENO_FW_GMU] = "a660_gmu.bin",
},
.gmem = SZ_4M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV,
.init = a6xx_gpu_init,
.zapfw = "a690_zap.mdt",
.hwcg = a690_hwcg,
.address_space_size = SZ_16G,
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x07030001),
+ .family = ADRENO_7XX_GEN1,
+ .fw = {
+ [ADRENO_FW_SQE] = "a730_sqe.fw",
+ [ADRENO_FW_GMU] = "gmu_gen70000.bin",
+ },
+ .gmem = SZ_2M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV,
+ .init = a6xx_gpu_init,
+ .zapfw = "a730_zap.mdt",
+ .hwcg = a730_hwcg,
+ .address_space_size = SZ_16G,
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x43050a01), /* "C510v2" */
+ .family = ADRENO_7XX_GEN2,
+ .fw = {
+ [ADRENO_FW_SQE] = "a740_sqe.fw",
+ [ADRENO_FW_GMU] = "gmu_gen70200.bin",
+ },
+ .gmem = 3 * SZ_1M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV,
+ .init = a6xx_gpu_init,
+ .zapfw = "a740_zap.mdt",
+ .hwcg = a740_hwcg,
+ .address_space_size = SZ_16G,
},
};
@@ -395,34 +538,31 @@ MODULE_FIRMWARE("qcom/a530_zap.mdt");
MODULE_FIRMWARE("qcom/a530_zap.b00");
MODULE_FIRMWARE("qcom/a530_zap.b01");
MODULE_FIRMWARE("qcom/a530_zap.b02");
+MODULE_FIRMWARE("qcom/a540_gpmu.fw2");
MODULE_FIRMWARE("qcom/a619_gmu.bin");
MODULE_FIRMWARE("qcom/a630_sqe.fw");
MODULE_FIRMWARE("qcom/a630_gmu.bin");
MODULE_FIRMWARE("qcom/a630_zap.mbn");
-
-static inline bool _rev_match(uint8_t entry, uint8_t id)
-{
- return (entry == ANY_ID) || (entry == id);
-}
-
-bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2)
-{
-
- return _rev_match(rev1.core, rev2.core) &&
- _rev_match(rev1.major, rev2.major) &&
- _rev_match(rev1.minor, rev2.minor) &&
- _rev_match(rev1.patchid, rev2.patchid);
-}
-
-const struct adreno_info *adreno_info(struct adreno_rev rev)
+MODULE_FIRMWARE("qcom/a640_gmu.bin");
+MODULE_FIRMWARE("qcom/a650_gmu.bin");
+MODULE_FIRMWARE("qcom/a650_sqe.fw");
+MODULE_FIRMWARE("qcom/a660_gmu.bin");
+MODULE_FIRMWARE("qcom/a660_sqe.fw");
+MODULE_FIRMWARE("qcom/leia_pfp_470.fw");
+MODULE_FIRMWARE("qcom/leia_pm4_470.fw");
+MODULE_FIRMWARE("qcom/yamato_pfp.fw");
+MODULE_FIRMWARE("qcom/yamato_pm4.fw");
+
+static const struct adreno_info *adreno_info(uint32_t chip_id)
{
- int i;
-
/* identify gpu: */
- for (i = 0; i < ARRAY_SIZE(gpulist); i++) {
+ for (int i = 0; i < ARRAY_SIZE(gpulist); i++) {
const struct adreno_info *info = &gpulist[i];
- if (adreno_cmp_rev(info->rev, rev))
- return info;
+ if (info->machine && !of_machine_is_compatible(info->machine))
+ continue;
+ for (int j = 0; info->chip_ids[j]; j++)
+ if (info->chip_ids[j] == chip_id)
+ return info;
}
return NULL;
@@ -502,12 +642,11 @@ err_disable_rpm:
return NULL;
}
-static int find_chipid(struct device *dev, struct adreno_rev *rev)
+static int find_chipid(struct device *dev, uint32_t *chipid)
{
struct device_node *node = dev->of_node;
const char *compat;
int ret;
- u32 chipid;
/* first search the compat strings for qcom,adreno-XYZ.W: */
ret = of_property_read_string_index(node, "compatible", 0, &compat);
@@ -516,32 +655,34 @@ static int find_chipid(struct device *dev, struct adreno_rev *rev)
if (sscanf(compat, "qcom,adreno-%u.%u", &r, &patch) == 2 ||
sscanf(compat, "amd,imageon-%u.%u", &r, &patch) == 2) {
- rev->core = r / 100;
+ uint32_t core, major, minor;
+
+ core = r / 100;
r %= 100;
- rev->major = r / 10;
+ major = r / 10;
r %= 10;
- rev->minor = r;
- rev->patchid = patch;
+ minor = r;
+
+ *chipid = (core << 24) |
+ (major << 16) |
+ (minor << 8) |
+ patch;
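+
+ /* e.g. "qcom,adreno-630.2" gives core=6, major=3, minor=0, patch=2, i.e. chip_id 0x06030002 */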
return 0;
}
+
+ if (sscanf(compat, "qcom,adreno-%08x", chipid) == 1)
+ return 0;
}
/* and if that fails, fall back to legacy "qcom,chipid" property: */
- ret = of_property_read_u32(node, "qcom,chipid", &chipid);
+ ret = of_property_read_u32(node, "qcom,chipid", chipid);
if (ret) {
DRM_DEV_ERROR(dev, "could not parse qcom,chipid: %d\n", ret);
return ret;
}
- rev->core = (chipid >> 24) & 0xff;
- rev->major = (chipid >> 16) & 0xff;
- rev->minor = (chipid >> 8) & 0xff;
- rev->patchid = (chipid & 0xff);
-
dev_warn(dev, "Using legacy qcom,chipid binding!\n");
- dev_warn(dev, "Use compatible qcom,adreno-%u%u%u.%u instead.\n",
- rev->core, rev->major, rev->minor, rev->patchid);
return 0;
}
@@ -555,26 +696,27 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
struct msm_gpu *gpu;
int ret;
- ret = find_chipid(dev, &config.rev);
+ ret = find_chipid(dev, &config.chip_id);
if (ret)
return ret;
dev->platform_data = &config;
priv->gpu_pdev = to_platform_device(dev);
- info = adreno_info(config.rev);
-
+ info = adreno_info(config.chip_id);
if (!info) {
- dev_warn(drm->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
- config.rev.core, config.rev.major,
- config.rev.minor, config.rev.patchid);
+ dev_warn(drm->dev, "Unknown GPU revision: %"ADRENO_CHIPID_FMT"\n",
+ ADRENO_CHIPID_ARGS(config.chip_id));
return -ENXIO;
}
- DBG("Found GPU: %u.%u.%u.%u", config.rev.core, config.rev.major,
- config.rev.minor, config.rev.patchid);
+ config.info = info;
- priv->is_a2xx = config.rev.core == 2;
+ DBG("Found GPU: %"ADRENO_CHIPID_FMT, ADRENO_CHIPID_ARGS(config.chip_id));
+
+ priv->is_a2xx = info->family < ADRENO_3XX;
+ priv->has_cached_coherent =
+ !!(info->quirks & ADRENO_QUIRK_HAS_CACHED_COHERENT);
gpu = info->init(drm);
if (IS_ERR(gpu)) {
@@ -586,10 +728,6 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- if (config.rev.core >= 6)
- if (!adreno_has_gmu_wrapper(to_adreno_gpu(gpu)))
- priv->has_cached_coherent = true;
-
return 0;
}
@@ -645,10 +783,9 @@ static int adreno_probe(struct platform_device *pdev)
return 0;
}
-static int adreno_remove(struct platform_device *pdev)
+static void adreno_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &a3xx_ops);
- return 0;
}
static void adreno_shutdown(struct platform_device *pdev)
@@ -763,7 +900,7 @@ static const struct dev_pm_ops adreno_pm_ops = {
static struct platform_driver adreno_driver = {
.probe = adreno_probe,
- .remove = adreno_remove,
+ .remove_new = adreno_remove,
.shutdown = adreno_shutdown,
.driver = {
.name = "adreno",
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 5c5901d65950..3fe9fd240cc7 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -320,16 +320,17 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
*value = adreno_gpu->info->revn;
return 0;
case MSM_PARAM_GMEM_SIZE:
- *value = adreno_gpu->gmem;
+ *value = adreno_gpu->info->gmem;
return 0;
case MSM_PARAM_GMEM_BASE:
- *value = !adreno_is_a650_family(adreno_gpu) ? 0x100000 : 0;
+ if (adreno_is_a650_family(adreno_gpu) ||
+ adreno_is_a740_family(adreno_gpu))
+ *value = 0;
+ else
+ *value = 0x100000;
return 0;
case MSM_PARAM_CHIP_ID:
- *value = (uint64_t)adreno_gpu->rev.patchid |
- ((uint64_t)adreno_gpu->rev.minor << 8) |
- ((uint64_t)adreno_gpu->rev.major << 16) |
- ((uint64_t)adreno_gpu->rev.core << 24);
+ *value = adreno_gpu->chip_id;
if (!adreno_gpu->info->revn)
*value |= ((uint64_t) adreno_gpu->speedbin) << 32;
return 0;
@@ -400,17 +401,9 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
case MSM_PARAM_CMDLINE: {
char *str, **paramp;
- str = kmalloc(len + 1, GFP_KERNEL);
- if (!str)
- return -ENOMEM;
-
- if (copy_from_user(str, u64_to_user_ptr(value), len)) {
- kfree(str);
- return -EFAULT;
- }
-
- /* Ensure string is null terminated: */
- str[len] = '\0';
+ str = memdup_user_nul(u64_to_user_ptr(value), len);
+ if (IS_ERR(str))
+ return PTR_ERR(str);
mutex_lock(&gpu->lock);
@@ -578,6 +571,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
ring->cur = ring->start;
ring->next = ring->start;
ring->memptrs->rptr = 0;
+ ring->memptrs->bv_fence = ring->fctx->completed_fence;
/* Detect and clean up an impossible fence, ie. if GPU managed
* to scribble something invalid, we don't want that to confuse
@@ -847,10 +841,9 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
if (IS_ERR_OR_NULL(state))
return;
- drm_printf(p, "revision: %d (%d.%d.%d.%d)\n",
- adreno_gpu->info->revn, adreno_gpu->rev.core,
- adreno_gpu->rev.major, adreno_gpu->rev.minor,
- adreno_gpu->rev.patchid);
+ drm_printf(p, "revision: %u (%"ADRENO_CHIPID_FMT")\n",
+ adreno_gpu->info->revn,
+ ADRENO_CHIPID_ARGS(adreno_gpu->chip_id));
/*
* If this is state collected due to iova fault, so fault related info
*
@@ -921,10 +914,9 @@ void adreno_dump_info(struct msm_gpu *gpu)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int i;
- printk("revision: %d (%d.%d.%d.%d)\n",
- adreno_gpu->info->revn, adreno_gpu->rev.core,
- adreno_gpu->rev.major, adreno_gpu->rev.minor,
- adreno_gpu->rev.patchid);
+ printk("revision: %u (%"ADRENO_CHIPID_FMT")\n",
+ adreno_gpu->info->revn,
+ ADRENO_CHIPID_ARGS(adreno_gpu->chip_id));
for (i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
@@ -1041,14 +1033,16 @@ int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
return PTR_ERR(ocmem);
}
- ocmem_hdl = ocmem_allocate(ocmem, OCMEM_GRAPHICS, adreno_gpu->gmem);
+ ocmem_hdl = ocmem_allocate(ocmem, OCMEM_GRAPHICS, adreno_gpu->info->gmem);
if (IS_ERR(ocmem_hdl))
return PTR_ERR(ocmem_hdl);
adreno_ocmem->ocmem = ocmem;
adreno_ocmem->base = ocmem_hdl->addr;
adreno_ocmem->hdl = ocmem_hdl;
- adreno_gpu->gmem = ocmem_hdl->len;
+
+ if (WARN_ON(ocmem_hdl->len != adreno_gpu->info->gmem))
+ return -ENOMEM;
return 0;
}
@@ -1073,13 +1067,19 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_platform_config *config = dev->platform_data;
struct msm_gpu_config adreno_gpu_config = { 0 };
struct msm_gpu *gpu = &adreno_gpu->base;
- struct adreno_rev *rev = &config->rev;
const char *gpu_name;
u32 speedbin;
int ret;
+ adreno_gpu->funcs = funcs;
+ adreno_gpu->info = config->info;
+ adreno_gpu->chip_id = config->chip_id;
+
+ gpu->allow_relocs = config->info->family < ADRENO_6XX_GEN1;
+
/* Only handle the core clock when GMU is not in use (or is absent). */
- if (adreno_has_gmu_wrapper(adreno_gpu) || config->rev.core < 6) {
+ if (adreno_has_gmu_wrapper(adreno_gpu) ||
+ adreno_gpu->info->family < ADRENO_6XX_GEN1) {
/*
* This can only be done before devm_pm_opp_of_add_table(), or
* dev_pm_opp_set_config() will WARN_ON()
@@ -1095,24 +1095,14 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
devm_pm_opp_set_clkname(dev, "core");
}
- adreno_gpu->funcs = funcs;
- adreno_gpu->info = adreno_info(config->rev);
- adreno_gpu->gmem = adreno_gpu->info->gmem;
- adreno_gpu->revn = adreno_gpu->info->revn;
- adreno_gpu->rev = *rev;
-
if (adreno_read_speedbin(dev, &speedbin) || !speedbin)
speedbin = 0xffff;
adreno_gpu->speedbin = (uint16_t) (0xffff & speedbin);
- gpu_name = adreno_gpu->info->name;
- if (!gpu_name) {
- gpu_name = devm_kasprintf(dev, GFP_KERNEL, "%d.%d.%d.%d",
- rev->core, rev->major, rev->minor,
- rev->patchid);
- if (!gpu_name)
- return -ENOMEM;
- }
+ gpu_name = devm_kasprintf(dev, GFP_KERNEL, "%"ADRENO_CHIPID_FMT,
+ ADRENO_CHIPID_ARGS(config->chip_id));
+ if (!gpu_name)
+ return -ENOMEM;
adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 845019891ad1..80b3f6312116 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -29,21 +29,42 @@ enum {
ADRENO_FW_MAX,
};
+/**
+ * enum adreno_family - identify generation and possibly sub-generation
+ *
+ * In some cases there are distinct sub-generations within a major revision
+ * so it helps to be able to group the GPU devices by generation and if
+ * necessary sub-generation.
+ */
+enum adreno_family {
+ ADRENO_2XX_GEN1, /* a20x */
+ ADRENO_2XX_GEN2, /* a22x */
+ ADRENO_3XX,
+ ADRENO_4XX,
+ ADRENO_5XX,
+ ADRENO_6XX_GEN1, /* a630 family */
+ ADRENO_6XX_GEN2, /* a640 family */
+ ADRENO_6XX_GEN3, /* a650 family */
+ ADRENO_6XX_GEN4, /* a660 family */
+ ADRENO_7XX_GEN1, /* a730 family */
+ ADRENO_7XX_GEN2, /* a740 family */
+};
+
#define ADRENO_QUIRK_TWO_PASS_USE_WFI BIT(0)
#define ADRENO_QUIRK_FAULT_DETECT_MASK BIT(1)
#define ADRENO_QUIRK_LMLOADKILL_DISABLE BIT(2)
+#define ADRENO_QUIRK_HAS_HW_APRIV BIT(3)
+#define ADRENO_QUIRK_HAS_CACHED_COHERENT BIT(4)
-struct adreno_rev {
- uint8_t core;
- uint8_t major;
- uint8_t minor;
- uint8_t patchid;
-};
-
-#define ANY_ID 0xff
-
-#define ADRENO_REV(core, major, minor, patchid) \
- ((struct adreno_rev){ core, major, minor, patchid })
+/* Helper for formatting the chip_id in the way that userspace tools like
+ * crashdec expect.
+ */
+#define ADRENO_CHIPID_FMT "u.%u.%u.%u"
+#define ADRENO_CHIPID_ARGS(_c) \
+ (((_c) >> 24) & 0xff), \
+ (((_c) >> 16) & 0xff), \
+ (((_c) >> 8) & 0xff), \
+ ((_c) & 0xff)
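+/* e.g. a chip_id of 0x06030001 is printed as "6.3.0.1" */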
struct adreno_gpu_funcs {
struct msm_gpu_funcs base;
@@ -56,12 +77,23 @@ struct adreno_reglist {
};
extern const struct adreno_reglist a612_hwcg[], a615_hwcg[], a630_hwcg[], a640_hwcg[], a650_hwcg[];
-extern const struct adreno_reglist a660_hwcg[], a690_hwcg[];
+extern const struct adreno_reglist a660_hwcg[], a690_hwcg[], a730_hwcg[], a740_hwcg[];
+
+struct adreno_speedbin {
+ uint16_t fuse;
+ uint16_t speedbin;
+};
struct adreno_info {
- struct adreno_rev rev;
+ const char *machine;
+ /**
+ * @chip_ids: Table of matching chip-ids
+ *
+ * Terminated with 0 sentinel
+ */
+ uint32_t *chip_ids;
+ enum adreno_family family;
uint32_t revn;
- const char *name;
const char *fw[ADRENO_FW_MAX];
uint32_t gmem;
u64 quirks;
@@ -70,16 +102,39 @@ struct adreno_info {
u32 inactive_period;
const struct adreno_reglist *hwcg;
u64 address_space_size;
+ /**
+ * @speedbins: Optional table of fuse to speedbin mappings
+ *
+ * Consists of pairs of fuse, index mappings, terminated with
+ * {SHRT_MAX, 0} sentinel.
+ */
+ struct adreno_speedbin *speedbins;
};
-const struct adreno_info *adreno_info(struct adreno_rev rev);
+#define ADRENO_CHIP_IDS(tbl...) (uint32_t[]) { tbl, 0 }
+
+/*
+ * Helper to build a speedbin table, ie. the table:
+ * fuse | speedbin
+ * -----+---------
+ * 0 | 0
+ * 169 | 1
+ * 174 | 2
+ *
+ * would be declared as:
+ *
+ * .speedbins = ADRENO_SPEEDBINS(
+ * { 0, 0 },
+ * { 169, 1 },
+ * { 174, 2 },
+ * ),
+ */
+#define ADRENO_SPEEDBINS(tbl...) (struct adreno_speedbin[]) { tbl {SHRT_MAX, 0} }
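+
+/*
+ * fuse_to_supp_hw() scans such a table up to the {SHRT_MAX, 0} terminator and
+ * returns BIT(speedbin) for a matching fuse (e.g. fuse 169 above -> BIT(1)),
+ * which is then handed to devm_pm_opp_set_supported_hw().
+ */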
struct adreno_gpu {
struct msm_gpu base;
- struct adreno_rev rev;
const struct adreno_info *info;
- uint32_t gmem; /* actual gmem size */
- uint32_t revn; /* numeric revision name */
+ uint32_t chip_id;
uint16_t speedbin;
const struct adreno_gpu_funcs *funcs;
@@ -128,7 +183,8 @@ struct adreno_ocmem {
/* platform config data (ie. from DT, or pdata) */
struct adreno_platform_config {
- struct adreno_rev rev;
+ uint32_t chip_id;
+ const struct adreno_info *info;
};
#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)
@@ -145,14 +201,21 @@ struct adreno_platform_config {
__ret; \
})
-bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2);
+static inline uint8_t adreno_patchid(const struct adreno_gpu *gpu)
+{
+ /* It is probably ok to assume legacy "adreno_rev" format
+ * for all a6xx devices, but probably best to limit this
+ * to older things.
+ */
+ WARN_ON_ONCE(gpu->info->family >= ADRENO_6XX_GEN1);
+ return gpu->chip_id & 0xff;
+}
static inline bool adreno_is_revn(const struct adreno_gpu *gpu, uint32_t revn)
{
- /* revn can be zero, but if not is set at same time as info */
- WARN_ON_ONCE(!gpu->info);
-
- return gpu->revn == revn;
+ if (WARN_ON_ONCE(!gpu->info))
+ return false;
+ return gpu->info->revn == revn;
}
static inline bool adreno_has_gmu_wrapper(const struct adreno_gpu *gpu)
@@ -162,18 +225,16 @@ static inline bool adreno_has_gmu_wrapper(const struct adreno_gpu *gpu)
static inline bool adreno_is_a2xx(const struct adreno_gpu *gpu)
{
- /* revn can be zero, but if not is set at same time as info */
- WARN_ON_ONCE(!gpu->info);
-
- return (gpu->revn < 300);
+ if (WARN_ON_ONCE(!gpu->info))
+ return false;
+ return gpu->info->family <= ADRENO_2XX_GEN2;
}
static inline bool adreno_is_a20x(const struct adreno_gpu *gpu)
{
- /* revn can be zero, but if not is set at same time as info */
- WARN_ON_ONCE(!gpu->info);
-
- return (gpu->revn < 210);
+ if (WARN_ON_ONCE(!gpu->info))
+ return false;
+ return gpu->info->family == ADRENO_2XX_GEN1;
}
static inline bool adreno_is_a225(const struct adreno_gpu *gpu)
@@ -204,7 +265,7 @@ static inline bool adreno_is_a330(const struct adreno_gpu *gpu)
static inline bool adreno_is_a330v2(const struct adreno_gpu *gpu)
{
- return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
+ return adreno_is_a330(gpu) && (adreno_patchid(gpu) > 0);
}
static inline int adreno_is_a405(const struct adreno_gpu *gpu)
@@ -294,8 +355,7 @@ static inline int adreno_is_a650(const struct adreno_gpu *gpu)
static inline int adreno_is_7c3(const struct adreno_gpu *gpu)
{
- /* The order of args is important here to handle ANY_ID correctly */
- return adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), gpu->rev);
+ return gpu->info->chip_ids[0] == 0x06030500;
}
static inline int adreno_is_a660(const struct adreno_gpu *gpu)
@@ -310,35 +370,63 @@ static inline int adreno_is_a680(const struct adreno_gpu *gpu)
static inline int adreno_is_a690(const struct adreno_gpu *gpu)
{
- /* The order of args is important here to handle ANY_ID correctly */
- return adreno_cmp_rev(ADRENO_REV(6, 9, 0, ANY_ID), gpu->rev);
-};
+ return gpu->info->chip_ids[0] == 0x06090000;
+}
-/* check for a615, a616, a618, a619 or any derivatives */
-static inline int adreno_is_a615_family(const struct adreno_gpu *gpu)
+/* check for a615, a616, a618, a619 or any a630 derivatives */
+static inline int adreno_is_a630_family(const struct adreno_gpu *gpu)
{
- return adreno_is_revn(gpu, 615) ||
- adreno_is_revn(gpu, 616) ||
- adreno_is_revn(gpu, 618) ||
- adreno_is_revn(gpu, 619);
+ if (WARN_ON_ONCE(!gpu->info))
+ return false;
+ return gpu->info->family == ADRENO_6XX_GEN1;
}
static inline int adreno_is_a660_family(const struct adreno_gpu *gpu)
{
- return adreno_is_a660(gpu) || adreno_is_a690(gpu) || adreno_is_7c3(gpu);
+ if (WARN_ON_ONCE(!gpu->info))
+ return false;
+ return gpu->info->family == ADRENO_6XX_GEN4;
}
/* check for a650, a660, or any derivatives */
static inline int adreno_is_a650_family(const struct adreno_gpu *gpu)
{
- return adreno_is_revn(gpu, 650) ||
- adreno_is_revn(gpu, 620) ||
- adreno_is_a660_family(gpu);
+ if (WARN_ON_ONCE(!gpu->info))
+ return false;
+ return gpu->info->family == ADRENO_6XX_GEN3 ||
+ gpu->info->family == ADRENO_6XX_GEN4;
}
static inline int adreno_is_a640_family(const struct adreno_gpu *gpu)
{
- return adreno_is_a640(gpu) || adreno_is_a680(gpu);
+ if (WARN_ON_ONCE(!gpu->info))
+ return false;
+ return gpu->info->family == ADRENO_6XX_GEN2;
+}
+
+static inline int adreno_is_a730(struct adreno_gpu *gpu)
+{
+ return gpu->info->chip_ids[0] == 0x07030001;
+}
+
+static inline int adreno_is_a740(struct adreno_gpu *gpu)
+{
+ return gpu->info->chip_ids[0] == 0x43050a01;
+}
+
+/* Placeholder to make future diffs smaller */
+static inline int adreno_is_a740_family(struct adreno_gpu *gpu)
+{
+ if (WARN_ON_ONCE(!gpu->info))
+ return false;
+ return gpu->info->family == ADRENO_7XX_GEN2;
+}
+
+static inline int adreno_is_a7xx(struct adreno_gpu *gpu)
+{
+ /* Update with non-fake (i.e. non-A702) Gen 7 GPUs */
+ return gpu->info->family == ADRENO_7XX_GEN1 ||
+ adreno_is_a740_family(gpu);
}
u64 adreno_private_address_space_size(struct msm_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
index 7d0d0e74c3b0..aa1867943c9f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
@@ -21,136 +21,253 @@ static const struct dpu_caps msm8998_dpu_caps = {
.max_vdeci_exp = MAX_VERT_DECIMATION,
};
-static const struct dpu_ubwc_cfg msm8998_ubwc_cfg = {
- .ubwc_version = DPU_HW_UBWC_VER_10,
- .highest_bank_bit = 0x2,
-};
-
-static const struct dpu_mdp_cfg msm8998_mdp[] = {
- {
- .name = "top_0", .id = MDP_TOP,
+static const struct dpu_mdp_cfg msm8998_mdp = {
+ .name = "top_0",
.base = 0x0, .len = 0x458,
.features = BIT(DPU_MDP_VSYNC_SEL),
- .clk_ctrls[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 12 },
- .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 },
- .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = { .reg_off = 0x3b0, .bit_off = 16 },
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 12 },
+ [DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 },
+ [DPU_CLK_CTRL_CURSOR1] = { .reg_off = 0x3b0, .bit_off = 16 },
},
};
static const struct dpu_ctl_cfg msm8998_ctl[] = {
{
- .name = "ctl_0", .id = CTL_0,
- .base = 0x1000, .len = 0x94,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
- },
- {
- .name = "ctl_1", .id = CTL_1,
- .base = 0x1200, .len = 0x94,
- .features = 0,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
- },
- {
- .name = "ctl_2", .id = CTL_2,
- .base = 0x1400, .len = 0x94,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
- },
- {
- .name = "ctl_3", .id = CTL_3,
- .base = 0x1600, .len = 0x94,
- .features = 0,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
- },
- {
- .name = "ctl_4", .id = CTL_4,
- .base = 0x1800, .len = 0x94,
- .features = 0,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x94,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x94,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x94,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0x94,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0x94,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
},
};
static const struct dpu_sspp_cfg msm8998_sspp[] = {
- SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, 0x1ac, VIG_MSM8998_MASK,
- msm8998_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
- SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, 0x1ac, VIG_MSM8998_MASK,
- msm8998_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
- SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, 0x1ac, VIG_MSM8998_MASK,
- msm8998_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
- SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, 0x1ac, VIG_MSM8998_MASK,
- msm8998_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
- SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, 0x1ac, DMA_MSM8998_MASK,
- sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
- SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, 0x1ac, DMA_MSM8998_MASK,
- sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
- SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, 0x1ac, DMA_CURSOR_MSM8998_MASK,
- sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
- SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, 0x1ac, DMA_CURSOR_MSM8998_MASK,
- sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3),
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1ac,
+ .features = VIG_MSM8998_MASK,
+ .sblk = &msm8998_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x1ac,
+ .features = VIG_MSM8998_MASK,
+ .sblk = &msm8998_vig_sblk_1,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x1ac,
+ .features = VIG_MSM8998_MASK,
+ .sblk = &msm8998_vig_sblk_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG2,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x1ac,
+ .features = VIG_MSM8998_MASK,
+ .sblk = &msm8998_vig_sblk_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG3,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1ac,
+ .features = DMA_MSM8998_MASK,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1ac,
+ .features = DMA_MSM8998_MASK,
+ .sblk = &sdm845_dma_sblk_1,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1ac,
+ .features = DMA_CURSOR_MSM8998_MASK,
+ .sblk = &sdm845_dma_sblk_2,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x1ac,
+ .features = DMA_CURSOR_MSM8998_MASK,
+ .sblk = &sdm845_dma_sblk_3,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA3,
+ },
};
static const struct dpu_lm_cfg msm8998_lm[] = {
- LM_BLK("lm_0", LM_0, 0x44000, MIXER_MSM8998_MASK,
- &msm8998_lm_sblk, PINGPONG_0, LM_1, DSPP_0),
- LM_BLK("lm_1", LM_1, 0x45000, MIXER_MSM8998_MASK,
- &msm8998_lm_sblk, PINGPONG_1, LM_0, DSPP_1),
- LM_BLK("lm_2", LM_2, 0x46000, MIXER_MSM8998_MASK,
- &msm8998_lm_sblk, PINGPONG_2, LM_5, 0),
- LM_BLK("lm_3", LM_3, 0x47000, MIXER_MSM8998_MASK,
- &msm8998_lm_sblk, PINGPONG_NONE, 0, 0),
- LM_BLK("lm_4", LM_4, 0x48000, MIXER_MSM8998_MASK,
- &msm8998_lm_sblk, PINGPONG_NONE, 0, 0),
- LM_BLK("lm_5", LM_5, 0x49000, MIXER_MSM8998_MASK,
- &msm8998_lm_sblk, PINGPONG_3, LM_2, 0),
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .pingpong = PINGPONG_NONE,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .pingpong = PINGPONG_NONE,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ },
};
static const struct dpu_pingpong_cfg msm8998_pp[] = {
- PP_BLK("pingpong_0", PINGPONG_0, 0x70000, PINGPONG_SDM845_TE2_MASK, 0, sdm845_pp_sblk_te,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
- PP_BLK("pingpong_1", PINGPONG_1, 0x70800, PINGPONG_SDM845_TE2_MASK, 0, sdm845_pp_sblk_te,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
- PP_BLK("pingpong_2", PINGPONG_2, 0x71000, PINGPONG_SDM845_MASK, 0, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)),
- PP_BLK("pingpong_3", PINGPONG_3, 0x71800, PINGPONG_SDM845_MASK, 0, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)),
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SDM845_TE2_MASK,
+ .sblk = &sdm845_pp_sblk_te,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x70800, .len = 0xd4,
+ .features = PINGPONG_SDM845_TE2_MASK,
+ .sblk = &sdm845_pp_sblk_te,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x71000, .len = 0xd4,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x71800, .len = 0xd4,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15),
+ },
};
static const struct dpu_dsc_cfg msm8998_dsc[] = {
- DSC_BLK("dsc_0", DSC_0, 0x80000, 0),
- DSC_BLK("dsc_1", DSC_1, 0x80400, 0),
+ {
+ .name = "dsc_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x140,
+ }, {
+ .name = "dsc_1", .id = DSC_1,
+ .base = 0x80400, .len = 0x140,
+ },
};
static const struct dpu_dspp_cfg msm8998_dspp[] = {
- DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
- &msm8998_dspp_sblk),
- DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK,
- &msm8998_dspp_sblk),
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &msm8998_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &msm8998_dspp_sblk,
+ },
};
static const struct dpu_intf_cfg msm8998_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, 0, 21, INTF_SDM845_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25)),
- INTF_BLK("intf_1", INTF_1, 0x6a800, 0x280, INTF_DSI, 0, 21, INTF_SDM845_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27)),
- INTF_BLK("intf_2", INTF_2, 0x6b000, 0x280, INTF_DSI, 1, 21, INTF_SDM845_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29)),
- INTF_BLK("intf_3", INTF_3, 0x6b800, 0x280, INTF_HDMI, 0, 21, INTF_SDM845_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31)),
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x6a000, .len = 0x280,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 21,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x280,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 21,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x6b000, .len = 0x280,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 21,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x6b800, .len = 0x280,
+ .type = INTF_HDMI,
+ .prog_fetch_lines_worst_case = 21,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ },
};
static const struct dpu_perf_cfg msm8998_perf_data = {
@@ -189,11 +306,15 @@ static const struct dpu_perf_cfg msm8998_perf_data = {
.bw_inefficiency_factor = 120,
};
+static const struct dpu_mdss_version msm8998_mdss_ver = {
+ .core_major_ver = 3,
+ .core_minor_ver = 0,
+};
+
const struct dpu_mdss_cfg dpu_msm8998_cfg = {
+ .mdss_ver = &msm8998_mdss_ver,
.caps = &msm8998_dpu_caps,
- .ubwc = &msm8998_ubwc_cfg,
- .mdp_count = ARRAY_SIZE(msm8998_mdp),
- .mdp = msm8998_mdp,
+ .mdp = &msm8998_mdp,
.ctl_count = ARRAY_SIZE(msm8998_ctl),
.ctl = msm8998_ctl,
.sspp_count = ARRAY_SIZE(msm8998_sspp),
@@ -211,14 +332,6 @@ const struct dpu_mdss_cfg dpu_msm8998_cfg = {
.vbif_count = ARRAY_SIZE(msm8998_vbif),
.vbif = msm8998_vbif,
.perf = &msm8998_perf_data,
- .mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
- BIT(MDP_SSPP_TOP0_INTR2) | \
- BIT(MDP_SSPP_TOP0_HIST_INTR) | \
- BIT(MDP_INTF0_INTR) | \
- BIT(MDP_INTF1_INTR) | \
- BIT(MDP_INTF2_INTR) | \
- BIT(MDP_INTF3_INTR) | \
- BIT(MDP_INTF4_INTR),
};
#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
index b6098141bb9b..38ac0c1a134b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
@@ -21,140 +21,270 @@ static const struct dpu_caps sdm845_dpu_caps = {
.max_vdeci_exp = MAX_VERT_DECIMATION,
};
-static const struct dpu_ubwc_cfg sdm845_ubwc_cfg = {
- .ubwc_version = DPU_HW_UBWC_VER_20,
- .highest_bank_bit = 0x2,
-};
-
-static const struct dpu_mdp_cfg sdm845_mdp[] = {
- {
- .name = "top_0", .id = MDP_TOP,
+static const struct dpu_mdp_cfg sdm845_mdp = {
+ .name = "top_0",
.base = 0x0, .len = 0x45c,
.features = BIT(DPU_MDP_AUDIO_SELECT) | BIT(DPU_MDP_VSYNC_SEL),
- .clk_ctrls[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
},
};
static const struct dpu_ctl_cfg sdm845_ctl[] = {
{
- .name = "ctl_0", .id = CTL_0,
- .base = 0x1000, .len = 0xe4,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
- },
- {
- .name = "ctl_1", .id = CTL_1,
- .base = 0x1200, .len = 0xe4,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
- },
- {
- .name = "ctl_2", .id = CTL_2,
- .base = 0x1400, .len = 0xe4,
- .features = 0,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
- },
- {
- .name = "ctl_3", .id = CTL_3,
- .base = 0x1600, .len = 0xe4,
- .features = 0,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
- },
- {
- .name = "ctl_4", .id = CTL_4,
- .base = 0x1800, .len = 0xe4,
- .features = 0,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0xe4,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0xe4,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0xe4,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0xe4,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0xe4,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
},
};
static const struct dpu_sspp_cfg sdm845_sspp[] = {
- SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, 0x1c8, VIG_SDM845_MASK_SDMA,
- sdm845_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
- SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, 0x1c8, VIG_SDM845_MASK_SDMA,
- sdm845_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
- SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, 0x1c8, VIG_SDM845_MASK_SDMA,
- sdm845_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
- SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, 0x1c8, VIG_SDM845_MASK_SDMA,
- sdm845_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
- SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, 0x1c8, DMA_SDM845_MASK_SDMA,
- sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
- SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, 0x1c8, DMA_SDM845_MASK_SDMA,
- sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
- SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, 0x1c8, DMA_CURSOR_SDM845_MASK_SDMA,
- sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
- SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, 0x1c8, DMA_CURSOR_SDM845_MASK_SDMA,
- sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3),
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1c8,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &sdm845_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x1c8,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &sdm845_vig_sblk_1,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x1c8,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &sdm845_vig_sblk_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG2,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x1c8,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &sdm845_vig_sblk_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG3,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1c8,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1c8,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &sdm845_dma_sblk_1,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1c8,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &sdm845_dma_sblk_2,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x1c8,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &sdm845_dma_sblk_3,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA3,
+ },
};
static const struct dpu_lm_cfg sdm845_lm[] = {
- LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_0, LM_1, DSPP_0),
- LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_1, LM_0, DSPP_1),
- LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_2, LM_5, DSPP_2),
- LM_BLK("lm_3", LM_3, 0x0, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_NONE, 0, DSPP_3),
- LM_BLK("lm_4", LM_4, 0x0, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_NONE, 0, 0),
- LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_3, LM_2, 0),
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x0, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .pingpong = PINGPONG_NONE,
+ .dspp = DSPP_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x0, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .pingpong = PINGPONG_NONE,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ },
};
static const struct dpu_dspp_cfg sdm845_dspp[] = {
- DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
- DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
- DSPP_BLK("dspp_2", DSPP_2, 0x58000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
- DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
};
static const struct dpu_pingpong_cfg sdm845_pp[] = {
- PP_BLK("pingpong_0", PINGPONG_0, 0x70000, PINGPONG_SDM845_TE2_MASK, 0, sdm845_pp_sblk_te,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
- PP_BLK("pingpong_1", PINGPONG_1, 0x70800, PINGPONG_SDM845_TE2_MASK, 0, sdm845_pp_sblk_te,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
- PP_BLK("pingpong_2", PINGPONG_2, 0x71000, PINGPONG_SDM845_MASK, 0, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)),
- PP_BLK("pingpong_3", PINGPONG_3, 0x71800, PINGPONG_SDM845_MASK, 0, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)),
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SDM845_TE2_MASK,
+ .sblk = &sdm845_pp_sblk_te,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x70800, .len = 0xd4,
+ .features = PINGPONG_SDM845_TE2_MASK,
+ .sblk = &sdm845_pp_sblk_te,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x71000, .len = 0xd4,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x71800, .len = 0xd4,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15),
+ },
};
static const struct dpu_dsc_cfg sdm845_dsc[] = {
- DSC_BLK("dsc_0", DSC_0, 0x80000, 0),
- DSC_BLK("dsc_1", DSC_1, 0x80400, 0),
- DSC_BLK("dsc_2", DSC_2, 0x80800, 0),
- DSC_BLK("dsc_3", DSC_3, 0x80c00, 0),
+ {
+ .name = "dsc_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x140,
+ }, {
+ .name = "dsc_1", .id = DSC_1,
+ .base = 0x80400, .len = 0x140,
+ }, {
+ .name = "dsc_2", .id = DSC_2,
+ .base = 0x80800, .len = 0x140,
+ }, {
+ .name = "dsc_3", .id = DSC_3,
+ .base = 0x80c00, .len = 0x140,
+ },
};
static const struct dpu_intf_cfg sdm845_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, 0, 24, INTF_SDM845_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25)),
- INTF_BLK("intf_1", INTF_1, 0x6a800, 0x280, INTF_DSI, 0, 24, INTF_SDM845_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27)),
- INTF_BLK("intf_2", INTF_2, 0x6b000, 0x280, INTF_DSI, 1, 24, INTF_SDM845_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29)),
- INTF_BLK("intf_3", INTF_3, 0x6b800, 0x280, INTF_DP, 1, 24, INTF_SDM845_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31)),
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x6a000, .len = 0x280,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x280,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x6b000, .len = 0x280,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x6b800, .len = 0x280,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ },
};
static const struct dpu_perf_cfg sdm845_perf_data = {
@@ -193,11 +323,15 @@ static const struct dpu_perf_cfg sdm845_perf_data = {
.bw_inefficiency_factor = 120,
};
+static const struct dpu_mdss_version sdm845_mdss_ver = {
+ .core_major_ver = 4,
+ .core_minor_ver = 0,
+};
+
const struct dpu_mdss_cfg dpu_sdm845_cfg = {
+ .mdss_ver = &sdm845_mdss_ver,
.caps = &sdm845_dpu_caps,
- .ubwc = &sdm845_ubwc_cfg,
- .mdp_count = ARRAY_SIZE(sdm845_mdp),
- .mdp = sdm845_mdp,
+ .mdp = &sdm845_mdp,
.ctl_count = ARRAY_SIZE(sdm845_ctl),
.ctl = sdm845_ctl,
.sspp_count = ARRAY_SIZE(sdm845_sspp),
@@ -215,15 +349,6 @@ const struct dpu_mdss_cfg dpu_sdm845_cfg = {
.vbif_count = ARRAY_SIZE(sdm845_vbif),
.vbif = sdm845_vbif,
.perf = &sdm845_perf_data,
- .mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
- BIT(MDP_SSPP_TOP0_INTR2) | \
- BIT(MDP_SSPP_TOP0_HIST_INTR) | \
- BIT(MDP_INTF0_INTR) | \
- BIT(MDP_INTF1_INTR) | \
- BIT(MDP_INTF2_INTR) | \
- BIT(MDP_INTF3_INTR) | \
- BIT(MDP_AD4_0_INTR) | \
- BIT(MDP_AD4_1_INTR),
};
#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
index b5f751354267..9392ad2b4d3f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
@@ -21,161 +21,316 @@ static const struct dpu_caps sm8150_dpu_caps = {
.max_vdeci_exp = MAX_VERT_DECIMATION,
};
-static const struct dpu_ubwc_cfg sm8150_ubwc_cfg = {
- .ubwc_version = DPU_HW_UBWC_VER_30,
- .highest_bank_bit = 0x2,
-};
-
-static const struct dpu_mdp_cfg sm8150_mdp[] = {
- {
- .name = "top_0", .id = MDP_TOP,
+static const struct dpu_mdp_cfg sm8150_mdp = {
+ .name = "top_0",
.base = 0x0, .len = 0x45c,
.features = BIT(DPU_MDP_AUDIO_SELECT),
- .clk_ctrls[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
},
};
/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8150_ctl[] = {
{
- .name = "ctl_0", .id = CTL_0,
- .base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
- },
- {
- .name = "ctl_1", .id = CTL_1,
- .base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
- },
- {
- .name = "ctl_2", .id = CTL_2,
- .base = 0x1400, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
- },
- {
- .name = "ctl_3", .id = CTL_3,
- .base = 0x1600, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
- },
- {
- .name = "ctl_4", .id = CTL_4,
- .base = 0x1800, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
- },
- {
- .name = "ctl_5", .id = CTL_5,
- .base = 0x1a00, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a00, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
static const struct dpu_sspp_cfg sm8150_sspp[] = {
- SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, 0x1f0, VIG_SDM845_MASK,
- sdm845_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
- SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, 0x1f0, VIG_SDM845_MASK,
- sdm845_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
- SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, 0x1f0, VIG_SDM845_MASK,
- sdm845_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
- SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, 0x1f0, VIG_SDM845_MASK,
- sdm845_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
- SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, 0x1f0, DMA_SDM845_MASK,
- sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
- SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, 0x1f0, DMA_SDM845_MASK,
- sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
- SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, 0x1f0, DMA_CURSOR_SDM845_MASK,
- sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
- SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, 0x1f0, DMA_CURSOR_SDM845_MASK,
- sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3),
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &sdm845_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &sdm845_vig_sblk_1,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &sdm845_vig_sblk_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG2,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &sdm845_vig_sblk_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG3,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1f0,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1f0,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_1,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1f0,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_2,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x1f0,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_3,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA3,
+ },
};
static const struct dpu_lm_cfg sm8150_lm[] = {
- LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_0, LM_1, DSPP_0),
- LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_1, LM_0, DSPP_1),
- LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_2, LM_3, 0),
- LM_BLK("lm_3", LM_3, 0x47000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_3, LM_2, 0),
- LM_BLK("lm_4", LM_4, 0x48000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_4, LM_5, 0),
- LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_5, LM_4, 0),
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ },
};
static const struct dpu_dspp_cfg sm8150_dspp[] = {
- DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
- DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
- DSPP_BLK("dspp_2", DSPP_2, 0x58000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
- DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
};
static const struct dpu_pingpong_cfg sm8150_pp[] = {
- PP_BLK("pingpong_0", PINGPONG_0, 0x70000, PINGPONG_SM8150_MASK, MERGE_3D_0, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
- -1),
- PP_BLK("pingpong_1", PINGPONG_1, 0x70800, PINGPONG_SM8150_MASK, MERGE_3D_0, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
- -1),
- PP_BLK("pingpong_2", PINGPONG_2, 0x71000, PINGPONG_SM8150_MASK, MERGE_3D_1, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
- -1),
- PP_BLK("pingpong_3", PINGPONG_3, 0x71800, PINGPONG_SM8150_MASK, MERGE_3D_1, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
- -1),
- PP_BLK("pingpong_4", PINGPONG_4, 0x72000, PINGPONG_SM8150_MASK, MERGE_3D_2, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
- -1),
- PP_BLK("pingpong_5", PINGPONG_5, 0x72800, PINGPONG_SM8150_MASK, MERGE_3D_2, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
- -1),
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x70800, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x71000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x71800, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x72000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x72800, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ },
};
static const struct dpu_merge_3d_cfg sm8150_merge_3d[] = {
- MERGE_3D_BLK("merge_3d_0", MERGE_3D_0, 0x83000),
- MERGE_3D_BLK("merge_3d_1", MERGE_3D_1, 0x83100),
- MERGE_3D_BLK("merge_3d_2", MERGE_3D_2, 0x83200),
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x83000, .len = 0x8,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x83100, .len = 0x8,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x83200, .len = 0x8,
+ },
};
static const struct dpu_dsc_cfg sm8150_dsc[] = {
- DSC_BLK("dsc_0", DSC_0, 0x80000, BIT(DPU_DSC_OUTPUT_CTRL)),
- DSC_BLK("dsc_1", DSC_1, 0x80400, BIT(DPU_DSC_OUTPUT_CTRL)),
- DSC_BLK("dsc_2", DSC_2, 0x80800, BIT(DPU_DSC_OUTPUT_CTRL)),
- DSC_BLK("dsc_3", DSC_3, 0x80c00, BIT(DPU_DSC_OUTPUT_CTRL)),
+ {
+ .name = "dsc_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ }, {
+ .name = "dsc_1", .id = DSC_1,
+ .base = 0x80400, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ }, {
+ .name = "dsc_2", .id = DSC_2,
+ .base = 0x80800, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ }, {
+ .name = "dsc_3", .id = DSC_3,
+ .base = 0x80c00, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ },
};
static const struct dpu_intf_cfg sm8150_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, 0, 24, INTF_SC7180_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25)),
- INTF_BLK_DSI_TE("intf_1", INTF_1, 0x6a800, 0x2bc, INTF_DSI, 0, 24, INTF_SC7180_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2)),
- INTF_BLK_DSI_TE("intf_2", INTF_2, 0x6b000, 0x2bc, INTF_DSI, 1, 24, INTF_SC7180_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
- DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2)),
- INTF_BLK("intf_3", INTF_3, 0x6b800, 0x280, INTF_DP, 1, 24, INTF_SC7180_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31)),
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x6a000, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x2bc,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x6b000, .len = 0x2bc,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x6b800, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ },
};
static const struct dpu_perf_cfg sm8150_perf_data = {
@@ -207,11 +362,15 @@ static const struct dpu_perf_cfg sm8150_perf_data = {
.bw_inefficiency_factor = 120,
};
+static const struct dpu_mdss_version sm8150_mdss_ver = {
+ .core_major_ver = 5,
+ .core_minor_ver = 0,
+};
+
const struct dpu_mdss_cfg dpu_sm8150_cfg = {
+ .mdss_ver = &sm8150_mdss_ver,
.caps = &sm8150_dpu_caps,
- .ubwc = &sm8150_ubwc_cfg,
- .mdp_count = ARRAY_SIZE(sm8150_mdp),
- .mdp = sm8150_mdp,
+ .mdp = &sm8150_mdp,
.ctl_count = ARRAY_SIZE(sm8150_ctl),
.ctl = sm8150_ctl,
.sspp_count = ARRAY_SIZE(sm8150_sspp),
@@ -231,17 +390,6 @@ const struct dpu_mdss_cfg dpu_sm8150_cfg = {
.vbif_count = ARRAY_SIZE(sdm845_vbif),
.vbif = sdm845_vbif,
.perf = &sm8150_perf_data,
- .mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
- BIT(MDP_SSPP_TOP0_INTR2) | \
- BIT(MDP_SSPP_TOP0_HIST_INTR) | \
- BIT(MDP_INTF0_INTR) | \
- BIT(MDP_INTF1_INTR) | \
- BIT(MDP_INTF1_TEAR_INTR) | \
- BIT(MDP_INTF2_INTR) | \
- BIT(MDP_INTF2_TEAR_INTR) | \
- BIT(MDP_INTF3_INTR) | \
- BIT(MDP_AD4_0_INTR) | \
- BIT(MDP_AD4_1_INTR),
};
#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
index 8ed2b263c5ea..e07f4c8c25b9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
@@ -21,169 +21,343 @@ static const struct dpu_caps sc8180x_dpu_caps = {
.max_vdeci_exp = MAX_VERT_DECIMATION,
};
-static const struct dpu_ubwc_cfg sc8180x_ubwc_cfg = {
- .ubwc_version = DPU_HW_UBWC_VER_30,
- .highest_bank_bit = 0x3,
-};
-
-static const struct dpu_mdp_cfg sc8180x_mdp[] = {
- {
- .name = "top_0", .id = MDP_TOP,
+static const struct dpu_mdp_cfg sc8180x_mdp = {
+ .name = "top_0",
.base = 0x0, .len = 0x45c,
.features = BIT(DPU_MDP_AUDIO_SELECT),
- .clk_ctrls[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
},
};
static const struct dpu_ctl_cfg sc8180x_ctl[] = {
{
- .name = "ctl_0", .id = CTL_0,
- .base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
- },
- {
- .name = "ctl_1", .id = CTL_1,
- .base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
- },
- {
- .name = "ctl_2", .id = CTL_2,
- .base = 0x1400, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
- },
- {
- .name = "ctl_3", .id = CTL_3,
- .base = 0x1600, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
- },
- {
- .name = "ctl_4", .id = CTL_4,
- .base = 0x1800, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
- },
- {
- .name = "ctl_5", .id = CTL_5,
- .base = 0x1a00, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a00, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
static const struct dpu_sspp_cfg sc8180x_sspp[] = {
- SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, 0x1f0, VIG_SDM845_MASK,
- sdm845_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
- SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, 0x1f0, VIG_SDM845_MASK,
- sdm845_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
- SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, 0x1f0, VIG_SDM845_MASK,
- sdm845_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
- SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, 0x1f0, VIG_SDM845_MASK,
- sdm845_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
- SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, 0x1f0, DMA_SDM845_MASK,
- sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
- SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, 0x1f0, DMA_SDM845_MASK,
- sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
- SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, 0x1f0, DMA_CURSOR_SDM845_MASK,
- sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
- SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, 0x1f0, DMA_CURSOR_SDM845_MASK,
- sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3),
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &sdm845_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &sdm845_vig_sblk_1,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &sdm845_vig_sblk_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG2,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &sdm845_vig_sblk_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG3,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1f0,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1f0,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_1,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1f0,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_2,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x1f0,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_3,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA3,
+ },
};
static const struct dpu_lm_cfg sc8180x_lm[] = {
- LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_0, LM_1, DSPP_0),
- LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_1, LM_0, DSPP_1),
- LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_2, LM_3, 0),
- LM_BLK("lm_3", LM_3, 0x47000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_3, LM_2, 0),
- LM_BLK("lm_4", LM_4, 0x48000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_4, LM_5, 0),
- LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_5, LM_4, 0),
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ },
};
static const struct dpu_dspp_cfg sc8180x_dspp[] = {
- DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
- DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
- DSPP_BLK("dspp_2", DSPP_2, 0x58000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
- DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
};
static const struct dpu_pingpong_cfg sc8180x_pp[] = {
- PP_BLK("pingpong_0", PINGPONG_0, 0x70000, PINGPONG_SM8150_MASK, MERGE_3D_0, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
- -1),
- PP_BLK("pingpong_1", PINGPONG_1, 0x70800, PINGPONG_SM8150_MASK, MERGE_3D_0, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
- -1),
- PP_BLK("pingpong_2", PINGPONG_2, 0x71000, PINGPONG_SM8150_MASK, MERGE_3D_1, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
- -1),
- PP_BLK("pingpong_3", PINGPONG_3, 0x71800, PINGPONG_SM8150_MASK, MERGE_3D_1, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
- -1),
- PP_BLK("pingpong_4", PINGPONG_4, 0x72000, PINGPONG_SM8150_MASK, MERGE_3D_2, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
- -1),
- PP_BLK("pingpong_5", PINGPONG_5, 0x72800, PINGPONG_SM8150_MASK, MERGE_3D_2, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
- -1),
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x70800, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x71000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x71800, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x72000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x72800, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ },
};
static const struct dpu_merge_3d_cfg sc8180x_merge_3d[] = {
- MERGE_3D_BLK("merge_3d_0", MERGE_3D_0, 0x83000),
- MERGE_3D_BLK("merge_3d_1", MERGE_3D_1, 0x83100),
- MERGE_3D_BLK("merge_3d_2", MERGE_3D_2, 0x83200),
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x83000, .len = 0x8,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x83100, .len = 0x8,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x83200, .len = 0x8,
+ },
};
static const struct dpu_dsc_cfg sc8180x_dsc[] = {
- DSC_BLK("dsc_0", DSC_0, 0x80000, BIT(DPU_DSC_OUTPUT_CTRL)),
- DSC_BLK("dsc_1", DSC_1, 0x80400, BIT(DPU_DSC_OUTPUT_CTRL)),
- DSC_BLK("dsc_2", DSC_2, 0x80800, BIT(DPU_DSC_OUTPUT_CTRL)),
- DSC_BLK("dsc_3", DSC_3, 0x80c00, BIT(DPU_DSC_OUTPUT_CTRL)),
- DSC_BLK("dsc_4", DSC_4, 0x81000, BIT(DPU_DSC_OUTPUT_CTRL)),
- DSC_BLK("dsc_5", DSC_5, 0x81400, BIT(DPU_DSC_OUTPUT_CTRL)),
+ {
+ .name = "dsc_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ }, {
+ .name = "dsc_1", .id = DSC_1,
+ .base = 0x80400, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ }, {
+ .name = "dsc_2", .id = DSC_2,
+ .base = 0x80800, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ }, {
+ .name = "dsc_3", .id = DSC_3,
+ .base = 0x80c00, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ }, {
+ .name = "dsc_4", .id = DSC_4,
+ .base = 0x81000, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ }, {
+ .name = "dsc_5", .id = DSC_5,
+ .base = 0x81400, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ },
};
static const struct dpu_intf_cfg sc8180x_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7180_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25)),
- INTF_BLK_DSI_TE("intf_1", INTF_1, 0x6a800, 0x2bc, INTF_DSI, 0, 24, INTF_SC7180_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2)),
- INTF_BLK_DSI_TE("intf_2", INTF_2, 0x6b000, 0x2bc, INTF_DSI, 1, 24, INTF_SC7180_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
- DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2)),
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x6a000, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x2bc,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x6b000, .len = 0x2bc,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ },
/* INTF_3 is for MST, wired to INTF_DP 0 and 1, use dummy index until this is supported */
- INTF_BLK("intf_3", INTF_3, 0x6b800, 0x280, INTF_DP, 999, 24, INTF_SC7180_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31)),
- INTF_BLK("intf_4", INTF_4, 0x6c000, 0x280, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7180_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 20),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 21)),
- INTF_BLK("intf_5", INTF_5, 0x6c800, 0x280, INTF_DP, MSM_DP_CONTROLLER_2, 24, INTF_SC7180_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 22),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 23)),
+ {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x6b800, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = 999,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ }, {
+ .name = "intf_4", .id = INTF_4,
+ .base = 0x6c000, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 20),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 21),
+ }, {
+ .name = "intf_5", .id = INTF_5,
+ .base = 0x6c800, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_2,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 22),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 23),
+ },
};
static const struct dpu_perf_cfg sc8180x_perf_data = {
@@ -213,11 +387,15 @@ static const struct dpu_perf_cfg sc8180x_perf_data = {
.bw_inefficiency_factor = 120,
};
+static const struct dpu_mdss_version sc8180x_mdss_ver = {
+ .core_major_ver = 5,
+ .core_minor_ver = 1,
+};
+
const struct dpu_mdss_cfg dpu_sc8180x_cfg = {
+ .mdss_ver = &sc8180x_mdss_ver,
.caps = &sc8180x_dpu_caps,
- .ubwc = &sc8180x_ubwc_cfg,
- .mdp_count = ARRAY_SIZE(sc8180x_mdp),
- .mdp = sc8180x_mdp,
+ .mdp = &sc8180x_mdp,
.ctl_count = ARRAY_SIZE(sc8180x_ctl),
.ctl = sc8180x_ctl,
.sspp_count = ARRAY_SIZE(sc8180x_sspp),
@@ -237,19 +415,6 @@ const struct dpu_mdss_cfg dpu_sc8180x_cfg = {
.vbif_count = ARRAY_SIZE(sdm845_vbif),
.vbif = sdm845_vbif,
.perf = &sc8180x_perf_data,
- .mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
- BIT(MDP_SSPP_TOP0_INTR2) | \
- BIT(MDP_SSPP_TOP0_HIST_INTR) | \
- BIT(MDP_INTF0_INTR) | \
- BIT(MDP_INTF1_INTR) | \
- BIT(MDP_INTF1_TEAR_INTR) | \
- BIT(MDP_INTF2_INTR) | \
- BIT(MDP_INTF2_TEAR_INTR) | \
- BIT(MDP_INTF3_INTR) | \
- BIT(MDP_INTF4_INTR) | \
- BIT(MDP_INTF5_INTR) | \
- BIT(MDP_AD4_0_INTR) | \
- BIT(MDP_AD4_1_INTR),
};
#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
new file mode 100644
index 000000000000..cec7af6667dc
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 Marijn Suijten <marijn.suijten@somainline.org>. All rights reserved.
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_5_4_SM6125_H
+#define _DPU_5_4_SM6125_H
+
+static const struct dpu_caps sm6125_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0x6,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .max_linewidth = 2160,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+ .max_hdeci_exp = MAX_HORZ_DECIMATION,
+ .max_vdeci_exp = MAX_VERT_DECIMATION,
+};
+
+static const struct dpu_mdp_cfg sm6125_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x45c,
+ .features = 0,
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ },
+};
+
+static const struct dpu_ctl_cfg sm6125_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a00, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_sspp_cfg sm6125_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f0,
+ .features = VIG_SM6125_MASK,
+ .sblk = &sm6125_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1f0,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1f0,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_1,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ },
+};
+
+static const struct dpu_lm_cfg sm6125_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_QCM2290_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ .lm_pair = LM_1,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_QCM2290_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .pingpong = PINGPONG_1,
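+ /* no DSPP attached to lm_1; only DSPP_0 is declared in this catalog */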
+ .dspp = 0,
+ .lm_pair = LM_0,
+ },
+};
+
+static const struct dpu_dspp_cfg sm6125_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sm6125_pp[] = {
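+ /* no merge_3d blocks are declared for this DPU, so .merge_3d stays at 0 */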
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .merge_3d = 0,
+ .sblk = &sdm845_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x70800, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .merge_3d = 0,
+ .sblk = &sdm845_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ },
+};
+
+static const struct dpu_intf_cfg sm6125_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x6a000, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x2c0,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ },
+};
+
+static const struct dpu_perf_cfg sm6125_perf_data = {
+ .max_bw_low = 4100000,
+ .max_bw_high = 4100000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 0, /* No LLCC on this SoC */
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 24,
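+ /* danger/safe/qos LUT tables are indexed per usage: linear, macrotile, NRT */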
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfff8, 0xf000, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sm8150_qos_linear),
+ .entries = sm8150_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sm6125_mdss_ver = {
+ .core_major_ver = 5,
+ .core_minor_ver = 4,
+};
+
+const struct dpu_mdss_cfg dpu_sm6125_cfg = {
+ .mdss_ver = &sm6125_mdss_ver,
+ .caps = &sm6125_dpu_caps,
+ .mdp = &sm6125_mdp,
+ .ctl_count = ARRAY_SIZE(sm6125_ctl),
+ .ctl = sm6125_ctl,
+ .sspp_count = ARRAY_SIZE(sm6125_sspp),
+ .sspp = sm6125_sspp,
+ .mixer_count = ARRAY_SIZE(sm6125_lm),
+ .mixer = sm6125_lm,
+ .dspp_count = ARRAY_SIZE(sm6125_dspp),
+ .dspp = sm6125_dspp,
+ .pingpong_count = ARRAY_SIZE(sm6125_pp),
+ .pingpong = sm6125_pp,
+ .intf_count = ARRAY_SIZE(sm6125_intf),
+ .intf = sm6125_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .perf = &sm6125_perf_data,
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
index daebd2170041..94278a3e3483 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
@@ -19,169 +19,332 @@ static const struct dpu_caps sm8250_dpu_caps = {
.pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
};
-static const struct dpu_ubwc_cfg sm8250_ubwc_cfg = {
- .ubwc_version = DPU_HW_UBWC_VER_40,
- .highest_bank_bit = 0x3, /* TODO: 2 for LP_DDR4 */
- .ubwc_swizzle = 0x6,
-};
-
-static const struct dpu_mdp_cfg sm8250_mdp[] = {
- {
- .name = "top_0", .id = MDP_TOP,
+static const struct dpu_mdp_cfg sm8250_mdp = {
+ .name = "top_0",
.base = 0x0, .len = 0x494,
- .features = 0,
- .clk_ctrls[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
- .clk_ctrls[DPU_CLK_CTRL_WB2] = { .reg_off = 0x3b8, .bit_off = 24 },
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x3b8, .bit_off = 24 },
},
};
/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8250_ctl[] = {
{
- .name = "ctl_0", .id = CTL_0,
- .base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
- },
- {
- .name = "ctl_1", .id = CTL_1,
- .base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
- },
- {
- .name = "ctl_2", .id = CTL_2,
- .base = 0x1400, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
- },
- {
- .name = "ctl_3", .id = CTL_3,
- .base = 0x1600, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
- },
- {
- .name = "ctl_4", .id = CTL_4,
- .base = 0x1800, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
- },
- {
- .name = "ctl_5", .id = CTL_5,
- .base = 0x1a00, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a00, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
static const struct dpu_sspp_cfg sm8250_sspp[] = {
- SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, 0x1f8, VIG_SC7180_MASK_SDMA,
- sm8250_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
- SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, 0x1f8, VIG_SC7180_MASK_SDMA,
- sm8250_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
- SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, 0x1f8, VIG_SC7180_MASK_SDMA,
- sm8250_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
- SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, 0x1f8, VIG_SC7180_MASK_SDMA,
- sm8250_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
- SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, 0x1f8, DMA_SDM845_MASK_SDMA,
- sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
- SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, 0x1f8, DMA_SDM845_MASK_SDMA,
- sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
- SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, 0x1f8, DMA_CURSOR_SDM845_MASK_SDMA,
- sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
- SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, 0x1f8, DMA_CURSOR_SDM845_MASK_SDMA,
- sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3),
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f8,
+ .features = VIG_SC7180_MASK_SDMA,
+ .sblk = &sm8250_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x1f8,
+ .features = VIG_SC7180_MASK_SDMA,
+ .sblk = &sm8250_vig_sblk_1,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x1f8,
+ .features = VIG_SC7180_MASK_SDMA,
+ .sblk = &sm8250_vig_sblk_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG2,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x1f8,
+ .features = VIG_SC7180_MASK_SDMA,
+ .sblk = &sm8250_vig_sblk_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG3,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1f8,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1f8,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &sdm845_dma_sblk_1,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1f8,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &sdm845_dma_sblk_2,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x1f8,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &sdm845_dma_sblk_3,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA3,
+ },
};
static const struct dpu_lm_cfg sm8250_lm[] = {
- LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_0, LM_1, DSPP_0),
- LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_1, LM_0, DSPP_1),
- LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_2, LM_3, 0),
- LM_BLK("lm_3", LM_3, 0x47000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_3, LM_2, 0),
- LM_BLK("lm_4", LM_4, 0x48000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_4, LM_5, 0),
- LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_5, LM_4, 0),
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ },
};
static const struct dpu_dspp_cfg sm8250_dspp[] = {
- DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
- DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
- DSPP_BLK("dspp_2", DSPP_2, 0x58000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
- DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
};
static const struct dpu_pingpong_cfg sm8250_pp[] = {
- PP_BLK("pingpong_0", PINGPONG_0, 0x70000, PINGPONG_SM8150_MASK, MERGE_3D_0, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
- -1),
- PP_BLK("pingpong_1", PINGPONG_1, 0x70800, PINGPONG_SM8150_MASK, MERGE_3D_0, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
- -1),
- PP_BLK("pingpong_2", PINGPONG_2, 0x71000, PINGPONG_SM8150_MASK, MERGE_3D_1, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
- -1),
- PP_BLK("pingpong_3", PINGPONG_3, 0x71800, PINGPONG_SM8150_MASK, MERGE_3D_1, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
- -1),
- PP_BLK("pingpong_4", PINGPONG_4, 0x72000, PINGPONG_SM8150_MASK, MERGE_3D_2, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
- -1),
- PP_BLK("pingpong_5", PINGPONG_5, 0x72800, PINGPONG_SM8150_MASK, MERGE_3D_2, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
- -1),
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x70800, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x71000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x71800, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x72000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x72800, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ },
};
static const struct dpu_merge_3d_cfg sm8250_merge_3d[] = {
- MERGE_3D_BLK("merge_3d_0", MERGE_3D_0, 0x83000),
- MERGE_3D_BLK("merge_3d_1", MERGE_3D_1, 0x83100),
- MERGE_3D_BLK("merge_3d_2", MERGE_3D_2, 0x83200),
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x83000, .len = 0x8,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x83100, .len = 0x8,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x83200, .len = 0x8,
+ },
};
static const struct dpu_dsc_cfg sm8250_dsc[] = {
- DSC_BLK("dsc_0", DSC_0, 0x80000, BIT(DPU_DSC_OUTPUT_CTRL)),
- DSC_BLK("dsc_1", DSC_1, 0x80400, BIT(DPU_DSC_OUTPUT_CTRL)),
- DSC_BLK("dsc_2", DSC_2, 0x80800, BIT(DPU_DSC_OUTPUT_CTRL)),
- DSC_BLK("dsc_3", DSC_3, 0x80c00, BIT(DPU_DSC_OUTPUT_CTRL)),
+ {
+ .name = "dsc_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ }, {
+ .name = "dsc_1", .id = DSC_1,
+ .base = 0x80400, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ }, {
+ .name = "dsc_2", .id = DSC_2,
+ .base = 0x80800, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ }, {
+ .name = "dsc_3", .id = DSC_3,
+ .base = 0x80c00, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ },
};
static const struct dpu_intf_cfg sm8250_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, 0, 24, INTF_SC7180_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25)),
- INTF_BLK_DSI_TE("intf_1", INTF_1, 0x6a800, 0x2c0, INTF_DSI, 0, 24, INTF_SC7180_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2)),
- INTF_BLK_DSI_TE("intf_2", INTF_2, 0x6b000, 0x2c0, INTF_DSI, 1, 24, INTF_SC7180_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
- DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2)),
- INTF_BLK("intf_3", INTF_3, 0x6b800, 0x280, INTF_DP, 1, 24, INTF_SC7180_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31)),
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x6a000, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x2c0,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x6b000, .len = 0x2c0,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x6b800, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ },
};
static const struct dpu_wb_cfg sm8250_wb[] = {
- WB_BLK("wb_2", WB_2, 0x65000, WB_SM8250_MASK, DPU_CLK_CTRL_WB2, 6,
- VBIF_RT, MDP_SSPP_TOP0_INTR, 4096, 4),
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats,
+ .num_formats = ARRAY_SIZE(wb2_formats),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
};
static const struct dpu_perf_cfg sm8250_perf_data = {
@@ -213,11 +376,15 @@ static const struct dpu_perf_cfg sm8250_perf_data = {
.bw_inefficiency_factor = 120,
};
+static const struct dpu_mdss_version sm8250_mdss_ver = {
+ .core_major_ver = 6,
+ .core_minor_ver = 0,
+};
+
const struct dpu_mdss_cfg dpu_sm8250_cfg = {
+ .mdss_ver = &sm8250_mdss_ver,
.caps = &sm8250_dpu_caps,
- .ubwc = &sm8250_ubwc_cfg,
- .mdp_count = ARRAY_SIZE(sm8250_mdp),
- .mdp = sm8250_mdp,
+ .mdp = &sm8250_mdp,
.ctl_count = ARRAY_SIZE(sm8250_ctl),
.ctl = sm8250_ctl,
.sspp_count = ARRAY_SIZE(sm8250_sspp),
@@ -239,16 +406,6 @@ const struct dpu_mdss_cfg dpu_sm8250_cfg = {
.wb_count = ARRAY_SIZE(sm8250_wb),
.wb = sm8250_wb,
.perf = &sm8250_perf_data,
- .mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
- BIT(MDP_SSPP_TOP0_INTR2) | \
- BIT(MDP_SSPP_TOP0_HIST_INTR) | \
- BIT(MDP_INTF0_INTR) | \
- BIT(MDP_INTF1_INTR) | \
- BIT(MDP_INTF1_TEAR_INTR) | \
- BIT(MDP_INTF2_INTR) | \
- BIT(MDP_INTF2_TEAR_INTR) | \
- BIT(MDP_INTF3_INTR) | \
- BIT(MDP_INTF4_INTR),
};
#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
index 67566b07195a..c0d88ddccb28 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
@@ -17,90 +17,155 @@ static const struct dpu_caps sc7180_dpu_caps = {
.pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
};
-static const struct dpu_ubwc_cfg sc7180_ubwc_cfg = {
- .ubwc_version = DPU_HW_UBWC_VER_20,
- .highest_bank_bit = 0x3,
-};
-
-static const struct dpu_mdp_cfg sc7180_mdp[] = {
- {
- .name = "top_0", .id = MDP_TOP,
+static const struct dpu_mdp_cfg sc7180_mdp = {
+ .name = "top_0",
.base = 0x0, .len = 0x494,
- .features = 0,
- .clk_ctrls[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_WB2] = { .reg_off = 0x3b8, .bit_off = 24 },
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x3b8, .bit_off = 24 },
},
};
static const struct dpu_ctl_cfg sc7180_ctl[] = {
{
- .name = "ctl_0", .id = CTL_0,
- .base = 0x1000, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
- },
- {
- .name = "ctl_1", .id = CTL_1,
- .base = 0x1200, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
- },
- {
- .name = "ctl_2", .id = CTL_2,
- .base = 0x1400, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x1dc,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x1dc,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x1dc,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
},
};
static const struct dpu_sspp_cfg sc7180_sspp[] = {
- SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, 0x1f8, VIG_SC7180_MASK,
- sc7180_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
- SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, 0x1f8, DMA_SDM845_MASK,
- sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
- SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, 0x1f8, DMA_CURSOR_SDM845_MASK,
- sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
- SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, 0x1f8, DMA_CURSOR_SDM845_MASK,
- sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f8,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sc7180_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1f8,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1f8,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_1,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1f8,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_2,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ },
};
static const struct dpu_lm_cfg sc7180_lm[] = {
- LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
- &sc7180_lm_sblk, PINGPONG_0, LM_1, DSPP_0),
- LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
- &sc7180_lm_sblk, PINGPONG_1, LM_0, 0),
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sc7180_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sc7180_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ },
};
static const struct dpu_dspp_cfg sc7180_dspp[] = {
- DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
};
static const struct dpu_pingpong_cfg sc7180_pp[] = {
- PP_BLK("pingpong_0", PINGPONG_0, 0x70000, PINGPONG_SM8150_MASK, 0, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
- -1),
- PP_BLK("pingpong_1", PINGPONG_1, 0x70800, PINGPONG_SM8150_MASK, 0, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
- -1),
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = 0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x70800, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = 0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ },
};
static const struct dpu_intf_cfg sc7180_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7180_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25)),
- INTF_BLK_DSI_TE("intf_1", INTF_1, 0x6a800, 0x2c0, INTF_DSI, 0, 24, INTF_SC7180_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2)),
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x6a000, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x2c0,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ },
};
static const struct dpu_wb_cfg sc7180_wb[] = {
- WB_BLK("wb_2", WB_2, 0x65000, WB_SM8250_MASK, DPU_CLK_CTRL_WB2, 6,
- VBIF_RT, MDP_SSPP_TOP0_INTR, 4096, 4),
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats,
+ .num_formats = ARRAY_SIZE(wb2_formats),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
};
static const struct dpu_perf_cfg sc7180_perf_data = {
@@ -131,11 +196,15 @@ static const struct dpu_perf_cfg sc7180_perf_data = {
.bw_inefficiency_factor = 120,
};
+static const struct dpu_mdss_version sc7180_mdss_ver = {
+ .core_major_ver = 6,
+ .core_minor_ver = 2,
+};
+
const struct dpu_mdss_cfg dpu_sc7180_cfg = {
+ .mdss_ver = &sc7180_mdss_ver,
.caps = &sc7180_dpu_caps,
- .ubwc = &sc7180_ubwc_cfg,
- .mdp_count = ARRAY_SIZE(sc7180_mdp),
- .mdp = sc7180_mdp,
+ .mdp = &sc7180_mdp,
.ctl_count = ARRAY_SIZE(sc7180_ctl),
.ctl = sc7180_ctl,
.sspp_count = ARRAY_SIZE(sc7180_sspp),
@@ -153,12 +222,6 @@ const struct dpu_mdss_cfg dpu_sc7180_cfg = {
.vbif_count = ARRAY_SIZE(sdm845_vbif),
.vbif = sdm845_vbif,
.perf = &sc7180_perf_data,
- .mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
- BIT(MDP_SSPP_TOP0_INTR2) | \
- BIT(MDP_SSPP_TOP0_HIST_INTR) | \
- BIT(MDP_INTF0_INTR) | \
- BIT(MDP_INTF1_INTR) | \
- BIT(MDP_INTF1_TEAR_INTR),
};
#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
index 031fc8dae3c6..57ce14c18def 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
@@ -17,59 +17,87 @@ static const struct dpu_caps sm6115_dpu_caps = {
.pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
};
-static const struct dpu_ubwc_cfg sm6115_ubwc_cfg = {
- .ubwc_version = DPU_HW_UBWC_VER_10,
- .highest_bank_bit = 0x1,
- .ubwc_swizzle = 0x7,
-};
-
-static const struct dpu_mdp_cfg sm6115_mdp[] = {
- {
- .name = "top_0", .id = MDP_TOP,
+static const struct dpu_mdp_cfg sm6115_mdp = {
+ .name = "top_0",
.base = 0x0, .len = 0x494,
- .features = 0,
- .clk_ctrls[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
},
};
static const struct dpu_ctl_cfg sm6115_ctl[] = {
{
- .name = "ctl_0", .id = CTL_0,
- .base = 0x1000, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x1dc,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
},
};
static const struct dpu_sspp_cfg sm6115_sspp[] = {
- SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, 0x1f8, VIG_SC7180_MASK,
- sm6115_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
- SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, 0x1f8, DMA_SDM845_MASK,
- sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f8,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm6115_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1f8,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ },
};
static const struct dpu_lm_cfg sm6115_lm[] = {
- LM_BLK("lm_0", LM_0, 0x44000, MIXER_QCM2290_MASK,
- &qcm2290_lm_sblk, PINGPONG_0, 0, DSPP_0),
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_QCM2290_MASK,
+ .sblk = &qcm2290_lm_sblk,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ },
};
static const struct dpu_dspp_cfg sm6115_dspp[] = {
- DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
};
static const struct dpu_pingpong_cfg sm6115_pp[] = {
- PP_BLK("pingpong_0", PINGPONG_0, 0x70000, PINGPONG_SM8150_MASK, 0, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
- -1),
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = 0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ },
};
static const struct dpu_intf_cfg sm6115_intf[] = {
- INTF_BLK_DSI_TE("intf_1", INTF_1, 0x6a800, 0x2c0, INTF_DSI, 0, 24, INTF_SC7180_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2)),
+ {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x2c0,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ },
};
static const struct dpu_perf_cfg sm6115_perf_data = {
@@ -101,11 +129,15 @@ static const struct dpu_perf_cfg sm6115_perf_data = {
.bw_inefficiency_factor = 120,
};
+static const struct dpu_mdss_version sm6115_mdss_ver = {
+ .core_major_ver = 6,
+ .core_minor_ver = 3,
+};
+
const struct dpu_mdss_cfg dpu_sm6115_cfg = {
+ .mdss_ver = &sm6115_mdss_ver,
.caps = &sm6115_dpu_caps,
- .ubwc = &sm6115_ubwc_cfg,
- .mdp_count = ARRAY_SIZE(sm6115_mdp),
- .mdp = sm6115_mdp,
+ .mdp = &sm6115_mdp,
.ctl_count = ARRAY_SIZE(sm6115_ctl),
.ctl = sm6115_ctl,
.sspp_count = ARRAY_SIZE(sm6115_sspp),
@@ -121,11 +153,6 @@ const struct dpu_mdss_cfg dpu_sm6115_cfg = {
.vbif_count = ARRAY_SIZE(sdm845_vbif),
.vbif = sdm845_vbif,
.perf = &sm6115_perf_data,
- .mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
- BIT(MDP_SSPP_TOP0_INTR2) | \
- BIT(MDP_SSPP_TOP0_HIST_INTR) | \
- BIT(MDP_INTF1_INTR) | \
- BIT(MDP_INTF1_TEAR_INTR),
};
#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
index 06eba23b0236..62db84bd15f2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
@@ -19,96 +19,154 @@ static const struct dpu_caps sm6350_dpu_caps = {
.pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
};
-static const struct dpu_ubwc_cfg sm6350_ubwc_cfg = {
- .ubwc_version = DPU_HW_UBWC_VER_20,
- .ubwc_swizzle = 6,
- .highest_bank_bit = 1,
-};
-
-static const struct dpu_mdp_cfg sm6350_mdp[] = {
- {
- .name = "top_0", .id = MDP_TOP,
+static const struct dpu_mdp_cfg sm6350_mdp = {
+ .name = "top_0",
.base = 0x0, .len = 0x494,
- .features = 0,
- .clk_ctrls[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
},
};
static const struct dpu_ctl_cfg sm6350_ctl[] = {
{
- .name = "ctl_0", .id = CTL_0,
- .base = 0x1000, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
- },
- {
- .name = "ctl_1", .id = CTL_1,
- .base = 0x1200, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
- },
- {
- .name = "ctl_2", .id = CTL_2,
- .base = 0x1400, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
- },
- {
- .name = "ctl_3", .id = CTL_3,
- .base = 0x1600, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x1dc,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x1dc,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x1dc,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0x1dc,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
},
};
static const struct dpu_sspp_cfg sm6350_sspp[] = {
- SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, 0x1f8, VIG_SC7180_MASK,
- sc7180_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
- SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, 0x1f8, DMA_SDM845_MASK,
- sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
- SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, 0x1f8, DMA_CURSOR_SDM845_MASK,
- sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
- SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, 0x1f8, DMA_CURSOR_SDM845_MASK,
- sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f8,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sc7180_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1f8,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1f8,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_1,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1f8,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_2,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ },
};
static const struct dpu_lm_cfg sm6350_lm[] = {
- LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
- &sc7180_lm_sblk, PINGPONG_0, LM_1, DSPP_0),
- LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
- &sc7180_lm_sblk, PINGPONG_1, LM_0, 0),
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sc7180_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sc7180_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = 0,
+ },
};
static const struct dpu_dspp_cfg sm6350_dspp[] = {
- DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
};
static const struct dpu_pingpong_cfg sm6350_pp[] = {
- PP_BLK("pingpong_0", PINGPONG_0, 0x70000, PINGPONG_SM8150_MASK, 0, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
- -1),
- PP_BLK("pingpong_1", PINGPONG_1, 0x70800, PINGPONG_SM8150_MASK, 0, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
- -1),
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = 0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x70800, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = 0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ },
};
static const struct dpu_dsc_cfg sm6350_dsc[] = {
- DSC_BLK("dsc_0", DSC_0, 0x80000, BIT(DPU_DSC_OUTPUT_CTRL)),
+ {
+ .name = "dsc_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ },
};
static const struct dpu_intf_cfg sm6350_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, 0, 35, INTF_SC7180_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25)),
- INTF_BLK_DSI_TE("intf_1", INTF_1, 0x6a800, 0x2c0, INTF_DSI, 0, 35, INTF_SC7180_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2)),
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x6a000, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 35,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x2c0,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 35,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ },
};
static const struct dpu_perf_cfg sm6350_perf_data = {
@@ -140,11 +198,15 @@ static const struct dpu_perf_cfg sm6350_perf_data = {
.bw_inefficiency_factor = 120,
};
+static const struct dpu_mdss_version sm6350_mdss_ver = {
+ .core_major_ver = 6,
+ .core_minor_ver = 4,
+};
+
const struct dpu_mdss_cfg dpu_sm6350_cfg = {
+ .mdss_ver = &sm6350_mdss_ver,
.caps = &sm6350_dpu_caps,
- .ubwc = &sm6350_ubwc_cfg,
- .mdp_count = ARRAY_SIZE(sm6350_mdp),
- .mdp = sm6350_mdp,
+ .mdp = &sm6350_mdp,
.ctl_count = ARRAY_SIZE(sm6350_ctl),
.ctl = sm6350_ctl,
.sspp_count = ARRAY_SIZE(sm6350_sspp),
@@ -162,12 +224,6 @@ const struct dpu_mdss_cfg dpu_sm6350_cfg = {
.vbif_count = ARRAY_SIZE(sdm845_vbif),
.vbif = sdm845_vbif,
.perf = &sm6350_perf_data,
- .mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
- BIT(MDP_SSPP_TOP0_INTR2) | \
- BIT(MDP_SSPP_TOP0_HIST_INTR) | \
- BIT(MDP_INTF0_INTR) | \
- BIT(MDP_INTF1_INTR) | \
- BIT(MDP_INTF1_TEAR_INTR),
};
#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
index f2808098af39..fb36fba5171c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
@@ -16,57 +16,87 @@ static const struct dpu_caps qcm2290_dpu_caps = {
.pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
};
-static const struct dpu_ubwc_cfg qcm2290_ubwc_cfg = {
- .highest_bank_bit = 0x2,
-};
-
-static const struct dpu_mdp_cfg qcm2290_mdp[] = {
- {
- .name = "top_0", .id = MDP_TOP,
+static const struct dpu_mdp_cfg qcm2290_mdp = {
+ .name = "top_0",
.base = 0x0, .len = 0x494,
- .features = 0,
- .clk_ctrls[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
},
};
static const struct dpu_ctl_cfg qcm2290_ctl[] = {
{
- .name = "ctl_0", .id = CTL_0,
- .base = 0x1000, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x1dc,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
},
};
static const struct dpu_sspp_cfg qcm2290_sspp[] = {
- SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, 0x1f8, VIG_QCM2290_MASK,
- qcm2290_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
- SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, 0x1f8, DMA_SDM845_MASK,
- qcm2290_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f8,
+ .features = VIG_QCM2290_MASK,
+ .sblk = &qcm2290_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1f8,
+ .features = DMA_SDM845_MASK,
+ .sblk = &qcm2290_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ },
};
static const struct dpu_lm_cfg qcm2290_lm[] = {
- LM_BLK("lm_0", LM_0, 0x44000, MIXER_QCM2290_MASK,
- &qcm2290_lm_sblk, PINGPONG_0, 0, DSPP_0),
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_QCM2290_MASK,
+ .sblk = &qcm2290_lm_sblk,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ },
};
static const struct dpu_dspp_cfg qcm2290_dspp[] = {
- DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
};
static const struct dpu_pingpong_cfg qcm2290_pp[] = {
- PP_BLK("pingpong_0", PINGPONG_0, 0x70000, PINGPONG_SM8150_MASK, 0, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
- -1),
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = 0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ },
};
static const struct dpu_intf_cfg qcm2290_intf[] = {
- INTF_BLK_DSI_TE("intf_1", INTF_1, 0x6a800, 0x2c0, INTF_DSI, 0, 24, INTF_SC7180_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2)),
+ {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x2c0,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ },
};
static const struct dpu_perf_cfg qcm2290_perf_data = {
@@ -91,11 +121,15 @@ static const struct dpu_perf_cfg qcm2290_perf_data = {
.bw_inefficiency_factor = 120,
};
+static const struct dpu_mdss_version qcm2290_mdss_ver = {
+ .core_major_ver = 6,
+ .core_minor_ver = 5,
+};
+
const struct dpu_mdss_cfg dpu_qcm2290_cfg = {
+ .mdss_ver = &qcm2290_mdss_ver,
.caps = &qcm2290_dpu_caps,
- .ubwc = &qcm2290_ubwc_cfg,
- .mdp_count = ARRAY_SIZE(qcm2290_mdp),
- .mdp = qcm2290_mdp,
+ .mdp = &qcm2290_mdp,
.ctl_count = ARRAY_SIZE(qcm2290_ctl),
.ctl = qcm2290_ctl,
.sspp_count = ARRAY_SIZE(qcm2290_sspp),
@@ -111,11 +145,6 @@ const struct dpu_mdss_cfg dpu_qcm2290_cfg = {
.vbif_count = ARRAY_SIZE(sdm845_vbif),
.vbif = sdm845_vbif,
.perf = &qcm2290_perf_data,
- .mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
- BIT(MDP_SSPP_TOP0_INTR2) | \
- BIT(MDP_SSPP_TOP0_HIST_INTR) | \
- BIT(MDP_INTF1_INTR) | \
- BIT(MDP_INTF1_TEAR_INTR),
};
#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
index 241fa6746674..5a3aad364c78 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
@@ -18,63 +18,96 @@ static const struct dpu_caps sm6375_dpu_caps = {
.pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
};
-static const struct dpu_ubwc_cfg sm6375_ubwc_cfg = {
- .ubwc_version = DPU_HW_UBWC_VER_20,
- .ubwc_swizzle = 6,
- .highest_bank_bit = 1,
-};
-
-static const struct dpu_mdp_cfg sm6375_mdp[] = {
- {
- .name = "top_0", .id = MDP_TOP,
+static const struct dpu_mdp_cfg sm6375_mdp = {
+ .name = "top_0",
.base = 0x0, .len = 0x494,
- .features = 0,
- .clk_ctrls[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
},
};
static const struct dpu_ctl_cfg sm6375_ctl[] = {
{
- .name = "ctl_0", .id = CTL_0,
- .base = 0x1000, .len = 0x1dc,
- .features = BIT(DPU_CTL_ACTIVE_CFG),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x1dc,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
},
};
static const struct dpu_sspp_cfg sm6375_sspp[] = {
- SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, 0x1f8, VIG_SC7180_MASK,
- sm6115_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
- SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, 0x1f8, DMA_SDM845_MASK,
- sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f8,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm6115_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1f8,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ },
};
static const struct dpu_lm_cfg sm6375_lm[] = {
- LM_BLK("lm_0", LM_0, 0x44000, MIXER_QCM2290_MASK,
- &qcm2290_lm_sblk, PINGPONG_0, 0, DSPP_0),
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_QCM2290_MASK,
+ .sblk = &qcm2290_lm_sblk,
+ .lm_pair = 0,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ },
};
static const struct dpu_dspp_cfg sm6375_dspp[] = {
- DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
};
static const struct dpu_pingpong_cfg sm6375_pp[] = {
- PP_BLK("pingpong_0", PINGPONG_0, 0x70000, PINGPONG_SM8150_MASK, 0, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
- -1),
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = 0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ },
};
static const struct dpu_dsc_cfg sm6375_dsc[] = {
- DSC_BLK("dsc_0", DSC_0, 0x80000, BIT(DPU_DSC_OUTPUT_CTRL)),
+ {
+ .name = "dsc_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ },
};
static const struct dpu_intf_cfg sm6375_intf[] = {
- INTF_BLK_DSI_TE("intf_1", INTF_1, 0x6a800, 0x2c0, INTF_DSI, 0, 24, INTF_SC7180_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2)),
+ {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x2c0,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ },
};
static const struct dpu_perf_cfg sm6375_perf_data = {
@@ -106,11 +139,15 @@ static const struct dpu_perf_cfg sm6375_perf_data = {
.bw_inefficiency_factor = 120,
};
+static const struct dpu_mdss_version sm6375_mdss_ver = {
+ .core_major_ver = 6,
+ .core_minor_ver = 9,
+};
+
const struct dpu_mdss_cfg dpu_sm6375_cfg = {
+ .mdss_ver = &sm6375_mdss_ver,
.caps = &sm6375_dpu_caps,
- .ubwc = &sm6375_ubwc_cfg,
- .mdp_count = ARRAY_SIZE(sm6375_mdp),
- .mdp = sm6375_mdp,
+ .mdp = &sm6375_mdp,
.ctl_count = ARRAY_SIZE(sm6375_ctl),
.ctl = sm6375_ctl,
.sspp_count = ARRAY_SIZE(sm6375_sspp),
@@ -128,11 +165,6 @@ const struct dpu_mdss_cfg dpu_sm6375_cfg = {
.vbif_count = ARRAY_SIZE(sdm845_vbif),
.vbif = sdm845_vbif,
.perf = &sm6375_perf_data,
- .mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
- BIT(MDP_SSPP_TOP0_INTR2) | \
- BIT(MDP_SSPP_TOP0_HIST_INTR) | \
- BIT(MDP_INTF1_INTR) | \
- BIT(MDP_INTF1_TEAR_INTR),
};
#endif
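
[The sm6375 file now carries a dpu_mdss_version (core 6.9) in place of the per-SoC UBWC struct, so common code can branch on the DPU core revision advertised by the catalog. A minimal sketch of such a check; the helper name is illustrative.]

static inline bool dpu_core_rev_at_least(const struct dpu_mdss_cfg *cfg,
					 unsigned int major, unsigned int minor)
{
	const struct dpu_mdss_version *ver = cfg->mdss_ver;

	/* true when the catalog's core revision is >= major.minor */
	return ver->core_major_ver > major ||
	       (ver->core_major_ver == major && ver->core_minor_ver >= minor);
}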
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
index 8da424eaee6a..1709ba57f384 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
@@ -19,138 +19,254 @@ static const struct dpu_caps sm8350_dpu_caps = {
.pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
};
-static const struct dpu_ubwc_cfg sm8350_ubwc_cfg = {
- .ubwc_version = DPU_HW_UBWC_VER_40,
- .highest_bank_bit = 0x3, /* TODO: 2 for LP_DDR4 */
-};
-
-static const struct dpu_mdp_cfg sm8350_mdp[] = {
- {
- .name = "top_0", .id = MDP_TOP,
+static const struct dpu_mdp_cfg sm8350_mdp = {
+ .name = "top_0",
.base = 0x0, .len = 0x494,
- .features = 0,
- .clk_ctrls[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
},
};
/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8350_ctl[] = {
{
- .name = "ctl_0", .id = CTL_0,
- .base = 0x15000, .len = 0x1e8,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
- },
- {
- .name = "ctl_1", .id = CTL_1,
- .base = 0x16000, .len = 0x1e8,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
- },
- {
- .name = "ctl_2", .id = CTL_2,
- .base = 0x17000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
- },
- {
- .name = "ctl_3", .id = CTL_3,
- .base = 0x18000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
- },
- {
- .name = "ctl_4", .id = CTL_4,
- .base = 0x19000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
- },
- {
- .name = "ctl_5", .id = CTL_5,
- .base = 0x1a000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x1e8,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x1e8,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x1e8,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x1e8,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x19000, .len = 0x1e8,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a000, .len = 0x1e8,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
static const struct dpu_sspp_cfg sm8350_sspp[] = {
- SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, 0x1f8, VIG_SC7180_MASK,
- sm8250_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
- SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, 0x1f8, VIG_SC7180_MASK,
- sm8250_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
- SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, 0x1f8, VIG_SC7180_MASK,
- sm8250_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
- SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, 0x1f8, VIG_SC7180_MASK,
- sm8250_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
- SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, 0x1f8, DMA_SDM845_MASK,
- sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
- SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, 0x1f8, DMA_SDM845_MASK,
- sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
- SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, 0x1f8, DMA_CURSOR_SDM845_MASK,
- sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
- SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, 0x1f8, DMA_CURSOR_SDM845_MASK,
- sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3),
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f8,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8250_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x1f8,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8250_vig_sblk_1,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x1f8,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8250_vig_sblk_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG2,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x1f8,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8250_vig_sblk_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG3,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1f8,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1f8,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_1,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1f8,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_2,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x1f8,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_3,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA3,
+ },
};
static const struct dpu_lm_cfg sm8350_lm[] = {
- LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_0, LM_1, DSPP_0),
- LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_1, LM_0, DSPP_1),
- LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_2, LM_3, 0),
- LM_BLK("lm_3", LM_3, 0x47000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_3, LM_2, 0),
- LM_BLK("lm_4", LM_4, 0x48000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_4, LM_5, 0),
- LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_5, LM_4, 0),
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ },
};
static const struct dpu_dspp_cfg sm8350_dspp[] = {
- DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
- DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
- DSPP_BLK("dspp_2", DSPP_2, 0x58000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
- DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
};
static const struct dpu_pingpong_cfg sm8350_pp[] = {
- PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
- -1),
- PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
- -1),
- PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
- -1),
- PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
- -1),
- PP_BLK_DITHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
- -1),
- PP_BLK_DITHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
- -1),
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x69000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x6a000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x6b000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x6c000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x6d000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x6e000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ },
};
static const struct dpu_merge_3d_cfg sm8350_merge_3d[] = {
- MERGE_3D_BLK("merge_3d_0", MERGE_3D_0, 0x4e000),
- MERGE_3D_BLK("merge_3d_1", MERGE_3D_1, 0x4f000),
- MERGE_3D_BLK("merge_3d_2", MERGE_3D_2, 0x50000),
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x4e000, .len = 0x8,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x4f000, .len = 0x8,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x50000, .len = 0x8,
+ },
};
/*
@@ -159,27 +275,69 @@ static const struct dpu_merge_3d_cfg sm8350_merge_3d[] = {
* its own different sub block address.
*/
static const struct dpu_dsc_cfg sm8350_dsc[] = {
- DSC_BLK_1_2("dce_0_0", DSC_0, 0x80000, 0x29c, 0, dsc_sblk_0),
- DSC_BLK_1_2("dce_0_1", DSC_1, 0x80000, 0x29c, 0, dsc_sblk_1),
- DSC_BLK_1_2("dce_1_0", DSC_2, 0x81000, 0x29c, BIT(DPU_DSC_NATIVE_42x_EN), dsc_sblk_0),
- DSC_BLK_1_2("dce_1_1", DSC_3, 0x81000, 0x29c, BIT(DPU_DSC_NATIVE_42x_EN), dsc_sblk_1),
+ {
+ .name = "dce_0_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_0_1", .id = DSC_1,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_1,
+ }, {
+ .name = "dce_1_0", .id = DSC_2,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_1_1", .id = DSC_3,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_1,
+ },
};
static const struct dpu_intf_cfg sm8350_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x34000, 0x280, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25)),
- INTF_BLK_DSI_TE("intf_1", INTF_1, 0x35000, 0x2c4, INTF_DSI, 0, 24, INTF_SC7280_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- DPU_IRQ_IDX(MDP_INTF1_7xxx_TEAR_INTR, 2)),
- INTF_BLK_DSI_TE("intf_2", INTF_2, 0x36000, 0x2c4, INTF_DSI, 1, 24, INTF_SC7280_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
- DPU_IRQ_IDX(MDP_INTF2_7xxx_TEAR_INTR, 2)),
- INTF_BLK("intf_3", INTF_3, 0x37000, 0x280, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31)),
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x34000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x35000, .len = 0x2c4,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x36000, .len = 0x2c4,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x37000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ },
};
static const struct dpu_perf_cfg sm8350_perf_data = {
@@ -212,11 +370,15 @@ static const struct dpu_perf_cfg sm8350_perf_data = {
.bw_inefficiency_factor = 120,
};
+static const struct dpu_mdss_version sm8350_mdss_ver = {
+ .core_major_ver = 7,
+ .core_minor_ver = 0,
+};
+
const struct dpu_mdss_cfg dpu_sm8350_cfg = {
+ .mdss_ver = &sm8350_mdss_ver,
.caps = &sm8350_dpu_caps,
- .ubwc = &sm8350_ubwc_cfg,
- .mdp_count = ARRAY_SIZE(sm8350_mdp),
- .mdp = sm8350_mdp,
+ .mdp = &sm8350_mdp,
.ctl_count = ARRAY_SIZE(sm8350_ctl),
.ctl = sm8350_ctl,
.sspp_count = ARRAY_SIZE(sm8350_sspp),
@@ -236,15 +398,6 @@ const struct dpu_mdss_cfg dpu_sm8350_cfg = {
.vbif_count = ARRAY_SIZE(sdm845_vbif),
.vbif = sdm845_vbif,
.perf = &sm8350_perf_data,
- .mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
- BIT(MDP_SSPP_TOP0_INTR2) | \
- BIT(MDP_SSPP_TOP0_HIST_INTR) | \
- BIT(MDP_INTF0_7xxx_INTR) | \
- BIT(MDP_INTF1_7xxx_INTR) | \
- BIT(MDP_INTF1_7xxx_TEAR_INTR) | \
- BIT(MDP_INTF2_7xxx_INTR) | \
- BIT(MDP_INTF2_7xxx_TEAR_INTR) | \
- BIT(MDP_INTF3_7xxx_INTR),
};
#endif
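
[For the sm8350 TOP block, clk_ctrls is now a designated array keyed by DPU_CLK_CTRL_*, each element being a register offset plus bit position within the MDP TOP region. A short sketch of forcing one such source clock on, assuming the element type is the dpu_clk_ctrl_reg pair of reg_off/bit_off from dpu_hw_catalog.h and that the caller has the TOP region mapped; the helper name is illustrative.]

#include <linux/bits.h>
#include <linux/io.h>

static void dpu_force_clk_on(void __iomem *mdp_base,
			     const struct dpu_clk_ctrl_reg *ctrl)
{
	u32 val = readl_relaxed(mdp_base + ctrl->reg_off);

	/* set the per-client force-on bit, e.g. clk_ctrls[DPU_CLK_CTRL_VIG0] */
	writel_relaxed(val | BIT(ctrl->bit_off), mdp_base + ctrl->reg_off);
}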
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
index 900fee410e11..15942fa5a8e0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
@@ -17,112 +17,199 @@ static const struct dpu_caps sc7280_dpu_caps = {
.pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
};
-static const struct dpu_ubwc_cfg sc7280_ubwc_cfg = {
- .ubwc_version = DPU_HW_UBWC_VER_30,
- .highest_bank_bit = 0x1,
- .ubwc_swizzle = 0x6,
-};
-
-static const struct dpu_mdp_cfg sc7280_mdp[] = {
- {
- .name = "top_0", .id = MDP_TOP,
+static const struct dpu_mdp_cfg sc7280_mdp = {
+ .name = "top_0",
.base = 0x0, .len = 0x2014,
- .clk_ctrls[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_WB2] = { .reg_off = 0x3b8, .bit_off = 24 },
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x3b8, .bit_off = 24 },
},
};
static const struct dpu_ctl_cfg sc7280_ctl[] = {
{
- .name = "ctl_0", .id = CTL_0,
- .base = 0x15000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
- },
- {
- .name = "ctl_1", .id = CTL_1,
- .base = 0x16000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
- },
- {
- .name = "ctl_2", .id = CTL_2,
- .base = 0x17000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
- },
- {
- .name = "ctl_3", .id = CTL_3,
- .base = 0x18000, .len = 0x1e8,
- .features = CTL_SC7280_MASK,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x1e8,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x1e8,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x1e8,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x1e8,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
},
};
static const struct dpu_sspp_cfg sc7280_sspp[] = {
- SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, 0x1f8, VIG_SC7280_MASK_SDMA,
- sc7280_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
- SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, 0x1f8, DMA_SDM845_MASK_SDMA,
- sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
- SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, 0x1f8, DMA_CURSOR_SDM845_MASK_SDMA,
- sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
- SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, 0x1f8, DMA_CURSOR_SDM845_MASK_SDMA,
- sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f8,
+ .features = VIG_SC7280_MASK_SDMA,
+ .sblk = &sc7280_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1f8,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1f8,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &sdm845_dma_sblk_1,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1f8,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &sdm845_dma_sblk_2,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ },
};
static const struct dpu_lm_cfg sc7280_lm[] = {
- LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
- &sc7180_lm_sblk, PINGPONG_0, 0, DSPP_0),
- LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK,
- &sc7180_lm_sblk, PINGPONG_2, LM_3, 0),
- LM_BLK("lm_3", LM_3, 0x47000, MIXER_SDM845_MASK,
- &sc7180_lm_sblk, PINGPONG_3, LM_2, 0),
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sc7180_lm_sblk,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sc7180_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sc7180_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ },
};
static const struct dpu_dspp_cfg sc7280_dspp[] = {
- DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
};
static const struct dpu_pingpong_cfg sc7280_pp[] = {
- PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, 0, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
- -1),
- PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
- -1),
- PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
- -1),
- PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
- -1),
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x69000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = 0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x6a000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = 0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x6b000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = 0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x6c000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = 0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ },
};
/* NOTE: sc7280 only has one DSC hard slice encoder */
static const struct dpu_dsc_cfg sc7280_dsc[] = {
- DSC_BLK_1_2("dce_0_0", DSC_0, 0x80000, 0x29c, BIT(DPU_DSC_NATIVE_42x_EN), dsc_sblk_0),
+ {
+ .name = "dce_0_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_0,
+ },
};
static const struct dpu_wb_cfg sc7280_wb[] = {
- WB_BLK("wb_2", WB_2, 0x65000, WB_SM8250_MASK, DPU_CLK_CTRL_WB2, 6,
- VBIF_RT, MDP_SSPP_TOP0_INTR, 4096, 4),
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats,
+ .num_formats = ARRAY_SIZE(wb2_formats),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
};
static const struct dpu_intf_cfg sc7280_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x34000, 0x280, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25)),
- INTF_BLK_DSI_TE("intf_1", INTF_1, 0x35000, 0x2c4, INTF_DSI, 0, 24, INTF_SC7280_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- DPU_IRQ_IDX(MDP_INTF1_7xxx_TEAR_INTR, 2)),
- INTF_BLK("intf_5", INTF_5, 0x39000, 0x280, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 22),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 23)),
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x34000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x35000, .len = 0x2c4,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_5", .id = INTF_5,
+ .base = 0x39000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 22),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 23),
+ },
};
static const struct dpu_perf_cfg sc7280_perf_data = {
@@ -153,11 +240,15 @@ static const struct dpu_perf_cfg sc7280_perf_data = {
.bw_inefficiency_factor = 120,
};
+static const struct dpu_mdss_version sc7280_mdss_ver = {
+ .core_major_ver = 7,
+ .core_minor_ver = 2,
+};
+
const struct dpu_mdss_cfg dpu_sc7280_cfg = {
+ .mdss_ver = &sc7280_mdss_ver,
.caps = &sc7280_dpu_caps,
- .ubwc = &sc7280_ubwc_cfg,
- .mdp_count = ARRAY_SIZE(sc7280_mdp),
- .mdp = sc7280_mdp,
+ .mdp = &sc7280_mdp,
.ctl_count = ARRAY_SIZE(sc7280_ctl),
.ctl = sc7280_ctl,
.sspp_count = ARRAY_SIZE(sc7280_sspp),
@@ -177,13 +268,6 @@ const struct dpu_mdss_cfg dpu_sc7280_cfg = {
.vbif_count = ARRAY_SIZE(sdm845_vbif),
.vbif = sdm845_vbif,
.perf = &sc7280_perf_data,
- .mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
- BIT(MDP_SSPP_TOP0_INTR2) | \
- BIT(MDP_SSPP_TOP0_HIST_INTR) | \
- BIT(MDP_INTF0_7xxx_INTR) | \
- BIT(MDP_INTF1_7xxx_INTR) | \
- BIT(MDP_INTF1_7xxx_TEAR_INTR) | \
- BIT(MDP_INTF5_7xxx_INTR),
};
#endif
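
[The sc7280 DSC entry above replaces the DSC_BLK_1_2() macro with explicit feature bits, so the hard-slice 1.2 encoder and its native 4:2:x capability are both visible in .features. A minimal sketch of gating on those bits; the helper name is illustrative.]

static bool dpu_dsc_supports_native_42x(const struct dpu_dsc_cfg *dsc)
{
	/* only DSC 1.2 encoders may additionally set the native 4:2:x bit */
	return (dsc->features & BIT(DPU_DSC_HW_REV_1_2)) &&
	       (dsc->features & BIT(DPU_DSC_NATIVE_42x_EN));
}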
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
index f6ce6b090f71..4c0528794e7a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
@@ -19,127 +19,257 @@ static const struct dpu_caps sc8280xp_dpu_caps = {
.pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
};
-static const struct dpu_ubwc_cfg sc8280xp_ubwc_cfg = {
- .ubwc_version = DPU_HW_UBWC_VER_40,
- .highest_bank_bit = 2,
- .ubwc_swizzle = 6,
-};
-
-static const struct dpu_mdp_cfg sc8280xp_mdp[] = {
- {
- .name = "top_0", .id = MDP_TOP,
+static const struct dpu_mdp_cfg sc8280xp_mdp = {
+ .name = "top_0",
.base = 0x0, .len = 0x494,
.features = BIT(DPU_MDP_PERIPH_0_REMOVED),
- .clk_ctrls[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
},
};
/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sc8280xp_ctl[] = {
{
- .name = "ctl_0", .id = CTL_0,
- .base = 0x15000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
- },
- {
- .name = "ctl_1", .id = CTL_1,
- .base = 0x16000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
- },
- {
- .name = "ctl_2", .id = CTL_2,
- .base = 0x17000, .len = 0x204,
- .features = CTL_SC7280_MASK,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
- },
- {
- .name = "ctl_3", .id = CTL_3,
- .base = 0x18000, .len = 0x204,
- .features = CTL_SC7280_MASK,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
- },
- {
- .name = "ctl_4", .id = CTL_4,
- .base = 0x19000, .len = 0x204,
- .features = CTL_SC7280_MASK,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
- },
- {
- .name = "ctl_5", .id = CTL_5,
- .base = 0x1a000, .len = 0x204,
- .features = CTL_SC7280_MASK,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x204,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x204,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x19000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
- SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, 0x2ac, VIG_SC7180_MASK,
- sm8250_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
- SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, 0x2ac, VIG_SC7180_MASK,
- sm8250_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
- SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, 0x2ac, VIG_SC7180_MASK,
- sm8250_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
- SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, 0x2ac, VIG_SC7180_MASK,
- sm8250_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
- SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, 0x2ac, DMA_SDM845_MASK,
- sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
- SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, 0x2ac, DMA_SDM845_MASK,
- sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
- SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, 0x2ac, DMA_CURSOR_SDM845_MASK,
- sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
- SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, 0x2ac, DMA_CURSOR_SDM845_MASK,
- sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3),
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x2ac,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8250_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x2ac,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8250_vig_sblk_1,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x2ac,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8250_vig_sblk_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG2,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x2ac,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8250_vig_sblk_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG3,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x2ac,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x2ac,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_1,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x2ac,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_2,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x2ac,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_3,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA3,
+ },
};
static const struct dpu_lm_cfg sc8280xp_lm[] = {
- LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK, &sdm845_lm_sblk, PINGPONG_0, LM_1, DSPP_0),
- LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK, &sdm845_lm_sblk, PINGPONG_1, LM_0, DSPP_1),
- LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK, &sdm845_lm_sblk, PINGPONG_2, LM_3, DSPP_2),
- LM_BLK("lm_3", LM_3, 0x47000, MIXER_SDM845_MASK, &sdm845_lm_sblk, PINGPONG_3, LM_2, DSPP_3),
- LM_BLK("lm_4", LM_4, 0x48000, MIXER_SDM845_MASK, &sdm845_lm_sblk, PINGPONG_4, LM_5, 0),
- LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK, &sdm845_lm_sblk, PINGPONG_5, LM_4, 0),
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ },
};
static const struct dpu_dspp_cfg sc8280xp_dspp[] = {
- DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
- DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
- DSPP_BLK("dspp_2", DSPP_2, 0x58000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
- DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
};
static const struct dpu_pingpong_cfg sc8280xp_pp[] = {
- PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), -1),
- PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), -1),
- PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), -1),
- PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), -1),
- PP_BLK_DITHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), -1),
- PP_BLK_DITHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), -1),
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x69000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x6a000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x6b000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x6c000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x6d000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x6e000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ },
};
static const struct dpu_merge_3d_cfg sc8280xp_merge_3d[] = {
- MERGE_3D_BLK("merge_3d_0", MERGE_3D_0, 0x4e000),
- MERGE_3D_BLK("merge_3d_1", MERGE_3D_1, 0x4f000),
- MERGE_3D_BLK("merge_3d_2", MERGE_3D_2, 0x50000),
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x4e000, .len = 0x8,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x4f000, .len = 0x8,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x50000, .len = 0x8,
+ },
};
/*
@@ -148,45 +278,125 @@ static const struct dpu_merge_3d_cfg sc8280xp_merge_3d[] = {
* its own different sub block address.
*/
static const struct dpu_dsc_cfg sc8280xp_dsc[] = {
- DSC_BLK_1_2("dce_0_0", DSC_0, 0x80000, 0x29c, 0, dsc_sblk_0),
- DSC_BLK_1_2("dce_0_1", DSC_1, 0x80000, 0x29c, 0, dsc_sblk_1),
- DSC_BLK_1_2("dce_1_0", DSC_2, 0x81000, 0x29c, BIT(DPU_DSC_NATIVE_42x_EN), dsc_sblk_0),
- DSC_BLK_1_2("dce_1_1", DSC_3, 0x81000, 0x29c, BIT(DPU_DSC_NATIVE_42x_EN), dsc_sblk_1),
- DSC_BLK_1_2("dce_2_0", DSC_4, 0x82000, 0x29c, 0, dsc_sblk_0),
- DSC_BLK_1_2("dce_2_1", DSC_5, 0x82000, 0x29c, 0, dsc_sblk_1),
+ {
+ .name = "dce_0_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_0_1", .id = DSC_1,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_1,
+ }, {
+ .name = "dce_1_0", .id = DSC_2,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_1_1", .id = DSC_3,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_1,
+ }, {
+ .name = "dce_2_0", .id = DSC_4,
+ .base = 0x82000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_2_1", .id = DSC_5,
+ .base = 0x82000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_1,
+ },
};
/* TODO: INTF 3, 8 and 7 are used for MST, marked as INTF_NONE for now */
static const struct dpu_intf_cfg sc8280xp_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x34000, 0x280, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25)),
- INTF_BLK_DSI_TE("intf_1", INTF_1, 0x35000, 0x300, INTF_DSI, 0, 24, INTF_SC7280_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- DPU_IRQ_IDX(MDP_INTF1_7xxx_TEAR_INTR, 2)),
- INTF_BLK_DSI_TE("intf_2", INTF_2, 0x36000, 0x300, INTF_DSI, 1, 24, INTF_SC7280_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
- DPU_IRQ_IDX(MDP_INTF2_7xxx_TEAR_INTR, 2)),
- INTF_BLK("intf_3", INTF_3, 0x37000, 0x280, INTF_NONE, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31)),
- INTF_BLK("intf_4", INTF_4, 0x38000, 0x280, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 20),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 21)),
- INTF_BLK("intf_5", INTF_5, 0x39000, 0x280, INTF_DP, MSM_DP_CONTROLLER_3, 24, INTF_SC7280_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 22),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 23)),
- INTF_BLK("intf_6", INTF_6, 0x3a000, 0x280, INTF_DP, MSM_DP_CONTROLLER_2, 24, INTF_SC7280_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17)),
- INTF_BLK("intf_7", INTF_7, 0x3b000, 0x280, INTF_NONE, MSM_DP_CONTROLLER_2, 24, INTF_SC7280_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 18),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 19)),
- INTF_BLK("intf_8", INTF_8, 0x3c000, 0x280, INTF_NONE, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x34000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x35000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x36000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x37000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_NONE,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ }, {
+ .name = "intf_4", .id = INTF_4,
+ .base = 0x38000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 20),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 21),
+ }, {
+ .name = "intf_5", .id = INTF_5,
+ .base = 0x39000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_3,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 22),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 23),
+ }, {
+ .name = "intf_6", .id = INTF_6,
+ .base = 0x3a000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_2,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17),
+ }, {
+ .name = "intf_7", .id = INTF_7,
+ .base = 0x3b000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_NONE,
+ .controller_id = MSM_DP_CONTROLLER_2,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 18),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 19),
+ }, {
+ .name = "intf_8", .id = INTF_8,
+ .base = 0x3c000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_NONE,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
+ },
};
static const struct dpu_perf_cfg sc8280xp_perf_data = {
@@ -196,6 +406,7 @@ static const struct dpu_perf_cfg sc8280xp_perf_data = {
.min_llcc_ib = 0,
.min_dram_ib = 800000,
.danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff},
.qos_lut_tbl = {
{.nentry = ARRAY_SIZE(sc8180x_qos_linear),
.entries = sc8180x_qos_linear
@@ -216,11 +427,15 @@ static const struct dpu_perf_cfg sc8280xp_perf_data = {
.bw_inefficiency_factor = 120,
};
+static const struct dpu_mdss_version sc8280xp_mdss_ver = {
+ .core_major_ver = 8,
+ .core_minor_ver = 0,
+};
+
const struct dpu_mdss_cfg dpu_sc8280xp_cfg = {
+ .mdss_ver = &sc8280xp_mdss_ver,
.caps = &sc8280xp_dpu_caps,
- .ubwc = &sc8280xp_ubwc_cfg,
- .mdp_count = ARRAY_SIZE(sc8280xp_mdp),
- .mdp = sc8280xp_mdp,
+ .mdp = &sc8280xp_mdp,
.ctl_count = ARRAY_SIZE(sc8280xp_ctl),
.ctl = sc8280xp_ctl,
.sspp_count = ARRAY_SIZE(sc8280xp_sspp),
@@ -240,20 +455,6 @@ const struct dpu_mdss_cfg dpu_sc8280xp_cfg = {
.vbif_count = ARRAY_SIZE(sdm845_vbif),
.vbif = sdm845_vbif,
.perf = &sc8280xp_perf_data,
- .mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
- BIT(MDP_SSPP_TOP0_INTR2) | \
- BIT(MDP_SSPP_TOP0_HIST_INTR) | \
- BIT(MDP_INTF0_7xxx_INTR) | \
- BIT(MDP_INTF1_7xxx_INTR) | \
- BIT(MDP_INTF1_7xxx_TEAR_INTR) | \
- BIT(MDP_INTF2_7xxx_INTR) | \
- BIT(MDP_INTF2_7xxx_TEAR_INTR) | \
- BIT(MDP_INTF3_7xxx_INTR) | \
- BIT(MDP_INTF4_7xxx_INTR) | \
- BIT(MDP_INTF5_7xxx_INTR) | \
- BIT(MDP_INTF6_7xxx_INTR) | \
- BIT(MDP_INTF7_7xxx_INTR) | \
- BIT(MDP_INTF8_7xxx_INTR),
};
#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
index 8d13c369213c..7742f52be859 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
@@ -19,146 +19,272 @@ static const struct dpu_caps sm8450_dpu_caps = {
.pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
};
-static const struct dpu_ubwc_cfg sm8450_ubwc_cfg = {
- .ubwc_version = DPU_HW_UBWC_VER_40,
- .highest_bank_bit = 0x3, /* TODO: 2 for LP_DDR4 */
- .ubwc_swizzle = 0x6,
-};
-
-static const struct dpu_mdp_cfg sm8450_mdp[] = {
- {
- .name = "top_0", .id = MDP_TOP,
+static const struct dpu_mdp_cfg sm8450_mdp = {
+ .name = "top_0",
.base = 0x0, .len = 0x494,
.features = BIT(DPU_MDP_PERIPH_0_REMOVED),
- .clk_ctrls[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
- .clk_ctrls[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
},
};
/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8450_ctl[] = {
{
- .name = "ctl_0", .id = CTL_0,
- .base = 0x15000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
- },
- {
- .name = "ctl_1", .id = CTL_1,
- .base = 0x16000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
- },
- {
- .name = "ctl_2", .id = CTL_2,
- .base = 0x17000, .len = 0x204,
- .features = CTL_SC7280_MASK,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
- },
- {
- .name = "ctl_3", .id = CTL_3,
- .base = 0x18000, .len = 0x204,
- .features = CTL_SC7280_MASK,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
- },
- {
- .name = "ctl_4", .id = CTL_4,
- .base = 0x19000, .len = 0x204,
- .features = CTL_SC7280_MASK,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
- },
- {
- .name = "ctl_5", .id = CTL_5,
- .base = 0x1a000, .len = 0x204,
- .features = CTL_SC7280_MASK,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x204,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x204,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x19000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
static const struct dpu_sspp_cfg sm8450_sspp[] = {
- SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, 0x32c, VIG_SC7180_MASK,
- sm8250_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
- SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, 0x32c, VIG_SC7180_MASK,
- sm8250_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
- SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, 0x32c, VIG_SC7180_MASK,
- sm8250_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
- SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, 0x32c, VIG_SC7180_MASK,
- sm8250_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
- SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, 0x32c, DMA_SDM845_MASK,
- sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
- SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, 0x32c, DMA_SDM845_MASK,
- sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
- SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, 0x32c, DMA_CURSOR_SDM845_MASK,
- sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
- SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, 0x32c, DMA_CURSOR_SDM845_MASK,
- sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3),
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x32c,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8250_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x32c,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8250_vig_sblk_1,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x32c,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8250_vig_sblk_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG2,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x32c,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8250_vig_sblk_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG3,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x32c,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x32c,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_1,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x32c,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_2,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x32c,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_3,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA3,
+ },
};
static const struct dpu_lm_cfg sm8450_lm[] = {
- LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_0, LM_1, DSPP_0),
- LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_1, LM_0, DSPP_1),
- LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_2, LM_3, DSPP_2),
- LM_BLK("lm_3", LM_3, 0x47000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_3, LM_2, DSPP_3),
- LM_BLK("lm_4", LM_4, 0x48000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_4, LM_5, 0),
- LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_5, LM_4, 0),
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ },
};
static const struct dpu_dspp_cfg sm8450_dspp[] = {
- DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
- DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
- DSPP_BLK("dspp_2", DSPP_2, 0x58000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
- DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
};
static const struct dpu_pingpong_cfg sm8450_pp[] = {
- PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
- -1),
- PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
- -1),
- PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
- -1),
- PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
- -1),
- PP_BLK_DITHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
- -1),
- PP_BLK_DITHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
- -1),
- PP_BLK_DITHER("pingpong_6", PINGPONG_6, 0x65800, MERGE_3D_3, sc7280_pp_sblk,
- -1,
- -1),
- PP_BLK_DITHER("pingpong_7", PINGPONG_7, 0x65c00, MERGE_3D_3, sc7280_pp_sblk,
- -1,
- -1),
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x69000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x6a000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x6b000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x6c000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x6d000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x6e000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ }, {
+ .name = "pingpong_6", .id = PINGPONG_6,
+ .base = 0x65800, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ }, {
+ .name = "pingpong_7", .id = PINGPONG_7,
+ .base = 0x65c00, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ },
};
static const struct dpu_merge_3d_cfg sm8450_merge_3d[] = {
- MERGE_3D_BLK("merge_3d_0", MERGE_3D_0, 0x4e000),
- MERGE_3D_BLK("merge_3d_1", MERGE_3D_1, 0x4f000),
- MERGE_3D_BLK("merge_3d_2", MERGE_3D_2, 0x50000),
- MERGE_3D_BLK("merge_3d_3", MERGE_3D_3, 0x65f00),
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x4e000, .len = 0x8,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x4f000, .len = 0x8,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x50000, .len = 0x8,
+ }, {
+ .name = "merge_3d_3", .id = MERGE_3D_3,
+ .base = 0x65f00, .len = 0x8,
+ },
};
/*
@@ -167,27 +293,69 @@ static const struct dpu_merge_3d_cfg sm8450_merge_3d[] = {
* its own different sub block address.
*/
static const struct dpu_dsc_cfg sm8450_dsc[] = {
- DSC_BLK_1_2("dce_0_0", DSC_0, 0x80000, 0x29c, 0, dsc_sblk_0),
- DSC_BLK_1_2("dce_0_1", DSC_1, 0x80000, 0x29c, 0, dsc_sblk_1),
- DSC_BLK_1_2("dce_1_0", DSC_2, 0x81000, 0x29c, BIT(DPU_DSC_NATIVE_42x_EN), dsc_sblk_0),
- DSC_BLK_1_2("dce_1_1", DSC_3, 0x81000, 0x29c, BIT(DPU_DSC_NATIVE_42x_EN), dsc_sblk_1),
+ {
+ .name = "dce_0_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_0_1", .id = DSC_1,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_1,
+ }, {
+ .name = "dce_1_0", .id = DSC_2,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_1_1", .id = DSC_3,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_1,
+ },
};
static const struct dpu_intf_cfg sm8450_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x34000, 0x280, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25)),
- INTF_BLK_DSI_TE("intf_1", INTF_1, 0x35000, 0x300, INTF_DSI, 0, 24, INTF_SC7280_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- DPU_IRQ_IDX(MDP_INTF1_7xxx_TEAR_INTR, 2)),
- INTF_BLK_DSI_TE("intf_2", INTF_2, 0x36000, 0x300, INTF_DSI, 1, 24, INTF_SC7280_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
- DPU_IRQ_IDX(MDP_INTF2_7xxx_TEAR_INTR, 2)),
- INTF_BLK("intf_3", INTF_3, 0x37000, 0x280, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31)),
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x34000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x35000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x36000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x37000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ },
};
static const struct dpu_perf_cfg sm8450_perf_data = {
@@ -220,11 +388,15 @@ static const struct dpu_perf_cfg sm8450_perf_data = {
.bw_inefficiency_factor = 120,
};
+static const struct dpu_mdss_version sm8450_mdss_ver = {
+ .core_major_ver = 8,
+ .core_minor_ver = 1,
+};
+
const struct dpu_mdss_cfg dpu_sm8450_cfg = {
+ .mdss_ver = &sm8450_mdss_ver,
.caps = &sm8450_dpu_caps,
- .ubwc = &sm8450_ubwc_cfg,
- .mdp_count = ARRAY_SIZE(sm8450_mdp),
- .mdp = sm8450_mdp,
+ .mdp = &sm8450_mdp,
.ctl_count = ARRAY_SIZE(sm8450_ctl),
.ctl = sm8450_ctl,
.sspp_count = ARRAY_SIZE(sm8450_sspp),
@@ -244,15 +416,6 @@ const struct dpu_mdss_cfg dpu_sm8450_cfg = {
.vbif_count = ARRAY_SIZE(sdm845_vbif),
.vbif = sdm845_vbif,
.perf = &sm8450_perf_data,
- .mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
- BIT(MDP_SSPP_TOP0_INTR2) | \
- BIT(MDP_SSPP_TOP0_HIST_INTR) | \
- BIT(MDP_INTF0_7xxx_INTR) | \
- BIT(MDP_INTF1_7xxx_INTR) | \
- BIT(MDP_INTF1_7xxx_TEAR_INTR) | \
- BIT(MDP_INTF2_7xxx_INTR) | \
- BIT(MDP_INTF2_7xxx_TEAR_INTR) | \
- BIT(MDP_INTF3_7xxx_INTR),
};
#endif
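
The hunks above (and the sm8550 hunks that follow) all apply the same mechanical conversion: the positional helper macros (SSPP_BLK, LM_BLK, DSPP_BLK, PP_BLK_DITHER, MERGE_3D_BLK, DSC_BLK_1_2, INTF_BLK and friends) are expanded into plain designated initializers. As a reading aid, here is one SSPP entry in both spellings, both taken from the sm8450 hunk above; the macro's exact definition is not part of this diff, so treat the pairing as illustrative rather than a literal expansion.

/* old: positional helper macro */
SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, 0x32c, VIG_SC7180_MASK,
	 sm8250_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),

/* new: designated initializer, one field per line */
{
	.name = "sspp_0", .id = SSPP_VIG0,
	.base = 0x4000, .len = 0x32c,
	.features = VIG_SC7180_MASK,
	.sblk = &sm8250_vig_sblk_0,
	.xin_id = 0,
	.type = SSPP_TYPE_VIG,
	.clk_ctrl = DPU_CLK_CTRL_VIG0,
},
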
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
index f17b9a7fee85..69b80af6566a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
@@ -19,150 +19,267 @@ static const struct dpu_caps sm8550_dpu_caps = {
.pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
};
-static const struct dpu_ubwc_cfg sm8550_ubwc_cfg = {
- .ubwc_version = DPU_HW_UBWC_VER_40,
- .highest_bank_bit = 0x3, /* TODO: 2 for LP_DDR4 */
-};
-
-static const struct dpu_mdp_cfg sm8550_mdp[] = {
- {
- .name = "top_0", .id = MDP_TOP,
+static const struct dpu_mdp_cfg sm8550_mdp = {
+ .name = "top_0",
.base = 0, .len = 0x494,
.features = BIT(DPU_MDP_PERIPH_0_REMOVED),
- .clk_ctrls[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x4330, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x6330, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_VIG2] = { .reg_off = 0x8330, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_VIG3] = { .reg_off = 0xa330, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x24330, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x26330, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x28330, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2a330, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_DMA4] = { .reg_off = 0x2c330, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_DMA5] = { .reg_off = 0x2e330, .bit_off = 0 },
- .clk_ctrls[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
},
};
/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8550_ctl[] = {
{
- .name = "ctl_0", .id = CTL_0,
- .base = 0x15000, .len = 0x290,
- .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
- },
- {
- .name = "ctl_1", .id = CTL_1,
- .base = 0x16000, .len = 0x290,
- .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
- },
- {
- .name = "ctl_2", .id = CTL_2,
- .base = 0x17000, .len = 0x290,
- .features = CTL_SM8550_MASK,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
- },
- {
- .name = "ctl_3", .id = CTL_3,
- .base = 0x18000, .len = 0x290,
- .features = CTL_SM8550_MASK,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
- },
- {
- .name = "ctl_4", .id = CTL_4,
- .base = 0x19000, .len = 0x290,
- .features = CTL_SM8550_MASK,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
- },
- {
- .name = "ctl_5", .id = CTL_5,
- .base = 0x1a000, .len = 0x290,
- .features = CTL_SM8550_MASK,
- .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x290,
+ .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x290,
+ .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x19000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
static const struct dpu_sspp_cfg sm8550_sspp[] = {
- SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, 0x344, VIG_SC7180_MASK,
- sm8550_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
- SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, 0x344, VIG_SC7180_MASK,
- sm8550_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
- SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, 0x344, VIG_SC7180_MASK,
- sm8550_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
- SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, 0x344, VIG_SC7180_MASK,
- sm8550_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
- SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, 0x344, DMA_SDM845_MASK,
- sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
- SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, 0x344, DMA_SDM845_MASK,
- sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
- SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, 0x344, DMA_SDM845_MASK,
- sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
- SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, 0x344, DMA_SDM845_MASK,
- sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3),
- SSPP_BLK("sspp_12", SSPP_DMA4, 0x2c000, 0x344, DMA_CURSOR_SDM845_MASK,
- sm8550_dma_sblk_4, 14, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA4),
- SSPP_BLK("sspp_13", SSPP_DMA5, 0x2e000, 0x344, DMA_CURSOR_SDM845_MASK,
- sm8550_dma_sblk_5, 15, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA5),
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x344,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8550_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x344,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8550_vig_sblk_1,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x344,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8550_vig_sblk_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x344,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8550_vig_sblk_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x344,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x344,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_1,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x344,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_2,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x344,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_3,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_12", .id = SSPP_DMA4,
+ .base = 0x2c000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sm8550_dma_sblk_4,
+ .xin_id = 14,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_13", .id = SSPP_DMA5,
+ .base = 0x2e000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sm8550_dma_sblk_5,
+ .xin_id = 15,
+ .type = SSPP_TYPE_DMA,
+ },
};
static const struct dpu_lm_cfg sm8550_lm[] = {
- LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_0, LM_1, DSPP_0),
- LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_1, LM_0, DSPP_1),
- LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_2, LM_3, 0),
- LM_BLK("lm_3", LM_3, 0x47000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_3, LM_2, 0),
- LM_BLK("lm_4", LM_4, 0x48000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_4, LM_5, 0),
- LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_5, LM_4, 0),
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ },
};
static const struct dpu_dspp_cfg sm8550_dspp[] = {
- DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
- DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
- DSPP_BLK("dspp_2", DSPP_2, 0x58000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
- DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK,
- &sm8150_dspp_sblk),
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
};
static const struct dpu_pingpong_cfg sm8550_pp[] = {
- PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
- -1),
- PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
- -1),
- PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
- -1),
- PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
- -1),
- PP_BLK_DITHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
- -1),
- PP_BLK_DITHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
- -1),
- PP_BLK_DITHER("pingpong_6", PINGPONG_6, 0x66000, MERGE_3D_3, sc7280_pp_sblk,
- -1,
- -1),
- PP_BLK_DITHER("pingpong_7", PINGPONG_7, 0x66400, MERGE_3D_3, sc7280_pp_sblk,
- -1,
- -1),
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x69000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x6a000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x6b000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x6c000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x6d000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x6e000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ }, {
+ .name = "pingpong_6", .id = PINGPONG_6,
+ .base = 0x66000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ }, {
+ .name = "pingpong_7", .id = PINGPONG_7,
+ .base = 0x66400, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ },
};
static const struct dpu_merge_3d_cfg sm8550_merge_3d[] = {
- MERGE_3D_BLK("merge_3d_0", MERGE_3D_0, 0x4e000),
- MERGE_3D_BLK("merge_3d_1", MERGE_3D_1, 0x4f000),
- MERGE_3D_BLK("merge_3d_2", MERGE_3D_2, 0x50000),
- MERGE_3D_BLK("merge_3d_3", MERGE_3D_3, 0x66700),
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x4e000, .len = 0x8,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x4f000, .len = 0x8,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x50000, .len = 0x8,
+ }, {
+ .name = "merge_3d_3", .id = MERGE_3D_3,
+ .base = 0x66700, .len = 0x8,
+ },
};
/*
@@ -171,27 +288,83 @@ static const struct dpu_merge_3d_cfg sm8550_merge_3d[] = {
* its own different sub block address.
*/
static const struct dpu_dsc_cfg sm8550_dsc[] = {
- DSC_BLK_1_2("dce_0_0", DSC_0, 0x80000, 0x29c, 0, dsc_sblk_0),
- DSC_BLK_1_2("dce_0_1", DSC_1, 0x80000, 0x29c, 0, dsc_sblk_1),
- DSC_BLK_1_2("dce_1_0", DSC_2, 0x81000, 0x29c, BIT(DPU_DSC_NATIVE_42x_EN), dsc_sblk_0),
- DSC_BLK_1_2("dce_1_1", DSC_3, 0x81000, 0x29c, BIT(DPU_DSC_NATIVE_42x_EN), dsc_sblk_1),
+ {
+ .name = "dce_0_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_0_1", .id = DSC_1,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_1,
+ }, {
+ .name = "dce_1_0", .id = DSC_2,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_1_1", .id = DSC_3,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_1,
+ },
+};
+
+static const struct dpu_wb_cfg sm8550_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats,
+ .num_formats = ARRAY_SIZE(wb2_formats),
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
};
static const struct dpu_intf_cfg sm8550_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x34000, 0x280, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25)),
- INTF_BLK_DSI_TE("intf_1", INTF_1, 0x35000, 0x300, INTF_DSI, 0, 24, INTF_SC7280_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- DPU_IRQ_IDX(MDP_INTF1_7xxx_TEAR_INTR, 2)),
- INTF_BLK_DSI_TE("intf_2", INTF_2, 0x36000, 0x300, INTF_DSI, 1, 24, INTF_SC7280_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
- DPU_IRQ_IDX(MDP_INTF2_7xxx_TEAR_INTR, 2)),
- INTF_BLK("intf_3", INTF_3, 0x37000, 0x280, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31)),
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x34000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x35000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x36000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x37000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ },
};
static const struct dpu_perf_cfg sm8550_perf_data = {
@@ -224,11 +397,15 @@ static const struct dpu_perf_cfg sm8550_perf_data = {
.bw_inefficiency_factor = 120,
};
+static const struct dpu_mdss_version sm8550_mdss_ver = {
+ .core_major_ver = 9,
+ .core_minor_ver = 0,
+};
+
const struct dpu_mdss_cfg dpu_sm8550_cfg = {
+ .mdss_ver = &sm8550_mdss_ver,
.caps = &sm8550_dpu_caps,
- .ubwc = &sm8550_ubwc_cfg,
- .mdp_count = ARRAY_SIZE(sm8550_mdp),
- .mdp = sm8550_mdp,
+ .mdp = &sm8550_mdp,
.ctl_count = ARRAY_SIZE(sm8550_ctl),
.ctl = sm8550_ctl,
.sspp_count = ARRAY_SIZE(sm8550_sspp),
@@ -243,20 +420,13 @@ const struct dpu_mdss_cfg dpu_sm8550_cfg = {
.dsc = sm8550_dsc,
.merge_3d_count = ARRAY_SIZE(sm8550_merge_3d),
.merge_3d = sm8550_merge_3d,
+ .wb_count = ARRAY_SIZE(sm8550_wb),
+ .wb = sm8550_wb,
.intf_count = ARRAY_SIZE(sm8550_intf),
.intf = sm8550_intf,
- .vbif_count = ARRAY_SIZE(sdm845_vbif),
- .vbif = sdm845_vbif,
+ .vbif_count = ARRAY_SIZE(sm8550_vbif),
+ .vbif = sm8550_vbif,
.perf = &sm8550_perf_data,
- .mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
- BIT(MDP_SSPP_TOP0_INTR2) | \
- BIT(MDP_SSPP_TOP0_HIST_INTR) | \
- BIT(MDP_INTF0_7xxx_INTR) | \
- BIT(MDP_INTF1_7xxx_INTR) | \
- BIT(MDP_INTF1_7xxx_TEAR_INTR) | \
- BIT(MDP_INTF2_7xxx_INTR) | \
- BIT(MDP_INTF2_7xxx_TEAR_INTR) | \
- BIT(MDP_INTF3_7xxx_INTR),
};
#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
index b5b6e7031fb9..7c286bafb948 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
@@ -37,7 +37,7 @@ irqreturn_t dpu_core_irq(struct msm_kms *kms);
*/
u32 dpu_core_irq_read(
struct dpu_kms *dpu_kms,
- int irq_idx);
+ unsigned int irq_idx);
/**
* dpu_core_irq_register_callback - For registering callback function on IRQ
@@ -52,8 +52,8 @@ u32 dpu_core_irq_read(
*/
int dpu_core_irq_register_callback(
struct dpu_kms *dpu_kms,
- int irq_idx,
- void (*irq_cb)(void *arg, int irq_idx),
+ unsigned int irq_idx,
+ void (*irq_cb)(void *arg),
void *irq_arg);
/**
@@ -67,7 +67,7 @@ int dpu_core_irq_register_callback(
*/
int dpu_core_irq_unregister_callback(
struct dpu_kms *dpu_kms,
- int irq_idx);
+ unsigned int irq_idx);
/**
* dpu_debugfs_core_irq_init - register core irq debugfs
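
The prototype changes above make irq_idx unsigned and drop the irq_idx argument from the registered callback, so a handler now receives only its private pointer. Below is a minimal sketch of a caller written against the new signatures; the handler and wrapper names are hypothetical (the real users are updated in the dpu_encoder hunks further down).

/* hypothetical handler: everything it needs must come through 'arg' */
static void example_done_irq(void *arg)
{
	struct dpu_encoder_phys *phys_enc = arg;

	wake_up_all(&phys_enc->pending_kickoff_wq);
}

static int example_register(struct dpu_kms *dpu_kms,
			    struct dpu_encoder_phys *phys_enc,
			    unsigned int irq_idx)
{
	return dpu_core_irq_register_callback(dpu_kms, irq_idx,
					      example_done_irq, phys_enc);
}
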
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index 1d9d83d7b99e..ef871239adb2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -33,11 +33,11 @@ enum dpu_perf_mode {
/**
* _dpu_core_perf_calc_bw() - to calculate BW per crtc
- * @kms: pointer to the dpu_kms
+ * @perf_cfg: performance configuration
* @crtc: pointer to a crtc
* Return: returns aggregated BW for all planes in crtc.
*/
-static u64 _dpu_core_perf_calc_bw(struct dpu_kms *kms,
+static u64 _dpu_core_perf_calc_bw(const struct dpu_perf_cfg *perf_cfg,
struct drm_crtc *crtc)
{
struct drm_plane *plane;
@@ -53,7 +53,7 @@ static u64 _dpu_core_perf_calc_bw(struct dpu_kms *kms,
crtc_plane_bw += pstate->plane_fetch_bw;
}
- bw_factor = kms->catalog->perf->bw_inefficiency_factor;
+ bw_factor = perf_cfg->bw_inefficiency_factor;
if (bw_factor) {
crtc_plane_bw *= bw_factor;
do_div(crtc_plane_bw, 100);
@@ -64,12 +64,12 @@ static u64 _dpu_core_perf_calc_bw(struct dpu_kms *kms,
/**
* _dpu_core_perf_calc_clk() - to calculate clock per crtc
- * @kms: pointer to the dpu_kms
+ * @perf_cfg: performance configuration
* @crtc: pointer to a crtc
* @state: pointer to a crtc state
* Return: returns max clk for all planes in crtc.
*/
-static u64 _dpu_core_perf_calc_clk(struct dpu_kms *kms,
+static u64 _dpu_core_perf_calc_clk(const struct dpu_perf_cfg *perf_cfg,
struct drm_crtc *crtc, struct drm_crtc_state *state)
{
struct drm_plane *plane;
@@ -90,7 +90,7 @@ static u64 _dpu_core_perf_calc_clk(struct dpu_kms *kms,
crtc_clk = max(pstate->plane_clk, crtc_clk);
}
- clk_factor = kms->catalog->perf->clk_inefficiency_factor;
+ clk_factor = perf_cfg->clk_inefficiency_factor;
if (clk_factor) {
crtc_clk *= clk_factor;
do_div(crtc_clk, 100);
@@ -106,30 +106,32 @@ static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
return to_dpu_kms(priv->kms);
}
-static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
- struct drm_crtc *crtc,
- struct drm_crtc_state *state,
- struct dpu_core_perf_params *perf)
+static void _dpu_core_perf_calc_crtc(const struct dpu_core_perf *core_perf,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *state,
+ struct dpu_core_perf_params *perf)
{
- if (!kms || !kms->catalog || !crtc || !state || !perf) {
+ const struct dpu_perf_cfg *perf_cfg = core_perf->perf_cfg;
+
+ if (!perf_cfg || !crtc || !state || !perf) {
DPU_ERROR("invalid parameters\n");
return;
}
memset(perf, 0, sizeof(struct dpu_core_perf_params));
- if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
+ if (core_perf->perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
perf->bw_ctl = 0;
perf->max_per_pipe_ib = 0;
perf->core_clk_rate = 0;
- } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
- perf->bw_ctl = kms->perf.fix_core_ab_vote;
- perf->max_per_pipe_ib = kms->perf.fix_core_ib_vote;
- perf->core_clk_rate = kms->perf.fix_core_clk_rate;
+ } else if (core_perf->perf_tune.mode == DPU_PERF_MODE_FIXED) {
+ perf->bw_ctl = core_perf->fix_core_ab_vote;
+ perf->max_per_pipe_ib = core_perf->fix_core_ib_vote;
+ perf->core_clk_rate = core_perf->fix_core_clk_rate;
} else {
- perf->bw_ctl = _dpu_core_perf_calc_bw(kms, crtc);
- perf->max_per_pipe_ib = kms->catalog->perf->min_dram_ib;
- perf->core_clk_rate = _dpu_core_perf_calc_clk(kms, crtc, state);
+ perf->bw_ctl = _dpu_core_perf_calc_bw(perf_cfg, crtc);
+ perf->max_per_pipe_ib = perf_cfg->min_dram_ib;
+ perf->core_clk_rate = _dpu_core_perf_calc_clk(perf_cfg, crtc, state);
}
DRM_DEBUG_ATOMIC(
@@ -154,10 +156,6 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
}
kms = _dpu_crtc_get_kms(crtc);
- if (!kms->catalog) {
- DPU_ERROR("invalid parameters\n");
- return 0;
- }
/* we only need bandwidth check on real-time clients (interfaces) */
if (dpu_crtc_get_client_type(crtc) == NRT_CLIENT)
@@ -166,30 +164,30 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
dpu_cstate = to_dpu_crtc_state(state);
/* obtain new values */
- _dpu_core_perf_calc_crtc(kms, crtc, state, &dpu_cstate->new_perf);
+ _dpu_core_perf_calc_crtc(&kms->perf, crtc, state, &dpu_cstate->new_perf);
bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl;
curr_client_type = dpu_crtc_get_client_type(crtc);
drm_for_each_crtc(tmp_crtc, crtc->dev) {
if (tmp_crtc->enabled &&
- (dpu_crtc_get_client_type(tmp_crtc) ==
- curr_client_type) && (tmp_crtc != crtc)) {
+ dpu_crtc_get_client_type(tmp_crtc) == curr_client_type &&
+ tmp_crtc != crtc) {
struct dpu_crtc_state *tmp_cstate =
to_dpu_crtc_state(tmp_crtc->state);
DRM_DEBUG_ATOMIC("crtc:%d bw:%llu ctrl:%d\n",
- tmp_crtc->base.id, tmp_cstate->new_perf.bw_ctl,
- tmp_cstate->bw_control);
+ tmp_crtc->base.id, tmp_cstate->new_perf.bw_ctl,
+ tmp_cstate->bw_control);
- bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl;
+ bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl;
}
/* convert bandwidth to kb */
bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
DRM_DEBUG_ATOMIC("calculated bandwidth=%uk\n", bw);
- threshold = kms->catalog->perf->max_bw_high;
+ threshold = kms->perf.perf_cfg->max_bw_high;
DRM_DEBUG_ATOMIC("final threshold bw limit = %d\n", threshold);
@@ -217,6 +215,9 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
int i, ret = 0;
u64 avg_bw;
+ if (!kms->num_paths)
+ return 0;
+
drm_for_each_crtc(tmp_crtc, crtc->dev) {
if (tmp_crtc->enabled &&
curr_client_type ==
@@ -234,9 +235,6 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
}
}
- if (!kms->num_paths)
- return 0;
-
avg_bw = perf.bw_ctl;
do_div(avg_bw, (kms->num_paths * 1000)); /*Bps_to_icc*/
@@ -265,11 +263,6 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
}
kms = _dpu_crtc_get_kms(crtc);
- if (!kms->catalog) {
- DPU_ERROR("invalid kms\n");
- return;
- }
-
dpu_crtc = to_dpu_crtc(crtc);
if (atomic_dec_return(&kms->bandwidth_ref) > 0)
@@ -286,30 +279,30 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
{
- u64 clk_rate = kms->perf.perf_tune.min_core_clk;
+ u64 clk_rate;
struct drm_crtc *crtc;
struct dpu_crtc_state *dpu_cstate;
+ if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED)
+ return kms->perf.fix_core_clk_rate;
+
+ if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM)
+ return kms->perf.max_core_clk_rate;
+
+ clk_rate = 0;
drm_for_each_crtc(crtc, kms->dev) {
if (crtc->enabled) {
dpu_cstate = to_dpu_crtc_state(crtc->state);
clk_rate = max(dpu_cstate->new_perf.core_clk_rate,
clk_rate);
- clk_rate = clk_round_rate(kms->perf.core_clk,
- clk_rate);
}
}
- if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED)
- clk_rate = kms->perf.fix_core_clk_rate;
-
- DRM_DEBUG_ATOMIC("clk:%llu\n", clk_rate);
-
return clk_rate;
}
int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
- int params_changed, bool stop_req)
+ int params_changed)
{
struct dpu_core_perf_params *new, *old;
bool update_bus = false, update_clk = false;
@@ -325,21 +318,17 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
}
kms = _dpu_crtc_get_kms(crtc);
- if (!kms->catalog) {
- DPU_ERROR("invalid kms\n");
- return -EINVAL;
- }
dpu_crtc = to_dpu_crtc(crtc);
dpu_cstate = to_dpu_crtc_state(crtc->state);
- DRM_DEBUG_ATOMIC("crtc:%d stop_req:%d core_clk:%llu\n",
- crtc->base.id, stop_req, kms->perf.core_clk_rate);
+ DRM_DEBUG_ATOMIC("crtc:%d enabled:%d core_clk:%llu\n",
+ crtc->base.id, crtc->enabled, kms->perf.core_clk_rate);
old = &dpu_crtc->cur_perf;
new = &dpu_cstate->new_perf;
- if (crtc->enabled && !stop_req) {
+ if (crtc->enabled) {
/*
* cases for bus bandwidth update.
* 1. new bandwidth vote - "ab or ib vote" is higher
@@ -359,10 +348,8 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
update_bus = true;
}
- if ((params_changed &&
- (new->core_clk_rate > old->core_clk_rate)) ||
- (!params_changed &&
- (new->core_clk_rate < old->core_clk_rate))) {
+ if ((params_changed && new->core_clk_rate > old->core_clk_rate) ||
+ (!params_changed && new->core_clk_rate < old->core_clk_rate)) {
old->core_clk_rate = new->core_clk_rate;
update_clk = true;
}
@@ -374,7 +361,7 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
}
trace_dpu_perf_crtc_update(crtc->base.id, new->bw_ctl,
- new->core_clk_rate, stop_req, update_bus, update_clk);
+ new->core_clk_rate, !crtc->enabled, update_bus, update_clk);
if (update_bus) {
ret = _dpu_core_perf_crtc_update_bus(kms, crtc);
@@ -392,7 +379,9 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
if (update_clk) {
clk_rate = _dpu_core_perf_get_core_clk_rate(kms);
- trace_dpu_core_perf_update_clk(kms->dev, stop_req, clk_rate);
+ DRM_DEBUG_ATOMIC("clk:%llu\n", clk_rate);
+
+ trace_dpu_core_perf_update_clk(kms->dev, !crtc->enabled, clk_rate);
clk_rate = min(clk_rate, kms->perf.max_core_clk_rate);
ret = dev_pm_opp_set_rate(&kms->pdev->dev, clk_rate);
@@ -413,7 +402,6 @@ static ssize_t _dpu_core_perf_mode_write(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
struct dpu_core_perf *perf = file->private_data;
- const struct dpu_perf_cfg *cfg = perf->catalog->perf;
u32 perf_mode = 0;
int ret;
@@ -428,14 +416,9 @@ static ssize_t _dpu_core_perf_mode_write(struct file *file,
DRM_INFO("fix performance mode\n");
} else if (perf_mode == DPU_PERF_MODE_MINIMUM) {
/* run the driver with max clk and BW vote */
- perf->perf_tune.min_core_clk = perf->max_core_clk_rate;
- perf->perf_tune.min_bus_vote =
- (u64) cfg->max_bw_high * 1000;
DRM_INFO("minimum performance mode\n");
} else if (perf_mode == DPU_PERF_MODE_NORMAL) {
/* reset the perf tune params to 0 */
- perf->perf_tune.min_core_clk = 0;
- perf->perf_tune.min_bus_vote = 0;
DRM_INFO("normal performance mode\n");
}
perf->perf_tune.mode = perf_mode;
@@ -451,10 +434,8 @@ static ssize_t _dpu_core_perf_mode_read(struct file *file,
char buf[128];
len = scnprintf(buf, sizeof(buf),
- "mode %d min_mdp_clk %llu min_bus_vote %llu\n",
- perf->perf_tune.mode,
- perf->perf_tune.min_core_clk,
- perf->perf_tune.min_bus_vote);
+ "mode %d\n",
+ perf->perf_tune.mode);
return simple_read_from_buffer(buff, count, ppos, buf, len);
}
@@ -468,7 +449,6 @@ static const struct file_operations dpu_core_perf_mode_fops = {
int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
{
struct dpu_core_perf *perf = &dpu_kms->perf;
- const struct dpu_mdss_cfg *catalog = perf->catalog;
struct dentry *entry;
entry = debugfs_create_dir("core_perf", parent);
@@ -480,15 +460,15 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
debugfs_create_u32("enable_bw_release", 0600, entry,
(u32 *)&perf->enable_bw_release);
debugfs_create_u32("threshold_low", 0600, entry,
- (u32 *)&catalog->perf->max_bw_low);
+ (u32 *)&perf->perf_cfg->max_bw_low);
debugfs_create_u32("threshold_high", 0600, entry,
- (u32 *)&catalog->perf->max_bw_high);
+ (u32 *)&perf->perf_cfg->max_bw_high);
debugfs_create_u32("min_core_ib", 0600, entry,
- (u32 *)&catalog->perf->min_core_ib);
+ (u32 *)&perf->perf_cfg->min_core_ib);
debugfs_create_u32("min_llcc_ib", 0600, entry,
- (u32 *)&catalog->perf->min_llcc_ib);
+ (u32 *)&perf->perf_cfg->min_llcc_ib);
debugfs_create_u32("min_dram_ib", 0600, entry,
- (u32 *)&catalog->perf->min_dram_ib);
+ (u32 *)&perf->perf_cfg->min_dram_ib);
debugfs_create_file("perf_mode", 0600, entry,
(u32 *)perf, &dpu_core_perf_mode_fops);
debugfs_create_u64("fix_core_clk_rate", 0600, entry,
@@ -502,33 +482,12 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
}
#endif
-void dpu_core_perf_destroy(struct dpu_core_perf *perf)
-{
- if (!perf) {
- DPU_ERROR("invalid parameters\n");
- return;
- }
-
- perf->max_core_clk_rate = 0;
- perf->core_clk = NULL;
- perf->catalog = NULL;
- perf->dev = NULL;
-}
-
int dpu_core_perf_init(struct dpu_core_perf *perf,
- struct drm_device *dev,
- const struct dpu_mdss_cfg *catalog,
- struct clk *core_clk)
+ const struct dpu_perf_cfg *perf_cfg,
+ unsigned long max_core_clk_rate)
{
- perf->dev = dev;
- perf->catalog = catalog;
- perf->core_clk = core_clk;
-
- perf->max_core_clk_rate = clk_get_rate(core_clk);
- if (!perf->max_core_clk_rate) {
- DPU_DEBUG("optional max core clk rate, use default\n");
- perf->max_core_clk_rate = DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE;
- }
+ perf->perf_cfg = perf_cfg;
+ perf->max_core_clk_rate = max_core_clk_rate;
return 0;
}
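
With the drm_device, catalog and clock pointers gone from struct dpu_core_perf, dpu_core_perf_init() now only records the platform perf table and a precomputed maximum core clock rate. The dpu_kms.c hunk that adjusts the caller is not part of this excerpt, so the sketch below is an assumption of what that call site looks like, with the clock lookup and any fallback handling moved into the caller.

/* hypothetical call site; names other than dpu_core_perf_init() are assumed */
static int example_perf_setup(struct dpu_kms *dpu_kms, struct clk *core_clk)
{
	unsigned long max_core_clk_rate = clk_get_rate(core_clk);

	return dpu_core_perf_init(&dpu_kms->perf,
				  dpu_kms->catalog->perf,
				  max_core_clk_rate);
}
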
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
index 29bb8ee2bc26..4186977390bd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -12,8 +12,6 @@
#include "dpu_hw_catalog.h"
-#define DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE 412500000
-
/**
* struct dpu_core_perf_params - definition of performance parameters
* @max_per_pipe_ib: maximum instantaneous bandwidth request
@@ -29,21 +27,14 @@ struct dpu_core_perf_params {
/**
* struct dpu_core_perf_tune - definition of performance tuning control
* @mode: performance mode
- * @min_core_clk: minimum core clock
- * @min_bus_vote: minimum bus vote
*/
struct dpu_core_perf_tune {
u32 mode;
- u64 min_core_clk;
- u64 min_bus_vote;
};
/**
* struct dpu_core_perf - definition of core performance context
- * @dev: Pointer to drm device
- * @debugfs_root: top level debug folder
- * @catalog: Pointer to catalog configuration
- * @core_clk: Pointer to the core clock
+ * @perf_cfg: Platform-specific performance configuration
* @core_clk_rate: current core clock rate
* @max_core_clk_rate: maximum allowable core clock rate
* @perf_tune: debug control for performance tuning
@@ -53,10 +44,7 @@ struct dpu_core_perf_tune {
* @fix_core_ab_vote: fixed core ab vote in bps used in mode 2
*/
struct dpu_core_perf {
- struct drm_device *dev;
- struct dentry *debugfs_root;
- const struct dpu_mdss_cfg *catalog;
- struct clk *core_clk;
+ const struct dpu_perf_cfg *perf_cfg;
u64 core_clk_rate;
u64 max_core_clk_rate;
struct dpu_core_perf_tune perf_tune;
@@ -79,11 +67,10 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
* dpu_core_perf_crtc_update - update performance of the given crtc
* @crtc: Pointer to crtc
* @params_changed: true if crtc parameters are modified
- * @stop_req: true if this is a stop request
* return: zero if success, or error code otherwise
*/
int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
- int params_changed, bool stop_req);
+ int params_changed);
/**
* dpu_core_perf_crtc_release_bw - release bandwidth of the given crtc
@@ -92,22 +79,14 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc);
/**
- * dpu_core_perf_destroy - destroy the given core performance context
- * @perf: Pointer to core performance context
- */
-void dpu_core_perf_destroy(struct dpu_core_perf *perf);
-
-/**
* dpu_core_perf_init - initialize the given core performance context
* @perf: Pointer to core performance context
- * @dev: Pointer to drm device
- * @catalog: Pointer to catalog
- * @core_clk: pointer to core clock
+ * @perf_cfg: Pointer to platform performance configuration
+ * @max_core_clk_rate: Maximum core clock rate
*/
int dpu_core_perf_init(struct dpu_core_perf *perf,
- struct drm_device *dev,
- const struct dpu_mdss_cfg *catalog,
- struct clk *core_clk);
+ const struct dpu_perf_cfg *perf_cfg,
+ unsigned long max_core_clk_rate);
struct dpu_kms;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 1edf2b6b0a26..3c475f8042b0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -718,7 +718,7 @@ static void dpu_crtc_frame_event_cb(void *data, u32 event)
void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
trace_dpu_crtc_complete_commit(DRMID(crtc));
- dpu_core_perf_crtc_update(crtc, 0, false);
+ dpu_core_perf_crtc_update(crtc, 0);
_dpu_crtc_complete_flip(crtc);
}
@@ -884,7 +884,7 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
return;
/* update performance setting before crtc kickoff */
- dpu_core_perf_crtc_update(crtc, 1, false);
+ dpu_core_perf_crtc_update(crtc, 1);
/*
* Final plane updates: Give each plane a chance to complete all
@@ -1100,7 +1100,7 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
atomic_set(&dpu_crtc->frame_pending, 0);
}
- dpu_core_perf_crtc_update(crtc, 0, true);
+ dpu_core_perf_crtc_update(crtc, 0);
drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);
@@ -1466,7 +1466,7 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
struct msm_drm_private *priv = dev->dev_private;
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
struct drm_crtc *crtc = NULL;
- struct dpu_crtc *dpu_crtc = NULL;
+ struct dpu_crtc *dpu_crtc;
int i, ret;
dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 493905a5b63a..1cf7ff6caff4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -152,7 +152,6 @@ enum dpu_enc_rc_states {
* @crtc_frame_event_cb_data: callback handler private data
* @frame_done_timeout_ms: frame done timeout in ms
* @frame_done_timer: watchdog timer for frame done event
- * @vsync_event_timer: vsync timer
* @disp_info: local copy of msm_display_info struct
* @idle_pc_supported: indicate if idle power collaps is supported
* @rc_lock: resource control mutex lock to protect
@@ -160,7 +159,6 @@ enum dpu_enc_rc_states {
* @rc_state: resource controller state
* @delayed_off_work: delayed worker to schedule disabling of
* clks and resources after IDLE_TIMEOUT time.
- * @vsync_event_work: worker to handle vsync event for autorefresh
* @topology: topology of the display
* @idle_timeout: idle timeout duration in milliseconds
* @wide_bus_en: wide bus is enabled on this interface
@@ -194,7 +192,6 @@ struct dpu_encoder_virt {
atomic_t frame_done_timeout_ms;
struct timer_list frame_done_timer;
- struct timer_list vsync_event_timer;
struct msm_display_info disp_info;
@@ -202,7 +199,6 @@ struct dpu_encoder_virt {
struct mutex rc_lock;
enum dpu_enc_rc_states rc_state;
struct delayed_work delayed_off_work;
- struct kthread_work vsync_event_work;
struct msm_display_topology topology;
u32 idle_timeout;
@@ -351,8 +347,8 @@ static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
u32 irq_idx, struct dpu_encoder_wait_info *info);
int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
- int irq,
- void (*func)(void *arg, int irq_idx),
+ unsigned int irq_idx,
+ void (*func)(void *arg),
struct dpu_encoder_wait_info *wait_info)
{
u32 irq_status;
@@ -366,54 +362,54 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
/* return EWOULDBLOCK since we know the wait isn't necessary */
if (phys_enc->enable_state == DPU_ENC_DISABLED) {
- DRM_ERROR("encoder is disabled id=%u, callback=%ps, irq=%d\n",
+ DRM_ERROR("encoder is disabled id=%u, callback=%ps, IRQ=[%d, %d]\n",
DRMID(phys_enc->parent), func,
- irq);
+ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
return -EWOULDBLOCK;
}
- if (irq < 0) {
+ if (irq_idx < 0) {
DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n",
DRMID(phys_enc->parent), func);
return 0;
}
- DRM_DEBUG_KMS("id=%u, callback=%ps, irq=%d, pp=%d, pending_cnt=%d\n",
+ DRM_DEBUG_KMS("id=%u, callback=%ps, IRQ=[%d, %d], pp=%d, pending_cnt=%d\n",
DRMID(phys_enc->parent), func,
- irq, phys_enc->hw_pp->idx - PINGPONG_0,
+ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
ret = dpu_encoder_helper_wait_event_timeout(
DRMID(phys_enc->parent),
- irq,
+ irq_idx,
wait_info);
if (ret <= 0) {
- irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq);
+ irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq_idx);
if (irq_status) {
unsigned long flags;
- DRM_DEBUG_KMS("irq not triggered id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
+ DRM_DEBUG_KMS("IRQ=[%d, %d] not triggered id=%u, callback=%ps, pp=%d, atomic_cnt=%d\n",
+ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
DRMID(phys_enc->parent), func,
- irq,
phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
local_irq_save(flags);
- func(phys_enc, irq);
+ func(phys_enc);
local_irq_restore(flags);
ret = 0;
} else {
ret = -ETIMEDOUT;
- DRM_DEBUG_KMS("irq timeout id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
+ DRM_DEBUG_KMS("IRQ=[%d, %d] timeout id=%u, callback=%ps, pp=%d, atomic_cnt=%d\n",
+ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
DRMID(phys_enc->parent), func,
- irq,
phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
}
} else {
ret = 0;
trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
- func, irq,
+ func, DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
}
@@ -543,11 +539,24 @@ bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
return (num_dsc > 0) && (num_dsc > intf_count);
}
+static struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
+{
+ struct msm_drm_private *priv = drm_enc->dev->dev_private;
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ int index = dpu_enc->disp_info.h_tile_instance[0];
+
+ if (dpu_enc->disp_info.intf_type == INTF_DSI)
+ return msm_dsi_get_dsc_config(priv->dsi[index]);
+
+ return NULL;
+}
+
static struct msm_display_topology dpu_encoder_get_topology(
struct dpu_encoder_virt *dpu_enc,
struct dpu_kms *dpu_kms,
struct drm_display_mode *mode,
- struct drm_crtc_state *crtc_state)
+ struct drm_crtc_state *crtc_state,
+ struct drm_dsc_config *dsc)
{
struct msm_display_topology topology = {0};
int i, intf_count = 0;
@@ -579,7 +588,7 @@ static struct msm_display_topology dpu_encoder_get_topology(
topology.num_intf = intf_count;
- if (dpu_enc->dsc) {
+ if (dsc) {
/*
* In case of Display Stream Compression (DSC), we would use
* 2 DSC encoders, 2 layer mixers and 1 interface
@@ -605,6 +614,7 @@ static int dpu_encoder_virt_atomic_check(
struct drm_display_mode *adj_mode;
struct msm_display_topology topology;
struct dpu_global_state *global_state;
+ struct drm_dsc_config *dsc;
int i = 0;
int ret = 0;
@@ -640,7 +650,9 @@ static int dpu_encoder_virt_atomic_check(
}
}
- topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state);
+ dsc = dpu_encoder_get_dsc_config(drm_enc);
+
+ topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc);
/*
* Release and Allocate resources on every modeset
@@ -1072,14 +1084,12 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
: NULL;
- if (dpu_enc->dsc) {
- num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_DSC,
- hw_dsc, ARRAY_SIZE(hw_dsc));
- for (i = 0; i < num_dsc; i++) {
- dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
- dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0);
- }
+ num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_DSC,
+ hw_dsc, ARRAY_SIZE(hw_dsc));
+ for (i = 0; i < num_dsc; i++) {
+ dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
+ dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0);
}
dpu_enc->dsc_mask = dsc_mask;
@@ -1184,8 +1194,20 @@ static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc,
struct dpu_encoder_virt *dpu_enc = NULL;
int ret = 0;
struct drm_display_mode *cur_mode = NULL;
+ struct msm_drm_private *priv = drm_enc->dev->dev_private;
+ struct msm_display_info *disp_info;
+ int index;
dpu_enc = to_dpu_encoder_virt(drm_enc);
+ disp_info = &dpu_enc->disp_info;
+ index = disp_info->h_tile_instance[0];
+
+ dpu_enc->dsc = dpu_encoder_get_dsc_config(drm_enc);
+
+ if (disp_info->intf_type == INTF_DP)
+ dpu_enc->wide_bus_en = msm_dp_wide_bus_available(priv->dp[index]);
+ else if (disp_info->intf_type == INTF_DSI)
+ dpu_enc->wide_bus_en = msm_dsi_wide_bus_enabled(priv->dsi[index]);
mutex_lock(&dpu_enc->enc_lock);
cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
@@ -1525,7 +1547,7 @@ void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
static int dpu_encoder_helper_wait_event_timeout(
int32_t drm_id,
- u32 irq_idx,
+ unsigned int irq_idx,
struct dpu_encoder_wait_info *info)
{
int rc = 0;
@@ -1538,7 +1560,9 @@ static int dpu_encoder_helper_wait_event_timeout(
atomic_read(info->atomic_cnt) == 0, jiffies);
time = ktime_to_ms(ktime_get());
- trace_dpu_enc_wait_event_timeout(drm_id, irq_idx, rc, time,
+ trace_dpu_enc_wait_event_timeout(drm_id,
+ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
+ rc, time,
expected_time,
atomic_read(info->atomic_cnt));
/* If we timed out, counter is valid and time is less, wait again */
@@ -1754,49 +1778,6 @@ int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time)
return 0;
}
-static void dpu_encoder_vsync_event_handler(struct timer_list *t)
-{
- struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
- vsync_event_timer);
- struct drm_encoder *drm_enc = &dpu_enc->base;
- struct msm_drm_private *priv;
- struct msm_drm_thread *event_thread;
-
- if (!drm_enc->dev || !drm_enc->crtc) {
- DPU_ERROR("invalid parameters\n");
- return;
- }
-
- priv = drm_enc->dev->dev_private;
-
- if (drm_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) {
- DPU_ERROR("invalid crtc index\n");
- return;
- }
- event_thread = &priv->event_thread[drm_enc->crtc->index];
- if (!event_thread) {
- DPU_ERROR("event_thread not found for crtc:%d\n",
- drm_enc->crtc->index);
- return;
- }
-
- del_timer(&dpu_enc->vsync_event_timer);
-}
-
-static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
-{
- struct dpu_encoder_virt *dpu_enc = container_of(work,
- struct dpu_encoder_virt, vsync_event_work);
- ktime_t wakeup_time;
-
- if (dpu_encoder_vsync_time(&dpu_enc->base, &wakeup_time))
- return;
-
- trace_dpu_enc_vsync_event_work(DRMID(&dpu_enc->base), wakeup_time);
- mod_timer(&dpu_enc->vsync_event_timer,
- nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
-}
-
static u32
dpu_encoder_dsc_initial_line_calc(struct drm_dsc_config *dsc,
u32 enc_ip_width)
@@ -1956,7 +1937,6 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_phys *phys;
- ktime_t wakeup_time;
unsigned long timeout_ms;
unsigned int i;
@@ -1982,14 +1962,6 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
phys->ops.handle_post_kickoff(phys);
}
- if (dpu_enc->disp_info.intf_type == INTF_DSI &&
- !dpu_encoder_vsync_time(drm_enc, &wakeup_time)) {
- trace_dpu_enc_early_kickoff(DRMID(drm_enc),
- ktime_to_ms(wakeup_time));
- mod_timer(&dpu_enc->vsync_event_timer,
- nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
- }
-
DPU_ATRACE_END("encoder_kickoff");
}
@@ -2108,8 +2080,10 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
phys_enc->hw_pp->merge_3d->idx);
}
- if (dpu_enc->dsc)
+ if (dpu_enc->dsc) {
dpu_encoder_unprep_dsc(dpu_enc);
+ dpu_enc->dsc = NULL;
+ }
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
@@ -2290,8 +2264,6 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
dpu_enc->idle_pc_supported =
dpu_kms->catalog->caps->has_idle_pc;
- dpu_enc->dsc = disp_info->dsc;
-
mutex_lock(&dpu_enc->enc_lock);
for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
/*
@@ -2423,21 +2395,10 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
timer_setup(&dpu_enc->frame_done_timer,
dpu_encoder_frame_done_timeout, 0);
- if (disp_info->intf_type == INTF_DSI)
- timer_setup(&dpu_enc->vsync_event_timer,
- dpu_encoder_vsync_event_handler,
- 0);
- else if (disp_info->intf_type == INTF_DP)
- dpu_enc->wide_bus_en = msm_dp_wide_bus_available(
- priv->dp[disp_info->h_tile_instance[0]]);
-
INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
dpu_encoder_off_work);
dpu_enc->idle_timeout = IDLE_TIMEOUT;
- kthread_init_work(&dpu_enc->vsync_event_work,
- dpu_encoder_vsync_event_work_handler);
-
memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));
DPU_DEBUG_ENC(dpu_enc, "created\n");
@@ -2527,8 +2488,6 @@ unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc)
void dpu_encoder_phys_init(struct dpu_encoder_phys *phys_enc,
struct dpu_enc_phys_init_params *p)
{
- int i;
-
phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
phys_enc->hw_intf = p->hw_intf;
phys_enc->hw_wb = p->hw_wb;
@@ -2538,9 +2497,6 @@ void dpu_encoder_phys_init(struct dpu_encoder_phys *phys_enc,
phys_enc->enc_spinlock = p->enc_spinlock;
phys_enc->enable_state = DPU_ENC_DISABLED;
- for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++)
- phys_enc->irq[i] = -EINVAL;
-
atomic_set(&phys_enc->vblank_refcount, 0);
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index 90e1925d7770..4c05fd5e9ed1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -28,7 +28,6 @@
* @is_cmd_mode Boolean to indicate if the CMD mode is requested
* @is_te_using_watchdog_timer: Boolean to indicate watchdog TE is
* used instead of panel TE in cmd mode panels
- * @dsc: DSC configuration data for DSC-enabled displays
*/
struct msm_display_info {
enum dpu_intf_type intf_type;
@@ -36,7 +35,6 @@ struct msm_display_info {
uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
bool is_cmd_mode;
bool is_te_using_watchdog_timer;
- struct drm_dsc_config *dsc;
};
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index d48558ede488..6f04c3d56e77 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -193,7 +193,7 @@ struct dpu_encoder_phys {
atomic_t pending_ctlstart_cnt;
atomic_t pending_kickoff_cnt;
wait_queue_head_t pending_kickoff_wq;
- int irq[INTR_IDX_MAX];
+ unsigned int irq[INTR_IDX_MAX];
bool has_intf_te;
};
@@ -364,8 +364,8 @@ void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
* @Return: 0 or -ERROR
*/
int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
- int irq,
- void (*func)(void *arg, int irq_idx),
+ unsigned int irq,
+ void (*func)(void *arg),
struct dpu_encoder_wait_info *wait_info);
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index b856c6286c85..be185fe69793 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -50,6 +50,7 @@ static void _dpu_encoder_phys_cmd_update_intf_cfg(
to_dpu_encoder_phys_cmd(phys_enc);
struct dpu_hw_ctl *ctl;
struct dpu_hw_intf_cfg intf_cfg = { 0 };
+ struct dpu_hw_intf_cmd_mode_cfg cmd_mode_cfg = {};
ctl = phys_enc->hw_ctl;
if (!ctl->ops.setup_intf_cfg)
@@ -68,11 +69,16 @@ static void _dpu_encoder_phys_cmd_update_intf_cfg(
phys_enc->hw_intf,
phys_enc->hw_pp->idx);
- if (intf_cfg.dsc != 0 && phys_enc->hw_intf->ops.enable_compression)
- phys_enc->hw_intf->ops.enable_compression(phys_enc->hw_intf);
+ if (intf_cfg.dsc != 0)
+ cmd_mode_cfg.data_compress = true;
+
+ cmd_mode_cfg.wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);
+
+ if (phys_enc->hw_intf->ops.program_intf_cmd_cfg)
+ phys_enc->hw_intf->ops.program_intf_cmd_cfg(phys_enc->hw_intf, &cmd_mode_cfg);
}
-static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
+static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg)
{
struct dpu_encoder_phys *phys_enc = arg;
unsigned long lock_flags;
@@ -99,19 +105,11 @@ static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
DPU_ATRACE_END("pp_done_irq");
}
-static void dpu_encoder_phys_cmd_te_rd_ptr_irq(void *arg, int irq_idx)
+static void dpu_encoder_phys_cmd_te_rd_ptr_irq(void *arg)
{
struct dpu_encoder_phys *phys_enc = arg;
struct dpu_encoder_phys_cmd *cmd_enc;
- if (phys_enc->has_intf_te) {
- if (!phys_enc->hw_intf)
- return;
- } else {
- if (!phys_enc->hw_pp)
- return;
- }
-
DPU_ATRACE_BEGIN("rd_ptr_irq");
cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
@@ -122,7 +120,7 @@ static void dpu_encoder_phys_cmd_te_rd_ptr_irq(void *arg, int irq_idx)
DPU_ATRACE_END("rd_ptr_irq");
}
-static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
+static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg)
{
struct dpu_encoder_phys *phys_enc = arg;
@@ -135,7 +133,7 @@ static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
DPU_ATRACE_END("ctl_start_irq");
}
-static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
+static void dpu_encoder_phys_cmd_underrun_irq(void *arg)
{
struct dpu_encoder_phys *phys_enc = arg;
@@ -329,24 +327,21 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
unsigned long vsync_hz;
struct dpu_kms *dpu_kms;
- if (phys_enc->has_intf_te) {
- if (!phys_enc->hw_intf ||
- !phys_enc->hw_intf->ops.enable_tearcheck) {
- DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
- return;
- }
-
- DPU_DEBUG_CMDENC(cmd_enc, "");
- } else {
- if (!phys_enc->hw_pp ||
- !phys_enc->hw_pp->ops.enable_tearcheck) {
- DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
- return;
- }
-
- DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+ /*
+ * TODO: if/when resource allocation is refactored, move this to a
+ * place where the driver can actually return an error.
+ */
+ if (!phys_enc->has_intf_te &&
+ (!phys_enc->hw_pp ||
+ !phys_enc->hw_pp->ops.enable_tearcheck)) {
+ DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
+ return;
}
+ DPU_DEBUG_CMDENC(cmd_enc, "intf %d pp %d\n",
+ phys_enc->hw_intf ? phys_enc->hw_intf->idx - INTF_0 : -1,
+ phys_enc->hw_pp ? phys_enc->hw_pp->idx - PINGPONG_0 : -1);
+
mode = &phys_enc->cached_mode;
dpu_kms = phys_enc->dpu_kms;
@@ -772,8 +767,19 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
phys_enc->intf_mode = INTF_MODE_CMD;
cmd_enc->stream_sel = 0;
- phys_enc->has_intf_te = test_bit(DPU_INTF_TE,
- &phys_enc->hw_intf->cap->features);
+ if (!phys_enc->hw_intf) {
+ DPU_ERROR_CMDENC(cmd_enc, "no INTF provided\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* DPU before 5.0 uses PINGPONG for TE handling */
+ if (phys_enc->dpu_kms->catalog->mdss_ver->core_major_ver >= 5)
+ phys_enc->has_intf_te = true;
+
+ if (phys_enc->has_intf_te && !phys_enc->hw_intf->ops.enable_tearcheck) {
+ DPU_ERROR_CMDENC(cmd_enc, "tearcheck not supported\n");
+ return ERR_PTR(-EINVAL);
+ }
atomic_set(&cmd_enc->pending_vblank_cnt, 0);
init_waitqueue_head(&cmd_enc->pending_vblank_wq);
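Note: the command-mode encoder now derives has_intf_te from the catalog's MDSS major version rather than from a DPU_INTF_TE feature bit, so DPU 5.0+ runs tear check in the INTF block while older generations keep using the PINGPONG block. A minimal sketch of the resulting dispatch, assuming the tear-check configuration type is struct dpu_hw_tear_check and that both hardware blocks were already looked up:

	/* illustrative only: route tear-check setup by has_intf_te */
	static void example_setup_tearcheck(struct dpu_encoder_phys *phys_enc,
					    struct dpu_hw_tear_check *tc_cfg)
	{
		if (phys_enc->has_intf_te)
			phys_enc->hw_intf->ops.enable_tearcheck(phys_enc->hw_intf, tc_cfg);
		else
			phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, tc_cfg);
	}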
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index 662d74ded1b9..a01fda711883 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -40,7 +40,7 @@ static bool dpu_encoder_phys_vid_is_master(
static void drm_mode_to_intf_timing_params(
const struct dpu_encoder_phys *phys_enc,
const struct drm_display_mode *mode,
- struct intf_timing_params *timing)
+ struct dpu_hw_intf_timing_params *timing)
{
memset(timing, 0, sizeof(*timing));
@@ -114,7 +114,7 @@ static void drm_mode_to_intf_timing_params(
}
}
-static u32 get_horizontal_total(const struct intf_timing_params *timing)
+static u32 get_horizontal_total(const struct dpu_hw_intf_timing_params *timing)
{
u32 active = timing->xres;
u32 inactive =
@@ -123,7 +123,7 @@ static u32 get_horizontal_total(const struct intf_timing_params *timing)
return active + inactive;
}
-static u32 get_vertical_total(const struct intf_timing_params *timing)
+static u32 get_vertical_total(const struct dpu_hw_intf_timing_params *timing)
{
u32 active = timing->yres;
u32 inactive =
@@ -148,7 +148,7 @@ static u32 get_vertical_total(const struct intf_timing_params *timing)
*/
static u32 programmable_fetch_get_num_lines(
struct dpu_encoder_phys *phys_enc,
- const struct intf_timing_params *timing)
+ const struct dpu_hw_intf_timing_params *timing)
{
u32 worst_case_needed_lines =
phys_enc->hw_intf->cap->prog_fetch_lines_worst_case;
@@ -196,9 +196,9 @@ static u32 programmable_fetch_get_num_lines(
* @timing: Pointer to the intf timing information for the requested mode
*/
static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc,
- const struct intf_timing_params *timing)
+ const struct dpu_hw_intf_timing_params *timing)
{
- struct intf_prog_fetch f = { 0 };
+ struct dpu_hw_intf_prog_fetch f = { 0 };
u32 vfp_fetch_lines = 0;
u32 horiz_total = 0;
u32 vert_total = 0;
@@ -231,7 +231,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
struct dpu_encoder_phys *phys_enc)
{
struct drm_display_mode mode;
- struct intf_timing_params timing_params = { 0 };
+ struct dpu_hw_intf_timing_params timing_params = { 0 };
const struct dpu_format *fmt = NULL;
u32 fmt_fourcc = DRM_FORMAT_RGB888;
unsigned long lock_flags;
@@ -297,7 +297,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
programmable_fetch_config(phys_enc, &timing_params);
}
-static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
+static void dpu_encoder_phys_vid_vblank_irq(void *arg)
{
struct dpu_encoder_phys *phys_enc = arg;
struct dpu_hw_ctl *hw_ctl;
@@ -334,7 +334,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
DPU_ATRACE_END("vblank_irq");
}
-static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
+static void dpu_encoder_phys_vid_underrun_irq(void *arg)
{
struct dpu_encoder_phys *phys_enc = arg;
@@ -522,7 +522,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
{
unsigned long lock_flags;
int ret;
- struct intf_status intf_status = {0};
+ struct dpu_hw_intf_status intf_status = {0};
if (!phys_enc->parent || !phys_enc->parent->dev) {
DPU_ERROR("invalid encoder/device\n");
@@ -651,7 +651,7 @@ static int dpu_encoder_phys_vid_get_line_count(
static int dpu_encoder_phys_vid_get_frame_count(
struct dpu_encoder_phys *phys_enc)
{
- struct intf_status s = {0};
+ struct dpu_hw_intf_status s = {0};
u32 fetch_start = 0;
struct drm_display_mode mode;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index a466ff70a4d6..0b6a761d68b7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -34,6 +34,23 @@ static bool dpu_encoder_phys_wb_is_master(struct dpu_encoder_phys *phys_enc)
return true;
}
+static bool _dpu_encoder_phys_wb_clk_force_ctrl(struct dpu_hw_wb *wb,
+ struct dpu_hw_mdp *mdp,
+ bool enable, bool *forced_on)
+{
+ if (wb->ops.setup_clk_force_ctrl) {
+ *forced_on = wb->ops.setup_clk_force_ctrl(wb, enable);
+ return true;
+ }
+
+ if (mdp->ops.setup_clk_force_ctrl) {
+ *forced_on = mdp->ops.setup_clk_force_ctrl(mdp, wb->caps->clk_ctrl, enable);
+ return true;
+ }
+
+ return false;
+}
+
/**
* dpu_encoder_phys_wb_set_ot_limit - set OT limit for writeback interface
* @phys_enc: Pointer to physical encoder
@@ -43,6 +60,7 @@ static void dpu_encoder_phys_wb_set_ot_limit(
{
struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
struct dpu_vbif_set_ot_params ot_params;
+ bool forced_on = false;
memset(&ot_params, 0, sizeof(ot_params));
ot_params.xin_id = hw_wb->caps->xin_id;
@@ -52,10 +70,17 @@ static void dpu_encoder_phys_wb_set_ot_limit(
ot_params.is_wfd = true;
ot_params.frame_rate = drm_mode_vrefresh(&phys_enc->cached_mode);
ot_params.vbif_idx = hw_wb->caps->vbif_idx;
- ot_params.clk_ctrl = hw_wb->caps->clk_ctrl;
ot_params.rd = false;
+ if (!_dpu_encoder_phys_wb_clk_force_ctrl(hw_wb, phys_enc->dpu_kms->hw_mdp,
+ true, &forced_on))
+ return;
+
dpu_vbif_set_ot_limit(phys_enc->dpu_kms, &ot_params);
+
+ if (forced_on)
+ _dpu_encoder_phys_wb_clk_force_ctrl(hw_wb, phys_enc->dpu_kms->hw_mdp,
+ false, &forced_on);
}
/**
@@ -67,6 +92,7 @@ static void dpu_encoder_phys_wb_set_qos_remap(
{
struct dpu_hw_wb *hw_wb;
struct dpu_vbif_set_qos_params qos_params;
+ bool forced_on = false;
if (!phys_enc || !phys_enc->parent || !phys_enc->parent->crtc) {
DPU_ERROR("invalid arguments\n");
@@ -83,7 +109,6 @@ static void dpu_encoder_phys_wb_set_qos_remap(
memset(&qos_params, 0, sizeof(qos_params));
qos_params.vbif_idx = hw_wb->caps->vbif_idx;
qos_params.xin_id = hw_wb->caps->xin_id;
- qos_params.clk_ctrl = hw_wb->caps->clk_ctrl;
qos_params.num = hw_wb->idx - WB_0;
qos_params.is_rt = false;
@@ -92,7 +117,15 @@ static void dpu_encoder_phys_wb_set_qos_remap(
qos_params.vbif_idx,
qos_params.xin_id, qos_params.is_rt);
+ if (!_dpu_encoder_phys_wb_clk_force_ctrl(hw_wb, phys_enc->dpu_kms->hw_mdp,
+ true, &forced_on))
+ return;
+
dpu_vbif_set_qos_remap(phys_enc->dpu_kms, &qos_params);
+
+ if (forced_on)
+ _dpu_encoder_phys_wb_clk_force_ctrl(hw_wb, phys_enc->dpu_kms->hw_mdp,
+ false, &forced_on);
}
/**
@@ -345,7 +378,11 @@ static void dpu_encoder_phys_wb_setup(
}
-static void _dpu_encoder_phys_wb_frame_done_helper(void *arg)
+/**
+ * dpu_encoder_phys_wb_done_irq - writeback interrupt handler
+ * @arg: Pointer to writeback encoder
+ */
+static void dpu_encoder_phys_wb_done_irq(void *arg)
{
struct dpu_encoder_phys *phys_enc = arg;
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
@@ -372,16 +409,6 @@ static void _dpu_encoder_phys_wb_frame_done_helper(void *arg)
}
/**
- * dpu_encoder_phys_wb_done_irq - writeback interrupt handler
- * @arg: Pointer to writeback encoder
- * @irq_idx: interrupt index
- */
-static void dpu_encoder_phys_wb_done_irq(void *arg, int irq_idx)
-{
- _dpu_encoder_phys_wb_frame_done_helper(arg);
-}
-
-/**
* dpu_encoder_phys_wb_irq_ctrl - irq control of WB
* @phys: Pointer to physical encoder
* @enable: indicates enable or disable interrupts
@@ -446,7 +473,8 @@ static int dpu_encoder_phys_wb_wait_for_commit_done(
wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
- ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_WB_DONE,
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc,
+ phys_enc->irq[INTR_IDX_WB_DONE],
dpu_encoder_phys_wb_done_irq, &wait_info);
if (ret == -ETIMEDOUT)
_dpu_encoder_phys_wb_handle_wbdone_timeout(phys_enc);
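Note: both VBIF paths above (OT limit and QoS remap) now share _dpu_encoder_phys_wb_clk_force_ctrl(), which prefers a per-WB setup_clk_force_ctrl op and falls back to the MDP TOP clk_ctrl register, returning false if neither is available. A condensed sketch of the bracketing pattern, with the VBIF programming elided:

	bool forced_on = false;

	/* force the WB client clock on; skip VBIF programming if we cannot */
	if (!_dpu_encoder_phys_wb_clk_force_ctrl(hw_wb, phys_enc->dpu_kms->hw_mdp,
						 true, &forced_on))
		return;

	/* ... dpu_vbif_set_ot_limit() or dpu_vbif_set_qos_remap() here ... */

	/* release the force-on only if this call actually asserted it */
	if (forced_on)
		_dpu_encoder_phys_wb_clk_force_ctrl(hw_wb, phys_enc->dpu_kms->hw_mdp,
						    false, &forced_on);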
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 0de507d4d7b7..a1aada630780 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -33,6 +33,9 @@
#define VIG_SC7180_MASK \
(VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4))
+#define VIG_SM6125_MASK \
+ (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3LITE))
+
#define VIG_SC7180_MASK_SDMA \
(VIG_SC7180_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
@@ -76,7 +79,7 @@
(BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
#define PINGPONG_SDM845_MASK \
- (BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_TE) | BIT(DPU_PINGPONG_DSC))
+ (BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_DSC))
#define PINGPONG_SDM845_TE2_MASK \
(PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
@@ -93,19 +96,14 @@
#define CTL_SM8550_MASK \
(CTL_SC7280_MASK | BIT(DPU_CTL_HAS_LAYER_EXT4))
-#define MERGE_3D_SM8150_MASK (0)
-
#define DSPP_SC7180_MASK BIT(DPU_DSPP_PCC)
-#define INTF_SDM845_MASK (0)
-
#define INTF_SC7180_MASK \
(BIT(DPU_INTF_INPUT_CTRL) | \
- BIT(DPU_INTF_TE) | \
BIT(DPU_INTF_STATUS_SUPPORTED) | \
BIT(DPU_DATA_HCTL_EN))
-#define INTF_SC7280_MASK (INTF_SC7180_MASK | BIT(DPU_INTF_DATA_COMPRESS))
+#define INTF_SC7280_MASK (INTF_SC7180_MASK)
#define WB_SM8250_MASK (BIT(DPU_WB_LINE_MODE) | \
BIT(DPU_WB_UBWC) | \
@@ -252,15 +250,15 @@ static const uint32_t wb2_formats[] = {
*************************************************************/
/* SSPP common configuration */
-#define _VIG_SBLK(num, sdma_pri, qseed_ver) \
+#define _VIG_SBLK(sdma_pri, qseed_ver) \
{ \
.maxdwnscale = MAX_DOWNSCALE_RATIO, \
.maxupscale = MAX_UPSCALE_RATIO, \
.smart_dma_priority = sdma_pri, \
- .scaler_blk = {.name = STRCAT("sspp_scaler", num), \
+ .scaler_blk = {.name = "scaler", \
.id = qseed_ver, \
.base = 0xa00, .len = 0xa0,}, \
- .csc_blk = {.name = STRCAT("sspp_csc", num), \
+ .csc_blk = {.name = "csc", \
.id = DPU_SSPP_CSC_10BIT, \
.base = 0x1a00, .len = 0x100,}, \
.format_list = plane_formats_yuv, \
@@ -270,15 +268,15 @@ static const uint32_t wb2_formats[] = {
.rotation_cfg = NULL, \
}
-#define _VIG_SBLK_ROT(num, sdma_pri, qseed_ver, rot_cfg) \
+#define _VIG_SBLK_ROT(sdma_pri, qseed_ver, rot_cfg) \
{ \
.maxdwnscale = MAX_DOWNSCALE_RATIO, \
.maxupscale = MAX_UPSCALE_RATIO, \
.smart_dma_priority = sdma_pri, \
- .scaler_blk = {.name = STRCAT("sspp_scaler", num), \
+ .scaler_blk = {.name = "scaler", \
.id = qseed_ver, \
.base = 0xa00, .len = 0xa0,}, \
- .csc_blk = {.name = STRCAT("sspp_csc", num), \
+ .csc_blk = {.name = "csc", \
.id = DPU_SSPP_CSC_10BIT, \
.base = 0x1a00, .len = 0x100,}, \
.format_list = plane_formats_yuv, \
@@ -288,7 +286,7 @@ static const uint32_t wb2_formats[] = {
.rotation_cfg = rot_cfg, \
}
-#define _DMA_SBLK(num, sdma_pri) \
+#define _DMA_SBLK(sdma_pri) \
{ \
.maxdwnscale = SSPP_UNITY_SCALE, \
.maxupscale = SSPP_UNITY_SCALE, \
@@ -300,13 +298,13 @@ static const uint32_t wb2_formats[] = {
}
static const struct dpu_sspp_sub_blks msm8998_vig_sblk_0 =
- _VIG_SBLK("0", 0, DPU_SSPP_SCALER_QSEED3);
+ _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
static const struct dpu_sspp_sub_blks msm8998_vig_sblk_1 =
- _VIG_SBLK("1", 0, DPU_SSPP_SCALER_QSEED3);
+ _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
static const struct dpu_sspp_sub_blks msm8998_vig_sblk_2 =
- _VIG_SBLK("2", 0, DPU_SSPP_SCALER_QSEED3);
+ _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
static const struct dpu_sspp_sub_blks msm8998_vig_sblk_3 =
- _VIG_SBLK("3", 0, DPU_SSPP_SCALER_QSEED3);
+ _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = {
.rot_maxheight = 1088,
@@ -315,61 +313,52 @@ static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = {
};
static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 =
- _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3);
+ _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED3);
static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 =
- _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3);
+ _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED3);
static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 =
- _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3);
+ _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED3);
static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 =
- _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3);
+ _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED3);
-static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK("8", 1);
-static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK("9", 2);
-static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK("10", 3);
-static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK("11", 4);
-
-#define SSPP_BLK(_name, _id, _base, _len, _features, \
- _sblk, _xinid, _type, _clkctrl) \
- { \
- .name = _name, .id = _id, \
- .base = _base, .len = _len, \
- .features = _features, \
- .sblk = &_sblk, \
- .xin_id = _xinid, \
- .type = _type, \
- .clk_ctrl = _clkctrl \
- }
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK(1);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK(2);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK(3);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK(4);
static const struct dpu_sspp_sub_blks sc7180_vig_sblk_0 =
- _VIG_SBLK("0", 4, DPU_SSPP_SCALER_QSEED4);
+ _VIG_SBLK(4, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sc7280_vig_sblk_0 =
- _VIG_SBLK_ROT("0", 4, DPU_SSPP_SCALER_QSEED4, &dpu_rot_sc7280_cfg_v2);
+ _VIG_SBLK_ROT(4, DPU_SSPP_SCALER_QSEED4, &dpu_rot_sc7280_cfg_v2);
static const struct dpu_sspp_sub_blks sm6115_vig_sblk_0 =
- _VIG_SBLK("0", 2, DPU_SSPP_SCALER_QSEED4);
+ _VIG_SBLK(2, DPU_SSPP_SCALER_QSEED4);
+
+static const struct dpu_sspp_sub_blks sm6125_vig_sblk_0 =
+ _VIG_SBLK(3, DPU_SSPP_SCALER_QSEED3LITE);
static const struct dpu_sspp_sub_blks sm8250_vig_sblk_0 =
- _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED4);
+ _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8250_vig_sblk_1 =
- _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED4);
+ _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8250_vig_sblk_2 =
- _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED4);
+ _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8250_vig_sblk_3 =
- _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED4);
+ _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8550_vig_sblk_0 =
- _VIG_SBLK("0", 7, DPU_SSPP_SCALER_QSEED4);
+ _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8550_vig_sblk_1 =
- _VIG_SBLK("1", 8, DPU_SSPP_SCALER_QSEED4);
+ _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8550_vig_sblk_2 =
- _VIG_SBLK("2", 9, DPU_SSPP_SCALER_QSEED4);
+ _VIG_SBLK(9, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8550_vig_sblk_3 =
- _VIG_SBLK("3", 10, DPU_SSPP_SCALER_QSEED4);
-static const struct dpu_sspp_sub_blks sm8550_dma_sblk_4 = _DMA_SBLK("12", 5);
-static const struct dpu_sspp_sub_blks sm8550_dma_sblk_5 = _DMA_SBLK("13", 6);
+ _VIG_SBLK(10, DPU_SSPP_SCALER_QSEED4);
+static const struct dpu_sspp_sub_blks sm8550_dma_sblk_4 = _DMA_SBLK(5);
+static const struct dpu_sspp_sub_blks sm8550_dma_sblk_5 = _DMA_SBLK(6);
-#define _VIG_SBLK_NOSCALE(num, sdma_pri) \
+#define _VIG_SBLK_NOSCALE(sdma_pri) \
{ \
.maxdwnscale = SSPP_UNITY_SCALE, \
.maxupscale = SSPP_UNITY_SCALE, \
@@ -380,24 +369,13 @@ static const struct dpu_sspp_sub_blks sm8550_dma_sblk_5 = _DMA_SBLK("13", 6);
.virt_num_formats = ARRAY_SIZE(plane_formats), \
}
-static const struct dpu_sspp_sub_blks qcm2290_vig_sblk_0 = _VIG_SBLK_NOSCALE("0", 2);
-static const struct dpu_sspp_sub_blks qcm2290_dma_sblk_0 = _DMA_SBLK("8", 1);
+static const struct dpu_sspp_sub_blks qcm2290_vig_sblk_0 = _VIG_SBLK_NOSCALE(2);
+static const struct dpu_sspp_sub_blks qcm2290_dma_sblk_0 = _DMA_SBLK(1);
/*************************************************************
* MIXER sub blocks config
*************************************************************/
-#define LM_BLK(_name, _id, _base, _fmask, _sblk, _pp, _lmpair, _dspp) \
- { \
- .name = _name, .id = _id, \
- .base = _base, .len = 0x320, \
- .features = _fmask, \
- .sblk = _sblk, \
- .pingpong = _pp, \
- .lm_pair_mask = (1 << _lmpair), \
- .dspp = _dspp \
- }
-
/* MSM8998 */
static const struct dpu_lm_sub_blks msm8998_lm_sblk = {
@@ -444,151 +422,48 @@ static const struct dpu_lm_sub_blks qcm2290_lm_sblk = {
* DSPP sub blocks config
*************************************************************/
static const struct dpu_dspp_sub_blks msm8998_dspp_sblk = {
- .pcc = {.id = DPU_DSPP_PCC, .base = 0x1700,
+ .pcc = {.name = "pcc", .id = DPU_DSPP_PCC, .base = 0x1700,
.len = 0x90, .version = 0x10007},
};
-static const struct dpu_dspp_sub_blks sm8150_dspp_sblk = {
- .pcc = {.id = DPU_DSPP_PCC, .base = 0x1700,
+static const struct dpu_dspp_sub_blks sdm845_dspp_sblk = {
+ .pcc = {.name = "pcc", .id = DPU_DSPP_PCC, .base = 0x1700,
.len = 0x90, .version = 0x40000},
};
-#define DSPP_BLK(_name, _id, _base, _mask, _sblk) \
- {\
- .name = _name, .id = _id, \
- .base = _base, .len = 0x1800, \
- .features = _mask, \
- .sblk = _sblk \
- }
-
/*************************************************************
* PINGPONG sub blocks config
*************************************************************/
static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = {
- .te2 = {.id = DPU_PINGPONG_TE2, .base = 0x2000, .len = 0x0,
+ .te2 = {.name = "te2", .id = DPU_PINGPONG_TE2, .base = 0x2000, .len = 0x0,
.version = 0x1},
- .dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+ .dither = {.name = "dither", .id = DPU_PINGPONG_DITHER, .base = 0x30e0,
.len = 0x20, .version = 0x10000},
};
static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
- .dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+ .dither = {.name = "dither", .id = DPU_PINGPONG_DITHER, .base = 0x30e0,
.len = 0x20, .version = 0x10000},
};
static const struct dpu_pingpong_sub_blks sc7280_pp_sblk = {
- .dither = {.id = DPU_PINGPONG_DITHER, .base = 0xe0,
+ .dither = {.name = "dither", .id = DPU_PINGPONG_DITHER, .base = 0xe0,
.len = 0x20, .version = 0x20000},
};
-#define PP_BLK_DITHER(_name, _id, _base, _merge_3d, _sblk, _done, _rdptr) \
- {\
- .name = _name, .id = _id, \
- .base = _base, .len = 0, \
- .features = BIT(DPU_PINGPONG_DITHER), \
- .merge_3d = _merge_3d, \
- .sblk = &_sblk, \
- .intr_done = _done, \
- .intr_rdptr = _rdptr, \
- }
-#define PP_BLK(_name, _id, _base, _features, _merge_3d, _sblk, _done, _rdptr) \
- {\
- .name = _name, .id = _id, \
- .base = _base, .len = 0xd4, \
- .features = _features, \
- .merge_3d = _merge_3d, \
- .sblk = &_sblk, \
- .intr_done = _done, \
- .intr_rdptr = _rdptr, \
- }
-
-/*************************************************************
- * MERGE_3D sub blocks config
- *************************************************************/
-#define MERGE_3D_BLK(_name, _id, _base) \
- {\
- .name = _name, .id = _id, \
- .base = _base, .len = 0x8, \
- .features = MERGE_3D_SM8150_MASK, \
- .sblk = NULL \
- }
-
/*************************************************************
* DSC sub blocks config
*************************************************************/
static const struct dpu_dsc_sub_blks dsc_sblk_0 = {
- .enc = {.base = 0x100, .len = 0x100},
- .ctl = {.base = 0xF00, .len = 0x10},
+ .enc = {.name = "enc", .base = 0x100, .len = 0x9c},
+ .ctl = {.name = "ctl", .base = 0xF00, .len = 0x10},
};
static const struct dpu_dsc_sub_blks dsc_sblk_1 = {
- .enc = {.base = 0x200, .len = 0x100},
- .ctl = {.base = 0xF80, .len = 0x10},
+ .enc = {.name = "enc", .base = 0x200, .len = 0x9c},
+ .ctl = {.name = "ctl", .base = 0xF80, .len = 0x10},
};
-#define DSC_BLK(_name, _id, _base, _features) \
- {\
- .name = _name, .id = _id, \
- .base = _base, .len = 0x140, \
- .features = _features, \
- }
-
-#define DSC_BLK_1_2(_name, _id, _base, _len, _features, _sblk) \
- {\
- .name = _name, .id = _id, \
- .base = _base, .len = _len, \
- .features = BIT(DPU_DSC_HW_REV_1_2) | _features, \
- .sblk = &_sblk, \
- }
-
-/*************************************************************
- * INTF sub blocks config
- *************************************************************/
-#define INTF_BLK(_name, _id, _base, _len, _type, _ctrl_id, _progfetch, _features, _underrun, _vsync) \
- {\
- .name = _name, .id = _id, \
- .base = _base, .len = _len, \
- .features = _features, \
- .type = _type, \
- .controller_id = _ctrl_id, \
- .prog_fetch_lines_worst_case = _progfetch, \
- .intr_underrun = _underrun, \
- .intr_vsync = _vsync, \
- .intr_tear_rd_ptr = -1, \
- }
-
-/* DSI Interface sub-block with TEAR registers (since DPU 5.0.0) */
-#define INTF_BLK_DSI_TE(_name, _id, _base, _len, _type, _ctrl_id, _progfetch, _features, _underrun, _vsync, _tear_rd_ptr) \
- {\
- .name = _name, .id = _id, \
- .base = _base, .len = _len, \
- .features = _features, \
- .type = _type, \
- .controller_id = _ctrl_id, \
- .prog_fetch_lines_worst_case = _progfetch, \
- .intr_underrun = _underrun, \
- .intr_vsync = _vsync, \
- .intr_tear_rd_ptr = _tear_rd_ptr, \
- }
-
-/*************************************************************
- * Writeback blocks config
- *************************************************************/
-#define WB_BLK(_name, _id, _base, _features, _clk_ctrl, \
- __xin_id, vbif_id, _reg, _max_linewidth, _wb_done_bit) \
- { \
- .name = _name, .id = _id, \
- .base = _base, .len = 0x2c8, \
- .features = _features, \
- .format_list = wb2_formats, \
- .num_formats = ARRAY_SIZE(wb2_formats), \
- .clk_ctrl = _clk_ctrl, \
- .xin_id = __xin_id, \
- .vbif_idx = vbif_id, \
- .maxlinewidth = _max_linewidth, \
- .intr_wb_done = DPU_IRQ_IDX(_reg, _wb_done_bit) \
- }
-
/*************************************************************
* VBIF sub blocks config
*************************************************************/
@@ -663,6 +538,26 @@ static const struct dpu_vbif_cfg sdm845_vbif[] = {
},
};
+static const struct dpu_vbif_cfg sm8550_vbif[] = {
+ {
+ .name = "vbif_rt", .id = VBIF_RT,
+ .base = 0, .len = 0x1040,
+ .features = BIT(DPU_VBIF_QOS_REMAP),
+ .xin_halt_timeout = 0x4000,
+ .qos_rp_remap_size = 0x40,
+ .qos_rt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(sdm845_rt_pri_lvl),
+ .priority_lvl = sdm845_rt_pri_lvl,
+ },
+ .qos_nrt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(sdm845_nrt_pri_lvl),
+ .priority_lvl = sdm845_nrt_pri_lvl,
+ },
+ .memtype_count = 16,
+ .memtype = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3},
+ },
+};
+
/*************************************************************
* PERF data config
*************************************************************/
@@ -762,6 +657,7 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
#include "catalog/dpu_5_0_sm8150.h"
#include "catalog/dpu_5_1_sc8180x.h"
+#include "catalog/dpu_5_4_sm6125.h"
#include "catalog/dpu_6_0_sm8250.h"
#include "catalog/dpu_6_2_sc7180.h"
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index b860784ade72..df024e10d3a3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -29,23 +29,9 @@
#define MAX_XIN_COUNT 16
/**
- * Supported UBWC feature versions
- */
-enum {
- DPU_HW_UBWC_VER_10 = 0x100,
- DPU_HW_UBWC_VER_20 = 0x200,
- DPU_HW_UBWC_VER_30 = 0x300,
- DPU_HW_UBWC_VER_40 = 0x400,
-};
-
-/**
* MDP TOP BLOCK features
* @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
* @DPU_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
- * @DPU_MDP_BWC, MDSS HW supports Bandwidth compression.
- * @DPU_MDP_UBWC_1_0, This chipsets supports Universal Bandwidth
- * compression initial revision
- * @DPU_MDP_UBWC_1_5, Universal Bandwidth compression version 1.5
* @DPU_MDP_PERIPH_0_REMOVED Indicates that access to periph top0 block results
* in a failure
* @DPU_MDP_VSYNC_SEL Enables vsync source selection via MDP_VSYNC_SEL register
@@ -56,9 +42,6 @@ enum {
enum {
DPU_MDP_PANIC_PER_PIPE = 0x1,
DPU_MDP_10BIT_SUPPORT,
- DPU_MDP_BWC,
- DPU_MDP_UBWC_1_0,
- DPU_MDP_UBWC_1_5,
DPU_MDP_AUDIO_SELECT,
DPU_MDP_PERIPH_0_REMOVED,
DPU_MDP_VSYNC_SEL,
@@ -136,7 +119,6 @@ enum {
/**
* PINGPONG sub-blocks
- * @DPU_PINGPONG_TE Tear check block
* @DPU_PINGPONG_TE2 Additional tear check block for split pipes
* @DPU_PINGPONG_SPLIT PP block supports split fifo
* @DPU_PINGPONG_SLAVE PP block is a suitable slave for split fifo
@@ -145,8 +127,7 @@ enum {
* @DPU_PINGPONG_MAX
*/
enum {
- DPU_PINGPONG_TE = 0x1,
- DPU_PINGPONG_TE2,
+ DPU_PINGPONG_TE2 = 0x1,
DPU_PINGPONG_SPLIT,
DPU_PINGPONG_SLAVE,
DPU_PINGPONG_DITHER,
@@ -177,19 +158,15 @@ enum {
* INTF sub-blocks
* @DPU_INTF_INPUT_CTRL Supports the setting of pp block from which
* pixel data arrives to this INTF
- * @DPU_INTF_TE INTF block has TE configuration support
* @DPU_DATA_HCTL_EN Allows data to be transferred at different rate
* than video timing
* @DPU_INTF_STATUS_SUPPORTED INTF block has INTF_STATUS register
- * @DPU_INTF_DATA_COMPRESS INTF block has DATA_COMPRESS register
* @DPU_INTF_MAX
*/
enum {
DPU_INTF_INPUT_CTRL = 0x1,
- DPU_INTF_TE,
DPU_DATA_HCTL_EN,
DPU_INTF_STATUS_SUPPORTED,
- DPU_INTF_DATA_COMPRESS,
DPU_INTF_MAX
};
@@ -505,19 +482,6 @@ struct dpu_mdp_cfg {
struct dpu_clk_ctrl_reg clk_ctrls[DPU_CLK_CTRL_MAX];
};
-/**
- * struct dpu_ubwc_cfg - UBWC and memory configuration
- *
- * @ubwc_version UBWC feature version (0x0 for not supported)
- * @highest_bank_bit: UBWC parameter
- * @ubwc_swizzle: ubwc default swizzle setting
- */
-struct dpu_ubwc_cfg {
- u32 ubwc_version;
- u32 highest_bank_bit;
- u32 ubwc_swizzle;
-};
-
/* struct dpu_ctl_cfg : MDP CTL instance info
* @id: index identifying this block
* @base: register base offset to mdss
@@ -526,7 +490,7 @@ struct dpu_ubwc_cfg {
*/
struct dpu_ctl_cfg {
DPU_HW_BLK_INFO;
- s32 intr_start;
+ unsigned int intr_start;
};
/**
@@ -554,14 +518,14 @@ struct dpu_sspp_cfg {
* @features bit mask identifying sub-blocks/features
* @sblk: LM Sub-blocks information
* @pingpong: ID of connected PingPong, PINGPONG_NONE if unsupported
- * @lm_pair_mask: Bitmask of LMs that can be controlled by same CTL
+ * @lm_pair: ID of LM that can be controlled by same CTL
*/
struct dpu_lm_cfg {
DPU_HW_BLK_INFO;
const struct dpu_lm_sub_blks *sblk;
u32 pingpong;
u32 dspp;
- unsigned long lm_pair_mask;
+ unsigned long lm_pair;
};
/**
@@ -589,8 +553,8 @@ struct dpu_dspp_cfg {
struct dpu_pingpong_cfg {
DPU_HW_BLK_INFO;
u32 merge_3d;
- s32 intr_done;
- s32 intr_rdptr;
+ unsigned int intr_done;
+ unsigned int intr_rdptr;
const struct dpu_pingpong_sub_blks *sblk;
};
@@ -637,9 +601,9 @@ struct dpu_intf_cfg {
u32 type; /* interface type*/
u32 controller_id;
u32 prog_fetch_lines_worst_case;
- s32 intr_underrun;
- s32 intr_vsync;
- s32 intr_tear_rd_ptr;
+ unsigned int intr_underrun;
+ unsigned int intr_vsync;
+ unsigned int intr_tear_rd_ptr;
};
/**
@@ -658,7 +622,7 @@ struct dpu_wb_cfg {
u8 vbif_idx;
u32 maxlinewidth;
u32 xin_id;
- s32 intr_wb_done;
+ unsigned int intr_wb_done;
const u32 *format_list;
u32 num_formats;
enum dpu_clk_ctrl_type clk_ctrl;
@@ -747,6 +711,16 @@ struct dpu_perf_cdp_cfg {
};
/**
+ * struct dpu_mdss_version - DPU's major and minor versions
+ * @core_major_ver: DPU core's major version
+ * @core_minor_ver: DPU core's minor version
+ */
+struct dpu_mdss_version {
+ u8 core_major_ver;
+ u8 core_minor_ver;
+};
+
+/**
* struct dpu_perf_cfg - performance control settings
* @max_bw_low low threshold of maximum bandwidth (kbps)
* @max_bw_high high threshold of maximum bandwidth (kbps)
@@ -796,20 +770,19 @@ struct dpu_perf_cfg {
/**
* struct dpu_mdss_cfg - information of MDSS HW
* This is the main catalog data structure representing
- * this HW version. Contains number of instances,
- * register offsets, capabilities of the all MDSS HW sub-blocks.
+ * this HW version. Contains dpu's major and minor versions,
+ * number of instances, register offsets, capabilities of the
+ * all MDSS HW sub-blocks.
*
* @dma_formats Supported formats for dma pipe
* @cursor_formats Supported formats for cursor pipe
* @vig_formats Supported formats for vig pipe
- * @mdss_irqs: Bitmap with the irqs supported by the target
*/
struct dpu_mdss_cfg {
- const struct dpu_caps *caps;
+ const struct dpu_mdss_version *mdss_ver;
- const struct dpu_ubwc_cfg *ubwc;
+ const struct dpu_caps *caps;
- u32 mdp_count;
const struct dpu_mdp_cfg *mdp;
u32 ctl_count;
@@ -850,8 +823,6 @@ struct dpu_mdss_cfg {
const struct dpu_format_extended *dma_formats;
const struct dpu_format_extended *cursor_formats;
const struct dpu_format_extended *vig_formats;
-
- unsigned long mdss_irqs;
};
extern const struct dpu_mdss_cfg dpu_msm8998_cfg;
@@ -861,6 +832,7 @@ extern const struct dpu_mdss_cfg dpu_sc8180x_cfg;
extern const struct dpu_mdss_cfg dpu_sm8250_cfg;
extern const struct dpu_mdss_cfg dpu_sc7180_cfg;
extern const struct dpu_mdss_cfg dpu_sm6115_cfg;
+extern const struct dpu_mdss_cfg dpu_sm6125_cfg;
extern const struct dpu_mdss_cfg dpu_sm6350_cfg;
extern const struct dpu_mdss_cfg dpu_qcm2290_cfg;
extern const struct dpu_mdss_cfg dpu_sm6375_cfg;
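Note: struct dpu_mdss_version is the replacement for the dropped feature bits (DPU_PINGPONG_TE, DPU_INTF_TE, DPU_INTF_DATA_COMPRESS, the UBWC enums): consumers compare core_major_ver directly. A minimal sketch of the checks this series relies on; the local variable names are illustrative:

	const struct dpu_mdss_version *mdss_ver = dpu_kms->catalog->mdss_ver;

	/* INTF-based tear check appears with DPU 5.0 */
	bool has_intf_te = mdss_ver->core_major_ver >= 5;

	/* INTF_CONFIG2 command-mode programming is only wired up for DPU 7.0+ */
	bool has_intf_cmd_cfg = mdss_ver->core_major_ver >= 7;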
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index 5e2d68ebb113..088807db2c83 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -51,11 +51,9 @@ struct dpu_intr_reg {
};
/*
- * struct dpu_intr_reg - List of DPU interrupt registers
- *
- * When making changes be sure to sync with dpu_hw_intr_reg
+ * dpu_intr_set_legacy - List of DPU interrupt registers for DPU <= 6.x
*/
-static const struct dpu_intr_reg dpu_intr_set[] = {
+static const struct dpu_intr_reg dpu_intr_set_legacy[] = {
[MDP_SSPP_TOP0_INTR] = {
INTR_CLEAR,
INTR_EN,
@@ -121,84 +119,118 @@ static const struct dpu_intr_reg dpu_intr_set[] = {
MDP_AD4_INTR_EN_OFF(1),
MDP_AD4_INTR_STATUS_OFF(1),
},
- [MDP_INTF0_7xxx_INTR] = {
+};
+
+/*
+ * dpu_intr_set_7xxx - List of DPU interrupt registers for DPU >= 7.0
+ */
+static const struct dpu_intr_reg dpu_intr_set_7xxx[] = {
+ [MDP_SSPP_TOP0_INTR] = {
+ INTR_CLEAR,
+ INTR_EN,
+ INTR_STATUS
+ },
+ [MDP_SSPP_TOP0_INTR2] = {
+ INTR2_CLEAR,
+ INTR2_EN,
+ INTR2_STATUS
+ },
+ [MDP_SSPP_TOP0_HIST_INTR] = {
+ HIST_INTR_CLEAR,
+ HIST_INTR_EN,
+ HIST_INTR_STATUS
+ },
+ [MDP_INTF0_INTR] = {
MDP_INTF_REV_7xxx_INTR_CLEAR(0),
MDP_INTF_REV_7xxx_INTR_EN(0),
MDP_INTF_REV_7xxx_INTR_STATUS(0)
},
- [MDP_INTF1_7xxx_INTR] = {
+ [MDP_INTF1_INTR] = {
MDP_INTF_REV_7xxx_INTR_CLEAR(1),
MDP_INTF_REV_7xxx_INTR_EN(1),
MDP_INTF_REV_7xxx_INTR_STATUS(1)
},
- [MDP_INTF1_7xxx_TEAR_INTR] = {
+ [MDP_INTF1_TEAR_INTR] = {
MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(1),
MDP_INTF_REV_7xxx_INTR_TEAR_EN(1),
MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(1)
},
- [MDP_INTF2_7xxx_INTR] = {
+ [MDP_INTF2_INTR] = {
MDP_INTF_REV_7xxx_INTR_CLEAR(2),
MDP_INTF_REV_7xxx_INTR_EN(2),
MDP_INTF_REV_7xxx_INTR_STATUS(2)
},
- [MDP_INTF2_7xxx_TEAR_INTR] = {
+ [MDP_INTF2_TEAR_INTR] = {
MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(2),
MDP_INTF_REV_7xxx_INTR_TEAR_EN(2),
MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(2)
},
- [MDP_INTF3_7xxx_INTR] = {
+ [MDP_INTF3_INTR] = {
MDP_INTF_REV_7xxx_INTR_CLEAR(3),
MDP_INTF_REV_7xxx_INTR_EN(3),
MDP_INTF_REV_7xxx_INTR_STATUS(3)
},
- [MDP_INTF4_7xxx_INTR] = {
+ [MDP_INTF4_INTR] = {
MDP_INTF_REV_7xxx_INTR_CLEAR(4),
MDP_INTF_REV_7xxx_INTR_EN(4),
MDP_INTF_REV_7xxx_INTR_STATUS(4)
},
- [MDP_INTF5_7xxx_INTR] = {
+ [MDP_INTF5_INTR] = {
MDP_INTF_REV_7xxx_INTR_CLEAR(5),
MDP_INTF_REV_7xxx_INTR_EN(5),
MDP_INTF_REV_7xxx_INTR_STATUS(5)
},
- [MDP_INTF6_7xxx_INTR] = {
+ [MDP_INTF6_INTR] = {
MDP_INTF_REV_7xxx_INTR_CLEAR(6),
MDP_INTF_REV_7xxx_INTR_EN(6),
MDP_INTF_REV_7xxx_INTR_STATUS(6)
},
- [MDP_INTF7_7xxx_INTR] = {
+ [MDP_INTF7_INTR] = {
MDP_INTF_REV_7xxx_INTR_CLEAR(7),
MDP_INTF_REV_7xxx_INTR_EN(7),
MDP_INTF_REV_7xxx_INTR_STATUS(7)
},
- [MDP_INTF8_7xxx_INTR] = {
+ [MDP_INTF8_INTR] = {
MDP_INTF_REV_7xxx_INTR_CLEAR(8),
MDP_INTF_REV_7xxx_INTR_EN(8),
MDP_INTF_REV_7xxx_INTR_STATUS(8)
},
};
-#define DPU_IRQ_REG(irq_idx) (irq_idx / 32)
-#define DPU_IRQ_MASK(irq_idx) (BIT(irq_idx % 32))
+#define DPU_IRQ_MASK(irq_idx) (BIT(DPU_IRQ_BIT(irq_idx)))
+
+static inline bool dpu_core_irq_is_valid(unsigned int irq_idx)
+{
+ return irq_idx && irq_idx <= DPU_NUM_IRQS;
+}
+
+static inline struct dpu_hw_intr_entry *dpu_core_irq_get_entry(struct dpu_hw_intr *intr,
+ unsigned int irq_idx)
+{
+ return &intr->irq_tbl[irq_idx - 1];
+}
/**
* dpu_core_irq_callback_handler - dispatch core interrupts
* @dpu_kms: Pointer to DPU's KMS structure
* @irq_idx: interrupt index
*/
-static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
+static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, unsigned int irq_idx)
{
- VERB("irq_idx=%d\n", irq_idx);
+ struct dpu_hw_intr_entry *irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
+
+ VERB("IRQ=[%d, %d]\n", DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
- if (!dpu_kms->hw_intr->irq_tbl[irq_idx].cb)
- DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
+ if (!irq_entry->cb)
+ DRM_ERROR("no registered cb, IRQ=[%d, %d]\n",
+ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
- atomic_inc(&dpu_kms->hw_intr->irq_tbl[irq_idx].count);
+ atomic_inc(&irq_entry->count);
/*
* Perform registered function callback
*/
- dpu_kms->hw_intr->irq_tbl[irq_idx].cb(dpu_kms->hw_intr->irq_tbl[irq_idx].arg, irq_idx);
+ irq_entry->cb(irq_entry->arg);
}
irqreturn_t dpu_core_irq(struct msm_kms *kms)
@@ -206,7 +238,7 @@ irqreturn_t dpu_core_irq(struct msm_kms *kms)
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
struct dpu_hw_intr *intr = dpu_kms->hw_intr;
int reg_idx;
- int irq_idx;
+ unsigned int irq_idx;
u32 irq_status;
u32 enable_mask;
int bit;
@@ -216,19 +248,19 @@ irqreturn_t dpu_core_irq(struct msm_kms *kms)
return IRQ_NONE;
spin_lock_irqsave(&intr->irq_lock, irq_flags);
- for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) {
+ for (reg_idx = 0; reg_idx < MDP_INTR_MAX; reg_idx++) {
if (!test_bit(reg_idx, &intr->irq_mask))
continue;
/* Read interrupt status */
- irq_status = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].status_off);
+ irq_status = DPU_REG_READ(&intr->hw, intr->intr_set[reg_idx].status_off);
/* Read enable mask */
- enable_mask = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].en_off);
+ enable_mask = DPU_REG_READ(&intr->hw, intr->intr_set[reg_idx].en_off);
/* and clear the interrupt */
if (irq_status)
- DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
+ DPU_REG_WRITE(&intr->hw, intr->intr_set[reg_idx].clr_off,
irq_status);
/* Finally update IRQ status based on enable mask */
@@ -262,7 +294,8 @@ irqreturn_t dpu_core_irq(struct msm_kms *kms)
return IRQ_HANDLED;
}
-static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
+static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr,
+ unsigned int irq_idx)
{
int reg_idx;
const struct dpu_intr_reg *reg;
@@ -272,8 +305,9 @@ static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
if (!intr)
return -EINVAL;
- if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
- pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ if (!dpu_core_irq_is_valid(irq_idx)) {
+ pr_err("invalid IRQ=[%d, %d]\n",
+ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
return -EINVAL;
}
@@ -285,7 +319,11 @@ static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
assert_spin_locked(&intr->irq_lock);
reg_idx = DPU_IRQ_REG(irq_idx);
- reg = &dpu_intr_set[reg_idx];
+ reg = &intr->intr_set[reg_idx];
+
+ /* Is this interrupt register supported on the platform */
+ if (WARN_ON(!reg->en_off))
+ return -EINVAL;
cache_irq_mask = intr->cache_irq_mask[reg_idx];
if (cache_irq_mask & DPU_IRQ_MASK(irq_idx)) {
@@ -305,13 +343,15 @@ static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
intr->cache_irq_mask[reg_idx] = cache_irq_mask;
}
- pr_debug("DPU IRQ %d %senabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", irq_idx, dbgstr,
+ pr_debug("DPU IRQ=[%d, %d] %senabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n",
+ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), dbgstr,
DPU_IRQ_MASK(irq_idx), cache_irq_mask);
return 0;
}
-static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
+static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr,
+ unsigned int irq_idx)
{
int reg_idx;
const struct dpu_intr_reg *reg;
@@ -321,8 +361,9 @@ static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
if (!intr)
return -EINVAL;
- if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
- pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ if (!dpu_core_irq_is_valid(irq_idx)) {
+ pr_err("invalid IRQ=[%d, %d]\n",
+ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
return -EINVAL;
}
@@ -334,7 +375,7 @@ static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
assert_spin_locked(&intr->irq_lock);
reg_idx = DPU_IRQ_REG(irq_idx);
- reg = &dpu_intr_set[reg_idx];
+ reg = &intr->intr_set[reg_idx];
cache_irq_mask = intr->cache_irq_mask[reg_idx];
if ((cache_irq_mask & DPU_IRQ_MASK(irq_idx)) == 0) {
@@ -354,7 +395,8 @@ static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
intr->cache_irq_mask[reg_idx] = cache_irq_mask;
}
- pr_debug("DPU IRQ %d %sdisabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", irq_idx, dbgstr,
+ pr_debug("DPU IRQ=[%d, %d] %sdisabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n",
+ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), dbgstr,
DPU_IRQ_MASK(irq_idx), cache_irq_mask);
return 0;
@@ -368,10 +410,10 @@ static void dpu_clear_irqs(struct dpu_kms *dpu_kms)
if (!intr)
return;
- for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
+ for (i = 0; i < MDP_INTR_MAX; i++) {
if (test_bit(i, &intr->irq_mask))
DPU_REG_WRITE(&intr->hw,
- dpu_intr_set[i].clr_off, 0xffffffff);
+ intr->intr_set[i].clr_off, 0xffffffff);
}
/* ensure register writes go through */
@@ -386,17 +428,18 @@ static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
if (!intr)
return;
- for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
+ for (i = 0; i < MDP_INTR_MAX; i++) {
if (test_bit(i, &intr->irq_mask))
DPU_REG_WRITE(&intr->hw,
- dpu_intr_set[i].en_off, 0x00000000);
+ intr->intr_set[i].en_off, 0x00000000);
}
/* ensure register writes go through */
wmb();
}
-u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx)
+u32 dpu_core_irq_read(struct dpu_kms *dpu_kms,
+ unsigned int irq_idx)
{
struct dpu_hw_intr *intr = dpu_kms->hw_intr;
int reg_idx;
@@ -406,14 +449,8 @@ u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx)
if (!intr)
return 0;
- if (irq_idx < 0) {
- DPU_ERROR("[%pS] invalid irq_idx=%d\n",
- __builtin_return_address(0), irq_idx);
- return 0;
- }
-
- if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
- pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ if (!dpu_core_irq_is_valid(irq_idx)) {
+ pr_err("invalid IRQ=[%d, %d]\n", DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
return 0;
}
@@ -421,10 +458,10 @@ u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx)
reg_idx = DPU_IRQ_REG(irq_idx);
intr_status = DPU_REG_READ(&intr->hw,
- dpu_intr_set[reg_idx].status_off) &
+ intr->intr_set[reg_idx].status_off) &
DPU_IRQ_MASK(irq_idx);
if (intr_status)
- DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
+ DPU_REG_WRITE(&intr->hw, intr->intr_set[reg_idx].clr_off,
intr_status);
/* ensure register writes go through */
@@ -435,30 +472,40 @@ u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx)
return intr_status;
}
-static void __intr_offset(const struct dpu_mdss_cfg *m,
- void __iomem *addr, struct dpu_hw_blk_reg_map *hw)
-{
- hw->blk_addr = addr + m->mdp[0].base;
-}
-
struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
const struct dpu_mdss_cfg *m)
{
struct dpu_hw_intr *intr;
- int nirq = MDP_INTR_MAX * 32;
+ unsigned int i;
if (!addr || !m)
return ERR_PTR(-EINVAL);
- intr = kzalloc(struct_size(intr, irq_tbl, nirq), GFP_KERNEL);
+ intr = kzalloc(sizeof(*intr), GFP_KERNEL);
if (!intr)
return ERR_PTR(-ENOMEM);
- __intr_offset(m, addr, &intr->hw);
+ if (m->mdss_ver->core_major_ver >= 7)
+ intr->intr_set = dpu_intr_set_7xxx;
+ else
+ intr->intr_set = dpu_intr_set_legacy;
+
+ intr->hw.blk_addr = addr + m->mdp[0].base;
- intr->total_irqs = nirq;
+ intr->irq_mask = BIT(MDP_SSPP_TOP0_INTR) |
+ BIT(MDP_SSPP_TOP0_INTR2) |
+ BIT(MDP_SSPP_TOP0_HIST_INTR);
+ for (i = 0; i < m->intf_count; i++) {
+ const struct dpu_intf_cfg *intf = &m->intf[i];
- intr->irq_mask = m->mdss_irqs;
+ if (intf->type == INTF_NONE)
+ continue;
+
+ intr->irq_mask |= BIT(MDP_INTFn_INTR(intf->id));
+
+ if (intf->intr_tear_rd_ptr)
+ intr->irq_mask |= BIT(DPU_IRQ_REG(intf->intr_tear_rd_ptr));
+ }
spin_lock_init(&intr->irq_lock);
@@ -470,76 +517,87 @@ void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
kfree(intr);
}
-int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
- void (*irq_cb)(void *arg, int irq_idx),
- void *irq_arg)
+int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms,
+ unsigned int irq_idx,
+ void (*irq_cb)(void *arg),
+ void *irq_arg)
{
+ struct dpu_hw_intr_entry *irq_entry;
unsigned long irq_flags;
int ret;
if (!irq_cb) {
- DPU_ERROR("invalid ird_idx:%d irq_cb:%ps\n", irq_idx, irq_cb);
+ DPU_ERROR("invalid IRQ=[%d, %d] irq_cb:%ps\n",
+ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
return -EINVAL;
}
- if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
- DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ if (!dpu_core_irq_is_valid(irq_idx)) {
+ DPU_ERROR("invalid IRQ=[%d, %d]\n",
+ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
return -EINVAL;
}
- VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+ VERB("[%pS] IRQ=[%d, %d]\n", __builtin_return_address(0),
+ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
- if (unlikely(WARN_ON(dpu_kms->hw_intr->irq_tbl[irq_idx].cb))) {
+ irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
+ if (unlikely(WARN_ON(irq_entry->cb))) {
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
return -EBUSY;
}
- trace_dpu_core_irq_register_callback(irq_idx, irq_cb);
- dpu_kms->hw_intr->irq_tbl[irq_idx].arg = irq_arg;
- dpu_kms->hw_intr->irq_tbl[irq_idx].cb = irq_cb;
+ trace_dpu_core_irq_register_callback(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
+ irq_entry->arg = irq_arg;
+ irq_entry->cb = irq_cb;
ret = dpu_hw_intr_enable_irq_locked(
dpu_kms->hw_intr,
irq_idx);
if (ret)
- DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
- irq_idx);
+ DPU_ERROR("Failed/ to enable IRQ=[%d, %d]\n",
+ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
- trace_dpu_irq_register_success(irq_idx);
+ trace_dpu_irq_register_success(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
return 0;
}
-int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
+int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms,
+ unsigned int irq_idx)
{
+ struct dpu_hw_intr_entry *irq_entry;
unsigned long irq_flags;
int ret;
- if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
- DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ if (!dpu_core_irq_is_valid(irq_idx)) {
+ DPU_ERROR("invalid IRQ=[%d, %d]\n",
+ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
return -EINVAL;
}
- VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+ VERB("[%pS] IRQ=[%d, %d]\n", __builtin_return_address(0),
+ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
- trace_dpu_core_irq_unregister_callback(irq_idx);
+ trace_dpu_core_irq_unregister_callback(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
ret = dpu_hw_intr_disable_irq_locked(dpu_kms->hw_intr, irq_idx);
if (ret)
- DPU_ERROR("Fail to disable IRQ for irq_idx:%d: %d\n",
- irq_idx, ret);
+ DPU_ERROR("Failed to disable IRQ=[%d, %d]: %d\n",
+ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), ret);
- dpu_kms->hw_intr->irq_tbl[irq_idx].cb = NULL;
- dpu_kms->hw_intr->irq_tbl[irq_idx].arg = NULL;
+ irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
+ irq_entry->cb = NULL;
+ irq_entry->arg = NULL;
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
- trace_dpu_irq_unregister_success(irq_idx);
+ trace_dpu_irq_unregister_success(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
return 0;
}
@@ -548,18 +606,21 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
struct dpu_kms *dpu_kms = s->private;
+ struct dpu_hw_intr_entry *irq_entry;
unsigned long irq_flags;
int i, irq_count;
void *cb;
- for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
+ for (i = 1; i <= DPU_NUM_IRQS; i++) {
spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
- irq_count = atomic_read(&dpu_kms->hw_intr->irq_tbl[i].count);
- cb = dpu_kms->hw_intr->irq_tbl[i].cb;
+ irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
+ irq_count = atomic_read(&irq_entry->count);
+ cb = irq_entry->cb;
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
if (irq_count || cb)
- seq_printf(s, "idx:%d irq:%d cb:%ps\n", i, irq_count, cb);
+ seq_printf(s, "IRQ=[%d, %d] count:%d cb:%ps\n",
+ DPU_IRQ_REG(i), DPU_IRQ_BIT(i), irq_count, cb);
}
return 0;
@@ -578,6 +639,7 @@ void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
void dpu_core_irq_preinstall(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ struct dpu_hw_intr_entry *irq_entry;
int i;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
@@ -585,22 +647,28 @@ void dpu_core_irq_preinstall(struct msm_kms *kms)
dpu_disable_all_irqs(dpu_kms);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
- for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
- atomic_set(&dpu_kms->hw_intr->irq_tbl[i].count, 0);
+ for (i = 1; i <= DPU_NUM_IRQS; i++) {
+ irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
+ atomic_set(&irq_entry->count, 0);
+ }
}
void dpu_core_irq_uninstall(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ struct dpu_hw_intr_entry *irq_entry;
int i;
if (!dpu_kms->hw_intr)
return;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
- for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
- if (dpu_kms->hw_intr->irq_tbl[i].cb)
- DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
+ for (i = 1; i <= DPU_NUM_IRQS; i++) {
+ irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
+ if (irq_entry->cb)
+ DPU_ERROR("IRQ=[%d, %d] still enabled/registered\n",
+ DPU_IRQ_REG(i), DPU_IRQ_BIT(i));
+ }
dpu_clear_irqs(dpu_kms);
dpu_disable_all_irqs(dpu_kms);
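Note: interrupt bookkeeping is now 1-based: DPU_IRQ_IDX(reg, bit) yields 1 + reg * 32 + bit, index 0 is reserved as "no interrupt", and dpu_core_irq_get_entry() subtracts one before indexing the fixed irq_tbl[DPU_NUM_IRQS] array. dpu_hw_intr_init() also selects dpu_intr_set_7xxx or dpu_intr_set_legacy from core_major_ver and builds irq_mask from the INTFs actually present in the catalog. A short worked example of the index encoding:

	/* bit 2 of the INTF1 tear interrupt register */
	unsigned int irq_idx = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2);

	/* decodes back into the register/bit pair printed in the logs */
	WARN_ON(DPU_IRQ_REG(irq_idx) != MDP_INTF1_TEAR_INTR);
	WARN_ON(DPU_IRQ_BIT(irq_idx) != 2);

	/* 0 never encodes a real interrupt, so it doubles as "none" */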
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
index 1f2dabc54c22..53a21ebc57e8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -17,31 +17,36 @@ enum dpu_hw_intr_reg {
MDP_SSPP_TOP0_INTR,
MDP_SSPP_TOP0_INTR2,
MDP_SSPP_TOP0_HIST_INTR,
+ /* All MDP_INTFn_INTR should come sequentially */
MDP_INTF0_INTR,
MDP_INTF1_INTR,
MDP_INTF2_INTR,
MDP_INTF3_INTR,
MDP_INTF4_INTR,
MDP_INTF5_INTR,
+ MDP_INTF6_INTR,
+ MDP_INTF7_INTR,
+ MDP_INTF8_INTR,
MDP_INTF1_TEAR_INTR,
MDP_INTF2_TEAR_INTR,
MDP_AD4_0_INTR,
MDP_AD4_1_INTR,
- MDP_INTF0_7xxx_INTR,
- MDP_INTF1_7xxx_INTR,
- MDP_INTF1_7xxx_TEAR_INTR,
- MDP_INTF2_7xxx_INTR,
- MDP_INTF2_7xxx_TEAR_INTR,
- MDP_INTF3_7xxx_INTR,
- MDP_INTF4_7xxx_INTR,
- MDP_INTF5_7xxx_INTR,
- MDP_INTF6_7xxx_INTR,
- MDP_INTF7_7xxx_INTR,
- MDP_INTF8_7xxx_INTR,
MDP_INTR_MAX,
};
-#define DPU_IRQ_IDX(reg_idx, offset) (reg_idx * 32 + offset)
+#define MDP_INTFn_INTR(intf) (MDP_INTF0_INTR + (intf - INTF_0))
+
+#define DPU_IRQ_IDX(reg_idx, offset) (1 + reg_idx * 32 + offset)
+#define DPU_IRQ_REG(irq_idx) ((irq_idx - 1) / 32)
+#define DPU_IRQ_BIT(irq_idx) ((irq_idx - 1) % 32)
+
+#define DPU_NUM_IRQS (MDP_INTR_MAX * 32)
+
+struct dpu_hw_intr_entry {
+ void (*cb)(void *arg);
+ void *arg;
+ atomic_t count;
+};
/**
* struct dpu_hw_intr: hw interrupts handling data structure
@@ -49,7 +54,6 @@ enum dpu_hw_intr_reg {
* @ops: function pointer mapping for IRQ handling
* @cache_irq_mask: array of IRQ enable masks reg storage created during init
* @save_irq_status: array of IRQ status reg storage created during init
- * @total_irqs: total number of irq_idx mapped in the hw_interrupts
* @irq_lock: spinlock for accessing IRQ resources
* @irq_cb_tbl: array of IRQ callbacks
*/
@@ -57,15 +61,11 @@ struct dpu_hw_intr {
struct dpu_hw_blk_reg_map hw;
u32 cache_irq_mask[MDP_INTR_MAX];
u32 *save_irq_status;
- u32 total_irqs;
spinlock_t irq_lock;
unsigned long irq_mask;
+ const struct dpu_intr_reg *intr_set;
- struct {
- void (*cb)(void *arg, int irq_idx);
- void *arg;
- atomic_t count;
- } irq_tbl[];
+ struct dpu_hw_intr_entry irq_tbl[DPU_NUM_IRQS];
};
/**
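Note: registration follows the same convention: the unsigned index is handed straight to dpu_core_irq_register_callback(), the callback receives only its argument, and unregistration clears the dpu_hw_intr_entry again. A minimal sketch using the writeback-done interrupt from the encoder diff above:

	int ret;

	/* enable the interrupt and install the handler */
	ret = dpu_core_irq_register_callback(dpu_kms,
					      phys_enc->irq[INTR_IDX_WB_DONE],
					      dpu_encoder_phys_wb_done_irq,
					      phys_enc);
	if (ret)
		return ret;

	/* later: disable the interrupt and drop the handler */
	dpu_core_irq_unregister_callback(dpu_kms,
					 phys_enc->irq[INTR_IDX_WB_DONE]);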
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index 5b0f6627e29b..e8b8908d3e12 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -95,7 +95,7 @@
static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
- const struct intf_timing_params *p,
+ const struct dpu_hw_intf_timing_params *p,
const struct dpu_format *fmt)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
@@ -244,7 +244,7 @@ static void dpu_hw_intf_enable_timing_engine(
static void dpu_hw_intf_setup_prg_fetch(
struct dpu_hw_intf *intf,
- const struct intf_prog_fetch *fetch)
+ const struct dpu_hw_intf_prog_fetch *fetch)
{
struct dpu_hw_blk_reg_map *c = &intf->hw;
int fetch_enable;
@@ -286,7 +286,7 @@ static void dpu_hw_intf_bind_pingpong_blk(
static void dpu_hw_intf_get_status(
struct dpu_hw_intf *intf,
- struct intf_status *s)
+ struct dpu_hw_intf_status *s)
{
struct dpu_hw_blk_reg_map *c = &intf->hw;
unsigned long cap = intf->cap->features;
@@ -513,42 +513,22 @@ static void dpu_hw_intf_disable_autorefresh(struct dpu_hw_intf *intf,
}
-static void dpu_hw_intf_enable_compression(struct dpu_hw_intf *ctx)
+static void dpu_hw_intf_program_intf_cmd_cfg(struct dpu_hw_intf *ctx,
+ struct dpu_hw_intf_cmd_mode_cfg *cmd_mode_cfg)
{
u32 intf_cfg2 = DPU_REG_READ(&ctx->hw, INTF_CONFIG2);
- intf_cfg2 |= INTF_CFG2_DCE_DATA_COMPRESS;
+ if (cmd_mode_cfg->data_compress)
+ intf_cfg2 |= INTF_CFG2_DCE_DATA_COMPRESS;
- DPU_REG_WRITE(&ctx->hw, INTF_CONFIG2, intf_cfg2);
-}
-
-static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
- unsigned long cap)
-{
- ops->setup_timing_gen = dpu_hw_intf_setup_timing_engine;
- ops->setup_prg_fetch = dpu_hw_intf_setup_prg_fetch;
- ops->get_status = dpu_hw_intf_get_status;
- ops->enable_timing = dpu_hw_intf_enable_timing_engine;
- ops->get_line_count = dpu_hw_intf_get_line_count;
- if (cap & BIT(DPU_INTF_INPUT_CTRL))
- ops->bind_pingpong_blk = dpu_hw_intf_bind_pingpong_blk;
- ops->setup_misr = dpu_hw_intf_setup_misr;
- ops->collect_misr = dpu_hw_intf_collect_misr;
-
- if (cap & BIT(DPU_INTF_TE)) {
- ops->enable_tearcheck = dpu_hw_intf_enable_te;
- ops->disable_tearcheck = dpu_hw_intf_disable_te;
- ops->connect_external_te = dpu_hw_intf_connect_external_te;
- ops->vsync_sel = dpu_hw_intf_vsync_sel;
- ops->disable_autorefresh = dpu_hw_intf_disable_autorefresh;
- }
+ if (cmd_mode_cfg->wide_bus_en)
+ intf_cfg2 |= INTF_CFG2_DATABUS_WIDEN;
- if (cap & BIT(DPU_INTF_DATA_COMPRESS))
- ops->enable_compression = dpu_hw_intf_enable_compression;
+ DPU_REG_WRITE(&ctx->hw, INTF_CONFIG2, intf_cfg2);
}
struct dpu_hw_intf *dpu_hw_intf_init(const struct dpu_intf_cfg *cfg,
- void __iomem *addr)
+ void __iomem *addr, const struct dpu_mdss_version *mdss_rev)
{
struct dpu_hw_intf *c;
@@ -569,7 +549,35 @@ struct dpu_hw_intf *dpu_hw_intf_init(const struct dpu_intf_cfg *cfg,
*/
c->idx = cfg->id;
c->cap = cfg;
- _setup_intf_ops(&c->ops, c->cap->features);
+
+ c->ops.setup_timing_gen = dpu_hw_intf_setup_timing_engine;
+ c->ops.setup_prg_fetch = dpu_hw_intf_setup_prg_fetch;
+ c->ops.get_status = dpu_hw_intf_get_status;
+ c->ops.enable_timing = dpu_hw_intf_enable_timing_engine;
+ c->ops.get_line_count = dpu_hw_intf_get_line_count;
+ c->ops.setup_misr = dpu_hw_intf_setup_misr;
+ c->ops.collect_misr = dpu_hw_intf_collect_misr;
+
+ if (cfg->features & BIT(DPU_INTF_INPUT_CTRL))
+ c->ops.bind_pingpong_blk = dpu_hw_intf_bind_pingpong_blk;
+
+ /* INTF TE is only for DSI interfaces */
+ if (mdss_rev->core_major_ver >= 5 && cfg->type == INTF_DSI) {
+ WARN_ON(!cfg->intr_tear_rd_ptr);
+
+ c->ops.enable_tearcheck = dpu_hw_intf_enable_te;
+ c->ops.disable_tearcheck = dpu_hw_intf_disable_te;
+ c->ops.connect_external_te = dpu_hw_intf_connect_external_te;
+ c->ops.vsync_sel = dpu_hw_intf_vsync_sel;
+ c->ops.disable_autorefresh = dpu_hw_intf_disable_autorefresh;
+ }
+
+ /* Technically, INTF_CONFIG2 is present for DPU 5.0+, but
+ * we can configure it for DPU 7.0+ since the wide bus and DSC flags
+ * would not be set for DPU < 7.0 anyway
+ */
+ if (mdss_rev->core_major_ver >= 7)
+ c->ops.program_intf_cmd_cfg = dpu_hw_intf_program_intf_cmd_cfg;
return c;
}
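As a usage sketch, a command-mode encoder can now fold both the DSC and wide-bus decisions into one dpu_hw_intf_cmd_mode_cfg and program INTF_CONFIG2 with a single call; the flag variables below are placeholders, not code from this patch:

        struct dpu_hw_intf_cmd_mode_cfg cmd_mode_cfg = { };

        cmd_mode_cfg.data_compress = dsc_in_use;        /* placeholder */
        cmd_mode_cfg.wide_bus_en = wide_bus_in_use;     /* placeholder */

        if (intf->ops.program_intf_cmd_cfg)
                intf->ops.program_intf_cmd_cfg(intf, &cmd_mode_cfg);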
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index 99e21c4137f9..c539025c418b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -14,7 +14,7 @@
struct dpu_hw_intf;
/* intf timing settings */
-struct intf_timing_params {
+struct dpu_hw_intf_timing_params {
u32 width; /* active width */
u32 height; /* active height */
u32 xres; /* Display panel width */
@@ -35,19 +35,24 @@ struct intf_timing_params {
bool wide_bus_en;
};
-struct intf_prog_fetch {
+struct dpu_hw_intf_prog_fetch {
u8 enable;
/* vsync counter for the front porch pixel line */
u32 fetch_start;
};
-struct intf_status {
+struct dpu_hw_intf_status {
u8 is_en; /* interface timing engine is enabled or not */
u8 is_prog_fetch_en; /* interface prog fetch counter is enabled or not */
u32 frame_count; /* frame count since timing engine enabled */
u32 line_count; /* current line count including blanking */
};
+struct dpu_hw_intf_cmd_mode_cfg {
+ u8 data_compress; /* enable data compression between DPU and DSI */
+ u8 wide_bus_en; /* enable databus widen mode */
+};
+
/**
* struct dpu_hw_intf_ops : Interface to the interface Hw driver functions
* Assumption is these functions will be called after clocks are enabled
@@ -70,21 +75,21 @@ struct intf_status {
* @get_autorefresh: Retrieve autorefresh config from hardware
* Return: 0 on success, -ETIMEDOUT on timeout
* @vsync_sel: Select vsync signal for tear-effect configuration
- * @enable_compression: Enable data compression
+ * @program_intf_cmd_cfg: Program the DPU to interface datapath for command mode
*/
struct dpu_hw_intf_ops {
void (*setup_timing_gen)(struct dpu_hw_intf *intf,
- const struct intf_timing_params *p,
+ const struct dpu_hw_intf_timing_params *p,
const struct dpu_format *fmt);
void (*setup_prg_fetch)(struct dpu_hw_intf *intf,
- const struct intf_prog_fetch *fetch);
+ const struct dpu_hw_intf_prog_fetch *fetch);
void (*enable_timing)(struct dpu_hw_intf *intf,
u8 enable);
void (*get_status)(struct dpu_hw_intf *intf,
- struct intf_status *status);
+ struct dpu_hw_intf_status *status);
u32 (*get_line_count)(struct dpu_hw_intf *intf);
@@ -108,7 +113,8 @@ struct dpu_hw_intf_ops {
*/
void (*disable_autorefresh)(struct dpu_hw_intf *intf, uint32_t encoder_id, u16 vdisplay);
- void (*enable_compression)(struct dpu_hw_intf *intf);
+ void (*program_intf_cmd_cfg)(struct dpu_hw_intf *intf,
+ struct dpu_hw_intf_cmd_mode_cfg *cmd_mode_cfg);
};
struct dpu_hw_intf {
@@ -127,9 +133,10 @@ struct dpu_hw_intf {
* interface catalog entry.
* @cfg: interface catalog entry for which driver object is required
* @addr: mapped register io address of MDP
+ * @mdss_rev: dpu core's major and minor versions
*/
struct dpu_hw_intf *dpu_hw_intf_init(const struct dpu_intf_cfg *cfg,
- void __iomem *addr);
+ void __iomem *addr, const struct dpu_mdss_version *mdss_rev);
/**
* dpu_hw_intf_destroy(): Destroys INTF driver context
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index 02a0f48aac94..d85157acfbf8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -101,11 +101,6 @@ enum dpu_hw_blk_type {
DPU_HW_BLK_MAX,
};
-enum dpu_mdp {
- MDP_TOP = 0x1,
- MDP_MAX,
-};
-
enum dpu_sspp {
SSPP_NONE,
SSPP_VIG0,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
index 437d9e62a841..057cac7f5d93 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -281,10 +281,24 @@ static int dpu_hw_pp_setup_dsc(struct dpu_hw_pingpong *pp)
return 0;
}
-static void _setup_pingpong_ops(struct dpu_hw_pingpong *c,
- unsigned long features)
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(const struct dpu_pingpong_cfg *cfg,
+ void __iomem *addr, const struct dpu_mdss_version *mdss_rev)
{
- if (test_bit(DPU_PINGPONG_TE, &features)) {
+ struct dpu_hw_pingpong *c;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ c->hw.blk_addr = addr + cfg->base;
+ c->hw.log_mask = DPU_DBG_MASK_PINGPONG;
+
+ c->idx = cfg->id;
+ c->caps = cfg;
+
+ if (mdss_rev->core_major_ver < 5) {
+ WARN_ON(!cfg->intr_rdptr);
+
c->ops.enable_tearcheck = dpu_hw_pp_enable_te;
c->ops.disable_tearcheck = dpu_hw_pp_disable_te;
c->ops.connect_external_te = dpu_hw_pp_connect_external_te;
@@ -292,31 +306,14 @@ static void _setup_pingpong_ops(struct dpu_hw_pingpong *c,
c->ops.disable_autorefresh = dpu_hw_pp_disable_autorefresh;
}
- if (test_bit(DPU_PINGPONG_DSC, &features)) {
+ if (test_bit(DPU_PINGPONG_DSC, &cfg->features)) {
c->ops.setup_dsc = dpu_hw_pp_setup_dsc;
c->ops.enable_dsc = dpu_hw_pp_dsc_enable;
c->ops.disable_dsc = dpu_hw_pp_dsc_disable;
}
- if (test_bit(DPU_PINGPONG_DITHER, &features))
+ if (test_bit(DPU_PINGPONG_DITHER, &cfg->features))
c->ops.setup_dither = dpu_hw_pp_setup_dither;
-};
-
-struct dpu_hw_pingpong *dpu_hw_pingpong_init(const struct dpu_pingpong_cfg *cfg,
- void __iomem *addr)
-{
- struct dpu_hw_pingpong *c;
-
- c = kzalloc(sizeof(*c), GFP_KERNEL);
- if (!c)
- return ERR_PTR(-ENOMEM);
-
- c->hw.blk_addr = addr + cfg->base;
- c->hw.log_mask = DPU_DBG_MASK_PINGPONG;
-
- c->idx = cfg->id;
- c->caps = cfg;
- _setup_pingpong_ops(c, c->caps->features);
return c;
}
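The practical effect of keying these ops off mdss_rev is that tearcheck ownership follows the hardware generation: PINGPONG keeps it on DPU < 5.0, while DPU 5.0+ DSI paths use the INTF tearcheck ops added earlier in this series. A simplified caller-side sketch, assuming both blocks expose the same-shaped enable_tearcheck op (the real encoder code keeps separate helpers):

        if (phys_enc->hw_intf && phys_enc->hw_intf->ops.enable_tearcheck)
                phys_enc->hw_intf->ops.enable_tearcheck(phys_enc->hw_intf, &tc_cfg);
        else if (phys_enc->hw_pp && phys_enc->hw_pp->ops.enable_tearcheck)
                phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, &tc_cfg);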
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index d3246a9a5808..0d541ca5b056 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -123,10 +123,11 @@ static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
* pingpong catalog entry.
* @cfg: Pingpong catalog entry for which driver object is required
* @addr: Mapped register io address of MDP
+ * @mdss_rev: dpu core's major and minor versions
* Return: Error code or allocated dpu_hw_pingpong context
*/
struct dpu_hw_pingpong *dpu_hw_pingpong_init(const struct dpu_pingpong_cfg *cfg,
- void __iomem *addr);
+ void __iomem *addr, const struct dpu_mdss_version *mdss_rev);
/**
* dpu_hw_pingpong_destroy - destroys pingpong driver context
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index b364cf75bb3f..8e3c65989c49 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -8,6 +8,8 @@
#include "dpu_hw_sspp.h"
#include "dpu_kms.h"
+#include "msm_mdss.h"
+
#include <drm/drm_file.h>
#define DPU_FETCH_CONFIG_RESET_VALUE 0x00000087
@@ -67,6 +69,7 @@
#define SSPP_EXCL_REC_XY_REC1 0x188
#define SSPP_EXCL_REC_SIZE 0x1B4
#define SSPP_EXCL_REC_XY 0x1B8
+#define SSPP_CLK_CTRL 0x330
/* SSPP_SRC_OP_MODE & OP_MODE_REC1 */
#define MDSS_MDP_OP_DEINTERLACE BIT(22)
@@ -270,26 +273,26 @@ static void dpu_hw_sspp_setup_format(struct dpu_sw_pipe *pipe,
DPU_REG_WRITE(c, SSPP_FETCH_CONFIG,
DPU_FETCH_CONFIG_RESET_VALUE |
ctx->ubwc->highest_bank_bit << 18);
- switch (ctx->ubwc->ubwc_version) {
- case DPU_HW_UBWC_VER_10:
+ switch (ctx->ubwc->ubwc_enc_version) {
+ case UBWC_1_0:
fast_clear = fmt->alpha_enable ? BIT(31) : 0;
DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
fast_clear | (ctx->ubwc->ubwc_swizzle & 0x1) |
BIT(8) |
(ctx->ubwc->highest_bank_bit << 4));
break;
- case DPU_HW_UBWC_VER_20:
+ case UBWC_2_0:
fast_clear = fmt->alpha_enable ? BIT(31) : 0;
DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
fast_clear | (ctx->ubwc->ubwc_swizzle) |
(ctx->ubwc->highest_bank_bit << 4));
break;
- case DPU_HW_UBWC_VER_30:
+ case UBWC_3_0:
DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
BIT(30) | (ctx->ubwc->ubwc_swizzle) |
(ctx->ubwc->highest_bank_bit << 4));
break;
- case DPU_HW_UBWC_VER_40:
+ case UBWC_4_0:
DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
DPU_FORMAT_IS_YUV(fmt) ? 0 : BIT(30));
break;
@@ -579,8 +582,18 @@ static void dpu_hw_sspp_setup_cdp(struct dpu_sw_pipe *pipe,
dpu_setup_cdp(&ctx->hw, cdp_cntl_offset, fmt, enable);
}
+static bool dpu_hw_sspp_setup_clk_force_ctrl(struct dpu_hw_sspp *ctx, bool enable)
+{
+ static const struct dpu_clk_ctrl_reg sspp_clk_ctrl = {
+ .reg_off = SSPP_CLK_CTRL,
+ .bit_off = 0
+ };
+
+ return dpu_hw_clk_force_ctrl(&ctx->hw, &sspp_clk_ctrl, enable);
+}
+
static void _setup_layer_ops(struct dpu_hw_sspp *c,
- unsigned long features)
+ unsigned long features, const struct dpu_mdss_version *mdss_rev)
{
c->ops.setup_format = dpu_hw_sspp_setup_format;
c->ops.setup_rects = dpu_hw_sspp_setup_rects;
@@ -610,6 +623,9 @@ static void _setup_layer_ops(struct dpu_hw_sspp *c,
if (test_bit(DPU_SSPP_CDP, &features))
c->ops.setup_cdp = dpu_hw_sspp_setup_cdp;
+
+ if (mdss_rev->core_major_ver >= 9)
+ c->ops.setup_clk_force_ctrl = dpu_hw_sspp_setup_clk_force_ctrl;
}
#ifdef CONFIG_DEBUG_FS
@@ -670,11 +686,12 @@ int _dpu_hw_sspp_init_debugfs(struct dpu_hw_sspp *hw_pipe, struct dpu_kms *kms,
#endif
struct dpu_hw_sspp *dpu_hw_sspp_init(const struct dpu_sspp_cfg *cfg,
- void __iomem *addr, const struct dpu_ubwc_cfg *ubwc)
+ void __iomem *addr, const struct msm_mdss_data *mdss_data,
+ const struct dpu_mdss_version *mdss_rev)
{
struct dpu_hw_sspp *hw_pipe;
- if (!addr || !ubwc)
+ if (!addr)
return ERR_PTR(-EINVAL);
hw_pipe = kzalloc(sizeof(*hw_pipe), GFP_KERNEL);
@@ -685,10 +702,10 @@ struct dpu_hw_sspp *dpu_hw_sspp_init(const struct dpu_sspp_cfg *cfg,
hw_pipe->hw.log_mask = DPU_DBG_MASK_SSPP;
/* Assign ops */
- hw_pipe->ubwc = ubwc;
+ hw_pipe->ubwc = mdss_data;
hw_pipe->idx = cfg->id;
hw_pipe->cap = cfg;
- _setup_layer_ops(hw_pipe, hw_pipe->cap->features);
+ _setup_layer_ops(hw_pipe, hw_pipe->cap->features, mdss_rev);
return hw_pipe;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index 085f34bc6b88..f93969fddb22 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -272,6 +272,14 @@ struct dpu_hw_sspp_ops {
bool danger_safe_en);
/**
+ * setup_clk_force_ctrl - setup clock force control
+ * @ctx: Pointer to pipe context
+ * @enable: enable clock force if true
+ */
+ bool (*setup_clk_force_ctrl)(struct dpu_hw_sspp *ctx,
+ bool enable);
+
+ /**
* setup_histogram - setup histograms
* @ctx: Pointer to pipe context
* @cfg: Pointer to histogram configuration
@@ -317,7 +325,7 @@ struct dpu_hw_sspp_ops {
struct dpu_hw_sspp {
struct dpu_hw_blk base;
struct dpu_hw_blk_reg_map hw;
- const struct dpu_ubwc_cfg *ubwc;
+ const struct msm_mdss_data *ubwc;
/* Pipe */
enum dpu_sspp idx;
@@ -333,10 +341,12 @@ struct dpu_kms;
* Should be called once before accessing every pipe.
* @cfg: Pipe catalog entry for which driver object is required
* @addr: Mapped register io address of MDP
- * @ubwc: UBWC configuration data
+ * @mdss_data: UBWC / MDSS configuration data
+ * @mdss_rev: dpu core's major and minor versions
*/
struct dpu_hw_sspp *dpu_hw_sspp_init(const struct dpu_sspp_cfg *cfg,
- void __iomem *addr, const struct dpu_ubwc_cfg *ubwc);
+ void __iomem *addr, const struct msm_mdss_data *mdss_data,
+ const struct dpu_mdss_version *mdss_rev);
/**
* dpu_hw_sspp_destroy(): Destroys SSPP driver context
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
index 963bdb5e0252..24e734768a72 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -66,34 +66,13 @@ static void dpu_hw_setup_split_pipe(struct dpu_hw_mdp *mdp,
static bool dpu_hw_setup_clk_force_ctrl(struct dpu_hw_mdp *mdp,
enum dpu_clk_ctrl_type clk_ctrl, bool enable)
{
- struct dpu_hw_blk_reg_map *c;
- u32 reg_off, bit_off;
- u32 reg_val, new_val;
- bool clk_forced_on;
-
if (!mdp)
return false;
- c = &mdp->hw;
-
if (clk_ctrl <= DPU_CLK_CTRL_NONE || clk_ctrl >= DPU_CLK_CTRL_MAX)
return false;
- reg_off = mdp->caps->clk_ctrls[clk_ctrl].reg_off;
- bit_off = mdp->caps->clk_ctrls[clk_ctrl].bit_off;
-
- reg_val = DPU_REG_READ(c, reg_off);
-
- if (enable)
- new_val = reg_val | BIT(bit_off);
- else
- new_val = reg_val & ~BIT(bit_off);
-
- DPU_REG_WRITE(c, reg_off, new_val);
-
- clk_forced_on = !(reg_val & BIT(bit_off));
-
- return clk_forced_on;
+ return dpu_hw_clk_force_ctrl(&mdp->hw, &mdp->caps->clk_ctrls[clk_ctrl], enable);
}
@@ -268,51 +247,25 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
ops->intf_audio_select = dpu_hw_intf_audio_select;
}
-static const struct dpu_mdp_cfg *_top_offset(enum dpu_mdp mdp,
- const struct dpu_mdss_cfg *m,
- void __iomem *addr,
- struct dpu_hw_blk_reg_map *b)
-{
- int i;
-
- if (!m || !addr || !b)
- return ERR_PTR(-EINVAL);
-
- for (i = 0; i < m->mdp_count; i++) {
- if (mdp == m->mdp[i].id) {
- b->blk_addr = addr + m->mdp[i].base;
- b->log_mask = DPU_DBG_MASK_TOP;
- return &m->mdp[i];
- }
- }
-
- return ERR_PTR(-EINVAL);
-}
-
-struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
+struct dpu_hw_mdp *dpu_hw_mdptop_init(const struct dpu_mdp_cfg *cfg,
void __iomem *addr,
const struct dpu_mdss_cfg *m)
{
struct dpu_hw_mdp *mdp;
- const struct dpu_mdp_cfg *cfg;
- if (!addr || !m)
+ if (!addr)
return ERR_PTR(-EINVAL);
mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
if (!mdp)
return ERR_PTR(-ENOMEM);
- cfg = _top_offset(idx, m, addr, &mdp->hw);
- if (IS_ERR_OR_NULL(cfg)) {
- kfree(mdp);
- return ERR_PTR(-EINVAL);
- }
+ mdp->hw.blk_addr = addr + cfg->base;
+ mdp->hw.log_mask = DPU_DBG_MASK_TOP;
/*
* Assign ops
*/
- mdp->idx = idx;
mdp->caps = cfg;
_setup_mdp_ops(&mdp->ops, mdp->caps->features);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
index a1a9e44bed36..8b1463d2b2f0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -137,7 +137,6 @@ struct dpu_hw_mdp {
struct dpu_hw_blk_reg_map hw;
/* top */
- enum dpu_mdp idx;
const struct dpu_mdp_cfg *caps;
/* ops */
@@ -145,12 +144,12 @@ struct dpu_hw_mdp {
};
/**
- * dpu_hw_mdptop_init - initializes the top driver for the passed idx
- * @idx: Interface index for which driver object is required
+ * dpu_hw_mdptop_init - initializes the top driver for the passed config
+ * @cfg: MDP TOP configuration from catalog
* @addr: Mapped register io address of MDP
* @m: Pointer to mdss catalog data
*/
-struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
+struct dpu_hw_mdp *dpu_hw_mdptop_init(const struct dpu_mdp_cfg *cfg,
void __iomem *addr,
const struct dpu_mdss_cfg *m);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
index 9d2273fd2fed..18b16b2d2bf5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
@@ -546,3 +546,24 @@ void dpu_setup_cdp(struct dpu_hw_blk_reg_map *c, u32 offset,
DPU_REG_WRITE(c, offset, cdp_cntl);
}
+
+bool dpu_hw_clk_force_ctrl(struct dpu_hw_blk_reg_map *c,
+ const struct dpu_clk_ctrl_reg *clk_ctrl_reg,
+ bool enable)
+{
+ u32 reg_val, new_val;
+ bool clk_forced_on;
+
+ reg_val = DPU_REG_READ(c, clk_ctrl_reg->reg_off);
+
+ if (enable)
+ new_val = reg_val | BIT(clk_ctrl_reg->bit_off);
+ else
+ new_val = reg_val & ~BIT(clk_ctrl_reg->bit_off);
+
+ DPU_REG_WRITE(c, clk_ctrl_reg->reg_off, new_val);
+
+ clk_forced_on = !(reg_val & BIT(clk_ctrl_reg->bit_off));
+
+ return clk_forced_on;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
index 1f6079f47071..4bea139081bc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -367,4 +367,8 @@ int dpu_hw_collect_misr(struct dpu_hw_blk_reg_map *c,
u32 misr_signature_offset,
u32 *misr_value);
+bool dpu_hw_clk_force_ctrl(struct dpu_hw_blk_reg_map *c,
+ const struct dpu_clk_ctrl_reg *clk_ctrl_reg,
+ bool enable);
+
#endif /* _DPU_HW_UTIL_H */
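The new dpu_hw_clk_force_ctrl() helper centralizes the force-on sequence that previously lived only in the MDP TOP code. Its return value reports the pre-write state: true means the clock was gated and has just been forced on, so the caller knows whether to undo it. A minimal usage sketch, with clk_ctrl_reg standing in for whichever per-block register applies:

        bool forced_on;

        forced_on = dpu_hw_clk_force_ctrl(&ctx->hw, &clk_ctrl_reg, true);

        /* ... program registers that need the block clock running ... */

        if (forced_on)
                dpu_hw_clk_force_ctrl(&ctx->hw, &clk_ctrl_reg, false);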
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
index ebc416400382..9668fb97c047 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
@@ -43,6 +43,7 @@
#define WB_MUX 0x150
#define WB_CROP_CTRL 0x154
#define WB_CROP_OFFSET 0x158
+#define WB_CLK_CTRL 0x178
#define WB_CSC_BASE 0x260
#define WB_DST_ADDR_SW_STATUS 0x2B0
#define WB_CDP_CNTL 0x2B4
@@ -175,8 +176,18 @@ static void dpu_hw_wb_bind_pingpong_blk(
DPU_REG_WRITE(c, WB_MUX, mux_cfg);
}
+static bool dpu_hw_wb_setup_clk_force_ctrl(struct dpu_hw_wb *ctx, bool enable)
+{
+ static const struct dpu_clk_ctrl_reg wb_clk_ctrl = {
+ .reg_off = WB_CLK_CTRL,
+ .bit_off = 0
+ };
+
+ return dpu_hw_clk_force_ctrl(&ctx->hw, &wb_clk_ctrl, enable);
+}
+
static void _setup_wb_ops(struct dpu_hw_wb_ops *ops,
- unsigned long features)
+ unsigned long features, const struct dpu_mdss_version *mdss_rev)
{
ops->setup_outaddress = dpu_hw_wb_setup_outaddress;
ops->setup_outformat = dpu_hw_wb_setup_format;
@@ -192,10 +203,13 @@ static void _setup_wb_ops(struct dpu_hw_wb_ops *ops,
if (test_bit(DPU_WB_INPUT_CTRL, &features))
ops->bind_pingpong_blk = dpu_hw_wb_bind_pingpong_blk;
+
+ if (mdss_rev->core_major_ver >= 9)
+ ops->setup_clk_force_ctrl = dpu_hw_wb_setup_clk_force_ctrl;
}
struct dpu_hw_wb *dpu_hw_wb_init(const struct dpu_wb_cfg *cfg,
- void __iomem *addr)
+ void __iomem *addr, const struct dpu_mdss_version *mdss_rev)
{
struct dpu_hw_wb *c;
@@ -212,7 +226,7 @@ struct dpu_hw_wb *dpu_hw_wb_init(const struct dpu_wb_cfg *cfg,
/* Assign ops */
c->idx = cfg->id;
c->caps = cfg;
- _setup_wb_ops(&c->ops, c->caps->features);
+ _setup_wb_ops(&c->ops, c->caps->features, mdss_rev);
return c;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
index 2d7db2efa3d0..88792f450a92 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
@@ -29,6 +29,7 @@ struct dpu_hw_wb_cfg {
* @setup_outformat: setup output format of writeback block from writeback job
* @setup_qos_lut: setup qos LUT for writeback block based on input
* @setup_cdp: setup chroma down prefetch block for writeback block
+ * @setup_clk_force_ctrl: setup clock force control
* @bind_pingpong_blk: enable/disable the connection with ping-pong block
*/
struct dpu_hw_wb_ops {
@@ -48,6 +49,9 @@ struct dpu_hw_wb_ops {
const struct dpu_format *fmt,
bool enable);
+ bool (*setup_clk_force_ctrl)(struct dpu_hw_wb *ctx,
+ bool enable);
+
void (*bind_pingpong_blk)(struct dpu_hw_wb *ctx,
const enum dpu_pingpong pp);
};
@@ -74,10 +78,11 @@ struct dpu_hw_wb {
* dpu_hw_wb_init() - Initializes the writeback hw driver object.
* @cfg: wb_path catalog entry for which driver object is required
* @addr: mapped register io address of MDP
+ * @mdss_rev: dpu core's major and minor versions
* Return: Error code or allocated dpu_hw_wb context
*/
struct dpu_hw_wb *dpu_hw_wb_init(const struct dpu_wb_cfg *cfg,
- void __iomem *addr);
+ void __iomem *addr, const struct dpu_mdss_version *mdss_rev);
/**
* dpu_hw_wb_destroy(): Destroy writeback hw driver object.
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index aa8499de1b9f..fe7267b3bff5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -22,6 +22,7 @@
#include "msm_drv.h"
#include "msm_mmu.h"
+#include "msm_mdss.h"
#include "msm_gem.h"
#include "disp/msm_disp_snapshot.h"
@@ -388,8 +389,7 @@ static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
{
struct icc_path *path0;
struct icc_path *path1;
- struct drm_device *dev = dpu_kms->dev;
- struct device *dpu_dev = dev->dev;
+ struct device *dpu_dev = &dpu_kms->pdev->dev;
path0 = msm_icc_get(dpu_dev, "mdp0-mem");
path1 = msm_icc_get(dpu_dev, "mdp1-mem");
@@ -544,8 +544,6 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev,
info.is_cmd_mode = msm_dsi_is_cmd_mode(priv->dsi[i]);
- info.dsc = msm_dsi_get_dsc_config(priv->dsi[i]);
-
encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI, &info);
if (IS_ERR(encoder)) {
DPU_ERROR("encoder init failed for dsi display\n");
@@ -794,7 +792,7 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
ret = PTR_ERR(crtc);
return ret;
}
- priv->crtcs[priv->num_crtcs++] = crtc;
+ priv->num_crtcs++;
}
/* All CRTCs are compatible with all encoders */
@@ -830,21 +828,9 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
dpu_kms->catalog = NULL;
- if (dpu_kms->vbif[VBIF_NRT])
- devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
- dpu_kms->vbif[VBIF_NRT] = NULL;
-
- if (dpu_kms->vbif[VBIF_RT])
- devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
- dpu_kms->vbif[VBIF_RT] = NULL;
-
if (dpu_kms->hw_mdp)
dpu_hw_mdp_destroy(dpu_kms->hw_mdp);
dpu_kms->hw_mdp = NULL;
-
- if (dpu_kms->mmio)
- devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
- dpu_kms->mmio = NULL;
}
static void dpu_kms_destroy(struct msm_kms *kms)
@@ -890,6 +876,7 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
int i;
struct dpu_kms *dpu_kms;
const struct dpu_mdss_cfg *cat;
+ void __iomem *base;
dpu_kms = to_dpu_kms(kms);
@@ -900,37 +887,67 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
/* dump CTL sub-blocks HW regs info */
for (i = 0; i < cat->ctl_count; i++)
msm_disp_snapshot_add_block(disp_state, cat->ctl[i].len,
- dpu_kms->mmio + cat->ctl[i].base, "ctl_%d", i);
+ dpu_kms->mmio + cat->ctl[i].base, cat->ctl[i].name);
/* dump DSPP sub-blocks HW regs info */
- for (i = 0; i < cat->dspp_count; i++)
- msm_disp_snapshot_add_block(disp_state, cat->dspp[i].len,
- dpu_kms->mmio + cat->dspp[i].base, "dspp_%d", i);
+ for (i = 0; i < cat->dspp_count; i++) {
+ base = dpu_kms->mmio + cat->dspp[i].base;
+ msm_disp_snapshot_add_block(disp_state, cat->dspp[i].len, base, cat->dspp[i].name);
+
+ if (cat->dspp[i].sblk && cat->dspp[i].sblk->pcc.len > 0)
+ msm_disp_snapshot_add_block(disp_state, cat->dspp[i].sblk->pcc.len,
+ base + cat->dspp[i].sblk->pcc.base, "%s_%s",
+ cat->dspp[i].name,
+ cat->dspp[i].sblk->pcc.name);
+ }
/* dump INTF sub-blocks HW regs info */
for (i = 0; i < cat->intf_count; i++)
msm_disp_snapshot_add_block(disp_state, cat->intf[i].len,
- dpu_kms->mmio + cat->intf[i].base, "intf_%d", i);
+ dpu_kms->mmio + cat->intf[i].base, cat->intf[i].name);
/* dump PP sub-blocks HW regs info */
- for (i = 0; i < cat->pingpong_count; i++)
- msm_disp_snapshot_add_block(disp_state, cat->pingpong[i].len,
- dpu_kms->mmio + cat->pingpong[i].base, "pingpong_%d", i);
+ for (i = 0; i < cat->pingpong_count; i++) {
+ base = dpu_kms->mmio + cat->pingpong[i].base;
+ msm_disp_snapshot_add_block(disp_state, cat->pingpong[i].len, base,
+ cat->pingpong[i].name);
+
+ /* TE2 sub-block has length of 0, so will not print it */
+
+ if (cat->pingpong[i].sblk && cat->pingpong[i].sblk->dither.len > 0)
+ msm_disp_snapshot_add_block(disp_state, cat->pingpong[i].sblk->dither.len,
+ base + cat->pingpong[i].sblk->dither.base,
+ "%s_%s", cat->pingpong[i].name,
+ cat->pingpong[i].sblk->dither.name);
+ }
/* dump SSPP sub-blocks HW regs info */
- for (i = 0; i < cat->sspp_count; i++)
- msm_disp_snapshot_add_block(disp_state, cat->sspp[i].len,
- dpu_kms->mmio + cat->sspp[i].base, "sspp_%d", i);
+ for (i = 0; i < cat->sspp_count; i++) {
+ base = dpu_kms->mmio + cat->sspp[i].base;
+ msm_disp_snapshot_add_block(disp_state, cat->sspp[i].len, base, cat->sspp[i].name);
+
+ if (cat->sspp[i].sblk && cat->sspp[i].sblk->scaler_blk.len > 0)
+ msm_disp_snapshot_add_block(disp_state, cat->sspp[i].sblk->scaler_blk.len,
+ base + cat->sspp[i].sblk->scaler_blk.base,
+ "%s_%s", cat->sspp[i].name,
+ cat->sspp[i].sblk->scaler_blk.name);
+
+ if (cat->sspp[i].sblk && cat->sspp[i].sblk->csc_blk.len > 0)
+ msm_disp_snapshot_add_block(disp_state, cat->sspp[i].sblk->csc_blk.len,
+ base + cat->sspp[i].sblk->csc_blk.base,
+ "%s_%s", cat->sspp[i].name,
+ cat->sspp[i].sblk->csc_blk.name);
+ }
/* dump LM sub-blocks HW regs info */
for (i = 0; i < cat->mixer_count; i++)
msm_disp_snapshot_add_block(disp_state, cat->mixer[i].len,
- dpu_kms->mmio + cat->mixer[i].base, "lm_%d", i);
+ dpu_kms->mmio + cat->mixer[i].base, cat->mixer[i].name);
/* dump WB sub-blocks HW regs info */
for (i = 0; i < cat->wb_count; i++)
msm_disp_snapshot_add_block(disp_state, cat->wb[i].len,
- dpu_kms->mmio + cat->wb[i].base, "wb_%d", i);
+ dpu_kms->mmio + cat->wb[i].base, cat->wb[i].name);
if (cat->mdp[0].features & BIT(DPU_MDP_PERIPH_0_REMOVED)) {
msm_disp_snapshot_add_block(disp_state, MDP_PERIPH_TOP0,
@@ -943,9 +960,20 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
}
/* dump DSC sub-blocks HW regs info */
- for (i = 0; i < cat->dsc_count; i++)
- msm_disp_snapshot_add_block(disp_state, cat->dsc[i].len,
- dpu_kms->mmio + cat->dsc[i].base, "dsc_%d", i);
+ for (i = 0; i < cat->dsc_count; i++) {
+ base = dpu_kms->mmio + cat->dsc[i].base;
+ msm_disp_snapshot_add_block(disp_state, cat->dsc[i].len, base, cat->dsc[i].name);
+
+ if (cat->dsc[i].features & BIT(DPU_DSC_HW_REV_1_2)) {
+ struct dpu_dsc_blk enc = cat->dsc[i].sblk->enc;
+ struct dpu_dsc_blk ctl = cat->dsc[i].sblk->ctl;
+
+ msm_disp_snapshot_add_block(disp_state, enc.len, base + enc.base, "%s_%s",
+ cat->dsc[i].name, enc.name);
+ msm_disp_snapshot_add_block(disp_state, ctl.len, base + ctl.base, "%s_%s",
+ cat->dsc[i].name, ctl.name);
+ }
+ }
pm_runtime_put_sync(&dpu_kms->pdev->dev);
}
@@ -1011,11 +1039,14 @@ unsigned long dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
return clk_get_rate(clk);
}
+#define DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE 412500000
+
static int dpu_kms_hw_init(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms;
struct drm_device *dev;
int i, rc = -EINVAL;
+ unsigned long max_core_clk_rate;
u32 core_rev;
if (!kms) {
@@ -1035,30 +1066,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
atomic_set(&dpu_kms->bandwidth_ref, 0);
- dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp");
- if (IS_ERR(dpu_kms->mmio)) {
- rc = PTR_ERR(dpu_kms->mmio);
- DPU_ERROR("mdp register memory map failed: %d\n", rc);
- dpu_kms->mmio = NULL;
- goto error;
- }
- DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
-
- dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif");
- if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
- rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
- DPU_ERROR("vbif register memory map failed: %d\n", rc);
- dpu_kms->vbif[VBIF_RT] = NULL;
- goto error;
- }
- dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(dpu_kms->pdev, "vbif_nrt");
- if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
- dpu_kms->vbif[VBIF_NRT] = NULL;
- DPU_DEBUG("VBIF NRT is not defined");
- }
-
- dpu_kms_parse_data_bus_icc_path(dpu_kms);
-
rc = pm_runtime_resume_and_get(&dpu_kms->pdev->dev);
if (rc < 0)
goto error;
@@ -1084,7 +1091,20 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
goto power_error;
}
- rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio);
+ dpu_kms->mdss = msm_mdss_get_mdss_data(dpu_kms->pdev->dev.parent);
+ if (IS_ERR(dpu_kms->mdss)) {
+ rc = PTR_ERR(dpu_kms->mdss);
+ DPU_ERROR("failed to get MDSS data: %d\n", rc);
+ goto power_error;
+ }
+
+ if (!dpu_kms->mdss) {
+ rc = -EINVAL;
+ DPU_ERROR("NULL MDSS data\n");
+ goto power_error;
+ }
+
+ rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mdss, dpu_kms->mmio);
if (rc) {
DPU_ERROR("rm init failed: %d\n", rc);
goto power_error;
@@ -1092,7 +1112,8 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_kms->rm_init = true;
- dpu_kms->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, dpu_kms->mmio,
+ dpu_kms->hw_mdp = dpu_hw_mdptop_init(dpu_kms->catalog->mdp,
+ dpu_kms->mmio,
dpu_kms->catalog);
if (IS_ERR(dpu_kms->hw_mdp)) {
rc = PTR_ERR(dpu_kms->hw_mdp);
@@ -1115,8 +1136,14 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_kms->hw_vbif[vbif->id] = hw;
}
- rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
- msm_clk_bulk_get_clock(dpu_kms->clocks, dpu_kms->num_clocks, "core"));
+ /* TODO: use the same max_freq as in dpu_kms_hw_init */
+ max_core_clk_rate = dpu_kms_get_clk_rate(dpu_kms, "core");
+ if (!max_core_clk_rate) {
+ DPU_DEBUG("max core clk rate not determined, using default\n");
+ max_core_clk_rate = DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE;
+ }
+
+ rc = dpu_core_perf_init(&dpu_kms->perf, dpu_kms->catalog->perf, max_core_clk_rate);
if (rc) {
DPU_ERROR("failed to init perf %d\n", rc);
goto perf_err;
@@ -1162,7 +1189,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
return 0;
drm_obj_init_err:
- dpu_core_perf_destroy(&dpu_kms->perf);
hw_intr_init_err:
perf_err:
power_error:
@@ -1178,33 +1204,11 @@ static int dpu_kms_init(struct drm_device *ddev)
struct msm_drm_private *priv = ddev->dev_private;
struct device *dev = ddev->dev;
struct platform_device *pdev = to_platform_device(dev);
- struct dpu_kms *dpu_kms;
- int irq;
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
struct dev_pm_opp *opp;
int ret = 0;
unsigned long max_freq = ULONG_MAX;
- dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
- if (!dpu_kms)
- return -ENOMEM;
-
- ret = devm_pm_opp_set_clkname(dev, "core");
- if (ret)
- return ret;
- /* OPP table is optional */
- ret = devm_pm_opp_of_add_table(dev);
- if (ret && ret != -ENODEV) {
- dev_err(dev, "invalid OPP table in device tree\n");
- return ret;
- }
-
- ret = devm_clk_bulk_get_all(&pdev->dev, &dpu_kms->clocks);
- if (ret < 0) {
- DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
- return ret;
- }
- dpu_kms->num_clocks = ret;
-
opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
if (!IS_ERR(opp))
dev_pm_opp_put(opp);
@@ -1217,33 +1221,79 @@ static int dpu_kms_init(struct drm_device *ddev)
return ret;
}
dpu_kms->dev = ddev;
- dpu_kms->pdev = pdev;
pm_runtime_enable(&pdev->dev);
dpu_kms->rpm_enabled = true;
- priv->kms = &dpu_kms->base;
-
- irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
- if (!irq) {
- DPU_ERROR("failed to get irq\n");
- return -EINVAL;
- }
- dpu_kms->base.irq = irq;
-
return 0;
}
static int dpu_dev_probe(struct platform_device *pdev)
{
- return msm_drv_probe(&pdev->dev, dpu_kms_init);
+ struct device *dev = &pdev->dev;
+ struct dpu_kms *dpu_kms;
+ int irq;
+ int ret = 0;
+
+ dpu_kms = devm_kzalloc(dev, sizeof(*dpu_kms), GFP_KERNEL);
+ if (!dpu_kms)
+ return -ENOMEM;
+
+ dpu_kms->pdev = pdev;
+
+ ret = devm_pm_opp_set_clkname(dev, "core");
+ if (ret)
+ return ret;
+ /* OPP table is optional */
+ ret = devm_pm_opp_of_add_table(dev);
+ if (ret && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "invalid OPP table in device tree\n");
+
+ ret = devm_clk_bulk_get_all(&pdev->dev, &dpu_kms->clocks);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to parse clocks\n");
+
+ dpu_kms->num_clocks = ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "failed to get irq\n");
+
+ dpu_kms->base.irq = irq;
+
+ dpu_kms->mmio = msm_ioremap(pdev, "mdp");
+ if (IS_ERR(dpu_kms->mmio)) {
+ ret = PTR_ERR(dpu_kms->mmio);
+ DPU_ERROR("mdp register memory map failed: %d\n", ret);
+ dpu_kms->mmio = NULL;
+ return ret;
+ }
+ DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
+
+ dpu_kms->vbif[VBIF_RT] = msm_ioremap(pdev, "vbif");
+ if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
+ ret = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
+ DPU_ERROR("vbif register memory map failed: %d\n", ret);
+ dpu_kms->vbif[VBIF_RT] = NULL;
+ return ret;
+ }
+
+ dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(pdev, "vbif_nrt");
+ if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
+ dpu_kms->vbif[VBIF_NRT] = NULL;
+ DPU_DEBUG("VBIF NRT is not defined");
+ }
+
+ ret = dpu_kms_parse_data_bus_icc_path(dpu_kms);
+ if (ret)
+ return ret;
+
+ return msm_drv_probe(&pdev->dev, dpu_kms_init, &dpu_kms->base);
}
-static int dpu_dev_remove(struct platform_device *pdev)
+static void dpu_dev_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &msm_drm_ops);
-
- return 0;
}
static int __maybe_unused dpu_runtime_suspend(struct device *dev)
@@ -1292,8 +1342,8 @@ static const struct dev_pm_ops dpu_pm_ops = {
SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
- .prepare = msm_pm_prepare,
- .complete = msm_pm_complete,
+ .prepare = msm_kms_pm_prepare,
+ .complete = msm_kms_pm_complete,
};
static const struct of_device_id dpu_dt_match[] = {
@@ -1305,6 +1355,7 @@ static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,sc8180x-dpu", .data = &dpu_sc8180x_cfg, },
{ .compatible = "qcom,sc8280xp-dpu", .data = &dpu_sc8280xp_cfg, },
{ .compatible = "qcom,sm6115-dpu", .data = &dpu_sm6115_cfg, },
+ { .compatible = "qcom,sm6125-dpu", .data = &dpu_sm6125_cfg, },
{ .compatible = "qcom,sm6350-dpu", .data = &dpu_sm6350_cfg, },
{ .compatible = "qcom,sm6375-dpu", .data = &dpu_sm6375_cfg, },
{ .compatible = "qcom,sm8150-dpu", .data = &dpu_sm8150_cfg, },
@@ -1318,8 +1369,8 @@ MODULE_DEVICE_TABLE(of, dpu_dt_match);
static struct platform_driver dpu_driver = {
.probe = dpu_dev_probe,
- .remove = dpu_dev_remove,
- .shutdown = msm_drv_shutdown,
+ .remove_new = dpu_dev_remove,
+ .shutdown = msm_kms_shutdown,
.driver = {
.name = "msm_dpu",
.of_match_table = dpu_dt_match,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index f3bdd4f11108..b6f53ca6e962 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -67,6 +67,7 @@ struct dpu_kms {
struct msm_kms base;
struct drm_device *dev;
const struct dpu_mdss_cfg *catalog;
+ const struct msm_mdss_data *mdss;
/* io/register spaces: */
void __iomem *mmio, *vbif[VBIF_MAX];
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index c2aaaded07ed..3eef5e025e12 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -119,6 +119,7 @@ static u64 _dpu_plane_calc_bw(const struct dpu_mdss_cfg *catalog,
struct dpu_sw_pipe_cfg *pipe_cfg)
{
int src_width, src_height, dst_height, fps;
+ u64 plane_pixel_rate, plane_bit_rate;
u64 plane_prefill_bw;
u64 plane_bw;
u32 hw_latency_lines;
@@ -136,13 +137,12 @@ static u64 _dpu_plane_calc_bw(const struct dpu_mdss_cfg *catalog,
scale_factor = src_height > dst_height ?
mult_frac(src_height, 1, dst_height) : 1;
- plane_bw =
- src_width * mode->vtotal * fps * fmt->bpp *
- scale_factor;
+ plane_pixel_rate = src_width * mode->vtotal * fps;
+ plane_bit_rate = plane_pixel_rate * fmt->bpp;
- plane_prefill_bw =
- src_width * hw_latency_lines * fps * fmt->bpp *
- scale_factor * mode->vtotal;
+ plane_bw = plane_bit_rate * scale_factor;
+
+ plane_prefill_bw = plane_bw * hw_latency_lines;
if ((vbp+vpw) > hw_latency_lines)
do_div(plane_prefill_bw, (vbp+vpw));
@@ -333,6 +333,23 @@ static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
enable);
}
+static bool _dpu_plane_sspp_clk_force_ctrl(struct dpu_hw_sspp *sspp,
+ struct dpu_hw_mdp *mdp,
+ bool enable, bool *forced_on)
+{
+ if (sspp->ops.setup_clk_force_ctrl) {
+ *forced_on = sspp->ops.setup_clk_force_ctrl(sspp, enable);
+ return true;
+ }
+
+ if (mdp->ops.setup_clk_force_ctrl) {
+ *forced_on = mdp->ops.setup_clk_force_ctrl(mdp, sspp->cap->clk_ctrl, enable);
+ return true;
+ }
+
+ return false;
+}
+
/**
* _dpu_plane_set_ot_limit - set OT limit for the given plane
* @plane: Pointer to drm plane
@@ -348,6 +365,7 @@ static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_vbif_set_ot_params ot_params;
struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
+ bool forced_on = false;
memset(&ot_params, 0, sizeof(ot_params));
ot_params.xin_id = pipe->sspp->cap->xin_id;
@@ -357,10 +375,17 @@ static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
ot_params.is_wfd = !pdpu->is_rt_pipe;
ot_params.frame_rate = frame_rate;
ot_params.vbif_idx = VBIF_RT;
- ot_params.clk_ctrl = pipe->sspp->cap->clk_ctrl;
ot_params.rd = true;
+ if (!_dpu_plane_sspp_clk_force_ctrl(pipe->sspp, dpu_kms->hw_mdp,
+ true, &forced_on))
+ return;
+
dpu_vbif_set_ot_limit(dpu_kms, &ot_params);
+
+ if (forced_on)
+ _dpu_plane_sspp_clk_force_ctrl(pipe->sspp, dpu_kms->hw_mdp,
+ false, &forced_on);
}
/**
@@ -374,21 +399,28 @@ static void _dpu_plane_set_qos_remap(struct drm_plane *plane,
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_vbif_set_qos_params qos_params;
struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
+ bool forced_on = false;
memset(&qos_params, 0, sizeof(qos_params));
qos_params.vbif_idx = VBIF_RT;
- qos_params.clk_ctrl = pipe->sspp->cap->clk_ctrl;
qos_params.xin_id = pipe->sspp->cap->xin_id;
qos_params.num = pipe->sspp->idx - SSPP_VIG0;
qos_params.is_rt = pdpu->is_rt_pipe;
- DPU_DEBUG_PLANE(pdpu, "pipe:%d vbif:%d xin:%d rt:%d, clk_ctrl:%d\n",
+ DPU_DEBUG_PLANE(pdpu, "pipe:%d vbif:%d xin:%d rt:%d\n",
qos_params.num,
qos_params.vbif_idx,
- qos_params.xin_id, qos_params.is_rt,
- qos_params.clk_ctrl);
+ qos_params.xin_id, qos_params.is_rt);
+
+ if (!_dpu_plane_sspp_clk_force_ctrl(pipe->sspp, dpu_kms->hw_mdp,
+ true, &forced_on))
+ return;
dpu_vbif_set_qos_remap(dpu_kms, &qos_params);
+
+ if (forced_on)
+ _dpu_plane_sspp_clk_force_ctrl(pipe->sspp, dpu_kms->hw_mdp,
+ false, &forced_on);
}
static void _dpu_plane_setup_scaler3(struct dpu_hw_sspp *pipe_hw,
@@ -733,9 +765,11 @@ static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu,
static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
struct dpu_sw_pipe *pipe,
struct dpu_sw_pipe_cfg *pipe_cfg,
- const struct dpu_format *fmt)
+ const struct dpu_format *fmt,
+ const struct drm_display_mode *mode)
{
uint32_t min_src_size;
+ struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
@@ -774,6 +808,12 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
return -EINVAL;
}
+ /* max clk check */
+ if (_dpu_plane_calc_clk(mode, pipe_cfg) > kms->perf.max_core_clk_rate) {
+ DPU_DEBUG_PLANE(pdpu, "plane exceeds max mdp core clk limits\n");
+ return -E2BIG;
+ }
+
return 0;
}
@@ -899,12 +939,13 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2;
}
- ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt);
+ ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt, &crtc_state->adjusted_mode);
if (ret)
return ret;
if (r_pipe->sspp) {
- ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt);
+ ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt,
+ &crtc_state->adjusted_mode);
if (ret)
return ret;
}
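The reworked bandwidth math splits the old single expression into pixel-rate, bit-rate and prefill terms. As a rough worked example (illustrative numbers, taking fmt->bpp as bytes per pixel): a 1920-pixel-wide layer on a mode with vtotal 2250 at 60 fps gives plane_pixel_rate = 1920 * 2250 * 60 = 259,200,000 pixels/s; at 4 bytes per pixel plane_bit_rate is about 1.04 GB/s, which with scale_factor 1 is also plane_bw; with hw_latency_lines = 24 and vbp + vpw = 36, plane_prefill_bw becomes roughly 1.04 GB/s * 24 / 36, i.e. about 0.69 GB/s. The new check in dpu_plane_atomic_check_pipe() then rejects the plane with -E2BIG if its derived clock demand exceeds max_core_clk_rate.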
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 471842bbb950..8759466e2f37 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -101,6 +101,7 @@ int dpu_rm_destroy(struct dpu_rm *rm)
int dpu_rm_init(struct dpu_rm *rm,
const struct dpu_mdss_cfg *cat,
+ const struct msm_mdss_data *mdss_data,
void __iomem *mmio)
{
int rc, i;
@@ -145,7 +146,7 @@ int dpu_rm_init(struct dpu_rm *rm,
struct dpu_hw_pingpong *hw;
const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];
- hw = dpu_hw_pingpong_init(pp, mmio);
+ hw = dpu_hw_pingpong_init(pp, mmio, cat->mdss_ver);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed pingpong object creation: err %d\n",
@@ -161,7 +162,7 @@ int dpu_rm_init(struct dpu_rm *rm,
struct dpu_hw_intf *hw;
const struct dpu_intf_cfg *intf = &cat->intf[i];
- hw = dpu_hw_intf_init(intf, mmio);
+ hw = dpu_hw_intf_init(intf, mmio, cat->mdss_ver);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed intf object creation: err %d\n", rc);
@@ -174,7 +175,7 @@ int dpu_rm_init(struct dpu_rm *rm,
struct dpu_hw_wb *hw;
const struct dpu_wb_cfg *wb = &cat->wb[i];
- hw = dpu_hw_wb_init(wb, mmio);
+ hw = dpu_hw_wb_init(wb, mmio, cat->mdss_ver);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed wb object creation: err %d\n", rc);
@@ -230,7 +231,7 @@ int dpu_rm_init(struct dpu_rm *rm,
struct dpu_hw_sspp *hw;
const struct dpu_sspp_cfg *sspp = &cat->sspp[i];
- hw = dpu_hw_sspp_init(sspp, mmio, cat->ubwc);
+ hw = dpu_hw_sspp_init(sspp, mmio, mdss_data, cat->mdss_ver);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed sspp object creation: err %d\n", rc);
@@ -253,28 +254,19 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
}
/**
- * _dpu_rm_check_lm_peer - check if a mixer is a peer of the primary
+ * _dpu_rm_get_lm_peer - get the id of a mixer which is a peer of the primary
* @rm: dpu resource manager handle
* @primary_idx: index of primary mixer in rm->mixer_blks[]
- * @peer_idx: index of other mixer in rm->mixer_blks[]
- * Return: true if rm->mixer_blks[peer_idx] is a peer of
- * rm->mixer_blks[primary_idx]
*/
-static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx,
- int peer_idx)
+static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
{
const struct dpu_lm_cfg *prim_lm_cfg;
- const struct dpu_lm_cfg *peer_cfg;
prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap;
- peer_cfg = to_dpu_hw_mixer(rm->mixer_blks[peer_idx])->cap;
- if (!test_bit(peer_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
- DPU_DEBUG("lm %d not peer of lm %d\n", peer_cfg->id,
- peer_cfg->id);
- return false;
- }
- return true;
+ if (prim_lm_cfg->lm_pair >= LM_0 && prim_lm_cfg->lm_pair < LM_MAX)
+ return prim_lm_cfg->lm_pair - LM_0;
+ return -EINVAL;
}
/**
@@ -351,7 +343,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
int lm_idx[MAX_BLOCKS];
int pp_idx[MAX_BLOCKS];
int dspp_idx[MAX_BLOCKS] = {0};
- int i, j, lm_count = 0;
+ int i, lm_count = 0;
if (!reqs->topology.num_lm) {
DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
@@ -376,16 +368,15 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
++lm_count;
/* Valid primary mixer found, find matching peers */
- for (j = i + 1; j < ARRAY_SIZE(rm->mixer_blks) &&
- lm_count < reqs->topology.num_lm; j++) {
- if (!rm->mixer_blks[j])
+ if (lm_count < reqs->topology.num_lm) {
+ int j = _dpu_rm_get_lm_peer(rm, i);
+
+ /* ignore the peer if there is an error or if the peer was already processed */
+ if (j < 0 || j < i)
continue;
- if (!_dpu_rm_check_lm_peer(rm, i, j)) {
- DPU_DEBUG("lm %d not peer of lm %d\n", LM_0 + j,
- LM_0 + i);
+ if (!rm->mixer_blks[j])
continue;
- }
if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
global_state, enc_id, j,
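With _dpu_rm_get_lm_peer() the pairing information comes from a single-valued lm_pair field in the mixer catalog entry rather than a bitmask, so each mixer names at most one peer. An illustrative catalog fragment (values are examples only, not taken from this patch):

        static const struct dpu_lm_cfg example_lm_0 = {
                .name = "lm_0", .id = LM_0,
                /* other fields omitted */
                .lm_pair = LM_1,        /* LM_0 may only pair with LM_1 */
        };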
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index d62c2edb2460..2b551566cbf4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -40,11 +40,13 @@ struct dpu_rm {
* for all HW blocks.
* @rm: DPU Resource Manager handle
* @cat: Pointer to hardware catalog
+ * @mdss_data: Pointer to MDSS / UBWC configuration
* @mmio: mapped register io address of MDP
* @Return: 0 on Success otherwise -ERROR
*/
int dpu_rm_init(struct dpu_rm *rm,
const struct dpu_mdss_cfg *cat,
+ const struct msm_mdss_data *mdss_data,
void __iomem *mmio);
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
index 1a92d21094f4..35d03b121a0b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -168,46 +168,50 @@ TRACE_EVENT(dpu_perf_crtc_update,
);
DECLARE_EVENT_CLASS(dpu_irq_template,
- TP_PROTO(int irq_idx),
- TP_ARGS(irq_idx),
+ TP_PROTO(unsigned int irq_reg, unsigned int irq_bit),
+ TP_ARGS(irq_reg, irq_bit),
TP_STRUCT__entry(
- __field( int, irq_idx )
+ __field( unsigned int, irq_reg )
+ __field( unsigned int, irq_bit )
),
TP_fast_assign(
- __entry->irq_idx = irq_idx;
+ __entry->irq_reg = irq_reg;
+ __entry->irq_bit = irq_bit;
),
- TP_printk("irq=%d", __entry->irq_idx)
+ TP_printk("IRQ=[%d, %d]", __entry->irq_reg, __entry->irq_bit)
);
DEFINE_EVENT(dpu_irq_template, dpu_irq_register_success,
- TP_PROTO(int irq_idx),
- TP_ARGS(irq_idx)
+ TP_PROTO(unsigned int irq_reg, unsigned int irq_bit),
+ TP_ARGS(irq_reg, irq_bit)
);
DEFINE_EVENT(dpu_irq_template, dpu_irq_unregister_success,
- TP_PROTO(int irq_idx),
- TP_ARGS(irq_idx)
+ TP_PROTO(unsigned int irq_reg, unsigned int irq_bit),
+ TP_ARGS(irq_reg, irq_bit)
);
TRACE_EVENT(dpu_enc_irq_wait_success,
TP_PROTO(uint32_t drm_id, void *func,
- int irq_idx, enum dpu_pingpong pp_idx, int atomic_cnt),
- TP_ARGS(drm_id, func, irq_idx, pp_idx, atomic_cnt),
+ unsigned int irq_reg, unsigned int irq_bit, enum dpu_pingpong pp_idx, int atomic_cnt),
+ TP_ARGS(drm_id, func, irq_reg, irq_bit, pp_idx, atomic_cnt),
TP_STRUCT__entry(
__field( uint32_t, drm_id )
__field( void *, func )
- __field( int, irq_idx )
+ __field( unsigned int, irq_reg )
+ __field( unsigned int, irq_bit )
__field( enum dpu_pingpong, pp_idx )
__field( int, atomic_cnt )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->func = func;
- __entry->irq_idx = irq_idx;
+ __entry->irq_reg = irq_reg;
+ __entry->irq_bit = irq_bit;
__entry->pp_idx = pp_idx;
__entry->atomic_cnt = atomic_cnt;
),
- TP_printk("id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d",
+ TP_printk("id=%u, callback=%ps, IRQ=[%d, %d], pp=%d, atomic_cnt=%d",
__entry->drm_id, __entry->func,
- __entry->irq_idx, __entry->pp_idx, __entry->atomic_cnt)
+ __entry->irq_reg, __entry->irq_bit, __entry->pp_idx, __entry->atomic_cnt)
);
DECLARE_EVENT_CLASS(dpu_drm_obj_template,
@@ -453,29 +457,6 @@ TRACE_EVENT(dpu_enc_trigger_flush,
__entry->extra_flush_bits, __entry->pending_flush_ret)
);
-DECLARE_EVENT_CLASS(dpu_enc_ktime_template,
- TP_PROTO(uint32_t drm_id, ktime_t time),
- TP_ARGS(drm_id, time),
- TP_STRUCT__entry(
- __field( uint32_t, drm_id )
- __field( ktime_t, time )
- ),
- TP_fast_assign(
- __entry->drm_id = drm_id;
- __entry->time = time;
- ),
- TP_printk("id=%u, time=%lld", __entry->drm_id,
- ktime_to_ms(__entry->time))
-);
-DEFINE_EVENT(dpu_enc_ktime_template, dpu_enc_vsync_event_work,
- TP_PROTO(uint32_t drm_id, ktime_t time),
- TP_ARGS(drm_id, time)
-);
-DEFINE_EVENT(dpu_enc_ktime_template, dpu_enc_early_kickoff,
- TP_PROTO(uint32_t drm_id, ktime_t time),
- TP_ARGS(drm_id, time)
-);
-
DECLARE_EVENT_CLASS(dpu_id_event_template,
TP_PROTO(uint32_t drm_id, u32 event),
TP_ARGS(drm_id, event),
@@ -507,12 +488,13 @@ DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_more_pending,
);
TRACE_EVENT(dpu_enc_wait_event_timeout,
- TP_PROTO(uint32_t drm_id, int irq_idx, int rc, s64 time,
+ TP_PROTO(uint32_t drm_id, unsigned int irq_reg, unsigned int irq_bit, int rc, s64 time,
s64 expected_time, int atomic_cnt),
- TP_ARGS(drm_id, irq_idx, rc, time, expected_time, atomic_cnt),
+ TP_ARGS(drm_id, irq_reg, irq_bit, rc, time, expected_time, atomic_cnt),
TP_STRUCT__entry(
__field( uint32_t, drm_id )
- __field( int, irq_idx )
+ __field( unsigned int, irq_reg )
+ __field( unsigned int, irq_bit )
__field( int, rc )
__field( s64, time )
__field( s64, expected_time )
@@ -520,14 +502,15 @@ TRACE_EVENT(dpu_enc_wait_event_timeout,
),
TP_fast_assign(
__entry->drm_id = drm_id;
- __entry->irq_idx = irq_idx;
+ __entry->irq_reg = irq_reg;
+ __entry->irq_bit = irq_bit;
__entry->rc = rc;
__entry->time = time;
__entry->expected_time = expected_time;
__entry->atomic_cnt = atomic_cnt;
),
- TP_printk("id=%u, irq_idx=%d, rc=%d, time=%lld, expected=%lld cnt=%d",
- __entry->drm_id, __entry->irq_idx, __entry->rc, __entry->time,
+ TP_printk("id=%u, IRQ=[%d, %d], rc=%d, time=%lld, expected=%lld cnt=%d",
+ __entry->drm_id, __entry->irq_reg, __entry->irq_bit, __entry->rc, __entry->time,
__entry->expected_time, __entry->atomic_cnt)
);
@@ -886,30 +869,34 @@ TRACE_EVENT(dpu_intf_connect_ext_te,
);
TRACE_EVENT(dpu_core_irq_register_callback,
- TP_PROTO(int irq_idx, void *callback),
- TP_ARGS(irq_idx, callback),
+ TP_PROTO(unsigned int irq_reg, unsigned int irq_bit, void *callback),
+ TP_ARGS(irq_reg, irq_bit, callback),
TP_STRUCT__entry(
- __field( int, irq_idx )
+ __field( unsigned int, irq_reg )
+ __field( unsigned int, irq_bit )
__field( void *, callback)
),
TP_fast_assign(
- __entry->irq_idx = irq_idx;
+ __entry->irq_reg = irq_reg;
+ __entry->irq_bit = irq_bit;
__entry->callback = callback;
),
- TP_printk("irq_idx:%d callback:%ps", __entry->irq_idx,
+ TP_printk("IRQ=[%d, %d] callback:%ps", __entry->irq_reg, __entry->irq_bit,
__entry->callback)
);
TRACE_EVENT(dpu_core_irq_unregister_callback,
- TP_PROTO(int irq_idx),
- TP_ARGS(irq_idx),
+ TP_PROTO(unsigned int irq_reg, unsigned int irq_bit),
+ TP_ARGS(irq_reg, irq_bit),
TP_STRUCT__entry(
- __field( int, irq_idx )
+ __field( unsigned int, irq_reg )
+ __field( unsigned int, irq_bit )
),
TP_fast_assign(
- __entry->irq_idx = irq_idx;
+ __entry->irq_reg = irq_reg;
+ __entry->irq_bit = irq_bit;
),
- TP_printk("irq_idx:%d", __entry->irq_idx)
+ TP_printk("IRQ=[%d, %d]", __entry->irq_reg, __entry->irq_bit)
);
TRACE_EVENT(dpu_core_perf_update_clk,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
index 1305e250b71e..47c02b98eac3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -169,23 +169,16 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_ot_params *params)
{
struct dpu_hw_vbif *vbif;
- struct dpu_hw_mdp *mdp;
- bool forced_on = false;
u32 ot_lim;
int ret;
- mdp = dpu_kms->hw_mdp;
-
vbif = dpu_get_vbif(dpu_kms, params->vbif_idx);
- if (!vbif || !mdp) {
- DRM_DEBUG_ATOMIC("invalid arguments vbif %d mdp %d\n",
- vbif != NULL, mdp != NULL);
+ if (!vbif) {
+ DRM_DEBUG_ATOMIC("invalid arguments vbif %d\n", vbif != NULL);
return;
}
- if (!mdp->ops.setup_clk_force_ctrl ||
- !vbif->ops.set_limit_conf ||
- !vbif->ops.set_halt_ctrl)
+ if (!vbif->ops.set_limit_conf || !vbif->ops.set_halt_ctrl)
return;
/* set write_gather_en for all write clients */
@@ -200,8 +193,6 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim,
params->vbif_idx);
- forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
-
vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim);
vbif->ops.set_halt_ctrl(vbif, params->xin_id, true);
@@ -211,25 +202,19 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
trace_dpu_vbif_wait_xin_halt_fail(vbif->idx, params->xin_id);
vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
-
- if (forced_on)
- mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
}
void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_qos_params *params)
{
struct dpu_hw_vbif *vbif;
- struct dpu_hw_mdp *mdp;
- bool forced_on = false;
const struct dpu_vbif_qos_tbl *qos_tbl;
int i;
- if (!params || !dpu_kms->hw_mdp) {
+ if (!params) {
DPU_ERROR("invalid arguments\n");
return;
}
- mdp = dpu_kms->hw_mdp;
vbif = dpu_get_vbif(dpu_kms, params->vbif_idx);
@@ -238,7 +223,7 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
return;
}
- if (!vbif->ops.set_qos_remap || !mdp->ops.setup_clk_force_ctrl) {
+ if (!vbif->ops.set_qos_remap) {
DRM_DEBUG_ATOMIC("qos remap not supported\n");
return;
}
@@ -251,8 +236,6 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
return;
}
- forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
-
for (i = 0; i < qos_tbl->npriority_lvl; i++) {
DRM_DEBUG_ATOMIC("%s xin:%d lvl:%d/%d\n",
dpu_vbif_name(params->vbif_idx), params->xin_id, i,
@@ -260,9 +243,6 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
vbif->ops.set_qos_remap(vbif, params->xin_id, i,
qos_tbl->priority_lvl[i]);
}
-
- if (forced_on)
- mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
}
void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
index ab490177d886..e1b1f7f4e4be 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
@@ -16,13 +16,11 @@ struct dpu_vbif_set_ot_params {
bool rd;
bool is_wfd;
u32 vbif_idx;
- u32 clk_ctrl;
};
struct dpu_vbif_set_memtype_params {
u32 xin_id;
u32 vbif_idx;
- u32 clk_ctrl;
bool is_cacheable;
};
@@ -30,14 +28,12 @@ struct dpu_vbif_set_memtype_params {
* struct dpu_vbif_set_qos_params - QoS remapper parameter
* @vbif_idx: vbif identifier
* @xin_id: client interface identifier
- * @clk_ctrl: clock control identifier of the xin
* @num: pipe identifier (debug only)
* @is_rt: true if pipe is used in real-time use case
*/
struct dpu_vbif_set_qos_params {
u32 vbif_idx;
u32 xin_id;
- u32 clk_ctrl;
u32 num;
bool is_rt;
};
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 6e37072ed302..4ba1cb74ad76 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -135,8 +135,6 @@ static void mdp4_destroy(struct msm_kms *kms)
pm_runtime_disable(dev);
mdp_kms_destroy(&mdp4_kms->base);
-
- kfree(mdp4_kms);
}
static const struct mdp_kms_funcs kms_funcs = {
@@ -332,7 +330,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
goto fail;
}
- priv->crtcs[priv->num_crtcs++] = crtc;
+ priv->num_crtcs++;
}
/*
@@ -380,57 +378,27 @@ static int mdp4_kms_init(struct drm_device *dev)
{
struct platform_device *pdev = to_platform_device(dev->dev);
struct msm_drm_private *priv = dev->dev_private;
- struct mdp4_kms *mdp4_kms;
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(priv->kms));
struct msm_kms *kms = NULL;
struct msm_mmu *mmu;
struct msm_gem_address_space *aspace;
- int irq, ret;
+ int ret;
u32 major, minor;
unsigned long max_clk;
/* TODO: Chips that aren't apq8064 have a 200 Mhz max_clk */
max_clk = 266667000;
- mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
- if (!mdp4_kms) {
- DRM_DEV_ERROR(dev->dev, "failed to allocate kms\n");
- return -ENOMEM;
- }
-
ret = mdp_kms_init(&mdp4_kms->base, &kms_funcs);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to init kms\n");
goto fail;
}
- priv->kms = &mdp4_kms->base.base;
kms = priv->kms;
mdp4_kms->dev = dev;
- mdp4_kms->mmio = msm_ioremap(pdev, NULL);
- if (IS_ERR(mdp4_kms->mmio)) {
- ret = PTR_ERR(mdp4_kms->mmio);
- goto fail;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
- goto fail;
- }
-
- kms->irq = irq;
-
- /* NOTE: driver for this regulator still missing upstream.. use
- * _get_exclusive() and ignore the error if it does not exist
- * (and hope that the bootloader left it on for us)
- */
- mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
- if (IS_ERR(mdp4_kms->vdd))
- mdp4_kms->vdd = NULL;
-
if (mdp4_kms->vdd) {
ret = regulator_enable(mdp4_kms->vdd);
if (ret) {
@@ -439,24 +407,6 @@ static int mdp4_kms_init(struct drm_device *dev)
}
}
- mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
- if (IS_ERR(mdp4_kms->clk)) {
- DRM_DEV_ERROR(dev->dev, "failed to get core_clk\n");
- ret = PTR_ERR(mdp4_kms->clk);
- goto fail;
- }
-
- mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
- if (IS_ERR(mdp4_kms->pclk))
- mdp4_kms->pclk = NULL;
-
- mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
- if (IS_ERR(mdp4_kms->axi_clk)) {
- DRM_DEV_ERROR(dev->dev, "failed to get axi_clk\n");
- ret = PTR_ERR(mdp4_kms->axi_clk);
- goto fail;
- }
-
clk_set_rate(mdp4_kms->clk, max_clk);
read_mdp_hw_revision(mdp4_kms, &major, &minor);
@@ -471,10 +421,9 @@ static int mdp4_kms_init(struct drm_device *dev)
mdp4_kms->rev = minor;
if (mdp4_kms->rev >= 2) {
- mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
- if (IS_ERR(mdp4_kms->lut_clk)) {
+ if (!mdp4_kms->lut_clk) {
DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
- ret = PTR_ERR(mdp4_kms->lut_clk);
+ ret = -ENODEV;
goto fail;
}
clk_set_rate(mdp4_kms->lut_clk, max_clk);
@@ -552,20 +501,64 @@ fail:
}
static const struct dev_pm_ops mdp4_pm_ops = {
- .prepare = msm_pm_prepare,
- .complete = msm_pm_complete,
+ .prepare = msm_kms_pm_prepare,
+ .complete = msm_kms_pm_complete,
};
static int mdp4_probe(struct platform_device *pdev)
{
- return msm_drv_probe(&pdev->dev, mdp4_kms_init);
+ struct device *dev = &pdev->dev;
+ struct mdp4_kms *mdp4_kms;
+ int irq;
+
+ mdp4_kms = devm_kzalloc(dev, sizeof(*mdp4_kms), GFP_KERNEL);
+ if (!mdp4_kms)
+ return dev_err_probe(dev, -ENOMEM, "failed to allocate kms\n");
+
+ mdp4_kms->mmio = msm_ioremap(pdev, NULL);
+ if (IS_ERR(mdp4_kms->mmio))
+ return PTR_ERR(mdp4_kms->mmio);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "failed to get irq\n");
+
+ mdp4_kms->base.base.irq = irq;
+
+ /* NOTE: driver for this regulator still missing upstream.. use
+ * _get_exclusive() and ignore the error if it does not exist
+ * (and hope that the bootloader left it on for us)
+ */
+ mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
+ if (IS_ERR(mdp4_kms->vdd))
+ mdp4_kms->vdd = NULL;
+
+ mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
+ if (IS_ERR(mdp4_kms->clk))
+ return dev_err_probe(dev, PTR_ERR(mdp4_kms->clk), "failed to get core_clk\n");
+
+ mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
+ if (IS_ERR(mdp4_kms->pclk))
+ mdp4_kms->pclk = NULL;
+
+ mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
+ if (IS_ERR(mdp4_kms->axi_clk))
+ return dev_err_probe(dev, PTR_ERR(mdp4_kms->axi_clk), "failed to get axi_clk\n");
+
+ /*
+ * This is required for revn >= 2. Handle errors here and let the kms
+ * init bail out if the clock is not provided.
+ */
+ mdp4_kms->lut_clk = devm_clk_get_optional(&pdev->dev, "lut_clk");
+ if (IS_ERR(mdp4_kms->lut_clk))
+ return dev_err_probe(dev, PTR_ERR(mdp4_kms->lut_clk), "failed to get lut_clk\n");
+
+ return msm_drv_probe(&pdev->dev, mdp4_kms_init, &mdp4_kms->base.base);
}
-static int mdp4_remove(struct platform_device *pdev)
+static void mdp4_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &msm_drm_ops);
-
- return 0;
}
static const struct of_device_id mdp4_dt_match[] = {
@@ -576,8 +569,8 @@ MODULE_DEVICE_TABLE(of, mdp4_dt_match);
static struct platform_driver mdp4_platform_driver = {
.probe = mdp4_probe,
- .remove = mdp4_remove,
- .shutdown = msm_drv_shutdown,
+ .remove_new = mdp4_remove,
+ .shutdown = msm_kms_shutdown,
.driver = {
.name = "mdp4",
.of_match_table = mdp4_dt_match,
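
The mdp4 changes above move all resource acquisition (mmio, irq, regulator, clocks) from the DRM-side kms init into the platform driver's probe, so devm-managed getters and dev_err_probe() handle probe deferral and error cleanup. A minimal sketch of that pattern, using hypothetical foo_* names rather than the msm code itself:

	#include <linux/clk.h>
	#include <linux/platform_device.h>

	struct foo_priv {
		struct clk *core_clk;
		struct clk *lut_clk;	/* optional on older revisions */
		int irq;
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		struct foo_priv *priv;

		priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		/* mandatory clock: errors (including -EPROBE_DEFER) are logged and returned */
		priv->core_clk = devm_clk_get(dev, "core");
		if (IS_ERR(priv->core_clk))
			return dev_err_probe(dev, PTR_ERR(priv->core_clk),
					     "failed to get core clock\n");

		/* optional clock: devm_clk_get_optional() returns NULL when it is not described */
		priv->lut_clk = devm_clk_get_optional(dev, "lut");
		if (IS_ERR(priv->lut_clk))
			return dev_err_probe(dev, PTR_ERR(priv->lut_clk),
					     "failed to get lut clock\n");

		priv->irq = platform_get_irq(pdev, 0);
		if (priv->irq < 0)
			return dev_err_probe(dev, priv->irq, "failed to get irq\n");

		platform_set_drvdata(pdev, priv);
		return 0;
	}
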
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 323079cfd698..ec933d597e20 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -497,7 +497,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
DRM_DEV_ERROR(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
goto fail;
}
- priv->crtcs[priv->num_crtcs++] = crtc;
+ priv->num_crtcs++;
}
/*
@@ -554,20 +554,16 @@ static int mdp5_kms_init(struct drm_device *dev)
struct platform_device *pdev;
struct mdp5_kms *mdp5_kms;
struct mdp5_cfg *config;
- struct msm_kms *kms;
+ struct msm_kms *kms = priv->kms;
struct msm_gem_address_space *aspace;
- int irq, i, ret;
+ int i, ret;
ret = mdp5_init(to_platform_device(dev->dev), dev);
if (ret)
return ret;
- /* priv->kms would have been populated by the MDP5 driver */
- kms = priv->kms;
- if (!kms)
- return -ENOMEM;
-
mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+
pdev = mdp5_kms->pdev;
ret = mdp_kms_init(&mdp5_kms->base, &kms_funcs);
@@ -576,15 +572,6 @@ static int mdp5_kms_init(struct drm_device *dev)
goto fail;
}
- irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
- if (!irq) {
- ret = -EINVAL;
- DRM_DEV_ERROR(&pdev->dev, "failed to get irq\n");
- goto fail;
- }
-
- kms->irq = irq;
-
config = mdp5_cfg_get_config(mdp5_kms->cfg);
/* make sure things are off before attaching iommu (bootloader could
@@ -787,60 +774,23 @@ static int interface_init(struct mdp5_kms *mdp5_kms)
static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
- struct mdp5_kms *mdp5_kms;
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
struct mdp5_cfg *config;
u32 major, minor;
int ret;
- mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL);
- if (!mdp5_kms) {
- ret = -ENOMEM;
- goto fail;
- }
-
- spin_lock_init(&mdp5_kms->resource_lock);
-
mdp5_kms->dev = dev;
- mdp5_kms->pdev = pdev;
ret = mdp5_global_obj_init(mdp5_kms);
if (ret)
goto fail;
- mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys");
- if (IS_ERR(mdp5_kms->mmio)) {
- ret = PTR_ERR(mdp5_kms->mmio);
- goto fail;
- }
-
- /* mandatory clocks: */
- ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
- if (ret)
- goto fail;
- ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
- if (ret)
- goto fail;
- ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
- if (ret)
- goto fail;
- ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
- if (ret)
- goto fail;
-
- /* optional clocks: */
- get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
- get_clk(pdev, &mdp5_kms->tbu_clk, "tbu", false);
- get_clk(pdev, &mdp5_kms->tbu_rt_clk, "tbu_rt", false);
-
/* we need to set a default rate before enabling. Set a safe
* rate first, then figure out hw revision, and then set a
* more optimal rate:
*/
clk_set_rate(mdp5_kms->core_clk, 200000000);
- /* set uninit-ed kms */
- priv->kms = &mdp5_kms->base.base;
-
pm_runtime_enable(&pdev->dev);
mdp5_kms->rpm_enabled = true;
@@ -894,8 +844,7 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
return 0;
fail:
- if (mdp5_kms)
- mdp5_destroy(mdp5_kms);
+ mdp5_destroy(mdp5_kms);
return ret;
}
@@ -931,22 +880,59 @@ static int mdp5_setup_interconnect(struct platform_device *pdev)
static int mdp5_dev_probe(struct platform_device *pdev)
{
- int ret;
+ struct mdp5_kms *mdp5_kms;
+ int ret, irq;
DBG("");
+ mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL);
+ if (!mdp5_kms)
+ return -ENOMEM;
+
ret = mdp5_setup_interconnect(pdev);
if (ret)
return ret;
- return msm_drv_probe(&pdev->dev, mdp5_kms_init);
+ mdp5_kms->pdev = pdev;
+
+ spin_lock_init(&mdp5_kms->resource_lock);
+
+ mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys");
+ if (IS_ERR(mdp5_kms->mmio))
+ return PTR_ERR(mdp5_kms->mmio);
+
+ /* mandatory clocks: */
+ ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
+ if (ret)
+ return ret;
+ ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
+ if (ret)
+ return ret;
+ ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
+ if (ret)
+ return ret;
+ ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
+ if (ret)
+ return ret;
+
+ /* optional clocks: */
+ get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
+ get_clk(pdev, &mdp5_kms->tbu_clk, "tbu", false);
+ get_clk(pdev, &mdp5_kms->tbu_rt_clk, "tbu_rt", false);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return dev_err_probe(&pdev->dev, irq, "failed to get irq\n");
+
+ mdp5_kms->base.base.irq = irq;
+
+ return msm_drv_probe(&pdev->dev, mdp5_kms_init, &mdp5_kms->base.base);
}
-static int mdp5_dev_remove(struct platform_device *pdev)
+static void mdp5_dev_remove(struct platform_device *pdev)
{
DBG("");
component_master_del(&pdev->dev, &msm_drm_ops);
- return 0;
}
static __maybe_unused int mdp5_runtime_suspend(struct device *dev)
@@ -973,8 +959,8 @@ static __maybe_unused int mdp5_runtime_resume(struct device *dev)
static const struct dev_pm_ops mdp5_pm_ops = {
SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL)
- .prepare = msm_pm_prepare,
- .complete = msm_pm_complete,
+ .prepare = msm_kms_pm_prepare,
+ .complete = msm_kms_pm_complete,
};
static const struct of_device_id mdp5_dt_match[] = {
@@ -987,8 +973,8 @@ MODULE_DEVICE_TABLE(of, mdp5_dt_match);
static struct platform_driver mdp5_driver = {
.probe = mdp5_dev_probe,
- .remove = mdp5_dev_remove,
- .shutdown = msm_drv_shutdown,
+ .remove_new = mdp5_dev_remove,
+ .shutdown = msm_kms_shutdown,
.driver = {
.name = "msm_mdp",
.of_match_table = mdp5_dt_match,
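
Both mdp4 and mdp5 (and the dp/dsi/hdmi drivers below) are also switched from .remove to the void-returning .remove_new callback; the int return value of .remove was ignored by the driver core, so the conversion is mechanical. A sketch of the shape, reusing the hypothetical foo_probe() from the sketch above:

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static void foo_remove(struct platform_device *pdev)
	{
		/* cleanup failures can only be logged; there is no return value */
		pm_runtime_disable(&pdev->dev);
	}

	static struct platform_driver foo_driver = {
		.probe		= foo_probe,
		.remove_new	= foo_remove,	/* void-returning remove callback */
		.driver = {
			.name	= "foo",
		},
	};
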
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index bd2c4ac45601..0d5ff03cb091 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -130,8 +130,7 @@ static void mdp5_plane_destroy_state(struct drm_plane *plane,
{
struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
- if (state->fb)
- drm_framebuffer_put(state->fb);
+ __drm_atomic_helper_plane_destroy_state(state);
kfree(pstate);
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
index 56a3063545ec..b68682c1b5bc 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
@@ -379,7 +379,7 @@ struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_
{
struct mdp5_smp_state *state;
struct mdp5_global_state *global_state;
- struct mdp5_smp *smp = NULL;
+ struct mdp5_smp *smp;
int ret;
smp = kzalloc(sizeof(*smp), GFP_KERNEL);
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
index acfe1b31e079..add72bbc28b1 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
@@ -192,5 +192,5 @@ void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len,
new_blk->base_addr = base_addr;
msm_disp_state_dump_regs(&new_blk->state, new_blk->size, base_addr);
- list_add(&new_blk->node, &disp_state->blocks);
+ list_add_tail(&new_blk->node, &disp_state->blocks);
}
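
The msm_disp_snapshot_util change swaps list_add() for list_add_tail() so register blocks are dumped in the order they were captured instead of last-first. A tiny illustration, with a hypothetical node type:

	#include <linux/list.h>

	struct blk {
		int id;
		struct list_head node;
	};

	static void capture_block(struct list_head *blocks, struct blk *b)
	{
		/* list_add() pushes to the head and reverses iteration order;
		 * list_add_tail() keeps blocks in capture order */
		list_add_tail(&b->node, blocks);
	}
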
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
index 1245c7aa49df..4a2e479723a8 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.c
+++ b/drivers/gpu/drm/msm/dp/dp_audio.c
@@ -6,7 +6,7 @@
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_edid.h>
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index a7a5c7e0ab92..77a8d9366ed7 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1774,13 +1774,6 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
return rc;
while (--link_train_max_retries) {
- rc = dp_ctrl_reinitialize_mainlink(ctrl);
- if (rc) {
- DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n",
- rc);
- break;
- }
-
training_step = DP_TRAINING_NONE;
rc = dp_ctrl_setup_main_link(ctrl, &training_step);
if (rc == 0) {
@@ -1832,6 +1825,12 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
/* stop link training before start re training */
dp_ctrl_clear_training_pattern(ctrl);
}
+
+ rc = dp_ctrl_reinitialize_mainlink(ctrl);
+ if (rc) {
+ DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n", rc);
+ break;
+ }
}
if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 76f13954015b..1b88fb52726f 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -88,7 +88,6 @@ struct dp_display_private {
bool audio_supported;
struct drm_device *drm_dev;
- struct platform_device *pdev;
struct dentry *root;
struct dp_parser *parser;
@@ -341,21 +340,6 @@ static const struct component_ops dp_display_comp_ops = {
.unbind = dp_display_unbind,
};
-static bool dp_display_is_ds_bridge(struct dp_panel *panel)
-{
- return (panel->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
- DP_DWN_STRM_PORT_PRESENT);
-}
-
-static bool dp_display_is_sink_count_zero(struct dp_display_private *dp)
-{
- drm_dbg_dp(dp->drm_dev, "present=%#x sink_count=%d\n",
- dp->panel->dpcd[DP_DOWNSTREAMPORT_PRESENT],
- dp->link->sink_count);
- return dp_display_is_ds_bridge(dp->panel) &&
- (dp->link->sink_count == 0);
-}
-
static void dp_display_send_hpd_event(struct msm_dp *dp_display)
{
struct dp_display_private *dp;
@@ -379,8 +363,14 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp,
}
/* reset video pattern flag on disconnect */
- if (!hpd)
+ if (!hpd) {
dp->panel->video_test = false;
+ if (!dp->dp_display.is_edp)
+ drm_dp_set_subconnector_property(dp->dp_display.connector,
+ connector_status_disconnected,
+ dp->panel->dpcd,
+ dp->panel->downstream_ports);
+ }
dp->dp_display.is_connected = hpd;
@@ -408,6 +398,12 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
dp_link_process_request(dp->link);
+ if (!dp->dp_display.is_edp)
+ drm_dp_set_subconnector_property(dp->dp_display.connector,
+ connector_status_connected,
+ dp->panel->dpcd,
+ dp->panel->downstream_ports);
+
edid = dp->panel->edid;
dp->dp_display.psr_supported = dp->panel->psr_cap.version && psr_enabled;
@@ -514,7 +510,7 @@ static int dp_display_handle_port_ststus_changed(struct dp_display_private *dp)
{
int rc = 0;
- if (dp_display_is_sink_count_zero(dp)) {
+ if (drm_dp_is_branch(dp->panel->dpcd) && dp->link->sink_count == 0) {
drm_dbg_dp(dp->drm_dev, "sink count is zero, nothing to do\n");
if (dp->hpd_state != ST_DISCONNECTED) {
dp->hpd_state = ST_DISCONNECT_PENDING;
@@ -603,7 +599,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
return 0;
}
- ret = dp_display_usbpd_configure_cb(&dp->pdev->dev);
+ ret = dp_display_usbpd_configure_cb(&dp->dp_display.pdev->dev);
if (ret) { /* link train failed */
dp->hpd_state = ST_DISCONNECTED;
} else {
@@ -651,7 +647,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
if (dp->link->sink_count == 0) {
dp_display_host_phy_exit(dp);
}
- dp_display_notify_disconnect(&dp->pdev->dev);
+ dp_display_notify_disconnect(&dp->dp_display.pdev->dev);
mutex_unlock(&dp->event_mutex);
return 0;
} else if (state == ST_DISCONNECT_PENDING) {
@@ -661,7 +657,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
dp_ctrl_off_link(dp->ctrl);
dp_display_host_phy_exit(dp);
dp->hpd_state = ST_DISCONNECTED;
- dp_display_notify_disconnect(&dp->pdev->dev);
+ dp_display_notify_disconnect(&dp->dp_display.pdev->dev);
mutex_unlock(&dp->event_mutex);
return 0;
}
@@ -670,7 +666,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
* We don't need separate work for disconnect as
* connect/attention interrupts are disabled
*/
- dp_display_notify_disconnect(&dp->pdev->dev);
+ dp_display_notify_disconnect(&dp->dp_display.pdev->dev);
if (state == ST_DISPLAY_OFF) {
dp->hpd_state = ST_DISCONNECTED;
@@ -712,7 +708,7 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
return 0;
}
- dp_display_usbpd_attention_cb(&dp->pdev->dev);
+ dp_display_usbpd_attention_cb(&dp->dp_display.pdev->dev);
drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);
@@ -733,12 +729,12 @@ static void dp_display_deinit_sub_modules(struct dp_display_private *dp)
static int dp_init_sub_modules(struct dp_display_private *dp)
{
int rc = 0;
- struct device *dev = &dp->pdev->dev;
+ struct device *dev = &dp->dp_display.pdev->dev;
struct dp_panel_in panel_in = {
.dev = dev,
};
- dp->parser = dp_parser_get(dp->pdev);
+ dp->parser = dp_parser_get(dp->dp_display.pdev);
if (IS_ERR(dp->parser)) {
rc = PTR_ERR(dp->parser);
DRM_ERROR("failed to initialize parser, rc = %d\n", rc);
@@ -799,7 +795,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
goto error_ctrl;
}
- dp->audio = dp_audio_get(dp->pdev, dp->panel, dp->catalog);
+ dp->audio = dp_audio_get(dp->dp_display.pdev, dp->panel, dp->catalog);
if (IS_ERR(dp->audio)) {
rc = PTR_ERR(dp->audio);
pr_err("failed to initialize audio, rc = %d\n", rc);
@@ -1205,7 +1201,7 @@ int dp_display_request_irq(struct msm_dp *dp_display)
dp = container_of(dp_display, struct dp_display_private, dp_display);
- dp->irq = irq_of_parse_and_map(dp->pdev->dev.of_node, 0);
+ dp->irq = irq_of_parse_and_map(dp->dp_display.pdev->dev.of_node, 0);
if (!dp->irq) {
DRM_ERROR("failed to get irq\n");
return -EINVAL;
@@ -1261,7 +1257,7 @@ static int dp_display_probe(struct platform_device *pdev)
if (!desc)
return -EINVAL;
- dp->pdev = pdev;
+ dp->dp_display.pdev = pdev;
dp->name = "drm_dp";
dp->id = desc->id;
dp->dp_display.connector_type = desc->connector_type;
@@ -1296,7 +1292,7 @@ static int dp_display_probe(struct platform_device *pdev)
return rc;
}
-static int dp_display_remove(struct platform_device *pdev)
+static void dp_display_remove(struct platform_device *pdev)
{
struct dp_display_private *dp = dev_get_dp_display_private(&pdev->dev);
@@ -1304,8 +1300,6 @@ static int dp_display_remove(struct platform_device *pdev)
dp_display_deinit_sub_modules(dp);
platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static int dp_pm_resume(struct device *dev)
@@ -1415,7 +1409,7 @@ static const struct dev_pm_ops dp_pm_ops = {
static struct platform_driver dp_display_driver = {
.probe = dp_display_probe,
- .remove = dp_display_remove,
+ .remove_new = dp_display_remove,
.driver = {
.name = "msm-dp-display",
.of_match_table = dp_dt_match,
@@ -1469,7 +1463,7 @@ void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor)
int rc;
dp = container_of(dp_display, struct dp_display_private, dp_display);
- dev = &dp->pdev->dev;
+ dev = &dp->dp_display.pdev->dev;
dp->debug = dp_debug_get(dev, dp->panel,
dp->link, dp->dp_display.connector,
@@ -1489,7 +1483,7 @@ static int dp_display_get_next_bridge(struct msm_dp *dp)
struct device *dev;
dp_priv = container_of(dp, struct dp_display_private, dp_display);
- dev = &dp_priv->pdev->dev;
+ dev = &dp_priv->dp_display.pdev->dev;
aux_bus = of_get_child_by_name(dev->of_node, "aux-bus");
if (aux_bus && dp->is_edp) {
@@ -1541,7 +1535,6 @@ error:
int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
struct drm_encoder *encoder)
{
- struct msm_drm_private *priv = dev->dev_private;
struct dp_display_private *dp_priv;
int ret;
@@ -1559,17 +1552,13 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
if (ret)
return ret;
- dp_display->bridge = dp_bridge_init(dp_display, dev, encoder);
- if (IS_ERR(dp_display->bridge)) {
- ret = PTR_ERR(dp_display->bridge);
+ ret = dp_bridge_init(dp_display, dev, encoder);
+ if (ret) {
DRM_DEV_ERROR(dev->dev,
"failed to create dp bridge: %d\n", ret);
- dp_display->bridge = NULL;
return ret;
}
- priv->bridges[priv->num_bridges++] = dp_display->bridge;
-
dp_display->connector = dp_drm_connector_init(dp_display, encoder);
if (IS_ERR(dp_display->connector)) {
ret = PTR_ERR(dp_display->connector);
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index 1e9415ab15d8..f66cdbc35785 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -12,6 +12,7 @@
struct msm_dp {
struct drm_device *drm_dev;
+ struct platform_device *pdev;
struct device *codec_dev;
struct drm_bridge *bridge;
struct drm_connector *connector;
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 785d76639497..e3bdd7dd4cdc 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -272,7 +272,7 @@ static const struct drm_bridge_funcs edp_bridge_ops = {
.atomic_check = edp_bridge_atomic_check,
};
-struct drm_bridge *dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
+int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
struct drm_encoder *encoder)
{
int rc;
@@ -281,7 +281,7 @@ struct drm_bridge *dp_bridge_init(struct msm_dp *dp_display, struct drm_device *
dp_bridge = devm_kzalloc(dev->dev, sizeof(*dp_bridge), GFP_KERNEL);
if (!dp_bridge)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
dp_bridge->dp_display = dp_display;
@@ -307,14 +307,18 @@ struct drm_bridge *dp_bridge_init(struct msm_dp *dp_display, struct drm_device *
DRM_BRIDGE_OP_MODES;
}
- drm_bridge_add(bridge);
+ rc = devm_drm_bridge_add(dev->dev, bridge);
+ if (rc) {
+ DRM_ERROR("failed to add bridge, rc=%d\n", rc);
+
+ return rc;
+ }
rc = drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (rc) {
DRM_ERROR("failed to attach bridge, rc=%d\n", rc);
- drm_bridge_remove(bridge);
- return ERR_PTR(rc);
+ return rc;
}
if (dp_display->next_bridge) {
@@ -323,12 +327,13 @@ struct drm_bridge *dp_bridge_init(struct msm_dp *dp_display, struct drm_device *
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (rc < 0) {
DRM_ERROR("failed to attach panel bridge: %d\n", rc);
- drm_bridge_remove(bridge);
- return ERR_PTR(rc);
+ return rc;
}
}
- return bridge;
+ dp_display->bridge = bridge;
+
+ return 0;
}
/* connector initialization */
@@ -340,6 +345,9 @@ struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct dr
if (IS_ERR(connector))
return connector;
+ if (!dp_display->is_edp)
+ drm_connector_attach_dp_subconnector_property(connector);
+
drm_connector_attach_encoder(connector, encoder);
return connector;
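
Converting dp_bridge_init() to return an int lets it rely on devm_drm_bridge_add(), so the error paths above no longer need manual drm_bridge_remove() calls. A minimal sketch of the registration pattern, assuming hypothetical foo_* names:

	#include <drm/drm_bridge.h>
	#include <drm/drm_device.h>

	static int foo_bridge_init(struct drm_device *drm, struct drm_encoder *encoder,
				   struct drm_bridge *bridge)
	{
		int ret;

		/* the bridge is removed automatically when the bound device goes away */
		ret = devm_drm_bridge_add(drm->dev, bridge);
		if (ret)
			return ret;

		/* no drm_bridge_remove() needed if attach fails */
		return drm_bridge_attach(encoder, bridge, NULL,
					 DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	}
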
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h
index afe79b85e183..b3d684db2383 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.h
+++ b/drivers/gpu/drm/msm/dp/dp_drm.h
@@ -20,7 +20,7 @@ struct msm_dp_bridge {
#define to_dp_bridge(x) container_of((x), struct msm_dp_bridge, bridge)
struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct drm_encoder *encoder);
-struct drm_bridge *dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
+int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
struct drm_encoder *encoder);
void dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 42427129acea..98427d45e9a7 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -712,49 +712,17 @@ end:
return ret;
}
-/**
- * dp_link_parse_sink_count() - parses the sink count
- * @dp_link: pointer to link module data
- *
- * Parses the DPCD to check if there is an update to the sink count
- * (Byte 0x200), and whether all the sink devices connected have Content
- * Protection enabled.
- */
-static int dp_link_parse_sink_count(struct dp_link *dp_link)
-{
- ssize_t rlen;
- bool cp_ready;
-
- struct dp_link_private *link = container_of(dp_link,
- struct dp_link_private, dp_link);
-
- rlen = drm_dp_dpcd_readb(link->aux, DP_SINK_COUNT,
- &link->dp_link.sink_count);
- if (rlen < 0) {
- DRM_ERROR("sink count read failed. rlen=%zd\n", rlen);
- return rlen;
- }
-
- cp_ready = link->dp_link.sink_count & DP_SINK_CP_READY;
-
- link->dp_link.sink_count =
- DP_GET_SINK_COUNT(link->dp_link.sink_count);
-
- drm_dbg_dp(link->drm_dev, "sink_count = 0x%x, cp_ready = 0x%x\n",
- link->dp_link.sink_count, cp_ready);
- return 0;
-}
-
static int dp_link_parse_sink_status_field(struct dp_link_private *link)
{
- int len = 0;
+ int len;
link->prev_sink_count = link->dp_link.sink_count;
- len = dp_link_parse_sink_count(&link->dp_link);
+ len = drm_dp_read_sink_count(link->aux);
if (len < 0) {
DRM_ERROR("DP parse sink count failed\n");
return len;
}
+ link->dp_link.sink_count = len;
len = drm_dp_dpcd_read_link_status(link->aux,
link->link_status);
@@ -1090,7 +1058,7 @@ int dp_link_process_request(struct dp_link *dp_link)
} else if (dp_link_read_psr_error_status(link)) {
DRM_ERROR("PSR IRQ_HPD received\n");
} else if (dp_link_psr_capability_changed(link)) {
- drm_dbg_dp(link->drm_dev, "PSR Capability changed");
+ drm_dbg_dp(link->drm_dev, "PSR Capability changed\n");
} else {
ret = dp_link_process_link_status_update(link);
if (!ret) {
@@ -1107,7 +1075,7 @@ int dp_link_process_request(struct dp_link *dp_link)
}
}
- drm_dbg_dp(link->drm_dev, "sink request=%#x",
+ drm_dbg_dp(link->drm_dev, "sink request=%#x\n",
dp_link->sink_request);
return ret;
}
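
The dp_link changes drop the driver's own DP_SINK_COUNT parsing in favour of drm_dp_read_sink_count(), which already masks out the CP_READY bit and decodes the split count field. A short sketch of how the helper's return value is consumed, in a hypothetical wrapper:

	#include <drm/display/drm_dp_helper.h>

	static int foo_update_sink_count(struct drm_dp_aux *aux, u8 *sink_count)
	{
		int ret;

		/* returns the decoded sink count, or a negative errno on AUX failure */
		ret = drm_dp_read_sink_count(aux);
		if (ret < 0)
			return ret;

		*sink_count = ret;
		return 0;
	}
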
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 42d52510ffd4..127f6af995cd 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -17,7 +17,6 @@ struct dp_panel_private {
struct dp_link *link;
struct dp_catalog *catalog;
bool panel_on;
- bool aux_cfg_update_done;
};
static void dp_panel_read_psr_cap(struct dp_panel_private *panel)
@@ -43,58 +42,24 @@ static void dp_panel_read_psr_cap(struct dp_panel_private *panel)
static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
{
- int rc = 0;
- size_t len;
- ssize_t rlen;
+ int rc;
struct dp_panel_private *panel;
struct dp_link_info *link_info;
- u8 *dpcd, major = 0, minor = 0, temp;
- u32 offset = DP_DPCD_REV;
+ u8 *dpcd, major, minor;
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
dpcd = dp_panel->dpcd;
+ rc = drm_dp_read_dpcd_caps(panel->aux, dpcd);
+ if (rc)
+ return rc;
- panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
link_info = &dp_panel->link_info;
-
- rlen = drm_dp_dpcd_read(panel->aux, offset,
- dpcd, (DP_RECEIVER_CAP_SIZE + 1));
- if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
- DRM_ERROR("dpcd read failed, rlen=%zd\n", rlen);
- if (rlen == -ETIMEDOUT)
- rc = rlen;
- else
- rc = -EINVAL;
-
- goto end;
- }
-
- temp = dpcd[DP_TRAINING_AUX_RD_INTERVAL];
-
- /* check for EXTENDED_RECEIVER_CAPABILITY_FIELD_PRESENT */
- if (temp & BIT(7)) {
- drm_dbg_dp(panel->drm_dev,
- "using EXTENDED_RECEIVER_CAPABILITY_FIELD\n");
- offset = DPRX_EXTENDED_DPCD_FIELD;
- }
-
- rlen = drm_dp_dpcd_read(panel->aux, offset,
- dpcd, (DP_RECEIVER_CAP_SIZE + 1));
- if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
- DRM_ERROR("dpcd read failed, rlen=%zd\n", rlen);
- if (rlen == -ETIMEDOUT)
- rc = rlen;
- else
- rc = -EINVAL;
-
- goto end;
- }
-
link_info->revision = dpcd[DP_DPCD_REV];
major = (link_info->revision >> 4) & 0x0f;
minor = link_info->revision & 0x0f;
- link_info->rate = drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
- link_info->num_lanes = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+ link_info->rate = drm_dp_max_link_rate(dpcd);
+ link_info->num_lanes = drm_dp_max_lane_count(dpcd);
/* Limit data lanes from data-lanes of endpoint property of dtsi */
if (link_info->num_lanes > dp_panel->max_dp_lanes)
@@ -111,25 +76,8 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
if (drm_dp_enhanced_frame_cap(dpcd))
link_info->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
- dp_panel->dfp_present = dpcd[DP_DOWNSTREAMPORT_PRESENT];
- dp_panel->dfp_present &= DP_DWN_STRM_PORT_PRESENT;
-
- if (dp_panel->dfp_present && (dpcd[DP_DPCD_REV] > 0x10)) {
- dp_panel->ds_port_cnt = dpcd[DP_DOWN_STREAM_PORT_COUNT];
- dp_panel->ds_port_cnt &= DP_PORT_COUNT_MASK;
- len = DP_DOWNSTREAM_PORTS * DP_DOWNSTREAM_CAP_SIZE;
-
- rlen = drm_dp_dpcd_read(panel->aux,
- DP_DOWNSTREAM_PORT_0, dp_panel->ds_cap_info, len);
- if (rlen < len) {
- DRM_ERROR("ds port status failed, rlen=%zd\n", rlen);
- rc = -EINVAL;
- goto end;
- }
- }
-
dp_panel_read_psr_cap(panel);
-end:
+
return rc;
}
@@ -179,8 +127,8 @@ static int dp_panel_update_modes(struct drm_connector *connector,
int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
struct drm_connector *connector)
{
- int rc = 0, bw_code;
- int rlen, count;
+ int rc, bw_code;
+ int count;
struct dp_panel_private *panel;
if (!dp_panel || !connector) {
@@ -205,20 +153,19 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
return -EINVAL;
}
- if (dp_panel->dfp_present) {
- rlen = drm_dp_dpcd_read(panel->aux, DP_SINK_COUNT,
- &count, 1);
- if (rlen == 1) {
- count = DP_GET_SINK_COUNT(count);
- if (!count) {
- DRM_ERROR("no downstream ports connected\n");
- panel->link->sink_count = 0;
- rc = -ENOTCONN;
- goto end;
- }
+ if (drm_dp_is_branch(dp_panel->dpcd)) {
+ count = drm_dp_read_sink_count(panel->aux);
+ if (!count) {
+ panel->link->sink_count = 0;
+ return -ENOTCONN;
}
}
+ rc = drm_dp_read_downstream_info(panel->aux, dp_panel->dpcd,
+ dp_panel->downstream_ports);
+ if (rc)
+ return rc;
+
kfree(dp_panel->edid);
dp_panel->edid = NULL;
@@ -233,19 +180,6 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
}
}
- if (panel->aux_cfg_update_done) {
- drm_dbg_dp(panel->drm_dev,
- "read DPCD with updated AUX config\n");
- rc = dp_panel_read_dpcd(dp_panel);
- bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
- if (rc || !is_link_rate_valid(bw_code) ||
- !is_lane_count_valid(dp_panel->link_info.num_lanes)
- || (bw_code > dp_panel->max_bw_code)) {
- DRM_ERROR("read dpcd failed %d\n", rc);
- return rc;
- }
- panel->aux_cfg_update_done = false;
- }
end:
return rc;
}
@@ -289,26 +223,9 @@ int dp_panel_get_modes(struct dp_panel *dp_panel,
static u8 dp_panel_get_edid_checksum(struct edid *edid)
{
- struct edid *last_block;
- u8 *raw_edid;
- bool is_edid_corrupt = false;
-
- if (!edid) {
- DRM_ERROR("invalid edid input\n");
- return 0;
- }
-
- raw_edid = (u8 *)edid;
- raw_edid += (edid->extensions * EDID_LENGTH);
- last_block = (struct edid *)raw_edid;
+ edid += edid->extensions;
- /* block type extension */
- drm_edid_block_valid(raw_edid, 1, false, &is_edid_corrupt);
- if (!is_edid_corrupt)
- return last_block->checksum;
-
- DRM_ERROR("Invalid block, no checksum\n");
- return 0;
+ return edid->checksum;
}
void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
@@ -490,7 +407,6 @@ struct dp_panel *dp_panel_get(struct dp_panel_in *in)
dp_panel = &panel->dp_panel;
dp_panel->max_bw_code = DP_LINK_BW_8_1;
- panel->aux_cfg_update_done = false;
return dp_panel;
}
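
dp_panel now leans on the core DPCD helpers instead of hand-rolled AUX reads: drm_dp_read_dpcd_caps() transparently handles the extended receiver capability field, and drm_dp_read_downstream_info() fills the downstream-port array only when a branch device is present. A compact sketch of how those helpers chain, using a hypothetical foo_* wrapper:

	#include <linux/printk.h>
	#include <drm/display/drm_dp_helper.h>

	static int foo_read_sink_caps(struct drm_dp_aux *aux,
				      u8 dpcd[DP_RECEIVER_CAP_SIZE],
				      u8 ports[DP_MAX_DOWNSTREAM_PORTS])
	{
		int ret;

		/* reads the (extended, if advertised) receiver caps into dpcd[] */
		ret = drm_dp_read_dpcd_caps(aux, dpcd);
		if (ret)
			return ret;

		/* fills ports[] only when a downstream facing port is present */
		ret = drm_dp_read_downstream_info(aux, dpcd, ports);
		if (ret)
			return ret;

		pr_debug("link rate %d, lanes %d, branch=%d\n",
			 drm_dp_max_link_rate(dpcd), drm_dp_max_lane_count(dpcd),
			 drm_dp_is_branch(dpcd));
		return 0;
	}
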
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index ed1030e17e1b..a0dfc579c5f9 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -13,11 +13,6 @@
struct edid;
-#define DPRX_EXTENDED_DPCD_FIELD 0x2200
-
-#define DP_DOWNSTREAM_PORTS 4
-#define DP_DOWNSTREAM_CAP_SIZE 4
-
struct dp_display_mode {
struct drm_display_mode drm_mode;
u32 capabilities;
@@ -40,10 +35,8 @@ struct dp_panel_psr {
struct dp_panel {
/* dpcd raw data */
- u8 dpcd[DP_RECEIVER_CAP_SIZE + 1];
- u8 ds_cap_info[DP_DOWNSTREAM_PORTS * DP_DOWNSTREAM_CAP_SIZE];
- u32 ds_port_cnt;
- u32 dfp_present;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
struct dp_link_info link_info;
struct drm_dp_desc desc;
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index baab79ab6e74..c6bd7bf15605 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -17,6 +17,11 @@ struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
return msm_dsi_host_get_dsc_config(msm_dsi->host);
}
+bool msm_dsi_wide_bus_enabled(struct msm_dsi *msm_dsi)
+{
+ return msm_dsi_host_is_wide_bus_enabled(msm_dsi->host);
+}
+
static int dsi_get_phy(struct msm_dsi *msm_dsi)
{
struct platform_device *pdev = msm_dsi->pdev;
@@ -126,6 +131,7 @@ static void dsi_unbind(struct device *dev, struct device *master,
struct msm_drm_private *priv = dev_get_drvdata(master);
struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
+ msm_dsi_tx_buf_free(msm_dsi->host);
priv->dsi[msm_dsi->id] = NULL;
}
@@ -161,14 +167,12 @@ static int dsi_dev_probe(struct platform_device *pdev)
return 0;
}
-static int dsi_dev_remove(struct platform_device *pdev)
+static void dsi_dev_remove(struct platform_device *pdev)
{
struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
DBG("");
dsi_destroy(msm_dsi);
-
- return 0;
}
static const struct of_device_id dt_match[] = {
@@ -187,7 +191,7 @@ static const struct dev_pm_ops dsi_pm_ops = {
static struct platform_driver dsi_driver = {
.probe = dsi_dev_probe,
- .remove = dsi_dev_remove,
+ .remove_new = dsi_dev_remove,
.driver = {
.name = "msm_dsi",
.of_match_table = dt_match,
@@ -212,20 +216,14 @@ void __exit msm_dsi_unregister(void)
int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
struct drm_encoder *encoder)
{
- struct msm_drm_private *priv = dev->dev_private;
int ret;
- if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) {
- DRM_DEV_ERROR(dev->dev, "too many bridges\n");
- return -ENOSPC;
- }
-
msm_dsi->dev = dev;
ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to modeset init host: %d\n", ret);
- goto fail;
+ return ret;
}
if (msm_dsi_is_bonded_dsi(msm_dsi) &&
@@ -239,32 +237,20 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
msm_dsi->encoder = encoder;
- msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
- if (IS_ERR(msm_dsi->bridge)) {
- ret = PTR_ERR(msm_dsi->bridge);
+ ret = msm_dsi_manager_bridge_init(msm_dsi);
+ if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to create dsi bridge: %d\n", ret);
- msm_dsi->bridge = NULL;
- goto fail;
+ return ret;
}
ret = msm_dsi_manager_ext_bridge_init(msm_dsi->id);
if (ret) {
DRM_DEV_ERROR(dev->dev,
"failed to create dsi connector: %d\n", ret);
- goto fail;
+ return ret;
}
- priv->bridges[priv->num_bridges++] = msm_dsi->bridge;
-
return 0;
-fail:
- /* bridge/connector are normally destroyed by drm: */
- if (msm_dsi->bridge) {
- msm_dsi_manager_bridge_destroy(msm_dsi->bridge);
- msm_dsi->bridge = NULL;
- }
-
- return ret;
}
void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi)
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index bd3763a5d723..28379b1af63f 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -56,8 +56,7 @@ struct msm_dsi {
};
/* dsi manager */
-struct drm_bridge *msm_dsi_manager_bridge_init(u8 id);
-void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge);
+int msm_dsi_manager_bridge_init(struct msm_dsi *msm_dsi);
int msm_dsi_manager_ext_bridge_init(u8 id);
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
@@ -125,6 +124,7 @@ int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size);
void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host);
void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host);
void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host);
+void msm_dsi_tx_buf_free(struct mipi_dsi_host *mipi_host);
int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova);
int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova);
int dsi_clk_init_v2(struct msm_dsi_host *msm_host);
@@ -134,6 +134,7 @@ int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host);
void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host);
struct drm_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host);
+bool msm_dsi_host_is_wide_bus_enabled(struct mipi_dsi_host *host);
/* dsi phy */
struct msm_dsi_phy;
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index a4a154601114..2a7d980e12c3 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -664,6 +664,7 @@ static inline uint32_t DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP(enum dsi_rgb_swap v
return ((val) << DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__SHIFT) & DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__MASK;
}
#define DSI_CMD_MODE_MDP_CTRL2_BURST_MODE 0x00010000
+#define DSI_CMD_MODE_MDP_CTRL2_DATABUS_WIDEN 0x00100000
#define REG_DSI_CMD_MODE_MDP_STREAM2_CTRL 0x000001b8
#define DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE__MASK 0x0000003f
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 8a5fb6df7210..1f98ff74ceb0 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -160,6 +160,7 @@ static const char * const dsi_v2_4_clk_names[] = {
static const struct regulator_bulk_data dsi_v2_4_regulators[] = {
{ .supply = "vdda", .init_load_uA = 21800 }, /* 1.2 V */
+ { .supply = "refgen" },
};
static const struct msm_dsi_config sdm845_dsi_cfg = {
@@ -191,6 +192,7 @@ static const struct msm_dsi_config sm8550_dsi_cfg = {
static const struct regulator_bulk_data sc7280_dsi_regulators[] = {
{ .supply = "vdda", .init_load_uA = 8350 }, /* 1.2 V */
+ { .supply = "refgen" },
};
static const struct msm_dsi_config sc7280_dsi_cfg = {
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 3f6dfb4f9d5a..deeecdfd6c4e 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -10,7 +10,7 @@
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/of_irq.h>
#include <linux/pinctrl/consumer.h>
@@ -147,6 +147,7 @@ struct msm_dsi_host {
/* DSI 6G TX buffer*/
struct drm_gem_object *tx_gem_obj;
+ struct msm_gem_address_space *aspace;
/* DSI v2 TX buffer */
void *tx_buf;
@@ -710,6 +711,15 @@ static void dsi_ctrl_disable(struct msm_dsi_host *msm_host)
dsi_write(msm_host, REG_DSI_CTRL, 0);
}
+bool msm_dsi_host_is_wide_bus_enabled(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ return msm_host->dsc &&
+ (msm_host->cfg_hnd->major == MSM_DSI_VER_MAJOR_6G &&
+ msm_host->cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V2_5_0);
+}
+
static void dsi_ctrl_enable(struct msm_dsi_host *msm_host,
struct msm_dsi_phy_shared_timings *phy_shared_timings, struct msm_dsi_phy *phy)
{
@@ -752,6 +762,19 @@ static void dsi_ctrl_enable(struct msm_dsi_host *msm_host,
/* Always insert DCS command */
data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND;
dsi_write(msm_host, REG_DSI_CMD_CFG1, data);
+
+ if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
+ data = dsi_read(msm_host, REG_DSI_CMD_MODE_MDP_CTRL2);
+
+ if (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_3)
+ data |= DSI_CMD_MODE_MDP_CTRL2_BURST_MODE;
+
+ /* TODO: Allow for video-mode support once tested/fixed */
+ if (msm_dsi_host_is_wide_bus_enabled(&msm_host->base))
+ data |= DSI_CMD_MODE_MDP_CTRL2_DATABUS_WIDEN;
+
+ dsi_write(msm_host, REG_DSI_CMD_MODE_MDP_CTRL2, data);
+ }
}
dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL,
@@ -887,6 +910,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
u32 hdisplay = mode->hdisplay;
u32 wc;
int ret;
+ bool wide_bus_enabled = msm_dsi_host_is_wide_bus_enabled(&msm_host->base);
DBG("");
@@ -907,6 +931,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
if (msm_host->dsc) {
struct drm_dsc_config *dsc = msm_host->dsc;
+ u32 bytes_per_pclk;
/* update dsc params with timing params */
if (!dsc || !mode->hdisplay || !mode->vdisplay) {
@@ -930,7 +955,13 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
* pulse width same
*/
h_total -= hdisplay;
- hdisplay = DIV_ROUND_UP(msm_dsc_get_bytes_per_line(msm_host->dsc), 3);
+ if (wide_bus_enabled && !(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
+ bytes_per_pclk = 6;
+ else
+ bytes_per_pclk = 3;
+
+ hdisplay = DIV_ROUND_UP(msm_dsc_get_bytes_per_line(msm_host->dsc), bytes_per_pclk);
+
h_total += hdisplay;
ha_end = ha_start + hdisplay;
}
@@ -1075,9 +1106,21 @@ static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
{
+ u32 data;
+
if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
return;
+ data = dsi_read(msm_host, REG_DSI_STATUS0);
+
+ /* if video mode engine is not busy, it's because

+ * either timing engine was not turned on or the
+ * DSI controller has finished transmitting the video
+ * data already, so no need to wait in those cases
+ */
+ if (!(data & DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY))
+ return;
+
if (msm_host->power_on && msm_host->enabled) {
dsi_wait4video_done(msm_host);
/* delay 4 ms to skip BLLP */
@@ -1092,8 +1135,10 @@ int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
uint64_t iova;
u8 *data;
+ msm_host->aspace = msm_gem_address_space_get(priv->kms->aspace);
+
data = msm_gem_kernel_new(dev, size, MSM_BO_WC,
- priv->kms->aspace,
+ msm_host->aspace,
&msm_host->tx_gem_obj, &iova);
if (IS_ERR(data)) {
@@ -1122,10 +1167,10 @@ int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size)
return 0;
}
-static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
+void msm_dsi_tx_buf_free(struct mipi_dsi_host *host)
{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
struct drm_device *dev = msm_host->dev;
- struct msm_drm_private *priv;
/*
* This is possible if we're tearing down before we've had a chance to
@@ -1136,11 +1181,11 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
if (!dev)
return;
- priv = dev->dev_private;
if (msm_host->tx_gem_obj) {
- msm_gem_unpin_iova(msm_host->tx_gem_obj, priv->kms->aspace);
- drm_gem_object_put(msm_host->tx_gem_obj);
+ msm_gem_kernel_put(msm_host->tx_gem_obj, msm_host->aspace);
+ msm_gem_address_space_put(msm_host->aspace);
msm_host->tx_gem_obj = NULL;
+ msm_host->aspace = NULL;
}
if (msm_host->tx_buf)
@@ -1887,10 +1932,9 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
}
msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
- if (msm_host->irq < 0) {
- ret = msm_host->irq;
- dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
- return ret;
+ if (!msm_host->irq) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ return -EINVAL;
}
/* do not autoenable, will be enabled later */
@@ -1927,7 +1971,6 @@ void msm_dsi_host_destroy(struct mipi_dsi_host *host)
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
DBG("");
- dsi_tx_buf_free(msm_host);
if (msm_host->workqueue) {
destroy_workqueue(msm_host->workqueue);
msm_host->workqueue = NULL;
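
With DSC and the new DATABUS_WIDEN path, command-mode transfers move 6 compressed bytes per pixel clock instead of 3, which is why dsi_timing_setup() above divides the compressed bytes-per-line by a mode-dependent divisor. A small sketch of that arithmetic, with an assumed helper name:

	#include <linux/math.h>

	/* compressed "hdisplay" value programmed into the DSI timing registers */
	static u32 foo_compressed_hdisplay(u32 bytes_per_line, bool wide_bus,
					   bool video_mode)
	{
		/* wide bus (command mode only for now): 6 bytes per pclk,
		 * otherwise one 24-bit pixel, i.e. 3 bytes per pclk */
		u32 bytes_per_pclk = (wide_bus && !video_mode) ? 6 : 3;

		return DIV_ROUND_UP(bytes_per_line, bytes_per_pclk);
	}
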
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 28b8012a21f2..896f369fdd53 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -466,9 +466,8 @@ static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = {
};
/* initialize bridge */
-struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
+int msm_dsi_manager_bridge_init(struct msm_dsi *msm_dsi)
{
- struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_bridge *bridge = NULL;
struct dsi_bridge *dsi_bridge;
struct drm_encoder *encoder;
@@ -476,31 +475,27 @@ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
dsi_bridge = devm_kzalloc(msm_dsi->dev->dev,
sizeof(*dsi_bridge), GFP_KERNEL);
- if (!dsi_bridge) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!dsi_bridge)
+ return -ENOMEM;
- dsi_bridge->id = id;
+ dsi_bridge->id = msm_dsi->id;
encoder = msm_dsi->encoder;
bridge = &dsi_bridge->base;
bridge->funcs = &dsi_mgr_bridge_funcs;
- drm_bridge_add(bridge);
+ ret = devm_drm_bridge_add(msm_dsi->dev->dev, bridge);
+ if (ret)
+ return ret;
ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret)
- goto fail;
+ return ret;
- return bridge;
+ msm_dsi->bridge = bridge;
-fail:
- if (bridge)
- msm_dsi_manager_bridge_destroy(bridge);
-
- return ERR_PTR(ret);
+ return 0;
}
int msm_dsi_manager_ext_bridge_init(u8 id)
@@ -557,11 +552,6 @@ int msm_dsi_manager_ext_bridge_init(u8 id)
return 0;
}
-void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge)
-{
- drm_bridge_remove(bridge);
-}
-
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 9d5795c58a98..05621e5e7d63 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -561,6 +561,8 @@ static const struct of_device_id dsi_phy_dt_match[] = {
.data = &dsi_phy_14nm_660_cfgs },
{ .compatible = "qcom,dsi-phy-14nm-8953",
.data = &dsi_phy_14nm_8953_cfgs },
+ { .compatible = "qcom,sm6125-dsi-phy-14nm",
+ .data = &dsi_phy_14nm_2290_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY
{ .compatible = "qcom,dsi-phy-10nm",
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index 3b1ed02f644d..89a6344bc865 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -918,7 +918,7 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
if (phy->cphy_mode) {
vreg_ctrl_0 = 0x45;
- vreg_ctrl_1 = 0x45;
+ vreg_ctrl_1 = 0x41;
glbl_rescode_top_ctrl = 0x00;
glbl_rescode_bot_ctrl = 0x00;
} else {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 3132105a2a43..c8ebd75176bb 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -7,6 +7,8 @@
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_of.h>
@@ -158,24 +160,16 @@ fail:
int msm_hdmi_modeset_init(struct hdmi *hdmi,
struct drm_device *dev, struct drm_encoder *encoder)
{
- struct msm_drm_private *priv = dev->dev_private;
int ret;
- if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) {
- DRM_DEV_ERROR(dev->dev, "too many bridges\n");
- return -ENOSPC;
- }
-
hdmi->dev = dev;
hdmi->encoder = encoder;
hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
- hdmi->bridge = msm_hdmi_bridge_init(hdmi);
- if (IS_ERR(hdmi->bridge)) {
- ret = PTR_ERR(hdmi->bridge);
+ ret = msm_hdmi_bridge_init(hdmi);
+ if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to create HDMI bridge: %d\n", ret);
- hdmi->bridge = NULL;
goto fail;
}
@@ -213,16 +207,9 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
- priv->bridges[priv->num_bridges++] = hdmi->bridge;
-
return 0;
fail:
- /* bridge is normally destroyed by drm: */
- if (hdmi->bridge) {
- msm_hdmi_bridge_destroy(hdmi->bridge);
- hdmi->bridge = NULL;
- }
if (hdmi->connector) {
hdmi->connector->funcs->destroy(hdmi->connector);
hdmi->connector = NULL;
@@ -393,6 +380,9 @@ static void msm_hdmi_unbind(struct device *dev, struct device *master,
if (priv->hdmi->audio_pdev)
platform_device_unregister(priv->hdmi->audio_pdev);
+ if (priv->hdmi->bridge)
+ msm_hdmi_hpd_disable(priv->hdmi);
+
msm_hdmi_destroy(priv->hdmi);
priv->hdmi = NULL;
}
@@ -549,15 +539,13 @@ err_put_phy:
return ret;
}
-static int msm_hdmi_dev_remove(struct platform_device *pdev)
+static void msm_hdmi_dev_remove(struct platform_device *pdev)
{
struct hdmi *hdmi = dev_get_drvdata(&pdev->dev);
component_del(&pdev->dev, &msm_hdmi_ops);
msm_hdmi_put_phy(hdmi);
-
- return 0;
}
static const struct of_device_id msm_hdmi_dt_match[] = {
@@ -572,7 +560,7 @@ static const struct of_device_id msm_hdmi_dt_match[] = {
static struct platform_driver msm_hdmi_driver = {
.probe = msm_hdmi_dev_probe,
- .remove = msm_hdmi_dev_remove,
+ .remove_new = msm_hdmi_dev_remove,
.driver = {
.name = "hdmi_msm",
.of_match_table = msm_hdmi_dt_match,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index e8dbee50637f..ec5786440391 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -224,14 +224,13 @@ void msm_hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate);
* hdmi bridge:
*/
-struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi);
-void msm_hdmi_bridge_destroy(struct drm_bridge *bridge);
+int msm_hdmi_bridge_init(struct hdmi *hdmi);
void msm_hdmi_hpd_irq(struct drm_bridge *bridge);
enum drm_connector_status msm_hdmi_bridge_detect(
struct drm_bridge *bridge);
int msm_hdmi_hpd_enable(struct drm_bridge *bridge);
-void msm_hdmi_hpd_disable(struct hdmi_bridge *hdmi_bridge);
+void msm_hdmi_hpd_disable(struct hdmi *hdmi);
/*
* i2c adapter for ddc:
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 9b1391d27ed3..f5e01471b0b0 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -11,14 +11,6 @@
#include "msm_kms.h"
#include "hdmi.h"
-void msm_hdmi_bridge_destroy(struct drm_bridge *bridge)
-{
- struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
-
- msm_hdmi_hpd_disable(hdmi_bridge);
- drm_bridge_remove(bridge);
-}
-
static void msm_hdmi_power_on(struct drm_bridge *bridge)
{
struct drm_device *dev = bridge->dev;
@@ -317,7 +309,7 @@ msm_hdmi_hotplug_work(struct work_struct *work)
}
/* initialize bridge */
-struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi)
+int msm_hdmi_bridge_init(struct hdmi *hdmi)
{
struct drm_bridge *bridge = NULL;
struct hdmi_bridge *hdmi_bridge;
@@ -325,10 +317,8 @@ struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi)
hdmi_bridge = devm_kzalloc(hdmi->dev->dev,
sizeof(*hdmi_bridge), GFP_KERNEL);
- if (!hdmi_bridge) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!hdmi_bridge)
+ return -ENOMEM;
hdmi_bridge->hdmi = hdmi;
INIT_WORK(&hdmi_bridge->hpd_work, msm_hdmi_hotplug_work);
@@ -341,17 +331,15 @@ struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi)
DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_EDID;
- drm_bridge_add(bridge);
+ ret = devm_drm_bridge_add(hdmi->dev->dev, bridge);
+ if (ret)
+ return ret;
ret = drm_bridge_attach(hdmi->encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
- goto fail;
+ return ret;
- return bridge;
+ hdmi->bridge = bridge;
-fail:
- if (bridge)
- msm_hdmi_bridge_destroy(bridge);
-
- return ERR_PTR(ret);
+ return 0;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
index bfa827b47989..9ce0ffa35417 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
@@ -147,9 +147,8 @@ fail:
return ret;
}
-void msm_hdmi_hpd_disable(struct hdmi_bridge *hdmi_bridge)
+void msm_hdmi_hpd_disable(struct hdmi *hdmi)
{
- struct hdmi *hdmi = hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
int ret;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
index 9780107e1cc9..88a3423b7f24 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
@@ -3,7 +3,8 @@
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*/
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include "hdmi.h"
@@ -176,11 +177,9 @@ static int msm_hdmi_phy_probe(struct platform_device *pdev)
return 0;
}
-static int msm_hdmi_phy_remove(struct platform_device *pdev)
+static void msm_hdmi_phy_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static const struct of_device_id msm_hdmi_phy_dt_match[] = {
@@ -199,7 +198,7 @@ static const struct of_device_id msm_hdmi_phy_dt_match[] = {
static struct platform_driver msm_hdmi_phy_platform_driver = {
.probe = msm_hdmi_phy_probe,
- .remove = msm_hdmi_phy_remove,
+ .remove_new = msm_hdmi_phy_remove,
.driver = {
.name = "msm_hdmi_phy",
.of_match_table = msm_hdmi_phy_dt_match,
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index a0a936f80ae3..04d304eed223 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -266,6 +266,9 @@ static int msm_fb_show(struct seq_file *m, void *arg)
static struct drm_info_list msm_debugfs_list[] = {
{"gem", msm_gem_show},
{ "mm", msm_mm_show },
+};
+
+static struct drm_info_list msm_kms_debugfs_list[] = {
{ "fb", msm_fb_show },
};
@@ -314,8 +317,13 @@ void msm_debugfs_init(struct drm_minor *minor)
debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
dev, &msm_gpu_fops);
- debugfs_create_file("kms", S_IRUSR, minor->debugfs_root,
- dev, &msm_kms_fops);
+ if (priv->kms) {
+ drm_debugfs_create_files(msm_kms_debugfs_list,
+ ARRAY_SIZE(msm_kms_debugfs_list),
+ minor->debugfs_root, minor);
+ debugfs_create_file("kms", S_IRUSR, minor->debugfs_root,
+ dev, &msm_kms_fops);
+ }
debugfs_create_u32("hangcheck_period_ms", 0600, minor->debugfs_root,
&priv->hangcheck_period);
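
The debugfs change splits the info list so the "fb" entry (and the "kms" state file) are registered only when a KMS device exists; the headless GPU-only configuration keeps just the gem/mm entries. A sketch of the conditional registration, with hypothetical foo_* names:

	#include <linux/kernel.h>
	#include <linux/seq_file.h>
	#include <drm/drm_debugfs.h>
	#include <drm/drm_file.h>

	static int foo_fb_show(struct seq_file *m, void *arg)
	{
		seq_puts(m, "no framebuffers\n");
		return 0;
	}

	static struct drm_info_list foo_kms_debugfs_list[] = {
		{ "fb", foo_fb_show },
	};

	static void foo_debugfs_init(struct drm_minor *minor, bool have_kms)
	{
		/* display-related entries only make sense with a KMS backend */
		if (have_kms)
			drm_debugfs_create_files(foo_kms_debugfs_list,
						 ARRAY_SIZE(foo_kms_debugfs_list),
						 minor->debugfs_root, minor);
	}
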
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 891eff8433a9..3f217b578293 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -7,29 +7,17 @@
#include <linux/dma-mapping.h>
#include <linux/fault-inject.h>
-#include <linux/kthread.h>
#include <linux/of_address.h>
-#include <linux/sched/mm.h>
#include <linux/uaccess.h>
-#include <uapi/linux/sched/types.h>
-#include <drm/drm_aperture.h>
-#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
-#include <drm/drm_prime.h>
#include <drm/drm_of.h>
-#include <drm/drm_vblank.h>
-#include "disp/msm_disp_snapshot.h"
#include "msm_drv.h"
#include "msm_debugfs.h"
-#include "msm_fence.h"
-#include "msm_gem.h"
-#include "msm_gpu.h"
#include "msm_kms.h"
-#include "msm_mmu.h"
#include "adreno/adreno_gpu.h"
/*
@@ -56,16 +44,6 @@
static void msm_deinit_vram(struct drm_device *ddev);
-static const struct drm_mode_config_funcs mode_config_funcs = {
- .fb_create = msm_framebuffer_create,
- .atomic_check = msm_atomic_check,
- .atomic_commit = drm_atomic_helper_commit,
-};
-
-static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
- .atomic_commit_tail = msm_atomic_commit_tail,
-};
-
static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);
@@ -83,125 +61,11 @@ DECLARE_FAULT_ATTR(fail_gem_alloc);
DECLARE_FAULT_ATTR(fail_gem_iova);
#endif
-static irqreturn_t msm_irq(int irq, void *arg)
-{
- struct drm_device *dev = arg;
- struct msm_drm_private *priv = dev->dev_private;
- struct msm_kms *kms = priv->kms;
-
- BUG_ON(!kms);
-
- return kms->funcs->irq(kms);
-}
-
-static void msm_irq_preinstall(struct drm_device *dev)
-{
- struct msm_drm_private *priv = dev->dev_private;
- struct msm_kms *kms = priv->kms;
-
- BUG_ON(!kms);
-
- kms->funcs->irq_preinstall(kms);
-}
-
-static int msm_irq_postinstall(struct drm_device *dev)
-{
- struct msm_drm_private *priv = dev->dev_private;
- struct msm_kms *kms = priv->kms;
-
- BUG_ON(!kms);
-
- if (kms->funcs->irq_postinstall)
- return kms->funcs->irq_postinstall(kms);
-
- return 0;
-}
-
-static int msm_irq_install(struct drm_device *dev, unsigned int irq)
-{
- struct msm_drm_private *priv = dev->dev_private;
- struct msm_kms *kms = priv->kms;
- int ret;
-
- if (irq == IRQ_NOTCONNECTED)
- return -ENOTCONN;
-
- msm_irq_preinstall(dev);
-
- ret = request_irq(irq, msm_irq, 0, dev->driver->name, dev);
- if (ret)
- return ret;
-
- kms->irq_requested = true;
-
- ret = msm_irq_postinstall(dev);
- if (ret) {
- free_irq(irq, dev);
- return ret;
- }
-
- return 0;
-}
-
-static void msm_irq_uninstall(struct drm_device *dev)
-{
- struct msm_drm_private *priv = dev->dev_private;
- struct msm_kms *kms = priv->kms;
-
- kms->funcs->irq_uninstall(kms);
- if (kms->irq_requested)
- free_irq(kms->irq, dev);
-}
-
-struct msm_vblank_work {
- struct work_struct work;
- int crtc_id;
- bool enable;
- struct msm_drm_private *priv;
-};
-
-static void vblank_ctrl_worker(struct work_struct *work)
-{
- struct msm_vblank_work *vbl_work = container_of(work,
- struct msm_vblank_work, work);
- struct msm_drm_private *priv = vbl_work->priv;
- struct msm_kms *kms = priv->kms;
-
- if (vbl_work->enable)
- kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
- else
- kms->funcs->disable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
-
- kfree(vbl_work);
-}
-
-static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
- int crtc_id, bool enable)
-{
- struct msm_vblank_work *vbl_work;
-
- vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
- if (!vbl_work)
- return -ENOMEM;
-
- INIT_WORK(&vbl_work->work, vblank_ctrl_worker);
-
- vbl_work->crtc_id = crtc_id;
- vbl_work->enable = enable;
- vbl_work->priv = priv;
-
- queue_work(priv->wq, &vbl_work->work);
-
- return 0;
-}
-
static int msm_drm_uninit(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct msm_drm_private *priv = platform_get_drvdata(pdev);
struct drm_device *ddev = priv->dev;
- struct msm_kms *kms = priv->kms;
- int i;
/*
* Shutdown the hw if we're far enough along where things might be on.
@@ -212,7 +76,8 @@ static int msm_drm_uninit(struct device *dev)
*/
if (ddev->registered) {
drm_dev_unregister(ddev);
- drm_atomic_helper_shutdown(ddev);
+ if (priv->kms)
+ drm_atomic_helper_shutdown(ddev);
}
/* We must cancel and cleanup any pending vblank enable/disable
@@ -222,36 +87,13 @@ static int msm_drm_uninit(struct device *dev)
flush_workqueue(priv->wq);
- /* clean up event worker threads */
- for (i = 0; i < priv->num_crtcs; i++) {
- if (priv->event_thread[i].worker)
- kthread_destroy_worker(priv->event_thread[i].worker);
- }
-
msm_gem_shrinker_cleanup(ddev);
- drm_kms_helper_poll_fini(ddev);
-
msm_perf_debugfs_cleanup(priv);
msm_rd_debugfs_cleanup(priv);
- if (kms)
- msm_disp_snapshot_destroy(ddev);
-
- drm_mode_config_cleanup(ddev);
-
- for (i = 0; i < priv->num_bridges; i++)
- drm_bridge_remove(priv->bridges[i]);
- priv->num_bridges = 0;
-
- if (kms) {
- pm_runtime_get_sync(dev);
- msm_irq_uninstall(ddev);
- pm_runtime_put_sync(dev);
- }
-
- if (kms && kms->funcs)
- kms->funcs->destroy(kms);
+ if (priv->kms)
+ msm_drm_kms_uninit(dev);
msm_deinit_vram(ddev);
@@ -265,42 +107,6 @@ static int msm_drm_uninit(struct device *dev)
return 0;
}
-struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
-{
- struct msm_gem_address_space *aspace;
- struct msm_mmu *mmu;
- struct device *mdp_dev = dev->dev;
- struct device *mdss_dev = mdp_dev->parent;
- struct device *iommu_dev;
-
- /*
- * IOMMUs can be a part of MDSS device tree binding, or the
- * MDP/DPU device.
- */
- if (device_iommu_mapped(mdp_dev))
- iommu_dev = mdp_dev;
- else
- iommu_dev = mdss_dev;
-
- mmu = msm_iommu_new(iommu_dev, 0);
- if (IS_ERR(mmu))
- return ERR_CAST(mmu);
-
- if (!mmu) {
- drm_info(dev, "no IOMMU, fallback to phys contig buffers for scanout\n");
- return NULL;
- }
-
- aspace = msm_gem_address_space_create(mmu, "mdp_kms",
- 0x1000, 0x100000000 - 0x1000);
- if (IS_ERR(aspace)) {
- dev_err(mdp_dev, "aspace create, error %pe\n", aspace);
- mmu->funcs->destroy(mmu);
- }
-
- return aspace;
-}
-
bool msm_use_mmu(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -406,8 +212,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
{
struct msm_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *ddev;
- struct msm_kms *kms;
- int ret, i;
+ int ret;
if (drm_firmware_drivers_only())
return -ENODEV;
@@ -443,11 +248,15 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
might_lock(&priv->lru.lock);
fs_reclaim_release(GFP_KERNEL);
- drm_mode_config_init(ddev);
+ if (priv->kms_init) {
+ ret = drmm_mode_config_init(ddev);
+ if (ret)
+ goto err_destroy_wq;
+ }
ret = msm_init_vram(ddev);
if (ret)
- goto err_cleanup_mode_config;
+ goto err_destroy_wq;
dma_set_max_seg_size(dev, UINT_MAX);
@@ -456,95 +265,33 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
if (ret)
goto err_deinit_vram;
- /* the fw fb could be anywhere in memory */
- ret = drm_aperture_remove_framebuffers(drv);
+ ret = msm_gem_shrinker_init(ddev);
if (ret)
goto err_msm_uninit;
- msm_gem_shrinker_init(ddev);
-
if (priv->kms_init) {
- ret = priv->kms_init(ddev);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to load kms\n");
- priv->kms = NULL;
+ ret = msm_drm_kms_init(dev, drv);
+ if (ret)
goto err_msm_uninit;
- }
- kms = priv->kms;
} else {
/* valid only for the dummy headless case, where of_node=NULL */
WARN_ON(dev->of_node);
- kms = NULL;
- }
-
- /* Enable normalization of plane zpos */
- ddev->mode_config.normalize_zpos = true;
-
- if (kms) {
- kms->dev = ddev;
- ret = kms->funcs->hw_init(kms);
- if (ret) {
- DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
- goto err_msm_uninit;
- }
- }
-
- drm_helper_move_panel_connectors_to_head(ddev);
-
- ddev->mode_config.funcs = &mode_config_funcs;
- ddev->mode_config.helper_private = &mode_config_helper_funcs;
-
- for (i = 0; i < priv->num_crtcs; i++) {
- /* initialize event thread */
- priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
- priv->event_thread[i].dev = ddev;
- priv->event_thread[i].worker = kthread_create_worker(0,
- "crtc_event:%d", priv->event_thread[i].crtc_id);
- if (IS_ERR(priv->event_thread[i].worker)) {
- ret = PTR_ERR(priv->event_thread[i].worker);
- DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
- priv->event_thread[i].worker = NULL;
- goto err_msm_uninit;
- }
-
- sched_set_fifo(priv->event_thread[i].worker->task);
- }
-
- ret = drm_vblank_init(ddev, priv->num_crtcs);
- if (ret < 0) {
- DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
- goto err_msm_uninit;
- }
-
- if (kms) {
- pm_runtime_get_sync(dev);
- ret = msm_irq_install(ddev, kms->irq);
- pm_runtime_put_sync(dev);
- if (ret < 0) {
- DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
- goto err_msm_uninit;
- }
+ ddev->driver_features &= ~DRIVER_MODESET;
+ ddev->driver_features &= ~DRIVER_ATOMIC;
}
ret = drm_dev_register(ddev, 0);
if (ret)
goto err_msm_uninit;
- if (kms) {
- ret = msm_disp_snapshot_init(ddev);
- if (ret)
- DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
- }
- drm_mode_config_reset(ddev);
-
ret = msm_debugfs_late_init(ddev);
if (ret)
goto err_msm_uninit;
- drm_kms_helper_poll_init(ddev);
-
- if (kms)
+ if (priv->kms_init) {
+ drm_kms_helper_poll_init(ddev);
msm_fbdev_setup(ddev);
+ }
return 0;
@@ -555,8 +302,7 @@ err_msm_uninit:
err_deinit_vram:
msm_deinit_vram(ddev);
-err_cleanup_mode_config:
- drm_mode_config_cleanup(ddev);
+err_destroy_wq:
destroy_workqueue(priv->wq);
err_put_dev:
drm_dev_put(ddev);
@@ -636,30 +382,6 @@ static void msm_postclose(struct drm_device *dev, struct drm_file *file)
context_close(ctx);
}
-int msm_crtc_enable_vblank(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- unsigned int pipe = crtc->index;
- struct msm_drm_private *priv = dev->dev_private;
- struct msm_kms *kms = priv->kms;
- if (!kms)
- return -ENXIO;
- drm_dbg_vbl(dev, "crtc=%u", pipe);
- return vblank_ctrl_queue_work(priv, pipe, true);
-}
-
-void msm_crtc_disable_vblank(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- unsigned int pipe = crtc->index;
- struct msm_drm_private *priv = dev->dev_private;
- struct msm_kms *kms = priv->kms;
- if (!kms)
- return;
- drm_dbg_vbl(dev, "crtc=%u", pipe);
- vblank_ctrl_queue_work(priv, pipe, false);
-}
-
/*
* DRM ioctls:
*/
@@ -1086,10 +808,7 @@ static const struct drm_driver msm_driver = {
.postclose = msm_postclose,
.dumb_create = msm_gem_dumb_create,
.dumb_map_offset = msm_gem_dumb_map_offset,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
- .gem_prime_mmap = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = msm_debugfs_init,
#endif
@@ -1105,33 +824,6 @@ static const struct drm_driver msm_driver = {
.patchlevel = MSM_VERSION_PATCHLEVEL,
};
-int msm_pm_prepare(struct device *dev)
-{
- struct msm_drm_private *priv = dev_get_drvdata(dev);
- struct drm_device *ddev = priv ? priv->dev : NULL;
-
- if (!priv || !priv->kms)
- return 0;
-
- return drm_mode_config_helper_suspend(ddev);
-}
-
-void msm_pm_complete(struct device *dev)
-{
- struct msm_drm_private *priv = dev_get_drvdata(dev);
- struct drm_device *ddev = priv ? priv->dev : NULL;
-
- if (!priv || !priv->kms)
- return;
-
- drm_mode_config_helper_resume(ddev);
-}
-
-static const struct dev_pm_ops msm_pm_ops = {
- .prepare = msm_pm_prepare,
- .complete = msm_pm_complete,
-};
-
/*
* Componentized driver support:
*/
@@ -1233,7 +925,8 @@ const struct component_master_ops msm_drm_ops = {
};
int msm_drv_probe(struct device *master_dev,
- int (*kms_init)(struct drm_device *dev))
+ int (*kms_init)(struct drm_device *dev),
+ struct msm_kms *kms)
{
struct msm_drm_private *priv;
struct component_match *match = NULL;
@@ -1243,6 +936,7 @@ int msm_drv_probe(struct device *master_dev,
if (!priv)
return -ENOMEM;
+ priv->kms = kms;
priv->kms_init = kms_init;
dev_set_drvdata(master_dev, priv);
@@ -1278,39 +972,19 @@ int msm_drv_probe(struct device *master_dev,
static int msm_pdev_probe(struct platform_device *pdev)
{
- return msm_drv_probe(&pdev->dev, NULL);
+ return msm_drv_probe(&pdev->dev, NULL, NULL);
}
-static int msm_pdev_remove(struct platform_device *pdev)
+static void msm_pdev_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &msm_drm_ops);
-
- return 0;
-}
-
-void msm_drv_shutdown(struct platform_device *pdev)
-{
- struct msm_drm_private *priv = platform_get_drvdata(pdev);
- struct drm_device *drm = priv ? priv->dev : NULL;
-
- /*
- * Shutdown the hw if we're far enough along where things might be on.
- * If we run this too early, we'll end up panicking in any variety of
- * places. Since we don't register the drm device until late in
- * msm_drm_init, drm_dev->registered is used as an indicator that the
- * shutdown will be successful.
- */
- if (drm && drm->registered && priv->kms)
- drm_atomic_helper_shutdown(drm);
}
static struct platform_driver msm_platform_driver = {
.probe = msm_pdev_probe,
- .remove = msm_pdev_remove,
- .shutdown = msm_drv_shutdown,
+ .remove_new = msm_pdev_remove,
.driver = {
.name = "msm",
- .pm = &msm_pm_ops,
},
};
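
The .remove_new conversion above follows the tree-wide move to void-returning platform remove callbacks: the driver core never acted on the old int return value, so the callback can simply drop its "return 0". Below is a minimal sketch of that convention for an imaginary "demo" driver; it is an illustration of the API shape, not code from this patch. The same conversion is applied to the msm-mdss platform driver further down.

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "probed\n");
	return 0;
}

/* void return: there is nothing useful the core could do with an error here */
static void demo_remove(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "removed\n");
}

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.remove_new = demo_remove,
	.driver = {
		.name = "demo",
	},
};
module_platform_driver(demo_driver);

MODULE_DESCRIPTION("remove_new illustration");
MODULE_LICENSE("GPL");
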
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index e13a8cbd61c9..cd5bf658df66 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -65,6 +65,12 @@ enum msm_dp_controller {
MSM_DP_CONTROLLER_COUNT,
};
+enum msm_dsi_controller {
+ MSM_DSI_CONTROLLER_0,
+ MSM_DSI_CONTROLLER_1,
+ MSM_DSI_CONTROLLER_COUNT,
+};
+
#define MSM_GPU_MAX_RINGS 4
#define MAX_H_TILES_PER_DISPLAY 2
@@ -97,7 +103,6 @@ struct msm_display_topology {
/* Commit/Event thread specific structure */
struct msm_drm_thread {
struct drm_device *dev;
- unsigned int crtc_id;
struct kthread_worker *worker;
};
@@ -117,7 +122,7 @@ struct msm_drm_private {
struct hdmi *hdmi;
/* DSI is shared by mdp4 and mdp5 */
- struct msm_dsi *dsi[2];
+ struct msm_dsi *dsi[MSM_DSI_CONTROLLER_COUNT];
struct msm_dp *dp[MSM_DP_CONTROLLER_COUNT];
@@ -198,13 +203,9 @@ struct msm_drm_private {
struct workqueue_struct *wq;
unsigned int num_crtcs;
- struct drm_crtc *crtcs[MAX_CRTCS];
struct msm_drm_thread event_thread[MAX_CRTCS];
- unsigned int num_bridges;
- struct drm_bridge *bridges[MAX_BRIDGES];
-
/* VRAM carveout, used when no IOMMU: */
struct {
unsigned long size;
@@ -217,7 +218,7 @@ struct msm_drm_private {
} vram;
struct notifier_block vmap_notifier;
- struct shrinker shrinker;
+ struct shrinker *shrinker;
struct drm_atomic_state *pm_state;
@@ -279,10 +280,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan);
#endif
-void msm_gem_shrinker_init(struct drm_device *dev);
+int msm_gem_shrinker_init(struct drm_device *dev);
void msm_gem_shrinker_cleanup(struct drm_device *dev);
-int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
@@ -340,6 +340,7 @@ void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi
bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi);
bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi);
bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi);
+bool msm_dsi_wide_bus_enabled(struct msm_dsi *msm_dsi);
struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi);
#else
static inline void __init msm_dsi_register(void)
@@ -369,6 +370,10 @@ static inline bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi)
{
return false;
}
+static inline bool msm_dsi_wide_bus_enabled(struct msm_dsi *msm_dsi)
+{
+ return false;
+}
static inline struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
{
@@ -558,12 +563,13 @@ static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
extern const struct component_master_ops msm_drm_ops;
-int msm_pm_prepare(struct device *dev);
-void msm_pm_complete(struct device *dev);
+int msm_kms_pm_prepare(struct device *dev);
+void msm_kms_pm_complete(struct device *dev);
int msm_drv_probe(struct device *dev,
- int (*kms_init)(struct drm_device *dev));
-void msm_drv_shutdown(struct platform_device *pdev);
+ int (*kms_init)(struct drm_device *dev),
+ struct msm_kms *kms);
+void msm_kms_shutdown(struct platform_device *pdev);
#endif /* __MSM_DRV_H__ */
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index bf1e17dc4550..030bedac632d 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -25,9 +25,9 @@ module_param(fbdev, bool, 0600);
* fbdev funcs, to implement legacy fbdev interface on top of drm driver
*/
-FB_GEN_DEFAULT_DEFERRED_SYS_OPS(msm_fbdev,
- drm_fb_helper_damage_range,
- drm_fb_helper_damage_area)
+FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(msm_fbdev,
+ drm_fb_helper_damage_range,
+ drm_fb_helper_damage_area)
static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 20cfd86d2b32..db1e748daa75 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -222,9 +222,7 @@ static void put_pages(struct drm_gem_object *obj)
static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj,
unsigned madv)
{
- struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct page **p;
msm_gem_assert_locked(obj);
@@ -234,16 +232,29 @@ static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj,
return ERR_PTR(-EBUSY);
}
- p = get_pages(obj);
- if (IS_ERR(p))
- return p;
+ return get_pages(obj);
+}
+
+/*
+ * Update the pin count of the object, call under lru.lock
+ */
+void msm_gem_pin_obj_locked(struct drm_gem_object *obj)
+{
+ struct msm_drm_private *priv = obj->dev->dev_private;
+
+ msm_gem_assert_locked(obj);
+
+ to_msm_bo(obj)->pin_count++;
+ drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
+}
+
+static void pin_obj_locked(struct drm_gem_object *obj)
+{
+ struct msm_drm_private *priv = obj->dev->dev_private;
mutex_lock(&priv->lru.lock);
- msm_obj->pin_count++;
- update_lru_locked(obj);
+ msm_gem_pin_obj_locked(obj);
mutex_unlock(&priv->lru.lock);
-
- return p;
}
struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
@@ -252,6 +263,8 @@ struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
msm_gem_lock(obj);
p = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
+ if (!IS_ERR(p))
+ pin_obj_locked(obj);
msm_gem_unlock(obj);
return p;
@@ -463,7 +476,7 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct page **pages;
- int ret, prot = IOMMU_READ;
+ int prot = IOMMU_READ;
if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
prot |= IOMMU_WRITE;
@@ -480,11 +493,7 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
if (IS_ERR(pages))
return PTR_ERR(pages);
- ret = msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
- if (ret)
- msm_gem_unpin_locked(obj);
-
- return ret;
+ return msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
}
void msm_gem_unpin_locked(struct drm_gem_object *obj)
@@ -509,14 +518,11 @@ void msm_gem_unpin_locked(struct drm_gem_object *obj)
*/
void msm_gem_unpin_active(struct drm_gem_object *obj)
{
- struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- mutex_lock(&priv->lru.lock);
msm_obj->pin_count--;
GEM_WARN_ON(msm_obj->pin_count < 0);
update_lru_active(obj);
- mutex_unlock(&priv->lru.lock);
}
struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
@@ -539,8 +545,10 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
return PTR_ERR(vma);
ret = msm_gem_pin_vma_locked(obj, vma);
- if (!ret)
+ if (!ret) {
*iova = vma->iova;
+ pin_obj_locked(obj);
+ }
return ret;
}
@@ -599,9 +607,6 @@ static int clear_iova(struct drm_gem_object *obj,
if (!vma)
return 0;
- if (msm_gem_vma_inuse(vma))
- return -EBUSY;
-
msm_gem_vma_purge(vma);
msm_gem_vma_close(vma);
del_vma(vma);
@@ -652,7 +657,6 @@ void msm_gem_unpin_iova(struct drm_gem_object *obj,
msm_gem_lock(obj);
vma = lookup_vma(obj, aspace);
if (!GEM_WARN_ON(!vma)) {
- msm_gem_vma_unpin(vma);
msm_gem_unpin_locked(obj);
}
msm_gem_unlock(obj);
@@ -703,6 +707,8 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
if (IS_ERR(pages))
return ERR_CAST(pages);
+ pin_obj_locked(obj);
+
/* increment vmap_count *before* vmap() call, so shrinker can
* check vmap_count (is_vunmapable()) outside of msm_obj lock.
* This guarantees that we won't try to msm_gem_vunmap() this
@@ -981,11 +987,10 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
} else {
name = comm = NULL;
}
- seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
+ seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s]",
name, comm ? ":" : "", comm ? comm : "",
vma->aspace, vma->iova,
- vma->mapped ? "mapped" : "unmapped",
- msm_gem_vma_inuse(vma));
+ vma->mapped ? "mapped" : "unmapped");
kfree(comm);
}
@@ -1234,6 +1239,10 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32
list_add_tail(&msm_obj->node, &priv->objects);
mutex_unlock(&priv->obj_lock);
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto fail;
+
return obj;
fail:
@@ -1290,6 +1299,10 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
list_add_tail(&msm_obj->node, &priv->objects);
mutex_unlock(&priv->obj_lock);
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto fail;
+
return obj;
fail:
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 2bd6846c83a9..8ddef5443140 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -59,24 +59,16 @@ struct msm_fence_context;
struct msm_gem_vma {
struct drm_mm_node node;
- spinlock_t lock;
uint64_t iova;
struct msm_gem_address_space *aspace;
struct list_head list; /* node in msm_gem_object::vmas */
bool mapped;
- int inuse;
- uint32_t fence_mask;
- uint32_t fence[MSM_GPU_MAX_RINGS];
- struct msm_fence_context *fctx[MSM_GPU_MAX_RINGS];
};
struct msm_gem_vma *msm_gem_vma_new(struct msm_gem_address_space *aspace);
int msm_gem_vma_init(struct msm_gem_vma *vma, int size,
u64 range_start, u64 range_end);
-bool msm_gem_vma_inuse(struct msm_gem_vma *vma);
void msm_gem_vma_purge(struct msm_gem_vma *vma);
-void msm_gem_vma_unpin(struct msm_gem_vma *vma);
-void msm_gem_vma_unpin_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx);
int msm_gem_vma_map(struct msm_gem_vma *vma, int prot, struct sg_table *sgt, int size);
void msm_gem_vma_close(struct msm_gem_vma *vma);
@@ -142,6 +134,7 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
void msm_gem_unpin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace);
+void msm_gem_pin_obj_locked(struct drm_gem_object *obj);
struct page **msm_gem_pin_pages(struct drm_gem_object *obj);
void msm_gem_unpin_pages(struct drm_gem_object *obj);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
@@ -297,15 +290,13 @@ struct msm_gem_submit {
/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
#define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? */
#define BO_LOCKED 0x4000 /* obj lock is held */
-#define BO_OBJ_PINNED 0x2000 /* obj (pages) is pinned and on active list */
-#define BO_VMA_PINNED 0x1000 /* vma (virtual address) is pinned */
+#define BO_PINNED 0x2000 /* obj (pages) is pinned and on active list */
uint32_t flags;
union {
- struct msm_gem_object *obj;
+ struct drm_gem_object *obj;
uint32_t handle;
};
uint64_t iova;
- struct msm_gem_vma *vma;
} bos[];
};
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index c1d91863df05..5f68e31a3e4e 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -11,21 +11,6 @@
#include "msm_drv.h"
#include "msm_gem.h"
-int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
-{
- int ret;
-
- /* Ensure the mmap offset is initialized. We lazily initialize it,
- * so if it has not been first mmap'd directly as a GEM object, the
- * mmap offset will not be already initialized.
- */
- ret = drm_gem_create_mmap_offset(obj);
- if (ret)
- return ret;
-
- return drm_gem_prime_mmap(obj, vma);
-}
-
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
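
The helper removed above only existed to lazily create the mmap offset before delegating to drm_gem_prime_mmap(); with msm_gem_new() and msm_gem_import() now calling drm_gem_create_mmap_offset() at object creation (see the msm_gem.c hunks earlier), every later mapping path can assume the offset exists and the per-driver hook becomes unnecessary. A toy, plain-C sketch of that eager-versus-lazy trade-off (not kernel code, names invented):

#include <stdbool.h>
#include <stdio.h>

struct obj {
	bool has_offset;
	unsigned long offset;
};

static int create_offset(struct obj *o)
{
	if (o->has_offset)
		return 0;		/* idempotent, like drm_gem_create_mmap_offset() */
	o->offset = 0x10000;		/* placeholder for a real allocator */
	o->has_offset = true;
	return 0;
}

static int obj_new(struct obj *o)
{
	*o = (struct obj){ 0 };
	return create_offset(o);	/* eager: pay the cost once, up front */
}

static int obj_mmap(struct obj *o)
{
	/* no lazy-init fallback needed here any more */
	printf("mapping at offset %#lx\n", o->offset);
	return 0;
}

int main(void)
{
	struct obj o;

	if (obj_new(&o))
		return 1;
	return obj_mmap(&o);
}
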
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index f38296ad8743..5a7d48c02c4b 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -34,8 +34,7 @@ static bool can_block(struct shrink_control *sc)
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
- struct msm_drm_private *priv =
- container_of(shrinker, struct msm_drm_private, shrinker);
+ struct msm_drm_private *priv = shrinker->private_data;
unsigned count = priv->lru.dontneed.count;
if (can_swap())
@@ -100,8 +99,7 @@ active_evict(struct drm_gem_object *obj)
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
- struct msm_drm_private *priv =
- container_of(shrinker, struct msm_drm_private, shrinker);
+ struct msm_drm_private *priv = shrinker->private_data;
struct {
struct drm_gem_lru *lru;
bool (*shrink)(struct drm_gem_object *obj);
@@ -148,10 +146,11 @@ msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
struct shrink_control sc = {
.nr_to_scan = nr_to_scan,
};
- int ret;
+ unsigned long ret = SHRINK_STOP;
fs_reclaim_acquire(GFP_KERNEL);
- ret = msm_gem_shrinker_scan(&priv->shrinker, &sc);
+ if (priv->shrinker)
+ ret = msm_gem_shrinker_scan(priv->shrinker, &sc);
fs_reclaim_release(GFP_KERNEL);
return ret;
@@ -210,16 +209,24 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
*
* This function registers and sets up the msm shrinker.
*/
-void msm_gem_shrinker_init(struct drm_device *dev)
+int msm_gem_shrinker_init(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
- priv->shrinker.count_objects = msm_gem_shrinker_count;
- priv->shrinker.scan_objects = msm_gem_shrinker_scan;
- priv->shrinker.seeks = DEFAULT_SEEKS;
- WARN_ON(register_shrinker(&priv->shrinker, "drm-msm_gem"));
+
+ priv->shrinker = shrinker_alloc(0, "drm-msm_gem");
+ if (!priv->shrinker)
+ return -ENOMEM;
+
+ priv->shrinker->count_objects = msm_gem_shrinker_count;
+ priv->shrinker->scan_objects = msm_gem_shrinker_scan;
+ priv->shrinker->private_data = priv;
+
+ shrinker_register(priv->shrinker);
priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
+
+ return 0;
}
/**
@@ -232,8 +239,8 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
- if (priv->shrinker.nr_deferred) {
+ if (priv->shrinker) {
WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
- unregister_shrinker(&priv->shrinker);
+ shrinker_free(priv->shrinker);
}
}
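
These hunks move from a struct shrinker embedded in msm_drm_private, recovered via container_of(), to one allocated with shrinker_alloc(), which carries a private_data pointer, is published with shrinker_register() and torn down with shrinker_free(). A rough sketch of how that allocated-shrinker API is used in general, with invented "demo" names rather than code from this patch:

#include <linux/errno.h>
#include <linux/shrinker.h>

struct demo_cache {
	unsigned long nr_objects;
	struct shrinker *shrinker;
};

/* report how many objects could be freed right now */
static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
{
	struct demo_cache *cache = s->private_data;

	return cache->nr_objects;
}

/* free up to sc->nr_to_scan objects, report how many actually went */
static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
{
	struct demo_cache *cache = s->private_data;
	unsigned long freed = cache->nr_objects;

	if (freed > sc->nr_to_scan)
		freed = sc->nr_to_scan;
	cache->nr_objects -= freed;

	return freed ? freed : SHRINK_STOP;
}

static int demo_cache_init(struct demo_cache *cache)
{
	cache->shrinker = shrinker_alloc(0, "demo-cache");
	if (!cache->shrinker)
		return -ENOMEM;

	cache->shrinker->count_objects = demo_count;
	cache->shrinker->scan_objects = demo_scan;
	cache->shrinker->private_data = cache;

	shrinker_register(cache->shrinker);
	return 0;
}

static void demo_cache_fini(struct demo_cache *cache)
{
	shrinker_free(cache->shrinker);
}
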
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 63c96416e183..99744de6c05a 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -165,7 +165,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
drm_gem_object_get(obj);
- submit->bos[i].obj = to_msm_bo(obj);
+ submit->bos[i].obj = obj;
}
out_unlock:
@@ -251,7 +251,7 @@ out:
static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
unsigned cleanup_flags)
{
- struct drm_gem_object *obj = &submit->bos[i].obj->base;
+ struct drm_gem_object *obj = submit->bos[i].obj;
unsigned flags = submit->bos[i].flags & cleanup_flags;
/*
@@ -261,10 +261,7 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
*/
submit->bos[i].flags &= ~cleanup_flags;
- if (flags & BO_VMA_PINNED)
- msm_gem_vma_unpin(submit->bos[i].vma);
-
- if (flags & BO_OBJ_PINNED)
+ if (flags & BO_PINNED)
msm_gem_unpin_locked(obj);
if (flags & BO_LOCKED)
@@ -273,7 +270,7 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
{
- unsigned cleanup_flags = BO_VMA_PINNED | BO_OBJ_PINNED | BO_LOCKED;
+ unsigned cleanup_flags = BO_PINNED | BO_LOCKED;
submit_cleanup_bo(submit, i, cleanup_flags);
if (!(submit->bos[i].flags & BO_VALID))
@@ -287,7 +284,7 @@ static int submit_lock_objects(struct msm_gem_submit *submit)
retry:
for (i = 0; i < submit->nr_bos; i++) {
- struct msm_gem_object *msm_obj = submit->bos[i].obj;
+ struct drm_gem_object *obj = submit->bos[i].obj;
if (slow_locked == i)
slow_locked = -1;
@@ -295,7 +292,7 @@ retry:
contended = i;
if (!(submit->bos[i].flags & BO_LOCKED)) {
- ret = dma_resv_lock_interruptible(msm_obj->base.resv,
+ ret = dma_resv_lock_interruptible(obj->resv,
&submit->ticket);
if (ret)
goto fail;
@@ -321,9 +318,9 @@ fail:
submit_unlock_unpin_bo(submit, slow_locked);
if (ret == -EDEADLK) {
- struct msm_gem_object *msm_obj = submit->bos[contended].obj;
+ struct drm_gem_object *obj = submit->bos[contended].obj;
/* we lost out in a seqno race, lock and retry.. */
- ret = dma_resv_lock_slow_interruptible(msm_obj->base.resv,
+ ret = dma_resv_lock_slow_interruptible(obj->resv,
&submit->ticket);
if (!ret) {
submit->bos[contended].flags |= BO_LOCKED;
@@ -346,7 +343,7 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
int i, ret = 0;
for (i = 0; i < submit->nr_bos; i++) {
- struct drm_gem_object *obj = &submit->bos[i].obj->base;
+ struct drm_gem_object *obj = submit->bos[i].obj;
bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
/* NOTE: _reserve_shared() must happen before
@@ -384,12 +381,13 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
static int submit_pin_objects(struct msm_gem_submit *submit)
{
+ struct msm_drm_private *priv = submit->dev->dev_private;
int i, ret = 0;
submit->valid = true;
for (i = 0; i < submit->nr_bos; i++) {
- struct drm_gem_object *obj = &submit->bos[i].obj->base;
+ struct drm_gem_object *obj = submit->bos[i].obj;
struct msm_gem_vma *vma;
/* if locking succeeded, pin bo: */
@@ -403,9 +401,6 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
if (ret)
break;
- submit->bos[i].flags |= BO_OBJ_PINNED | BO_VMA_PINNED;
- submit->bos[i].vma = vma;
-
if (vma->iova == submit->bos[i].iova) {
submit->bos[i].flags |= BO_VALID;
} else {
@@ -416,6 +411,20 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
}
}
+ /*
+ * Doing a second loop while holding the LRU lock (a) avoids acquiring and
+ * dropping the LRU lock for each individual bo, and (b) avoids holding the
+ * LRU lock while calling msm_gem_pin_vma_locked() (which could trigger
+ * get_pages(), which could trigger reclaim, and if we held the LRU lock
+ * that could deadlock against the shrinker).
+ */
+ mutex_lock(&priv->lru.lock);
+ for (i = 0; i < submit->nr_bos; i++) {
+ msm_gem_pin_obj_locked(submit->bos[i].obj);
+ submit->bos[i].flags |= BO_PINNED;
+ }
+ mutex_unlock(&priv->lru.lock);
+
return ret;
}
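
The comment in the hunk above describes a two-phase shape: do the per-buffer work that can block (and even recurse into reclaim) with no shared lock held, then take the LRU lock exactly once and do only cheap bookkeeping for every buffer. A stripped-down, standalone C illustration of that shape, with a pthread mutex standing in for priv->lru.lock (names invented, not kernel code):

#include <pthread.h>
#include <stdio.h>

#define NR_BOS 4

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
static int pin_count[NR_BOS];

/* stands in for msm_gem_pin_vma_locked(): may allocate and hence reclaim,
 * so it must run without lru_lock held */
static int expensive_prepare(int i)
{
	return 0;
}

int main(void)
{
	int i;

	/* pass 1: slow work, no shared lock */
	for (i = 0; i < NR_BOS; i++)
		if (expensive_prepare(i))
			return 1;

	/* pass 2: one lock round-trip covers all the cheap bookkeeping */
	pthread_mutex_lock(&lru_lock);
	for (i = 0; i < NR_BOS; i++)
		pin_count[i]++;		/* like msm_gem_pin_obj_locked() */
	pthread_mutex_unlock(&lru_lock);

	for (i = 0; i < NR_BOS; i++)
		printf("bo %d pin_count %d\n", i, pin_count[i]);
	return 0;
}
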
@@ -424,7 +433,7 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)
int i;
for (i = 0; i < submit->nr_bos; i++) {
- struct drm_gem_object *obj = &submit->bos[i].obj->base;
+ struct drm_gem_object *obj = submit->bos[i].obj;
if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
dma_resv_add_fence(obj->resv, submit->user_fence,
@@ -436,7 +445,7 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)
}
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
- struct msm_gem_object **obj, uint64_t *iova, bool *valid)
+ struct drm_gem_object **obj, uint64_t *iova, bool *valid)
{
if (idx >= submit->nr_bos) {
DRM_ERROR("invalid buffer index: %u (out of %u)\n",
@@ -455,7 +464,7 @@ static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
}
/* process the reloc's and patch up the cmdstream as needed: */
-static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
+static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *obj,
uint32_t offset, uint32_t nr_relocs, struct drm_msm_gem_submit_reloc *relocs)
{
uint32_t i, last_offset = 0;
@@ -473,7 +482,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
/* For now, just map the entire thing. Eventually we probably
* want to do it page-by-page, w/ kmap() if not vmap()d..
*/
- ptr = msm_gem_get_vaddr_locked(&obj->base);
+ ptr = msm_gem_get_vaddr_locked(obj);
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
@@ -497,7 +506,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
/* offset in dwords: */
off = submit_reloc.submit_offset / 4;
- if ((off >= (obj->base.size / 4)) ||
+ if ((off >= (obj->size / 4)) ||
(off < last_offset)) {
DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
ret = -EINVAL;
@@ -524,7 +533,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
}
out:
- msm_gem_put_vaddr_locked(&obj->base);
+ msm_gem_put_vaddr_locked(obj);
return ret;
}
@@ -539,13 +548,13 @@ static void submit_cleanup(struct msm_gem_submit *submit, bool error)
unsigned i;
if (error)
- cleanup_flags |= BO_VMA_PINNED | BO_OBJ_PINNED;
+ cleanup_flags |= BO_PINNED;
for (i = 0; i < submit->nr_bos; i++) {
- struct msm_gem_object *msm_obj = submit->bos[i].obj;
+ struct drm_gem_object *obj = submit->bos[i].obj;
submit_cleanup_bo(submit, i, cleanup_flags);
if (error)
- drm_gem_object_put(&msm_obj->base);
+ drm_gem_object_put(obj);
}
}
@@ -554,7 +563,7 @@ void msm_submit_retire(struct msm_gem_submit *submit)
int i;
for (i = 0; i < submit->nr_bos; i++) {
- struct drm_gem_object *obj = &submit->bos[i].obj->base;
+ struct drm_gem_object *obj = submit->bos[i].obj;
drm_gem_object_put(obj);
}
@@ -861,17 +870,17 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out;
for (i = 0; i < args->nr_cmds; i++) {
- struct msm_gem_object *msm_obj;
+ struct drm_gem_object *obj;
uint64_t iova;
ret = submit_bo(submit, submit->cmd[i].idx,
- &msm_obj, &iova, NULL);
+ &obj, &iova, NULL);
if (ret)
goto out;
if (!submit->cmd[i].size ||
((submit->cmd[i].size + submit->cmd[i].offset) >
- msm_obj->base.size / 4)) {
+ obj->size / 4)) {
DRM_ERROR("invalid cmdstream size: %u\n", submit->cmd[i].size * 4);
ret = -EINVAL;
goto out;
@@ -882,7 +891,17 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (submit->valid)
continue;
- ret = submit_reloc(submit, msm_obj, submit->cmd[i].offset * 4,
+ if (!gpu->allow_relocs) {
+ if (submit->cmd[i].nr_relocs) {
+ DRM_ERROR("relocs not allowed\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ continue;
+ }
+
+ ret = submit_reloc(submit, obj, submit->cmd[i].offset * 4,
submit->cmd[i].nr_relocs, submit->cmd[i].relocs);
if (ret)
goto out;
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 98287ed99960..11e842dda73c 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -38,41 +38,12 @@ msm_gem_address_space_get(struct msm_gem_address_space *aspace)
return aspace;
}
-bool msm_gem_vma_inuse(struct msm_gem_vma *vma)
-{
- bool ret = true;
-
- spin_lock(&vma->lock);
-
- if (vma->inuse > 0)
- goto out;
-
- while (vma->fence_mask) {
- unsigned idx = ffs(vma->fence_mask) - 1;
-
- if (!msm_fence_completed(vma->fctx[idx], vma->fence[idx]))
- goto out;
-
- vma->fence_mask &= ~BIT(idx);
- }
-
- ret = false;
-
-out:
- spin_unlock(&vma->lock);
-
- return ret;
-}
-
/* Actually unmap memory for the vma */
void msm_gem_vma_purge(struct msm_gem_vma *vma)
{
struct msm_gem_address_space *aspace = vma->aspace;
unsigned size = vma->node.size;
- /* Print a message if we try to purge a vma in use */
- GEM_WARN_ON(msm_gem_vma_inuse(vma));
-
/* Don't do anything if the memory isn't mapped */
if (!vma->mapped)
return;
@@ -82,33 +53,6 @@ void msm_gem_vma_purge(struct msm_gem_vma *vma)
vma->mapped = false;
}
-static void vma_unpin_locked(struct msm_gem_vma *vma)
-{
- if (GEM_WARN_ON(!vma->inuse))
- return;
- if (!GEM_WARN_ON(!vma->iova))
- vma->inuse--;
-}
-
-/* Remove reference counts for the mapping */
-void msm_gem_vma_unpin(struct msm_gem_vma *vma)
-{
- spin_lock(&vma->lock);
- vma_unpin_locked(vma);
- spin_unlock(&vma->lock);
-}
-
-/* Replace pin reference with fence: */
-void msm_gem_vma_unpin_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx)
-{
- spin_lock(&vma->lock);
- vma->fctx[fctx->index] = fctx;
- vma->fence[fctx->index] = fctx->last_fence;
- vma->fence_mask |= BIT(fctx->index);
- vma_unpin_locked(vma);
- spin_unlock(&vma->lock);
-}
-
/* Map and pin vma: */
int
msm_gem_vma_map(struct msm_gem_vma *vma, int prot,
@@ -120,11 +64,6 @@ msm_gem_vma_map(struct msm_gem_vma *vma, int prot,
if (GEM_WARN_ON(!vma->iova))
return -EINVAL;
- /* Increase the usage counter */
- spin_lock(&vma->lock);
- vma->inuse++;
- spin_unlock(&vma->lock);
-
if (vma->mapped)
return 0;
@@ -146,9 +85,6 @@ msm_gem_vma_map(struct msm_gem_vma *vma, int prot,
if (ret) {
vma->mapped = false;
- spin_lock(&vma->lock);
- vma->inuse--;
- spin_unlock(&vma->lock);
}
return ret;
@@ -159,7 +95,7 @@ void msm_gem_vma_close(struct msm_gem_vma *vma)
{
struct msm_gem_address_space *aspace = vma->aspace;
- GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped);
+ GEM_WARN_ON(vma->mapped);
spin_lock(&aspace->lock);
if (vma->iova)
@@ -179,7 +115,6 @@ struct msm_gem_vma *msm_gem_vma_new(struct msm_gem_address_space *aspace)
if (!vma)
return NULL;
- spin_lock_init(&vma->lock);
vma->aspace = aspace;
return vma;
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 52db90e34ead..7f64c6667300 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -219,36 +219,36 @@ static void msm_gpu_devcoredump_free(void *data)
}
static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
- struct msm_gem_object *obj, u64 iova, bool full)
+ struct drm_gem_object *obj, u64 iova, bool full)
{
struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
/* Don't record write only objects */
- state_bo->size = obj->base.size;
+ state_bo->size = obj->size;
state_bo->iova = iova;
- BUILD_BUG_ON(sizeof(state_bo->name) != sizeof(obj->name));
+ BUILD_BUG_ON(sizeof(state_bo->name) != sizeof(to_msm_bo(obj)->name));
- memcpy(state_bo->name, obj->name, sizeof(state_bo->name));
+ memcpy(state_bo->name, to_msm_bo(obj)->name, sizeof(state_bo->name));
if (full) {
void *ptr;
- state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
+ state_bo->data = kvmalloc(obj->size, GFP_KERNEL);
if (!state_bo->data)
goto out;
- msm_gem_lock(&obj->base);
- ptr = msm_gem_get_vaddr_active(&obj->base);
- msm_gem_unlock(&obj->base);
+ msm_gem_lock(obj);
+ ptr = msm_gem_get_vaddr_active(obj);
+ msm_gem_unlock(obj);
if (IS_ERR(ptr)) {
kvfree(state_bo->data);
state_bo->data = NULL;
goto out;
}
- memcpy(state_bo->data, ptr, obj->base.size);
- msm_gem_put_vaddr(&obj->base);
+ memcpy(state_bo->data, ptr, obj->size);
+ msm_gem_put_vaddr(obj);
}
out:
state->nr_bos++;
@@ -749,13 +749,11 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct msm_ringbuffer *ring = submit->ring;
unsigned long flags;
- WARN_ON(!mutex_is_locked(&gpu->lock));
-
pm_runtime_get_sync(&gpu->pdev->dev);
- msm_gpu_hw_init(gpu);
+ mutex_lock(&gpu->lock);
- submit->seqno = submit->hw_fence->seqno;
+ msm_gpu_hw_init(gpu);
update_sw_cntrs(gpu);
@@ -781,8 +779,11 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
gpu->funcs->submit(gpu, submit);
gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
- pm_runtime_put(&gpu->pdev->dev);
hangcheck_timer_reset(gpu);
+
+ mutex_unlock(&gpu->lock);
+
+ pm_runtime_put(&gpu->pdev->dev);
}
/*
@@ -897,7 +898,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
gpu->irq = platform_get_irq(pdev, 0);
if (gpu->irq < 0) {
ret = gpu->irq;
- DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
goto fail;
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 7a4fa1b8655b..4252e3839fbc 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -285,6 +285,15 @@ struct msm_gpu {
/* True if the hardware supports expanded apriv (a650 and newer) */
bool hw_apriv;
+ /**
+ * @allow_relocs: allow relocs in SUBMIT ioctl
+ *
+ * Mesa won't use relocs for driver version 1.4.0 and later. This
+ * switch-over happened early enough in mesa a6xx bringup that we
+ * can disallow relocs for a6xx and newer.
+ */
+ bool allow_relocs;
+
struct thermal_cooling_device *cooling;
};
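
For context on what allow_relocs gates: a reloc asks the kernel to patch a resolved GPU address into the submitted command stream, which is only needed when userspace cannot predict buffer addresses up front. Per the comment above, Mesa stopped emitting relocs before a6xx support landed, so a6xx and newer can refuse them outright; the msm_gem_submit.c hunk earlier returns -EINVAL whenever nr_relocs is non-zero and relocs are disallowed. A simplified, standalone sketch of the patching idea itself; the field handling is approximate and not a reimplementation of submit_reloc():

#include <stdint.h>
#include <stdio.h>

struct reloc {
	uint32_t submit_offset;	/* byte offset of the placeholder dword */
	uint32_t reloc_idx;	/* which submitted buffer to point at */
	uint64_t reloc_offset;	/* offset within that buffer */
};

int main(void)
{
	uint32_t cmdstream[4] = { 0x70000000, 0xdeadbeef, 0, 0 };
	uint64_t bo_iova[2] = { 0x100001000ULL, 0x100040000ULL };
	struct reloc r = {
		.submit_offset = 4,
		.reloc_idx = 1,
		.reloc_offset = 0x20,
	};

	/* patch the placeholder dword with the resolved address */
	cmdstream[r.submit_offset / 4] =
		(uint32_t)(bo_iova[r.reloc_idx] + r.reloc_offset);

	printf("dword %u patched to 0x%08x\n",
	       r.submit_offset / 4, cmdstream[r.submit_offset / 4]);
	return 0;
}
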
diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
new file mode 100644
index 000000000000..84c21ec2ceea
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_kms.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/kthread.h>
+#include <linux/sched/mm.h>
+#include <uapi/linux/sched/types.h>
+
+#include <drm/drm_aperture.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_mode_config.h>
+#include <drm/drm_vblank.h>
+
+#include "disp/msm_disp_snapshot.h"
+#include "msm_drv.h"
+#include "msm_gem.h"
+#include "msm_kms.h"
+#include "msm_mmu.h"
+
+static const struct drm_mode_config_funcs mode_config_funcs = {
+ .fb_create = msm_framebuffer_create,
+ .atomic_check = msm_atomic_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
+ .atomic_commit_tail = msm_atomic_commit_tail,
+};
+
+static irqreturn_t msm_irq(int irq, void *arg)
+{
+ struct drm_device *dev = arg;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ BUG_ON(!kms);
+
+ return kms->funcs->irq(kms);
+}
+
+static void msm_irq_preinstall(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ BUG_ON(!kms);
+
+ kms->funcs->irq_preinstall(kms);
+}
+
+static int msm_irq_postinstall(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ BUG_ON(!kms);
+
+ if (kms->funcs->irq_postinstall)
+ return kms->funcs->irq_postinstall(kms);
+
+ return 0;
+}
+
+static int msm_irq_install(struct drm_device *dev, unsigned int irq)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ int ret;
+
+ if (irq == IRQ_NOTCONNECTED)
+ return -ENOTCONN;
+
+ msm_irq_preinstall(dev);
+
+ ret = request_irq(irq, msm_irq, 0, dev->driver->name, dev);
+ if (ret)
+ return ret;
+
+ kms->irq_requested = true;
+
+ ret = msm_irq_postinstall(dev);
+ if (ret) {
+ free_irq(irq, dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void msm_irq_uninstall(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ kms->funcs->irq_uninstall(kms);
+ if (kms->irq_requested)
+ free_irq(kms->irq, dev);
+}
+
+struct msm_vblank_work {
+ struct work_struct work;
+ struct drm_crtc *crtc;
+ bool enable;
+ struct msm_drm_private *priv;
+};
+
+static void vblank_ctrl_worker(struct work_struct *work)
+{
+ struct msm_vblank_work *vbl_work = container_of(work,
+ struct msm_vblank_work, work);
+ struct msm_drm_private *priv = vbl_work->priv;
+ struct msm_kms *kms = priv->kms;
+
+ if (vbl_work->enable)
+ kms->funcs->enable_vblank(kms, vbl_work->crtc);
+ else
+ kms->funcs->disable_vblank(kms, vbl_work->crtc);
+
+ kfree(vbl_work);
+}
+
+static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
+ struct drm_crtc *crtc, bool enable)
+{
+ struct msm_vblank_work *vbl_work;
+
+ vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
+ if (!vbl_work)
+ return -ENOMEM;
+
+ INIT_WORK(&vbl_work->work, vblank_ctrl_worker);
+
+ vbl_work->crtc = crtc;
+ vbl_work->enable = enable;
+ vbl_work->priv = priv;
+
+ queue_work(priv->wq, &vbl_work->work);
+
+ return 0;
+}
+
+int msm_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ if (!kms)
+ return -ENXIO;
+ drm_dbg_vbl(dev, "crtc=%u", crtc->base.id);
+ return vblank_ctrl_queue_work(priv, crtc, true);
+}
+
+void msm_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ if (!kms)
+ return;
+ drm_dbg_vbl(dev, "crtc=%u", crtc->base.id);
+ vblank_ctrl_queue_work(priv, crtc, false);
+}
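
The DRM core can call these enable/disable hooks from a context where the KMS driver's vblank callbacks, which may need to take locks or touch hardware and sleep, cannot run directly; so the request is packaged into a small work item (allocated with GFP_ATOMIC for the same reason) and executed later on priv->wq. A plain-C, non-kernel analogue of that split between queueing and execution (names invented):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct vbl_work {
	int crtc;
	bool enable;
	struct vbl_work *next;
};

static struct vbl_work *head, **tail = &head;

/* analogue of vblank_ctrl_queue_work(): cheap, never sleeps */
static int queue_vblank(int crtc, bool enable)
{
	struct vbl_work *w = calloc(1, sizeof(*w));

	if (!w)
		return -1;
	w->crtc = crtc;
	w->enable = enable;
	*tail = w;
	tail = &w->next;
	return 0;
}

/* analogue of vblank_ctrl_worker() running later on the workqueue */
static void drain_vblank_work(void)
{
	while (head) {
		struct vbl_work *w = head;

		head = w->next;
		if (!head)
			tail = &head;
		printf("%s vblank on crtc %d\n",
		       w->enable ? "enable" : "disable", w->crtc);
		free(w);
	}
}

int main(void)
{
	queue_vblank(0, true);	/* fast path: just remembers the request */
	queue_vblank(0, false);
	drain_vblank_work();	/* the part that may sleep runs here, later */
	return 0;
}
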
+
+struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
+{
+ struct msm_gem_address_space *aspace;
+ struct msm_mmu *mmu;
+ struct device *mdp_dev = dev->dev;
+ struct device *mdss_dev = mdp_dev->parent;
+ struct device *iommu_dev;
+
+ /*
+ * IOMMUs can be a part of MDSS device tree binding, or the
+ * MDP/DPU device.
+ */
+ if (device_iommu_mapped(mdp_dev))
+ iommu_dev = mdp_dev;
+ else
+ iommu_dev = mdss_dev;
+
+ mmu = msm_iommu_new(iommu_dev, 0);
+ if (IS_ERR(mmu))
+ return ERR_CAST(mmu);
+
+ if (!mmu) {
+ drm_info(dev, "no IOMMU, fallback to phys contig buffers for scanout\n");
+ return NULL;
+ }
+
+ aspace = msm_gem_address_space_create(mmu, "mdp_kms",
+ 0x1000, 0x100000000 - 0x1000);
+ if (IS_ERR(aspace)) {
+ dev_err(mdp_dev, "aspace create, error %pe\n", aspace);
+ mmu->funcs->destroy(mmu);
+ }
+
+ return aspace;
+}
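
Worth noting about the address space set up here: the IOVA window is the first 4 GiB minus its first page, i.e. [0x1000, 0x100000000), so iova 0 is never a valid display address (presumably so a stray zero address faults in the IOMMU rather than scanning out whatever happens to be mapped there). A trivial standalone check of that arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = 0x1000;			/* first page left unused */
	uint64_t size  = 0x100000000ULL - 0x1000;	/* 4 GiB minus that page */

	printf("iova window: [%#llx, %#llx)\n",
	       (unsigned long long)start,
	       (unsigned long long)(start + size));
	return 0;
}
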
+
+void msm_drm_kms_uninit(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct drm_device *ddev = priv->dev;
+ struct msm_kms *kms = priv->kms;
+ int i;
+
+ BUG_ON(!kms);
+
+ /* clean up event worker threads */
+ for (i = 0; i < priv->num_crtcs; i++) {
+ if (priv->event_thread[i].worker)
+ kthread_destroy_worker(priv->event_thread[i].worker);
+ }
+
+ drm_kms_helper_poll_fini(ddev);
+
+ msm_disp_snapshot_destroy(ddev);
+
+ pm_runtime_get_sync(dev);
+ msm_irq_uninstall(ddev);
+ pm_runtime_put_sync(dev);
+
+ if (kms && kms->funcs)
+ kms->funcs->destroy(kms);
+}
+
+int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
+{
+ struct msm_drm_private *priv = dev_get_drvdata(dev);
+ struct drm_device *ddev = priv->dev;
+ struct msm_kms *kms = priv->kms;
+ struct drm_crtc *crtc;
+ int ret;
+
+ /* the fw fb could be anywhere in memory */
+ ret = drm_aperture_remove_framebuffers(drv);
+ if (ret)
+ return ret;
+
+ ret = priv->kms_init(ddev);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to load kms\n");
+ priv->kms = NULL;
+ return ret;
+ }
+
+ /* Enable normalization of plane zpos */
+ ddev->mode_config.normalize_zpos = true;
+
+ ddev->mode_config.funcs = &mode_config_funcs;
+ ddev->mode_config.helper_private = &mode_config_helper_funcs;
+
+ kms->dev = ddev;
+ ret = kms->funcs->hw_init(kms);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
+ goto err_msm_uninit;
+ }
+
+ drm_helper_move_panel_connectors_to_head(ddev);
+
+ drm_for_each_crtc(crtc, ddev) {
+ struct msm_drm_thread *ev_thread;
+
+ /* initialize event thread */
+ ev_thread = &priv->event_thread[drm_crtc_index(crtc)];
+ ev_thread->dev = ddev;
+ ev_thread->worker = kthread_create_worker(0, "crtc_event:%d", crtc->base.id);
+ if (IS_ERR(ev_thread->worker)) {
+ ret = PTR_ERR(ev_thread->worker);
+ DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
+ ev_thread->worker = NULL;
+ goto err_msm_uninit;
+ }
+
+ sched_set_fifo(ev_thread->worker->task);
+ }
+
+ ret = drm_vblank_init(ddev, priv->num_crtcs);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
+ goto err_msm_uninit;
+ }
+
+ pm_runtime_get_sync(dev);
+ ret = msm_irq_install(ddev, kms->irq);
+ pm_runtime_put_sync(dev);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
+ goto err_msm_uninit;
+ }
+
+ ret = msm_disp_snapshot_init(ddev);
+ if (ret)
+ DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
+
+ drm_mode_config_reset(ddev);
+
+ return 0;
+
+err_msm_uninit:
+ return ret;
+}
+
+int msm_kms_pm_prepare(struct device *dev)
+{
+ struct msm_drm_private *priv = dev_get_drvdata(dev);
+ struct drm_device *ddev = priv ? priv->dev : NULL;
+
+ if (!priv || !priv->kms)
+ return 0;
+
+ return drm_mode_config_helper_suspend(ddev);
+}
+
+void msm_kms_pm_complete(struct device *dev)
+{
+ struct msm_drm_private *priv = dev_get_drvdata(dev);
+ struct drm_device *ddev = priv ? priv->dev : NULL;
+
+ if (!priv || !priv->kms)
+ return;
+
+ drm_mode_config_helper_resume(ddev);
+}
+
+void msm_kms_shutdown(struct platform_device *pdev)
+{
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct drm_device *drm = priv ? priv->dev : NULL;
+
+ /*
+ * Shutdown the hw if we're far enough along where things might be on.
+ * If we run this too early, we'll end up panicking in any variety of
+ * places. Since we don't register the drm device until late in
+ * msm_drm_init, drm_dev->registered is used as an indicator that the
+ * shutdown will be successful.
+ */
+ if (drm && drm->registered && priv->kms)
+ drm_atomic_helper_shutdown(drm);
+}
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 086a3f1ff956..44aa435d68ce 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -195,4 +195,7 @@ static inline void msm_kms_destroy(struct msm_kms *kms)
drm_for_each_crtc_reverse(crtc, dev) \
for_each_if (drm_crtc_mask(crtc) & (crtc_mask))
+int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv);
+void msm_drm_kms_uninit(struct device *dev);
+
#endif /* __MSM_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index 798bd4f3b662..6865db1e3ce8 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -10,10 +10,12 @@
#include <linux/irqchip.h>
#include <linux/irqdesc.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
-#include "msm_drv.h"
+#include "msm_mdss.h"
#include "msm_kms.h"
#define HW_REV 0x0
@@ -26,16 +28,6 @@
#define MIN_IB_BW 400000000UL /* Min ib vote 400MB */
-struct msm_mdss_data {
- u32 ubwc_version;
- /* can be read from register 0x58 */
- u32 ubwc_dec_version;
- u32 ubwc_swizzle;
- u32 ubwc_static;
- u32 highest_bank_bit;
- u32 macrotile_mode;
-};
-
struct msm_mdss {
struct device *dev;
@@ -185,12 +177,6 @@ static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
return 0;
}
-#define UBWC_1_0 0x10000000
-#define UBWC_2_0 0x20000000
-#define UBWC_3_0 0x30000000
-#define UBWC_4_0 0x40000000
-#define UBWC_4_3 0x40030000
-
static void msm_mdss_setup_ubwc_dec_20(struct msm_mdss *msm_mdss)
{
const struct msm_mdss_data *data = msm_mdss->mdss_data;
@@ -205,10 +191,10 @@ static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss)
(data->highest_bank_bit & 0x3) << 4 |
(data->macrotile_mode & 0x1) << 12;
- if (data->ubwc_version == UBWC_3_0)
+ if (data->ubwc_enc_version == UBWC_3_0)
value |= BIT(10);
- if (data->ubwc_version == UBWC_1_0)
+ if (data->ubwc_enc_version == UBWC_1_0)
value |= BIT(8);
writel_relaxed(value, msm_mdss->mmio + UBWC_STATIC);
@@ -224,7 +210,7 @@ static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss)
writel_relaxed(value, msm_mdss->mmio + UBWC_STATIC);
- if (data->ubwc_version == UBWC_3_0) {
+ if (data->ubwc_enc_version == UBWC_3_0) {
writel_relaxed(1, msm_mdss->mmio + UBWC_CTRL_2);
writel_relaxed(0, msm_mdss->mmio + UBWC_PREDICTION_MODE);
} else {
@@ -236,6 +222,18 @@ static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss)
}
}
+const struct msm_mdss_data *msm_mdss_get_mdss_data(struct device *dev)
+{
+ struct msm_mdss *mdss;
+
+ if (!dev)
+ return ERR_PTR(-EINVAL);
+
+ mdss = dev_get_drvdata(dev);
+
+ return mdss->mdss_data;
+}
+
static int msm_mdss_enable(struct msm_mdss *msm_mdss)
{
int ret;
@@ -268,6 +266,10 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss)
* UBWC_n and the rest of params comes from hw data.
*/
switch (msm_mdss->mdss_data->ubwc_dec_version) {
+ case 0: /* no UBWC */
+ case UBWC_1_0:
+ /* do nothing */
+ break;
case UBWC_2_0:
msm_mdss_setup_ubwc_dec_20(msm_mdss);
break;
@@ -495,25 +497,35 @@ static int mdss_probe(struct platform_device *pdev)
return 0;
}
-static int mdss_remove(struct platform_device *pdev)
+static void mdss_remove(struct platform_device *pdev)
{
struct msm_mdss *mdss = platform_get_drvdata(pdev);
of_platform_depopulate(&pdev->dev);
msm_mdss_destroy(mdss);
-
- return 0;
}
+static const struct msm_mdss_data msm8998_data = {
+ .ubwc_enc_version = UBWC_1_0,
+ .ubwc_dec_version = UBWC_1_0,
+ .highest_bank_bit = 2,
+};
+
+static const struct msm_mdss_data qcm2290_data = {
+ /* no UBWC */
+ .highest_bank_bit = 0x2,
+};
+
static const struct msm_mdss_data sc7180_data = {
- .ubwc_version = UBWC_2_0,
+ .ubwc_enc_version = UBWC_2_0,
.ubwc_dec_version = UBWC_2_0,
.ubwc_static = 0x1e,
+ .highest_bank_bit = 0x3,
};
static const struct msm_mdss_data sc7280_data = {
- .ubwc_version = UBWC_3_0,
+ .ubwc_enc_version = UBWC_3_0,
.ubwc_dec_version = UBWC_4_0,
.ubwc_swizzle = 6,
.ubwc_static = 1,
@@ -522,14 +534,14 @@ static const struct msm_mdss_data sc7280_data = {
};
static const struct msm_mdss_data sc8180x_data = {
- .ubwc_version = UBWC_3_0,
+ .ubwc_enc_version = UBWC_3_0,
.ubwc_dec_version = UBWC_3_0,
.highest_bank_bit = 3,
.macrotile_mode = 1,
};
static const struct msm_mdss_data sc8280xp_data = {
- .ubwc_version = UBWC_4_0,
+ .ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_0,
.ubwc_swizzle = 6,
.ubwc_static = 1,
@@ -538,13 +550,13 @@ static const struct msm_mdss_data sc8280xp_data = {
};
static const struct msm_mdss_data sdm845_data = {
- .ubwc_version = UBWC_2_0,
+ .ubwc_enc_version = UBWC_2_0,
.ubwc_dec_version = UBWC_2_0,
.highest_bank_bit = 2,
};
static const struct msm_mdss_data sm6350_data = {
- .ubwc_version = UBWC_2_0,
+ .ubwc_enc_version = UBWC_2_0,
.ubwc_dec_version = UBWC_2_0,
.ubwc_swizzle = 6,
.ubwc_static = 0x1e,
@@ -552,20 +564,28 @@ static const struct msm_mdss_data sm6350_data = {
};
static const struct msm_mdss_data sm8150_data = {
- .ubwc_version = UBWC_3_0,
+ .ubwc_enc_version = UBWC_3_0,
.ubwc_dec_version = UBWC_3_0,
.highest_bank_bit = 2,
};
static const struct msm_mdss_data sm6115_data = {
- .ubwc_version = UBWC_1_0,
+ .ubwc_enc_version = UBWC_1_0,
.ubwc_dec_version = UBWC_2_0,
.ubwc_swizzle = 7,
.ubwc_static = 0x11f,
+ .highest_bank_bit = 0x1,
+};
+
+static const struct msm_mdss_data sm6125_data = {
+ .ubwc_enc_version = UBWC_1_0,
+ .ubwc_dec_version = UBWC_3_0,
+ .ubwc_swizzle = 1,
+ .highest_bank_bit = 1,
};
static const struct msm_mdss_data sm8250_data = {
- .ubwc_version = UBWC_4_0,
+ .ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_0,
.ubwc_swizzle = 6,
.ubwc_static = 1,
@@ -575,7 +595,7 @@ static const struct msm_mdss_data sm8250_data = {
};
static const struct msm_mdss_data sm8550_data = {
- .ubwc_version = UBWC_4_0,
+ .ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_3,
.ubwc_swizzle = 6,
.ubwc_static = 1,
@@ -583,17 +603,17 @@ static const struct msm_mdss_data sm8550_data = {
.highest_bank_bit = 3,
.macrotile_mode = 1,
};
-
static const struct of_device_id mdss_dt_match[] = {
{ .compatible = "qcom,mdss" },
- { .compatible = "qcom,msm8998-mdss" },
- { .compatible = "qcom,qcm2290-mdss" },
+ { .compatible = "qcom,msm8998-mdss", .data = &msm8998_data },
+ { .compatible = "qcom,qcm2290-mdss", .data = &qcm2290_data },
{ .compatible = "qcom,sdm845-mdss", .data = &sdm845_data },
{ .compatible = "qcom,sc7180-mdss", .data = &sc7180_data },
{ .compatible = "qcom,sc7280-mdss", .data = &sc7280_data },
{ .compatible = "qcom,sc8180x-mdss", .data = &sc8180x_data },
{ .compatible = "qcom,sc8280xp-mdss", .data = &sc8280xp_data },
{ .compatible = "qcom,sm6115-mdss", .data = &sm6115_data },
+ { .compatible = "qcom,sm6125-mdss", .data = &sm6125_data },
{ .compatible = "qcom,sm6350-mdss", .data = &sm6350_data },
{ .compatible = "qcom,sm6375-mdss", .data = &sm6350_data },
{ .compatible = "qcom,sm8150-mdss", .data = &sm8150_data },
@@ -607,7 +627,7 @@ MODULE_DEVICE_TABLE(of, mdss_dt_match);
static struct platform_driver mdss_platform_driver = {
.probe = mdss_probe,
- .remove = mdss_remove,
+ .remove_new = mdss_remove,
.driver = {
.name = "msm-mdss",
.of_match_table = mdss_dt_match,
diff --git a/drivers/gpu/drm/msm/msm_mdss.h b/drivers/gpu/drm/msm/msm_mdss.h
new file mode 100644
index 000000000000..02bbab42adbc
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_mdss.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#ifndef __MSM_MDSS_H__
+#define __MSM_MDSS_H__
+
+struct msm_mdss_data {
+ u32 ubwc_enc_version;
+ /* can be read from register 0x58 */
+ u32 ubwc_dec_version;
+ u32 ubwc_swizzle;
+ u32 ubwc_static;
+ u32 highest_bank_bit;
+ u32 macrotile_mode;
+};
+
+#define UBWC_1_0 0x10000000
+#define UBWC_2_0 0x20000000
+#define UBWC_3_0 0x30000000
+#define UBWC_4_0 0x40000000
+#define UBWC_4_3 0x40030000
+
+const struct msm_mdss_data *msm_mdss_get_mdss_data(struct device *dev);
+
+#endif /* __MSM_MDSS_H__ */
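
The new header splits what was a single ubwc_version into an encoder version (the UBWC flavour the rest of the SoC produces) and a decoder version (what the MDSS real-time path can consume), which is why entries such as sc7280 pair UBWC_3_0 encoding with UBWC_4_0 decoding. The constants appear to pack the major revision in the top nibble and the minor revision below it (UBWC_4_3 = 0x40030000); the decode below is an inference from those values, not something the patch documents:

#include <stdint.h>
#include <stdio.h>

#define UBWC_MAJOR(v) ((unsigned)((uint32_t)(v) >> 28))
#define UBWC_MINOR(v) ((unsigned)(((uint32_t)(v) >> 16) & 0xfff))

int main(void)
{
	uint32_t versions[] = { 0x10000000, 0x20000000, 0x30000000,
				0x40000000, 0x40030000 };

	for (unsigned i = 0; i < sizeof(versions) / sizeof(versions[0]); i++)
		printf("0x%08x -> UBWC %u.%u\n", (unsigned)versions[i],
		       UBWC_MAJOR(versions[i]), UBWC_MINOR(versions[i]));
	return 0;
}
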
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 8d5687d5ed78..5adc51f7ab59 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -310,7 +310,7 @@ static void snapshot_buf(struct msm_rd_state *rd,
struct msm_gem_submit *submit, int idx,
uint64_t iova, uint32_t size, bool full)
{
- struct msm_gem_object *obj = submit->bos[idx].obj;
+ struct drm_gem_object *obj = submit->bos[idx].obj;
unsigned offset = 0;
const char *buf;
@@ -318,7 +318,7 @@ static void snapshot_buf(struct msm_rd_state *rd,
offset = iova - submit->bos[idx].iova;
} else {
iova = submit->bos[idx].iova;
- size = obj->base.size;
+ size = obj->size;
}
/*
@@ -335,7 +335,7 @@ static void snapshot_buf(struct msm_rd_state *rd,
if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ))
return;
- buf = msm_gem_get_vaddr_active(&obj->base);
+ buf = msm_gem_get_vaddr_active(obj);
if (IS_ERR(buf))
return;
@@ -343,7 +343,7 @@ static void snapshot_buf(struct msm_rd_state *rd,
rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
- msm_gem_put_vaddr_locked(&obj->base);
+ msm_gem_put_vaddr_locked(obj);
}
/* called under gpu->lock */
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index b60199184409..95257ab0185d 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -16,25 +16,26 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
struct msm_gem_submit *submit = to_msm_submit(job);
struct msm_fence_context *fctx = submit->ring->fctx;
struct msm_gpu *gpu = submit->gpu;
+ struct msm_drm_private *priv = gpu->dev->dev_private;
int i;
msm_fence_init(submit->hw_fence, fctx);
+ submit->seqno = submit->hw_fence->seqno;
+
+ mutex_lock(&priv->lru.lock);
+
for (i = 0; i < submit->nr_bos; i++) {
- struct drm_gem_object *obj = &submit->bos[i].obj->base;
+ struct drm_gem_object *obj = submit->bos[i].obj;
- msm_gem_vma_unpin_fenced(submit->bos[i].vma, fctx);
msm_gem_unpin_active(obj);
- submit->bos[i].flags &= ~(BO_VMA_PINNED | BO_OBJ_PINNED);
+ submit->bos[i].flags &= ~BO_PINNED;
}
- /* TODO move submit path over to using a per-ring lock.. */
- mutex_lock(&gpu->lock);
+ mutex_unlock(&priv->lru.lock);
msm_gpu_submit(gpu, submit);
- mutex_unlock(&gpu->lock);
-
return dma_fence_get(submit->hw_fence);
}
@@ -94,8 +95,9 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
sched_timeout = MAX_SCHEDULE_TIMEOUT;
ret = drm_sched_init(&ring->sched, &msm_sched_ops,
- num_hw_submissions, 0, sched_timeout,
- NULL, NULL, to_msm_bo(ring->bo)->name, gpu->dev->dev);
+ DRM_SCHED_PRIORITY_COUNT,
+ num_hw_submissions, 0, sched_timeout,
+ NULL, NULL, to_msm_bo(ring->bo)->name, gpu->dev->dev);
if (ret) {
goto fail;
}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
index 698b333abccd..0d6beb8cd39a 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.h
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -30,6 +30,8 @@ struct msm_gpu_submit_stats {
struct msm_rbmemptrs {
volatile uint32_t rptr;
volatile uint32_t fence;
+ /* Introduced on A7xx */
+ volatile uint32_t bv_fence;
volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT];
volatile u64 ttbr0;